diff --git a/contrib/build/build.go b/contrib/build/build.go index 07601e07..e3678290 100644 --- a/contrib/build/build.go +++ b/contrib/build/build.go @@ -125,15 +125,11 @@ func main() { } build("pktd", ".", &conf) - build("pktwallet", "./pktwallet", &conf) build("pktctl", "./cmd/pktctl", &conf) - build("checksig", "./cmd/checksig", &conf) - build("pld", "./lnd/cmd/lnd", &conf) - build("pldctl", "./lnd/cmd/lncli", &conf) if strings.Contains(strings.Join(os.Args, "|"), "--test") { test() } else { fmt.Println("Pass the --test flag if you want to run the tests as well") } - fmt.Println("Everything looks good, type `./bin/pktwallet --create` to make a wallet") + fmt.Println("Everything looks good, type `./bin/pktd` to launch the full node.") } diff --git a/go.mod b/go.mod index d2b1e6d3..8708d85c 100644 --- a/go.mod +++ b/go.mod @@ -9,80 +9,33 @@ replace github.com/coreos/bbolt => go.etcd.io/bbolt v1.3.5 replace google.golang.org/grpc => google.golang.org/grpc v1.29.1 require ( - git.schwanenlied.me/yawning/bsaes.git v0.0.0-20180720073208-c0276d75487e // indirect - github.com/NebulousLabs/fastrand v0.0.0-20181203155948-6fb6489aac4e // indirect - github.com/NebulousLabs/go-upnp v0.0.0-20180202185039-29b680b06c82 - github.com/Yawning/aez v0.0.0-20180114000226-4dad034d9db2 github.com/aead/chacha20 v0.0.0-20180709150244-8b13a72661da github.com/aead/siphash v1.0.1 github.com/arl/statsviz v0.2.2-0.20201115121518-5ea9f0cf1bd1 github.com/btcsuite/winsvc v1.0.0 - github.com/coreos/bbolt v0.0.0-00010101000000-000000000000 // indirect - github.com/coreos/etcd v3.3.22+incompatible - github.com/coreos/go-semver v0.3.0 // indirect - github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf // indirect - github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/dchest/blake2b v1.0.0 - github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect - github.com/dustin/go-humanize v1.0.0 // indirect 
- github.com/emirpasic/gods v1.12.1-0.20200630092735-7e2349589531 github.com/fsnotify/fsnotify v1.4.10-0.20200417215612-7f4cf4dd2b52 // indirect - github.com/go-errors/errors v1.0.1 - github.com/go-openapi/strfmt v0.19.5 // indirect - github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect - github.com/golang/protobuf v1.4.3 + github.com/golang/protobuf v1.4.3 // indirect github.com/golang/snappy v0.0.2 - github.com/google/btree v1.0.0 // indirect github.com/gorilla/websocket v1.4.3-0.20200912193213-c3dd95aea977 - github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 - github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 - github.com/grpc-ecosystem/grpc-gateway v1.14.3 - github.com/jackpal/gateway v1.0.5 - github.com/jackpal/go-nat-pmp v0.0.0-20170405195558-28a68d0c24ad - github.com/jedib0t/go-pretty v4.3.0+incompatible github.com/jessevdk/go-flags v1.4.1-0.20200711081900-c17162fe8fd7 - github.com/jonboulle/clockwork v0.1.0 // indirect github.com/json-iterator/go v1.1.11-0.20200806011408-6821bec9fa5c - github.com/juju/clock v0.0.0-20190205081909-9c5c9712527c // indirect - github.com/juju/errors v0.0.0-20190806202954-0232dcc7464d // indirect - github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8 // indirect - github.com/juju/retry v0.0.0-20180821225755-9058e192b216 // indirect - github.com/juju/testing v0.0.0-20190723135506-ce30eb24acd2 // indirect - github.com/juju/utils v0.0.0-20180820210520-bf9cc5bdd62d // indirect - github.com/juju/version v0.0.0-20180108022336-b64dbd566305 // indirect github.com/kkdai/bstream v1.0.0 - github.com/lightninglabs/protobuf-hex-display v1.3.3-0.20191212020323-b444784ce75d - github.com/ltcsuite/ltcd v0.0.0-20190101042124-f37f8bf35796 - github.com/mattn/go-runewidth v0.0.9 // indirect - github.com/miekg/dns v0.0.0-20171125082028-79bfde677fa8 + github.com/kr/pretty v0.1.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.1 // indirect 
github.com/nxadm/tail v1.4.6-0.20201001195649-edf6bc2dfc36 // indirect github.com/onsi/ginkgo v1.14.3-0.20201013214636-dfe369837f25 github.com/onsi/gomega v1.10.3 - github.com/prometheus/client_golang v0.9.3 github.com/sethgrid/pester v1.1.1-0.20200617174401-d2ad9ec9a8b6 - github.com/soheilhy/cmux v0.1.4 // indirect github.com/stretchr/testify v1.6.1 - github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 // indirect - github.com/tv42/zbase32 v0.0.0-20160707012821-501572607d02 - github.com/urfave/cli v1.18.0 - github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect - go.etcd.io/bbolt v1.3.6-0.20200807205753-f6be82302843 - go.uber.org/zap v1.14.1 // indirect golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897 - golang.org/x/net v0.0.0-20201029221708-28c70e62bb1d - golang.org/x/sys v0.0.0-20201029080932-201ba4db2418 + golang.org/x/net v0.0.0-20201029221708-28c70e62bb1d // indirect + golang.org/x/sys v0.15.0 // indirect golang.org/x/text v0.3.4 // indirect - golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect - google.golang.org/genproto v0.0.0-20201021134325-0d71844de594 // indirect - google.golang.org/grpc v1.34.0-dev.0.20201021230544-4e8458e5c638 - gopkg.in/errgo.v1 v1.0.1 // indirect - gopkg.in/macaroon-bakery.v2 v2.0.1 - gopkg.in/macaroon.v2 v2.0.0 - gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 // indirect + google.golang.org/protobuf v1.24.0 // indirect + gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 // indirect - sigs.k8s.io/yaml v1.1.0 // indirect ) diff --git a/go.sum b/go.sum index ded99c1a..22156792 100644 --- a/go.sum +++ b/go.sum @@ -1,89 +1,28 @@ -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/NebulousLabs/fastrand 
v0.0.0-20181203155948-6fb6489aac4e h1:n+DcnTNkQnHlwpsrHoQtkrJIO7CBx029fw6oR4vIob4= -github.com/NebulousLabs/fastrand v0.0.0-20181203155948-6fb6489aac4e/go.mod h1:Bdzq+51GR4/0DIhaICZEOm+OHvXGwwB2trKZ8B4Y6eQ= -github.com/NebulousLabs/go-upnp v0.0.0-20180202185039-29b680b06c82 h1:MG93+PZYs9PyEsj/n5/haQu2gK0h4tUtSy9ejtMwWa0= -github.com/NebulousLabs/go-upnp v0.0.0-20180202185039-29b680b06c82/go.mod h1:GbuBk21JqF+driLX3XtJYNZjGa45YDoa9IqCTzNSfEc= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/Yawning/aez v0.0.0-20180114000226-4dad034d9db2 h1:2be4ykKKov3M1yISM2E8gnGXZ/N2SsPawfnGiXxaYEU= -github.com/Yawning/aez v0.0.0-20180114000226-4dad034d9db2/go.mod h1:9pIqrY6SXNL8vjRQE5Hd/OL5GyK/9MrGUWs87z/eFfk= -github.com/Yawning/bsaes v0.0.0-20180720073208-c0276d75487e h1:n88VxLC80RPVHbFG/kq7ItMizCVRPCyLj63UMqxLkOw= -github.com/Yawning/bsaes v0.0.0-20180720073208-c0276d75487e/go.mod h1:3JAJz+vEO82SkYEkAa2lRPkQC7lslUY24HX3929i2Ec= github.com/aead/chacha20 v0.0.0-20180709150244-8b13a72661da h1:KjTM2ks9d14ZYCvmHS9iAKVt9AyzRSqNU1qabPih5BY= github.com/aead/chacha20 v0.0.0-20180709150244-8b13a72661da/go.mod h1:eHEWzANqSiWQsof+nXEI9bUVUyV6F53Fp89EuCh2EAA= github.com/aead/siphash v1.0.1 h1:FwHfE/T45KPKYuuSAKyyvE+oPWcaQ+CUmFW0bPlM+kg= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= github.com/arl/statsviz v0.2.2-0.20201115121518-5ea9f0cf1bd1 h1:k6L9CoSCgZjUXhMhJgmaMx2WhW54cpBKHBoa6tmDcKg= github.com/arl/statsviz v0.2.2-0.20201115121518-5ea9f0cf1bd1/go.mod h1:Dg/DhcWPSzBVk70gVbZWcymzHDkYRhVpeScx5l+Zj7o= -github.com/asaskevich/govalidator 
v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= -github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= -github.com/btcsuite/golangcrypto v0.0.0-20150304025918-53f62d9b43e8/go.mod h1:tYvUd8KLhm/oXvUeSEs2VlLghFjQt9+ZaF9ghH0JNjc= -github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= -github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= -github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0 h1:J9B4L7e3oqhXOcm+2IuNApwzQec85lE+QaikUcCs+dk= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/coreos/etcd v3.3.22+incompatible h1:AnRMUyVdVvh1k7lHe61YEd227+CLoNogQuAypztGSK4= -github.com/coreos/etcd v3.3.22+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= 
-github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU= -github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dchest/blake2b v1.0.0 h1:KK9LimVmE0MjRl9095XJmKqZ+iLxWATvlcpVFRtaw6s= github.com/dchest/blake2b v1.0.0/go.mod h1:U034kXgbJpCle2wSk5ybGIVhOSHCVLMDqOzcPEA0F7s= -github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/emirpasic/gods v1.12.1-0.20200630092735-7e2349589531 h1:gNOxjQ2UtCFsNdUvfF8fcifUheqb1z3tcDNso+QMDuk= -github.com/emirpasic/gods v1.12.1-0.20200630092735-7e2349589531/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= 
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/frankban/quicktest v1.2.2 h1:xfmOhhoH5fGPgbEAlhLpJH9p0z/0Qizio9osmvn9IUY= -github.com/frankban/quicktest v1.2.2/go.mod h1:Qh/WofXFeiAFII1aEBu529AtJo6Zg2VHscnEsbBnJ20= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.4.10-0.20200417215612-7f4cf4dd2b52 h1:0NmERxogGTU8hgzOhRKNoKivtBZkDW29GeuJtK9e0sc= github.com/fsnotify/fsnotify v1.4.10-0.20200417215612-7f4cf4dd2b52/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w= -github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-openapi/errors v0.19.2 h1:a2kIyV3w+OS3S97zxUndRVD46+FhGOUBDFY7nmu4CsY= -github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= -github.com/go-openapi/strfmt v0.19.5 h1:0utjKrw+BAh8s57XE9Xz8DUBsVvPmRUB6styvl9wWIM= -github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= -github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo= -github.com/gogo/protobuf v1.1.1/go.mod 
h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= @@ -97,92 +36,35 @@ github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/snappy v0.0.2 h1:aeE13tS0IiQgFjYdoL8qN3K1N2bXXtI6Vi51/y7BpMw= github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.2.1-0.20190312032427-6f77996f0c42/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/websocket v1.4.3-0.20200912193213-c3dd95aea977 h1:a5PtLMWJYzuNNFNzGNl0oHZUsMJbE7qxvjSLbA3boiY= github.com/gorilla/websocket v1.4.3-0.20200912193213-c3dd95aea977/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 h1:Iju5GlWwrvL6UBg4zJJt3btmonfrMlCDdsejg4CZE7c= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.14.3 h1:OCJlWkOUoTnl0neNGlf4fUm3TmbEtguw7vR+nGtnDjY= -github.com/grpc-ecosystem/grpc-gateway v1.14.3/go.mod h1:6CwZWGDSPRJidgKAtJVvND6soZe6fT7iteq8wDPdhb0= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/jackpal/gateway v1.0.5 h1:qzXWUJfuMdlLMtt0a3Dgt+xkWQiA5itDEITVJtuSwMc= -github.com/jackpal/gateway v1.0.5/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA= -github.com/jackpal/go-nat-pmp v0.0.0-20170405195558-28a68d0c24ad h1:heFfj7z0pGsNCekUlsFhO2jstxO4b5iQ665LjwM5mDc= -github.com/jackpal/go-nat-pmp v0.0.0-20170405195558-28a68d0c24ad/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= -github.com/jedib0t/go-pretty v4.3.0+incompatible h1:CGs8AVhEKg/n9YbUenWmNStRW2PHJzaeDodcfvRAbIo= -github.com/jedib0t/go-pretty v4.3.0+incompatible/go.mod h1:XemHduiw8R651AF9Pt4FwCTKeG3oo7hrHJAoznj9nag= -github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod 
h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.4.1-0.20200711081900-c17162fe8fd7 h1:Ug59miTxVKVg5Oi2S5uHlKOIV5jBx4Hb2u0jIxxDaSs= github.com/jessevdk/go-flags v1.4.1-0.20200711081900-c17162fe8fd7/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= github.com/json-iterator/go v1.1.11-0.20200806011408-6821bec9fa5c h1:pyHLN175+U/9YIGgS34PCGLWQcw2tGiDNpnXaQv9U2Y= github.com/json-iterator/go v1.1.11-0.20200806011408-6821bec9fa5c/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/juju/clock v0.0.0-20190205081909-9c5c9712527c h1:3UvYABOQRhJAApj9MdCN+Ydv841ETSoy6xLzdmmr/9A= -github.com/juju/clock v0.0.0-20190205081909-9c5c9712527c/go.mod h1:nD0vlnrUjcjJhqN5WuCWZyzfd5AHZAC9/ajvbSx69xA= -github.com/juju/errors v0.0.0-20190806202954-0232dcc7464d h1:hJXjZMxj0SWlMoQkzeZDLi2cmeiWKa7y1B8Rg+qaoEc= -github.com/juju/errors v0.0.0-20190806202954-0232dcc7464d/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q= -github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8 h1:UUHMLvzt/31azWTN/ifGWef4WUqvXk0iRqdhdy/2uzI= -github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U= -github.com/juju/retry v0.0.0-20180821225755-9058e192b216 h1:/eQL7EJQKFHByJe3DeE8Z36yqManj9UY5zppDoQi4FU= -github.com/juju/retry v0.0.0-20180821225755-9058e192b216/go.mod h1:OohPQGsr4pnxwD5YljhQ+TZnuVRYpa5irjugL1Yuif4= -github.com/juju/testing v0.0.0-20190723135506-ce30eb24acd2 h1:Pp8RxiF4rSoXP9SED26WCfNB28/dwTDpPXS8XMJR8rc= -github.com/juju/testing v0.0.0-20190723135506-ce30eb24acd2/go.mod h1:63prj8cnj0tU0S9OHjGJn+b1h0ZghCndfnbQolrYTwA= -github.com/juju/utils v0.0.0-20180820210520-bf9cc5bdd62d h1:irPlN9z5VCe6BTsqVsxheCZH99OFSmqSVyTigW4mEoY= 
-github.com/juju/utils v0.0.0-20180820210520-bf9cc5bdd62d/go.mod h1:6/KLg8Wz/y2KVGWEpkK9vMNGkOnu4k/cqs8Z1fKjTOk= -github.com/juju/version v0.0.0-20180108022336-b64dbd566305 h1:lQxPJ1URr2fjsKnJRt/BxiIxjLt9IKGvS+0injMHbag= -github.com/juju/version v0.0.0-20180108022336-b64dbd566305/go.mod h1:kE8gK5X0CImdr7qpSKl3xB2PmpySSmfj7zVbkZFs81U= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/kkdai/bstream v1.0.0 h1:Se5gHwgp2VT2uHfDrkbbgbgEvV9cimLELwrPJctSjg8= github.com/kkdai/bstream v1.0.0/go.mod h1:FDnDOHt5Yx4p3FaHcioFT0QjDOtgUpvjeZqAs+NVZZA= -github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/lightninglabs/protobuf-hex-display v1.3.3-0.20191212020323-b444784ce75d h1:QWD/5MPnaZfUVP7P8wLa4M8Td2DI7XXHXt2vhVtUgGI= -github.com/lightninglabs/protobuf-hex-display v1.3.3-0.20191212020323-b444784ce75d/go.mod h1:KDb67YMzoh4eudnzClmvs2FbiLG9vxISmLApUkCa4uI= -github.com/ltcsuite/ltcd v0.0.0-20190101042124-f37f8bf35796 h1:sjOGyegMIhvgfq5oaue6Td+hxZuf3tDC8lAPrFldqFw= -github.com/ltcsuite/ltcd v0.0.0-20190101042124-f37f8bf35796/go.mod 
h1:3p7ZTf9V1sNPI5H8P3NkTFF4LuwMdPl2DodF60qAKqY= -github.com/ltcsuite/ltcutil v0.0.0-20181217130922-17f3b04680b6/go.mod h1:8Vg/LTOO0KYa/vlHWJ6XZAevPQThGH5sufO0Hrou/lA= -github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= -github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/miekg/dns v0.0.0-20171125082028-79bfde677fa8 h1:PRMAcldsl4mXKJeRNB/KVNz6TlbS6hk2Rs42PqgU3Ws= -github.com/miekg/dns v0.0.0-20171125082028-79bfde677fa8/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.6-0.20201001195649-edf6bc2dfc36 h1:PRRpSsmsTtwhP1qI6upsrOzE5M8ic156VLo+rCfVUJo= github.com/nxadm/tail 
v1.4.6-0.20201001195649-edf6bc2dfc36/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.3-0.20201013214636-dfe369837f25 h1:fJfvJUUCt/J+eEtCQ0IEwk21eis9Bvts7kQHUc6dS0g= @@ -191,88 +73,27 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.3 h1:gph6h/qe9GSUw1NhH1gp+qb+h8rXD8Cy60Z32Qw3ELA= github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3 h1:9iH4JKXLzFbOAdtqv/a+j8aewx2Y8lAjAydhbaScPF8= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod 
h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.4.0 h1:7etb9YClo3a6HjLzfl6rIQaU+FDfi0VSX39io3aQ+DM= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084 h1:sofwID9zm4tzrgykg80hfFph1mryUeLRsUfoocVVmRY= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/rogpeppe/fastuuid v1.2.0 h1:Ppwyp6VYCF1nvBTXL3trRso7mXMlRrw9ooo375wvi2s= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/sethgrid/pester v1.1.1-0.20200617174401-d2ad9ec9a8b6 h1:PZ6YTNMEyy6GMAL+xsBHP3Ohjq4DIhNviW4evP+BzB0= github.com/sethgrid/pester v1.1.1-0.20200617174401-d2ad9ec9a8b6/go.mod h1:hEUINb4RqvDxtoCaU0BNT/HV4ig5kfgOasrf1xcvr0A= -github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0/go.mod 
h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= -github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tv42/zbase32 v0.0.0-20160707012821-501572607d02 h1:tcJ6OjwOMvExLlzrAVZute09ocAGa7KqOON60++Gz4E= -github.com/tv42/zbase32 v0.0.0-20160707012821-501572607d02/go.mod h1:tHlrkM198S068ZqfrO6S8HsoJq2bF3ETfTL+kt4tInY= -github.com/urfave/cli v1.18.0 h1:m9MfmZWX7bwr9kUcs/Asr95j0IVXzGNNc+/5ku2m26Q= -github.com/urfave/cli v1.18.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0= -go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= -go.etcd.io/bbolt v1.3.6-0.20200807205753-f6be82302843 h1:g0YWcnTxZ70pMN+rjjHC2/ba4T+R6okysNm3KdSt7gA= -go.etcd.io/bbolt v1.3.6-0.20200807205753-f6be82302843/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= -go.mongodb.org/mongo-driver v1.0.3 h1:GKoji1ld3tw2aC+GX1wbr/J2fX13yNacEYoJ8Nhr0yU= 
-go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= -go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= -go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= -go.uber.org/zap v1.14.1 h1:nYDKopTbvAPq/NrUVZwT15y2lpROBiLLyoRTbXOYWOo= -go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= -golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897 h1:pLI5jrR7OSLijeIDcmRxNmw2api+jEfxLoykJVice/E= golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= -golang.org/x/lint 
v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201029221708-28c70e62bb1d h1:dOiJ2n2cMwGLce/74I/QHMbnpk5GfY7InR8rczoMqRM= @@ -280,49 +101,35 @@ golang.org/x/net v0.0.0-20201029221708-28c70e62bb1d/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201029080932-201ba4db2418 h1:HlFl4V6pEMziuLXyRkm5BIYq1y1GAbb02pRlWvI54OM= golang.org/x/sys v0.0.0-20201029080932-201ba4db2418/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 h1:+DCIGbF/swA92ohVg0//6X2IVY3KZs6p9mix0ziNYJM= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5 h1:hKsoRgsbwY1NafxrwTs+k64bikrLBkAgPir1TNCj3Zs= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod 
h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20201021134325-0d71844de594 h1:JZWUHUjZJojCHxs9ZZLFsnRGKVBXBoOHGxeTSt6OE+Q= -google.golang.org/genproto v0.0.0-20201021134325-0d71844de594/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -334,25 +141,12 @@ google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/errgo.v1 v1.0.1 h1:oQFRXzZ7CkBGdm1XZm/EbQYaYNNEElNBOd09M6cqNso= -gopkg.in/errgo.v1 v1.0.1/go.mod h1:3NjfXwocQRYAPTq4/fzX+CwUhPRcR/azYRhj8G+LqMo= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/macaroon-bakery.v2 v2.0.1 
h1:0N1TlEdfLP4HXNCg7MQUMp5XwvOoxk+oe9Owr2cpvsc= -gopkg.in/macaroon-bakery.v2 v2.0.1/go.mod h1:B4/T17l+ZWGwxFSZQmlBwp25x+og7OkhETfr3S9MbIA= -gopkg.in/macaroon.v2 v2.0.0 h1:LVWycAfeJBUjCIqfR9gqlo7I8vmiXRr51YEOZ1suop8= -gopkg.in/macaroon.v2 v2.0.0/go.mod h1:+I6LnTMkm/uV5ew/0nsulNjL16SK4+C8yDmRUzHR17I= -gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 h1:VpOs+IwYnYBaFnrNAeB8UUWtL3vEUnzSCL1nVjPhqrw= -gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -361,7 +155,3 @@ gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclp gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= -sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/lnd/Dockerfile b/lnd/Dockerfile deleted file 
mode 100644 index d66220a3..00000000 --- a/lnd/Dockerfile +++ /dev/null @@ -1,43 +0,0 @@ -FROM golang:1.14.5-alpine as builder - -# Force Go to use the cgo based DNS resolver. This is required to ensure DNS -# queries required to connect to linked containers succeed. -ENV GODEBUG netdns=cgo - -# Pass a tag, branch or a commit using build-arg. This allows a docker -# image to be built from a specified Git state. The default image -# will use the Git tip of master by default. -ARG checkout="master" - -# Install dependencies and build the binaries. -RUN apk add --no-cache --update alpine-sdk \ - git \ - make \ - gcc \ -&& git clone https://github.com/lightningnetwork/lnd /go/src/github.com/lightningnetwork/lnd \ -&& cd /go/src/github.com/lightningnetwork/lnd \ -&& git checkout $checkout \ -&& make \ -&& make install tags="signrpc walletrpc chainrpc invoicesrpc" - -# Start a new, final image. -FROM alpine as final - -# Define a root volume for data persistence. -VOLUME /root/.lnd - -# Add bash, jq and ca-certs, for quality of life and SSL-related reasons. -RUN apk --no-cache add \ - bash \ - jq \ - ca-certificates - -# Copy the binaries from the builder image. -COPY --from=builder /go/bin/lncli /bin/ -COPY --from=builder /go/bin/lnd /bin/ - -# Expose lnd ports (p2p, rpc). -EXPOSE 9735 10009 - -# Specify the start command and entrypoint as the lnd daemon. 
-ENTRYPOINT ["lnd"] diff --git a/lnd/LICENSE b/lnd/LICENSE deleted file mode 100644 index 70f1f0df..00000000 --- a/lnd/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (C) 2015-2018 Lightning Labs and The Lightning Network Developers - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
diff --git a/lnd/Makefile b/lnd/Makefile deleted file mode 100644 index 22d7b9d8..00000000 --- a/lnd/Makefile +++ /dev/null @@ -1,343 +0,0 @@ -PKG := github.com/lightningnetwork/lnd -ESCPKG := github.com\/lightningnetwork\/lnd -MOBILE_PKG := $(PKG)/mobile - -BTCD_PKG := github.com/btcsuite/btcd -GOVERALLS_PKG := github.com/mattn/goveralls -LINT_PKG := github.com/golangci/golangci-lint/cmd/golangci-lint -GOACC_PKG := github.com/ory/go-acc -FALAFEL_PKG := github.com/lightninglabs/falafel -GOIMPORTS_PKG := golang.org/x/tools/cmd/goimports -GOFUZZ_BUILD_PKG := github.com/dvyukov/go-fuzz/go-fuzz-build -GOFUZZ_PKG := github.com/dvyukov/go-fuzz/go-fuzz - -GO_BIN := ${GOPATH}/bin -BTCD_BIN := $(GO_BIN)/btcd -GOMOBILE_BIN := GO111MODULE=off $(GO_BIN)/gomobile -GOVERALLS_BIN := $(GO_BIN)/goveralls -LINT_BIN := $(GO_BIN)/golangci-lint -GOACC_BIN := $(GO_BIN)/go-acc -GOFUZZ_BUILD_BIN := $(GO_BIN)/go-fuzz-build -GOFUZZ_BIN := $(GO_BIN)/go-fuzz - -BTCD_DIR :=${GOPATH}/src/$(BTCD_PKG) -MOBILE_BUILD_DIR :=${GOPATH}/src/$(MOBILE_PKG)/build -IOS_BUILD_DIR := $(MOBILE_BUILD_DIR)/ios -IOS_BUILD := $(IOS_BUILD_DIR)/Lndmobile.framework -ANDROID_BUILD_DIR := $(MOBILE_BUILD_DIR)/android -ANDROID_BUILD := $(ANDROID_BUILD_DIR)/Lndmobile.aar - -COMMIT := $(shell git describe --abbrev=40 --dirty) -COMMIT_HASH := $(shell git rev-parse HEAD) - -BTCD_COMMIT := $(shell cat go.mod | \ - grep $(BTCD_PKG) | \ - tail -n1 | \ - awk -F " " '{ print $$2 }' | \ - awk -F "/" '{ print $$1 }') - -LINT_COMMIT := v1.18.0 -GOACC_COMMIT := ddc355013f90fea78d83d3a6c71f1d37ac07ecd5 -FALAFEL_COMMIT := v0.7.1 -GOFUZZ_COMMIT := 21309f307f61 - -DEPGET := cd /tmp && GO111MODULE=on go get -v -GOBUILD := GO111MODULE=on go build -v -GOINSTALL := GO111MODULE=on go install -v -GOTEST := GO111MODULE=on go test - -GOVERSION := $(shell go version | awk '{print $$3}') -GOFILES_NOVENDOR = $(shell find . 
-type f -name '*.go' -not -path "./vendor/*") - -RM := rm -f -CP := cp -MAKE := make -XARGS := xargs -L 1 - -include make/testing_flags.mk -include make/release_flags.mk -include make/fuzz_flags.mk - -DEV_TAGS := $(if ${tags},$(DEV_TAGS) ${tags},$(DEV_TAGS)) - -# We only return the part inside the double quote here to avoid escape issues -# when calling the external release script. The second parameter can be used to -# add additional ldflags if needed (currently only used for the release). -make_ldflags = $(2) -X $(PKG)/build.Commit=$(COMMIT) \ - -X $(PKG)/build.CommitHash=$(COMMIT_HASH) \ - -X $(PKG)/build.GoVersion=$(GOVERSION) \ - -X $(PKG)/build.RawTags=$(shell echo $(1) | sed -e 's/ /,/g') - -LDFLAGS := -ldflags "$(call make_ldflags, ${tags}, -s -w)" -DEV_LDFLAGS := -ldflags "$(call make_ldflags, $(DEV_TAGS))" -ITEST_LDFLAGS := -ldflags "$(call make_ldflags, $(ITEST_TAGS))" - -# For the release, we want to remove the symbol table and debug information (-s) -# and omit the DWARF symbol table (-w). Also we clear the build ID. -RELEASE_LDFLAGS := $(call make_ldflags, $(RELEASE_TAGS), -s -w -buildid=) - -# Linting uses a lot of memory, so keep it under control by limiting the number -# of workers if requested. 
-ifneq ($(workers),) -LINT_WORKERS = --concurrency=$(workers) -endif - -LINT = $(LINT_BIN) run -v $(LINT_WORKERS) - -GREEN := "\\033[0;32m" -NC := "\\033[0m" -define print - echo $(GREEN)$1$(NC) -endef - -default: scratch - -all: scratch check install - -# ============ -# DEPENDENCIES -# ============ - -$(GOVERALLS_BIN): - @$(call print, "Fetching goveralls.") - go get -u $(GOVERALLS_PKG) - -$(LINT_BIN): - @$(call print, "Fetching linter") - $(DEPGET) $(LINT_PKG)@$(LINT_COMMIT) - -$(GOACC_BIN): - @$(call print, "Fetching go-acc") - $(DEPGET) $(GOACC_PKG)@$(GOACC_COMMIT) - -btcd: - @$(call print, "Installing btcd.") - $(DEPGET) $(BTCD_PKG)@$(BTCD_COMMIT) - -falafel: - @$(call print, "Installing falafel.") - $(DEPGET) $(FALAFEL_PKG)@$(FALAFEL_COMMIT) - -goimports: - @$(call print, "Installing goimports.") - $(DEPGET) $(GOIMPORTS_PKG) - -$(GOFUZZ_BIN): - @$(call print, "Fetching go-fuzz") - $(DEPGET) $(GOFUZZ_PKG)@$(GOFUZZ_COMMIT) - -$(GOFUZZ_BUILD_BIN): - @$(call print, "Fetching go-fuzz-build") - $(DEPGET) $(GOFUZZ_BUILD_PKG)@$(GOFUZZ_COMMIT) - -# ============ -# INSTALLATION -# ============ - -build: - @$(call print, "Building debug lnd and lncli.") - $(GOBUILD) -tags="$(DEV_TAGS)" -o lnd-debug $(DEV_LDFLAGS) $(PKG)/cmd/lnd - $(GOBUILD) -tags="$(DEV_TAGS)" -o lncli-debug $(DEV_LDFLAGS) $(PKG)/cmd/lncli - -build-itest: - @$(call print, "Building itest lnd and lncli.") - $(GOBUILD) -tags="$(ITEST_TAGS)" -o lnd-itest $(ITEST_LDFLAGS) $(PKG)/cmd/lnd - $(GOBUILD) -tags="$(ITEST_TAGS)" -o lncli-itest $(ITEST_LDFLAGS) $(PKG)/cmd/lncli - -build-itest-windows: - @$(call print, "Building itest lnd and lncli.") - $(GOBUILD) -tags="$(ITEST_TAGS)" -o lnd-itest.exe $(ITEST_LDFLAGS) $(PKG)/cmd/lnd - $(GOBUILD) -tags="$(ITEST_TAGS)" -o lncli-itest.exe $(ITEST_LDFLAGS) $(PKG)/cmd/lncli - -install: - @$(call print, "Installing lnd and lncli.") - $(GOINSTALL) -tags="${tags}" $(LDFLAGS) $(PKG)/cmd/lnd - $(GOINSTALL) -tags="${tags}" $(LDFLAGS) $(PKG)/cmd/lncli - -release: - @$(call 
print, "Releasing lnd and lncli binaries.") - $(VERSION_CHECK) - ./scripts/release.sh build-release "$(VERSION_TAG)" "$(BUILD_SYSTEM)" "$(RELEASE_TAGS)" "$(RELEASE_LDFLAGS)" - -scratch: build - - -# ======= -# TESTING -# ======= - -check: unit itest - -itest-only: - @$(call print, "Running integration tests with ${backend} backend.") - $(ITEST) - lntest/itest/log_check_errors.sh - -itest: btcd build-itest itest-only - -itest-parallel: btcd - @$(call print, "Building lnd binary") - CGO_ENABLED=0 $(GOBUILD) -tags="$(ITEST_TAGS)" -o lntest/itest/lnd-itest $(ITEST_LDFLAGS) $(PKG)/cmd/lnd - - @$(call print, "Building itest binary for $(backend) backend") - CGO_ENABLED=0 $(GOTEST) -v ./lntest/itest -tags="$(DEV_TAGS) $(RPC_TAGS) rpctest $(backend)" -logoutput -goroutinedump -c -o lntest/itest/itest.test - - @$(call print, "Running tests") - rm -rf lntest/itest/*.log lntest/itest/.logs-* - echo "$$(seq 0 $$(expr $(ITEST_PARALLELISM) - 1))" | xargs -P $(ITEST_PARALLELISM) -n 1 -I {} scripts/itest_part.sh {} $(NUM_ITEST_TRANCHES) $(TEST_FLAGS) - -itest-parallel-windows: btcd - @$(call print, "Building lnd binary") - CGO_ENABLED=0 $(GOBUILD) -tags="$(ITEST_TAGS)" -o lntest/itest/lnd-itest.exe $(ITEST_LDFLAGS) $(PKG)/cmd/lnd - - @$(call print, "Building itest binary for $(backend) backend") - CGO_ENABLED=0 $(GOTEST) -v ./lntest/itest -tags="$(DEV_TAGS) $(RPC_TAGS) rpctest $(backend)" -logoutput -goroutinedump -c -o lntest/itest/itest.test.exe - - @$(call print, "Running tests") - EXEC_SUFFIX=".exe" echo "$$(seq 0 $$(expr $(ITEST_PARALLELISM) - 1))" | xargs -P $(ITEST_PARALLELISM) -n 1 -I {} scripts/itest_part.sh {} $(NUM_ITEST_TRANCHES) $(TEST_FLAGS) - -itest-windows: btcd build-itest-windows itest-only - -unit: btcd - @$(call print, "Running unit tests.") - $(UNIT) - -unit-cover: $(GOACC_BIN) - @$(call print, "Running unit coverage tests.") - $(GOACC_BIN) $(COVER_PKG) -- -tags="$(DEV_TAGS) $(LOG_TAGS)" - - -unit-race: - @$(call print, "Running unit race tests.") - env 
CGO_ENABLED=1 GORACE="history_size=7 halt_on_errors=1" $(UNIT_RACE) - -goveralls: $(GOVERALLS_BIN) - @$(call print, "Sending coverage report.") - $(GOVERALLS_BIN) -coverprofile=coverage.txt -service=travis-ci - - -travis-race: btcd unit-race - -travis-cover: btcd unit-cover goveralls - -# ============= -# FLAKE HUNTING -# ============= - -flakehunter: build-itest - @$(call print, "Flake hunting ${backend} integration tests.") - while [ $$? -eq 0 ]; do $(ITEST); done - -flake-unit: - @$(call print, "Flake hunting unit tests.") - while [ $$? -eq 0 ]; do GOTRACEBACK=all $(UNIT) -count=1; done - -flakehunter-parallel: - @$(call print, "Flake hunting ${backend} integration tests in parallel.") - while [ $$? -eq 0 ]; do make itest-parallel tranches=1 parallel=${ITEST_PARALLELISM} icase='${icase}' backend='${backend}'; done - -# ============= -# FUZZING -# ============= -fuzz-build: $(GOFUZZ_BUILD_BIN) - @$(call print, "Creating fuzz harnesses for packages '$(FUZZPKG)'.") - scripts/fuzz.sh build "$(FUZZPKG)" - -fuzz-run: $(GOFUZZ_BIN) - @$(call print, "Fuzzing packages '$(FUZZPKG)'.") - scripts/fuzz.sh run "$(FUZZPKG)" "$(FUZZ_TEST_RUN_TIME)" "$(FUZZ_TEST_TIMEOUT)" "$(FUZZ_NUM_PROCESSES)" "$(FUZZ_BASE_WORKDIR)" - -# ========= -# UTILITIES -# ========= - -fmt: - @$(call print, "Formatting source.") - gofmt -l -w -s $(GOFILES_NOVENDOR) - -lint: $(LINT_BIN) - @$(call print, "Linting source.") - $(LINT) - -list: - @$(call print, "Listing commands.") - @$(MAKE) -qp | \ - awk -F':' '/^[a-zA-Z0-9][^$$#\/\t=]*:([^=]|$$)/ {split($$1,A,/ /);for(i in A)print A[i]}' | \ - grep -v Makefile | \ - sort - -rpc: - @$(call print, "Compiling protos.") - cd ./lnrpc; ./gen_protos.sh - -rpc-format: - @$(call print, "Formatting protos.") - cd ./lnrpc; find . -name "*.proto" | xargs clang-format --style=file -i - -rpc-check: rpc - @$(call print, "Verifying protos.") - for rpc in $$(find lnrpc/ -name "*.proto" | $(XARGS) awk '/ rpc /{print $$2}'); do if ! 
grep -q $$rpc lnrpc/rest-annotations.yaml; then echo "RPC $$rpc not added to lnrpc/rest-annotations.yaml"; exit 1; fi; done - if test -n "$$(git describe --dirty | grep dirty)"; then echo "Protos not properly formatted or not compiled with v3.4.0"; git status; git diff; exit 1; fi - -sample-conf-check: - @$(call print, "Making sure every flag has an example in the sample-lnd.conf file") - for flag in $$(GO_FLAGS_COMPLETION=1 go run -tags="$(RELEASE_TAGS)" $(PKG)/cmd/lnd -- | grep -v help | cut -c3-); do if ! grep -q $$flag sample-lnd.conf; then echo "Command line flag --$$flag not added to sample-lnd.conf"; exit 1; fi; done - -mobile-rpc: falafel goimports - @$(call print, "Creating mobile RPC from protos.") - cd ./mobile; ./gen_bindings.sh $(FALAFEL_COMMIT) - -vendor: - @$(call print, "Re-creating vendor directory.") - rm -r vendor/; GO111MODULE=on go mod vendor - -ios: vendor mobile-rpc - @$(call print, "Building iOS framework ($(IOS_BUILD)).") - mkdir -p $(IOS_BUILD_DIR) - $(GOMOBILE_BIN) bind -target=ios -tags="mobile $(DEV_TAGS) autopilotrpc" $(LDFLAGS) -v -o $(IOS_BUILD) $(MOBILE_PKG) - -android: vendor mobile-rpc - @$(call print, "Building Android library ($(ANDROID_BUILD)).") - mkdir -p $(ANDROID_BUILD_DIR) - $(GOMOBILE_BIN) bind -target=android -tags="mobile $(DEV_TAGS) autopilotrpc" $(LDFLAGS) -v -o $(ANDROID_BUILD) $(MOBILE_PKG) - -mobile: ios android - -clean: - @$(call print, "Cleaning source.$(NC)") - $(RM) ./lnd-debug ./lncli-debug - $(RM) ./lnd-itest ./lncli-itest - $(RM) -r ./vendor .vendor-new - - -.PHONY: all \ - btcd \ - default \ - build \ - install \ - scratch \ - check \ - itest-only \ - itest \ - unit \ - unit-cover \ - unit-race \ - falafel \ - goveralls \ - travis-race \ - travis-cover \ - travis-itest \ - flakehunter \ - flake-unit \ - fmt \ - lint \ - list \ - rpc \ - rpc-format \ - rpc-check \ - mobile-rpc \ - vendor \ - ios \ - android \ - mobile \ - clean diff --git a/lnd/README.md b/lnd/README.md deleted file mode 100644 index 
6d882390..00000000 --- a/lnd/README.md +++ /dev/null @@ -1,97 +0,0 @@ -## Lightning Network Daemon - -[![Build Status](https://img.shields.io/travis/lightningnetwork/lnd.svg)](https://travis-ci.org/lightningnetwork/lnd) -[![MIT licensed](https://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/lightningnetwork/lnd/blob/master/LICENSE) -[![Irc](https://img.shields.io/badge/chat-on%20freenode-brightgreen.svg)](https://webchat.freenode.net/?channels=lnd) -[![Godoc](https://godoc.org/github.com/lightningnetwork/lnd?status.svg)](https://godoc.org/github.com/lightningnetwork/lnd) - - - -The Lightning Network Daemon (`lnd`) - is a complete implementation of a -[Lightning Network](https://lightning.network) node. `lnd` has several pluggable back-end -chain services including [`btcd`](https://github.com/btcsuite/btcd) (a -full-node), [`bitcoind`](https://github.com/bitcoin/bitcoin), and -[`neutrino`](https://github.com/pkt-cash/pktd/neutrino) (a new experimental light client). The project's codebase uses the -[btcsuite](https://github.com/btcsuite/) set of Bitcoin libraries, and also -exports a large set of isolated re-usable Lightning Network related libraries -within it. In the current state `lnd` is capable of: -* Creating channels. -* Closing channels. -* Completely managing all channel states (including the exceptional ones!). -* Maintaining a fully authenticated+validated channel graph. -* Performing path finding within the network, passively forwarding incoming payments. -* Sending outgoing [onion-encrypted payments](https://github.com/lightningnetwork/lightning-onion) -through the network. -* Updating advertised fee schedules. -* Automatic channel management ([`autopilot`](https://github.com/lightningnetwork/lnd/tree/master/autopilot)). - -## Lightning Network Specification Compliance -`lnd` _fully_ conforms to the [Lightning Network specification -(BOLTs)](https://github.com/lightningnetwork/lightning-rfc). 
BOLT stands for: -Basis of Lightning Technology. The specifications are currently being drafted -by several groups of implementers based around the world including the -developers of `lnd`. The set of specification documents as well as our -implementation of the specification are still a work-in-progress. With that -said, the current status of `lnd`'s BOLT compliance is: - - - [X] BOLT 1: Base Protocol - - [X] BOLT 2: Peer Protocol for Channel Management - - [X] BOLT 3: Bitcoin Transaction and Script Formats - - [X] BOLT 4: Onion Routing Protocol - - [X] BOLT 5: Recommendations for On-chain Transaction Handling - - [X] BOLT 7: P2P Node and Channel Discovery - - [X] BOLT 8: Encrypted and Authenticated Transport - - [X] BOLT 9: Assigned Feature Flags - - [X] BOLT 10: DNS Bootstrap and Assisted Node Location - - [X] BOLT 11: Invoice Protocol for Lightning Payments - -## Developer Resources - -The daemon has been designed to be as developer friendly as possible in order -to facilitate application development on top of `lnd`. Two primary RPC -interfaces are exported: an HTTP REST API, and a [gRPC](https://grpc.io/) -service. The exported API's are not yet stable, so be warned: they may change -drastically in the near future. - -An automatically generated set of documentation for the RPC APIs can be found -at [api.lightning.community](https://api.lightning.community). A set of developer -resources including talks, articles, and example applications can be found at: -[dev.lightning.community](https://dev.lightning.community). - -Finally, we also have an active -[Slack](https://lightning.engineering/slack.html) where protocol developers, application developers, testers and users gather to -discuss various aspects of `lnd` and also Lightning in general. - -## Installation - In order to build from source, please see [the installation - instructions](docs/INSTALL.md). 
- -## Docker - To run lnd from Docker, please see the main [Docker instructions](docs/DOCKER.md) - -## IRC - * irc.freenode.net - * channel #lnd - * [webchat](https://webchat.freenode.net/?channels=lnd) - -## Safety - -When operating a mainnet `lnd` node, please refer to our [operational safety -guildelines](docs/safety.md). It is important to note that `lnd` is still -**beta** software and that ignoring these operational guidelines can lead to -loss of funds. - -## Security - -The developers of `lnd` take security _very_ seriously. The disclosure of -security vulnerabilities helps us secure the health of `lnd`, privacy of our -users, and also the health of the Lightning Network as a whole. If you find -any issues regarding security or privacy, please disclose the information -responsibly by sending an email to security at lightning dot engineering, -preferably encrypted using our designated PGP key -(`91FE464CD75101DA6B6BAB60555C6465E5BCB3AF`) which can be found -[here](https://gist.githubusercontent.com/Roasbeef/6fb5b52886183239e4aa558f83d085d3/raw/5fa96010af201628bcfa61e9309d9b13d23d220f/security@lightning.engineering). - -## Further reading -* [Step-by-step send payment guide with docker](https://github.com/lightningnetwork/lnd/tree/master/docker) -* [Contribution guide](https://github.com/lightningnetwork/lnd/blob/master/docs/code_contribution_guidelines.md) diff --git a/lnd/autopilot/agent.go b/lnd/autopilot/agent.go deleted file mode 100644 index 4cb6d16a..00000000 --- a/lnd/autopilot/agent.go +++ /dev/null @@ -1,876 +0,0 @@ -package autopilot - -import ( - "bytes" - "math/rand" - "net" - "sync" - "time" - - "github.com/davecgh/go-spew/spew" - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/pktlog/log" -) - -// Config couples all the items that an autopilot agent needs to function. 
-// All items within the struct MUST be populated for the Agent to be able to -// carry out its duties. -type Config struct { - // Self is the identity public key of the Lightning Network node that - // is being driven by the agent. This is used to ensure that we don't - // accidentally attempt to open a channel with ourselves. - Self *btcec.PublicKey - - // Heuristic is an attachment heuristic which will govern to whom we - // open channels to, and also what those channels look like in terms of - // desired capacity. The Heuristic will take into account the current - // state of the graph, our set of open channels, and the amount of - // available funds when determining how channels are to be opened. - // Additionally, a heuristic make also factor in extra-graph - // information in order to make more pertinent recommendations. - Heuristic AttachmentHeuristic - - // ChanController is an interface that is able to directly manage the - // creation, closing and update of channels within the network. - ChanController ChannelController - - // ConnectToPeer attempts to connect to the peer using one of its - // advertised addresses. The boolean returned signals whether the peer - // was already connected. - ConnectToPeer func(*btcec.PublicKey, []net.Addr) (bool, er.R) - - // DisconnectPeer attempts to disconnect the peer with the given public - // key. - DisconnectPeer func(*btcec.PublicKey) er.R - - // WalletBalance is a function closure that should return the current - // available balance of the backing wallet. - WalletBalance func() (btcutil.Amount, er.R) - - // Graph is an abstract channel graph that the Heuristic and the Agent - // will use to make decisions w.r.t channel allocation and placement - // within the graph. - Graph ChannelGraph - - // Constraints is the set of constraints the autopilot must adhere to - // when opening channels. 
- Constraints AgentConstraints - - // TODO(roasbeef): add additional signals from fee rates and revenue of - // currently opened channels -} - -// channelState is a type that represents the set of active channels of the -// backing LN node that the Agent should be aware of. This type contains a few -// helper utility methods. -type channelState map[lnwire.ShortChannelID]LocalChannel - -// Channels returns a slice of all the active channels. -func (c channelState) Channels() []LocalChannel { - chans := make([]LocalChannel, 0, len(c)) - for _, channel := range c { - chans = append(chans, channel) - } - return chans -} - -// ConnectedNodes returns the set of nodes we currently have a channel with. -// This information is needed as we want to avoid making repeated channels with -// any node. -func (c channelState) ConnectedNodes() map[NodeID]struct{} { - nodes := make(map[NodeID]struct{}) - for _, channels := range c { - nodes[channels.Node] = struct{}{} - } - - // TODO(roasbeef): add outgoing, nodes, allow incoming and outgoing to - // per node - // * only add node is chan as funding amt set - - return nodes -} - -// Agent implements a closed-loop control system which seeks to autonomously -// optimize the allocation of satoshis within channels throughput the network's -// channel graph. An agent is configurable by swapping out different -// AttachmentHeuristic strategies. The agent uses external signals such as the -// wallet balance changing, or new channels being opened/closed for the local -// node as an indicator to re-examine its internal state, and the amount of -// available funds in order to make updated decisions w.r.t the channel graph. -// The Agent will automatically open, close, and splice in/out channel as -// necessary for it to step closer to its optimal state. -// -// TODO(roasbeef): prob re-word -type Agent struct { - started sync.Once - stopped sync.Once - - // cfg houses the configuration state of the Ant. 
- cfg Config - - // chanState tracks the current set of open channels. - chanState channelState - chanStateMtx sync.Mutex - - // stateUpdates is a channel that any external state updates that may - // affect the heuristics of the agent will be sent over. - stateUpdates chan interface{} - - // balanceUpdates is a channel where notifications about updates to the - // wallet's balance will be sent. This channel will be buffered to - // ensure we have at most one pending update of this type to handle at - // a given time. - balanceUpdates chan *balanceUpdate - - // nodeUpdates is a channel that changes to the graph node landscape - // will be sent over. This channel will be buffered to ensure we have - // at most one pending update of this type to handle at a given time. - nodeUpdates chan *nodeUpdates - - // pendingOpenUpdates is a channel where updates about channel pending - // opening will be sent. This channel will be buffered to ensure we - // have at most one pending update of this type to handle at a given - // time. - pendingOpenUpdates chan *chanPendingOpenUpdate - - // chanOpenFailures is a channel where updates about channel open - // failures will be sent. This channel will be buffered to ensure we - // have at most one pending update of this type to handle at a given - // time. - chanOpenFailures chan *chanOpenFailureUpdate - - // heuristicUpdates is a channel where updates from active heurstics - // will be sent. - heuristicUpdates chan *heuristicUpdate - - // totalBalance is the total number of satoshis the backing wallet is - // known to control at any given instance. This value will be updated - // when the agent receives external balance update signals. - totalBalance btcutil.Amount - - // failedNodes lists nodes that we've previously attempted to initiate - // channels with, but didn't succeed. - failedNodes map[NodeID]struct{} - - // pendingConns tracks the nodes that we are attempting to make - // connections to. 
This prevents us from making duplicate connection - // requests to the same node. - pendingConns map[NodeID]struct{} - - // pendingOpens tracks the channels that we've requested to be - // initiated, but haven't yet been confirmed as being fully opened. - // This state is required as otherwise, we may go over our allotted - // channel limit, or open multiple channels to the same node. - pendingOpens map[NodeID]LocalChannel - pendingMtx sync.Mutex - - quit chan struct{} - wg sync.WaitGroup -} - -// New creates a new instance of the Agent instantiated using the passed -// configuration and initial channel state. The initial channel state slice -// should be populated with the set of Channels that are currently opened by -// the backing Lightning Node. -func New(cfg Config, initialState []LocalChannel) (*Agent, er.R) { - a := &Agent{ - cfg: cfg, - chanState: make(map[lnwire.ShortChannelID]LocalChannel), - quit: make(chan struct{}), - stateUpdates: make(chan interface{}), - balanceUpdates: make(chan *balanceUpdate, 1), - nodeUpdates: make(chan *nodeUpdates, 1), - chanOpenFailures: make(chan *chanOpenFailureUpdate, 1), - heuristicUpdates: make(chan *heuristicUpdate, 1), - pendingOpenUpdates: make(chan *chanPendingOpenUpdate, 1), - failedNodes: make(map[NodeID]struct{}), - pendingConns: make(map[NodeID]struct{}), - pendingOpens: make(map[NodeID]LocalChannel), - } - - for _, c := range initialState { - a.chanState[c.ChanID] = c - } - - return a, nil -} - -// Start starts the agent along with any goroutines it needs to perform its -// normal duties. -func (a *Agent) Start() er.R { - var err er.R - a.started.Do(func() { - err = a.start() - }) - return err -} - -func (a *Agent) start() er.R { - rand.Seed(time.Now().Unix()) - log.Infof("Autopilot Agent starting") - - a.wg.Add(1) - go a.controller() - - return nil -} - -// Stop signals the Agent to gracefully shutdown. This function will block -// until all goroutines have exited. 
-func (a *Agent) Stop() er.R { - var err er.R - a.stopped.Do(func() { - err = a.stop() - }) - return err -} - -func (a *Agent) stop() er.R { - log.Infof("Autopilot Agent stopping") - - close(a.quit) - a.wg.Wait() - - return nil -} - -// balanceUpdate is a type of external state update that reflects an -// increase/decrease in the funds currently available to the wallet. -type balanceUpdate struct { -} - -// nodeUpdates is a type of external state update that reflects an addition or -// modification in channel graph node membership. -type nodeUpdates struct{} - -// chanOpenUpdate is a type of external state update that indicates a new -// channel has been opened, either by the Agent itself (within the main -// controller loop), or by an external user to the system. -type chanOpenUpdate struct { - newChan LocalChannel -} - -// chanPendingOpenUpdate is a type of external state update that indicates a new -// channel has been opened, either by the agent itself or an external subsystem, -// but is still pending. -type chanPendingOpenUpdate struct{} - -// chanOpenFailureUpdate is a type of external state update that indicates -// a previous channel open failed, and that it might be possible to try again. -type chanOpenFailureUpdate struct{} - -// heuristicUpdate is an update sent when one of the autopilot heuristics has -// changed, and prompts the agent to make a new attempt at opening more -// channels. -type heuristicUpdate struct { - heuristic AttachmentHeuristic -} - -// chanCloseUpdate is a type of external state update that indicates that the -// backing Lightning Node has closed a previously open channel. -type chanCloseUpdate struct { - closedChans []lnwire.ShortChannelID -} - -// OnBalanceChange is a callback that should be executed each time the balance -// of the backing wallet changes. 
-func (a *Agent) OnBalanceChange() { - select { - case a.balanceUpdates <- &balanceUpdate{}: - default: - } -} - -// OnNodeUpdates is a callback that should be executed each time our channel -// graph has new nodes or their node announcements are updated. -func (a *Agent) OnNodeUpdates() { - select { - case a.nodeUpdates <- &nodeUpdates{}: - default: - } -} - -// OnChannelOpen is a callback that should be executed each time a new channel -// is manually opened by the user or any system outside the autopilot agent. -func (a *Agent) OnChannelOpen(c LocalChannel) { - a.wg.Add(1) - go func() { - defer a.wg.Done() - - select { - case a.stateUpdates <- &chanOpenUpdate{newChan: c}: - case <-a.quit: - } - }() -} - -// OnChannelPendingOpen is a callback that should be executed each time a new -// channel is opened, either by the agent or an external subsystems, but is -// still pending. -func (a *Agent) OnChannelPendingOpen() { - select { - case a.pendingOpenUpdates <- &chanPendingOpenUpdate{}: - default: - } -} - -// OnChannelOpenFailure is a callback that should be executed when the -// autopilot has attempted to open a channel, but failed. In this case we can -// retry channel creation with a different node. -func (a *Agent) OnChannelOpenFailure() { - select { - case a.chanOpenFailures <- &chanOpenFailureUpdate{}: - default: - } -} - -// OnChannelClose is a callback that should be executed each time a prior -// channel has been closed for any reason. This includes regular -// closes, force closes, and channel breaches. -func (a *Agent) OnChannelClose(closedChans ...lnwire.ShortChannelID) { - a.wg.Add(1) - go func() { - defer a.wg.Done() - - select { - case a.stateUpdates <- &chanCloseUpdate{closedChans: closedChans}: - case <-a.quit: - } - }() -} - -// OnHeuristicUpdate is a method called when a heuristic has been updated, to -// trigger the agent to do a new state assessment. 
-func (a *Agent) OnHeuristicUpdate(h AttachmentHeuristic) { - select { - case a.heuristicUpdates <- &heuristicUpdate{ - heuristic: h, - }: - default: - } -} - -// mergeNodeMaps merges the Agent's set of nodes that it already has active -// channels open to, with the other sets of nodes that should be removed from -// consideration during heuristic selection. This ensures that the Agent doesn't -// attempt to open any "duplicate" channels to the same node. -func mergeNodeMaps(c map[NodeID]LocalChannel, - skips ...map[NodeID]struct{}) map[NodeID]struct{} { - - numNodes := len(c) - for _, skip := range skips { - numNodes += len(skip) - } - - res := make(map[NodeID]struct{}, numNodes) - for nodeID := range c { - res[nodeID] = struct{}{} - } - for _, skip := range skips { - for nodeID := range skip { - res[nodeID] = struct{}{} - } - } - - return res -} - -// mergeChanState merges the Agent's set of active channels, with the set of -// channels awaiting confirmation. This ensures that the agent doesn't go over -// the prescribed channel limit or fund allocation limit. -func mergeChanState(pendingChans map[NodeID]LocalChannel, - activeChans channelState) []LocalChannel { - - numChans := len(pendingChans) + len(activeChans) - totalChans := make([]LocalChannel, 0, numChans) - - totalChans = append(totalChans, activeChans.Channels()...) - - for _, pendingChan := range pendingChans { - totalChans = append(totalChans, pendingChan) - } - - return totalChans -} - -// controller implements the closed-loop control system of the Agent. The -// controller will make a decision w.r.t channel placement within the graph -// based on: its current internal state of the set of active channels open, -// and external state changes as a result of decisions it makes w.r.t channel -// allocation, or attributes affecting its control loop being updated by the -// backing Lightning Node. 
-func (a *Agent) controller() { - defer a.wg.Done() - - // We'll start off by assigning our starting balance, and injecting - // that amount as an initial wake up to the main controller goroutine. - a.OnBalanceChange() - - // TODO(roasbeef): do we in fact need to maintain order? - // * use sync.Cond if so - updateBalance := func() { - newBalance, err := a.cfg.WalletBalance() - if err != nil { - log.Warnf("unable to update wallet balance: %v", err) - return - } - - a.totalBalance = newBalance - } - - // TODO(roasbeef): add 10-minute wake up timer - for { - select { - // A new external signal has arrived. We'll use this to update - // our internal state, then determine if we should trigger a - // channel state modification (open/close, splice in/out). - case signal := <-a.stateUpdates: - log.Infof("Processing new external signal") - - switch update := signal.(type) { - // A new channel has been opened successfully. This was - // either opened by the Agent, or an external system - // that is able to drive the Lightning Node. - case *chanOpenUpdate: - log.Debugf("New channel successfully opened, "+ - "updating state with: %v", - spew.Sdump(update.newChan)) - - newChan := update.newChan - a.chanStateMtx.Lock() - a.chanState[newChan.ChanID] = newChan - a.chanStateMtx.Unlock() - - a.pendingMtx.Lock() - delete(a.pendingOpens, newChan.Node) - a.pendingMtx.Unlock() - - updateBalance() - // A channel has been closed, this may free up an - // available slot, triggering a new channel update. - case *chanCloseUpdate: - log.Debugf("Applying closed channel "+ - "updates: %v", - spew.Sdump(update.closedChans)) - - a.chanStateMtx.Lock() - for _, closedChan := range update.closedChans { - delete(a.chanState, closedChan) - } - a.chanStateMtx.Unlock() - - updateBalance() - } - - // A new channel has been opened by the agent or an external - // subsystem, but is still pending confirmation. 
- case <-a.pendingOpenUpdates: - updateBalance() - - // The balance of the backing wallet has changed, if more funds - // are now available, we may attempt to open up an additional - // channel, or splice in funds to an existing one. - case <-a.balanceUpdates: - log.Debug("Applying external balance state update") - - updateBalance() - - // The channel we tried to open previously failed for whatever - // reason. - case <-a.chanOpenFailures: - log.Debug("Retrying after previous channel open " + - "failure.") - - updateBalance() - - // New nodes have been added to the graph or their node - // announcements have been updated. We will consider opening - // channels to these nodes if we haven't stabilized. - case <-a.nodeUpdates: - log.Debugf("Node updates received, assessing " + - "need for more channels") - - // Any of the deployed heuristics has been updated, check - // whether we have new channel candidates available. - case upd := <-a.heuristicUpdates: - log.Debugf("Heuristic %v updated, assessing need for "+ - "more channels", upd.heuristic.Name()) - - // The agent has been signalled to exit, so we'll bail out - // immediately. - case <-a.quit: - return - } - - a.pendingMtx.Lock() - log.Debugf("Pending channels: %v", spew.Sdump(a.pendingOpens)) - a.pendingMtx.Unlock() - - // With all the updates applied, we'll obtain a set of the - // current active channels (confirmed channels), and also - // factor in our set of unconfirmed channels. - a.chanStateMtx.Lock() - a.pendingMtx.Lock() - totalChans := mergeChanState(a.pendingOpens, a.chanState) - a.pendingMtx.Unlock() - a.chanStateMtx.Unlock() - - // Now that we've updated our internal state, we'll consult our - // channel attachment heuristic to determine if we can open - // up any additional channels while staying within our - // constraints. 
- availableFunds, numChans := a.cfg.Constraints.ChannelBudget( - totalChans, a.totalBalance, - ) - switch { - case numChans == 0: - continue - - // If the amount is too small, we don't want to attempt opening - // another channel. - case availableFunds == 0: - continue - case availableFunds < a.cfg.Constraints.MinChanSize(): - continue - } - - log.Infof("Triggering attachment directive dispatch, "+ - "total_funds=%v", a.totalBalance) - - err := a.openChans(availableFunds, numChans, totalChans) - if err != nil { - log.Errorf("Unable to open channels: %v", err) - } - } -} - -// openChans queries the agent's heuristic for a set of channel candidates, and -// attempts to open channels to them. -func (a *Agent) openChans(availableFunds btcutil.Amount, numChans uint32, - totalChans []LocalChannel) er.R { - - // As channel size we'll use the maximum channel size available. - chanSize := a.cfg.Constraints.MaxChanSize() - if availableFunds < chanSize { - chanSize = availableFunds - } - - if chanSize < a.cfg.Constraints.MinChanSize() { - return er.Errorf("not enough funds available to open a " + - "single channel") - } - - // We're to attempt an attachment so we'll obtain the set of - // nodes that we currently have channels with so we avoid - // duplicate edges. 
- a.chanStateMtx.Lock() - connectedNodes := a.chanState.ConnectedNodes() - a.chanStateMtx.Unlock() - - for nID := range connectedNodes { - log.Tracef("Skipping node %x with open channel", nID[:]) - } - - a.pendingMtx.Lock() - - for nID := range a.pendingOpens { - log.Tracef("Skipping node %x with pending channel open", nID[:]) - } - - for nID := range a.pendingConns { - log.Tracef("Skipping node %x with pending connection", nID[:]) - } - - for nID := range a.failedNodes { - log.Tracef("Skipping failed node %v", nID[:]) - } - - nodesToSkip := mergeNodeMaps(a.pendingOpens, - a.pendingConns, connectedNodes, a.failedNodes, - ) - - a.pendingMtx.Unlock() - - // Gather the set of all nodes in the graph, except those we - // want to skip. - selfPubBytes := a.cfg.Self.SerializeCompressed() - nodes := make(map[NodeID]struct{}) - addresses := make(map[NodeID][]net.Addr) - if err := a.cfg.Graph.ForEachNode(func(node Node) er.R { - nID := NodeID(node.PubKey()) - - // If we come across ourselves, them we'll continue in - // order to avoid attempting to make a channel with - // ourselves. - if bytes.Equal(nID[:], selfPubBytes) { - log.Tracef("Skipping self node %x", nID[:]) - return nil - } - - // If the node has no known addresses, we cannot connect to it, - // so we'll skip it. - addrs := node.Addrs() - if len(addrs) == 0 { - log.Tracef("Skipping node %x since no addresses known", - nID[:]) - return nil - } - addresses[nID] = addrs - - // Additionally, if this node is in the blacklist, then - // we'll skip it. - if _, ok := nodesToSkip[nID]; ok { - log.Tracef("Skipping blacklisted node %x", nID[:]) - return nil - } - - nodes[nID] = struct{}{} - return nil - }); err != nil { - return er.Errorf("unable to get graph nodes: %v", err) - } - - // Use the heuristic to calculate a score for each node in the - // graph. 
- log.Debugf("Scoring %d nodes for chan_size=%v", len(nodes), chanSize) - scores, err := a.cfg.Heuristic.NodeScores( - a.cfg.Graph, totalChans, chanSize, nodes, - ) - if err != nil { - return er.Errorf("unable to calculate node scores : %v", err) - } - - log.Debugf("Got scores for %d nodes", len(scores)) - - // Now use the score to make a weighted choice which nodes to attempt - // to open channels to. - scores, err = chooseN(numChans, scores) - if err != nil { - return er.Errorf("unable to make weighted choice: %v", - err) - } - - chanCandidates := make(map[NodeID]*AttachmentDirective) - for nID := range scores { - log.Tracef("Creating attachment directive for chosen node %x", - nID[:]) - - // Track the available funds we have left. - if availableFunds < chanSize { - chanSize = availableFunds - } - availableFunds -= chanSize - - // If we run out of funds, we can break early. - if chanSize < a.cfg.Constraints.MinChanSize() { - log.Tracef("Chan size %v too small to satisfy min "+ - "channel size %v, breaking", chanSize, - a.cfg.Constraints.MinChanSize()) - break - } - - chanCandidates[nID] = &AttachmentDirective{ - NodeID: nID, - ChanAmt: chanSize, - Addrs: addresses[nID], - } - } - - if len(chanCandidates) == 0 { - log.Infof("No eligible candidates to connect to") - return nil - } - - log.Infof("Attempting to execute channel attachment "+ - "directives: %v", spew.Sdump(chanCandidates)) - - // Before proceeding, check to see if we have any slots - // available to open channels. If there are any, we will attempt - // to dispatch the retrieved directives since we can't be - // certain which ones may actually succeed. If too many - // connections succeed, they will be ignored and made - // available to future heuristic selections. 
- a.pendingMtx.Lock() - defer a.pendingMtx.Unlock() - if uint16(len(a.pendingOpens)) >= a.cfg.Constraints.MaxPendingOpens() { - log.Debugf("Reached cap of %v pending "+ - "channel opens, will retry "+ - "after success/failure", - a.cfg.Constraints.MaxPendingOpens()) - return nil - } - - // For each recommended attachment directive, we'll launch a - // new goroutine to attempt to carry out the directive. If any - // of these succeed, then we'll receive a new state update, - // taking us back to the top of our controller loop. - for _, chanCandidate := range chanCandidates { - // Skip candidates which we are already trying - // to establish a connection with. - nodeID := chanCandidate.NodeID - if _, ok := a.pendingConns[nodeID]; ok { - continue - } - a.pendingConns[nodeID] = struct{}{} - - a.wg.Add(1) - go a.executeDirective(*chanCandidate) - } - return nil -} - -// executeDirective attempts to connect to the channel candidate specified by -// the given attachment directive, and open a channel of the given size. -// -// NOTE: MUST be run as a goroutine. -func (a *Agent) executeDirective(directive AttachmentDirective) { - defer a.wg.Done() - - // We'll start out by attempting to connect to the peer in order to - // begin the funding workflow. - nodeID := directive.NodeID - pub, err := btcec.ParsePubKey(nodeID[:], btcec.S256()) - if err != nil { - log.Errorf("Unable to parse pubkey %x: %v", nodeID, err) - return - } - - connected := make(chan bool) - errChan := make(chan er.R) - - // To ensure a call to ConnectToPeer doesn't block the agent from - // shutting down, we'll launch it in a non-waitgrouped goroutine, that - // will signal when a result is returned. - // TODO(halseth): use DialContext to cancel on transport level. 
- go func() { - alreadyConnected, err := a.cfg.ConnectToPeer( - pub, directive.Addrs, - ) - if err != nil { - select { - case errChan <- err: - case <-a.quit: - } - return - } - - select { - case connected <- alreadyConnected: - case <-a.quit: - return - } - }() - - var alreadyConnected bool - select { - case alreadyConnected = <-connected: - case err = <-errChan: - case <-a.quit: - return - } - - if err != nil { - log.Warnf("Unable to connect to %x: %v", - pub.SerializeCompressed(), err) - - // Since we failed to connect to them, we'll mark them as - // failed so that we don't attempt to connect to them again. - a.pendingMtx.Lock() - delete(a.pendingConns, nodeID) - a.failedNodes[nodeID] = struct{}{} - a.pendingMtx.Unlock() - - // Finally, we'll trigger the agent to select new peers to - // connect to. - a.OnChannelOpenFailure() - - return - } - - // The connection was successful, though before progressing we must - // check that we have not already met our quota for max pending open - // channels. This can happen if multiple directives were spawned but - // fewer slots were available, and other successful attempts finished - // first. - a.pendingMtx.Lock() - if uint16(len(a.pendingOpens)) >= a.cfg.Constraints.MaxPendingOpens() { - // Since we've reached our max number of pending opens, we'll - // disconnect this peer and exit. However, if we were - // previously connected to them, then we'll make sure to - // maintain the connection alive. - if alreadyConnected { - // Since we succeeded in connecting, we won't add this - // peer to the failed nodes map, but we will remove it - // from a.pendingConns so that it can be retried in the - // future. 
- delete(a.pendingConns, nodeID) - a.pendingMtx.Unlock() - return - } - - err = a.cfg.DisconnectPeer(pub) - if err != nil { - log.Warnf("Unable to disconnect peer %x: %v", - pub.SerializeCompressed(), err) - } - - // Now that we have disconnected, we can remove this node from - // our pending conns map, permitting subsequent connection - // attempts. - delete(a.pendingConns, nodeID) - a.pendingMtx.Unlock() - return - } - - // If we were successful, we'll track this peer in our set of pending - // opens. We do this here to ensure we don't stall on selecting new - // peers if the connection attempt happens to take too long. - delete(a.pendingConns, nodeID) - a.pendingOpens[nodeID] = LocalChannel{ - Balance: directive.ChanAmt, - Node: nodeID, - } - a.pendingMtx.Unlock() - - // We can then begin the funding workflow with this peer. - err = a.cfg.ChanController.OpenChannel(pub, directive.ChanAmt) - if err != nil { - log.Warnf("Unable to open channel to %x of %v: %v", - pub.SerializeCompressed(), directive.ChanAmt, err) - - // As the attempt failed, we'll clear the peer from the set of - // pending opens and mark them as failed so we don't attempt to - // open a channel to them again. - a.pendingMtx.Lock() - delete(a.pendingOpens, nodeID) - a.failedNodes[nodeID] = struct{}{} - a.pendingMtx.Unlock() - - // Trigger the agent to re-evaluate everything and possibly - // retry with a different node. - a.OnChannelOpenFailure() - - // Finally, we should also disconnect the peer if we weren't - // already connected to them beforehand by an external - // subsystem. - if alreadyConnected { - return - } - - err = a.cfg.DisconnectPeer(pub) - if err != nil { - log.Warnf("Unable to disconnect peer %x: %v", - pub.SerializeCompressed(), err) - } - } - - // Since the channel open was successful and is currently pending, - // we'll trigger the autopilot agent to query for more peers. 
- // TODO(halseth): this triggers a new loop before all the new channels - // are added to the pending channels map. Should add before executing - // directive in goroutine? - a.OnChannelPendingOpen() -} diff --git a/lnd/autopilot/agent_constraints.go b/lnd/autopilot/agent_constraints.go deleted file mode 100644 index 31053e45..00000000 --- a/lnd/autopilot/agent_constraints.go +++ /dev/null @@ -1,151 +0,0 @@ -package autopilot - -import ( - "github.com/pkt-cash/pktd/btcutil" -) - -// AgentConstraints is an interface the agent will query to determine what -// limits it will need to stay inside when opening channels. -type AgentConstraints interface { - // ChannelBudget should, given the passed parameters, return whether - // more channels can be opened while still staying within the set - // constraints. If the constraints allow us to open more channels, then - // the first return value will represent the amount of additional funds - // available towards creating channels. The second return value is the - // exact *number* of additional channels available. - ChannelBudget(chans []LocalChannel, balance btcutil.Amount) ( - btcutil.Amount, uint32) - - // MaxPendingOpens returns the maximum number of pending channel - // establishment goroutines that can be lingering. We cap this value in - // order to control the level of parallelism caused by the autopilot - // agent. - MaxPendingOpens() uint16 - - // MinChanSize returns the smallest channel that the autopilot agent - // should create. - MinChanSize() btcutil.Amount - - // MaxChanSize returns largest channel that the autopilot agent should - // create. - MaxChanSize() btcutil.Amount -} - -// agenConstraints is an implementation of the AgentConstraints interface that -// indicate the constraints the autopilot agent must adhere to when opening -// channels. -type agentConstraints struct { - // minChanSize is the smallest channel that the autopilot agent should - // create. 
- minChanSize btcutil.Amount - - // maxChanSize is the largest channel that the autopilot agent should - // create. - maxChanSize btcutil.Amount - - // chanLimit is the maximum number of channels that should be created. - chanLimit uint16 - - // allocation is the percentage of total funds that should be committed - // to automatic channel establishment. - allocation float64 - - // maxPendingOpens is the maximum number of pending channel - // establishment goroutines that can be lingering. We cap this value in - // order to control the level of parallelism caused by the autopilot - // agent. - maxPendingOpens uint16 -} - -// A compile time assertion to ensure agentConstraints satisfies the -// AgentConstraints interface. -var _ AgentConstraints = (*agentConstraints)(nil) - -// NewConstraints returns a new AgentConstraints with the given limits. -func NewConstraints(minChanSize, maxChanSize btcutil.Amount, chanLimit, - maxPendingOpens uint16, allocation float64) AgentConstraints { - - return &agentConstraints{ - minChanSize: minChanSize, - maxChanSize: maxChanSize, - chanLimit: chanLimit, - allocation: allocation, - maxPendingOpens: maxPendingOpens, - } -} - -// ChannelBudget should, given the passed parameters, return whether more -// channels can be be opened while still staying within the set constraints. -// If the constraints allow us to open more channels, then the first return -// value will represent the amount of additional funds available towards -// creating channels. The second return value is the exact *number* of -// additional channels available. -// -// Note: part of the AgentConstraints interface. -func (h *agentConstraints) ChannelBudget(channels []LocalChannel, - funds btcutil.Amount) (btcutil.Amount, uint32) { - - // If we're already over our maximum allowed number of channels, then - // we'll instruct the controller not to create any more channels. 
- if len(channels) >= int(h.chanLimit) { - return 0, 0 - } - - // The number of additional channels that should be opened is the - // difference between the channel limit, and the number of channels we - // already have open. - numAdditionalChans := uint32(h.chanLimit) - uint32(len(channels)) - - // First, we'll tally up the total amount of funds that are currently - // present within the set of active channels. - var totalChanAllocation btcutil.Amount - for _, channel := range channels { - totalChanAllocation += channel.Balance - } - - // With this value known, we'll now compute the total amount of fund - // allocated across regular utxo's and channel utxo's. - totalFunds := funds + totalChanAllocation - - // Once the total amount has been computed, we then calculate the - // fraction of funds currently allocated to channels. - fundsFraction := float64(totalChanAllocation) / float64(totalFunds) - - // If this fraction is below our threshold, then we'll return true, to - // indicate the controller should call Select to obtain a candidate set - // of channels to attempt to open. - needMore := fundsFraction < h.allocation - if !needMore { - return 0, 0 - } - - // Now that we know we need more funds, we'll compute the amount of - // additional funds we should allocate towards channels. - targetAllocation := btcutil.Amount(float64(totalFunds) * h.allocation) - fundsAvailable := targetAllocation - totalChanAllocation - return fundsAvailable, numAdditionalChans -} - -// MaxPendingOpens returns the maximum number of pending channel establishment -// goroutines that can be lingering. We cap this value in order to control the -// level of parallelism caused by the autopilot agent. -// -// Note: part of the AgentConstraints interface. -func (h *agentConstraints) MaxPendingOpens() uint16 { - return h.maxPendingOpens -} - -// MinChanSize returns the smallest channel that the autopilot agent should -// create. -// -// Note: part of the AgentConstraints interface. 
-func (h *agentConstraints) MinChanSize() btcutil.Amount { - return h.minChanSize -} - -// MaxChanSize returns largest channel that the autopilot agent should create. -// -// Note: part of the AgentConstraints interface. -func (h *agentConstraints) MaxChanSize() btcutil.Amount { - return h.maxChanSize -} diff --git a/lnd/autopilot/agent_constraints_test.go b/lnd/autopilot/agent_constraints_test.go deleted file mode 100644 index ac0e006a..00000000 --- a/lnd/autopilot/agent_constraints_test.go +++ /dev/null @@ -1,166 +0,0 @@ -package autopilot - -import ( - "testing" - "time" - - prand "math/rand" - - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -func TestConstraintsChannelBudget(t *testing.T) { - t.Parallel() - - prand.Seed(time.Now().Unix()) - - maxChanSize := btcutil.Amount(btcutil.UnitsPerCoin()) - const ( - minChanSize = 0 - - chanLimit = 3 - - threshold = 0.5 - ) - - constraints := NewConstraints( - minChanSize, - maxChanSize, - chanLimit, - 0, - threshold, - ) - - randChanID := func() lnwire.ShortChannelID { - return lnwire.NewShortChanIDFromInt(uint64(prand.Int63())) - } - - testCases := []struct { - channels []LocalChannel - walletAmt btcutil.Amount - - needMore bool - amtAvailable btcutil.Amount - numMore uint32 - }{ - // Many available funds, but already have too many active open - // channels. - { - []LocalChannel{ - { - ChanID: randChanID(), - Balance: btcutil.Amount(prand.Int31()), - }, - { - ChanID: randChanID(), - Balance: btcutil.Amount(prand.Int31()), - }, - { - ChanID: randChanID(), - Balance: btcutil.Amount(prand.Int31()), - }, - }, - btcutil.Amount(btcutil.UnitsPerCoin() * 10), - false, - 0, - 0, - }, - - // Ratio of funds in channels and total funds meets the - // threshold. 
- { - []LocalChannel{ - { - ChanID: randChanID(), - Balance: btcutil.Amount(btcutil.UnitsPerCoin()), - }, - { - ChanID: randChanID(), - Balance: btcutil.Amount(btcutil.UnitsPerCoin()), - }, - }, - btcutil.Amount(btcutil.UnitsPerCoin() * 2), - false, - 0, - 0, - }, - - // Ratio of funds in channels and total funds is below the - // threshold. We have 10 BTC allocated amongst channels and - // funds, atm. We're targeting 50%, so 5 BTC should be - // allocated. Only 1 BTC is atm, so 4 BTC should be - // recommended. We should also request 2 more channels as the - // limit is 3. - { - []LocalChannel{ - { - ChanID: randChanID(), - Balance: btcutil.Amount(btcutil.UnitsPerCoin()), - }, - }, - btcutil.Amount(btcutil.UnitsPerCoin() * 9), - true, - btcutil.Amount(btcutil.UnitsPerCoin() * 4), - 2, - }, - - // Ratio of funds in channels and total funds is below the - // threshold. We have 14 BTC total amongst the wallet's - // balance, and our currently opened channels. Since we're - // targeting a 50% allocation, we should commit 7 BTC. The - // current channels commit 4 BTC, so we should expected 3 BTC - // to be committed. We should only request a single additional - // channel as the limit is 3. - { - []LocalChannel{ - { - ChanID: randChanID(), - Balance: btcutil.Amount(btcutil.UnitsPerCoin()), - }, - { - ChanID: randChanID(), - Balance: btcutil.Amount(btcutil.UnitsPerCoin() * 3), - }, - }, - btcutil.Amount(btcutil.UnitsPerCoin() * 10), - true, - btcutil.Amount(btcutil.UnitsPerCoin() * 3), - 1, - }, - - // Ratio of funds in channels and total funds is above the - // threshold. 
- { - []LocalChannel{ - { - ChanID: randChanID(), - Balance: btcutil.Amount(btcutil.UnitsPerCoin()), - }, - { - ChanID: randChanID(), - Balance: btcutil.Amount(btcutil.UnitsPerCoin()), - }, - }, - btcutil.Amount(btcutil.UnitsPerCoin()), - false, - 0, - 0, - }, - } - - for i, testCase := range testCases { - amtToAllocate, numMore := constraints.ChannelBudget( - testCase.channels, testCase.walletAmt, - ) - - if amtToAllocate != testCase.amtAvailable { - t.Fatalf("test #%v: expected %v, got %v", - i, testCase.amtAvailable, amtToAllocate) - } - if numMore != testCase.numMore { - t.Fatalf("test #%v: expected %v, got %v", - i, testCase.numMore, numMore) - } - } -} diff --git a/lnd/autopilot/agent_test.go b/lnd/autopilot/agent_test.go deleted file mode 100644 index 3fb09001..00000000 --- a/lnd/autopilot/agent_test.go +++ /dev/null @@ -1,1377 +0,0 @@ -package autopilot - -import ( - "net" - "sync" - "testing" - "time" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/wire" -) - -type moreChansResp struct { - numMore uint32 - amt btcutil.Amount -} - -type moreChanArg struct { - chans []LocalChannel - balance btcutil.Amount -} - -type mockConstraints struct { - moreChansResps chan moreChansResp - moreChanArgs chan moreChanArg - quit chan struct{} -} - -func (m *mockConstraints) ChannelBudget(chans []LocalChannel, - balance btcutil.Amount) (btcutil.Amount, uint32) { - - if m.moreChanArgs != nil { - moreChan := moreChanArg{ - chans: chans, - balance: balance, - } - - select { - case m.moreChanArgs <- moreChan: - case <-m.quit: - return 0, 0 - } - } - - select { - case resp := <-m.moreChansResps: - return resp.amt, resp.numMore - case <-m.quit: - return 0, 0 - } -} - -func (m *mockConstraints) MaxPendingOpens() uint16 { - return 10 -} - -func (m *mockConstraints) MinChanSize() btcutil.Amount { - return 1e7 -} -func (m *mockConstraints) MaxChanSize() btcutil.Amount { - return 1e8 -} - 
-var _ AgentConstraints = (*mockConstraints)(nil) - -type mockHeuristic struct { - nodeScoresResps chan map[NodeID]*NodeScore - nodeScoresArgs chan directiveArg - - quit chan struct{} -} - -type directiveArg struct { - graph ChannelGraph - amt btcutil.Amount - chans []LocalChannel - nodes map[NodeID]struct{} -} - -func (m *mockHeuristic) Name() string { - return "mock" -} - -func (m *mockHeuristic) NodeScores(g ChannelGraph, chans []LocalChannel, - chanSize btcutil.Amount, nodes map[NodeID]struct{}) ( - map[NodeID]*NodeScore, er.R) { - - if m.nodeScoresArgs != nil { - directive := directiveArg{ - graph: g, - amt: chanSize, - chans: chans, - nodes: nodes, - } - - select { - case m.nodeScoresArgs <- directive: - case <-m.quit: - return nil, er.New("exiting") - } - } - - select { - case resp := <-m.nodeScoresResps: - return resp, nil - case <-m.quit: - return nil, er.New("exiting") - } -} - -var _ AttachmentHeuristic = (*mockHeuristic)(nil) - -type openChanIntent struct { - target *btcec.PublicKey - amt btcutil.Amount - private bool -} - -type mockChanController struct { - openChanSignals chan openChanIntent - private bool -} - -func (m *mockChanController) OpenChannel(target *btcec.PublicKey, - amt btcutil.Amount) er.R { - - m.openChanSignals <- openChanIntent{ - target: target, - amt: amt, - private: m.private, - } - - return nil -} - -func (m *mockChanController) CloseChannel(chanPoint *wire.OutPoint) er.R { - return nil -} - -var _ ChannelController = (*mockChanController)(nil) - -type testContext struct { - constraints *mockConstraints - heuristic *mockHeuristic - chanController ChannelController - graph testGraph - agent *Agent - walletBalance btcutil.Amount - - quit chan struct{} - sync.Mutex -} - -func setup(t *testing.T, initialChans []LocalChannel) (*testContext, func()) { - t.Helper() - - // First, we'll create all the dependencies that we'll need in order to - // create the autopilot agent. 
- self, err := randKey() - if err != nil { - t.Fatalf("unable to generate key: %v", err) - } - - quit := make(chan struct{}) - heuristic := &mockHeuristic{ - nodeScoresArgs: make(chan directiveArg), - nodeScoresResps: make(chan map[NodeID]*NodeScore), - quit: quit, - } - constraints := &mockConstraints{ - moreChansResps: make(chan moreChansResp), - moreChanArgs: make(chan moreChanArg), - quit: quit, - } - - chanController := &mockChanController{ - openChanSignals: make(chan openChanIntent, 10), - } - memGraph, _, _ := newMemChanGraph() - - // We'll keep track of the funds available to the agent, to make sure - // it correctly uses this value when querying the ChannelBudget. - var availableFunds btcutil.Amount = 10 * btcutil.UnitsPerCoin() - - ctx := &testContext{ - constraints: constraints, - heuristic: heuristic, - chanController: chanController, - graph: memGraph, - walletBalance: availableFunds, - quit: quit, - } - - // With the dependencies we created, we can now create the initial - // agent itself. - testCfg := Config{ - Self: self, - Heuristic: heuristic, - ChanController: chanController, - WalletBalance: func() (btcutil.Amount, er.R) { - ctx.Lock() - defer ctx.Unlock() - return ctx.walletBalance, nil - }, - ConnectToPeer: func(*btcec.PublicKey, []net.Addr) (bool, er.R) { - return false, nil - }, - DisconnectPeer: func(*btcec.PublicKey) er.R { - return nil - }, - Graph: memGraph, - Constraints: constraints, - } - - agent, err := New(testCfg, initialChans) - if err != nil { - t.Fatalf("unable to create agent: %v", err) - } - ctx.agent = agent - - // With the autopilot agent and all its dependencies we'll start the - // primary controller goroutine. - if err := agent.Start(); err != nil { - t.Fatalf("unable to start agent: %v", err) - } - - cleanup := func() { - // We must close quit before agent.Stop(), to make sure - // ChannelBudget won't block preventing the agent from exiting. 
- close(quit) - agent.Stop() - } - - return ctx, cleanup -} - -// respondMoreChans consumes the moreChanArgs element and responds to the agent -// with the given moreChansResp. -func respondMoreChans(t *testing.T, testCtx *testContext, resp moreChansResp) { - t.Helper() - - // The agent should now query the heuristic. - select { - case <-testCtx.constraints.moreChanArgs: - case <-time.After(time.Second * 3): - t.Fatalf("heuristic wasn't queried in time") - } - - // We'll send the response. - select { - case testCtx.constraints.moreChansResps <- resp: - case <-time.After(time.Second * 10): - t.Fatalf("response wasn't sent in time") - } -} - -// respondMoreChans consumes the nodeScoresArgs element and responds to the -// agent with the given node scores. -func respondNodeScores(t *testing.T, testCtx *testContext, - resp map[NodeID]*NodeScore) { - t.Helper() - - // Send over an empty list of attachment directives, which should cause - // the agent to return to waiting on a new signal. - select { - case <-testCtx.heuristic.nodeScoresArgs: - case <-time.After(time.Second * 3): - t.Fatalf("node scores weren't queried in time") - } - select { - case testCtx.heuristic.nodeScoresResps <- resp: - case <-time.After(time.Second * 10): - t.Fatalf("node scores were not sent in time") - } -} - -// TestAgentChannelOpenSignal tests that upon receipt of a chanOpenUpdate, then -// agent modifies its local state accordingly, and reconsults the heuristic. -func TestAgentChannelOpenSignal(t *testing.T) { - t.Parallel() - - testCtx, cleanup := setup(t, nil) - defer cleanup() - - // We'll send an initial "no" response to advance the agent past its - // initial check. - respondMoreChans(t, testCtx, moreChansResp{0, 0}) - - // Next we'll signal a new channel being opened by the backing LN node, - // with a capacity of 1 BTC. 
- newChan := LocalChannel{ - ChanID: randChanID(), - Balance: btcutil.UnitsPerCoin(), - } - testCtx.agent.OnChannelOpen(newChan) - - // The agent should now query the heuristic in order to determine its - // next action as it local state has now been modified. - respondMoreChans(t, testCtx, moreChansResp{0, 0}) - - // At this point, the local state of the agent should - // have also been updated to reflect that the LN node - // now has an additional channel with one BTC. - if _, ok := testCtx.agent.chanState[newChan.ChanID]; !ok { - t.Fatalf("internal channel state wasn't updated") - } - - // There shouldn't be a call to the Select method as we've returned - // "false" for NeedMoreChans above. - select { - - // If this send success, then Select was erroneously called and the - // test should be failed. - case testCtx.heuristic.nodeScoresResps <- map[NodeID]*NodeScore{}: - t.Fatalf("Select was called but shouldn't have been") - - // This is the correct path as Select should've be called. - default: - } -} - -// TestAgentHeuristicUpdateSignal tests that upon notification about a -// heuristic update, the agent reconsults the heuristic. -func TestAgentHeuristicUpdateSignal(t *testing.T) { - t.Parallel() - - testCtx, cleanup := setup(t, nil) - defer cleanup() - - pub, err := testCtx.graph.addRandNode() - if err != nil { - t.Fatalf("unable to generate key: %v", err) - } - - // We'll send an initial "no" response to advance the agent past its - // initial check. - respondMoreChans(t, testCtx, moreChansResp{0, 0}) - - // Next we'll signal that one of the heuristcs have been updated. - testCtx.agent.OnHeuristicUpdate(testCtx.heuristic) - - // The update should trigger the agent to ask for a channel budget.so - // we'll respond that there is a budget for opening 1 more channel. - respondMoreChans(t, testCtx, - moreChansResp{ - numMore: 1, - amt: 1 * btcutil.UnitsPerCoin(), - }, - ) - - // At this point, the agent should now be querying the heuristic for - // scores. 
We'll respond. - nodeID := NewNodeID(pub) - scores := map[NodeID]*NodeScore{ - nodeID: { - NodeID: nodeID, - Score: 0.5, - }, - } - respondNodeScores(t, testCtx, scores) - - // Finally, this should result in the agent opening a channel. - chanController := testCtx.chanController.(*mockChanController) - select { - case <-chanController.openChanSignals: - case <-time.After(time.Second * 10): - t.Fatalf("channel not opened in time") - } -} - -// A mockFailingChanController always fails to open a channel. -type mockFailingChanController struct { -} - -func (m *mockFailingChanController) OpenChannel(target *btcec.PublicKey, - amt btcutil.Amount) er.R { - return er.New("failure") -} - -func (m *mockFailingChanController) CloseChannel(chanPoint *wire.OutPoint) er.R { - return nil -} - -var _ ChannelController = (*mockFailingChanController)(nil) - -// TestAgentChannelFailureSignal tests that if an autopilot channel fails to -// open, the agent is signalled to make a new decision. -func TestAgentChannelFailureSignal(t *testing.T) { - t.Parallel() - - testCtx, cleanup := setup(t, nil) - defer cleanup() - - testCtx.chanController = &mockFailingChanController{} - - node, err := testCtx.graph.addRandNode() - if err != nil { - t.Fatalf("unable to add node: %v", err) - } - - // First ensure the agent will attempt to open a new channel. Return - // that we need more channels, and have 5BTC to use. - respondMoreChans(t, testCtx, moreChansResp{1, 5 * btcutil.UnitsPerCoin()}) - - // At this point, the agent should now be querying the heuristic to - // request attachment directives, return a fake so the agent will - // attempt to open a channel. - var fakeDirective = &NodeScore{ - NodeID: NewNodeID(node), - Score: 0.5, - } - - respondNodeScores( - t, testCtx, map[NodeID]*NodeScore{ - NewNodeID(node): fakeDirective, - }, - ) - - // At this point the agent will attempt to create a channel and fail. - - // Now ensure that the controller loop is re-executed. 
- respondMoreChans(t, testCtx, moreChansResp{1, 5 * btcutil.UnitsPerCoin()}) - respondNodeScores(t, testCtx, map[NodeID]*NodeScore{}) -} - -// TestAgentChannelCloseSignal ensures that once the agent receives an outside -// signal of a channel belonging to the backing LN node being closed, then it -// will query the heuristic to make its next decision. -func TestAgentChannelCloseSignal(t *testing.T) { - t.Parallel() - // We'll start the agent with two channels already being active. - initialChans := []LocalChannel{ - { - ChanID: randChanID(), - Balance: btcutil.UnitsPerCoin(), - }, - { - ChanID: randChanID(), - Balance: btcutil.UnitsPerCoin() * 2, - }, - } - - testCtx, cleanup := setup(t, initialChans) - defer cleanup() - - // We'll send an initial "no" response to advance the agent past its - // initial check. - respondMoreChans(t, testCtx, moreChansResp{0, 0}) - - // Next, we'll close both channels which should force the agent to - // re-query the heuristic. - testCtx.agent.OnChannelClose(initialChans[0].ChanID, initialChans[1].ChanID) - - // The agent should now query the heuristic in order to determine its - // next action as it local state has now been modified. - respondMoreChans(t, testCtx, moreChansResp{0, 0}) - - // At this point, the local state of the agent should - // have also been updated to reflect that the LN node - // has no existing open channels. - if len(testCtx.agent.chanState) != 0 { - t.Fatalf("internal channel state wasn't updated") - } - - // There shouldn't be a call to the Select method as we've returned - // "false" for NeedMoreChans above. - select { - - // If this send success, then Select was erroneously called and the - // test should be failed. - case testCtx.heuristic.nodeScoresResps <- map[NodeID]*NodeScore{}: - t.Fatalf("Select was called but shouldn't have been") - - // This is the correct path as Select should've be called. 
- default: - } -} - -// TestAgentBalanceUpdateIncrease ensures that once the agent receives an -// outside signal concerning a balance update, then it will re-query the -// heuristic to determine its next action. -func TestAgentBalanceUpdate(t *testing.T) { - t.Parallel() - - testCtx, cleanup := setup(t, nil) - defer cleanup() - - // We'll send an initial "no" response to advance the agent past its - // initial check. - respondMoreChans(t, testCtx, moreChansResp{0, 0}) - - // Next we'll send a new balance update signal to the agent, adding 5 - // BTC to the amount of available funds. - testCtx.Lock() - testCtx.walletBalance += btcutil.UnitsPerCoin() * 5 - testCtx.Unlock() - - testCtx.agent.OnBalanceChange() - - // The agent should now query the heuristic in order to determine its - // next action as it local state has now been modified. - respondMoreChans(t, testCtx, moreChansResp{0, 0}) - - // At this point, the local state of the agent should - // have also been updated to reflect that the LN node - // now has an additional 5BTC available. - if testCtx.agent.totalBalance != testCtx.walletBalance { - t.Fatalf("expected %v wallet balance "+ - "instead have %v", testCtx.agent.totalBalance, - testCtx.walletBalance) - } - - // There shouldn't be a call to the Select method as we've returned - // "false" for NeedMoreChans above. - select { - - // If this send success, then Select was erroneously called and the - // test should be failed. - case testCtx.heuristic.nodeScoresResps <- map[NodeID]*NodeScore{}: - t.Fatalf("Select was called but shouldn't have been") - - // This is the correct path as Select should've be called. - default: - } -} - -// TestAgentImmediateAttach tests that if an autopilot agent is created, and it -// has enough funds available to create channels, then it does so immediately. 
-func TestAgentImmediateAttach(t *testing.T) { - t.Parallel() - - testCtx, cleanup := setup(t, nil) - defer cleanup() - - const numChans = 5 - - // We'll generate 5 mock directives so it can progress within its loop. - directives := make(map[NodeID]*NodeScore) - nodeKeys := make(map[NodeID]struct{}) - for i := 0; i < numChans; i++ { - pub, err := testCtx.graph.addRandNode() - if err != nil { - t.Fatalf("unable to generate key: %v", err) - } - nodeID := NewNodeID(pub) - directives[nodeID] = &NodeScore{ - NodeID: nodeID, - Score: 0.5, - } - nodeKeys[nodeID] = struct{}{} - } - // The very first thing the agent should do is query the NeedMoreChans - // method on the passed heuristic. So we'll provide it with a response - // that will kick off the main loop. - respondMoreChans(t, testCtx, - moreChansResp{ - numMore: numChans, - amt: 5 * btcutil.UnitsPerCoin(), - }, - ) - - // At this point, the agent should now be querying the heuristic to - // requests attachment directives. With our fake directives created, - // we'll now send then to the agent as a return value for the Select - // function. - respondNodeScores(t, testCtx, directives) - - // Finally, we should receive 5 calls to the OpenChannel method with - // the exact same parameters that we specified within the attachment - // directives. - chanController := testCtx.chanController.(*mockChanController) - for i := 0; i < numChans; i++ { - select { - case openChan := <-chanController.openChanSignals: - if openChan.amt != btcutil.UnitsPerCoin() { - t.Fatalf("invalid chan amt: expected %v, got %v", - btcutil.UnitsPerCoin(), openChan.amt) - } - nodeID := NewNodeID(openChan.target) - _, ok := nodeKeys[nodeID] - if !ok { - t.Fatalf("unexpected key: %v, not found", - nodeID) - } - delete(nodeKeys, nodeID) - - case <-time.After(time.Second * 10): - t.Fatalf("channel not opened in time") - } - } -} - -// TestAgentPrivateChannels ensure that only requests for private channels are -// sent if set. 
-func TestAgentPrivateChannels(t *testing.T) { - t.Parallel() - - testCtx, cleanup := setup(t, nil) - defer cleanup() - - // The chanController should be initialized such that all of its open - // channel requests are for private channels. - testCtx.chanController.(*mockChanController).private = true - - const numChans = 5 - - // We'll generate 5 mock directives so the pubkeys will be found in the - // agent's graph, and it can progress within its loop. - directives := make(map[NodeID]*NodeScore) - for i := 0; i < numChans; i++ { - pub, err := testCtx.graph.addRandNode() - if err != nil { - t.Fatalf("unable to generate key: %v", err) - } - directives[NewNodeID(pub)] = &NodeScore{ - NodeID: NewNodeID(pub), - Score: 0.5, - } - } - - // The very first thing the agent should do is query the NeedMoreChans - // method on the passed heuristic. So we'll provide it with a response - // that will kick off the main loop. We'll send over a response - // indicating that it should establish more channels, and give it a - // budget of 5 BTC to do so. - resp := moreChansResp{ - numMore: numChans, - amt: 5 * btcutil.UnitsPerCoin(), - } - respondMoreChans(t, testCtx, resp) - - // At this point, the agent should now be querying the heuristic to - // requests attachment directives. With our fake directives created, - // we'll now send then to the agent as a return value for the Select - // function. - respondNodeScores(t, testCtx, directives) - - // Finally, we should receive 5 calls to the OpenChannel method, each - // specifying that it's for a private channel. 
- chanController := testCtx.chanController.(*mockChanController) - for i := 0; i < numChans; i++ { - select { - case openChan := <-chanController.openChanSignals: - if !openChan.private { - t.Fatal("expected open channel request to be private") - } - case <-time.After(10 * time.Second): - t.Fatal("channel not opened in time") - } - } -} - -// TestAgentPendingChannelState ensures that the agent properly factors in its -// pending channel state when making decisions w.r.t if it needs more channels -// or not, and if so, who is eligible to open new channels to. -func TestAgentPendingChannelState(t *testing.T) { - t.Parallel() - - testCtx, cleanup := setup(t, nil) - defer cleanup() - - // We'll only return a single directive for a pre-chosen node. - nodeKey, err := testCtx.graph.addRandNode() - if err != nil { - t.Fatalf("unable to generate key: %v", err) - } - nodeID := NewNodeID(nodeKey) - nodeDirective := &NodeScore{ - NodeID: nodeID, - Score: 0.5, - } - - // Once again, we'll start by telling the agent as part of its first - // query, that it needs more channels and has 3 BTC available for - // attachment. We'll send over a response indicating that it should - // establish more channels, and give it a budget of 1 BTC to do so. - respondMoreChans(t, testCtx, - moreChansResp{ - numMore: 1, - amt: btcutil.UnitsPerCoin(), - }, - ) - - respondNodeScores(t, testCtx, - map[NodeID]*NodeScore{ - nodeID: nodeDirective, - }, - ) - - // A request to open the channel should've also been sent. 
- chanController := testCtx.chanController.(*mockChanController) - select { - case openChan := <-chanController.openChanSignals: - chanAmt := testCtx.constraints.MaxChanSize() - if openChan.amt != chanAmt { - t.Fatalf("invalid chan amt: expected %v, got %v", - chanAmt, openChan.amt) - } - if !openChan.target.IsEqual(nodeKey) { - t.Fatalf("unexpected key: expected %x, got %x", - nodeKey.SerializeCompressed(), - openChan.target.SerializeCompressed()) - } - case <-time.After(time.Second * 10): - t.Fatalf("channel wasn't opened in time") - } - - // Now, in order to test that the pending state was properly updated, - // we'll trigger a balance update in order to trigger a query to the - // heuristic. - testCtx.Lock() - testCtx.walletBalance += btcutil.Amount(0.4 * btcutil.UnitsPerCoinF()) - testCtx.Unlock() - - testCtx.agent.OnBalanceChange() - - // The heuristic should be queried, and the argument for the set of - // channels passed in should include the pending channels that - // should've been created above. - select { - // The request that we get should include a pending channel for the - // one that we just created, otherwise the agent isn't properly - // updating its internal state. - case req := <-testCtx.constraints.moreChanArgs: - chanAmt := testCtx.constraints.MaxChanSize() - if len(req.chans) != 1 { - t.Fatalf("should include pending chan in current "+ - "state, instead have %v chans", len(req.chans)) - } - if req.chans[0].Balance != chanAmt { - t.Fatalf("wrong chan balance: expected %v, got %v", - req.chans[0].Balance, chanAmt) - } - if req.chans[0].Node != nodeID { - t.Fatalf("wrong node ID: expected %x, got %x", - nodeID, req.chans[0].Node[:]) - } - case <-time.After(time.Second * 10): - t.Fatalf("need more chans wasn't queried in time") - } - - // We'll send across a response indicating that it *does* need more - // channels. 
- select { - case testCtx.constraints.moreChansResps <- moreChansResp{1, btcutil.UnitsPerCoin()}: - case <-time.After(time.Second * 10): - t.Fatalf("need more chans wasn't queried in time") - } - - // The response above should prompt the agent to make a query to the - // Select method. The arguments passed should reflect the fact that the - // node we have a pending channel to, should be ignored. - select { - case req := <-testCtx.heuristic.nodeScoresArgs: - if len(req.chans) == 0 { - t.Fatalf("expected to skip %v nodes, instead "+ - "skipping %v", 1, len(req.chans)) - } - if req.chans[0].Node != nodeID { - t.Fatalf("pending node not included in skip arguments") - } - case <-time.After(time.Second * 10): - t.Fatalf("select wasn't queried in time") - } -} - -// TestAgentPendingOpenChannel ensures that the agent queries its heuristic once -// it detects a channel is pending open. This allows the agent to use its own -// change outputs that have yet to confirm for funding transactions. -func TestAgentPendingOpenChannel(t *testing.T) { - t.Parallel() - - testCtx, cleanup := setup(t, nil) - defer cleanup() - - // We'll send an initial "no" response to advance the agent past its - // initial check. - respondMoreChans(t, testCtx, moreChansResp{0, 0}) - - // Next, we'll signal that a new channel has been opened, but it is - // still pending. - testCtx.agent.OnChannelPendingOpen() - - // The agent should now query the heuristic in order to determine its - // next action as its local state has now been modified. - respondMoreChans(t, testCtx, moreChansResp{0, 0}) - - // There shouldn't be a call to the Select method as we've returned - // "false" for NeedMoreChans above. - select { - case testCtx.heuristic.nodeScoresResps <- map[NodeID]*NodeScore{}: - t.Fatalf("Select was called but shouldn't have been") - default: - } -} - -// TestAgentOnNodeUpdates tests that the agent will wake up in response to the -// OnNodeUpdates signal. 
This is useful in ensuring that autopilot is always -// pulling in the latest graph updates into its decision making. It also -// prevents the agent from stalling after an initial attempt that finds no nodes -// in the graph. -func TestAgentOnNodeUpdates(t *testing.T) { - t.Parallel() - - testCtx, cleanup := setup(t, nil) - defer cleanup() - - // We'll send an initial "yes" response to advance the agent past its - // initial check. This will cause it to try to get directives from an - // empty graph. - respondMoreChans( - t, testCtx, - moreChansResp{ - numMore: 2, - amt: testCtx.walletBalance, - }, - ) - - // Send over an empty list of attachment directives, which should cause - // the agent to return to waiting on a new signal. - respondNodeScores(t, testCtx, map[NodeID]*NodeScore{}) - - // Simulate more nodes being added to the graph by informing the agent - // that we have node updates. - testCtx.agent.OnNodeUpdates() - - // In response, the agent should wake up and see if it needs more - // channels. Since we haven't done anything, we will send the same - // response as before since we are still trying to open channels. - respondMoreChans( - t, testCtx, - moreChansResp{ - numMore: 2, - amt: testCtx.walletBalance, - }, - ) - - // Again the agent should pull in the next set of attachment directives. - // It's not important that this list is also empty, so long as the node - // updates signal is causing the agent to make this attempt. - respondNodeScores(t, testCtx, map[NodeID]*NodeScore{}) -} - -// TestAgentSkipPendingConns asserts that the agent will not try to make -// duplicate connection requests to the same node, even if the attachment -// heuristic instructs the agent to do so. It also asserts that the agent -// stops tracking the pending connection once it finishes. Note that in -// practice, a failed connection would be inserted into the skip map passed to -// the attachment heuristic, though this does not assert that case. 
-func TestAgentSkipPendingConns(t *testing.T) { - t.Parallel() - - testCtx, cleanup := setup(t, nil) - defer cleanup() - - connect := make(chan chan er.R) - testCtx.agent.cfg.ConnectToPeer = func(*btcec.PublicKey, []net.Addr) (bool, er.R) { - errChan := make(chan er.R) - - select { - case connect <- errChan: - case <-testCtx.quit: - return false, er.New("quit") - } - - select { - case err := <-errChan: - return false, err - case <-testCtx.quit: - return false, er.New("quit") - } - } - - // We'll only return a single directive for a pre-chosen node. - nodeKey, err := testCtx.graph.addRandNode() - if err != nil { - t.Fatalf("unable to generate key: %v", err) - } - nodeID := NewNodeID(nodeKey) - nodeDirective := &NodeScore{ - NodeID: nodeID, - Score: 0.5, - } - - // We'll also add a second node to the graph, to keep the first one - // company. - nodeKey2, err := testCtx.graph.addRandNode() - if err != nil { - t.Fatalf("unable to generate key: %v", err) - } - nodeID2 := NewNodeID(nodeKey2) - - // We'll send an initial "yes" response to advance the agent past its - // initial check. This will cause it to try to get directives from the - // graph. - respondMoreChans(t, testCtx, - moreChansResp{ - numMore: 1, - amt: testCtx.walletBalance, - }, - ) - - // Both nodes should be part of the arguments. - select { - case req := <-testCtx.heuristic.nodeScoresArgs: - if len(req.nodes) != 2 { - t.Fatalf("expected %v nodes, instead "+ - "had %v", 2, len(req.nodes)) - } - if _, ok := req.nodes[nodeID]; !ok { - t.Fatalf("node not included in arguments") - } - if _, ok := req.nodes[nodeID2]; !ok { - t.Fatalf("node not included in arguments") - } - case <-time.After(time.Second * 10): - t.Fatalf("select wasn't queried in time") - } - - // Respond with a scored directive. We skip node2 for now, implicitly - // giving it a zero-score. 
- select { - case testCtx.heuristic.nodeScoresResps <- map[NodeID]*NodeScore{ - NewNodeID(nodeKey): nodeDirective, - }: - case <-time.After(time.Second * 10): - t.Fatalf("heuristic wasn't queried in time") - } - - // The agent should attempt connection to the node. - var errChan chan er.R - select { - case errChan = <-connect: - case <-time.After(time.Second * 10): - t.Fatalf("agent did not attempt connection") - } - - // Signal the agent to go again, now that we've tried to connect. - testCtx.agent.OnNodeUpdates() - - // The heuristic again informs the agent that we need more channels. - respondMoreChans(t, testCtx, - moreChansResp{ - numMore: 1, - amt: testCtx.walletBalance, - }, - ) - - // Since the node now has a pending connection, it should be skipped - // and not part of the nodes attempting to be scored. - select { - case req := <-testCtx.heuristic.nodeScoresArgs: - if len(req.nodes) != 1 { - t.Fatalf("expected %v nodes, instead "+ - "had %v", 1, len(req.nodes)) - } - if _, ok := req.nodes[nodeID2]; !ok { - t.Fatalf("node not included in arguments") - } - case <-time.After(time.Second * 10): - t.Fatalf("select wasn't queried in time") - } - - // Respond with an emtpty score set. - select { - case testCtx.heuristic.nodeScoresResps <- map[NodeID]*NodeScore{}: - case <-time.After(time.Second * 10): - t.Fatalf("heuristic wasn't queried in time") - } - - // The agent should not attempt any connection, since no nodes were - // scored. - select { - case <-connect: - t.Fatalf("agent should not have attempted connection") - case <-time.After(time.Second * 3): - } - - // Now, timeout the original request, which should still be waiting for - // a response. - select { - case errChan <- er.Errorf("connection timeout"): - case <-time.After(time.Second * 10): - t.Fatalf("agent did not receive connection timeout") - } - - // The agent will now retry since the last connection attempt failed. - // The heuristic again informs the agent that we need more channels. 
- respondMoreChans(t, testCtx, - moreChansResp{ - numMore: 1, - amt: testCtx.walletBalance, - }, - ) - - // The node should now be marked as "failed", which should make it - // being skipped during scoring. Again check that it won't be among the - // score request. - select { - case req := <-testCtx.heuristic.nodeScoresArgs: - if len(req.nodes) != 1 { - t.Fatalf("expected %v nodes, instead "+ - "had %v", 1, len(req.nodes)) - } - if _, ok := req.nodes[nodeID2]; !ok { - t.Fatalf("node not included in arguments") - } - case <-time.After(time.Second * 10): - t.Fatalf("select wasn't queried in time") - } - - // Send a directive for the second node. - nodeDirective2 := &NodeScore{ - NodeID: nodeID2, - Score: 0.5, - } - select { - case testCtx.heuristic.nodeScoresResps <- map[NodeID]*NodeScore{ - nodeID2: nodeDirective2, - }: - case <-time.After(time.Second * 10): - t.Fatalf("heuristic wasn't queried in time") - } - - // This time, the agent should try the connection to the second node. - select { - case <-connect: - case <-time.After(time.Second * 10): - t.Fatalf("agent should have attempted connection") - } -} - -// TestAgentQuitWhenPendingConns tests that we are able to stop the autopilot -// agent even though there are pending connections to nodes. -func TestAgentQuitWhenPendingConns(t *testing.T) { - t.Parallel() - - testCtx, cleanup := setup(t, nil) - defer cleanup() - - connect := make(chan chan er.R) - - testCtx.agent.cfg.ConnectToPeer = func(*btcec.PublicKey, []net.Addr) (bool, er.R) { - errChan := make(chan er.R) - - select { - case connect <- errChan: - case <-testCtx.quit: - return false, er.New("quit") - } - - select { - case err := <-errChan: - return false, err - case <-testCtx.quit: - return false, er.New("quit") - } - } - - // We'll only return a single directive for a pre-chosen node. 
- nodeKey, err := testCtx.graph.addRandNode() - if err != nil { - t.Fatalf("unable to generate key: %v", err) - } - nodeID := NewNodeID(nodeKey) - nodeDirective := &NodeScore{ - NodeID: nodeID, - Score: 0.5, - } - - // We'll send an initial "yes" response to advance the agent past its - // initial check. This will cause it to try to get directives from the - // graph. - respondMoreChans(t, testCtx, - moreChansResp{ - numMore: 1, - amt: testCtx.walletBalance, - }, - ) - - // Check the args. - select { - case req := <-testCtx.heuristic.nodeScoresArgs: - if len(req.nodes) != 1 { - t.Fatalf("expected %v nodes, instead "+ - "had %v", 1, len(req.nodes)) - } - if _, ok := req.nodes[nodeID]; !ok { - t.Fatalf("node not included in arguments") - } - case <-time.After(time.Second * 10): - t.Fatalf("select wasn't queried in time") - } - - // Respond with a scored directive. - select { - case testCtx.heuristic.nodeScoresResps <- map[NodeID]*NodeScore{ - NewNodeID(nodeKey): nodeDirective, - }: - case <-time.After(time.Second * 10): - t.Fatalf("heuristic wasn't queried in time") - } - - // The agent should attempt connection to the node. - select { - case <-connect: - case <-time.After(time.Second * 10): - t.Fatalf("agent did not attempt connection") - } - - // Make sure that we are able to stop the agent, even though there is a - // pending connection. - stopped := make(chan er.R) - go func() { - stopped <- testCtx.agent.Stop() - }() - - select { - case err := <-stopped: - if err != nil { - t.Fatalf("error stopping agent: %v", err) - } - case <-time.After(2 * time.Second): - t.Fatalf("unable to stop agent") - } -} - -// respondWithScores checks that the moreChansRequest contains what we expect, -// and responds with the given node scores. 
-func respondWithScores(t *testing.T, testCtx *testContext, - channelBudget btcutil.Amount, existingChans, newChans int, - nodeScores map[NodeID]*NodeScore) { - - t.Helper() - - select { - case testCtx.constraints.moreChansResps <- moreChansResp{ - numMore: uint32(newChans), - amt: channelBudget, - }: - case <-time.After(time.Second * 3): - t.Fatalf("heuristic wasn't queried in time") - } - - // The agent should query for scores using the constraints returned - // above. We expect the agent to use the maximum channel size when - // opening channels. - chanSize := testCtx.constraints.MaxChanSize() - - select { - case req := <-testCtx.heuristic.nodeScoresArgs: - // All nodes in the graph should be potential channel - // candidates. - if len(req.nodes) != len(nodeScores) { - t.Fatalf("expected %v nodes, instead had %v", - len(nodeScores), len(req.nodes)) - } - - // 'existingChans' is already open. - if len(req.chans) != existingChans { - t.Fatalf("expected %d existing channel, got %v", - existingChans, len(req.chans)) - } - if req.amt != chanSize { - t.Fatalf("expected channel size of %v, got %v", - chanSize, req.amt) - } - - case <-time.After(time.Second * 3): - t.Fatalf("select wasn't queried in time") - } - - // Respond with the given scores. - select { - case testCtx.heuristic.nodeScoresResps <- nodeScores: - case <-time.After(time.Second * 3): - t.Fatalf("NodeScores wasn't queried in time") - } -} - -// checkChannelOpens asserts that the channel controller attempts open the -// number of channels we expect, and with the exact total allocation. -func checkChannelOpens(t *testing.T, testCtx *testContext, - allocation btcutil.Amount, numChans int) []NodeID { - - var nodes []NodeID - - // The agent should attempt to open channels, totaling what we expect. 
- var totalAllocation btcutil.Amount - chanController := testCtx.chanController.(*mockChanController) - for i := 0; i < numChans; i++ { - select { - case openChan := <-chanController.openChanSignals: - totalAllocation += openChan.amt - - testCtx.Lock() - testCtx.walletBalance -= openChan.amt - testCtx.Unlock() - - nodes = append(nodes, NewNodeID(openChan.target)) - - case <-time.After(time.Second * 3): - t.Fatalf("channel not opened in time") - } - } - - if totalAllocation != allocation { - t.Fatalf("expected agent to open channels totalling %v, "+ - "instead was %v", allocation, totalAllocation) - } - - // Finally, make sure the agent won't try opening more channels. - select { - case <-chanController.openChanSignals: - t.Fatalf("agent unexpectedly opened channel") - - case <-time.After(50 * time.Millisecond): - } - - return nodes -} - -// TestAgentChannelSizeAllocation tests that the autopilot agent opens channel -// of size that stays within the channel budget and size restrictions. -func TestAgentChannelSizeAllocation(t *testing.T) { - t.Parallel() - - // Total number of nodes in our mock graph. - const numNodes = 20 - - testCtx, cleanup := setup(t, nil) - defer cleanup() - - nodeScores := make(map[NodeID]*NodeScore) - for i := 0; i < numNodes; i++ { - nodeKey, err := testCtx.graph.addRandNode() - if err != nil { - t.Fatalf("unable to generate key: %v", err) - } - nodeID := NewNodeID(nodeKey) - nodeScores[nodeID] = &NodeScore{ - NodeID: nodeID, - Score: 0.5, - } - } - - // The agent should now query the heuristic in order to determine its - // next action as it local state has now been modified. 
- select { - case arg := <-testCtx.constraints.moreChanArgs: - if len(arg.chans) != 0 { - t.Fatalf("expected agent to have no channels open, "+ - "had %v", len(arg.chans)) - } - if arg.balance != testCtx.walletBalance { - t.Fatalf("expectd agent to have %v balance, had %v", - testCtx.walletBalance, arg.balance) - } - case <-time.After(time.Second * 3): - t.Fatalf("heuristic wasn't queried in time") - } - - // We'll return a response telling the agent to open 5 channels, with a - // total channel budget of 5 BTC. - var channelBudget btcutil.Amount = 5 * btcutil.UnitsPerCoin() - numExistingChannels := 0 - numNewChannels := 5 - respondWithScores( - t, testCtx, channelBudget, numExistingChannels, - numNewChannels, nodeScores, - ) - - // We expect the autopilot to have allocated all funds towards - // channels. - expectedAllocation := testCtx.constraints.MaxChanSize() * btcutil.Amount(numNewChannels) - nodes := checkChannelOpens( - t, testCtx, expectedAllocation, numNewChannels, - ) - - // Delete the selected nodes from our set of scores, to avoid scoring - // nodes we already have channels to. - for _, node := range nodes { - delete(nodeScores, node) - } - - // TODO(halseth): this loop is a hack to ensure all the attempted - // channels are accounted for. This happens because the agent will - // query the ChannelBudget before all the pending channels are added to - // the map. Fix by adding them to the pending channels map before - // executing directives in goroutines? - waitForNumChans := func(expChans int) { - t.Helper() - - var ( - numChans int - balance btcutil.Amount - ) - - Loop: - for { - select { - case arg := <-testCtx.constraints.moreChanArgs: - numChans = len(arg.chans) - balance = arg.balance - - // As long as the number of existing channels - // is below our expected number of channels, - // and the balance is not what we expect, we'll - // keep responding with "no more channels". 
- if numChans == expChans && - balance == testCtx.walletBalance { - break Loop - } - - select { - case testCtx.constraints.moreChansResps <- moreChansResp{0, 0}: - case <-time.After(time.Second * 3): - t.Fatalf("heuristic wasn't queried " + - "in time") - } - - case <-time.After(time.Second * 3): - t.Fatalf("did not receive expected "+ - "channels(%d) and balance(%d), "+ - "instead got %d and %d", expChans, - testCtx.walletBalance, numChans, - balance) - } - } - } - - // Wait for the agent to have 5 channels. - waitForNumChans(numNewChannels) - - // Set the channel budget to 1.5 BTC. - channelBudget = btcutil.UnitsPerCoin() * 3 / 2 - - // We'll return a response telling the agent to open 3 channels, with a - // total channel budget of 1.5 BTC. - numExistingChannels = 5 - numNewChannels = 3 - respondWithScores( - t, testCtx, channelBudget, numExistingChannels, - numNewChannels, nodeScores, - ) - - // To stay within the budget, we expect the autopilot to open 2 - // channels. - expectedAllocation = channelBudget - nodes = checkChannelOpens(t, testCtx, expectedAllocation, 2) - numExistingChannels = 7 - - for _, node := range nodes { - delete(nodeScores, node) - } - - waitForNumChans(numExistingChannels) - - // Finally check that we make maximum channels if we are well within - // our budget. - channelBudget = btcutil.UnitsPerCoin() * 5 - numNewChannels = 2 - respondWithScores( - t, testCtx, channelBudget, numExistingChannels, - numNewChannels, nodeScores, - ) - - // We now expect the autopilot to open 2 channels, and since it has - // more than enough balance within the budget, they should both be of - // maximum size. 
- expectedAllocation = testCtx.constraints.MaxChanSize() * - btcutil.Amount(numNewChannels) - - checkChannelOpens(t, testCtx, expectedAllocation, numNewChannels) -} diff --git a/lnd/autopilot/betweenness_centrality.go b/lnd/autopilot/betweenness_centrality.go deleted file mode 100644 index e701b721..00000000 --- a/lnd/autopilot/betweenness_centrality.go +++ /dev/null @@ -1,266 +0,0 @@ -package autopilot - -import ( - "sync" - - "github.com/pkt-cash/pktd/btcutil/er" -) - -// stack is a simple int stack to help with readability of Brandes' -// betweenness centrality implementation below. -type stack struct { - stack []int -} - -func (s *stack) push(v int) { - s.stack = append(s.stack, v) -} - -func (s *stack) top() int { - return s.stack[len(s.stack)-1] -} - -func (s *stack) pop() { - s.stack = s.stack[:len(s.stack)-1] -} - -func (s *stack) empty() bool { - return len(s.stack) == 0 -} - -// queue is a simple int queue to help with readability of Brandes' -// betweenness centrality implementation below. -type queue struct { - queue []int -} - -func (q *queue) push(v int) { - q.queue = append(q.queue, v) -} - -func (q *queue) front() int { - return q.queue[0] -} - -func (q *queue) pop() { - q.queue = q.queue[1:] -} - -func (q *queue) empty() bool { - return len(q.queue) == 0 -} - -// BetweennessCentrality is a NodeMetric that calculates node betweenness -// centrality using Brandes' algorithm. Betweenness centrality for each node -// is the number of shortest paths passing trough that node, not counting -// shortest paths starting or ending at that node. This is a useful metric -// to measure control of individual nodes over the whole network. -type BetweennessCentrality struct { - // workers number of goroutines are used to parallelize - // centrality calculation. - workers int - - // centrality stores original (not normalized) centrality values for - // each node in the graph. - centrality map[NodeID]float64 - - // min is the minimum centrality in the graph. 
- min float64 - - // max is the maximum centrality in the graph. - max float64 -} - -// NewBetweennessCentralityMetric creates a new BetweennessCentrality instance. -// Users can specify the number of workers to use for calculating centrality. -func NewBetweennessCentralityMetric(workers int) (*BetweennessCentrality, er.R) { - // There should be at least one worker. - if workers < 1 { - return nil, er.Errorf("workers must be positive") - } - return &BetweennessCentrality{ - workers: workers, - }, nil -} - -// Name returns the name of the metric. -func (bc *BetweennessCentrality) Name() string { - return "betweenness_centrality" -} - -// betweennessCentrality is the core of Brandes' algorithm. -// We first calculate the shortest paths from the start node s to all other -// nodes with BFS, then update the betweenness centrality values by using -// Brandes' dependency trick. -// For detailed explanation please read: -// https://www.cl.cam.ac.uk/teaching/1617/MLRD/handbook/brandes.html -func betweennessCentrality(g *SimpleGraph, s int, centrality []float64) { - // pred[w] is the list of nodes that immediately precede w on a - // shortest path from s to t for each node t. - pred := make([][]int, len(g.Nodes)) - - // sigma[t] is the number of shortest paths between nodes s and t - // for each node t. - sigma := make([]int, len(g.Nodes)) - sigma[s] = 1 - - // dist[t] holds the distance between s and t for each node t. - // We initialize this to -1 (meaning infinity) for each t != s. - dist := make([]int, len(g.Nodes)) - for i := range dist { - dist[i] = -1 - } - - dist[s] = 0 - - var ( - st stack - q queue - ) - q.push(s) - - // BFS to calculate the shortest paths (sigma and pred) - // from s to t for each node t. - for !q.empty() { - v := q.front() - q.pop() - st.push(v) - - for _, w := range g.Adj[v] { - // If distance from s to w is infinity (-1) - // then set it and enqueue w. 
- if dist[w] < 0 { - dist[w] = dist[v] + 1 - q.push(w) - } - - // If w is on a shortest path the update - // sigma and add v to w's predecessor list. - if dist[w] == dist[v]+1 { - sigma[w] += sigma[v] - pred[w] = append(pred[w], v) - } - } - } - - // delta[v] is the ratio of the shortest paths between s and t that go - // through v and the total number of shortest paths between s and t. - // If we have delta then the betweenness centrality is simply the sum - // of delta[w] for each w != s. - delta := make([]float64, len(g.Nodes)) - - for !st.empty() { - w := st.top() - st.pop() - - // pred[w] is the list of nodes that immediately precede w on a - // shortest path from s. - for _, v := range pred[w] { - // Update delta using Brandes' equation. - delta[v] += (float64(sigma[v]) / float64(sigma[w])) * (1.0 + delta[w]) - } - - if w != s { - // As noted above centrality is simply the sum - // of delta[w] for each w != s. - centrality[w] += delta[w] - } - } -} - -// Refresh recaculates and stores centrality values. -func (bc *BetweennessCentrality) Refresh(graph ChannelGraph) er.R { - cache, err := NewSimpleGraph(graph) - if err != nil { - return err - } - - var wg sync.WaitGroup - work := make(chan int) - partials := make(chan []float64, bc.workers) - - // Each worker will compute a partial result. - // This partial result is a sum of centrality updates - // on roughly N / workers nodes. - worker := func() { - defer wg.Done() - partial := make([]float64, len(cache.Nodes)) - - // Consume the next node, update centrality - // parital to avoid unnecessary synchronizaton. - for node := range work { - betweennessCentrality(cache, node, partial) - } - partials <- partial - } - - // Now start the N workers. - wg.Add(bc.workers) - for i := 0; i < bc.workers; i++ { - go worker() - } - - // Distribute work amongst workers. - // Should be fair when the graph is sufficiently large. 
- for node := range cache.Nodes { - work <- node - } - - close(work) - wg.Wait() - close(partials) - - // Collect and sum partials for final result. - centrality := make([]float64, len(cache.Nodes)) - for partial := range partials { - for i := 0; i < len(partial); i++ { - centrality[i] += partial[i] - } - } - - // Get min/max to be able to normalize - // centrality values between 0 and 1. - bc.min = 0 - bc.max = 0 - if len(centrality) > 0 { - for _, v := range centrality { - if v < bc.min { - bc.min = v - } else if v > bc.max { - bc.max = v - } - } - } - - // Divide by two as this is an undirected graph. - bc.min /= 2.0 - bc.max /= 2.0 - - bc.centrality = make(map[NodeID]float64) - for u, value := range centrality { - // Divide by two as this is an undirected graph. - bc.centrality[cache.Nodes[u]] = value / 2.0 - } - - return nil -} - -// GetMetric returns the current centrality values for each node indexed -// by node id. -func (bc *BetweennessCentrality) GetMetric(normalize bool) map[NodeID]float64 { - // Normalization factor. 
- var z float64 - if (bc.max - bc.min) > 0 { - z = 1.0 / (bc.max - bc.min) - } - - centrality := make(map[NodeID]float64) - for k, v := range bc.centrality { - if normalize { - v = (v - bc.min) * z - } - centrality[k] = v - } - - return centrality -} diff --git a/lnd/autopilot/betweenness_centrality_test.go b/lnd/autopilot/betweenness_centrality_test.go deleted file mode 100644 index 0391ef09..00000000 --- a/lnd/autopilot/betweenness_centrality_test.go +++ /dev/null @@ -1,146 +0,0 @@ -package autopilot - -import ( - "fmt" - "os" - "testing" - - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/chaincfg/globalcfg" - "github.com/stretchr/testify/require" -) - -func TestBetweennessCentralityMetricConstruction(t *testing.T) { - failing := []int{-1, 0} - ok := []int{1, 10} - - for _, workers := range failing { - m, err := NewBetweennessCentralityMetric(workers) - util.RequireErr( - t, err, "construction must fail with <= 0 workers", - ) - require.Nil(t, m) - } - - for _, workers := range ok { - m, err := NewBetweennessCentralityMetric(workers) - util.RequireNoErr( - t, err, "construction must succeed with >= 1 workers", - ) - require.NotNil(t, m) - } -} - -// Tests that empty graph results in empty centrality result. 
-func TestBetweennessCentralityEmptyGraph(t *testing.T) { - centralityMetric, err := NewBetweennessCentralityMetric(1) - util.RequireNoErr( - t, err, - "construction must succeed with positive number of workers", - ) - - for _, chanGraph := range chanGraphs { - graph, cleanup, err := chanGraph.genFunc() - success := t.Run(chanGraph.name, func(t1 *testing.T) { - util.RequireNoErr(t, err, "unable to create graph") - - if cleanup != nil { - defer cleanup() - } - - err := centralityMetric.Refresh(graph) - util.RequireNoErr(t, err) - - centrality := centralityMetric.GetMetric(false) - require.Equal(t, 0, len(centrality)) - - centrality = centralityMetric.GetMetric(true) - require.Equal(t, 0, len(centrality)) - }) - if !success { - break - } - } -} - -// Test betweenness centrality calculating using an example graph. -func TestBetweennessCentralityWithNonEmptyGraph(t *testing.T) { - workers := []int{1, 3, 9, 100} - - tests := []struct { - normalize bool - centrality []float64 - }{ - { - normalize: true, - centrality: normalizedTestGraphCentrality, - }, - { - normalize: false, - centrality: testGraphCentrality, - }, - } - - for _, numWorkers := range workers { - for _, chanGraph := range chanGraphs { - numWorkers := numWorkers - graph, cleanup, err := chanGraph.genFunc() - util.RequireNoErr(t, err, "unable to create graph") - - if cleanup != nil { - defer cleanup() - } - - testName := fmt.Sprintf( - "%v %d workers", chanGraph.name, numWorkers, - ) - - success := t.Run(testName, func(t1 *testing.T) { - metric, err := NewBetweennessCentralityMetric( - numWorkers, - ) - util.RequireNoErr( - t, err, - "construction must succeed with "+ - "positive number of workers", - ) - - graphNodes := buildTestGraph( - t1, graph, centralityTestGraph, - ) - - err = metric.Refresh(graph) - util.RequireNoErr(t, err) - - for _, expected := range tests { - expected := expected - centrality := metric.GetMetric( - expected.normalize, - ) - - require.Equal(t, - centralityTestGraph.nodes, - 
len(centrality), - ) - - for i, c := range expected.centrality { - nodeID := NewNodeID( - graphNodes[i], - ) - result, ok := centrality[nodeID] - require.True(t, ok) - require.Equal(t, c, result) - } - } - }) - if !success { - break - } - } - } -} - -func TestMain(m *testing.M) { - globalcfg.SelectConfig(globalcfg.BitcoinDefaults()) - os.Exit(m.Run()) -} diff --git a/lnd/autopilot/centrality_testdata_test.go b/lnd/autopilot/centrality_testdata_test.go deleted file mode 100644 index 829a6a5d..00000000 --- a/lnd/autopilot/centrality_testdata_test.go +++ /dev/null @@ -1,68 +0,0 @@ -package autopilot - -import ( - "testing" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/util" -) - -// testGraphDesc is a helper type to describe a test graph. -type testGraphDesc struct { - nodes int - edges map[int][]int -} - -var centralityTestGraph = testGraphDesc{ - nodes: 9, - edges: map[int][]int{ - 0: {1, 2, 3}, - 1: {2}, - 2: {3}, - 3: {4, 5}, - 4: {5, 6, 7}, - 5: {6, 7}, - 6: {7, 8}, - }, -} - -var testGraphCentrality = []float64{ - 3.0, 0.0, 3.0, 15.0, 6.0, 6.0, 7.0, 0.0, 0.0, -} - -var normalizedTestGraphCentrality = []float64{ - 0.2, 0.0, 0.2, 1.0, 0.4, 0.4, 7.0 / 15.0, 0.0, 0.0, -} - -// buildTestGraph builds a test graph from a passed graph desriptor. 
-func buildTestGraph(t *testing.T, - graph testGraph, desc testGraphDesc) map[int]*btcec.PublicKey { - - nodes := make(map[int]*btcec.PublicKey) - - for i := 0; i < desc.nodes; i++ { - key, err := graph.addRandNode() - util.RequireNoErr(t, err, "cannot create random node") - - nodes[i] = key - } - - chanCapacity := btcutil.UnitsPerCoin() - for u, neighbors := range desc.edges { - for _, v := range neighbors { - _, _, err := graph.addRandChannel( - nodes[u], nodes[v], chanCapacity, - ) - util.RequireNoErr(t, err, - "unexpected error adding random channel", - ) - if err != nil { - t.Fatalf("unexpected error adding"+ - "random channel: %v", err) - } - } - } - - return nodes -} diff --git a/lnd/autopilot/choice.go b/lnd/autopilot/choice.go deleted file mode 100644 index f52654e0..00000000 --- a/lnd/autopilot/choice.go +++ /dev/null @@ -1,89 +0,0 @@ -package autopilot - -import ( - "math/rand" - - "github.com/pkt-cash/pktd/btcutil/er" -) - -var Err = er.NewErrorType("lnd.autopilot") - -// ErrNoPositive is returned from weightedChoice when there are no positive -// weights left to choose from. -var ErrNoPositive = Err.CodeWithDetail("ErrNoPositive", "no positive weights left") - -// weightedChoice draws a random index from the slice of weights, with a -// probability propotional to the weight at the given index. -func weightedChoice(w []float64) (int, er.R) { - // Calculate the sum of weights. - var sum float64 - for _, v := range w { - sum += v - } - - if sum <= 0 { - return 0, ErrNoPositive.Default() - } - - // Pick a random number in the range [0.0, 1.0) and multiply it with - // the sum of weights. Then we'll iterate the weights until the number - // goes below 0. This means that each index is picked with a probablity - // equal to their normalized score. 
- // - // Example: - // Items with scores [1, 5, 2, 2] - // Normalized scores [0.1, 0.5, 0.2, 0.2] - // Imagine they each occupy a "range" equal to their normalized score - // in [0, 1.0]: - // [|-0.1-||-----0.5-----||--0.2--||--0.2--|] - // The following loop is now equivalent to "hitting" the intervals. - r := rand.Float64() * sum - for i := range w { - r -= w[i] - if r <= 0 { - return i, nil - } - } - - return 0, er.Errorf("unable to make choice") -} - -// chooseN picks at random min[n, len(s)] nodes if from the NodeScore map, with -// a probability weighted by their score. -func chooseN(n uint32, s map[NodeID]*NodeScore) ( - map[NodeID]*NodeScore, er.R) { - - // Keep track of the number of nodes not yet chosen, in addition to - // their scores and NodeIDs. - rem := len(s) - scores := make([]float64, len(s)) - nodeIDs := make([]NodeID, len(s)) - i := 0 - for k, v := range s { - scores[i] = v.Score - nodeIDs[i] = k - i++ - } - - // Pick a weighted choice from the remaining nodes as long as there are - // nodes left, and we haven't already picked n. - chosen := make(map[NodeID]*NodeScore) - for len(chosen) < int(n) && rem > 0 { - choice, err := weightedChoice(scores) - if ErrNoPositive.Is(err) { - return chosen, nil - } else if err != nil { - return nil, err - } - - nID := nodeIDs[choice] - - chosen[nID] = s[nID] - - // We set the score of the chosen node to 0, so it won't be - // picked the next iteration. - scores[choice] = 0 - } - - return chosen, nil -} diff --git a/lnd/autopilot/choice_test.go b/lnd/autopilot/choice_test.go deleted file mode 100644 index 44e50eb6..00000000 --- a/lnd/autopilot/choice_test.go +++ /dev/null @@ -1,338 +0,0 @@ -package autopilot - -import ( - "encoding/binary" - "math/rand" - "reflect" - "testing" - "testing/quick" -) - -// TestWeightedChoiceEmptyMap tests that passing in an empty slice of weights -// returns an error. 
-func TestWeightedChoiceEmptyMap(t *testing.T) { - t.Parallel() - - var w []float64 - _, err := weightedChoice(w) - if !ErrNoPositive.Is(err) { - t.Fatalf("expected ErrNoPositive when choosing in "+ - "empty map, instead got %v", err) - } -} - -// singeNonZero is a type used to generate float64 slices with one non-zero -// element. -type singleNonZero []float64 - -// Generate generates a value of type sinelNonZero to be used during -// QuickTests. -func (singleNonZero) Generate(rand *rand.Rand, size int) reflect.Value { - w := make([]float64, size) - - // Pick a random index and set it to a random float. - i := rand.Intn(size) - w[i] = rand.Float64() - - return reflect.ValueOf(w) -} - -// TestWeightedChoiceSingleIndex tests that choosing randomly in a slice with -// one positive element always returns that one index. -func TestWeightedChoiceSingleIndex(t *testing.T) { - t.Parallel() - - // Helper that returns the index of the non-zero element. - allButOneZero := func(weights []float64) (bool, int) { - var ( - numZero uint32 - nonZeroEl int - ) - - for i, w := range weights { - if w != 0 { - numZero++ - nonZeroEl = i - } - } - - return numZero == 1, nonZeroEl - } - - property := func(weights singleNonZero) bool { - // Make sure the generated slice has exactly one non-zero - // element. - conditionMet, nonZeroElem := allButOneZero(weights[:]) - if !conditionMet { - return false - } - - // Call weightedChoice and assert it picks the non-zero - // element. - choice, err := weightedChoice(weights[:]) - if err != nil { - return false - } - return choice == nonZeroElem - } - - if err := quick.Check(property, nil); err != nil { - t.Fatal(err) - } -} - -// nonNegative is a type used to generate float64 slices with non-negative -// elements. -type nonNegative []float64 - -// Generate generates a value of type nonNegative to be used during -// QuickTests. 
-func (nonNegative) Generate(rand *rand.Rand, size int) reflect.Value { - w := make([]float64, size) - - for i := range w { - r := rand.Float64() - - // For very small weights it won't work to check deviation from - // expected value, so we set them to zero. - if r < 0.01*float64(size) { - r = 0 - } - w[i] = float64(r) - } - return reflect.ValueOf(w) -} - -func assertChoice(w []float64, iterations int) bool { - var sum float64 - for _, v := range w { - sum += v - } - - // Calculate the expected frequency of each choice. - expFrequency := make([]float64, len(w)) - for i, ww := range w { - expFrequency[i] = ww / sum - } - - chosen := make(map[int]int) - for i := 0; i < iterations; i++ { - res, err := weightedChoice(w) - if err != nil { - return false - } - chosen[res]++ - } - - // Since this is random we check that the number of times chosen is - // within 20% of the expected value. - totalChoices := 0 - for i, f := range expFrequency { - exp := float64(iterations) * f - v := float64(chosen[i]) - totalChoices += chosen[i] - expHigh := exp + exp/5 - expLow := exp - exp/5 - if v < expLow || v > expHigh { - return false - } - } - - // The sum of choices must be exactly iterations of course. - return totalChoices == iterations - -} - -// TestWeightedChoiceDistribution asserts that the weighted choice algorithm -// chooses among indexes according to their scores. -func TestWeightedChoiceDistribution(t *testing.T) { - const iterations = 100000 - - property := func(weights nonNegative) bool { - return assertChoice(weights, iterations) - } - - if err := quick.Check(property, nil); err != nil { - t.Fatal(err) - } -} - -// TestChooseNEmptyMap checks that chooseN returns an empty result when no -// nodes are chosen among. -func TestChooseNEmptyMap(t *testing.T) { - t.Parallel() - - nodes := map[NodeID]*NodeScore{} - property := func(n uint32) bool { - res, err := chooseN(n, nodes) - if err != nil { - return false - } - - // Result should always be empty. 
- return len(res) == 0 - } - - if err := quick.Check(property, nil); err != nil { - t.Fatal(err) - } -} - -// candidateMapVarLen is a type we'll use to generate maps of various lengths -// up to 255 to be used during QuickTests. -type candidateMapVarLen map[NodeID]*NodeScore - -// Generate generates a value of type candidateMapVarLen to be used during -// QuickTests. -func (candidateMapVarLen) Generate(rand *rand.Rand, size int) reflect.Value { - nodes := make(map[NodeID]*NodeScore) - - // To avoid creating huge maps, we restrict them to max uint8 len. - n := uint8(rand.Uint32()) - - for i := uint8(0); i < n; i++ { - s := rand.Float64() - - // We set small values to zero, to ensure we handle these - // correctly. - if s < 0.01 { - s = 0 - } - - var nID [33]byte - binary.BigEndian.PutUint32(nID[:], uint32(i)) - nodes[nID] = &NodeScore{ - Score: s, - } - } - - return reflect.ValueOf(nodes) -} - -// TestChooseNMinimum test that chooseN returns the minimum of the number of -// nodes we request and the number of positively scored nodes in the given map. -func TestChooseNMinimum(t *testing.T) { - t.Parallel() - - // Helper to count the number of positive scores in the given map. - numPositive := func(nodes map[NodeID]*NodeScore) int { - cnt := 0 - for _, v := range nodes { - if v.Score > 0 { - cnt++ - } - } - return cnt - } - - // We use let the type of n be uint8 to avoid generating huge numbers. - property := func(nodes candidateMapVarLen, n uint8) bool { - res, err := chooseN(uint32(n), nodes) - if err != nil { - return false - } - - positive := numPositive(nodes) - - // Result should always be the minimum of the number of nodes - // we wanted to select and the number of positively scored - // nodes in the map. 
- min := positive - if int(n) < min { - min = int(n) - } - - if len(res) != min { - return false - - } - return true - } - - if err := quick.Check(property, nil); err != nil { - t.Fatal(err) - } -} - -// TestChooseNSample sanity checks that nodes are picked by chooseN according -// to their scores. -func TestChooseNSample(t *testing.T) { - t.Parallel() - - const numNodes = 500 - const maxIterations = 100000 - fifth := uint32(numNodes / 5) - - nodes := make(map[NodeID]*NodeScore) - - // we make 5 buckets of nodes: 0, 0.1, 0.2, 0.4 and 0.8 score. We want - // to check that zero scores never gets chosen, while a doubling the - // score makes a node getting chosen about double the amount (this is - // true only when n <<< numNodes). - j := 2 * fifth - score := 0.1 - for i := uint32(0); i < numNodes; i++ { - - // Each time i surpasses j we double the score we give to the - // next fifth of nodes. - if i >= j { - score *= 2 - j += fifth - } - s := score - - // The first 1/5 of nodes we give a score of 0. - if i < fifth { - s = 0 - } - - var nID [33]byte - binary.BigEndian.PutUint32(nID[:], i) - nodes[nID] = &NodeScore{ - Score: s, - } - } - - // For each value of N we'll check that the nodes are picked the - // expected number of times over time. - for _, n := range []uint32{1, 5, 10, 20, 50} { - // Since choosing more nodes will result in chooseN getting - // slower we decrease the number of iterations. This is okay - // since the variance in the total picks for a node will be - // lower when choosing more nodes each time. - iterations := maxIterations / n - count := make(map[NodeID]int) - for i := 0; i < int(iterations); i++ { - res, err := chooseN(n, nodes) - if err != nil { - t.Fatalf("failed choosing nodes: %v", err) - } - - for nID := range res { - count[nID]++ - } - } - - // Sum the number of times a node in each score bucket was - // picked. 
- sums := make(map[float64]int) - for nID, s := range nodes { - sums[s.Score] += count[nID] - } - - // The count of each bucket should be about double of the - // previous bucket. Since this is all random, we check that - // the result is within 20% of the expected value. - for _, score := range []float64{0.2, 0.4, 0.8} { - cnt := sums[score] - half := cnt / 2 - expLow := half - half/5 - expHigh := half + half/5 - if sums[score/2] < expLow || sums[score/2] > expHigh { - t.Fatalf("expected the nodes with score %v "+ - "to be chosen about %v times, instead "+ - "was %v", score/2, half, sums[score/2]) - } - } - } -} diff --git a/lnd/autopilot/combinedattach.go b/lnd/autopilot/combinedattach.go deleted file mode 100644 index 39c7e0f1..00000000 --- a/lnd/autopilot/combinedattach.go +++ /dev/null @@ -1,174 +0,0 @@ -package autopilot - -import ( - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/pktlog/log" -) - -// WeightedHeuristic is a tuple that associates a weight to an -// AttachmentHeuristic. This is used to determining a node's final score when -// querying several heuristics for scores. -type WeightedHeuristic struct { - // Weight is this AttachmentHeuristic's relative weight factor. It - // should be between 0.0 and 1.0. - Weight float64 - - AttachmentHeuristic -} - -// WeightedCombAttachment is an implementation of the AttachmentHeuristic -// interface that combines the scores given by several sub-heuristics into one. -type WeightedCombAttachment struct { - heuristics []*WeightedHeuristic -} - -// NewWeightedCombAttachment creates a new instance of a WeightedCombAttachment. -func NewWeightedCombAttachment(h ...*WeightedHeuristic) ( - *WeightedCombAttachment, er.R) { - - // The sum of weights given to the sub-heuristics must sum to exactly - // 1.0. 
- var sum float64 - for _, w := range h { - sum += w.Weight - } - - if sum != 1.0 { - return nil, er.Errorf("weights MUST sum to 1.0 (was %v)", sum) - } - - return &WeightedCombAttachment{ - heuristics: h, - }, nil -} - -// A compile time assertion to ensure WeightedCombAttachment meets the -// AttachmentHeuristic and ScoreSettable interfaces. -var _ AttachmentHeuristic = (*WeightedCombAttachment)(nil) -var _ ScoreSettable = (*WeightedCombAttachment)(nil) - -// Name returns the name of this heuristic. -// -// NOTE: This is a part of the AttachmentHeuristic interface. -func (c *WeightedCombAttachment) Name() string { - return "weightedcomb" -} - -// NodeScores is a method that given the current channel graph, current set of -// local channels and funds available, scores the given nodes according to the -// preference of opening a channel with them. The returned channel candidates -// maps the NodeID to an attachment directive containing a score and a channel -// size. -// -// The scores is determined by quering the set of sub-heuristics, then -// combining these scores into a final score according to the active -// configuration. -// -// The returned scores will be in the range [0, 1.0], where 0 indicates no -// improvement in connectivity if a channel is opened to this node, while 1.0 -// is the maximum possible improvement in connectivity. -// -// NOTE: This is a part of the AttachmentHeuristic interface. -func (c *WeightedCombAttachment) NodeScores(g ChannelGraph, chans []LocalChannel, - chanSize btcutil.Amount, nodes map[NodeID]struct{}) ( - map[NodeID]*NodeScore, er.R) { - - // We now query each heuristic to determine the score they give to the - // nodes for the given channel size. 
- var subScores []map[NodeID]*NodeScore - for _, h := range c.heuristics { - log.Tracef("Getting scores from sub heuristic %v", h.Name()) - - s, err := h.NodeScores( - g, chans, chanSize, nodes, - ) - if err != nil { - return nil, er.Errorf("unable to get sub score: %v", - err) - } - - subScores = append(subScores, s) - } - - // We combine the scores given by the sub-heuristics by using the - // heruistics' given weight factor. - scores := make(map[NodeID]*NodeScore) - for nID := range nodes { - score := &NodeScore{ - NodeID: nID, - } - - // Each sub-heuristic should have scored the node, if not it is - // implicitly given a zero score by that heuristic. - for i, h := range c.heuristics { - sub, ok := subScores[i][nID] - if !ok { - log.Tracef("No score given to node %x by sub "+ - "heuristic %v", nID[:], h.Name()) - continue - } - // Use the heuristic's weight factor to determine of - // how much weight we should give to this particular - // score. - subScore := h.Weight * sub.Score - log.Tracef("Giving node %x a sub score of %v "+ - "(%v * %v) from sub heuristic %v", nID[:], - subScore, h.Weight, sub.Score, h.Name()) - - score.Score += subScore - } - - log.Tracef("Node %x got final combined score %v", nID[:], - score.Score) - - switch { - // Instead of adding a node with score 0 to the returned set, - // we just skip it. - case score.Score == 0: - continue - - // Sanity check the new score. - case score.Score < 0 || score.Score > 1.0: - return nil, er.Errorf("invalid node score from "+ - "combination: %v", score.Score) - } - - scores[nID] = score - } - - return scores, nil -} - -// SetNodeScores is used to set the internal map from NodeIDs to scores. The -// passed scores must be in the range [0, 1.0]. The fist parameter is the name -// of the targeted heuristic, to allow recursively target specific -// sub-heuristics. The returned boolean indicates whether the targeted -// heuristic was found. 
-// -// Since this heuristic doesn't keep any internal scores, it will recursively -// apply the scores to its sub-heuristics. -// -// NOTE: This is a part of the ScoreSettable interface. -func (c *WeightedCombAttachment) SetNodeScores(targetHeuristic string, - newScores map[NodeID]float64) (bool, er.R) { - - found := false - for _, h := range c.heuristics { - // It must be ScoreSettable to be available for external - // scores. - s, ok := h.AttachmentHeuristic.(ScoreSettable) - if !ok { - continue - } - - // Heuristic supports scoring, attempt to set them. - applied, err := s.SetNodeScores(targetHeuristic, newScores) - if err != nil { - return false, err - } - found = found || applied - } - - return found, nil -} diff --git a/lnd/autopilot/externalscoreattach.go b/lnd/autopilot/externalscoreattach.go deleted file mode 100644 index 77b504b8..00000000 --- a/lnd/autopilot/externalscoreattach.go +++ /dev/null @@ -1,130 +0,0 @@ -package autopilot - -import ( - "sync" - - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/pktlog/log" -) - -// ExternalScoreAttachment is an implementation of the AttachmentHeuristic -// interface that allows an external source to provide it with node scores. -type ExternalScoreAttachment struct { - // TODO(halseth): persist across restarts. - nodeScores map[NodeID]float64 - - sync.Mutex -} - -// NewExternalScoreAttachment creates a new instance of an -// ExternalScoreAttachment. -func NewExternalScoreAttachment() *ExternalScoreAttachment { - return &ExternalScoreAttachment{} -} - -// A compile time assertion to ensure ExternalScoreAttachment meets the -// AttachmentHeuristic and ScoreSettable interfaces. -var _ AttachmentHeuristic = (*ExternalScoreAttachment)(nil) -var _ ScoreSettable = (*ExternalScoreAttachment)(nil) - -// Name returns the name of this heuristic. -// -// NOTE: This is a part of the AttachmentHeuristic interface. 
-func (s *ExternalScoreAttachment) Name() string { - return "externalscore" -} - -// SetNodeScores is used to set the internal map from NodeIDs to scores. The -// passed scores must be in the range [0, 1.0]. The fist parameter is the name -// of the targeted heuristic, to allow recursively target specific -// sub-heuristics. The returned boolean indicates whether the targeted -// heuristic was found. -// -// NOTE: This is a part of the ScoreSettable interface. -func (s *ExternalScoreAttachment) SetNodeScores(targetHeuristic string, - newScores map[NodeID]float64) (bool, er.R) { - - // Return if this heuristic wasn't targeted. - if targetHeuristic != s.Name() { - return false, nil - } - - // Since there's a requirement that all score are in the range [0, - // 1.0], we validate them before setting the internal list. - for nID, s := range newScores { - if s < 0 || s > 1.0 { - return false, er.Errorf("invalid score %v for "+ - "nodeID %v", s, nID) - } - } - - s.Lock() - defer s.Unlock() - - s.nodeScores = newScores - log.Tracef("Setting %v external scores", len(s.nodeScores)) - - return true, nil -} - -// NodeScores is a method that given the current channel graph and current set -// of local channels, scores the given nodes according to the preference of -// opening a channel of the given size with them. The returned channel -// candidates maps the NodeID to a NodeScore for the node. -// -// The returned scores will be in the range [0, 1.0], where 0 indicates no -// improvement in connectivity if a channel is opened to this node, while 1.0 -// is the maximum possible improvement in connectivity. -// -// The scores are determined by checking the internal node scores list. Nodes -// not known will get a score of 0. -// -// NOTE: This is a part of the AttachmentHeuristic interface. 
-func (s *ExternalScoreAttachment) NodeScores(g ChannelGraph, chans []LocalChannel, - chanSize btcutil.Amount, nodes map[NodeID]struct{}) ( - map[NodeID]*NodeScore, er.R) { - - existingPeers := make(map[NodeID]struct{}) - for _, c := range chans { - existingPeers[c.Node] = struct{}{} - } - - s.Lock() - defer s.Unlock() - - log.Tracef("External scoring %v nodes, from %v set scores", - len(nodes), len(s.nodeScores)) - - // Fill the map of candidates to return. - candidates := make(map[NodeID]*NodeScore) - for nID := range nodes { - var score float64 - if nodeScore, ok := s.nodeScores[nID]; ok { - score = nodeScore - } - - // If the node is among or existing channel peers, we don't - // need another channel. - if _, ok := existingPeers[nID]; ok { - log.Tracef("Skipping existing peer %x from external "+ - "score results", nID[:]) - continue - } - - log.Tracef("External score %v given to node %x", score, nID[:]) - - // Instead of adding a node with score 0 to the returned set, - // we just skip it. - if score == 0 { - continue - } - - candidates[nID] = &NodeScore{ - NodeID: nID, - Score: score, - } - } - - return candidates, nil -} diff --git a/lnd/autopilot/externalscoreattach_test.go b/lnd/autopilot/externalscoreattach_test.go deleted file mode 100644 index bae28f0e..00000000 --- a/lnd/autopilot/externalscoreattach_test.go +++ /dev/null @@ -1,101 +0,0 @@ -package autopilot_test - -import ( - "testing" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/autopilot" -) - -// randKey returns a random public key. -func randKey() (*btcec.PublicKey, er.R) { - priv, err := btcec.NewPrivateKey(btcec.S256()) - if err != nil { - return nil, err - } - - return priv.PubKey(), nil -} - -// TestSetNodeScores tests that the scores returned by the -// ExternalScoreAttachment correctly reflects the scores we set last. 
-func TestSetNodeScores(t *testing.T) { - t.Parallel() - - const name = "externalscore" - - h := autopilot.NewExternalScoreAttachment() - - // Create a list of random node IDs. - const numKeys = 20 - var pubkeys []autopilot.NodeID - for i := 0; i < numKeys; i++ { - k, err := randKey() - if err != nil { - t.Fatal(err) - } - - nID := autopilot.NewNodeID(k) - pubkeys = append(pubkeys, nID) - } - - // Set the score of half of the nodes. - scores := make(map[autopilot.NodeID]float64) - for i := 0; i < numKeys/2; i++ { - nID := pubkeys[i] - scores[nID] = 0.05 * float64(i) - } - - applied, err := h.SetNodeScores(name, scores) - if err != nil { - t.Fatal(err) - } - - if !applied { - t.Fatalf("scores were not applied") - } - - // Query all scores, half should be set, half should be zero. - q := make(map[autopilot.NodeID]struct{}) - for _, nID := range pubkeys { - q[nID] = struct{}{} - } - resp, err := h.NodeScores( - nil, nil, btcutil.Amount(btcutil.UnitsPerCoin()), q, - ) - if err != nil { - t.Fatal(err) - } - - for i := 0; i < numKeys; i++ { - var expected float64 - if i < numKeys/2 { - expected = 0.05 * float64(i) - } - nID := pubkeys[i] - - var score float64 - if s, ok := resp[nID]; ok { - score = s.Score - } - - if score != expected { - t.Fatalf("expected score %v, got %v", - expected, score) - } - - } - - // Try to apply scores with bogus name, should not be applied. 
- applied, err = h.SetNodeScores("dummy", scores) - if err != nil { - t.Fatal(err) - } - - if applied { - t.Fatalf("scores were applied") - } - -} diff --git a/lnd/autopilot/graph.go b/lnd/autopilot/graph.go deleted file mode 100644 index 5bd7f17e..00000000 --- a/lnd/autopilot/graph.go +++ /dev/null @@ -1,525 +0,0 @@ -package autopilot - -import ( - "bytes" - "math/big" - "net" - "sort" - "sync/atomic" - "time" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/routing/route" -) - -var ( - testSig = &btcec.Signature{ - R: new(big.Int), - S: new(big.Int), - } - _, _ = testSig.R.SetString("63724406601629180062774974542967536251589935445068131219452686511677818569431", 10) - _, _ = testSig.S.SetString("18801056069249825825291287104931333862866033135609736119018462340006816851118", 10) - - chanIDCounter uint64 // To be used atomically. -) - -// databaseChannelGraph wraps a channeldb.ChannelGraph instance with the -// necessary API to properly implement the autopilot.ChannelGraph interface. -// -// TODO(roasbeef): move inmpl to main package? -type databaseChannelGraph struct { - db *channeldb.ChannelGraph -} - -// A compile time assertion to ensure databaseChannelGraph meets the -// autopilot.ChannelGraph interface. -var _ ChannelGraph = (*databaseChannelGraph)(nil) - -// ChannelGraphFromDatabase returns an instance of the autopilot.ChannelGraph -// backed by a live, open channeldb instance. -func ChannelGraphFromDatabase(db *channeldb.ChannelGraph) ChannelGraph { - return &databaseChannelGraph{ - db: db, - } -} - -// type dbNode is a wrapper struct around a database transaction an -// channeldb.LightningNode. The wrapper method implement the autopilot.Node -// interface. 
-type dbNode struct { - tx kvdb.RTx - - node *channeldb.LightningNode -} - -// A compile time assertion to ensure dbNode meets the autopilot.Node -// interface. -var _ Node = (*dbNode)(nil) - -// PubKey is the identity public key of the node. This will be used to attempt -// to target a node for channel opening by the main autopilot agent. The key -// will be returned in serialized compressed format. -// -// NOTE: Part of the autopilot.Node interface. -func (d dbNode) PubKey() [33]byte { - return d.node.PubKeyBytes -} - -// Addrs returns a slice of publicly reachable public TCP addresses that the -// peer is known to be listening on. -// -// NOTE: Part of the autopilot.Node interface. -func (d dbNode) Addrs() []net.Addr { - return d.node.Addresses -} - -// ForEachChannel is a higher-order function that will be used to iterate -// through all edges emanating from/to the target node. For each active -// channel, this function should be called with the populated ChannelEdge that -// describes the active channel. -// -// NOTE: Part of the autopilot.Node interface. -func (d dbNode) ForEachChannel(cb func(ChannelEdge) er.R) er.R { - return d.node.ForEachChannel(d.tx, func(tx kvdb.RTx, - ei *channeldb.ChannelEdgeInfo, ep, _ *channeldb.ChannelEdgePolicy) er.R { - - // Skip channels for which no outgoing edge policy is available. - // - // TODO(joostjager): Ideally the case where channels have a nil - // policy should be supported, as autopilot is not looking at - // the policies. For now, it is not easily possible to get a - // reference to the other end LightningNode object without - // retrieving the policy. - if ep == nil { - return nil - } - - edge := ChannelEdge{ - ChanID: lnwire.NewShortChanIDFromInt(ep.ChannelID), - Capacity: ei.Capacity, - Peer: dbNode{ - tx: tx, - node: ep.Node, - }, - } - - return cb(edge) - }) -} - -// ForEachNode is a higher-order function that should be called once for each -// connected node within the channel graph. 
If the passed callback returns an -// error, then execution should be terminated. -// -// NOTE: Part of the autopilot.ChannelGraph interface. -func (d *databaseChannelGraph) ForEachNode(cb func(Node) er.R) er.R { - return d.db.ForEachNode(func(tx kvdb.RTx, n *channeldb.LightningNode) er.R { - // We'll skip over any node that doesn't have any advertised - // addresses. As we won't be able to reach them to actually - // open any channels. - if len(n.Addresses) == 0 { - return nil - } - - node := dbNode{ - tx: tx, - node: n, - } - return cb(node) - }) -} - -// addRandChannel creates a new channel two target nodes. This function is -// meant to aide in the generation of random graphs for use within test cases -// the exercise the autopilot package. -func (d *databaseChannelGraph) addRandChannel(node1, node2 *btcec.PublicKey, - capacity btcutil.Amount) (*ChannelEdge, *ChannelEdge, er.R) { - - fetchNode := func(pub *btcec.PublicKey) (*channeldb.LightningNode, er.R) { - if pub != nil { - vertex, err := route.NewVertexFromBytes( - pub.SerializeCompressed(), - ) - if err != nil { - return nil, err - } - - dbNode, err := d.db.FetchLightningNode(nil, vertex) - switch { - case channeldb.ErrGraphNodeNotFound.Is(err): - fallthrough - case channeldb.ErrGraphNotFound.Is(err): - graphNode := &channeldb.LightningNode{ - HaveNodeAnnouncement: true, - Addresses: []net.Addr{ - &net.TCPAddr{ - IP: bytes.Repeat([]byte("a"), 16), - }, - }, - Features: lnwire.NewFeatureVector( - nil, lnwire.Features, - ), - AuthSigBytes: testSig.Serialize(), - } - graphNode.AddPubKey(pub) - if err := d.db.AddLightningNode(graphNode); err != nil { - return nil, err - } - case err != nil: - return nil, err - } - - return dbNode, nil - } - - nodeKey, err := randKey() - if err != nil { - return nil, err - } - dbNode := &channeldb.LightningNode{ - HaveNodeAnnouncement: true, - Addresses: []net.Addr{ - &net.TCPAddr{ - IP: bytes.Repeat([]byte("a"), 16), - }, - }, - Features: lnwire.NewFeatureVector( - nil, 
lnwire.Features, - ), - AuthSigBytes: testSig.Serialize(), - } - dbNode.AddPubKey(nodeKey) - if err := d.db.AddLightningNode(dbNode); err != nil { - return nil, err - } - - return dbNode, nil - } - - vertex1, err := fetchNode(node1) - if err != nil { - return nil, nil, err - } - - vertex2, err := fetchNode(node2) - if err != nil { - return nil, nil, err - } - - var lnNode1, lnNode2 *btcec.PublicKey - if bytes.Compare(vertex1.PubKeyBytes[:], vertex2.PubKeyBytes[:]) == -1 { - lnNode1, _ = vertex1.PubKey() - lnNode2, _ = vertex2.PubKey() - } else { - lnNode1, _ = vertex2.PubKey() - lnNode2, _ = vertex1.PubKey() - } - - chanID := randChanID() - edge := &channeldb.ChannelEdgeInfo{ - ChannelID: chanID.ToUint64(), - Capacity: capacity, - } - edge.AddNodeKeys(lnNode1, lnNode2, lnNode1, lnNode2) - if err := d.db.AddChannelEdge(edge); err != nil { - return nil, nil, err - } - edgePolicy := &channeldb.ChannelEdgePolicy{ - SigBytes: testSig.Serialize(), - ChannelID: chanID.ToUint64(), - LastUpdate: time.Now(), - TimeLockDelta: 10, - MinHTLC: 1, - MaxHTLC: lnwire.NewMSatFromSatoshis(capacity), - FeeBaseMSat: 10, - FeeProportionalMillionths: 10000, - MessageFlags: 1, - ChannelFlags: 0, - } - - if err := d.db.UpdateEdgePolicy(edgePolicy); err != nil { - return nil, nil, err - } - edgePolicy = &channeldb.ChannelEdgePolicy{ - SigBytes: testSig.Serialize(), - ChannelID: chanID.ToUint64(), - LastUpdate: time.Now(), - TimeLockDelta: 10, - MinHTLC: 1, - MaxHTLC: lnwire.NewMSatFromSatoshis(capacity), - FeeBaseMSat: 10, - FeeProportionalMillionths: 10000, - MessageFlags: 1, - ChannelFlags: 1, - } - if err := d.db.UpdateEdgePolicy(edgePolicy); err != nil { - return nil, nil, err - } - - return &ChannelEdge{ - ChanID: chanID, - Capacity: capacity, - Peer: dbNode{ - node: vertex1, - }, - }, - &ChannelEdge{ - ChanID: chanID, - Capacity: capacity, - Peer: dbNode{ - node: vertex2, - }, - }, - nil -} - -func (d *databaseChannelGraph) addRandNode() (*btcec.PublicKey, er.R) { - nodeKey, err := 
randKey() - if err != nil { - return nil, err - } - dbNode := &channeldb.LightningNode{ - HaveNodeAnnouncement: true, - Addresses: []net.Addr{ - &net.TCPAddr{ - IP: bytes.Repeat([]byte("a"), 16), - }, - }, - Features: lnwire.NewFeatureVector( - nil, lnwire.Features, - ), - AuthSigBytes: testSig.Serialize(), - } - dbNode.AddPubKey(nodeKey) - if err := d.db.AddLightningNode(dbNode); err != nil { - return nil, err - } - - return nodeKey, nil - -} - -// memChannelGraph is an implementation of the autopilot.ChannelGraph backed by -// an in-memory graph. -type memChannelGraph struct { - graph map[NodeID]*memNode -} - -// A compile time assertion to ensure memChannelGraph meets the -// autopilot.ChannelGraph interface. -var _ ChannelGraph = (*memChannelGraph)(nil) - -// newMemChannelGraph creates a new blank in-memory channel graph -// implementation. -func newMemChannelGraph() *memChannelGraph { - return &memChannelGraph{ - graph: make(map[NodeID]*memNode), - } -} - -// ForEachNode is a higher-order function that should be called once for each -// connected node within the channel graph. If the passed callback returns an -// error, then execution should be terminated. -// -// NOTE: Part of the autopilot.ChannelGraph interface. -func (m memChannelGraph) ForEachNode(cb func(Node) er.R) er.R { - for _, node := range m.graph { - if err := cb(node); err != nil { - return err - } - } - - return nil -} - -// randChanID generates a new random channel ID. -func randChanID() lnwire.ShortChannelID { - id := atomic.AddUint64(&chanIDCounter, 1) - return lnwire.NewShortChanIDFromInt(id) -} - -// randKey returns a random public key. -func randKey() (*btcec.PublicKey, er.R) { - priv, err := btcec.NewPrivateKey(btcec.S256()) - if err != nil { - return nil, err - } - - return priv.PubKey(), nil -} - -// addRandChannel creates a new channel two target nodes. 
This function is -// meant to aide in the generation of random graphs for use within test cases -// the exercise the autopilot package. -func (m *memChannelGraph) addRandChannel(node1, node2 *btcec.PublicKey, - capacity btcutil.Amount) (*ChannelEdge, *ChannelEdge, er.R) { - - var ( - vertex1, vertex2 *memNode - ok bool - ) - - if node1 != nil { - vertex1, ok = m.graph[NewNodeID(node1)] - if !ok { - vertex1 = &memNode{ - pub: node1, - addrs: []net.Addr{ - &net.TCPAddr{ - IP: bytes.Repeat([]byte("a"), 16), - }, - }, - } - } - } else { - newPub, err := randKey() - if err != nil { - return nil, nil, err - } - vertex1 = &memNode{ - pub: newPub, - addrs: []net.Addr{ - &net.TCPAddr{ - IP: bytes.Repeat([]byte("a"), 16), - }, - }, - } - } - - if node2 != nil { - vertex2, ok = m.graph[NewNodeID(node2)] - if !ok { - vertex2 = &memNode{ - pub: node2, - addrs: []net.Addr{ - &net.TCPAddr{ - IP: bytes.Repeat([]byte("a"), 16), - }, - }, - } - } - } else { - newPub, err := randKey() - if err != nil { - return nil, nil, err - } - vertex2 = &memNode{ - pub: newPub, - addrs: []net.Addr{ - &net.TCPAddr{ - IP: bytes.Repeat([]byte("a"), 16), - }, - }, - } - } - - edge1 := ChannelEdge{ - ChanID: randChanID(), - Capacity: capacity, - Peer: vertex2, - } - vertex1.chans = append(vertex1.chans, edge1) - - edge2 := ChannelEdge{ - ChanID: randChanID(), - Capacity: capacity, - Peer: vertex1, - } - vertex2.chans = append(vertex2.chans, edge2) - - m.graph[NewNodeID(vertex1.pub)] = vertex1 - m.graph[NewNodeID(vertex2.pub)] = vertex2 - - return &edge1, &edge2, nil -} - -func (m *memChannelGraph) addRandNode() (*btcec.PublicKey, er.R) { - newPub, err := randKey() - if err != nil { - return nil, err - } - vertex := &memNode{ - pub: newPub, - addrs: []net.Addr{ - &net.TCPAddr{ - IP: bytes.Repeat([]byte("a"), 16), - }, - }, - } - m.graph[NewNodeID(newPub)] = vertex - - return newPub, nil -} - -// memNode is a purely in-memory implementation of the autopilot.Node -// interface. 
-type memNode struct { - pub *btcec.PublicKey - - chans []ChannelEdge - - addrs []net.Addr -} - -// A compile time assertion to ensure memNode meets the autopilot.Node -// interface. -var _ Node = (*memNode)(nil) - -// PubKey is the identity public key of the node. This will be used to attempt -// to target a node for channel opening by the main autopilot agent. -// -// NOTE: Part of the autopilot.Node interface. -func (m memNode) PubKey() [33]byte { - var n [33]byte - copy(n[:], m.pub.SerializeCompressed()) - - return n -} - -// Addrs returns a slice of publicly reachable public TCP addresses that the -// peer is known to be listening on. -// -// NOTE: Part of the autopilot.Node interface. -func (m memNode) Addrs() []net.Addr { - return m.addrs -} - -// ForEachChannel is a higher-order function that will be used to iterate -// through all edges emanating from/to the target node. For each active -// channel, this function should be called with the populated ChannelEdge that -// describes the active channel. -// -// NOTE: Part of the autopilot.Node interface. -func (m memNode) ForEachChannel(cb func(ChannelEdge) er.R) er.R { - for _, channel := range m.chans { - if err := cb(channel); err != nil { - return err - } - } - - return nil -} - -// Median returns the median value in the slice of Amounts. -func Median(vals []btcutil.Amount) btcutil.Amount { - sort.Slice(vals, func(i, j int) bool { - return vals[i] < vals[j] - }) - - num := len(vals) - switch { - case num == 0: - return 0 - - case num%2 == 0: - return (vals[num/2-1] + vals[num/2]) / 2 - - default: - return vals[num/2] - } -} diff --git a/lnd/autopilot/graph_test.go b/lnd/autopilot/graph_test.go deleted file mode 100644 index 725a33bf..00000000 --- a/lnd/autopilot/graph_test.go +++ /dev/null @@ -1,50 +0,0 @@ -package autopilot_test - -import ( - "testing" - - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/lnd/autopilot" -) - -// TestMedian tests the Median method. 
-func TestMedian(t *testing.T) { - t.Parallel() - - testCases := []struct { - values []btcutil.Amount - median btcutil.Amount - }{ - { - values: []btcutil.Amount{}, - median: 0, - }, - { - values: []btcutil.Amount{10}, - median: 10, - }, - { - values: []btcutil.Amount{10, 20}, - median: 15, - }, - { - values: []btcutil.Amount{10, 20, 30}, - median: 20, - }, - { - values: []btcutil.Amount{30, 10, 20}, - median: 20, - }, - { - values: []btcutil.Amount{10, 10, 10, 10, 5000000}, - median: 10, - }, - } - - for _, test := range testCases { - res := autopilot.Median(test.values) - if res != test.median { - t.Fatalf("expected median %v, got %v", test.median, res) - } - } -} diff --git a/lnd/autopilot/interface.go b/lnd/autopilot/interface.go deleted file mode 100644 index 7c9ddfc1..00000000 --- a/lnd/autopilot/interface.go +++ /dev/null @@ -1,219 +0,0 @@ -package autopilot - -import ( - "net" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/wire" -) - -// DefaultConfTarget is the default confirmation target for autopilot channels. -// TODO(halseth): possibly make dynamic, going aggressive->lax as more channels -// are opened. -const DefaultConfTarget = 3 - -// Node is an interface which represents n abstract vertex within the -// channel graph. All nodes should have at least a single edge to/from them -// within the graph. -// -// TODO(roasbeef): combine with routing.ChannelGraphSource -type Node interface { - // PubKey is the identity public key of the node. This will be used to - // attempt to target a node for channel opening by the main autopilot - // agent. The key will be returned in serialized compressed format. - PubKey() [33]byte - - // Addrs returns a slice of publicly reachable public TCP addresses - // that the peer is known to be listening on. 
- Addrs() []net.Addr - - // ForEachChannel is a higher-order function that will be used to - // iterate through all edges emanating from/to the target node. For - // each active channel, this function should be called with the - // populated ChannelEdge that describes the active channel. - ForEachChannel(func(ChannelEdge) er.R) er.R -} - -// LocalChannel is a simple struct which contains relevant details of a -// particular channel the local node has. The fields in this struct may be used -// a signals for various AttachmentHeuristic implementations. -type LocalChannel struct { - // ChanID is the short channel ID for this channel as defined within - // BOLT-0007. - ChanID lnwire.ShortChannelID - - // Balance is the local balance of the channel expressed in satoshis. - Balance btcutil.Amount - - // Node is the peer that this channel has been established with. - Node NodeID - - // TODO(roasbeef): also add other traits? - // * fee, timelock, etc -} - -// ChannelEdge is a struct that holds details concerning a channel, but also -// contains a reference to the Node that this channel connects to as a directed -// edge within the graph. The existence of this reference to the connected node -// will allow callers to traverse the graph in an object-oriented manner. -type ChannelEdge struct { - // ChanID is the short channel ID for this channel as defined within - // BOLT-0007. - ChanID lnwire.ShortChannelID - - // Capacity is the capacity of the channel expressed in satoshis. - Capacity btcutil.Amount - - // Peer is the peer that this channel creates an edge to in the channel - // graph. - Peer Node -} - -// ChannelGraph in an interface that represents a traversable channel graph. -// The autopilot agent will use this interface as its source of graph traits in -// order to make decisions concerning which channels should be opened, and to -// whom. -// -// TODO(roasbeef): abstract?? 
-type ChannelGraph interface { - // ForEachNode is a higher-order function that should be called once - // for each connected node within the channel graph. If the passed - // callback returns an error, then execution should be terminated. - ForEachNode(func(Node) er.R) er.R -} - -// NodeScore is a tuple mapping a NodeID to a score indicating the preference -// of opening a channel with it. -type NodeScore struct { - // NodeID is the serialized compressed pubkey of the node that is being - // scored. - NodeID NodeID - - // Score is the score given by the heuristic for opening a channel of - // the given size to this node. - Score float64 -} - -// AttachmentDirective describes a channel attachment proscribed by an -// AttachmentHeuristic. It details to which node a channel should be created -// to, and also the parameters which should be used in the channel creation. -type AttachmentDirective struct { - // NodeID is the serialized compressed pubkey of the target node for - // this attachment directive. It can be identified by its public key, - // and therefore can be used along with a ChannelOpener implementation - // to execute the directive. - NodeID NodeID - - // ChanAmt is the size of the channel that should be opened, expressed - // in satoshis. - ChanAmt btcutil.Amount - - // Addrs is a list of addresses that the target peer may be reachable - // at. - Addrs []net.Addr -} - -// AttachmentHeuristic is one of the primary interfaces within this package. -// Implementations of this interface will be used to implement a control system -// which automatically regulates channels of a particular agent, attempting to -// optimize channels opened/closed based on various heuristics. The purpose of -// the interface is to allow an auto-pilot agent to decide if it needs more -// channels, and if so, which exact channels should be opened. -type AttachmentHeuristic interface { - // Name returns the name of this heuristic. 
- Name() string - - // NodeScores is a method that given the current channel graph and - // current set of local channels, scores the given nodes according to - // the preference of opening a channel of the given size with them. The - // returned channel candidates maps the NodeID to a NodeScore for the - // node. - // - // The returned scores will be in the range [0, 1.0], where 0 indicates - // no improvement in connectivity if a channel is opened to this node, - // while 1.0 is the maximum possible improvement in connectivity. The - // implementation of this interface must return scores in this range to - // properly allow the autopilot agent to make a reasonable choice based - // on the score from multiple heuristics. - // - // NOTE: A NodeID not found in the returned map is implicitly given a - // score of 0. - NodeScores(g ChannelGraph, chans []LocalChannel, - chanSize btcutil.Amount, nodes map[NodeID]struct{}) ( - map[NodeID]*NodeScore, er.R) -} - -// NodeMetric is a common interface for all graph metrics that are not -// directly used as autopilot node scores but may be used in compositional -// heuristics or statistical information exposed to users. -type NodeMetric interface { - // Name returns the unique name of this metric. - Name() er.R - - // Refresh refreshes the metric values based on the current graph. - Refresh(graph ChannelGraph) er.R - - // GetMetric returns the latest value of this metric. Values in the - // map are per node and can be in arbitrary domain. If normalize is - // set to true, then the returned values are normalized to either - // [0, 1] or [-1, 1] depending on the metric. - GetMetric(normalize bool) map[NodeID]float64 -} - -// ScoreSettable is an interface that indicates that the scores returned by the -// heuristic can be mutated by an external caller. 
The ExternalScoreAttachment -// currently implements this interface, and so should any heuristic that is -// using the ExternalScoreAttachment as a sub-heuristic, or keeps their own -// internal list of mutable scores, to allow access to setting the internal -// scores. -type ScoreSettable interface { - // SetNodeScores is used to set the internal map from NodeIDs to - // scores. The passed scores must be in the range [0, 1.0]. The fist - // parameter is the name of the targeted heuristic, to allow - // recursively target specific sub-heuristics. The returned boolean - // indicates whether the targeted heuristic was found. - SetNodeScores(string, map[NodeID]float64) (bool, er.R) -} - -var ( - // availableHeuristics holds all heuristics possible to combine for use - // with the autopilot agent. - availableHeuristics = []AttachmentHeuristic{ - NewPrefAttachment(), - NewExternalScoreAttachment(), - NewTopCentrality(), - } - - // AvailableHeuristics is a map that holds the name of available - // heuristics to the actual heuristic for easy lookup. It will be - // filled during init(). - AvailableHeuristics = make(map[string]AttachmentHeuristic) -) - -func init() { - // Fill the map from heuristic names to available heuristics for easy - // lookup. - for _, h := range availableHeuristics { - AvailableHeuristics[h.Name()] = h - } -} - -// ChannelController is a simple interface that allows an auto-pilot agent to -// open a channel within the graph to a target peer, close targeted channels, -// or add/remove funds from existing channels via a splice in/out mechanisms. -type ChannelController interface { - // OpenChannel opens a channel to a target peer, using at most amt - // funds. This means that the resulting channel capacity might be - // slightly less to account for fees. This function should un-block - // immediately after the funding transaction that marks the channel - // open has been broadcast. 
- OpenChannel(target *btcec.PublicKey, amt btcutil.Amount) er.R - - // CloseChannel attempts to close out the target channel. - // - // TODO(roasbeef): add force option? - CloseChannel(chanPoint *wire.OutPoint) er.R -} diff --git a/lnd/autopilot/manager.go b/lnd/autopilot/manager.go deleted file mode 100644 index fb7f1eee..00000000 --- a/lnd/autopilot/manager.go +++ /dev/null @@ -1,393 +0,0 @@ -package autopilot - -import ( - "sync" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/lnwallet" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/routing" - "github.com/pkt-cash/pktd/pktlog/log" - "github.com/pkt-cash/pktd/wire" -) - -// ManagerCfg houses a set of values and methods that is passed to the Manager -// for it to properly manage its autopilot agent. -type ManagerCfg struct { - // Self is the public key of the lnd instance. It is used to making - // sure the autopilot is not opening channels to itself. - Self *btcec.PublicKey - - // PilotCfg is the config of the autopilot agent managed by the - // Manager. - PilotCfg *Config - - // ChannelState is a function closure that returns the current set of - // channels managed by this node. - ChannelState func() ([]LocalChannel, er.R) - - // ChannelInfo is a function closure that returns the channel managed - // by the node given by the passed channel point. - ChannelInfo func(wire.OutPoint) (*LocalChannel, er.R) - - // SubscribeTransactions is used to get a subscription for transactions - // relevant to this node's wallet. - SubscribeTransactions func() (lnwallet.TransactionSubscription, er.R) - - // SubscribeTopology is used to get a subscription for topology changes - // on the network. - SubscribeTopology func() (*routing.TopologyClient, er.R) -} - -// Manager is struct that manages an autopilot agent, making it possible to -// enable and disable it at will, and hand it relevant external information. 
-// It implements the autopilot grpc service, which is used to get data about -// the running autopilot, and give it relevant information. -type Manager struct { - started sync.Once - stopped sync.Once - - cfg *ManagerCfg - - // pilot is the current autopilot agent. It will be nil if the agent is - // disabled. - pilot *Agent - - quit chan struct{} - wg sync.WaitGroup - sync.Mutex -} - -// NewManager creates a new instance of the Manager from the passed config. -func NewManager(cfg *ManagerCfg) (*Manager, er.R) { - return &Manager{ - cfg: cfg, - quit: make(chan struct{}), - }, nil -} - -// Start starts the Manager. -func (m *Manager) Start() er.R { - m.started.Do(func() {}) - return nil -} - -// Stop stops the Manager. If an autopilot agent is active, it will also be -// stopped. -func (m *Manager) Stop() er.R { - m.stopped.Do(func() { - if err := m.StopAgent(); err != nil { - log.Errorf("Unable to stop pilot: %v", err) - } - - close(m.quit) - m.wg.Wait() - }) - return nil -} - -// IsActive returns whether the autopilot agent is currently active. -func (m *Manager) IsActive() bool { - m.Lock() - defer m.Unlock() - - return m.pilot != nil -} - -// StartAgent creates and starts an autopilot agent from the Manager's -// config. -func (m *Manager) StartAgent() er.R { - m.Lock() - defer m.Unlock() - - // Already active. - if m.pilot != nil { - return nil - } - - // Next, we'll fetch the current state of open channels from the - // database to use as initial state for the auto-pilot agent. - initialChanState, err := m.cfg.ChannelState() - if err != nil { - return err - } - - // Now that we have all the initial dependencies, we can create the - // auto-pilot instance itself. 
- pilot, err := New(*m.cfg.PilotCfg, initialChanState) - if err != nil { - return err - } - - if err := pilot.Start(); err != nil { - return err - } - - // Finally, we'll need to subscribe to two things: incoming - // transactions that modify the wallet's balance, and also any graph - // topology updates. - txnSubscription, err := m.cfg.SubscribeTransactions() - if err != nil { - pilot.Stop() - return err - } - graphSubscription, err := m.cfg.SubscribeTopology() - if err != nil { - txnSubscription.Cancel() - pilot.Stop() - return err - } - - m.pilot = pilot - - // We'll launch a goroutine to provide the agent with notifications - // whenever the balance of the wallet changes. - // TODO(halseth): can lead to panic if in process of shutting down. - m.wg.Add(1) - go func() { - defer txnSubscription.Cancel() - defer m.wg.Done() - - for { - select { - case <-txnSubscription.ConfirmedTransactions(): - pilot.OnBalanceChange() - - // We won't act upon new unconfirmed transaction, as - // we'll only use confirmed outputs when funding. - // However, we will still drain this request in order - // to avoid goroutine leaks, and ensure we promptly - // read from the channel if available. - case <-txnSubscription.UnconfirmedTransactions(): - case <-pilot.quit: - return - case <-m.quit: - return - } - } - - }() - - // We'll also launch a goroutine to provide the agent with - // notifications for when the graph topology controlled by the node - // changes. - m.wg.Add(1) - go func() { - defer graphSubscription.Cancel() - defer m.wg.Done() - - for { - select { - case topChange, ok := <-graphSubscription.TopologyChanges: - // If the router is shutting down, then we will - // as well. - if !ok { - return - } - - for _, edgeUpdate := range topChange.ChannelEdgeUpdates { - // If this isn't an advertisement by - // the backing lnd node, then we'll - // continue as we only want to add - // channels that we've created - // ourselves. 
- if !edgeUpdate.AdvertisingNode.IsEqual(m.cfg.Self) { - continue - } - - // If this is indeed a channel we - // opened, then we'll convert it to the - // autopilot.Channel format, and notify - // the pilot of the new channel. - cp := edgeUpdate.ChanPoint - edge, err := m.cfg.ChannelInfo(cp) - if err != nil { - log.Errorf("Unable to fetch "+ - "channel info for %v: "+ - "%v", cp, err) - continue - } - - pilot.OnChannelOpen(*edge) - } - - // For each closed channel, we'll obtain - // the chanID of the closed channel and send it - // to the pilot. - for _, chanClose := range topChange.ClosedChannels { - chanID := lnwire.NewShortChanIDFromInt( - chanClose.ChanID, - ) - - pilot.OnChannelClose(chanID) - } - - // If new nodes were added to the graph, or nod - // information has changed, we'll poke autopilot - // to see if it can make use of them. - if len(topChange.NodeUpdates) > 0 { - pilot.OnNodeUpdates() - } - - case <-pilot.quit: - return - case <-m.quit: - return - } - } - }() - - log.Debugf("Manager started autopilot agent") - - return nil -} - -// StopAgent stops any active autopilot agent. -func (m *Manager) StopAgent() er.R { - m.Lock() - defer m.Unlock() - - // Not active, so we can return early. - if m.pilot == nil { - return nil - } - - if err := m.pilot.Stop(); err != nil { - return err - } - - // Make sure to nil the current agent, indicating it is no longer - // active. - m.pilot = nil - - log.Debugf("Manager stopped autopilot agent") - - return nil -} - -// QueryHeuristics queries the available autopilot heuristics for node scores. -func (m *Manager) QueryHeuristics(nodes []NodeID, localState bool) ( - HeuristicScores, er.R) { - - m.Lock() - defer m.Unlock() - - n := make(map[NodeID]struct{}) - for _, node := range nodes { - n[node] = struct{}{} - } - - log.Debugf("Querying heuristics for %d nodes", len(n)) - return m.queryHeuristics(n, localState) -} - -// HeuristicScores is an alias for a map that maps heuristic names to a map of -// scores for pubkeys. 
-type HeuristicScores map[string]map[NodeID]float64 - -// queryHeuristics gets node scores from all available simple heuristics, and -// the agent's current active heuristic. -// -// NOTE: Must be called with the manager's lock. -func (m *Manager) queryHeuristics(nodes map[NodeID]struct{}, localState bool) ( - HeuristicScores, er.R) { - - // If we want to take the local state into action when querying the - // heuristics, we fetch it. If not we'll just pass an emply slice to - // the heuristic. - var totalChans []LocalChannel - var err er.R - if localState { - // Fetch the current set of channels. - totalChans, err = m.cfg.ChannelState() - if err != nil { - return nil, err - } - - // If the agent is active, we can merge the channel state with - // the channels pending open. - if m.pilot != nil { - m.pilot.chanStateMtx.Lock() - m.pilot.pendingMtx.Lock() - totalChans = mergeChanState( - m.pilot.pendingOpens, m.pilot.chanState, - ) - m.pilot.pendingMtx.Unlock() - m.pilot.chanStateMtx.Unlock() - } - } - - // As channel size we'll use the maximum size. - chanSize := m.cfg.PilotCfg.Constraints.MaxChanSize() - - // We'll start by getting the scores from each available sub-heuristic, - // in addition the current agent heuristic. - var heuristics []AttachmentHeuristic - heuristics = append(heuristics, availableHeuristics...) - heuristics = append(heuristics, m.cfg.PilotCfg.Heuristic) - - report := make(HeuristicScores) - for _, h := range heuristics { - name := h.Name() - - // If the agent heuristic is among the simple heuristics it - // might get queried more than once. As an optimization we'll - // just skip it the second time. 
- if _, ok := report[name]; ok { - continue - } - - s, err := h.NodeScores( - m.cfg.PilotCfg.Graph, totalChans, chanSize, nodes, - ) - if err != nil { - return nil, er.Errorf("unable to get sub score: %v", - err) - } - - log.Debugf("Heuristic \"%v\" scored %d nodes", name, len(s)) - - scores := make(map[NodeID]float64) - for nID, score := range s { - scores[nID] = score.Score - } - - report[name] = scores - } - - return report, nil -} - -// SetNodeScores is used to set the scores of the given heuristic, if it is -// active, and ScoreSettable. -func (m *Manager) SetNodeScores(name string, scores map[NodeID]float64) er.R { - m.Lock() - defer m.Unlock() - - // It must be ScoreSettable to be available for external - // scores. - s, ok := m.cfg.PilotCfg.Heuristic.(ScoreSettable) - if !ok { - return er.Errorf("current heuristic doesn't support " + - "external scoring") - } - - // Heuristic was found, set its node scores. - applied, err := s.SetNodeScores(name, scores) - if err != nil { - return err - } - - if !applied { - return er.Errorf("heuristic with name %v not found", name) - } - - // If the autopilot agent is active, notify about the updated - // heuristic. - if m.pilot != nil { - m.pilot.OnHeuristicUpdate(m.cfg.PilotCfg.Heuristic) - } - - return nil -} diff --git a/lnd/autopilot/prefattach.go b/lnd/autopilot/prefattach.go deleted file mode 100644 index 87498f7a..00000000 --- a/lnd/autopilot/prefattach.go +++ /dev/null @@ -1,212 +0,0 @@ -package autopilot - -import ( - prand "math/rand" - "time" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/pktlog/log" -) - -// minMedianChanSizeFraction determines the minimum size a channel must have to -// count positively when calculating the scores using preferential attachment. -// The minimum channel size is calculated as median/minMedianChanSizeFraction, -// where median is the median channel size of the entire graph. 
-const minMedianChanSizeFraction = 4 - -// PrefAttachment is an implementation of the AttachmentHeuristic interface -// that implement a non-linear preferential attachment heuristic. This means -// that given a threshold to allocate to automatic channel establishment, the -// heuristic will attempt to favor connecting to nodes which already have a set -// amount of links, selected by sampling from a power law distribution. The -// attachment is non-linear in that it favors nodes with a higher in-degree but -// less so than regular linear preferential attachment. As a result, this -// creates smaller and less clusters than regular linear preferential -// attachment. -// -// TODO(roasbeef): BA, with k=-3 -type PrefAttachment struct { -} - -// NewPrefAttachment creates a new instance of a PrefAttachment heuristic. -func NewPrefAttachment() *PrefAttachment { - prand.Seed(time.Now().Unix()) - return &PrefAttachment{} -} - -// A compile time assertion to ensure PrefAttachment meets the -// AttachmentHeuristic interface. -var _ AttachmentHeuristic = (*PrefAttachment)(nil) - -// NodeID is a simple type that holds an EC public key serialized in compressed -// format. -type NodeID [33]byte - -// NewNodeID creates a new nodeID from a passed public key. -func NewNodeID(pub *btcec.PublicKey) NodeID { - var n NodeID - copy(n[:], pub.SerializeCompressed()) - return n -} - -// Name returns the name of this heuristic. -// -// NOTE: This is a part of the AttachmentHeuristic interface. -func (p *PrefAttachment) Name() string { - return "preferential" -} - -// NodeScores is a method that given the current channel graph and current set -// of local channels, scores the given nodes according to the preference of -// opening a channel of the given size with them. The returned channel -// candidates maps the NodeID to a NodeScore for the node. 
-// -// The heuristic employed by this method is one that attempts to promote a -// scale-free network globally, via local attachment preferences for new nodes -// joining the network with an amount of available funds to be allocated to -// channels. Specifically, we consider the degree of each node (and the flow -// in/out of the node available via its open channels) and utilize the -// Barabási–Albert model to drive our recommended attachment heuristics. If -// implemented globally for each new participant, this results in a channel -// graph that is scale-free and follows a power law distribution with k=-3. -// -// To avoid assigning a high score to nodes with a large number of small -// channels, we only count channels at least as large as a given fraction of -// the graph's median channel size. -// -// The returned scores will be in the range [0.0, 1.0], where higher scores are -// given to nodes already having high connectivity in the graph. -// -// NOTE: This is a part of the AttachmentHeuristic interface. -func (p *PrefAttachment) NodeScores(g ChannelGraph, chans []LocalChannel, - chanSize btcutil.Amount, nodes map[NodeID]struct{}) ( - map[NodeID]*NodeScore, er.R) { - - // We first run though the graph once in order to find the median - // channel size. - var ( - allChans []btcutil.Amount - seenChans = make(map[uint64]struct{}) - ) - if err := g.ForEachNode(func(n Node) er.R { - err := n.ForEachChannel(func(e ChannelEdge) er.R { - if _, ok := seenChans[e.ChanID.ToUint64()]; ok { - return nil - } - seenChans[e.ChanID.ToUint64()] = struct{}{} - allChans = append(allChans, e.Capacity) - return nil - }) - if err != nil { - return err - } - - return nil - }); err != nil { - return nil, err - } - - medianChanSize := Median(allChans) - log.Tracef("Found channel median %v for preferential score heuristic", - medianChanSize) - - // Count the number of large-ish channels for each particular node in - // the graph. 
- var maxChans int - nodeChanNum := make(map[NodeID]int) - if err := g.ForEachNode(func(n Node) er.R { - var nodeChans int - err := n.ForEachChannel(func(e ChannelEdge) er.R { - // Since connecting to nodes with a lot of small - // channels actually worsens our connectivity in the - // graph (we will potentially waste time trying to use - // these useless channels in path finding), we decrease - // the counter for such channels. - if e.Capacity < medianChanSize/minMedianChanSizeFraction { - nodeChans-- - return nil - } - - // Larger channels we count. - nodeChans++ - return nil - }) - if err != nil { - return err - } - - // We keep track of the highest-degree node we've seen, as this - // will be given the max score. - if nodeChans > maxChans { - maxChans = nodeChans - } - - // If this node is not among our nodes to score, we can return - // early. - nID := NodeID(n.PubKey()) - if _, ok := nodes[nID]; !ok { - log.Tracef("Node %x not among nodes to score, "+ - "ignoring", nID[:]) - return nil - } - - // Otherwise we'll record the number of channels. - nodeChanNum[nID] = nodeChans - log.Tracef("Counted %v channels for node %x", nodeChans, nID[:]) - - return nil - }); err != nil { - return nil, err - } - - // If there are no channels in the graph we cannot determine any - // preferences, so we return, indicating all candidates get a score of - // zero. - if maxChans == 0 { - log.Tracef("No channels in the graph") - return nil, nil - } - - existingPeers := make(map[NodeID]struct{}) - for _, c := range chans { - existingPeers[c.Node] = struct{}{} - } - - // For each node in the set of nodes, count their fraction of channels - // in the graph, and use that as the score. - candidates := make(map[NodeID]*NodeScore) - for nID, nodeChans := range nodeChanNum { - - // If the node is among or existing channel peers, we don't - // need another channel. 
- if _, ok := existingPeers[nID]; ok { - log.Tracef("Node %x among existing peers for pref "+ - "attach heuristic, giving zero score", nID[:]) - continue - } - - // If the node had no large channels, we skip it, since it - // would have gotten a zero score anyway. - if nodeChans <= 0 { - log.Tracef("Skipping node %x with channel count %v", - nID[:], nodeChans) - continue - } - - // Otherwise we score the node according to its fraction of - // channels in the graph, scaled such that the highest-degree - // node will be given a score of 1.0. - score := float64(nodeChans) / float64(maxChans) - log.Tracef("Giving node %x a pref attach score of %v", - nID[:], score) - - candidates[nID] = &NodeScore{ - NodeID: nID, - Score: score, - } - } - - return candidates, nil -} diff --git a/lnd/autopilot/prefattach_test.go b/lnd/autopilot/prefattach_test.go deleted file mode 100644 index 46ee3c29..00000000 --- a/lnd/autopilot/prefattach_test.go +++ /dev/null @@ -1,451 +0,0 @@ -package autopilot - -import ( - "bytes" - "io/ioutil" - "os" - "testing" - "time" - - prand "math/rand" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/channeldb" -) - -type genGraphFunc func() (testGraph, func(), er.R) - -type testGraph interface { - ChannelGraph - - addRandChannel(*btcec.PublicKey, *btcec.PublicKey, - btcutil.Amount) (*ChannelEdge, *ChannelEdge, er.R) - - addRandNode() (*btcec.PublicKey, er.R) -} - -func newDiskChanGraph() (testGraph, func(), er.R) { - // First, create a temporary directory to be used for the duration of - // this test. - tempDirName, errr := ioutil.TempDir("", "channeldb") - if errr != nil { - return nil, nil, er.E(errr) - } - - // Next, create channeldb for the first time. 
- cdb, err := channeldb.Open(tempDirName) - if err != nil { - return nil, nil, err - } - - cleanUp := func() { - cdb.Close() - os.RemoveAll(tempDirName) - } - - return &databaseChannelGraph{ - db: cdb.ChannelGraph(), - }, cleanUp, nil -} - -var _ testGraph = (*databaseChannelGraph)(nil) - -func newMemChanGraph() (testGraph, func(), er.R) { - return newMemChannelGraph(), nil, nil -} - -var _ testGraph = (*memChannelGraph)(nil) - -var chanGraphs = []struct { - name string - genFunc genGraphFunc -}{ - { - name: "disk_graph", - genFunc: newDiskChanGraph, - }, - { - name: "mem_graph", - genFunc: newMemChanGraph, - }, -} - -// TestPrefAttachmentSelectEmptyGraph ensures that when passed an -// empty graph, the NodeSores function always returns a score of 0. -func TestPrefAttachmentSelectEmptyGraph(t *testing.T) { - prefAttach := NewPrefAttachment() - - // Create a random public key, which we will query to get a score for. - pub, err := randKey() - if err != nil { - t.Fatalf("unable to generate key: %v", err) - } - - nodes := map[NodeID]struct{}{ - NewNodeID(pub): {}, - } - - for _, graph := range chanGraphs { - success := t.Run(graph.name, func(t1 *testing.T) { - graph, cleanup, err := graph.genFunc() - if err != nil { - t1.Fatalf("unable to create graph: %v", err) - } - if cleanup != nil { - defer cleanup() - } - - // With the necessary state initialized, we'll now - // attempt to get the score for this one node. - walletFunds := btcutil.UnitsPerCoin() - scores, err := prefAttach.NodeScores(graph, nil, - walletFunds, nodes) - if err != nil { - t1.Fatalf("unable to select attachment "+ - "directives: %v", err) - } - - // Since the graph is empty, we expect the score to be - // 0, giving an empty return map. 
- if len(scores) != 0 { - t1.Fatalf("expected empty score map, "+ - "instead got %v ", len(scores)) - } - }) - if !success { - break - } - } -} - -// TestPrefAttachmentSelectTwoVertexes ensures that when passed a -// graph with only two eligible vertexes, then both are given the same score, -// and the funds are appropriately allocated across each peer. -func TestPrefAttachmentSelectTwoVertexes(t *testing.T) { - t.Parallel() - - prand.Seed(time.Now().Unix()) - - maxChanSize := btcutil.Amount(btcutil.UnitsPerCoin()) - - for _, graph := range chanGraphs { - success := t.Run(graph.name, func(t1 *testing.T) { - graph, cleanup, err := graph.genFunc() - if err != nil { - t1.Fatalf("unable to create graph: %v", err) - } - if cleanup != nil { - defer cleanup() - } - - prefAttach := NewPrefAttachment() - - // For this set, we'll load the memory graph with two - // nodes, and a random channel connecting them. - chanCapacity := btcutil.UnitsPerCoin() - edge1, edge2, err := graph.addRandChannel(nil, nil, chanCapacity) - if err != nil { - t1.Fatalf("unable to generate channel: %v", err) - } - - // We also add a third, non-connected node to the graph. - _, err = graph.addRandNode() - if err != nil { - t1.Fatalf("unable to add random node: %v", err) - } - - // Get the score for all nodes found in the graph at - // this point. - nodes := make(map[NodeID]struct{}) - if err := graph.ForEachNode(func(n Node) er.R { - nodes[n.PubKey()] = struct{}{} - return nil - }); err != nil { - t1.Fatalf("unable to traverse graph: %v", err) - } - - if len(nodes) != 3 { - t1.Fatalf("expected 2 nodes, found %d", len(nodes)) - } - - // With the necessary state initialized, we'll now - // attempt to get our candidates channel score given - // the current state of the graph. 
- candidates, err := prefAttach.NodeScores(graph, nil, - maxChanSize, nodes) - if err != nil { - t1.Fatalf("unable to select attachment "+ - "directives: %v", err) - } - - // We expect two candidates, since one of the nodes - // doesn't have any channels. - if len(candidates) != 2 { - t1.Fatalf("2 nodes should be scored, "+ - "instead %v were", len(candidates)) - } - - // The candidates should be amongst the two edges - // created above. - for nodeID, candidate := range candidates { - edge1Pub := edge1.Peer.PubKey() - edge2Pub := edge2.Peer.PubKey() - - switch { - case bytes.Equal(nodeID[:], edge1Pub[:]): - case bytes.Equal(nodeID[:], edge2Pub[:]): - default: - t1.Fatalf("attached to unknown node: %x", - nodeID[:]) - } - - // Since each of the nodes has 1 channel, out - // of only one channel in the graph, we expect - // their score to be 1.0. - expScore := float64(1.0) - if candidate.Score != expScore { - t1.Fatalf("expected candidate score "+ - "to be %v, instead was %v", - expScore, candidate.Score) - } - } - }) - if !success { - break - } - } -} - -// TestPrefAttachmentSelectGreedyAllocation tests that if upon -// returning node scores, the NodeScores method will attempt to greedily -// allocate all funds to each vertex (up to the max channel size). -func TestPrefAttachmentSelectGreedyAllocation(t *testing.T) { - t.Parallel() - - prand.Seed(time.Now().Unix()) - - maxChanSize := btcutil.Amount(btcutil.UnitsPerCoin()) - - for _, graph := range chanGraphs { - success := t.Run(graph.name, func(t1 *testing.T) { - graph, cleanup, err := graph.genFunc() - if err != nil { - t1.Fatalf("unable to create graph: %v", err) - } - if cleanup != nil { - defer cleanup() - } - - prefAttach := NewPrefAttachment() - - chanCapacity := btcutil.UnitsPerCoin() - - // Next, we'll add 3 nodes to the graph, creating an - // "open triangle topology". 
- edge1, _, err := graph.addRandChannel(nil, nil, - chanCapacity) - if err != nil { - t1.Fatalf("unable to create channel: %v", err) - } - peerPubBytes := edge1.Peer.PubKey() - peerPub, err := btcec.ParsePubKey( - peerPubBytes[:], btcec.S256(), - ) - if err != nil { - t.Fatalf("unable to parse pubkey: %v", err) - } - _, _, err = graph.addRandChannel( - peerPub, nil, chanCapacity, - ) - if err != nil { - t1.Fatalf("unable to create channel: %v", err) - } - - // At this point, there should be three nodes in the - // graph, with node node having two edges. - numNodes := 0 - twoChans := false - nodes := make(map[NodeID]struct{}) - if err := graph.ForEachNode(func(n Node) er.R { - numNodes++ - nodes[n.PubKey()] = struct{}{} - numChans := 0 - err := n.ForEachChannel(func(c ChannelEdge) er.R { - numChans++ - return nil - }) - if err != nil { - return err - } - - twoChans = twoChans || (numChans == 2) - - return nil - }); err != nil { - t1.Fatalf("unable to traverse graph: %v", err) - } - if numNodes != 3 { - t1.Fatalf("expected 3 nodes, instead have: %v", - numNodes) - } - if !twoChans { - t1.Fatalf("expected node to have two channels") - } - - // We'll now begin our test, modeling the available - // wallet balance to be 5.5 BTC. We're shooting for a - // 50/50 allocation, and have 3 BTC in channels. As a - // result, the heuristic should try to greedily - // allocate funds to channels. - scores, err := prefAttach.NodeScores(graph, nil, - maxChanSize, nodes) - if err != nil { - t1.Fatalf("unable to select attachment "+ - "directives: %v", err) - } - - if len(scores) != len(nodes) { - t1.Fatalf("all nodes should be scored, "+ - "instead %v were", len(scores)) - } - - // The candidates should have a non-zero score, and - // have the max chan size funds recommended channel - // size. 
- for _, candidate := range scores { - if candidate.Score == 0 { - t1.Fatalf("Expected non-zero score") - } - } - - // Imagine a few channels are being opened, and there's - // only 0.5 BTC left. That should leave us with channel - // candidates of that size. - remBalance := btcutil.Amount(btcutil.UnitsPerCoinF() * 0.5) - scores, err = prefAttach.NodeScores(graph, nil, - remBalance, nodes) - if err != nil { - t1.Fatalf("unable to select attachment "+ - "directives: %v", err) - } - - if len(scores) != len(nodes) { - t1.Fatalf("all nodes should be scored, "+ - "instead %v were", len(scores)) - } - - // Check that the recommended channel sizes are now the - // remaining channel balance. - for _, candidate := range scores { - if candidate.Score == 0 { - t1.Fatalf("Expected non-zero score") - } - } - }) - if !success { - break - } - } -} - -// TestPrefAttachmentSelectSkipNodes ensures that if a node was -// already selected as a channel counterparty, then that node will get a score -// of zero during scoring. -func TestPrefAttachmentSelectSkipNodes(t *testing.T) { - t.Parallel() - - prand.Seed(time.Now().Unix()) - - maxChanSize := btcutil.Amount(btcutil.UnitsPerCoin()) - - for _, graph := range chanGraphs { - success := t.Run(graph.name, func(t1 *testing.T) { - graph, cleanup, err := graph.genFunc() - if err != nil { - t1.Fatalf("unable to create graph: %v", err) - } - if cleanup != nil { - defer cleanup() - } - - prefAttach := NewPrefAttachment() - - // Next, we'll create a simple topology of two nodes, - // with a single channel connecting them. 
- chanCapacity := btcutil.UnitsPerCoin() - _, _, err = graph.addRandChannel(nil, nil, - chanCapacity) - if err != nil { - t1.Fatalf("unable to create channel: %v", err) - } - - nodes := make(map[NodeID]struct{}) - if err := graph.ForEachNode(func(n Node) er.R { - nodes[n.PubKey()] = struct{}{} - return nil - }); err != nil { - t1.Fatalf("unable to traverse graph: %v", err) - } - - if len(nodes) != 2 { - t1.Fatalf("expected 2 nodes, found %d", len(nodes)) - } - - // With our graph created, we'll now get the scores for - // all nodes in the graph. - scores, err := prefAttach.NodeScores(graph, nil, - maxChanSize, nodes) - if err != nil { - t1.Fatalf("unable to select attachment "+ - "directives: %v", err) - } - - if len(scores) != len(nodes) { - t1.Fatalf("all nodes should be scored, "+ - "instead %v were", len(scores)) - } - - // THey should all have a score, and a maxChanSize - // channel size recommendation. - for _, candidate := range scores { - if candidate.Score == 0 { - t1.Fatalf("Expected non-zero score") - } - } - - // We'll simulate a channel update by adding the nodes - // to our set of channels. - var chans []LocalChannel - for _, candidate := range scores { - chans = append(chans, - LocalChannel{ - Node: candidate.NodeID, - }, - ) - } - - // If we attempt to make a call to the NodeScores - // function, without providing any new information, - // then all nodes should have a score of zero, since we - // already got channels to them. - scores, err = prefAttach.NodeScores(graph, chans, - maxChanSize, nodes) - if err != nil { - t1.Fatalf("unable to select attachment "+ - "directives: %v", err) - } - - // Since all should be given a score of 0, the map - // should be empty. 
- if len(scores) != 0 { - t1.Fatalf("expected empty score map, "+ - "instead got %v ", len(scores)) - } - }) - if !success { - break - } - } -} diff --git a/lnd/autopilot/simple_graph.go b/lnd/autopilot/simple_graph.go deleted file mode 100644 index e19f6961..00000000 --- a/lnd/autopilot/simple_graph.go +++ /dev/null @@ -1,68 +0,0 @@ -package autopilot - -import "github.com/pkt-cash/pktd/btcutil/er" - -// SimpleGraph stores a simplifed adj graph of a channel graph to speed -// up graph processing by eliminating all unnecessary hashing and map access. -type SimpleGraph struct { - // Nodes is a map from node index to NodeID. - Nodes []NodeID - - // Adj stores nodes and neighbors in an adjacency list. - Adj [][]int -} - -// NewSimpleGraph creates a simplified graph from the current channel graph. -// Returns an error if the channel graph iteration fails due to underlying -// failure. -func NewSimpleGraph(g ChannelGraph) (*SimpleGraph, er.R) { - nodes := make(map[NodeID]int) - adj := make(map[int][]int) - nextIndex := 0 - - // getNodeIndex returns the integer index of the passed node. - // The returned index is then used to create a simplifed adjacency list - // where each node is identified by its index instead of its pubkey, and - // also to create a mapping from node index to node pubkey. - getNodeIndex := func(node Node) int { - key := NodeID(node.PubKey()) - nodeIndex, ok := nodes[key] - - if !ok { - nodes[key] = nextIndex - nodeIndex = nextIndex - nextIndex++ - } - - return nodeIndex - } - - // Iterate over each node and each channel and update the adj and the node - // index. 
- err := g.ForEachNode(func(node Node) er.R { - u := getNodeIndex(node) - - return node.ForEachChannel(func(edge ChannelEdge) er.R { - v := getNodeIndex(edge.Peer) - - adj[u] = append(adj[u], v) - return nil - }) - }) - if err != nil { - return nil, err - } - - graph := &SimpleGraph{ - Nodes: make([]NodeID, len(nodes)), - Adj: make([][]int, len(nodes)), - } - - // Fill the adj and the node index to node pubkey mapping. - for nodeID, nodeIndex := range nodes { - graph.Adj[nodeIndex] = adj[nodeIndex] - graph.Nodes[nodeIndex] = nodeID - } - - return graph, nil -} diff --git a/lnd/autopilot/top_centrality.go b/lnd/autopilot/top_centrality.go deleted file mode 100644 index e8c0549d..00000000 --- a/lnd/autopilot/top_centrality.go +++ /dev/null @@ -1,94 +0,0 @@ -package autopilot - -import ( - "runtime" - - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" -) - -// TopCentrality is a simple greedy technique to create connections to nodes -// with the top betweenness centrality value. This algorithm is usually -// referred to as TopK in the literature. The idea is that by opening channels -// to nodes with top betweenness centrality we also increase our own betweenness -// centrality (given we already have at least one channel, or create at least -// two new channels). -// A different and much better approach is instead of selecting nodes with top -// centrality value, we extend the graph in a loop by inserting a new non -// existing edge and recalculate the betweenness centrality of each node. This -// technique is usually referred to as "greedy" algorithm and gives better -// results than TopK but is considerably slower too. -type TopCentrality struct { - centralityMetric *BetweennessCentrality -} - -// A compile time assertion to ensure TopCentrality meets the -// AttachmentHeuristic interface. -var _ AttachmentHeuristic = (*TopCentrality)(nil) - -// NewTopCentrality constructs and returns a new TopCentrality heuristic. 
-func NewTopCentrality() *TopCentrality { - metric, err := NewBetweennessCentralityMetric( - runtime.NumCPU(), - ) - if err != nil { - panic(err) - } - - return &TopCentrality{ - centralityMetric: metric, - } -} - -// Name returns the name of the heuristic. -func (g *TopCentrality) Name() string { - return "top_centrality" -} - -// NodeScores will return a [0,1] normalized map of scores for the given nodes -// except for the ones we already have channels with. The scores will simply -// be the betweenness centrality values of the nodes. -// As our current implementation of betweenness centrality is non-incremental, -// NodeScores will recalculate the centrality values on every call, which is -// slow for large graphs. -func (g *TopCentrality) NodeScores(graph ChannelGraph, chans []LocalChannel, - chanSize btcutil.Amount, nodes map[NodeID]struct{}) ( - map[NodeID]*NodeScore, er.R) { - - // Calculate betweenness centrality for the whole graph. - if err := g.centralityMetric.Refresh(graph); err != nil { - return nil, err - } - - normalize := true - centrality := g.centralityMetric.GetMetric(normalize) - - // Create a map of the existing peers for faster filtering. - existingPeers := make(map[NodeID]struct{}) - for _, c := range chans { - existingPeers[c.Node] = struct{}{} - } - - result := make(map[NodeID]*NodeScore, len(nodes)) - for nodeID := range nodes { - // Skip nodes we already have channel with. - if _, ok := existingPeers[nodeID]; ok { - continue - } - - // Skip passed nodes not in the graph. This could happen if - // the graph changed before computing the centrality values as - // the nodes we iterate are prefiltered by the autopilot agent. 
- score, ok := centrality[nodeID] - if !ok { - continue - } - - result[nodeID] = &NodeScore{ - NodeID: nodeID, - Score: score, - } - } - - return result, nil -} diff --git a/lnd/autopilot/top_centrality_test.go b/lnd/autopilot/top_centrality_test.go deleted file mode 100644 index 0caedb12..00000000 --- a/lnd/autopilot/top_centrality_test.go +++ /dev/null @@ -1,110 +0,0 @@ -package autopilot - -import ( - "testing" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/stretchr/testify/require" -) - -// testTopCentrality is subtest helper to which given the passed graph and -// channels creates the expected centrality score set and checks that the -// calculated score set matches it. -func testTopCentrality(t *testing.T, graph testGraph, - graphNodes map[int]*btcec.PublicKey, channelsWith []int) { - - topCentrality := NewTopCentrality() - - var channels []LocalChannel - for _, ch := range channelsWith { - channels = append(channels, LocalChannel{ - Node: NewNodeID(graphNodes[ch]), - }) - } - - // Start iteration from -1 to also test the case where the node set - // is empty. - for i := -1; i < len(graphNodes); i++ { - nodes := make(map[NodeID]struct{}) - expected := make(map[NodeID]*NodeScore) - - for j := 0; j <= i; j++ { - // Add node to the interest set. - nodeID := NewNodeID(graphNodes[j]) - nodes[nodeID] = struct{}{} - - // Add to the expected set unless it's a node we have - // a channel with. - haveChannel := false - for _, ch := range channels { - if nodeID == ch.Node { - haveChannel = true - break - } - } - - if !haveChannel { - score := normalizedTestGraphCentrality[j] - expected[nodeID] = &NodeScore{ - NodeID: nodeID, - Score: score, - } - } - } - - chanSize := btcutil.UnitsPerCoin() - - // Attempt to get centrality scores and expect - // that the result equals with the expected set. 
- scores, err := topCentrality.NodeScores( - graph, channels, chanSize, nodes, - ) - - util.RequireNoErr(t, err) - require.Equal(t, expected, scores) - } -} - -// TestTopCentrality tests that we return the correct normalized centralitiy -// values given a non empty graph, and given our node has an increasing amount -// of channels from 0 to N-1 simulating the whole range from non-connected to -// fully connected. -func TestTopCentrality(t *testing.T) { - // Generate channels: {}, {0}, {0, 1}, ... {0, 1, ..., N-1} - channelsWith := [][]int{nil} - - for i := 0; i < centralityTestGraph.nodes; i++ { - channels := make([]int, i+1) - for j := 0; j <= i; j++ { - channels[j] = j - } - channelsWith = append(channelsWith, channels) - } - - for _, chanGraph := range chanGraphs { - chanGraph := chanGraph - - success := t.Run(chanGraph.name, func(t *testing.T) { - t.Parallel() - - graph, cleanup, err := chanGraph.genFunc() - util.RequireNoErr(t, err, "unable to create graph") - if cleanup != nil { - defer cleanup() - } - - // Build the test graph. 
- graphNodes := buildTestGraph( - t, graph, centralityTestGraph, - ) - - for _, chans := range channelsWith { - testTopCentrality(t, graph, graphNodes, chans) - } - }) - - require.True(t, success) - } -} diff --git a/lnd/breacharbiter.go b/lnd/breacharbiter.go deleted file mode 100644 index 4ed6d1f0..00000000 --- a/lnd/breacharbiter.go +++ /dev/null @@ -1,1589 +0,0 @@ -package lnd - -import ( - "bytes" - "encoding/binary" - "io" - "sync" - - "github.com/davecgh/go-spew/spew" - "github.com/pkt-cash/pktd/blockchain" - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/pktlog/log" - "github.com/pkt-cash/pktd/txscript" - "github.com/pkt-cash/pktd/wire" - - "github.com/pkt-cash/pktd/lnd/chainntnfs" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/lnd/htlcswitch" - "github.com/pkt-cash/pktd/lnd/input" - "github.com/pkt-cash/pktd/lnd/labels" - "github.com/pkt-cash/pktd/lnd/lnwallet" - "github.com/pkt-cash/pktd/lnd/lnwallet/chainfee" -) - -var ( - // retributionBucket stores retribution state on disk between detecting - // a contract breach, broadcasting a justice transaction that sweeps the - // channel, and finally witnessing the justice transaction confirm on - // the blockchain. It is critical that such state is persisted on disk, - // so that if our node restarts at any point during the retribution - // procedure, we can recover and continue from the persisted state. - retributionBucket = []byte("retribution") - - // justiceTxnBucket holds the finalized justice transactions for all - // breached contracts. Entries are added to the justice txn bucket just - // before broadcasting the sweep txn. - justiceTxnBucket = []byte("justice-txn") - - // errBrarShuttingDown is an error returned if the breacharbiter has - // been signalled to exit. 
- errBrarShuttingDown = Err.CodeWithDetail("errBrarShuttingDown", - "breacharbiter shutting down") -) - -// ContractBreachEvent is an event the breachArbiter will receive in case a -// contract breach is observed on-chain. It contains the necessary information -// to handle the breach, and a ProcessACK channel we will use to ACK the event -// when we have safely stored all the necessary information. -type ContractBreachEvent struct { - // ChanPoint is the channel point of the breached channel. - ChanPoint wire.OutPoint - - // ProcessACK is an error channel where a nil error should be sent - // iff the breach retribution info is safely stored in the retribution - // store. In case storing the information to the store fails, a non-nil - // error should be sent. - ProcessACK chan er.R - - // BreachRetribution is the information needed to act on this contract - // breach. - BreachRetribution *lnwallet.BreachRetribution -} - -// BreachConfig bundles the required subsystems used by the breach arbiter. An -// instance of BreachConfig is passed to newBreachArbiter during instantiation. -type BreachConfig struct { - // CloseLink allows the breach arbiter to shutdown any channel links for - // which it detects a breach, ensuring now further activity will - // continue across the link. The method accepts link's channel point and - // a close type to be included in the channel close summary. - CloseLink func(*wire.OutPoint, htlcswitch.ChannelCloseType) - - // DB provides access to the user's channels, allowing the breach - // arbiter to determine the current state of a user's channels, and how - // it should respond to channel closure. - DB *channeldb.DB - - // Estimator is used by the breach arbiter to determine an appropriate - // fee level when generating, signing, and broadcasting sweep - // transactions. - Estimator chainfee.Estimator - - // GenSweepScript generates the receiving scripts for swept outputs. 
- GenSweepScript func() ([]byte, er.R) - - // Notifier provides a publish/subscribe interface for event driven - // notifications regarding the confirmation of txids. - Notifier chainntnfs.ChainNotifier - - // PublishTransaction facilitates the process of broadcasting a - // transaction to the network. - PublishTransaction func(*wire.MsgTx, string) er.R - - // ContractBreaches is a channel where the breachArbiter will receive - // notifications in the event of a contract breach being observed. A - // ContractBreachEvent must be ACKed by the breachArbiter, such that - // the sending subsystem knows that the event is properly handed off. - ContractBreaches <-chan *ContractBreachEvent - - // Signer is used by the breach arbiter to generate sweep transactions, - // which move coins from previously open channels back to the user's - // wallet. - Signer input.Signer - - // Store is a persistent resource that maintains information regarding - // breached channels. This is used in conjunction with DB to recover - // from crashes, restarts, or other failures. - Store RetributionStore -} - -// breachArbiter is a special subsystem which is responsible for watching and -// acting on the detection of any attempted uncooperative channel breaches by -// channel counterparties. This file essentially acts as deterrence code for -// those attempting to launch attacks against the daemon. In practice it's -// expected that the logic in this file never gets executed, but it is -// important to have it in place just in case we encounter cheating channel -// counterparties. -// TODO(roasbeef): closures in config for subsystem pointers to decouple? -type breachArbiter struct { - started sync.Once - stopped sync.Once - - cfg *BreachConfig - - quit chan struct{} - wg sync.WaitGroup - sync.Mutex -} - -// newBreachArbiter creates a new instance of a breachArbiter initialized with -// its dependent objects. 
-func newBreachArbiter(cfg *BreachConfig) *breachArbiter { - return &breachArbiter{ - cfg: cfg, - quit: make(chan struct{}), - } -} - -// Start is an idempotent method that officially starts the breachArbiter along -// with all other goroutines it needs to perform its functions. -func (b *breachArbiter) Start() er.R { - var err er.R - b.started.Do(func() { - err = b.start() - }) - return err -} - -func (b *breachArbiter) start() er.R { - log.Tracef("Starting breach arbiter") - - // Load all retributions currently persisted in the retribution store. - var breachRetInfos map[wire.OutPoint]retributionInfo - if err := b.cfg.Store.ForAll(func(ret *retributionInfo) er.R { - breachRetInfos[ret.chanPoint] = *ret - return nil - }, func() { - breachRetInfos = make(map[wire.OutPoint]retributionInfo) - }); err != nil { - return err - } - - // Load all currently closed channels from disk, we will use the - // channels that have been marked fully closed to filter the retribution - // information loaded from disk. This is necessary in the event that the - // channel was marked fully closed, but was not removed from the - // retribution store. - closedChans, err := b.cfg.DB.FetchClosedChannels(false) - if err != nil { - log.Errorf("Unable to fetch closing channels: %v", err) - return err - } - - // Using the set of non-pending, closed channels, reconcile any - // discrepancies between the channeldb and the retribution store by - // removing any retribution information for which we have already - // finished our responsibilities. If the removal is successful, we also - // remove the entry from our in-memory map, to avoid any further action - // for this channel. - // TODO(halseth): no need continue on IsPending once closed channels - // actually means close transaction is confirmed. 
- for _, chanSummary := range closedChans { - if chanSummary.IsPending { - continue - } - - chanPoint := &chanSummary.ChanPoint - if _, ok := breachRetInfos[*chanPoint]; ok { - if err := b.cfg.Store.Remove(chanPoint); err != nil { - log.Errorf("Unable to remove closed "+ - "chanid=%v from breach arbiter: %v", - chanPoint, err) - return err - } - delete(breachRetInfos, *chanPoint) - } - } - - // Spawn the exactRetribution tasks to monitor and resolve any breaches - // that were loaded from the retribution store. - for chanPoint := range breachRetInfos { - retInfo := breachRetInfos[chanPoint] - - // Register for a notification when the breach transaction is - // confirmed on chain. - breachTXID := retInfo.commitHash - breachScript := retInfo.breachedOutputs[0].signDesc.Output.PkScript - confChan, err := b.cfg.Notifier.RegisterConfirmationsNtfn( - &breachTXID, breachScript, 1, retInfo.breachHeight, - ) - if err != nil { - log.Errorf("Unable to register for conf updates "+ - "for txid: %v, err: %v", breachTXID, err) - return err - } - - // Launch a new goroutine which to finalize the channel - // retribution after the breach transaction confirms. - b.wg.Add(1) - go b.exactRetribution(confChan, &retInfo) - } - - // Start watching the remaining active channels! - b.wg.Add(1) - go b.contractObserver() - - return nil -} - -// Stop is an idempotent method that signals the breachArbiter to execute a -// graceful shutdown. This function will block until all goroutines spawned by -// the breachArbiter have gracefully exited. -func (b *breachArbiter) Stop() er.R { - b.stopped.Do(func() { - log.Infof("Breach arbiter shutting down") - - close(b.quit) - b.wg.Wait() - }) - return nil -} - -// IsBreached queries the breach arbiter's retribution store to see if it is -// aware of any channel breaches for a particular channel point. 
-func (b *breachArbiter) IsBreached(chanPoint *wire.OutPoint) (bool, er.R) { - return b.cfg.Store.IsBreached(chanPoint) -} - -// contractObserver is the primary goroutine for the breachArbiter. This -// goroutine is responsible for handling breach events coming from the -// contractcourt on the ContractBreaches channel. If a channel breach is -// detected, then the contractObserver will execute the retribution logic -// required to sweep ALL outputs from a contested channel into the daemon's -// wallet. -// -// NOTE: This MUST be run as a goroutine. -func (b *breachArbiter) contractObserver() { - defer b.wg.Done() - - log.Infof("Starting contract observer, watching for breaches.") - - for { - select { - case breachEvent := <-b.cfg.ContractBreaches: - // We have been notified about a contract breach! - // Handle the handoff, making sure we ACK the event - // after we have safely added it to the retribution - // store. - b.wg.Add(1) - go b.handleBreachHandoff(breachEvent) - - case <-b.quit: - return - } - } -} - -// convertToSecondLevelRevoke takes a breached output, and a transaction that -// spends it to the second level, and mutates the breach output into one that -// is able to properly sweep that second level output. We'll use this function -// when we go to sweep a breached commitment transaction, but the cheating -// party has already attempted to take it to the second level -func convertToSecondLevelRevoke(bo *breachedOutput, breachInfo *retributionInfo, - spendDetails *chainntnfs.SpendDetail) { - - // In this case, we'll modify the witness type of this output to - // actually prepare for a second level revoke. - bo.witnessType = input.HtlcSecondLevelRevoke - - // We'll also redirect the outpoint to this second level output, so the - // spending transaction updates it inputs accordingly. 
- spendingTx := spendDetails.SpendingTx - oldOp := bo.outpoint - bo.outpoint = wire.OutPoint{ - Hash: spendingTx.TxHash(), - Index: 0, - } - - // Next, we need to update the amount so we can do fee estimation - // properly, and also so we can generate a valid signature as we need - // to know the new input value (the second level transactions shaves - // off some funds to fees). - newAmt := spendingTx.TxOut[0].Value - bo.amt = btcutil.Amount(newAmt) - bo.signDesc.Output.Value = newAmt - bo.signDesc.Output.PkScript = spendingTx.TxOut[0].PkScript - - // Finally, we'll need to adjust the witness program in the - // SignDescriptor. - bo.signDesc.WitnessScript = bo.secondLevelWitnessScript - - log.Warnf("HTLC(%v) for ChannelPoint(%v) has been spent to the "+ - "second-level, adjusting -> %v", oldOp, breachInfo.chanPoint, - bo.outpoint) -} - -// waitForSpendEvent waits for any of the breached outputs to get spent, and -// mutates the breachInfo to be able to sweep it. This method should be used -// when we fail to publish the justice tx because of a double spend, indicating -// that the counter party has taken one of the breached outputs to the second -// level. The spendNtfns map is a cache used to store registered spend -// subscriptions, in case we must call this method multiple times. -func (b *breachArbiter) waitForSpendEvent(breachInfo *retributionInfo, - spendNtfns map[wire.OutPoint]*chainntnfs.SpendEvent) er.R { - - inputs := breachInfo.breachedOutputs - - // spend is used to wrap the index of the output that gets spent - // together with the spend details. - type spend struct { - index int - detail *chainntnfs.SpendDetail - } - - // We create a channel the first goroutine that gets a spend event can - // signal. We make it buffered in case multiple spend events come in at - // the same time. 
- anySpend := make(chan struct{}, len(inputs)) - - // The allSpends channel will be used to pass spend events from all the - // goroutines that detects a spend before they are signalled to exit. - allSpends := make(chan spend, len(inputs)) - - // exit will be used to signal the goroutines that they can exit. - exit := make(chan struct{}) - var wg sync.WaitGroup - - // We'll now launch a goroutine for each of the HTLC outputs, that will - // signal the moment they detect a spend event. - for i := range inputs { - breachedOutput := &inputs[i] - - log.Infof("Checking spend from %v(%v) for ChannelPoint(%v)", - breachedOutput.witnessType, breachedOutput.outpoint, - breachInfo.chanPoint) - - // If we have already registered for a notification for this - // output, we'll reuse it. - spendNtfn, ok := spendNtfns[breachedOutput.outpoint] - if !ok { - var err er.R - spendNtfn, err = b.cfg.Notifier.RegisterSpendNtfn( - &breachedOutput.outpoint, - breachedOutput.signDesc.Output.PkScript, - breachInfo.breachHeight, - ) - if err != nil { - log.Errorf("Unable to check for spentness "+ - "of outpoint=%v: %v", - breachedOutput.outpoint, err) - - // Registration may have failed if we've been - // instructed to shutdown. If so, return here - // to avoid entering an infinite loop. - select { - case <-b.quit: - return errBrarShuttingDown.Default() - default: - continue - } - } - spendNtfns[breachedOutput.outpoint] = spendNtfn - } - - // Launch a goroutine waiting for a spend event. - b.wg.Add(1) - wg.Add(1) - go func(index int, spendEv *chainntnfs.SpendEvent) { - defer b.wg.Done() - defer wg.Done() - - select { - // The output has been taken to the second level! 
- case sp, ok := <-spendEv.Spend: - if !ok { - return - } - - log.Infof("Detected spend on %s(%v) by "+ - "txid(%v) for ChannelPoint(%v)", - inputs[index].witnessType, - inputs[index].outpoint, - sp.SpenderTxHash, - breachInfo.chanPoint) - - // First we send the spend event on the - // allSpends channel, such that it can be - // handled after all go routines have exited. - allSpends <- spend{index, sp} - - // Finally we'll signal the anySpend channel - // that a spend was detected, such that the - // other goroutines can be shut down. - anySpend <- struct{}{} - case <-exit: - return - case <-b.quit: - return - } - }(i, spendNtfn) - } - - // We'll wait for any of the outputs to be spent, or that we are - // signalled to exit. - select { - // A goroutine have signalled that a spend occurred. - case <-anySpend: - // Signal for the remaining goroutines to exit. - close(exit) - wg.Wait() - - // At this point all goroutines that can send on the allSpends - // channel have exited. We can therefore safely close the - // channel before ranging over its content. - close(allSpends) - - doneOutputs := make(map[int]struct{}) - for s := range allSpends { - breachedOutput := &inputs[s.index] - delete(spendNtfns, breachedOutput.outpoint) - - switch breachedOutput.witnessType { - case input.HtlcAcceptedRevoke: - fallthrough - case input.HtlcOfferedRevoke: - log.Infof("Spend on second-level"+ - "%s(%v) for ChannelPoint(%v) "+ - "transitions to second-level output", - breachedOutput.witnessType, - breachedOutput.outpoint, - breachInfo.chanPoint) - - // In this case we'll morph our initial revoke - // spend to instead point to the second level - // output, and update the sign descriptor in the - // process. 
- convertToSecondLevelRevoke( - breachedOutput, breachInfo, s.detail, - ) - - continue - } - - log.Infof("Spend on %s(%v) for ChannelPoint(%v) "+ - "transitions output to terminal state, "+ - "removing input from justice transaction", - breachedOutput.witnessType, - breachedOutput.outpoint, breachInfo.chanPoint) - - doneOutputs[s.index] = struct{}{} - } - - // Filter the inputs for which we can no longer proceed. - var nextIndex int - for i := range inputs { - if _, ok := doneOutputs[i]; ok { - continue - } - - inputs[nextIndex] = inputs[i] - nextIndex++ - } - - // Update our remaining set of outputs before continuing with - // another attempt at publication. - breachInfo.breachedOutputs = inputs[:nextIndex] - - case <-b.quit: - return errBrarShuttingDown.Default() - } - - return nil -} - -// exactRetribution is a goroutine which is executed once a contract breach has -// been detected by a breachObserver. This function is responsible for -// punishing a counterparty for violating the channel contract by sweeping ALL -// the lingering funds within the channel into the daemon's wallet. -// -// NOTE: This MUST be run as a goroutine. -func (b *breachArbiter) exactRetribution(confChan *chainntnfs.ConfirmationEvent, - breachInfo *retributionInfo) { - - defer b.wg.Done() - - // TODO(roasbeef): state needs to be checkpointed here - var breachConfHeight uint32 - select { - case breachConf, ok := <-confChan.Confirmed: - // If the second value is !ok, then the channel has been closed - // signifying a daemon shutdown, so we exit. - if !ok { - return - } - - breachConfHeight = breachConf.BlockHeight - - // Otherwise, if this is a real confirmation notification, then - // we fall through to complete our duty. - case <-b.quit: - return - } - - log.Debugf("Breach transaction %v has been confirmed, sweeping "+ - "revoked funds", breachInfo.commitHash) - - // We may have to wait for some of the HTLC outputs to be spent to the - // second level before broadcasting the justice tx. 
We'll store the - // SpendEvents between each attempt to not re-register uneccessarily. - spendNtfns := make(map[wire.OutPoint]*chainntnfs.SpendEvent) - - finalTx, err := b.cfg.Store.GetFinalizedTxn(&breachInfo.chanPoint) - if err != nil { - log.Errorf("Unable to get finalized txn for"+ - "chanid=%v: %v", &breachInfo.chanPoint, err) - return - } - - // If this retribution has not been finalized before, we will first - // construct a sweep transaction and write it to disk. This will allow - // the breach arbiter to re-register for notifications for the justice - // txid. -justiceTxBroadcast: - if finalTx == nil { - // With the breach transaction confirmed, we now create the - // justice tx which will claim ALL the funds within the - // channel. - finalTx, err = b.createJusticeTx(breachInfo) - if err != nil { - log.Errorf("Unable to create justice tx: %v", err) - return - } - - // Persist our finalized justice transaction before making an - // attempt to broadcast. - err := b.cfg.Store.Finalize(&breachInfo.chanPoint, finalTx) - if err != nil { - log.Errorf("Unable to finalize justice tx for "+ - "chanid=%v: %v", &breachInfo.chanPoint, err) - return - } - } - - log.Debugf("Broadcasting justice tx: %v", log.C(func() string { - return spew.Sdump(finalTx) - })) - - // We'll now attempt to broadcast the transaction which finalized the - // channel's retribution against the cheating counter party. - label := labels.MakeLabel(labels.LabelTypeJusticeTransaction, nil) - err = b.cfg.PublishTransaction(finalTx, label) - if err != nil { - log.Errorf("Unable to broadcast justice tx: %v", err) - - if lnwallet.ErrDoubleSpend.Is(err) { - // Broadcasting the transaction failed because of a - // conflict either in the mempool or in chain. We'll - // now create spend subscriptions for all HTLC outputs - // on the commitment transaction that could possibly - // have been spent, and wait for any of them to - // trigger. 
- log.Infof("Waiting for a spend event before " + - "attempting to craft new justice tx.") - finalTx = nil - - err := b.waitForSpendEvent(breachInfo, spendNtfns) - if err != nil { - if !errBrarShuttingDown.Is(err) { - log.Errorf("error waiting for "+ - "spend event: %v", err) - } - return - } - - if len(breachInfo.breachedOutputs) == 0 { - log.Debugf("No more outputs to sweep for "+ - "breach, marking ChannelPoint(%v) "+ - "fully resolved", breachInfo.chanPoint) - - err = b.cleanupBreach(&breachInfo.chanPoint) - if err != nil { - log.Errorf("Failed to cleanup "+ - "breached ChannelPoint(%v): %v", - breachInfo.chanPoint, err) - } - return - } - - log.Infof("Attempting another justice tx "+ - "with %d inputs", - len(breachInfo.breachedOutputs)) - - goto justiceTxBroadcast - } - } - - // As a conclusionary step, we register for a notification to be - // dispatched once the justice tx is confirmed. After confirmation we - // notify the caller that initiated the retribution workflow that the - // deed has been done. - justiceTXID := finalTx.TxHash() - justiceScript := finalTx.TxOut[0].PkScript - confChan, err = b.cfg.Notifier.RegisterConfirmationsNtfn( - &justiceTXID, justiceScript, 1, breachConfHeight, - ) - if err != nil { - log.Errorf("Unable to register for conf for txid(%v): %v", - justiceTXID, err) - return - } - - select { - case _, ok := <-confChan.Confirmed: - if !ok { - return - } - - // Compute both the total value of funds being swept and the - // amount of funds that were revoked from the counter party. - var totalFunds, revokedFunds btcutil.Amount - for _, inp := range breachInfo.breachedOutputs { - totalFunds += inp.Amount() - - // If the output being revoked is the remote commitment - // output or an offered HTLC output, it's amount - // contributes to the value of funds being revoked from - // the counter party. 
- switch inp.WitnessType() { - case input.CommitmentRevoke: - revokedFunds += inp.Amount() - case input.HtlcOfferedRevoke: - revokedFunds += inp.Amount() - default: - } - } - - log.Infof("Justice for ChannelPoint(%v) has "+ - "been served, %v revoked funds (%v total) "+ - "have been claimed", breachInfo.chanPoint, - revokedFunds, totalFunds) - - err = b.cleanupBreach(&breachInfo.chanPoint) - if err != nil { - log.Errorf("Failed to cleanup breached "+ - "ChannelPoint(%v): %v", breachInfo.chanPoint, - err) - } - - // TODO(roasbeef): add peer to blacklist? - - // TODO(roasbeef): close other active channels with offending - // peer - - return - case <-b.quit: - return - } -} - -// cleanupBreach marks the given channel point as fully resolved and removes the -// retribution for that the channel from the retribution store. -func (b *breachArbiter) cleanupBreach(chanPoint *wire.OutPoint) er.R { - // With the channel closed, mark it in the database as such. - err := b.cfg.DB.MarkChanFullyClosed(chanPoint) - if err != nil { - return er.Errorf("unable to mark chan as closed: %v", err) - } - - // Justice has been carried out; we can safely delete the retribution - // info from the database. - err = b.cfg.Store.Remove(chanPoint) - if err != nil { - return er.Errorf("unable to remove retribution from db: %v", - err) - } - - return nil -} - -// handleBreachHandoff handles a new breach event, by writing it to disk, then -// notifies the breachArbiter contract observer goroutine that a channel's -// contract has been breached by the prior counterparty. Once notified the -// breachArbiter will attempt to sweep ALL funds within the channel using the -// information provided within the BreachRetribution generated due to the -// breach of channel contract. The funds will be swept only after the breaching -// transaction receives a necessary number of confirmations. -// -// NOTE: This MUST be run as a goroutine. 
-func (b *breachArbiter) handleBreachHandoff(breachEvent *ContractBreachEvent) { - defer b.wg.Done() - - chanPoint := breachEvent.ChanPoint - log.Debugf("Handling breach handoff for ChannelPoint(%v)", - chanPoint) - - // A read from this channel indicates that a channel breach has been - // detected! So we notify the main coordination goroutine with the - // information needed to bring the counterparty to justice. - breachInfo := breachEvent.BreachRetribution - log.Warnf("REVOKED STATE #%v FOR ChannelPoint(%v) "+ - "broadcast, REMOTE PEER IS DOING SOMETHING "+ - "SKETCHY!!!", breachInfo.RevokedStateNum, - chanPoint) - - // Immediately notify the HTLC switch that this link has been - // breached in order to ensure any incoming or outgoing - // multi-hop HTLCs aren't sent over this link, nor any other - // links associated with this peer. - b.cfg.CloseLink(&chanPoint, htlcswitch.CloseBreach) - - // TODO(roasbeef): need to handle case of remote broadcast - // mid-local initiated state-transition, possible - // false-positive? - - // Acquire the mutex to ensure consistency between the call to - // IsBreached and Add below. - b.Lock() - - // We first check if this breach info is already added to the - // retribution store. - breached, err := b.cfg.Store.IsBreached(&chanPoint) - if err != nil { - b.Unlock() - log.Errorf("Unable to check breach info in DB: %v", err) - - select { - case breachEvent.ProcessACK <- err: - case <-b.quit: - } - return - } - - // If this channel is already marked as breached in the retribution - // store, we already have handled the handoff for this breach. In this - // case we can safely ACK the handoff, and return. - if breached { - b.Unlock() - - select { - case breachEvent.ProcessACK <- nil: - case <-b.quit: - } - return - } - - // Using the breach information provided by the wallet and the - // channel snapshot, construct the retribution information that - // will be persisted to disk. 
- retInfo := newRetributionInfo(&chanPoint, breachInfo) - - // Persist the pending retribution state to disk. - err = b.cfg.Store.Add(retInfo) - b.Unlock() - if err != nil { - log.Errorf("Unable to persist retribution "+ - "info to db: %v", err) - } - - // Now that the breach has been persisted, try to send an - // acknowledgment back to the close observer with the error. If - // the ack is successful, the close observer will mark the - // channel as pending-closed in the channeldb. - select { - case breachEvent.ProcessACK <- err: - // Bail if we failed to persist retribution info. - if err != nil { - return - } - - case <-b.quit: - return - } - - // Now that a new channel contract has been added to the retribution - // store, we first register for a notification to be dispatched once - // the breach transaction (the revoked commitment transaction) has been - // confirmed in the chain to ensure we're not dealing with a moving - // target. - breachTXID := &retInfo.commitHash - breachScript := retInfo.breachedOutputs[0].signDesc.Output.PkScript - cfChan, err := b.cfg.Notifier.RegisterConfirmationsNtfn( - breachTXID, breachScript, 1, retInfo.breachHeight, - ) - if err != nil { - log.Errorf("Unable to register for conf updates for "+ - "txid: %v, err: %v", breachTXID, err) - return - } - - log.Warnf("A channel has been breached with txid: %v. Waiting "+ - "for confirmation, then justice will be served!", breachTXID) - - // With the retribution state persisted, channel close persisted, and - // notification registered, we launch a new goroutine which will - // finalize the channel retribution after the breach transaction has - // been confirmed. - b.wg.Add(1) - go b.exactRetribution(cfChan, retInfo) -} - -// breachedOutput contains all the information needed to sweep a breached -// output. A breached output is an output that we are now entitled to due to a -// revoked commitment transaction being broadcast. 
-type breachedOutput struct { - amt btcutil.Amount - outpoint wire.OutPoint - witnessType input.StandardWitnessType - signDesc input.SignDescriptor - confHeight uint32 - - secondLevelWitnessScript []byte - - witnessFunc input.WitnessGenerator -} - -// makeBreachedOutput assembles a new breachedOutput that can be used by the -// breach arbiter to construct a justice or sweep transaction. -func makeBreachedOutput(outpoint *wire.OutPoint, - witnessType input.StandardWitnessType, - secondLevelScript []byte, - signDescriptor *input.SignDescriptor, - confHeight uint32) breachedOutput { - - amount := signDescriptor.Output.Value - - return breachedOutput{ - amt: btcutil.Amount(amount), - outpoint: *outpoint, - secondLevelWitnessScript: secondLevelScript, - witnessType: witnessType, - signDesc: *signDescriptor, - confHeight: confHeight, - } -} - -// Amount returns the number of satoshis contained in the breached output. -func (bo *breachedOutput) Amount() btcutil.Amount { - return bo.amt -} - -// OutPoint returns the breached output's identifier that is to be included as a -// transaction input. -func (bo *breachedOutput) OutPoint() *wire.OutPoint { - return &bo.outpoint -} - -// RequiredTxOut returns a non-nil TxOut if input commits to a certain -// transaction output. This is used in the SINGLE|ANYONECANPAY case to make -// sure any presigned input is still valid by including the output. -func (bo *breachedOutput) RequiredTxOut() *wire.TxOut { - return nil -} - -// RequiredLockTime returns whether this input commits to a tx locktime that -// must be used in the transaction including it. -func (bo *breachedOutput) RequiredLockTime() (uint32, bool) { - return 0, false -} - -// WitnessType returns the type of witness that must be generated to spend the -// breached output. -func (bo *breachedOutput) WitnessType() input.WitnessType { - return bo.witnessType -} - -// SignDesc returns the breached output's SignDescriptor, which is used during -// signing to compute the witness. 
-func (bo *breachedOutput) SignDesc() *input.SignDescriptor { - return &bo.signDesc -} - -// CraftInputScript computes a valid witness that allows us to spend from the -// breached output. It does so by first generating and memoizing the witness -// generation function, which parameterized primarily by the witness type and -// sign descriptor. The method then returns the witness computed by invoking -// this function on the first and subsequent calls. -func (bo *breachedOutput) CraftInputScript(signer input.Signer, txn *wire.MsgTx, - hashCache *txscript.TxSigHashes, txinIdx int) (*input.Script, er.R) { - - // First, we ensure that the witness generation function has been - // initialized for this breached output. - bo.witnessFunc = bo.witnessType.WitnessGenerator(signer, bo.SignDesc()) - - // Now that we have ensured that the witness generation function has - // been initialized, we can proceed to execute it and generate the - // witness for this particular breached output. - return bo.witnessFunc(txn, hashCache, txinIdx) -} - -// BlocksToMaturity returns the relative timelock, as a number of blocks, that -// must be built on top of the confirmation height before the output can be -// spent. -func (bo *breachedOutput) BlocksToMaturity() uint32 { - // If the output is a to_remote output we can claim, and it's of the - // confirmed type, we must wait one block before claiming it. - if bo.witnessType == input.CommitmentToRemoteConfirmed { - return 1 - } - - // All other breached outputs have no CSV delay. - return 0 -} - -// HeightHint returns the minimum height at which a confirmed spending tx can -// occur. -func (bo *breachedOutput) HeightHint() uint32 { - return bo.confHeight -} - -// UnconfParent returns information about a possibly unconfirmed parent tx. -func (bo *breachedOutput) UnconfParent() *input.TxInfo { - return nil -} - -// Add compile-time constraint ensuring breachedOutput implements the Input -// interface. 
-var _ input.Input = (*breachedOutput)(nil) - -// retributionInfo encapsulates all the data needed to sweep all the contested -// funds within a channel whose contract has been breached by the prior -// counterparty. This struct is used to create the justice transaction which -// spends all outputs of the commitment transaction into an output controlled -// by the wallet. -type retributionInfo struct { - commitHash chainhash.Hash - chanPoint wire.OutPoint - chainHash chainhash.Hash - breachHeight uint32 - - breachedOutputs []breachedOutput -} - -// newRetributionInfo constructs a retributionInfo containing all the -// information required by the breach arbiter to recover funds from breached -// channels. The information is primarily populated using the BreachRetribution -// delivered by the wallet when it detects a channel breach. -func newRetributionInfo(chanPoint *wire.OutPoint, - breachInfo *lnwallet.BreachRetribution) *retributionInfo { - - // Determine the number of second layer HTLCs we will attempt to sweep. - nHtlcs := len(breachInfo.HtlcRetributions) - - // Initialize a slice to hold the outputs we will attempt to sweep. The - // maximum capacity of the slice is set to 2+nHtlcs to handle the case - // where the local, remote, and all HTLCs are not dust outputs. All - // HTLC outputs provided by the wallet are guaranteed to be non-dust, - // though the commitment outputs are conditionally added depending on - // the nil-ness of their sign descriptors. - breachedOutputs := make([]breachedOutput, 0, nHtlcs+2) - - // First, record the breach information for the local channel point if - // it is not considered dust, which is signaled by a non-nil sign - // descriptor. Here we use CommitmentNoDelay (or - // CommitmentNoDelayTweakless for newer commitments) since this output - // belongs to us and has no time-based constraints on spending. 
- if breachInfo.LocalOutputSignDesc != nil { - witnessType := input.CommitmentNoDelay - if breachInfo.LocalOutputSignDesc.SingleTweak == nil { - witnessType = input.CommitSpendNoDelayTweakless - } - - // If the local delay is non-zero, it means this output is of - // the confirmed to_remote type. - if breachInfo.LocalDelay != 0 { - witnessType = input.CommitmentToRemoteConfirmed - } - - localOutput := makeBreachedOutput( - &breachInfo.LocalOutpoint, - witnessType, - // No second level script as this is a commitment - // output. - nil, - breachInfo.LocalOutputSignDesc, - breachInfo.BreachHeight, - ) - - breachedOutputs = append(breachedOutputs, localOutput) - } - - // Second, record the same information regarding the remote outpoint, - // again if it is not dust, which belongs to the party who tried to - // steal our money! Here we set witnessType of the breachedOutput to - // CommitmentRevoke, since we will be using a revoke key, withdrawing - // the funds from the commitment transaction immediately. - if breachInfo.RemoteOutputSignDesc != nil { - remoteOutput := makeBreachedOutput( - &breachInfo.RemoteOutpoint, - input.CommitmentRevoke, - // No second level script as this is a commitment - // output. - nil, - breachInfo.RemoteOutputSignDesc, - breachInfo.BreachHeight, - ) - - breachedOutputs = append(breachedOutputs, remoteOutput) - } - - // Lastly, for each of the breached HTLC outputs, record each as a - // breached output with the appropriate witness type based on its - // directionality. All HTLC outputs provided by the wallet are assumed - // to be non-dust. - for i, breachedHtlc := range breachInfo.HtlcRetributions { - // Using the breachedHtlc's incoming flag, determine the - // appropriate witness type that needs to be generated in order - // to sweep the HTLC output. 
- var htlcWitnessType input.StandardWitnessType - if breachedHtlc.IsIncoming { - htlcWitnessType = input.HtlcAcceptedRevoke - } else { - htlcWitnessType = input.HtlcOfferedRevoke - } - - htlcOutput := makeBreachedOutput( - &breachInfo.HtlcRetributions[i].OutPoint, - htlcWitnessType, - breachInfo.HtlcRetributions[i].SecondLevelWitnessScript, - &breachInfo.HtlcRetributions[i].SignDesc, - breachInfo.BreachHeight) - - breachedOutputs = append(breachedOutputs, htlcOutput) - } - - return &retributionInfo{ - commitHash: breachInfo.BreachTransaction.TxHash(), - chainHash: breachInfo.ChainHash, - chanPoint: *chanPoint, - breachedOutputs: breachedOutputs, - breachHeight: breachInfo.BreachHeight, - } -} - -// createJusticeTx creates a transaction which exacts "justice" by sweeping ALL -// the funds within the channel which we are now entitled to due to a breach of -// the channel's contract by the counterparty. This function returns a *fully* -// signed transaction with the witness for each input fully in place. -func (b *breachArbiter) createJusticeTx( - r *retributionInfo) (*wire.MsgTx, er.R) { - - // We will assemble the breached outputs into a slice of spendable - // outputs, while simultaneously computing the estimated weight of the - // transaction. - var ( - spendableOutputs []input.Input - weightEstimate input.TxWeightEstimator - ) - - // Allocate enough space to potentially hold each of the breached - // outputs in the retribution info. - spendableOutputs = make([]input.Input, 0, len(r.breachedOutputs)) - - // The justice transaction we construct will be a segwit transaction - // that pays to a p2wkh output. Components such as the version, - // nLockTime, and output are already included in the TxWeightEstimator. - weightEstimate.AddP2WKHOutput() - - // Next, we iterate over the breached outputs contained in the - // retribution info. 
For each, we switch over the witness type such - // that we contribute the appropriate weight for each input and witness, - // finally adding to our list of spendable outputs. - for i := range r.breachedOutputs { - // Grab locally scoped reference to breached output. - inp := &r.breachedOutputs[i] - - // First, determine the appropriate estimated witness weight for - // the give witness type of this breached output. If the witness - // weight cannot be estimated, we will omit it from the - // transaction. - witnessWeight, _, err := inp.WitnessType().SizeUpperBound() - if err != nil { - log.Warnf("could not determine witness weight "+ - "for breached output in retribution info: %v", - err) - continue - } - weightEstimate.AddWitnessInput(witnessWeight) - - // Finally, append this input to our list of spendable outputs. - spendableOutputs = append(spendableOutputs, inp) - } - - txWeight := int64(weightEstimate.Weight()) - return b.sweepSpendableOutputsTxn(txWeight, spendableOutputs...) -} - -// sweepSpendableOutputsTxn creates a signed transaction from a sequence of -// spendable outputs by sweeping the funds into a single p2wkh output. -func (b *breachArbiter) sweepSpendableOutputsTxn(txWeight int64, - inputs ...input.Input) (*wire.MsgTx, er.R) { - - // First, we obtain a new public key script from the wallet which we'll - // sweep the funds to. - // TODO(roasbeef): possibly create many outputs to minimize change in - // the future? - pkScript, err := b.cfg.GenSweepScript() - if err != nil { - return nil, err - } - - // Compute the total amount contained in the inputs. - var totalAmt btcutil.Amount - for _, input := range inputs { - totalAmt += btcutil.Amount(input.SignDesc().Output.Value) - } - - // We'll actually attempt to target inclusion within the next two - // blocks as we'd like to sweep these funds back into our wallet ASAP. 
- feePerKw, err := b.cfg.Estimator.EstimateFeePerKW(2) - if err != nil { - return nil, err - } - txFee := feePerKw.FeeForWeight(txWeight) - - // TODO(roasbeef): already start to siphon their funds into fees - sweepAmt := int64(totalAmt - txFee) - - // With the fee calculated, we can now create the transaction using the - // information gathered above and the provided retribution information. - txn := wire.NewMsgTx(2) - - // We begin by adding the output to which our funds will be deposited. - txn.AddTxOut(&wire.TxOut{ - PkScript: pkScript, - Value: sweepAmt, - }) - - // Next, we add all of the spendable outputs as inputs to the - // transaction. - for _, input := range inputs { - txn.AddTxIn(&wire.TxIn{ - PreviousOutPoint: *input.OutPoint(), - Sequence: input.BlocksToMaturity(), - }) - } - - // Before signing the transaction, check to ensure that it meets some - // basic validity requirements. - btx := btcutil.NewTx(txn) - if err := blockchain.CheckTransactionSanity(btx); err != nil { - return nil, err - } - - // Create a sighash cache to improve the performance of hashing and - // signing SigHashAll inputs. - hashCache := txscript.NewTxSigHashes(txn) - - // Create a closure that encapsulates the process of initializing a - // particular output's witness generation function, computing the - // witness, and attaching it to the transaction. This function accepts - // an integer index representing the intended txin index, and the - // breached output from which it will spend. - addWitness := func(idx int, so input.Input) er.R { - // First, we construct a valid witness for this outpoint and - // transaction using the SpendableOutput's witness generation - // function. - inputScript, err := so.CraftInputScript( - b.cfg.Signer, txn, hashCache, idx, - ) - if err != nil { - return err - } - - // Then, we add the witness to the transaction at the - // appropriate txin index. 
- txn.TxIn[idx].Witness = inputScript.Witness - - return nil - } - - // Finally, generate a witness for each output and attach it to the - // transaction. - for i, input := range inputs { - if err := addWitness(i, input); err != nil { - return nil, err - } - } - - return txn, nil -} - -// RetributionStore provides an interface for managing a persistent map from -// wire.OutPoint -> retributionInfo. Upon learning of a breach, a BreachArbiter -// should record the retributionInfo for the breached channel, which serves a -// checkpoint in the event that retribution needs to be resumed after failure. -// A RetributionStore provides an interface for managing the persisted set, as -// well as mapping user defined functions over the entire on-disk contents. -// -// Calls to RetributionStore may occur concurrently. A concrete instance of -// RetributionStore should use appropriate synchronization primitives, or -// be otherwise safe for concurrent access. -type RetributionStore interface { - // Add persists the retributionInfo to disk, using the information's - // chanPoint as the key. This method should overwrite any existing - // entries found under the same key, and an error should be raised if - // the addition fails. - Add(retInfo *retributionInfo) er.R - - // IsBreached queries the retribution store to see if the breach arbiter - // is aware of any breaches for the provided channel point. - IsBreached(chanPoint *wire.OutPoint) (bool, er.R) - - // Finalize persists the finalized justice transaction for a particular - // channel. - Finalize(chanPoint *wire.OutPoint, finalTx *wire.MsgTx) er.R - - // GetFinalizedTxn loads the finalized justice transaction, if any, from - // the retribution store. The finalized transaction will be nil if - // Finalize has not yet been called for this channel point. - GetFinalizedTxn(chanPoint *wire.OutPoint) (*wire.MsgTx, er.R) - - // Remove deletes the retributionInfo from disk, if any exists, under - // the given key. 
An error should be re raised if the removal fails. - Remove(key *wire.OutPoint) er.R - - // ForAll iterates over the existing on-disk contents and applies a - // chosen, read-only callback to each. This method should ensure that it - // immediately propagate any errors generated by the callback. - ForAll(cb func(*retributionInfo) er.R, reset func()) er.R -} - -// retributionStore handles persistence of retribution states to disk and is -// backed by a boltdb bucket. The primary responsibility of the retribution -// store is to ensure that we can recover from a restart in the middle of a -// breached contract retribution. -type retributionStore struct { - db *channeldb.DB -} - -// newRetributionStore creates a new instance of a retributionStore. -func newRetributionStore(db *channeldb.DB) *retributionStore { - return &retributionStore{ - db: db, - } -} - -// Add adds a retribution state to the retributionStore, which is then persisted -// to disk. -func (rs *retributionStore) Add(ret *retributionInfo) er.R { - return kvdb.Update(rs.db, func(tx kvdb.RwTx) er.R { - // If this is our first contract breach, the retributionBucket - // won't exist, in which case, we just create a new bucket. - retBucket, err := tx.CreateTopLevelBucket(retributionBucket) - if err != nil { - return err - } - - var outBuf bytes.Buffer - if err := writeOutpoint(&outBuf, &ret.chanPoint); err != nil { - return err - } - - var retBuf bytes.Buffer - if err := ret.Encode(&retBuf); err != nil { - return err - } - - return retBucket.Put(outBuf.Bytes(), retBuf.Bytes()) - }, func() {}) -} - -// Finalize writes a signed justice transaction to the retribution store. This -// is done before publishing the transaction, so that we can recover the txid on -// startup and re-register for confirmation notifications. 
-func (rs *retributionStore) Finalize(chanPoint *wire.OutPoint, - finalTx *wire.MsgTx) er.R { - return kvdb.Update(rs.db, func(tx kvdb.RwTx) er.R { - justiceBkt, err := tx.CreateTopLevelBucket(justiceTxnBucket) - if err != nil { - return err - } - - var chanBuf bytes.Buffer - if err := writeOutpoint(&chanBuf, chanPoint); err != nil { - return err - } - - var txBuf bytes.Buffer - if err := finalTx.Serialize(&txBuf); err != nil { - return err - } - - return justiceBkt.Put(chanBuf.Bytes(), txBuf.Bytes()) - }, func() {}) -} - -// GetFinalizedTxn loads the finalized justice transaction for the provided -// channel point. The finalized transaction will be nil if Finalize has yet to -// be called for this channel point. -func (rs *retributionStore) GetFinalizedTxn( - chanPoint *wire.OutPoint) (*wire.MsgTx, er.R) { - - var finalTxBytes []byte - if err := kvdb.View(rs.db, func(tx kvdb.RTx) er.R { - justiceBkt := tx.ReadBucket(justiceTxnBucket) - if justiceBkt == nil { - return nil - } - - var chanBuf bytes.Buffer - if err := writeOutpoint(&chanBuf, chanPoint); err != nil { - return err - } - - finalTxBytes = justiceBkt.Get(chanBuf.Bytes()) - - return nil - }, func() { - finalTxBytes = nil - }); err != nil { - return nil, err - } - - if finalTxBytes == nil { - return nil, nil - } - - finalTx := &wire.MsgTx{} - err := finalTx.Deserialize(bytes.NewReader(finalTxBytes)) - - return finalTx, err -} - -// IsBreached queries the retribution store to discern if this channel was -// previously breached. This is used when connecting to a peer to determine if -// it is safe to add a link to the htlcswitch, as we should never add a channel -// that has already been breached. 
-func (rs *retributionStore) IsBreached(chanPoint *wire.OutPoint) (bool, er.R) { - var found bool - err := kvdb.View(rs.db, func(tx kvdb.RTx) er.R { - retBucket := tx.ReadBucket(retributionBucket) - if retBucket == nil { - return nil - } - - var chanBuf bytes.Buffer - if err := writeOutpoint(&chanBuf, chanPoint); err != nil { - return err - } - - retInfo := retBucket.Get(chanBuf.Bytes()) - if retInfo != nil { - found = true - } - - return nil - }, func() { - found = false - }) - - return found, err -} - -// Remove removes a retribution state and finalized justice transaction by -// channel point from the retribution store. -func (rs *retributionStore) Remove(chanPoint *wire.OutPoint) er.R { - return kvdb.Update(rs.db, func(tx kvdb.RwTx) er.R { - retBucket := tx.ReadWriteBucket(retributionBucket) - - // We return an error if the bucket is not already created, - // since normal operation of the breach arbiter should never try - // to remove a finalized retribution state that is not already - // stored in the db. - if retBucket == nil { - return er.New("unable to remove retribution " + - "because the retribution bucket doesn't exist") - } - - // Serialize the channel point we are intending to remove. - var chanBuf bytes.Buffer - if err := writeOutpoint(&chanBuf, chanPoint); err != nil { - return err - } - chanBytes := chanBuf.Bytes() - - // Remove the persisted retribution info and finalized justice - // transaction. - if err := retBucket.Delete(chanBytes); err != nil { - return err - } - - // If we have not finalized this channel breach, we can exit - // early. - justiceBkt := tx.ReadWriteBucket(justiceTxnBucket) - if justiceBkt == nil { - return nil - } - - return justiceBkt.Delete(chanBytes) - }, func() {}) -} - -// ForAll iterates through all stored retributions and executes the passed -// callback function on each retribution. 
-func (rs *retributionStore) ForAll(cb func(*retributionInfo) er.R, - reset func()) er.R { - - return kvdb.View(rs.db, func(tx kvdb.RTx) er.R { - // If the bucket does not exist, then there are no pending - // retributions. - retBucket := tx.ReadBucket(retributionBucket) - if retBucket == nil { - return nil - } - - // Otherwise, we fetch each serialized retribution info, - // deserialize it, and execute the passed in callback function - // on it. - return retBucket.ForEach(func(_, retBytes []byte) er.R { - ret := &retributionInfo{} - err := ret.Decode(bytes.NewBuffer(retBytes)) - if err != nil { - return err - } - - return cb(ret) - }) - }, reset) -} - -// Encode serializes the retribution into the passed byte stream. -func (ret *retributionInfo) Encode(w io.Writer) er.R { - var scratch [4]byte - - if _, err := util.Write(w, ret.commitHash[:]); err != nil { - return err - } - - if err := writeOutpoint(w, &ret.chanPoint); err != nil { - return err - } - - if _, err := util.Write(w, ret.chainHash[:]); err != nil { - return err - } - - binary.BigEndian.PutUint32(scratch[:], ret.breachHeight) - if _, err := util.Write(w, scratch[:]); err != nil { - return err - } - - nOutputs := len(ret.breachedOutputs) - if err := wire.WriteVarInt(w, 0, uint64(nOutputs)); err != nil { - return err - } - - for _, output := range ret.breachedOutputs { - if err := output.Encode(w); err != nil { - return err - } - } - - return nil -} - -// Dencode deserializes a retribution from the passed byte stream. 
-func (ret *retributionInfo) Decode(r io.Reader) er.R { - var scratch [32]byte - - if _, err := util.ReadFull(r, scratch[:]); err != nil { - return err - } - hash, err := chainhash.NewHash(scratch[:]) - if err != nil { - return err - } - ret.commitHash = *hash - - if err := readOutpoint(r, &ret.chanPoint); err != nil { - return err - } - - if _, err := util.ReadFull(r, scratch[:]); err != nil { - return err - } - chainHash, err := chainhash.NewHash(scratch[:]) - if err != nil { - return err - } - ret.chainHash = *chainHash - - if _, err := util.ReadFull(r, scratch[:4]); err != nil { - return err - } - ret.breachHeight = binary.BigEndian.Uint32(scratch[:4]) - - nOutputsU64, err := wire.ReadVarInt(r, 0) - if err != nil { - return err - } - nOutputs := int(nOutputsU64) - - ret.breachedOutputs = make([]breachedOutput, nOutputs) - for i := range ret.breachedOutputs { - if err := ret.breachedOutputs[i].Decode(r); err != nil { - return err - } - } - - return nil -} - -// Encode serializes a breachedOutput into the passed byte stream. -func (bo *breachedOutput) Encode(w io.Writer) er.R { - var scratch [8]byte - - binary.BigEndian.PutUint64(scratch[:8], uint64(bo.amt)) - if _, err := util.Write(w, scratch[:8]); err != nil { - return err - } - - if err := writeOutpoint(w, &bo.outpoint); err != nil { - return err - } - - err := input.WriteSignDescriptor(w, &bo.signDesc) - if err != nil { - return err - } - - err = wire.WriteVarBytes(w, 0, bo.secondLevelWitnessScript) - if err != nil { - return err - } - - binary.BigEndian.PutUint16(scratch[:2], uint16(bo.witnessType)) - if _, err := util.Write(w, scratch[:2]); err != nil { - return err - } - - return nil -} - -// Decode deserializes a breachedOutput from the passed byte stream. 
-func (bo *breachedOutput) Decode(r io.Reader) er.R { - var scratch [8]byte - - if _, err := util.ReadFull(r, scratch[:8]); err != nil { - return err - } - bo.amt = btcutil.Amount(binary.BigEndian.Uint64(scratch[:8])) - - if err := readOutpoint(r, &bo.outpoint); err != nil { - return err - } - - if err := input.ReadSignDescriptor(r, &bo.signDesc); err != nil { - return err - } - - wScript, err := wire.ReadVarBytes(r, 0, 1000, "witness script") - if err != nil { - return err - } - bo.secondLevelWitnessScript = wScript - - if _, err := util.ReadFull(r, scratch[:2]); err != nil { - return err - } - bo.witnessType = input.StandardWitnessType( - binary.BigEndian.Uint16(scratch[:2]), - ) - - return nil -} diff --git a/lnd/breacharbiter_test.go b/lnd/breacharbiter_test.go deleted file mode 100644 index 2e3d7500..00000000 --- a/lnd/breacharbiter_test.go +++ /dev/null @@ -1,2055 +0,0 @@ -// +build !rpctest - -package lnd - -import ( - "bytes" - crand "crypto/rand" - "crypto/sha256" - "encoding/binary" - "io/ioutil" - "math/rand" - "net" - "os" - "reflect" - "sync" - "testing" - "time" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/chaincfg/globalcfg" - "github.com/pkt-cash/pktd/lnd/chainntnfs" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/htlcswitch" - "github.com/pkt-cash/pktd/lnd/input" - "github.com/pkt-cash/pktd/lnd/keychain" - "github.com/pkt-cash/pktd/lnd/lntest/mock" - "github.com/pkt-cash/pktd/lnd/lntest/wait" - "github.com/pkt-cash/pktd/lnd/lnwallet" - "github.com/pkt-cash/pktd/lnd/lnwallet/chainfee" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/shachain" - "github.com/pkt-cash/pktd/txscript/params" - "github.com/pkt-cash/pktd/wire" -) - -var ( - breachOutPoints = []wire.OutPoint{ - { - Hash: [chainhash.HashSize]byte{ - 0x51, 
0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda, - 0x48, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17, - 0x2d, 0xe7, 0x93, 0xe4, 0xb7, 0x25, 0xb8, 0x4d, - 0x1f, 0xb, 0x4c, 0xf9, 0x9e, 0xc5, 0x8c, 0xe9, - }, - Index: 9, - }, - { - Hash: [chainhash.HashSize]byte{ - 0xb7, 0x94, 0x38, 0x5f, 0x2d, 0x1e, 0xf7, 0xab, - 0x4d, 0x92, 0x73, 0xd1, 0x90, 0x63, 0x81, 0xb4, - 0x4f, 0x2f, 0x6f, 0x25, 0x88, 0xa3, 0xef, 0xb9, - 0x6a, 0x49, 0x18, 0x83, 0x31, 0x98, 0x47, 0x53, - }, - Index: 49, - }, - { - Hash: [chainhash.HashSize]byte{ - 0x81, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda, - 0x63, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17, - 0xd, 0xe7, 0x95, 0xe4, 0xb7, 0x25, 0xb8, 0x4d, - 0x1e, 0xb, 0x4c, 0xfd, 0x9e, 0xc5, 0x8c, 0xe9, - }, - Index: 23, - }, - } - - breachKeys = [][]byte{ - {0x04, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a, - 0x01, 0x6b, 0x49, 0x84, 0x0f, 0x8c, 0x53, 0xbc, 0x1e, - 0xb6, 0x8a, 0x38, 0x2e, 0x97, 0xb1, 0x48, 0x2e, 0xca, - 0xd7, 0xb1, 0x48, 0xa6, 0x90, 0x9a, 0x5c, 0xb2, 0xe0, - 0xea, 0xdd, 0xfb, 0x84, 0xcc, 0xf9, 0x74, 0x44, 0x64, - 0xf8, 0x2e, 0x16, 0x0b, 0xfa, 0x9b, 0x8b, 0x64, 0xf9, - 0xd4, 0xc0, 0x3f, 0x99, 0x9b, 0x86, 0x43, 0xf6, 0x56, - 0xb4, 0x12, 0xa3, - }, - {0x07, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a, - 0x01, 0x6b, 0x49, 0x84, 0x0f, 0x8c, 0x53, 0xbc, 0x1e, - 0xb6, 0x8a, 0x38, 0x2e, 0x97, 0xb1, 0x48, 0x2e, 0xca, - 0xd7, 0xb1, 0x48, 0xa6, 0x90, 0x9a, 0x5c, 0xb2, 0xe0, - 0xea, 0xdd, 0xfb, 0x84, 0xcc, 0xf9, 0x74, 0x44, 0x64, - 0xf8, 0x2e, 0x16, 0x0b, 0xfa, 0x9b, 0x8b, 0x64, 0xf9, - 0xd4, 0xc0, 0x3f, 0x99, 0x9b, 0x86, 0x43, 0xf6, 0x56, - 0xb4, 0x12, 0xa3, - }, - {0x02, 0xce, 0x0b, 0x14, 0xfb, 0x84, 0x2b, 0x1b, - 0xa5, 0x49, 0xfd, 0xd6, 0x75, 0xc9, 0x80, 0x75, 0xf1, - 0x2e, 0x9c, 0x51, 0x0f, 0x8e, 0xf5, 0x2b, 0xd0, 0x21, - 0xa9, 0xa1, 0xf4, 0x80, 0x9d, 0x3b, 0x4d, - }, - {0x02, 0xce, 0x0b, 0x14, 0xfb, 0x84, 0x2b, 0x1b, - 0x2e, 0x9c, 0x51, 0x0f, 0x8e, 0xf5, 0x2b, 0xd0, 0x21, - 0xa5, 0x49, 0xfd, 0xd6, 0x75, 0xc9, 0x80, 0x75, 0xf1, - 0xa3, 0xa1, 0xf4, 0x80, 0x9d, 
0x3b, 0x4d, - }, - } - - breachedOutputs = []breachedOutput{ - { - amt: btcutil.Amount(1e7), - outpoint: breachOutPoints[0], - witnessType: input.CommitmentNoDelay, - signDesc: input.SignDescriptor{ - SingleTweak: []byte{ - 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, - 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, - 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, - 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, - 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, - 0x02, 0x02, - }, - WitnessScript: []byte{ - 0x00, 0x14, 0xee, 0x91, 0x41, 0x7e, - 0x85, 0x6c, 0xde, 0x10, 0xa2, 0x91, - 0x1e, 0xdc, 0xbd, 0xbd, 0x69, 0xe2, - 0xef, 0xb5, 0x71, 0x48, - }, - Output: &wire.TxOut{ - Value: 5000000000, - PkScript: []byte{ - 0x41, // OP_DATA_65 - 0x04, 0xd6, 0x4b, 0xdf, 0xd0, - 0x9e, 0xb1, 0xc5, 0xfe, 0x29, - 0x5a, 0xbd, 0xeb, 0x1d, 0xca, - 0x42, 0x81, 0xbe, 0x98, 0x8e, - 0x2d, 0xa0, 0xb6, 0xc1, 0xc6, - 0xa5, 0x9d, 0xc2, 0x26, 0xc2, - 0x86, 0x24, 0xe1, 0x81, 0x75, - 0xe8, 0x51, 0xc9, 0x6b, 0x97, - 0x3d, 0x81, 0xb0, 0x1c, 0xc3, - 0x1f, 0x04, 0x78, 0x34, 0xbc, - 0x06, 0xd6, 0xd6, 0xed, 0xf6, - 0x20, 0xd1, 0x84, 0x24, 0x1a, - 0x6a, 0xed, 0x8b, 0x63, - 0xa6, // 65-byte signature - 0xac, // OP_CHECKSIG - }, - }, - HashType: params.SigHashAll, - }, - secondLevelWitnessScript: breachKeys[0], - }, - { - amt: btcutil.Amount(1e7), - outpoint: breachOutPoints[0], - witnessType: input.CommitSpendNoDelayTweakless, - signDesc: input.SignDescriptor{ - WitnessScript: []byte{ - 0x00, 0x14, 0xee, 0x91, 0x41, 0x7e, - 0x85, 0x6c, 0xde, 0x10, 0xa2, 0x91, - 0x1e, 0xdc, 0xbd, 0xbd, 0x69, 0xe2, - 0xef, 0xb5, 0x71, 0x48, - }, - Output: &wire.TxOut{ - Value: 5000000000, - PkScript: []byte{ - 0x41, // OP_DATA_65 - 0x04, 0xd6, 0x4b, 0xdf, 0xd0, - 0x9e, 0xb1, 0xc5, 0xfe, 0x29, - 0x5a, 0xbd, 0xeb, 0x1d, 0xca, - 0x42, 0x81, 0xbe, 0x98, 0x8e, - 0x2d, 0xa0, 0xb6, 0xc1, 0xc6, - 0xa5, 0x9d, 0xc2, 0x26, 0xc2, - 0x86, 0x24, 0xe1, 0x81, 0x75, - 0xe8, 0x51, 0xc9, 0x6b, 0x97, - 0x3d, 0x81, 0xb0, 0x1c, 0xc3, - 0x1f, 0x04, 0x78, 0x34, 0xbc, - 0x06, 0xd6, 0xd6, 0xed, 0xf6, 
- 0x20, 0xd1, 0x84, 0x24, 0x1a, - 0x6a, 0xed, 0x8b, 0x63, - 0xa6, // 65-byte signature - 0xac, // OP_CHECKSIG - }, - }, - HashType: params.SigHashAll, - }, - secondLevelWitnessScript: breachKeys[0], - }, - { - amt: btcutil.Amount(2e9), - outpoint: breachOutPoints[1], - witnessType: input.CommitmentRevoke, - signDesc: input.SignDescriptor{ - SingleTweak: []byte{ - 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, - 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, - 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, - 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, - 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, - 0x02, 0x02, - }, - WitnessScript: []byte{ - 0x00, 0x14, 0xee, 0x91, 0x41, 0x7e, - 0x85, 0x6c, 0xde, 0x10, 0xa2, 0x91, - 0x1e, 0xdc, 0xbd, 0xbd, 0x69, 0xe2, - 0xef, 0xb5, 0x71, 0x48, - }, - Output: &wire.TxOut{ - Value: 5000000000, - PkScript: []byte{ - 0x41, // OP_DATA_65 - 0x04, 0xd6, 0x4b, 0xdf, 0xd0, - 0x9e, 0xb1, 0xc5, 0xfe, 0x29, - 0x5a, 0xbd, 0xeb, 0x1d, 0xca, - 0x42, 0x81, 0xbe, 0x98, 0x8e, - 0x2d, 0xa0, 0xb6, 0xc1, 0xc6, - 0xa5, 0x9d, 0xc2, 0x26, 0xc2, - 0x86, 0x24, 0xe1, 0x81, 0x75, - 0xe8, 0x51, 0xc9, 0x6b, 0x97, - 0x3d, 0x81, 0xb0, 0x1c, 0xc3, - 0x1f, 0x04, 0x78, 0x34, 0xbc, - 0x06, 0xd6, 0xd6, 0xed, 0xf6, - 0x20, 0xd1, 0x84, 0x24, 0x1a, - 0x6a, 0xed, 0x8b, 0x63, - 0xa6, // 65-byte signature - 0xac, // OP_CHECKSIG - }, - }, - HashType: params.SigHashAll, - }, - secondLevelWitnessScript: breachKeys[0], - }, - { - amt: btcutil.Amount(3e4), - outpoint: breachOutPoints[2], - witnessType: input.CommitmentDelayOutput, - signDesc: input.SignDescriptor{ - SingleTweak: []byte{ - 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, - 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, - 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, - 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, - 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, - 0x02, 0x02, - }, - WitnessScript: []byte{ - 0x00, 0x14, 0xee, 0x91, 0x41, 0x7e, - 0x85, 0x6c, 0xde, 0x10, 0xa2, 0x91, - 0x1e, 0xdc, 0xbd, 0xbd, 0x69, 0xe2, - 0xef, 0xb5, 0x71, 0x48, - }, - Output: &wire.TxOut{ - Value: 5000000000, - PkScript: []byte{ - 
0x41, // OP_DATA_65 - 0x04, 0xd6, 0x4b, 0xdf, 0xd0, - 0x9e, 0xb1, 0xc5, 0xfe, 0x29, - 0x5a, 0xbd, 0xeb, 0x1d, 0xca, - 0x42, 0x81, 0xbe, 0x98, 0x8e, - 0x2d, 0xa0, 0xb6, 0xc1, 0xc6, - 0xa5, 0x9d, 0xc2, 0x26, 0xc2, - 0x86, 0x24, 0xe1, 0x81, 0x75, - 0xe8, 0x51, 0xc9, 0x6b, 0x97, - 0x3d, 0x81, 0xb0, 0x1c, 0xc3, - 0x1f, 0x04, 0x78, 0x34, 0xbc, - 0x06, 0xd6, 0xd6, 0xed, 0xf6, - 0x20, 0xd1, 0x84, 0x24, 0x1a, - 0x6a, 0xed, 0x8b, 0x63, - 0xa6, // 65-byte signature - 0xac, // OP_CHECKSIG - }, - }, - HashType: params.SigHashAll, - }, - secondLevelWitnessScript: breachKeys[0], - }, - } - - retributionMap = make(map[wire.OutPoint]retributionInfo) - retributions = []retributionInfo{ - { - commitHash: [chainhash.HashSize]byte{ - 0xb7, 0x94, 0x38, 0x5f, 0x2d, 0x1e, 0xf7, 0xab, - 0x4d, 0x92, 0x73, 0xd1, 0x90, 0x63, 0x81, 0xb4, - 0x4f, 0x2f, 0x6f, 0x25, 0x88, 0xa3, 0xef, 0xb9, - 0x6a, 0x49, 0x18, 0x83, 0x31, 0x98, 0x47, 0x53, - }, - chainHash: [chainhash.HashSize]byte{ - 0x4d, 0x92, 0x73, 0xd1, 0x90, 0x63, 0x81, 0xb4, - 0x4f, 0x2f, 0x6f, 0x25, 0x88, 0xa3, 0xef, 0xb9, - 0xb7, 0x94, 0x38, 0x5f, 0x2d, 0x1e, 0xf7, 0xab, - 0x6b, 0x49, 0x18, 0x83, 0x31, 0x98, 0x47, 0x53, - }, - chanPoint: breachOutPoints[0], - breachHeight: 337, - // Set to breachedOutputs 0 and 1 in init() - breachedOutputs: []breachedOutput{{}, {}}, - }, - { - commitHash: [chainhash.HashSize]byte{ - 0x51, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda, - 0x48, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17, - 0x2d, 0xe7, 0x93, 0xe4, 0xb7, 0x25, 0xb8, 0x4d, - 0x1f, 0xb, 0x4c, 0xf9, 0x9e, 0xc5, 0x8c, 0xe9, - }, - chainHash: [chainhash.HashSize]byte{ - 0x4f, 0x2f, 0x6f, 0x25, 0x88, 0xa3, 0xef, 0xb9, - 0xb7, 0x94, 0x39, 0x5f, 0x2d, 0x1e, 0xf7, 0xab, - 0x6b, 0x49, 0x18, 0x83, 0x31, 0x98, 0x47, 0x53, - 0x4d, 0x92, 0x73, 0xd1, 0x90, 0x63, 0x81, 0xb4, - }, - chanPoint: breachOutPoints[1], - breachHeight: 420420, - // Set to breachedOutputs 1 and 2 in init() - breachedOutputs: []breachedOutput{{}, {}}, - }, - } -) - -func init() { - // 
Ensure that breached outputs are initialized before starting tests. - if err := initBreachedOutputs(); err != nil { - panic(err) - } - - // Populate a retribution map to for convenience, to allow lookups by - // channel point. - for i := range retributions { - retInfo := &retributions[i] - retInfo.breachedOutputs[0] = breachedOutputs[i] - retInfo.breachedOutputs[1] = breachedOutputs[i+1] - - retributionMap[retInfo.chanPoint] = *retInfo - - } -} - -// FailingRetributionStore wraps a RetributionStore and supports controlled -// restarts of the persistent instance. This allows us to test (1) that no -// modifications to the entries are made between calls or through side effects, -// and (2) that the database is actually being persisted between actions. -type FailingRetributionStore interface { - RetributionStore - - Restart() -} - -// failingRetributionStore is a concrete implementation of a -// FailingRetributionStore. It wraps an underlying RetributionStore and is -// parameterized entirely by a restart function, which is intended to simulate a -// full stop/start of the store. -type failingRetributionStore struct { - mu sync.Mutex - - rs RetributionStore - - nextAddErr er.R - - restart func() RetributionStore -} - -// newFailingRetributionStore creates a new failing retribution store. The given -// restart closure should ensure that it is reloading its contents from the -// persistent source. -func newFailingRetributionStore( - restart func() RetributionStore) *failingRetributionStore { - - return &failingRetributionStore{ - mu: sync.Mutex{}, - rs: restart(), - restart: restart, - } -} - -// FailNextAdd instructs the retribution store to return the provided error. If -// the error is nil, a generic default will be used. 
-func (frs *failingRetributionStore) FailNextAdd(err er.R) { - if err == nil { - err = er.New("retribution store failed") - } - - frs.mu.Lock() - frs.nextAddErr = err - frs.mu.Unlock() -} - -func (frs *failingRetributionStore) Restart() { - frs.mu.Lock() - frs.rs = frs.restart() - frs.mu.Unlock() -} - -// Add forwards the call to the underlying retribution store, unless this Add -// has been previously instructed to fail. -func (frs *failingRetributionStore) Add(retInfo *retributionInfo) er.R { - frs.mu.Lock() - defer frs.mu.Unlock() - - if frs.nextAddErr != nil { - err := frs.nextAddErr - frs.nextAddErr = nil - return err - } - - return frs.rs.Add(retInfo) -} - -func (frs *failingRetributionStore) IsBreached(chanPoint *wire.OutPoint) (bool, er.R) { - frs.mu.Lock() - defer frs.mu.Unlock() - - return frs.rs.IsBreached(chanPoint) -} - -func (frs *failingRetributionStore) Finalize(chanPoint *wire.OutPoint, - finalTx *wire.MsgTx) er.R { - - frs.mu.Lock() - defer frs.mu.Unlock() - - return frs.rs.Finalize(chanPoint, finalTx) -} - -func (frs *failingRetributionStore) GetFinalizedTxn( - chanPoint *wire.OutPoint) (*wire.MsgTx, er.R) { - - frs.mu.Lock() - defer frs.mu.Unlock() - - return frs.rs.GetFinalizedTxn(chanPoint) -} - -func (frs *failingRetributionStore) Remove(key *wire.OutPoint) er.R { - frs.mu.Lock() - defer frs.mu.Unlock() - - return frs.rs.Remove(key) -} - -func (frs *failingRetributionStore) ForAll(cb func(*retributionInfo) er.R, - reset func()) er.R { - - frs.mu.Lock() - defer frs.mu.Unlock() - - return frs.rs.ForAll(cb, reset) -} - -// Parse the pubkeys in the breached outputs. -func initBreachedOutputs() er.R { - for i := range breachedOutputs { - bo := &breachedOutputs[i] - - // Parse the sign descriptor's pubkey. 
- pubkey, err := btcec.ParsePubKey(breachKeys[i], btcec.S256()) - if err != nil { - return er.Errorf("unable to parse pubkey: %v", - breachKeys[i]) - } - bo.signDesc.KeyDesc.PubKey = pubkey - } - - return nil -} - -// Test that breachedOutput Encode/Decode works. -func TestBreachedOutputSerialization(t *testing.T) { - for i := range breachedOutputs { - bo := &breachedOutputs[i] - - var buf bytes.Buffer - - if err := bo.Encode(&buf); err != nil { - t.Fatalf("unable to serialize breached output [%v]: %v", - i, err) - } - - desBo := &breachedOutput{} - if err := desBo.Decode(&buf); err != nil { - t.Fatalf("unable to deserialize "+ - "breached output [%v]: %v", i, err) - } - - if !reflect.DeepEqual(bo, desBo) { - t.Fatalf("original and deserialized "+ - "breached outputs not equal:\n"+ - "original : %+v\n"+ - "deserialized : %+v\n", - bo, desBo) - } - } -} - -// Test that retribution Encode/Decode works. -func TestRetributionSerialization(t *testing.T) { - for i := range retributions { - ret := &retributions[i] - - var buf bytes.Buffer - - if err := ret.Encode(&buf); err != nil { - t.Fatalf("unable to serialize retribution [%v]: %v", - i, err) - } - - desRet := &retributionInfo{} - if err := desRet.Decode(&buf); err != nil { - t.Fatalf("unable to deserialize retribution [%v]: %v", - i, err) - } - - if !reflect.DeepEqual(ret, desRet) { - t.Fatalf("original and deserialized "+ - "retribution infos not equal:\n"+ - "original : %+v\n"+ - "deserialized : %+v\n", - ret, desRet) - } - } -} - -// copyRetInfo creates a complete copy of the given retributionInfo. 
-func copyRetInfo(retInfo *retributionInfo) *retributionInfo { - nOutputs := len(retInfo.breachedOutputs) - - ret := &retributionInfo{ - commitHash: retInfo.commitHash, - chainHash: retInfo.chainHash, - chanPoint: retInfo.chanPoint, - breachHeight: retInfo.breachHeight, - breachedOutputs: make([]breachedOutput, nOutputs), - } - - for i := range retInfo.breachedOutputs { - ret.breachedOutputs[i] = retInfo.breachedOutputs[i] - } - - return ret -} - -// mockRetributionStore implements the RetributionStore interface and is backed -// by an in-memory map. Access to the internal state is provided by a mutex. -// TODO(cfromknecht) extend to support and test controlled failures. -type mockRetributionStore struct { - mu sync.Mutex - state map[wire.OutPoint]*retributionInfo - finalTxs map[wire.OutPoint]*wire.MsgTx -} - -func newMockRetributionStore() *mockRetributionStore { - return &mockRetributionStore{ - mu: sync.Mutex{}, - state: make(map[wire.OutPoint]*retributionInfo), - finalTxs: make(map[wire.OutPoint]*wire.MsgTx), - } -} - -func (rs *mockRetributionStore) Add(retInfo *retributionInfo) er.R { - rs.mu.Lock() - rs.state[retInfo.chanPoint] = copyRetInfo(retInfo) - rs.mu.Unlock() - - return nil -} - -func (rs *mockRetributionStore) IsBreached(chanPoint *wire.OutPoint) (bool, er.R) { - rs.mu.Lock() - _, ok := rs.state[*chanPoint] - rs.mu.Unlock() - - return ok, nil -} - -func (rs *mockRetributionStore) Finalize(chanPoint *wire.OutPoint, - finalTx *wire.MsgTx) er.R { - - rs.mu.Lock() - rs.finalTxs[*chanPoint] = finalTx - rs.mu.Unlock() - - return nil -} - -func (rs *mockRetributionStore) GetFinalizedTxn( - chanPoint *wire.OutPoint) (*wire.MsgTx, er.R) { - - rs.mu.Lock() - finalTx := rs.finalTxs[*chanPoint] - rs.mu.Unlock() - - return finalTx, nil -} - -func (rs *mockRetributionStore) Remove(key *wire.OutPoint) er.R { - rs.mu.Lock() - delete(rs.state, *key) - delete(rs.finalTxs, *key) - rs.mu.Unlock() - - return nil -} - -func (rs *mockRetributionStore) ForAll(cb 
func(*retributionInfo) er.R, - reset func()) er.R { - - rs.mu.Lock() - defer rs.mu.Unlock() - - reset() - for _, retInfo := range rs.state { - if err := cb(copyRetInfo(retInfo)); err != nil { - return err - } - } - - return nil -} - -var retributionStoreTestSuite = []struct { - name string - test func(FailingRetributionStore, *testing.T) -}{ - { - "Initialization", - testRetributionStoreInit, - }, - { - "Add/Remove", - testRetributionStoreAddRemove, - }, - { - "Persistence", - testRetributionStorePersistence, - }, - { - "Overwrite", - testRetributionStoreOverwrite, - }, - { - "RemoveEmpty", - testRetributionStoreRemoveEmpty, - }, -} - -// TestMockRetributionStore instantiates a mockRetributionStore and tests its -// behavior using the general RetributionStore test suite. -func TestMockRetributionStore(t *testing.T) { - for _, test := range retributionStoreTestSuite { - t.Run( - "mockRetributionStore."+test.name, - func(tt *testing.T) { - mrs := newMockRetributionStore() - frs := newFailingRetributionStore( - func() RetributionStore { return mrs }, - ) - test.test(frs, tt) - }, - ) - } -} - -func makeTestChannelDB() (*channeldb.DB, func(), er.R) { - // First, create a temporary directory to be used for the duration of - // this test. - tempDirName, errr := ioutil.TempDir("", "channeldb") - if errr != nil { - return nil, nil, er.E(errr) - } - - cleanUp := func() { - os.RemoveAll(tempDirName) - } - - db, err := channeldb.Open(tempDirName) - if err != nil { - cleanUp() - return nil, nil, err - } - - return db, cleanUp, nil -} - -// TestChannelDBRetributionStore instantiates a retributionStore backed by a -// channeldb.DB, and tests its behavior using the general RetributionStore test -// suite. -func TestChannelDBRetributionStore(t *testing.T) { - // Finally, instantiate retribution store and execute RetributionStore - // test suite. 
- for _, test := range retributionStoreTestSuite { - t.Run( - "channeldbDBRetributionStore."+test.name, - func(tt *testing.T) { - db, cleanUp, err := makeTestChannelDB() - if err != nil { - t.Fatalf("unable to open channeldb: %v", err) - } - defer db.Close() - defer cleanUp() - - restartDb := func() RetributionStore { - // Close and reopen channeldb - if err = db.Close(); err != nil { - t.Fatalf("unable to close "+ - "channeldb during "+ - "restart: %v", - err) - } - db, err = channeldb.Open(db.Path()) - if err != nil { - t.Fatalf("unable to open "+ - "channeldb: %v", err) - } - - return newRetributionStore(db) - } - - frs := newFailingRetributionStore(restartDb) - test.test(frs, tt) - }, - ) - } -} - -// countRetributions uses a retribution store's ForAll to count the number of -// elements emitted from the store. -func countRetributions(t *testing.T, rs RetributionStore) int { - count := 0 - err := rs.ForAll(func(_ *retributionInfo) er.R { - count++ - return nil - }, func() { - count = 0 - }) - if err != nil { - t.Fatalf("unable to list retributions in db: %v", err) - } - return count -} - -// testRetributionStoreAddRemove executes a generic test suite for any concrete -// implementation of the RetributionStore interface. This test adds all -// retributions to the store, confirms that they are all present, and then -// removes each one individually. Between each addition or removal, the number -// of elements in the store is checked to ensure that it only changes by one. -func testRetributionStoreAddRemove(frs FailingRetributionStore, t *testing.T) { - // Make sure that a new retribution store is actually empty. - if count := countRetributions(t, frs); count != 0 { - t.Fatalf("expected 0 retributions, found %v", count) - } - - // Add all retributions, check that ForAll returns the correct - // information, and then remove all retributions. 
- testRetributionStoreAdds(frs, t, false) - testRetributionStoreForAll(frs, t, false) - testRetributionStoreRemoves(frs, t, false) -} - -// testRetributionStorePersistence executes the same general test as -// testRetributionStoreAddRemove, except that it also restarts the store between -// each operation to ensure that the results are properly persisted. -func testRetributionStorePersistence(frs FailingRetributionStore, t *testing.T) { - // Make sure that a new retribution store is still empty after failing - // right off the bat. - frs.Restart() - if count := countRetributions(t, frs); count != 0 { - t.Fatalf("expected 1 retributions, found %v", count) - } - - // Insert all retributions into the database, restarting and checking - // between subsequent calls to test that each intermediate additions are - // persisted. - testRetributionStoreAdds(frs, t, true) - - // After all retributions have been inserted, verify that the store - // emits a distinct set of retributions that are equivalent to the test - // vector. - testRetributionStoreForAll(frs, t, true) - - // Remove all retributions from the database, restarting and checking - // between subsequent calls to test that each intermediate removals are - // persisted. - testRetributionStoreRemoves(frs, t, true) -} - -// testRetributionStoreInit ensures that a retribution store is always -// initialized with no retributions. -func testRetributionStoreInit(frs FailingRetributionStore, t *testing.T) { - // Make sure that a new retribution store starts empty. - if count := countRetributions(t, frs); count != 0 { - t.Fatalf("expected 0 retributions, found %v", count) - } -} - -// testRetributionStoreRemoveEmpty ensures that a retribution store will not -// fail or panic if it is instructed to remove an entry while empty. 
-func testRetributionStoreRemoveEmpty(frs FailingRetributionStore, t *testing.T) { - testRetributionStoreRemoves(frs, t, false) -} - -// testRetributionStoreOverwrite ensures that attempts to write retribution -// information regarding a channel point that already exists does not change the -// total number of entries held by the retribution store. -func testRetributionStoreOverwrite(frs FailingRetributionStore, t *testing.T) { - // Initially, add all retributions to store. - testRetributionStoreAdds(frs, t, false) - - // Overwrite the initial entries again. - for i, retInfo := range retributions { - if err := frs.Add(&retInfo); err != nil { - t.Fatalf("unable to add to retribution %v to store: %v", - i, err) - } - } - - // Check that retribution store still has 2 entries. - if count := countRetributions(t, frs); count != 2 { - t.Fatalf("expected 2 retributions, found %v", count) - } -} - -// testRetributionStoreAdds adds all of the test retributions to the database, -// ensuring that the total number of elements increases by exactly 1 after each -// operation. If the `failing` flag is provide, the test will restart the -// database and confirm that the delta is still 1. -func testRetributionStoreAdds( - frs FailingRetributionStore, - t *testing.T, - failing bool) { - - // Iterate over retributions, adding each from the store. If we are - // testing the store under failures, we restart the store and verify - // that the contents are the same. - for i, retInfo := range retributions { - // Snapshot number of entries before and after the addition. - nbefore := countRetributions(t, frs) - if err := frs.Add(&retInfo); err != nil { - t.Fatalf("unable to add to retribution %v to store: %v", - i, err) - } - nafter := countRetributions(t, frs) - - // Check that only one retribution was added. 
- if nafter-nbefore != 1 { - t.Fatalf("expected %v retributions, found %v", - nbefore+1, nafter) - } - - if failing { - frs.Restart() - - // Check that retribution store has persisted addition - // after restarting. - nrestart := countRetributions(t, frs) - if nrestart-nbefore != 1 { - t.Fatalf("expected %v retributions, found %v", - nbefore+1, nrestart) - } - } - } -} - -// testRetributionStoreRemoves removes all of the test retributions to the -// database, ensuring that the total number of elements decreases by exactly 1 -// after each operation. If the `failing` flag is provide, the test will -// restart the database and confirm that the delta is the same. -func testRetributionStoreRemoves( - frs FailingRetributionStore, - t *testing.T, - failing bool) { - - // Iterate over retributions, removing each from the store. If we are - // testing the store under failures, we restart the store and verify - // that the contents are the same. - for i, retInfo := range retributions { - // Snapshot number of entries before and after the removal. - nbefore := countRetributions(t, frs) - err := frs.Remove(&retInfo.chanPoint) - switch { - case nbefore == 0 && err == nil: - - case nbefore > 0 && err != nil: - t.Fatalf("unable to remove to retribution %v "+ - "from store: %v", i, err) - } - nafter := countRetributions(t, frs) - - // If the store is empty, increment nbefore to simulate the - // removal of one element. - if nbefore == 0 { - nbefore++ - } - - // Check that only one retribution was removed. - if nbefore-nafter != 1 { - t.Fatalf("expected %v retributions, found %v", - nbefore-1, nafter) - } - - if failing { - frs.Restart() - - // Check that retribution store has persisted removal - // after restarting. 
- nrestart := countRetributions(t, frs) - if nbefore-nrestart != 1 { - t.Fatalf("expected %v retributions, found %v", - nbefore-1, nrestart) - } - } - } -} - -// testRetributionStoreForAll iterates over the current entries in the -// retribution store, ensuring that each entry in the database is unique, and -// corresponds to exactly one of the entries in the test vector. If the -// `failing` flag is provide, the test will restart the database and confirm -// that the entries again validate against the test vectors. -func testRetributionStoreForAll( - frs FailingRetributionStore, - t *testing.T, - failing bool) { - - // nrets is the number of retributions in the test vector - nrets := len(retributions) - - // isRestart indicates whether or not the database has been restarted. - // When testing for failures, this allows the test case to make a second - // attempt without causing a subsequent restart on the second pass. - var isRestart bool - -restartCheck: - // Construct a set of all channel points presented by the store. Entries - // are only be added to the set if their corresponding retribution - // information matches the test vector. - var foundSet map[wire.OutPoint]struct{} - - // Iterate through the stored retributions, checking to see if we have - // an equivalent retribution in the test vector. This will return an - // error unless all persisted retributions exist in the test vector. - if err := frs.ForAll(func(ret *retributionInfo) er.R { - // Fetch the retribution information from the test vector. If - // the entry does not exist, the test returns an error. - if exRetInfo, ok := retributionMap[ret.chanPoint]; ok { - // Compare the presented retribution information with - // the expected value, fail if they are inconsistent. 
- if !reflect.DeepEqual(ret, &exRetInfo) { - return er.Errorf("unexpected retribution "+ - "retrieved from db --\n"+ - "want: %#v\ngot: %#v", exRetInfo, ret, - ) - } - - // Retribution information from database matches the - // test vector, record the channel point in the found - // map. - foundSet[ret.chanPoint] = struct{}{} - - } else { - return er.Errorf("unknown retribution retrieved "+ - "from db: %v", ret) - } - - return nil - }, func() { - foundSet = make(map[wire.OutPoint]struct{}) - }); err != nil { - t.Fatalf("failed to iterate over persistent retributions: %v", - err) - } - - // Check that retribution store emits nrets entries - if count := countRetributions(t, frs); count != nrets { - t.Fatalf("expected %v retributions, found %v", nrets, count) - } - - // Confirm that all of the retributions emitted from the iteration - // correspond to unique channel points. - nunique := len(foundSet) - if nunique != nrets { - t.Fatalf("expected %v unique retributions, only found %v", - nrets, nunique) - } - - // If in failure mode on only on first pass, restart the database and - // rexecute the test. - if failing && !isRestart { - frs.Restart() - isRestart = true - - goto restartCheck - } -} - -func initBreachedState(t *testing.T) (*breachArbiter, - *lnwallet.LightningChannel, *lnwallet.LightningChannel, - *lnwallet.LocalForceCloseSummary, chan *ContractBreachEvent, - func(), func()) { - // Create a pair of channels using a notifier that allows us to signal - // a spend of the funding transaction. Alice's channel will be the on - // observing a breach. - alice, bob, cleanUpChans, err := createInitChannels(1) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } - - // Instantiate a breach arbiter to handle the breach of alice's channel. 
- contractBreaches := make(chan *ContractBreachEvent) - - brar, cleanUpArb, err := createTestArbiter( - t, contractBreaches, alice.State().Db, - ) - if err != nil { - t.Fatalf("unable to initialize test breach arbiter: %v", err) - } - - // Send one HTLC to Bob and perform a state transition to lock it in. - htlcAmount := lnwire.NewMSatFromSatoshis(20000) - htlc, _ := createHTLC(0, htlcAmount) - if _, err := alice.AddHTLC(htlc, nil); err != nil { - t.Fatalf("alice unable to add htlc: %v", err) - } - if _, err := bob.ReceiveHTLC(htlc); err != nil { - t.Fatalf("bob unable to recv add htlc: %v", err) - } - if err := forceStateTransition(alice, bob); err != nil { - t.Fatalf("Can't update the channel state: %v", err) - } - - // Generate the force close summary at this point in time, this will - // serve as the old state bob will broadcast. - bobClose, err := bob.ForceClose() - if err != nil { - t.Fatalf("unable to force close bob's channel: %v", err) - } - - // Now send another HTLC and perform a state transition, this ensures - // Alice is ahead of the state Bob will broadcast. - htlc2, _ := createHTLC(1, htlcAmount) - if _, err := alice.AddHTLC(htlc2, nil); err != nil { - t.Fatalf("alice unable to add htlc: %v", err) - } - if _, err := bob.ReceiveHTLC(htlc2); err != nil { - t.Fatalf("bob unable to recv add htlc: %v", err) - } - if err := forceStateTransition(alice, bob); err != nil { - t.Fatalf("Can't update the channel state: %v", err) - } - - return brar, alice, bob, bobClose, contractBreaches, cleanUpChans, - cleanUpArb -} - -// TestBreachHandoffSuccess tests that a channel's close observer properly -// delivers retribution information to the breach arbiter in response to a -// breach close. This test verifies correctness in the event that the handoff -// experiences no interruptions. 
-func TestBreachHandoffSuccess(t *testing.T) { - brar, alice, _, bobClose, contractBreaches, - cleanUpChans, cleanUpArb := initBreachedState(t) - defer cleanUpChans() - defer cleanUpArb() - - chanPoint := alice.ChanPoint - - // Signal a spend of the funding transaction and wait for the close - // observer to exit. - breach := &ContractBreachEvent{ - ChanPoint: *chanPoint, - ProcessACK: make(chan er.R, 1), - BreachRetribution: &lnwallet.BreachRetribution{ - BreachTransaction: bobClose.CloseTx, - LocalOutputSignDesc: &input.SignDescriptor{ - Output: &wire.TxOut{ - PkScript: breachKeys[0], - }, - }, - }, - } - contractBreaches <- breach - - // We'll also wait to consume the ACK back from the breach arbiter. - select { - case err := <-breach.ProcessACK: - if err != nil { - t.Fatalf("handoff failed: %v", err) - } - case <-time.After(time.Second * 15): - t.Fatalf("breach arbiter didn't send ack back") - } - - // After exiting, the breach arbiter should have persisted the - // retribution information and the channel should be shown as pending - // force closed. - assertArbiterBreach(t, brar, chanPoint) - - // Send another breach event. Since the handoff for this channel was - // already ACKed, the breach arbiter should immediately ACK and ignore - // this event. - breach = &ContractBreachEvent{ - ChanPoint: *chanPoint, - ProcessACK: make(chan er.R, 1), - BreachRetribution: &lnwallet.BreachRetribution{ - BreachTransaction: bobClose.CloseTx, - LocalOutputSignDesc: &input.SignDescriptor{ - Output: &wire.TxOut{ - PkScript: breachKeys[0], - }, - }, - }, - } - - contractBreaches <- breach - - // We'll also wait to consume the ACK back from the breach arbiter. - select { - case err := <-breach.ProcessACK: - if err != nil { - t.Fatalf("handoff failed: %v", err) - } - case <-time.After(time.Second * 15): - t.Fatalf("breach arbiter didn't send ack back") - } - - // State should not have changed. 
- assertArbiterBreach(t, brar, chanPoint) -} - -// TestBreachHandoffFail tests that a channel's close observer properly -// delivers retribution information to the breach arbiter in response to a -// breach close. This test verifies correctness in the event that the breach -// arbiter fails to write the information to disk, and that a subsequent attempt -// at the handoff succeeds. -func TestBreachHandoffFail(t *testing.T) { - brar, alice, _, bobClose, contractBreaches, - cleanUpChans, cleanUpArb := initBreachedState(t) - defer cleanUpChans() - defer cleanUpArb() - - // Before alerting Alice of the breach, instruct our failing retribution - // store to fail the next database operation, which we expect to write - // the information handed off by the channel's close observer. - fstore := brar.cfg.Store.(*failingRetributionStore) - fstore.FailNextAdd(nil) - - // Signal the notifier to dispatch spend notifications of the funding - // transaction using the transaction from bob's closing summary. - chanPoint := alice.ChanPoint - breach := &ContractBreachEvent{ - ChanPoint: *chanPoint, - ProcessACK: make(chan er.R, 1), - BreachRetribution: &lnwallet.BreachRetribution{ - BreachTransaction: bobClose.CloseTx, - LocalOutputSignDesc: &input.SignDescriptor{ - Output: &wire.TxOut{ - PkScript: breachKeys[0], - }, - }, - }, - } - contractBreaches <- breach - - // We'll also wait to consume the ACK back from the breach arbiter. - select { - case err := <-breach.ProcessACK: - if err == nil { - t.Fatalf("breach write should have failed") - } - case <-time.After(time.Second * 15): - t.Fatalf("breach arbiter didn't send ack back") - } - - // Since the handoff failed, the breach arbiter should not show the - // channel as breached, and the channel should also not have been marked - // pending closed. 
- assertNoArbiterBreach(t, brar, chanPoint) - assertNotPendingClosed(t, alice) - - brar, cleanUpArb, err := createTestArbiter( - t, contractBreaches, alice.State().Db, - ) - if err != nil { - t.Fatalf("unable to initialize test breach arbiter: %v", err) - } - defer cleanUpArb() - - // Signal a spend of the funding transaction and wait for the close - // observer to exit. This time we are allowing the handoff to succeed. - breach = &ContractBreachEvent{ - ChanPoint: *chanPoint, - ProcessACK: make(chan er.R, 1), - BreachRetribution: &lnwallet.BreachRetribution{ - BreachTransaction: bobClose.CloseTx, - LocalOutputSignDesc: &input.SignDescriptor{ - Output: &wire.TxOut{ - PkScript: breachKeys[0], - }, - }, - }, - } - - contractBreaches <- breach - - select { - case err := <-breach.ProcessACK: - if err != nil { - t.Fatalf("handoff failed: %v", err) - } - case <-time.After(time.Second * 15): - t.Fatalf("breach arbiter didn't send ack back") - } - - // Check that the breach was properly recorded in the breach arbiter, - // and that the close observer marked the channel as pending closed - // before exiting. - assertArbiterBreach(t, brar, chanPoint) -} - -type publAssertion func(*testing.T, map[wire.OutPoint]*wire.MsgTx, - chan *wire.MsgTx) - -type breachTest struct { - name string - - // spend2ndLevel requests that second level htlcs be spent *again*, as - // if by a remote party or watchtower. The outpoint of the second level - // htlc is in effect "readded" to the set of inputs. - spend2ndLevel bool - - // sendFinalConf informs the test to send a confirmation for the justice - // transaction before asserting the arbiter is cleaned up. - sendFinalConf bool - - // whenNonZeroInputs is called after spending an input but there are - // further inputs to spend in the test. - whenNonZeroInputs publAssertion - - // whenZeroInputs is called after spending an input but there are no - // further inputs to spend in the test. 
- whenZeroInputs publAssertion -} - -var ( - // commitSpendTx is used to spend commitment outputs. - commitSpendTx = &wire.MsgTx{ - TxOut: []*wire.TxOut{ - {Value: 500000000}, - }, - } - // htlc2ndLevlTx is used to transition an htlc output on the commitment - // transaction to a second level htlc. - htlc2ndLevlTx = &wire.MsgTx{ - TxOut: []*wire.TxOut{ - {Value: 20000}, - }, - } - // htlcSpendTx is used to spend from a second level htlc. - htlcSpendTx = &wire.MsgTx{ - TxOut: []*wire.TxOut{ - {Value: 10000}, - }, - } -) - -var breachTests = []breachTest{ - { - name: "all spends", - spend2ndLevel: true, - whenNonZeroInputs: func(t *testing.T, - inputs map[wire.OutPoint]*wire.MsgTx, - publTx chan *wire.MsgTx) { - - var tx *wire.MsgTx - select { - case tx = <-publTx: - case <-time.After(5 * time.Second): - t.Fatalf("tx was not published") - } - - // The justice transaction should have thee same number - // of inputs as we are tracking in the test. - if len(tx.TxIn) != len(inputs) { - t.Fatalf("expected justice txn to have %d "+ - "inputs, found %d", len(inputs), - len(tx.TxIn)) - } - - // Ensure that each input exists on the justice - // transaction. - for in := range inputs { - findInputIndex(t, in, tx) - } - - }, - whenZeroInputs: func(t *testing.T, - inputs map[wire.OutPoint]*wire.MsgTx, - publTx chan *wire.MsgTx) { - - // Sanity check to ensure the brar doesn't try to - // broadcast another sweep, since all outputs have been - // spent externally. 
- select { - case <-publTx: - t.Fatalf("tx published unexpectedly") - case <-time.After(50 * time.Millisecond): - } - }, - }, - { - name: "commit spends, second level sweep", - spend2ndLevel: false, - sendFinalConf: true, - whenNonZeroInputs: func(t *testing.T, - inputs map[wire.OutPoint]*wire.MsgTx, - publTx chan *wire.MsgTx) { - - select { - case <-publTx: - case <-time.After(5 * time.Second): - t.Fatalf("tx was not published") - } - }, - whenZeroInputs: func(t *testing.T, - inputs map[wire.OutPoint]*wire.MsgTx, - publTx chan *wire.MsgTx) { - - // Now a transaction attempting to spend from the second - // level tx should be published instead. Let this - // publish succeed by setting the publishing error to - // nil. - var tx *wire.MsgTx - select { - case tx = <-publTx: - case <-time.After(5 * time.Second): - t.Fatalf("tx was not published") - } - - // The commitment outputs should be gone, and there - // should only be a single htlc spend. - if len(tx.TxIn) != 1 { - t.Fatalf("expect 1 htlc output, found %d "+ - "outputs", len(tx.TxIn)) - } - - // The remaining TxIn previously attempting to spend - // the HTLC outpoint should now be spending from the - // second level tx. - // - // NOTE: Commitment outputs and htlc sweeps are spent - // with a different transactions (and thus txids), - // ensuring we aren't mistaking this for a different - // output type. - onlyInput := tx.TxIn[0].PreviousOutPoint.Hash - if onlyInput != htlc2ndLevlTx.TxHash() { - t.Fatalf("tx not attempting to spend second "+ - "level tx, %v", tx.TxIn[0]) - } - }, - }, -} - -// TestBreachSpends checks the behavior of the breach arbiter in response to -// spend events on a channels outputs by asserting that it properly removes or -// modifies the inputs from the justice txn. 
-func TestBreachSpends(t *testing.T) { - for _, test := range breachTests { - tc := test - t.Run(tc.name, func(t *testing.T) { - testBreachSpends(t, tc) - }) - } -} - -func testBreachSpends(t *testing.T, test breachTest) { - brar, alice, _, bobClose, contractBreaches, - cleanUpChans, cleanUpArb := initBreachedState(t) - defer cleanUpChans() - defer cleanUpArb() - - var ( - height = bobClose.ChanSnapshot.CommitHeight - forceCloseTx = bobClose.CloseTx - chanPoint = alice.ChanPoint - publTx = make(chan *wire.MsgTx) - publErr *er.ErrorCode - publMtx sync.Mutex - ) - - // Make PublishTransaction always return ErrDoubleSpend to begin with. - publErr = lnwallet.ErrDoubleSpend - brar.cfg.PublishTransaction = func(tx *wire.MsgTx, _ string) er.R { - publMtx.Lock() - var err er.R - if publErr != nil { - err = publErr.Default() - } - publMtx.Unlock() - publTx <- tx - - return err - } - - // Notify the breach arbiter about the breach. - retribution, err := lnwallet.NewBreachRetribution( - alice.State(), height, 1, - ) - if err != nil { - t.Fatalf("unable to create breach retribution: %v", err) - } - - breach := &ContractBreachEvent{ - ChanPoint: *chanPoint, - ProcessACK: make(chan er.R, 1), - BreachRetribution: retribution, - } - contractBreaches <- breach - - // We'll also wait to consume the ACK back from the breach arbiter. 
- select { - case err := <-breach.ProcessACK: - if err != nil { - t.Fatalf("handoff failed: %v", err) - } - case <-time.After(time.Second * 15): - t.Fatalf("breach arbiter didn't send ack back") - } - - state := alice.State() - err = state.CloseChannel(&channeldb.ChannelCloseSummary{ - ChanPoint: state.FundingOutpoint, - ChainHash: state.ChainHash, - RemotePub: state.IdentityPub, - CloseType: channeldb.BreachClose, - Capacity: state.Capacity, - IsPending: true, - ShortChanID: state.ShortChanID(), - RemoteCurrentRevocation: state.RemoteCurrentRevocation, - RemoteNextRevocation: state.RemoteNextRevocation, - LocalChanConfig: state.LocalChanCfg, - }) - if err != nil { - t.Fatalf("unable to close channel: %v", err) - } - - // After exiting, the breach arbiter should have persisted the - // retribution information and the channel should be shown as pending - // force closed. - assertArbiterBreach(t, brar, chanPoint) - - // Assert that the database sees the channel as pending close, otherwise - // the breach arbiter won't be able to fully close it. - assertPendingClosed(t, alice) - - // Notify that the breaching transaction is confirmed, to trigger the - // retribution logic. - notifier := brar.cfg.Notifier.(*mock.SpendNotifier) - notifier.ConfChan <- &chainntnfs.TxConfirmation{} - - // The breach arbiter should attempt to sweep all outputs on the - // breached commitment. We'll pretend that the HTLC output has been - // spent by the channel counter party's second level tx already. - var tx *wire.MsgTx - select { - case tx = <-publTx: - case <-time.After(5 * time.Second): - t.Fatalf("tx was not published") - } - - // All outputs should initially spend from the force closed txn. 
- forceTxID := forceCloseTx.TxHash() - for _, txIn := range tx.TxIn { - if txIn.PreviousOutPoint.Hash != forceTxID { - t.Fatalf("og justice tx not spending commitment") - } - } - - localOutpoint := retribution.LocalOutpoint - remoteOutpoint := retribution.RemoteOutpoint - htlcOutpoint := retribution.HtlcRetributions[0].OutPoint - - // Construct a map from outpoint on the force close to the transaction - // we want it to be spent by. As the test progresses, this map will be - // updated to contain only the set of commitment or second level - // outpoints that remain to be spent. - inputs := map[wire.OutPoint]*wire.MsgTx{ - htlcOutpoint: htlc2ndLevlTx, - localOutpoint: commitSpendTx, - remoteOutpoint: commitSpendTx, - } - - // Until no more inputs to spend remain, deliver the spend events and - // process the assertions prescribed by the test case. - for len(inputs) > 0 { - var ( - op wire.OutPoint - spendTx *wire.MsgTx - ) - - // Pick an outpoint at random from the set of inputs. - for op, spendTx = range inputs { - delete(inputs, op) - break - } - - // Deliver the spend notification for the chosen transaction. - notifier.Spend(&op, 2, spendTx) - - // When the second layer transfer is detected, add back the - // outpoint of the second layer tx so that we can spend it - // again. Only do so if the test requests this behavior. - spendTxID := spendTx.TxHash() - if test.spend2ndLevel && spendTxID == htlc2ndLevlTx.TxHash() { - // Create the second level outpoint that will be spent, - // the index is always zero for these 1-in-1-out txns. - spendOp := wire.OutPoint{Hash: spendTxID} - inputs[spendOp] = htlcSpendTx - } - - if len(inputs) > 0 { - test.whenNonZeroInputs(t, inputs, publTx) - } else { - // Reset the publishing error so that any publication, - // made by the breach arbiter, if any, will succeed. - publMtx.Lock() - publErr = nil - publMtx.Unlock() - test.whenZeroInputs(t, inputs, publTx) - } - } - - // Deliver confirmation of sweep if the test expects it. 
- if test.sendFinalConf { - notifier.ConfChan <- &chainntnfs.TxConfirmation{} - } - - // Assert that the channel is fully resolved. - assertBrarCleanup(t, brar, alice.ChanPoint, alice.State().Db) -} - -// findInputIndex returns the index of the input that spends from the given -// outpoint. This method fails if the outpoint is not found. -func findInputIndex(t *testing.T, op wire.OutPoint, tx *wire.MsgTx) int { - t.Helper() - - inputIdx := -1 - for i, txIn := range tx.TxIn { - if txIn.PreviousOutPoint == op { - inputIdx = i - } - } - if inputIdx == -1 { - t.Fatalf("input %v in not found", op) - } - - return inputIdx -} - -// assertArbiterBreach checks that the breach arbiter has persisted the breach -// information for a particular channel. -func assertArbiterBreach(t *testing.T, brar *breachArbiter, - chanPoint *wire.OutPoint) { - - t.Helper() - - isBreached, err := brar.IsBreached(chanPoint) - if err != nil { - t.Fatalf("unable to determine if channel is "+ - "breached: %v", err) - } - - if !isBreached { - t.Fatalf("channel %v was never marked breached", - chanPoint) - } - -} - -// assertNoArbiterBreach checks that the breach arbiter has not persisted the -// breach information for a particular channel. -func assertNoArbiterBreach(t *testing.T, brar *breachArbiter, - chanPoint *wire.OutPoint) { - - t.Helper() - - isBreached, err := brar.IsBreached(chanPoint) - if err != nil { - t.Fatalf("unable to determine if channel is "+ - "breached: %v", err) - } - - if isBreached { - t.Fatalf("channel %v was marked breached", - chanPoint) - } -} - -// assertBrarCleanup blocks until the given channel point has been removed the -// retribution store and the channel is fully closed in the database. 
-func assertBrarCleanup(t *testing.T, brar *breachArbiter, - chanPoint *wire.OutPoint, db *channeldb.DB) { - - t.Helper() - - err := wait.NoError(func() er.R { - isBreached, err := brar.IsBreached(chanPoint) - if err != nil { - return err - } - - if isBreached { - return er.Errorf("channel %v still breached", - chanPoint) - } - - closedChans, err := db.FetchClosedChannels(false) - if err != nil { - return err - } - - for _, channel := range closedChans { - switch { - // Wrong channel. - case channel.ChanPoint != *chanPoint: - continue - - // Right channel, fully closed! - case !channel.IsPending: - return nil - } - - // Still pending. - return er.Errorf("channel %v still pending "+ - "close", chanPoint) - } - - return er.Errorf("channel %v not closed", chanPoint) - - }, time.Second) - if err != nil { - t.Fatalf(err.String()) - } -} - -// assertPendingClosed checks that the channel has been marked pending closed in -// the channel database. -func assertPendingClosed(t *testing.T, c *lnwallet.LightningChannel) { - t.Helper() - - closedChans, err := c.State().Db.FetchClosedChannels(true) - if err != nil { - t.Fatalf("unable to load pending closed channels: %v", err) - } - - for _, chanSummary := range closedChans { - if chanSummary.ChanPoint == *c.ChanPoint { - return - } - } - - t.Fatalf("channel %v was not marked pending closed", c.ChanPoint) -} - -// assertNotPendingClosed checks that the channel has not been marked pending -// closed in the channel database. 
-func assertNotPendingClosed(t *testing.T, c *lnwallet.LightningChannel) { - t.Helper() - - closedChans, err := c.State().Db.FetchClosedChannels(true) - if err != nil { - t.Fatalf("unable to load pending closed channels: %v", err) - } - - for _, chanSummary := range closedChans { - if chanSummary.ChanPoint == *c.ChanPoint { - t.Fatalf("channel %v was marked pending closed", - c.ChanPoint) - } - } -} - -// createTestArbiter instantiates a breach arbiter with a failing retribution -// store, so that controlled failures can be tested. -func createTestArbiter(t *testing.T, contractBreaches chan *ContractBreachEvent, - db *channeldb.DB) (*breachArbiter, func(), er.R) { - - // Create a failing retribution store, that wraps a normal one. - store := newFailingRetributionStore(func() RetributionStore { - return newRetributionStore(db) - }) - - aliceKeyPriv, _ := btcec.PrivKeyFromBytes(btcec.S256(), - alicesPrivKey) - signer := &mock.SingleSigner{Privkey: aliceKeyPriv} - - // Assemble our test arbiter. - notifier := mock.MakeMockSpendNotifier() - ba := newBreachArbiter(&BreachConfig{ - CloseLink: func(_ *wire.OutPoint, _ htlcswitch.ChannelCloseType) {}, - DB: db, - Estimator: chainfee.NewStaticEstimator(12500, 0), - GenSweepScript: func() ([]byte, er.R) { return nil, nil }, - ContractBreaches: contractBreaches, - Signer: signer, - Notifier: notifier, - PublishTransaction: func(_ *wire.MsgTx, _ string) er.R { return nil }, - Store: store, - }) - - if err := ba.Start(); err != nil { - return nil, nil, err - } - - // The caller is responsible for closing the database. - cleanUp := func() { - ba.Stop() - } - - return ba, cleanUp, nil -} - -// createInitChannels creates two initialized test channels funded with 10 BTC, -// with 5 BTC allocated to each side. Within the channel, Alice is the -// initiator. 
-func createInitChannels(revocationWindow int) (*lnwallet.LightningChannel, *lnwallet.LightningChannel, func(), er.R) { - - aliceKeyPriv, aliceKeyPub := btcec.PrivKeyFromBytes(btcec.S256(), - alicesPrivKey) - bobKeyPriv, bobKeyPub := btcec.PrivKeyFromBytes(btcec.S256(), - bobsPrivKey) - - channelCapacity, err := btcutil.NewAmount(10) - if err != nil { - return nil, nil, nil, err - } - - channelBal := channelCapacity / 2 - aliceDustLimit := btcutil.Amount(200) - bobDustLimit := btcutil.Amount(1300) - csvTimeoutAlice := uint32(5) - csvTimeoutBob := uint32(4) - - prevOut := &wire.OutPoint{ - Hash: chainhash.Hash(testHdSeed), - Index: 0, - } - fundingTxIn := wire.NewTxIn(prevOut, nil, nil) - - aliceCfg := channeldb.ChannelConfig{ - ChannelConstraints: channeldb.ChannelConstraints{ - DustLimit: aliceDustLimit, - MaxPendingAmount: lnwire.MilliSatoshi(rand.Int63()), - ChanReserve: 0, - MinHTLC: 0, - MaxAcceptedHtlcs: uint16(rand.Int31()), - CsvDelay: uint16(csvTimeoutAlice), - }, - MultiSigKey: keychain.KeyDescriptor{ - PubKey: aliceKeyPub, - }, - RevocationBasePoint: keychain.KeyDescriptor{ - PubKey: aliceKeyPub, - }, - PaymentBasePoint: keychain.KeyDescriptor{ - PubKey: aliceKeyPub, - }, - DelayBasePoint: keychain.KeyDescriptor{ - PubKey: aliceKeyPub, - }, - HtlcBasePoint: keychain.KeyDescriptor{ - PubKey: aliceKeyPub, - }, - } - bobCfg := channeldb.ChannelConfig{ - ChannelConstraints: channeldb.ChannelConstraints{ - DustLimit: bobDustLimit, - MaxPendingAmount: lnwire.MilliSatoshi(rand.Int63()), - ChanReserve: 0, - MinHTLC: 0, - MaxAcceptedHtlcs: uint16(rand.Int31()), - CsvDelay: uint16(csvTimeoutBob), - }, - MultiSigKey: keychain.KeyDescriptor{ - PubKey: bobKeyPub, - }, - RevocationBasePoint: keychain.KeyDescriptor{ - PubKey: bobKeyPub, - }, - PaymentBasePoint: keychain.KeyDescriptor{ - PubKey: bobKeyPub, - }, - DelayBasePoint: keychain.KeyDescriptor{ - PubKey: bobKeyPub, - }, - HtlcBasePoint: keychain.KeyDescriptor{ - PubKey: bobKeyPub, - }, - } - - bobRoot, err := 
chainhash.NewHash(bobKeyPriv.Serialize()) - if err != nil { - return nil, nil, nil, err - } - bobPreimageProducer := shachain.NewRevocationProducer(*bobRoot) - bobFirstRevoke, err := bobPreimageProducer.AtIndex(0) - if err != nil { - return nil, nil, nil, err - } - bobCommitPoint := input.ComputeCommitmentPoint(bobFirstRevoke[:]) - - aliceRoot, err := chainhash.NewHash(aliceKeyPriv.Serialize()) - if err != nil { - return nil, nil, nil, err - } - alicePreimageProducer := shachain.NewRevocationProducer(*aliceRoot) - aliceFirstRevoke, err := alicePreimageProducer.AtIndex(0) - if err != nil { - return nil, nil, nil, err - } - aliceCommitPoint := input.ComputeCommitmentPoint(aliceFirstRevoke[:]) - - aliceCommitTx, bobCommitTx, err := lnwallet.CreateCommitmentTxns( - channelBal, channelBal, &aliceCfg, &bobCfg, aliceCommitPoint, - bobCommitPoint, *fundingTxIn, channeldb.SingleFunderTweaklessBit, - ) - if err != nil { - return nil, nil, nil, err - } - - alicePath, errr := ioutil.TempDir("", "alicedb") - if errr != nil { - return nil, nil, nil, er.E(errr) - } - - dbAlice, err := channeldb.Open(alicePath) - if err != nil { - return nil, nil, nil, err - } - - bobPath, errr := ioutil.TempDir("", "bobdb") - if errr != nil { - return nil, nil, nil, er.E(errr) - } - - dbBob, err := channeldb.Open(bobPath) - if err != nil { - return nil, nil, nil, err - } - - estimator := chainfee.NewStaticEstimator(12500, 0) - feePerKw, err := estimator.EstimateFeePerKW(1) - if err != nil { - return nil, nil, nil, err - } - - // TODO(roasbeef): need to factor in commit fee? 
- aliceCommit := channeldb.ChannelCommitment{ - CommitHeight: 0, - LocalBalance: lnwire.NewMSatFromSatoshis(channelBal), - RemoteBalance: lnwire.NewMSatFromSatoshis(channelBal), - FeePerKw: btcutil.Amount(feePerKw), - CommitFee: 8688, - CommitTx: aliceCommitTx, - CommitSig: bytes.Repeat([]byte{1}, 71), - } - bobCommit := channeldb.ChannelCommitment{ - CommitHeight: 0, - LocalBalance: lnwire.NewMSatFromSatoshis(channelBal), - RemoteBalance: lnwire.NewMSatFromSatoshis(channelBal), - FeePerKw: btcutil.Amount(feePerKw), - CommitFee: 8688, - CommitTx: bobCommitTx, - CommitSig: bytes.Repeat([]byte{1}, 71), - } - - var chanIDBytes [8]byte - if _, err := util.ReadFull(crand.Reader, chanIDBytes[:]); err != nil { - return nil, nil, nil, err - } - - shortChanID := lnwire.NewShortChanIDFromInt( - binary.BigEndian.Uint64(chanIDBytes[:]), - ) - - aliceChannelState := &channeldb.OpenChannel{ - LocalChanCfg: aliceCfg, - RemoteChanCfg: bobCfg, - IdentityPub: aliceKeyPub, - FundingOutpoint: *prevOut, - ShortChannelID: shortChanID, - ChanType: channeldb.SingleFunderTweaklessBit, - IsInitiator: true, - Capacity: channelCapacity, - RemoteCurrentRevocation: bobCommitPoint, - RevocationProducer: alicePreimageProducer, - RevocationStore: shachain.NewRevocationStore(), - LocalCommitment: aliceCommit, - RemoteCommitment: aliceCommit, - Db: dbAlice, - Packager: channeldb.NewChannelPackager(shortChanID), - FundingTxn: testTx, - } - bobChannelState := &channeldb.OpenChannel{ - LocalChanCfg: bobCfg, - RemoteChanCfg: aliceCfg, - IdentityPub: bobKeyPub, - FundingOutpoint: *prevOut, - ShortChannelID: shortChanID, - ChanType: channeldb.SingleFunderTweaklessBit, - IsInitiator: false, - Capacity: channelCapacity, - RemoteCurrentRevocation: aliceCommitPoint, - RevocationProducer: bobPreimageProducer, - RevocationStore: shachain.NewRevocationStore(), - LocalCommitment: bobCommit, - RemoteCommitment: bobCommit, - Db: dbBob, - Packager: channeldb.NewChannelPackager(shortChanID), - } - - aliceSigner := 
&mock.SingleSigner{Privkey: aliceKeyPriv} - bobSigner := &mock.SingleSigner{Privkey: bobKeyPriv} - - alicePool := lnwallet.NewSigPool(1, aliceSigner) - channelAlice, err := lnwallet.NewLightningChannel( - aliceSigner, aliceChannelState, alicePool, - ) - if err != nil { - return nil, nil, nil, err - } - alicePool.Start() - - bobPool := lnwallet.NewSigPool(1, bobSigner) - channelBob, err := lnwallet.NewLightningChannel( - bobSigner, bobChannelState, bobPool, - ) - if err != nil { - return nil, nil, nil, err - } - bobPool.Start() - - addr := &net.TCPAddr{ - IP: net.ParseIP("127.0.0.1"), - Port: 18556, - } - if err := channelAlice.State().SyncPending(addr, 101); err != nil { - return nil, nil, nil, err - } - - addr = &net.TCPAddr{ - IP: net.ParseIP("127.0.0.1"), - Port: 18555, - } - if err := channelBob.State().SyncPending(addr, 101); err != nil { - return nil, nil, nil, err - } - - cleanUpFunc := func() { - dbBob.Close() - dbAlice.Close() - os.RemoveAll(bobPath) - os.RemoveAll(alicePath) - } - - // Now that the channel are open, simulate the start of a session by - // having Alice and Bob extend their revocation windows to each other. - err = initRevocationWindows(channelAlice, channelBob, revocationWindow) - if err != nil { - return nil, nil, nil, err - } - - return channelAlice, channelBob, cleanUpFunc, nil -} - -// initRevocationWindows simulates a new channel being opened within the p2p -// network by populating the initial revocation windows of the passed -// commitment state machines. 
-// -// TODO(conner) remove code duplication -func initRevocationWindows(chanA, chanB *lnwallet.LightningChannel, windowSize int) er.R { - aliceNextRevoke, err := chanA.NextRevocationKey() - if err != nil { - return err - } - if err := chanB.InitNextRevocation(aliceNextRevoke); err != nil { - return err - } - - bobNextRevoke, err := chanB.NextRevocationKey() - if err != nil { - return err - } - if err := chanA.InitNextRevocation(bobNextRevoke); err != nil { - return err - } - - return nil -} - -// createHTLC is a utility function for generating an HTLC with a given -// preimage and a given amount. -// TODO(conner) remove code duplication -func createHTLC(data int, amount lnwire.MilliSatoshi) (*lnwire.UpdateAddHTLC, [32]byte) { - preimage := bytes.Repeat([]byte{byte(data)}, 32) - paymentHash := sha256.Sum256(preimage) - - var returnPreimage [32]byte - copy(returnPreimage[:], preimage) - - return &lnwire.UpdateAddHTLC{ - ID: uint64(data), - PaymentHash: paymentHash, - Amount: amount, - Expiry: uint32(5), - }, returnPreimage -} - -// forceStateTransition executes the necessary interaction between the two -// commitment state machines to transition to a new state locking in any -// pending updates. 
-// TODO(conner) remove code duplication -func forceStateTransition(chanA, chanB *lnwallet.LightningChannel) er.R { - aliceSig, aliceHtlcSigs, _, err := chanA.SignNextCommitment() - if err != nil { - return err - } - if err = chanB.ReceiveNewCommitment(aliceSig, aliceHtlcSigs); err != nil { - return err - } - - bobRevocation, _, err := chanB.RevokeCurrentCommitment() - if err != nil { - return err - } - bobSig, bobHtlcSigs, _, err := chanB.SignNextCommitment() - if err != nil { - return err - } - - _, _, _, _, err = chanA.ReceiveRevocation(bobRevocation) - if err != nil { - return err - } - if err := chanA.ReceiveNewCommitment(bobSig, bobHtlcSigs); err != nil { - return err - } - - aliceRevocation, _, err := chanA.RevokeCurrentCommitment() - if err != nil { - return err - } - _, _, _, _, err = chanB.ReceiveRevocation(aliceRevocation) - if err != nil { - return err - } - - return nil -} - -func TestMain(m *testing.M) { - globalcfg.SelectConfig(globalcfg.BitcoinDefaults()) - os.Exit(m.Run()) -} diff --git a/lnd/brontide/README.md b/lnd/brontide/README.md deleted file mode 100644 index 0f0c6fbd..00000000 --- a/lnd/brontide/README.md +++ /dev/null @@ -1,28 +0,0 @@ -brontide -========== - -[![Build Status](http://img.shields.io/travis/lightningnetwork/lnd.svg)](https://travis-ci.org/lightningnetwork/lnd) -[![MIT licensed](https://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/lightningnetwork/lnd/blob/master/LICENSE) -[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/lightningnetwork/lnd/brontide) - -The brontide package implements a secure crypto messaging protocol based off of -the [Noise Protocol Framework](http://noiseprotocol.org/noise.html). The -package exposes the raw state machine that handles the handshake and subsequent -message encryption/decryption scheme. 
Additionally, the package exposes a -[net.Conn](https://golang.org/pkg/net/#Conn) and a -[net.Listener](https://golang.org/pkg/net/#Listener) interface implementation -which allows the encrypted transport to be seamlessly integrated into a -codebase. - -The secure messaging scheme implemented within this package is described in -detail in [BOLT #8 of the Lightning Network specifications](https://github.com/lightningnetwork/lightning-rfc/blob/master/08-transport.md). - -This package has intentionally been designed so it can be used as a standalone -package for any projects needing secure encrypted+authenticated communications -between network enabled programs. - -## Installation and Updating - -```bash -$ go get -u github.com/lightningnetwork/lnd/brontide -``` diff --git a/lnd/brontide/conn.go b/lnd/brontide/conn.go deleted file mode 100644 index 621e51ce..00000000 --- a/lnd/brontide/conn.go +++ /dev/null @@ -1,291 +0,0 @@ -package brontide - -import ( - "bytes" - "math" - "net" - "time" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/lnd/keychain" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/tor" -) - -// Conn is an implementation of net.Conn which enforces an authenticated key -// exchange and message encryption protocol dubbed "Brontide" after initial TCP -// connection establishment. In the case of a successful handshake, all -// messages sent via the .Write() method are encrypted with an AEAD cipher -// along with an encrypted length-prefix. See the Machine struct for -// additional details w.r.t to the handshake and encryption scheme. -type Conn struct { - conn net.Conn - - noise *Machine - - readBuf bytes.Buffer -} - -// A compile-time assertion to ensure that Conn meets the net.Conn interface. 
-var _ net.Conn = (*Conn)(nil) - -// Dial attempts to establish an encrypted+authenticated connection with the -// remote peer located at address which has remotePub as its long-term static -// public key. In the case of a handshake failure, the connection is closed and -// a non-nil error is returned. -func Dial(local keychain.SingleKeyECDH, netAddr *lnwire.NetAddress, - timeout time.Duration, dialer tor.DialFunc) (*Conn, er.R) { - - ipAddr := netAddr.Address.String() - var conn net.Conn - var err er.R - conn, err = dialer("tcp", ipAddr, timeout) - if err != nil { - return nil, err - } - - b := &Conn{ - conn: conn, - noise: NewBrontideMachine(true, local, netAddr.IdentityKey), - } - - // Initiate the handshake by sending the first act to the receiver. - actOne, err := b.noise.GenActOne() - if err != nil { - b.conn.Close() - return nil, err - } - if _, err := conn.Write(actOne[:]); err != nil { - b.conn.Close() - return nil, er.E(err) - } - - // We'll ensure that we get ActTwo from the remote peer in a timely - // manner. If they don't respond within 1s, then we'll kill the - // connection. - err = er.E(conn.SetReadDeadline(time.Now().Add(handshakeReadTimeout))) - if err != nil { - b.conn.Close() - return nil, err - } - - // If the first act was successful (we know that address is actually - // remotePub), then read the second act after which we'll be able to - // send our static public key to the remote peer with strong forward - // secrecy. - var actTwo [ActTwoSize]byte - if _, err := util.ReadFull(conn, actTwo[:]); err != nil { - b.conn.Close() - return nil, err - } - if err := b.noise.RecvActTwo(actTwo); err != nil { - b.conn.Close() - return nil, err - } - - // Finally, complete the handshake by sending over our encrypted static - // key and execute the final ECDH operation. 
- actThree, err := b.noise.GenActThree() - if err != nil { - b.conn.Close() - return nil, err - } - if _, err := conn.Write(actThree[:]); err != nil { - b.conn.Close() - return nil, er.E(err) - } - - // We'll reset the deadline as it's no longer critical beyond the - // initial handshake. - err = er.E(conn.SetReadDeadline(time.Time{})) - if err != nil { - b.conn.Close() - return nil, err - } - - return b, nil -} - -// ReadNextMessage uses the connection in a message-oriented manner, instructing -// it to read the next _full_ message with the brontide stream. This function -// will block until the read of the header and body succeeds. -// -// NOTE: This method SHOULD NOT be used in the case that the connection may be -// adversarial and induce long delays. If the caller needs to set read deadlines -// appropriately, it is preferred that they use the split ReadNextHeader and -// ReadNextBody methods so that the deadlines can be set appropriately on each. -func (c *Conn) ReadNextMessage() ([]byte, er.R) { - return c.noise.ReadMessage(c.conn) -} - -// ReadNextHeader uses the connection to read the next header from the brontide -// stream. This function will block until the read of the header succeeds and -// return the packet length (including MAC overhead) that is expected from the -// subsequent call to ReadNextBody. -func (c *Conn) ReadNextHeader() (uint32, er.R) { - return c.noise.ReadHeader(c.conn) -} - -// ReadNextBody uses the connection to read the next message body from the -// brontide stream. This function will block until the read of the body succeeds -// and return the decrypted payload. The provided buffer MUST be the packet -// length returned by the preceding call to ReadNextHeader. -func (c *Conn) ReadNextBody(buf []byte) ([]byte, er.R) { - return c.noise.ReadBody(c.conn, buf) -} - -// Read reads data from the connection. 
Read can be made to time out and -// return an Error with Timeout() == true after a fixed time limit; see -// SetDeadline and SetReadDeadline. -// -// Part of the net.Conn interface. -func (c *Conn) Read(b []byte) (n int, err error) { - // In order to reconcile the differences between the record abstraction - // of our AEAD connection, and the stream abstraction of TCP, we - // maintain an intermediate read buffer. If this buffer becomes - // depleted, then we read the next record, and feed it into the - // buffer. Otherwise, we read directly from the buffer. - if c.readBuf.Len() == 0 { - plaintext, err := c.noise.ReadMessage(c.conn) - if err != nil { - return 0, er.Native(err) - } - - if _, err := c.readBuf.Write(plaintext); err != nil { - return 0, err - } - } - - return c.readBuf.Read(b) -} - -// Write writes data to the connection. Write can be made to time out and -// return an Error with Timeout() == true after a fixed time limit; see -// SetDeadline and SetWriteDeadline. -// -// Part of the net.Conn interface. -func (c *Conn) Write(b []byte) (int, error) { - // If the message doesn't require any chunking, then we can go ahead - // with a single write. - if len(b) <= math.MaxUint16 { - err := c.noise.WriteMessage(b) - if err != nil { - return 0, er.Native(err) - } - i, e := c.noise.Flush(c.conn) - return i, er.Native(e) - } - - // If we need to split the message into fragments, then we'll write - // chunks which maximize usage of the available payload. - chunkSize := math.MaxUint16 - - bytesToWrite := len(b) - bytesWritten := 0 - for bytesWritten < bytesToWrite { - // If we're on the last chunk, then truncate the chunk size as - // necessary to avoid an out-of-bounds array memory access. - if bytesWritten+chunkSize > len(b) { - chunkSize = len(b) - bytesWritten - } - - // Slice off the next chunk to be written based on our running - // counter and next chunk size. 
- chunk := b[bytesWritten : bytesWritten+chunkSize] - if err := c.noise.WriteMessage(chunk); err != nil { - return bytesWritten, er.Native(err) - } - - n, err := c.noise.Flush(c.conn) - bytesWritten += n - if err != nil { - return bytesWritten, er.Native(err) - } - } - - return bytesWritten, nil -} - -// WriteMessage encrypts and buffers the next message p for the connection. The -// ciphertext of the message is prepended with an encrypt+auth'd length which -// must be used as the AD to the AEAD construction when being decrypted by the -// other side. -// -// NOTE: This DOES NOT write the message to the wire, it should be followed by a -// call to Flush to ensure the message is written. -func (c *Conn) WriteMessage(b []byte) er.R { - return c.noise.WriteMessage(b) -} - -// Flush attempts to write a message buffered using WriteMessage to the -// underlying connection. If no buffered message exists, this will result in a -// NOP. Otherwise, it will continue to write the remaining bytes, picking up -// where the byte stream left off in the event of a partial write. The number of -// bytes returned reflects the number of plaintext bytes in the payload, and -// does not account for the overhead of the header or MACs. -// -// NOTE: It is safe to call this method again iff a timeout error is returned. -func (c *Conn) Flush() (int, er.R) { - return c.noise.Flush(c.conn) -} - -// Close closes the connection. Any blocked Read or Write operations will be -// unblocked and return errors. -// -// Part of the net.Conn interface. -func (c *Conn) Close() error { - // TODO(roasbeef): reset brontide state? - return c.conn.Close() -} - -// LocalAddr returns the local network address. -// -// Part of the net.Conn interface. -func (c *Conn) LocalAddr() net.Addr { - return c.conn.LocalAddr() -} - -// RemoteAddr returns the remote network address. -// -// Part of the net.Conn interface. 
-func (c *Conn) RemoteAddr() net.Addr { - return c.conn.RemoteAddr() -} - -// SetDeadline sets the read and write deadlines associated with the -// connection. It is equivalent to calling both SetReadDeadline and -// SetWriteDeadline. -// -// Part of the net.Conn interface. -func (c *Conn) SetDeadline(t time.Time) error { - return c.conn.SetDeadline(t) -} - -// SetReadDeadline sets the deadline for future Read calls. A zero value for t -// means Read will not time out. -// -// Part of the net.Conn interface. -func (c *Conn) SetReadDeadline(t time.Time) error { - return c.conn.SetReadDeadline(t) -} - -// SetWriteDeadline sets the deadline for future Write calls. Even if write -// times out, it may return n > 0, indicating that some of the data was -// successfully written. A zero value for t means Write will not time out. -// -// Part of the net.Conn interface. -func (c *Conn) SetWriteDeadline(t time.Time) error { - return c.conn.SetWriteDeadline(t) -} - -// RemotePub returns the remote peer's static public key. -func (c *Conn) RemotePub() *btcec.PublicKey { - return c.noise.remoteStatic -} - -// LocalPub returns the local peer's static public key. -func (c *Conn) LocalPub() *btcec.PublicKey { - return c.noise.localStatic.PubKey() -} diff --git a/lnd/brontide/listener.go b/lnd/brontide/listener.go deleted file mode 100644 index db75de60..00000000 --- a/lnd/brontide/listener.go +++ /dev/null @@ -1,256 +0,0 @@ -package brontide - -import ( - "net" - "time" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/lnd/keychain" -) - -// defaultHandshakes is the maximum number of handshakes that can be done in -// parallel. -const defaultHandshakes = 1000 - -// Listener is an implementation of a net.Conn which executes an authenticated -// key exchange and message encryption protocol dubbed "Machine" after -// initial connection acceptance. 
See the Machine struct for additional -// details w.r.t the handshake and encryption scheme used within the -// connection. -type Listener struct { - localStatic keychain.SingleKeyECDH - - tcp *net.TCPListener - - handshakeSema chan struct{} - conns chan maybeConn - quit chan struct{} -} - -// A compile-time assertion to ensure that Conn meets the net.Listener interface. -var _ net.Listener = (*Listener)(nil) - -// NewListener returns a new net.Listener which enforces the Brontide scheme -// during both initial connection establishment and data transfer. -func NewListener(localStatic keychain.SingleKeyECDH, - listenAddr string) (*Listener, er.R) { - - addr, err := net.ResolveTCPAddr("tcp", listenAddr) - if err != nil { - return nil, er.E(err) - } - - l, err := net.ListenTCP("tcp", addr) - if err != nil { - return nil, er.E(err) - } - - brontideListener := &Listener{ - localStatic: localStatic, - tcp: l, - handshakeSema: make(chan struct{}, defaultHandshakes), - conns: make(chan maybeConn), - quit: make(chan struct{}), - } - - for i := 0; i < defaultHandshakes; i++ { - brontideListener.handshakeSema <- struct{}{} - } - - go brontideListener.listen() - - return brontideListener, nil -} - -// listen accepts connection from the underlying tcp conn, then performs -// the brontinde handshake procedure asynchronously. A maximum of -// defaultHandshakes will be active at any given time. -// -// NOTE: This method must be run as a goroutine. -func (l *Listener) listen() { - for { - select { - case <-l.handshakeSema: - case <-l.quit: - return - } - - conn, err := l.tcp.Accept() - if err != nil { - l.rejectConn(er.E(err)) - l.handshakeSema <- struct{}{} - continue - } - - go l.doHandshake(conn) - } -} - -// rejectedConnErr is a helper function that prepends the remote address of the -// failed connection attempt to the original error message. 
-func rejectedConnErr(err er.R, remoteAddr string) er.R { - return er.Errorf("unable to accept connection from %v: %v", remoteAddr, - err) -} - -// doHandshake asynchronously performs the brontide handshake, so that it does -// not block the main accept loop. This prevents peers that delay writing to the -// connection from block other connection attempts. -func (l *Listener) doHandshake(conn net.Conn) { - defer func() { l.handshakeSema <- struct{}{} }() - - select { - case <-l.quit: - return - default: - } - - remoteAddr := conn.RemoteAddr().String() - - brontideConn := &Conn{ - conn: conn, - noise: NewBrontideMachine(false, l.localStatic, nil), - } - - // We'll ensure that we get ActOne from the remote peer in a timely - // manner. If they don't respond within 1s, then we'll kill the - // connection. - errr := conn.SetReadDeadline(time.Now().Add(handshakeReadTimeout)) - if errr != nil { - brontideConn.conn.Close() - l.rejectConn(rejectedConnErr(er.E(errr), remoteAddr)) - return - } - - // Attempt to carry out the first act of the handshake protocol. If the - // connecting node doesn't know our long-term static public key, then - // this portion will fail with a non-nil error. - var actOne [ActOneSize]byte - if _, err := util.ReadFull(conn, actOne[:]); err != nil { - brontideConn.conn.Close() - l.rejectConn(rejectedConnErr(err, remoteAddr)) - return - } - if err := brontideConn.noise.RecvActOne(actOne); err != nil { - brontideConn.conn.Close() - l.rejectConn(rejectedConnErr(err, remoteAddr)) - return - } - - // Next, progress the handshake processes by sending over our ephemeral - // key for the session along with an authenticating tag. 
- actTwo, err := brontideConn.noise.GenActTwo() - if err != nil { - brontideConn.conn.Close() - l.rejectConn(rejectedConnErr(err, remoteAddr)) - return - } - if _, err := conn.Write(actTwo[:]); err != nil { - brontideConn.conn.Close() - l.rejectConn(rejectedConnErr(er.E(err), remoteAddr)) - return - } - - select { - case <-l.quit: - return - default: - } - - // We'll ensure that we get ActTwo from the remote peer in a timely - // manner. If they don't respond within 1 second, then we'll kill the - // connection. - errr = conn.SetReadDeadline(time.Now().Add(handshakeReadTimeout)) - if errr != nil { - brontideConn.conn.Close() - l.rejectConn(rejectedConnErr(er.E(errr), remoteAddr)) - return - } - - // Finally, finish the handshake processes by reading and decrypting - // the connection peer's static public key. If this succeeds then both - // sides have mutually authenticated each other. - var actThree [ActThreeSize]byte - if _, err := util.ReadFull(conn, actThree[:]); err != nil { - brontideConn.conn.Close() - l.rejectConn(rejectedConnErr(err, remoteAddr)) - return - } - if err := brontideConn.noise.RecvActThree(actThree); err != nil { - brontideConn.conn.Close() - l.rejectConn(rejectedConnErr(err, remoteAddr)) - return - } - - // We'll reset the deadline as it's no longer critical beyond the - // initial handshake. - errr = conn.SetReadDeadline(time.Time{}) - if errr != nil { - brontideConn.conn.Close() - l.rejectConn(rejectedConnErr(er.E(errr), remoteAddr)) - return - } - - l.acceptConn(brontideConn) -} - -// maybeConn holds either a brontide connection or an error returned from the -// handshake. -type maybeConn struct { - conn *Conn - err er.R -} - -// acceptConn returns a connection that successfully performed a handshake. -func (l *Listener) acceptConn(conn *Conn) { - select { - case l.conns <- maybeConn{conn: conn}: - case <-l.quit: - } -} - -// rejectConn returns any errors encountered during connection or handshake. 
-func (l *Listener) rejectConn(err er.R) { - select { - case l.conns <- maybeConn{err: err}: - case <-l.quit: - } -} - -// Accept waits for and returns the next connection to the listener. All -// incoming connections are authenticated via the three act Brontide -// key-exchange scheme. This function will fail with a non-nil error in the -// case that either the handshake breaks down, or the remote peer doesn't know -// our static public key. -// -// Part of the net.Listener interface. -func (l *Listener) Accept() (net.Conn, error) { - select { - case result := <-l.conns: - return result.conn, er.Native(result.err) - case <-l.quit: - return nil, er.Native(er.New("brontide connection closed")) - } -} - -// Close closes the listener. Any blocked Accept operations will be unblocked -// and return errors. -// -// Part of the net.Listener interface. -func (l *Listener) Close() error { - select { - case <-l.quit: - default: - close(l.quit) - } - - return l.tcp.Close() -} - -// Addr returns the listener's network address. -// -// Part of the net.Listener interface. -func (l *Listener) Addr() net.Addr { - return l.tcp.Addr() -} diff --git a/lnd/brontide/noise.go b/lnd/brontide/noise.go deleted file mode 100644 index 68c1870c..00000000 --- a/lnd/brontide/noise.go +++ /dev/null @@ -1,909 +0,0 @@ -package brontide - -import ( - "crypto/cipher" - "crypto/sha256" - "encoding/binary" - "io" - "math" - "time" - - "golang.org/x/crypto/chacha20poly1305" - "golang.org/x/crypto/hkdf" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/lnd/keychain" -) - -const ( - // protocolName is the precise instantiation of the Noise protocol - // handshake at the center of Brontide. This value will be used as part - // of the prologue. 
If the initiator and responder aren't using the - // exact same string for this value, along with prologue of the Bitcoin - // network, then the initial handshake will fail. - protocolName = "Noise_XK_secp256k1_ChaChaPoly_SHA256" - - // macSize is the length in bytes of the tags generated by poly1305. - macSize = 16 - - // lengthHeaderSize is the number of bytes used to prefix encode the - // length of a message payload. - lengthHeaderSize = 2 - - // encHeaderSize is the number of bytes required to hold an encrypted - // header and it's MAC. - encHeaderSize = lengthHeaderSize + macSize - - // keyRotationInterval is the number of messages sent on a single - // cipher stream before the keys are rotated forwards. - keyRotationInterval = 1000 - - // handshakeReadTimeout is a read timeout that will be enforced when - // waiting for data payloads during the various acts of Brontide. If - // the remote party fails to deliver the proper payload within this - // time frame, then we'll fail the connection. - handshakeReadTimeout = time.Second * 5 -) - -var ( - Err = er.NewErrorType("lnd.brontide") - // ErrMaxMessageLengthExceeded is returned when a message to be written to - // the cipher session exceeds the maximum allowed message payload. - ErrMaxMessageLengthExceeded = Err.CodeWithDetail("ErrMaxMessageLengthExceeded", - "the generated payload exceeds the max allowed message length of (2^16)-1") - - // ErrMessageNotFlushed signals that the connection cannot accept a new - // message because the prior message has not been fully flushed. - ErrMessageNotFlushed = Err.CodeWithDetail("ErrMessageNotFlushed", "prior message not flushed") - - // lightningPrologue is the noise prologue that is used to initialize - // the brontide noise handshake. - lightningPrologue = []byte("lightning") - - // ephemeralGen is the default ephemeral key generator, used to derive a - // unique ephemeral key for each brontide handshake. 
- ephemeralGen = func() (*btcec.PrivateKey, er.R) { - return btcec.NewPrivateKey(btcec.S256()) - } -) - -// TODO(roasbeef): free buffer pool? - -// ecdh performs an ECDH operation between pub and priv. The returned value is -// the sha256 of the compressed shared point. -func ecdh(pub *btcec.PublicKey, priv keychain.SingleKeyECDH) ([]byte, er.R) { - hash, err := priv.ECDH(pub) - return hash[:], err -} - -// cipherState encapsulates the state for the AEAD which will be used to -// encrypt+authenticate any payloads sent during the handshake, and messages -// sent once the handshake has completed. -type cipherState struct { - // nonce is the nonce passed into the chacha20-poly1305 instance for - // encryption+decryption. The nonce is incremented after each successful - // encryption/decryption. - // - // TODO(roasbeef): this should actually be 96 bit - nonce uint64 - - // secretKey is the shared symmetric key which will be used to - // instantiate the cipher. - // - // TODO(roasbeef): m-lock?? - secretKey [32]byte - - // salt is an additional secret which is used during key rotation to - // generate new keys. - salt [32]byte - - // cipher is an instance of the ChaCha20-Poly1305 AEAD construction - // created using the secretKey above. - cipher cipher.AEAD -} - -// Encrypt returns a ciphertext which is the encryption of the plainText -// observing the passed associatedData within the AEAD construction. -func (c *cipherState) Encrypt(associatedData, cipherText, plainText []byte) []byte { - defer func() { - c.nonce++ - - if c.nonce == keyRotationInterval { - c.rotateKey() - } - }() - - var nonce [12]byte - binary.LittleEndian.PutUint64(nonce[4:], c.nonce) - - return c.cipher.Seal(cipherText, nonce[:], plainText, associatedData) -} - -// Decrypt attempts to decrypt the passed ciphertext observing the specified -// associatedData within the AEAD construction. In the case that the final MAC -// check fails, then a non-nil error will be returned. 
-func (c *cipherState) Decrypt(associatedData, plainText, cipherText []byte) ([]byte, er.R) { - defer func() { - c.nonce++ - - if c.nonce == keyRotationInterval { - c.rotateKey() - } - }() - - var nonce [12]byte - binary.LittleEndian.PutUint64(nonce[4:], c.nonce) - - o, e := c.cipher.Open(plainText, nonce[:], cipherText, associatedData) - return o, er.E(e) -} - -// InitializeKey initializes the secret key and AEAD cipher scheme based off of -// the passed key. -func (c *cipherState) InitializeKey(key [32]byte) { - c.secretKey = key - c.nonce = 0 - - // Safe to ignore the error here as our key is properly sized - // (32-bytes). - c.cipher, _ = chacha20poly1305.New(c.secretKey[:]) -} - -// InitializeKeyWithSalt is identical to InitializeKey however it also sets the -// cipherState's salt field which is used for key rotation. -func (c *cipherState) InitializeKeyWithSalt(salt, key [32]byte) { - c.salt = salt - c.InitializeKey(key) -} - -// rotateKey rotates the current encryption/decryption key for this cipherState -// instance. Key rotation is performed by ratcheting the current key forward -// using an HKDF invocation with the cipherState's salt as the salt, and the -// current key as the input. -func (c *cipherState) rotateKey() { - var ( - info []byte - nextKey [32]byte - ) - - oldKey := c.secretKey - h := hkdf.New(sha256.New, oldKey[:], c.salt[:], info) - - // hkdf(ck, k, zero) - // | - // | \ - // | \ - // ck k' - h.Read(c.salt[:]) - h.Read(nextKey[:]) - - c.InitializeKey(nextKey) -} - -// symmetricState encapsulates a cipherState object and houses the ephemeral -// handshake digest state. This struct is used during the handshake to derive -// new shared secrets based off of the result of ECDH operations. Ultimately, -// the final key yielded by this struct is the result of an incremental -// Triple-DH operation. 
-type symmetricState struct { - cipherState - - // chainingKey is used as the salt to the HKDF function to derive a new - // chaining key as well as a new tempKey which is used for - // encryption/decryption. - chainingKey [32]byte - - // tempKey is the latter 32 bytes resulted from the latest HKDF - // iteration. This key is used to encrypt/decrypt any handshake - // messages or payloads sent until the next DH operation is executed. - tempKey [32]byte - - // handshakeDigest is the cumulative hash digest of all handshake - // messages sent from start to finish. This value is never transmitted - // to the other side, but will be used as the AD when - // encrypting/decrypting messages using our AEAD construction. - handshakeDigest [32]byte -} - -// mixKey implements a basic HKDF-based key ratchet. This method is called -// with the result of each DH output generated during the handshake process. -// The first 32 bytes extract from the HKDF reader is the next chaining key, -// then latter 32 bytes become the temp secret key using within any future AEAD -// operations until another DH operation is performed. -func (s *symmetricState) mixKey(input []byte) { - var info []byte - - secret := input - salt := s.chainingKey - h := hkdf.New(sha256.New, secret, salt[:], info) - - // hkdf(ck, input, zero) - // | - // | \ - // | \ - // ck k - h.Read(s.chainingKey[:]) - h.Read(s.tempKey[:]) - - // cipher.k = temp_key - s.InitializeKey(s.tempKey) -} - -// mixHash hashes the passed input data into the cumulative handshake digest. -// The running result of this value (h) is used as the associated data in all -// decryption/encryption operations. -func (s *symmetricState) mixHash(data []byte) { - h := sha256.New() - h.Write(s.handshakeDigest[:]) - h.Write(data) - - copy(s.handshakeDigest[:], h.Sum(nil)) -} - -// EncryptAndHash returns the authenticated encryption of the passed plaintext. 
-// When encrypting the handshake digest (h) is used as the associated data to -// the AEAD cipher. -func (s *symmetricState) EncryptAndHash(plaintext []byte) []byte { - ciphertext := s.Encrypt(s.handshakeDigest[:], nil, plaintext) - - s.mixHash(ciphertext) - - return ciphertext -} - -// DecryptAndHash returns the authenticated decryption of the passed -// ciphertext. When encrypting the handshake digest (h) is used as the -// associated data to the AEAD cipher. -func (s *symmetricState) DecryptAndHash(ciphertext []byte) ([]byte, er.R) { - plaintext, err := s.Decrypt(s.handshakeDigest[:], nil, ciphertext) - if err != nil { - return nil, err - } - - s.mixHash(ciphertext) - - return plaintext, nil -} - -// InitializeSymmetric initializes the symmetric state by setting the handshake -// digest (h) and the chaining key (ck) to protocol name. -func (s *symmetricState) InitializeSymmetric(protocolName []byte) { - var empty [32]byte - - s.handshakeDigest = sha256.Sum256(protocolName) - s.chainingKey = s.handshakeDigest - s.InitializeKey(empty) -} - -// handshakeState encapsulates the symmetricState and keeps track of all the -// public keys (static and ephemeral) for both sides during the handshake -// transcript. If the handshake completes successfully, then two instances of a -// cipherState are emitted: one to encrypt messages from initiator to -// responder, and the other for the opposite direction. -type handshakeState struct { - symmetricState - - initiator bool - - localStatic keychain.SingleKeyECDH - localEphemeral keychain.SingleKeyECDH // nolint (false positive) - - remoteStatic *btcec.PublicKey - remoteEphemeral *btcec.PublicKey -} - -// newHandshakeState returns a new instance of the handshake state initialized -// with the prologue and protocol name. If this is the responder's handshake -// state, then the remotePub can be nil. 
-func newHandshakeState(initiator bool, prologue []byte, - localKey keychain.SingleKeyECDH, - remotePub *btcec.PublicKey) handshakeState { - - h := handshakeState{ - initiator: initiator, - localStatic: localKey, - remoteStatic: remotePub, - } - - // Set the current chaining key and handshake digest to the hash of the - // protocol name, and additionally mix in the prologue. If either sides - // disagree about the prologue or protocol name, then the handshake - // will fail. - h.InitializeSymmetric([]byte(protocolName)) - h.mixHash(prologue) - - // In Noise_XK, the initiator should know the responder's static - // public key, therefore we include the responder's static key in the - // handshake digest. If the initiator gets this value wrong, then the - // handshake will fail. - if initiator { - h.mixHash(remotePub.SerializeCompressed()) - } else { - h.mixHash(localKey.PubKey().SerializeCompressed()) - } - - return h -} - -// EphemeralGenerator is a functional option that allows callers to substitute -// a custom function for use when generating ephemeral keys for ActOne or -// ActTwo. The function closure returned by this function can be passed into -// NewBrontideMachine as a function option parameter. -func EphemeralGenerator(gen func() (*btcec.PrivateKey, er.R)) func(*Machine) { - return func(m *Machine) { - m.ephemeralGen = gen - } -} - -// Machine is a state-machine which implements Brontide: an -// Authenticated-key Exchange in Three Acts. Brontide is derived from the Noise -// framework, specifically implementing the Noise_XK handshake. Once the -// initial 3-act handshake has completed all messages are encrypted with a -// chacha20 AEAD cipher. On the wire, all messages are prefixed with an -// authenticated+encrypted length field. Additionally, the encrypted+auth'd -// length prefix is used as the AD when encrypting+decryption messages. 
This -// construction provides confidentiality of packet length, avoids introducing -// a padding-oracle, and binds the encrypted packet length to the packet -// itself. -// -// The acts proceeds the following order (initiator on the left): -// GenActOne() -> -// RecvActOne() -// <- GenActTwo() -// RecvActTwo() -// GenActThree() -> -// RecvActThree() -// -// This exchange corresponds to the following Noise handshake: -// <- s -// ... -// -> e, es -// <- e, ee -// -> s, se -type Machine struct { - sendCipher cipherState - recvCipher cipherState - - ephemeralGen func() (*btcec.PrivateKey, er.R) - - handshakeState - - // nextCipherHeader is a static buffer that we'll use to read in the - // next ciphertext header from the wire. The header is a 2 byte length - // (of the next ciphertext), followed by a 16 byte MAC. - nextCipherHeader [encHeaderSize]byte - - // nextHeaderSend holds a reference to the remaining header bytes to - // write out for a pending message. This allows us to tolerate timeout - // errors that cause partial writes. - nextHeaderSend []byte - - // nextHeaderBody holds a reference to the remaining body bytes to write - // out for a pending message. This allows us to tolerate timeout errors - // that cause partial writes. - nextBodySend []byte -} - -// NewBrontideMachine creates a new instance of the brontide state-machine. If -// the responder (listener) is creating the object, then the remotePub should -// be nil. The handshake state within brontide is initialized using the ascii -// string "lightning" as the prologue. The last parameter is a set of variadic -// arguments for adding additional options to the brontide Machine -// initialization. 
-func NewBrontideMachine(initiator bool, localKey keychain.SingleKeyECDH, - remotePub *btcec.PublicKey, options ...func(*Machine)) *Machine { - - handshake := newHandshakeState( - initiator, lightningPrologue, localKey, remotePub, - ) - - m := &Machine{ - handshakeState: handshake, - ephemeralGen: ephemeralGen, - } - - // With the default options established, we'll now process all the - // options passed in as parameters. - for _, option := range options { - option(m) - } - - return m -} - -const ( - // HandshakeVersion is the expected version of the brontide handshake. - // Any messages that carry a different version will cause the handshake - // to abort immediately. - HandshakeVersion = byte(0) - - // ActOneSize is the size of the packet sent from initiator to - // responder in ActOne. The packet consists of a handshake version, an - // ephemeral key in compressed format, and a 16-byte poly1305 tag. - // - // 1 + 33 + 16 - ActOneSize = 50 - - // ActTwoSize is the size the packet sent from responder to initiator - // in ActTwo. The packet consists of a handshake version, an ephemeral - // key in compressed format and a 16-byte poly1305 tag. - // - // 1 + 33 + 16 - ActTwoSize = 50 - - // ActThreeSize is the size of the packet sent from initiator to - // responder in ActThree. The packet consists of a handshake version, - // the initiators static key encrypted with strong forward secrecy and - // a 16-byte poly1035 tag. - // - // 1 + 33 + 16 + 16 - ActThreeSize = 66 -) - -// GenActOne generates the initial packet (act one) to be sent from initiator -// to responder. During act one the initiator generates a fresh ephemeral key, -// hashes it into the handshake digest, and performs an ECDH between this key -// and the responder's static key. Future payloads are encrypted with a key -// derived from this result. 
-// -// -> e, es -func (b *Machine) GenActOne() ([ActOneSize]byte, er.R) { - var actOne [ActOneSize]byte - - // e - localEphemeral, err := b.ephemeralGen() - if err != nil { - return actOne, err - } - b.localEphemeral = &keychain.PrivKeyECDH{ - PrivKey: localEphemeral, - } - - ephemeral := localEphemeral.PubKey().SerializeCompressed() - b.mixHash(ephemeral) - - // es - s, err := ecdh(b.remoteStatic, b.localEphemeral) - if err != nil { - return actOne, err - } - b.mixKey(s[:]) - - authPayload := b.EncryptAndHash([]byte{}) - - actOne[0] = HandshakeVersion - copy(actOne[1:34], ephemeral) - copy(actOne[34:], authPayload) - - return actOne, nil -} - -// RecvActOne processes the act one packet sent by the initiator. The responder -// executes the mirrored actions to that of the initiator extending the -// handshake digest and deriving a new shared secret based on an ECDH with the -// initiator's ephemeral key and responder's static key. -func (b *Machine) RecvActOne(actOne [ActOneSize]byte) er.R { - var ( - err er.R - e [33]byte - p [16]byte - ) - - // If the handshake version is unknown, then the handshake fails - // immediately. - if actOne[0] != HandshakeVersion { - return er.Errorf("act one: invalid handshake version: %v, "+ - "only %v is valid, msg=%x", actOne[0], HandshakeVersion, - actOne[:]) - } - - copy(e[:], actOne[1:34]) - copy(p[:], actOne[34:]) - - // e - b.remoteEphemeral, err = btcec.ParsePubKey(e[:], btcec.S256()) - if err != nil { - return err - } - b.mixHash(b.remoteEphemeral.SerializeCompressed()) - - // es - s, err := ecdh(b.remoteEphemeral, b.localStatic) - if err != nil { - return err - } - b.mixKey(s) - - // If the initiator doesn't know our static key, then this operation - // will fail. - _, err = b.DecryptAndHash(p[:]) - return err -} - -// GenActTwo generates the second packet (act two) to be sent from the -// responder to the initiator. 
The packet for act two is identical to that of -// act one, but then results in a different ECDH operation between the -// initiator's and responder's ephemeral keys. -// -// <- e, ee -func (b *Machine) GenActTwo() ([ActTwoSize]byte, er.R) { - var actTwo [ActTwoSize]byte - - // e - localEphemeral, err := b.ephemeralGen() - if err != nil { - return actTwo, err - } - b.localEphemeral = &keychain.PrivKeyECDH{ - PrivKey: localEphemeral, - } - - ephemeral := localEphemeral.PubKey().SerializeCompressed() - b.mixHash(localEphemeral.PubKey().SerializeCompressed()) - - // ee - s, err := ecdh(b.remoteEphemeral, b.localEphemeral) - if err != nil { - return actTwo, err - } - b.mixKey(s) - - authPayload := b.EncryptAndHash([]byte{}) - - actTwo[0] = HandshakeVersion - copy(actTwo[1:34], ephemeral) - copy(actTwo[34:], authPayload) - - return actTwo, nil -} - -// RecvActTwo processes the second packet (act two) sent from the responder to -// the initiator. A successful processing of this packet authenticates the -// initiator to the responder. -func (b *Machine) RecvActTwo(actTwo [ActTwoSize]byte) er.R { - var ( - err er.R - e [33]byte - p [16]byte - ) - - // If the handshake version is unknown, then the handshake fails - // immediately. - if actTwo[0] != HandshakeVersion { - return er.Errorf("act two: invalid handshake version: %v, "+ - "only %v is valid, msg=%x", actTwo[0], HandshakeVersion, - actTwo[:]) - } - - copy(e[:], actTwo[1:34]) - copy(p[:], actTwo[34:]) - - // e - b.remoteEphemeral, err = btcec.ParsePubKey(e[:], btcec.S256()) - if err != nil { - return err - } - b.mixHash(b.remoteEphemeral.SerializeCompressed()) - - // ee - s, err := ecdh(b.remoteEphemeral, b.localEphemeral) - if err != nil { - return err - } - b.mixKey(s) - - _, err = b.DecryptAndHash(p[:]) - return err -} - -// GenActThree creates the final (act three) packet of the handshake. Act three -// is to be sent from the initiator to the responder. 
The purpose of act three -// is to transmit the initiator's public key under strong forward secrecy to -// the responder. This act also includes the final ECDH operation which yields -// the final session. -// -// -> s, se -func (b *Machine) GenActThree() ([ActThreeSize]byte, er.R) { - var actThree [ActThreeSize]byte - - ourPubkey := b.localStatic.PubKey().SerializeCompressed() - ciphertext := b.EncryptAndHash(ourPubkey) - - s, err := ecdh(b.remoteEphemeral, b.localStatic) - if err != nil { - return actThree, err - } - b.mixKey(s) - - authPayload := b.EncryptAndHash([]byte{}) - - actThree[0] = HandshakeVersion - copy(actThree[1:50], ciphertext) - copy(actThree[50:], authPayload) - - // With the final ECDH operation complete, derive the session sending - // and receiving keys. - b.split() - - return actThree, nil -} - -// RecvActThree processes the final act (act three) sent from the initiator to -// the responder. After processing this act, the responder learns of the -// initiator's static public key. Decryption of the static key serves to -// authenticate the initiator to the responder. -func (b *Machine) RecvActThree(actThree [ActThreeSize]byte) er.R { - var ( - err er.R - s [33 + 16]byte - p [16]byte - ) - - // If the handshake version is unknown, then the handshake fails - // immediately. 
- if actThree[0] != HandshakeVersion { - return er.Errorf("act three: invalid handshake version: %v, "+ - "only %v is valid, msg=%x", actThree[0], HandshakeVersion, - actThree[:]) - } - - copy(s[:], actThree[1:33+16+1]) - copy(p[:], actThree[33+16+1:]) - - // s - remotePub, err := b.DecryptAndHash(s[:]) - if err != nil { - return err - } - b.remoteStatic, err = btcec.ParsePubKey(remotePub, btcec.S256()) - if err != nil { - return err - } - - // se - se, err := ecdh(b.remoteStatic, b.localEphemeral) - if err != nil { - return err - } - b.mixKey(se) - - if _, err := b.DecryptAndHash(p[:]); err != nil { - return err - } - - // With the final ECDH operation complete, derive the session sending - // and receiving keys. - b.split() - - return nil -} - -// split is the final wrap-up act to be executed at the end of a successful -// three act handshake. This function creates two internal cipherState -// instances: one which is used to encrypt messages from the initiator to the -// responder, and another which is used to encrypt message for the opposite -// direction. -func (b *Machine) split() { - var ( - empty []byte - sendKey [32]byte - recvKey [32]byte - ) - - h := hkdf.New(sha256.New, empty, b.chainingKey[:], empty) - - // If we're the initiator the first 32 bytes are used to encrypt our - // messages and the second 32-bytes to decrypt their messages. For the - // responder the opposite is true. - if b.initiator { - h.Read(sendKey[:]) - b.sendCipher = cipherState{} - b.sendCipher.InitializeKeyWithSalt(b.chainingKey, sendKey) - - h.Read(recvKey[:]) - b.recvCipher = cipherState{} - b.recvCipher.InitializeKeyWithSalt(b.chainingKey, recvKey) - } else { - h.Read(recvKey[:]) - b.recvCipher = cipherState{} - b.recvCipher.InitializeKeyWithSalt(b.chainingKey, recvKey) - - h.Read(sendKey[:]) - b.sendCipher = cipherState{} - b.sendCipher.InitializeKeyWithSalt(b.chainingKey, sendKey) - } -} - -// WriteMessage encrypts and buffers the next message p. 
The ciphertext of the -// message is prepended with an encrypt+auth'd length which must be used as the -// AD to the AEAD construction when being decrypted by the other side. -// -// NOTE: This DOES NOT write the message to the wire, it should be followed by a -// call to Flush to ensure the message is written. -func (b *Machine) WriteMessage(p []byte) er.R { - // The total length of each message payload including the MAC size - // payload exceed the largest number encodable within a 16-bit unsigned - // integer. - if len(p) > math.MaxUint16 { - return ErrMaxMessageLengthExceeded.Default() - } - - // If a prior message was written but it hasn't been fully flushed, - // return an error as we only support buffering of one message at a - // time. - if len(b.nextHeaderSend) > 0 || len(b.nextBodySend) > 0 { - return ErrMessageNotFlushed.Default() - } - - // The full length of the packet is only the packet length, and does - // NOT include the MAC. - fullLength := uint16(len(p)) - - var pktLen [2]byte - binary.BigEndian.PutUint16(pktLen[:], fullLength) - - // First, generate the encrypted+MAC'd length prefix for the packet. - b.nextHeaderSend = b.sendCipher.Encrypt(nil, nil, pktLen[:]) - - // Finally, generate the encrypted packet itself. - b.nextBodySend = b.sendCipher.Encrypt(nil, nil, p) - - return nil -} - -// Flush attempts to write a message buffered using WriteMessage to the provided -// io.Writer. If no buffered message exists, this will result in a NOP. -// Otherwise, it will continue to write the remaining bytes, picking up where -// the byte stream left off in the event of a partial write. The number of bytes -// returned reflects the number of plaintext bytes in the payload, and does not -// account for the overhead of the header or MACs. -// -// NOTE: It is safe to call this method again iff a timeout error is returned. -func (b *Machine) Flush(w io.Writer) (int, er.R) { - // First, write out the pending header bytes, if any exist. 
Any header - // bytes written will not count towards the total amount flushed. - if len(b.nextHeaderSend) > 0 { - // Write any remaining header bytes and shift the slice to point - // to the next segment of unwritten bytes. If an error is - // encountered, we can continue to write the header from where - // we left off on a subsequent call to Flush. - n, err := util.Write(w, b.nextHeaderSend) - b.nextHeaderSend = b.nextHeaderSend[n:] - if err != nil { - return 0, err - } - } - - // Next, write the pending body bytes, if any exist. Only the number of - // bytes written that correspond to the ciphertext will be included in - // the total bytes written, bytes written as part of the MAC will not be - // counted. - var nn int - if len(b.nextBodySend) > 0 { - // Write out all bytes excluding the mac and shift the body - // slice depending on the number of actual bytes written. - n, err := util.Write(w, b.nextBodySend) - b.nextBodySend = b.nextBodySend[n:] - - // If we partially or fully wrote any of the body's MAC, we'll - // subtract that contribution from the total amount flushed to - // preserve the abstraction of returning the number of plaintext - // bytes written by the connection. - // - // There are three possible scenarios we must handle to ensure - // the returned value is correct. In the first case, the write - // straddles both payload and MAC bytes, and we must subtract - // the number of MAC bytes written from n. In the second, only - // payload bytes are written, thus we can return n unmodified. - // The final scenario pertains to the case where only MAC bytes - // are written, none of which count towards the total. 
- // - // |-----------Payload------------|----MAC----| - // Straddle: S---------------------------------E--------0 - // Payload-only: S------------------------E-----------------0 - // MAC-only: S-------E-0 - start, end := n+len(b.nextBodySend), len(b.nextBodySend) - switch { - - // Straddles payload and MAC bytes, subtract number of MAC bytes - // written from the actual number written. - case start > macSize && end <= macSize: - nn = n - (macSize - end) - - // Only payload bytes are written, return n directly. - case start > macSize && end > macSize: - nn = n - - // Only MAC bytes are written, return 0 bytes written. - default: - } - - if err != nil { - return nn, err - } - } - - return nn, nil -} - -// ReadMessage attempts to read the next message from the passed io.Reader. In -// the case of an authentication error, a non-nil error is returned. -func (b *Machine) ReadMessage(r io.Reader) ([]byte, er.R) { - pktLen, err := b.ReadHeader(r) - if err != nil { - return nil, err - } - - buf := make([]byte, pktLen) - return b.ReadBody(r, buf) -} - -// ReadHeader attempts to read the next message header from the passed -// io.Reader. The header contains the length of the next body including -// additional overhead of the MAC. In the case of an authentication error, a -// non-nil error is returned. -// -// NOTE: This method SHOULD NOT be used in the case that the io.Reader may be -// adversarial and induce long delays. If the caller needs to set read deadlines -// appropriately, it is preferred that they use the split ReadHeader and -// ReadBody methods so that the deadlines can be set appropriately on each. -func (b *Machine) ReadHeader(r io.Reader) (uint32, er.R) { - _, err := util.ReadFull(r, b.nextCipherHeader[:]) - if err != nil { - return 0, err - } - - // Attempt to decrypt+auth the packet length present in the stream. 
- pktLenBytes, err := b.recvCipher.Decrypt( - nil, nil, b.nextCipherHeader[:], - ) - if err != nil { - return 0, err - } - - // Compute the packet length that we will need to read off the wire. - pktLen := uint32(binary.BigEndian.Uint16(pktLenBytes)) + macSize - - return pktLen, nil -} - -// ReadBody attempts to ready the next message body from the passed io.Reader. -// The provided buffer MUST be the length indicated by the packet length -// returned by the preceding call to ReadHeader. In the case of an -// authentication eerror, a non-nil error is returned. -func (b *Machine) ReadBody(r io.Reader, buf []byte) ([]byte, er.R) { - // Next, using the length read from the packet header, read the - // encrypted packet itself into the buffer allocated by the read - // pool. - _, err := util.ReadFull(r, buf) - if err != nil { - return nil, err - } - - // Finally, decrypt the message held in the buffer, and return a - // new byte slice containing the plaintext. - // TODO(roasbeef): modify to let pass in slice - return b.recvCipher.Decrypt(nil, nil, buf) -} - -// SetCurveToNil sets the 'Curve' parameter to nil on the handshakeState keys. -// This allows us to log the Machine object without spammy log messages. 
-func (b *Machine) SetCurveToNil() { - if b.localStatic != nil { - b.localStatic.PubKey().Curve = nil - } - - if b.localEphemeral != nil { - b.localEphemeral.PubKey().Curve = nil - } - - if b.remoteStatic != nil { - b.remoteStatic.Curve = nil - } - - if b.remoteEphemeral != nil { - b.remoteEphemeral.Curve = nil - } -} diff --git a/lnd/brontide/noise_test.go b/lnd/brontide/noise_test.go deleted file mode 100644 index 0598698e..00000000 --- a/lnd/brontide/noise_test.go +++ /dev/null @@ -1,740 +0,0 @@ -package brontide - -import ( - "bytes" - "fmt" - "io" - "math" - "net" - "testing" - "testing/iotest" - "time" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/lnd/keychain" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/tor" -) - -type maybeNetConn struct { - conn net.Conn - err er.R -} - -func makeListener() (*Listener, *lnwire.NetAddress, er.R) { - // First, generate the long-term private keys for the brontide listener. - localPriv, err := btcec.NewPrivateKey(btcec.S256()) - if err != nil { - return nil, nil, err - } - localKeyECDH := &keychain.PrivKeyECDH{PrivKey: localPriv} - - // Having a port of ":0" means a random port, and interface will be - // chosen for our listener. - addr := "localhost:0" - - // Our listener will be local, and the connection remote. 
- listener, errr := NewListener(localKeyECDH, addr) - if errr != nil { - return nil, nil, errr - } - - netAddr := &lnwire.NetAddress{ - IdentityKey: localPriv.PubKey(), - Address: listener.Addr().(*net.TCPAddr), - } - - return listener, netAddr, nil -} - -func dialTimeout(network, address string, timeout time.Duration) (net.Conn, er.R) { - c, e := net.DialTimeout(network, address, timeout) - return c, er.E(e) -} - -func establishTestConnection() (net.Conn, net.Conn, func(), er.R) { - listener, netAddr, err := makeListener() - if err != nil { - return nil, nil, nil, err - } - defer listener.Close() - - // Nos, generate the long-term private keys remote end of the connection - // within our test. - remotePriv, err := btcec.NewPrivateKey(btcec.S256()) - if err != nil { - return nil, nil, nil, err - } - remoteKeyECDH := &keychain.PrivKeyECDH{PrivKey: remotePriv} - - // Initiate a connection with a separate goroutine, and listen with our - // main one. If both errors are nil, then encryption+auth was - // successful. - remoteConnChan := make(chan maybeNetConn, 1) - go func() { - remoteConn, err := Dial( - remoteKeyECDH, netAddr, - tor.DefaultConnTimeout, dialTimeout, - ) - remoteConnChan <- maybeNetConn{remoteConn, err} - }() - - localConnChan := make(chan maybeNetConn, 1) - go func() { - localConn, err := listener.Accept() - localConnChan <- maybeNetConn{localConn, er.E(err)} - }() - - remote := <-remoteConnChan - if remote.err != nil { - return nil, nil, nil, err - } - - local := <-localConnChan - if local.err != nil { - return nil, nil, nil, err - } - - cleanUp := func() { - local.conn.Close() - remote.conn.Close() - } - - return local.conn, remote.conn, cleanUp, nil -} - -func TestConnectionCorrectness(t *testing.T) { - // Create a test connection, grabbing either side of the connection - // into local variables. If the initial crypto handshake fails, then - // we'll get a non-nil error here. 
- localConn, remoteConn, cleanUp, err := establishTestConnection() - if err != nil { - t.Fatalf("unable to establish test connection: %v", err) - } - defer cleanUp() - - // Test out some message full-message reads. - for i := 0; i < 10; i++ { - msg := []byte(fmt.Sprintf("hello%d", i)) - - if _, err := localConn.Write(msg); err != nil { - t.Fatalf("remote conn failed to write: %v", err) - } - - readBuf := make([]byte, len(msg)) - if _, err := remoteConn.Read(readBuf); err != nil { - t.Fatalf("local conn failed to read: %v", err) - } - - if !bytes.Equal(readBuf, msg) { - t.Fatalf("messages don't match, %v vs %v", - string(readBuf), string(msg)) - } - } - - // Now try incremental message reads. This simulates first writing a - // message header, then a message body. - outMsg := []byte("hello world") - if _, err := localConn.Write(outMsg); err != nil { - t.Fatalf("remote conn failed to write: %v", err) - } - - readBuf := make([]byte, len(outMsg)) - if _, err := remoteConn.Read(readBuf[:len(outMsg)/2]); err != nil { - t.Fatalf("local conn failed to read: %v", err) - } - if _, err := remoteConn.Read(readBuf[len(outMsg)/2:]); err != nil { - t.Fatalf("local conn failed to read: %v", err) - } - - if !bytes.Equal(outMsg, readBuf) { - t.Fatalf("messages don't match, %v vs %v", - string(readBuf), string(outMsg)) - } -} - -// TestConecurrentHandshakes verifies the listener's ability to not be blocked -// by other pending handshakes. This is tested by opening multiple tcp -// connections with the listener, without completing any of the brontide acts. -// The test passes if real brontide dialer connects while the others are -// stalled. -func TestConcurrentHandshakes(t *testing.T) { - listener, netAddr, err := makeListener() - if err != nil { - t.Fatalf("unable to create listener connection: %v", err) - } - defer listener.Close() - - const nblocking = 5 - - // Open a handful of tcp connections, that do not complete any steps of - // the brontide handshake. 
- connChan := make(chan maybeNetConn) - for i := 0; i < nblocking; i++ { - go func() { - conn, err := net.Dial("tcp", listener.Addr().String()) - connChan <- maybeNetConn{conn, er.E(err)} - }() - } - - // Receive all connections/errors from our blocking tcp dials. We make a - // pass to gather all connections and errors to make sure we defer the - // calls to Close() on all successful connections. - tcpErrs := make([]error, 0, nblocking) - for i := 0; i < nblocking; i++ { - result := <-connChan - if result.conn != nil { - defer result.conn.Close() - } - if result.err != nil { - tcpErrs = append(tcpErrs, er.Native(result.err)) - } - } - for _, tcpErr := range tcpErrs { - if tcpErr != nil { - t.Fatalf("unable to tcp dial listener: %v", tcpErr) - } - } - - // Now, construct a new private key and use the brontide dialer to - // connect to the listener. - remotePriv, err := btcec.NewPrivateKey(btcec.S256()) - if err != nil { - t.Fatalf("unable to generate private key: %v", err) - } - remoteKeyECDH := &keychain.PrivKeyECDH{PrivKey: remotePriv} - - go func() { - remoteConn, err := Dial( - remoteKeyECDH, netAddr, - tor.DefaultConnTimeout, dialTimeout, - ) - connChan <- maybeNetConn{remoteConn, err} - }() - - // This connection should be accepted without error, as the brontide - // connection should bypass stalled tcp connections. - conn, errr := listener.Accept() - if errr != nil { - t.Fatalf("unable to accept dial: %v", errr) - } - defer conn.Close() - - result := <-connChan - if result.err != nil { - t.Fatalf("unable to dial %v: %v", netAddr, result.err) - } - result.conn.Close() -} - -func TestMaxPayloadLength(t *testing.T) { - t.Parallel() - - b := Machine{} - b.split() - - // Create a payload that's only *slightly* above the maximum allotted - // payload length. - payloadToReject := make([]byte, math.MaxUint16+1) - - // A write of the payload generated above to the state machine should - // be rejected as it's over the max payload length. 
- err := b.WriteMessage(payloadToReject) - if !ErrMaxMessageLengthExceeded.Is(err) { - t.Fatalf("payload is over the max allowed length, the write " + - "should have been rejected") - } - - // Generate another payload which should be accepted as a valid - // payload. - payloadToAccept := make([]byte, math.MaxUint16-1) - if err := b.WriteMessage(payloadToAccept); err != nil { - t.Fatalf("write for payload was rejected, should have been " + - "accepted") - } - - // Generate a final payload which is only *slightly* above the max payload length - // when the MAC is accounted for. - payloadToReject = make([]byte, math.MaxUint16+1) - - // This payload should be rejected. - err = b.WriteMessage(payloadToReject) - if !ErrMaxMessageLengthExceeded.Is(err) { - t.Fatalf("payload is over the max allowed length, the write " + - "should have been rejected") - } -} - -func TestWriteMessageChunking(t *testing.T) { - // Create a test connection, grabbing either side of the connection - // into local variables. If the initial crypto handshake fails, then - // we'll get a non-nil error here. - localConn, remoteConn, cleanUp, err := establishTestConnection() - if err != nil { - t.Fatalf("unable to establish test connection: %v", err) - } - defer cleanUp() - - // Attempt to write a message which is over 3x the max allowed payload - // size. - largeMessage := bytes.Repeat([]byte("kek"), math.MaxUint16*3) - - // Launch a new goroutine to write the large message generated above in - // chunks. We spawn a new goroutine because otherwise, we may block as - // the kernel waits for the buffer to flush. - errCh := make(chan er.R) - go func() { - defer close(errCh) - - bytesWritten, err := localConn.Write(largeMessage) - if err != nil { - errCh <- er.Errorf("unable to write message: %v", err) - return - } - - // The entire message should have been written out to the remote - // connection. 
- if bytesWritten != len(largeMessage) { - errCh <- er.Errorf("bytes not fully written") - return - } - }() - - // Attempt to read the entirety of the message generated above. - buf := make([]byte, len(largeMessage)) - if _, err := util.ReadFull(remoteConn, buf); err != nil { - t.Fatalf("unable to read message: %v", err) - } - - err = <-errCh - if err != nil { - t.Fatal(err) - } - - // Finally, the message the remote end of the connection received - // should be identical to what we sent from the local connection. - if !bytes.Equal(buf, largeMessage) { - t.Fatalf("bytes don't match") - } -} - -// TestBolt0008TestVectors ensures that our implementation of brontide exactly -// matches the test vectors within the specification. -func TestBolt0008TestVectors(t *testing.T) { - t.Parallel() - - // First, we'll generate the state of the initiator from the test - // vectors at the appendix of BOLT-0008 - initiatorKeyBytes, err := util.DecodeHex("1111111111111111111111" + - "111111111111111111111111111111111111111111") - if err != nil { - t.Fatalf("unable to decode hex: %v", err) - } - initiatorPriv, _ := btcec.PrivKeyFromBytes( - btcec.S256(), initiatorKeyBytes, - ) - initiatorKeyECDH := &keychain.PrivKeyECDH{PrivKey: initiatorPriv} - - // We'll then do the same for the responder. - responderKeyBytes, err := util.DecodeHex("212121212121212121212121" + - "2121212121212121212121212121212121212121") - if err != nil { - t.Fatalf("unable to decode hex: %v", err) - } - responderPriv, responderPub := btcec.PrivKeyFromBytes( - btcec.S256(), responderKeyBytes, - ) - responderKeyECDH := &keychain.PrivKeyECDH{PrivKey: responderPriv} - - // With the initiator's key data parsed, we'll now define a custom - // EphemeralGenerator function for the state machine to ensure that the - // initiator and responder both generate the ephemeral public key - // defined within the test vectors. 
- initiatorEphemeral := EphemeralGenerator(func() (*btcec.PrivateKey, er.R) { - e := "121212121212121212121212121212121212121212121212121212" + - "1212121212" - eBytes, err := util.DecodeHex(e) - if err != nil { - return nil, err - } - - priv, _ := btcec.PrivKeyFromBytes(btcec.S256(), eBytes) - return priv, nil - }) - responderEphemeral := EphemeralGenerator(func() (*btcec.PrivateKey, er.R) { - e := "222222222222222222222222222222222222222222222222222" + - "2222222222222" - eBytes, err := util.DecodeHex(e) - if err != nil { - return nil, err - } - - priv, _ := btcec.PrivKeyFromBytes(btcec.S256(), eBytes) - return priv, nil - }) - - // Finally, we'll create both brontide state machines, so we can begin - // our test. - initiator := NewBrontideMachine( - true, initiatorKeyECDH, responderPub, initiatorEphemeral, - ) - responder := NewBrontideMachine( - false, responderKeyECDH, nil, responderEphemeral, - ) - - // We'll start with the initiator generating the initial payload for - // act one. This should consist of exactly 50 bytes. We'll assert that - // the payload return is _exactly_ the same as what's specified within - // the test vectors. - actOne, err := initiator.GenActOne() - if err != nil { - t.Fatalf("unable to generate act one: %v", err) - } - expectedActOne, err := util.DecodeHex("00036360e856310ce5d294e" + - "8be33fc807077dc56ac80d95d9cd4ddbd21325eff73f70df608655115" + - "1f58b8afe6c195782c6a") - if err != nil { - t.Fatalf("unable to parse expected act one: %v", err) - } - if !bytes.Equal(expectedActOne, actOne[:]) { - t.Fatalf("act one mismatch: expected %x, got %x", - expectedActOne, actOne) - } - - // With the assertion above passed, we'll now process the act one - // payload with the responder of the crypto handshake. - if err := responder.RecvActOne(actOne); err != nil { - t.Fatalf("responder unable to process act one: %v", err) - } - - // Next, we'll start the second act by having the responder generate - // its contribution to the crypto handshake. 
We'll also verify that we - // produce the _exact_ same byte stream as advertised within the spec's - // test vectors. - actTwo, err := responder.GenActTwo() - if err != nil { - t.Fatalf("unable to generate act two: %v", err) - } - expectedActTwo, err := util.DecodeHex("0002466d7fcae563e5cb09a0" + - "d1870bb580344804617879a14949cf22285f1bae3f276e2470b93aac58" + - "3c9ef6eafca3f730ae") - if err != nil { - t.Fatalf("unable to parse expected act two: %v", err) - } - if !bytes.Equal(expectedActTwo, actTwo[:]) { - t.Fatalf("act two mismatch: expected %x, got %x", - expectedActTwo, actTwo) - } - - // Moving the handshake along, we'll also ensure that the initiator - // accepts the act two payload. - if err := initiator.RecvActTwo(actTwo); err != nil { - t.Fatalf("initiator unable to process act two: %v", err) - } - - // At the final step, we'll generate the last act from the initiator - // and once again verify that it properly matches the test vectors. - actThree, err := initiator.GenActThree() - if err != nil { - t.Fatalf("unable to generate act three: %v", err) - } - expectedActThree, err := util.DecodeHex("00b9e3a702e93e3a9948c2e" + - "d6e5fd7590a6e1c3a0344cfc9d5b57357049aa22355361aa02e55a8f" + - "c28fef5bd6d71ad0c38228dc68b1c466263b47fdf31e560e139ba") - if err != nil { - t.Fatalf("unable to parse expected act three: %v", err) - } - if !bytes.Equal(expectedActThree, actThree[:]) { - t.Fatalf("act three mismatch: expected %x, got %x", - expectedActThree, actThree) - } - - // Finally, we'll ensure that the responder itself also properly parses - // the last payload in the crypto handshake. - if err := responder.RecvActThree(actThree); err != nil { - t.Fatalf("responder unable to process act three: %v", err) - } - - // As a final assertion, we'll ensure that both sides have derived the - // proper symmetric encryption keys. 
- sendingKey, err := util.DecodeHex("969ab31b4d288cedf6218839b27a3e2" + - "140827047f2c0f01bf5c04435d43511a9") - if err != nil { - t.Fatalf("unable to parse sending key: %v", err) - } - recvKey, err := util.DecodeHex("bb9020b8965f4df047e07f955f3c4b884" + - "18984aadc5cdb35096b9ea8fa5c3442") - if err != nil { - t.Fatalf("unable to parse receiving key: %v", err) - } - - chainKey, err := util.DecodeHex("919219dbb2920afa8db80f9a51787a840" + - "bcf111ed8d588caf9ab4be716e42b01") - if err != nil { - t.Fatalf("unable to parse chaining key: %v", err) - } - - if !bytes.Equal(initiator.sendCipher.secretKey[:], sendingKey) { - t.Fatalf("sending key mismatch: expected %x, got %x", - initiator.sendCipher.secretKey[:], sendingKey) - } - if !bytes.Equal(initiator.recvCipher.secretKey[:], recvKey) { - t.Fatalf("receiving key mismatch: expected %x, got %x", - initiator.recvCipher.secretKey[:], recvKey) - } - if !bytes.Equal(initiator.chainingKey[:], chainKey) { - t.Fatalf("chaining key mismatch: expected %x, got %x", - initiator.chainingKey[:], chainKey) - } - - if !bytes.Equal(responder.sendCipher.secretKey[:], recvKey) { - t.Fatalf("sending key mismatch: expected %x, got %x", - responder.sendCipher.secretKey[:], recvKey) - } - if !bytes.Equal(responder.recvCipher.secretKey[:], sendingKey) { - t.Fatalf("receiving key mismatch: expected %x, got %x", - responder.recvCipher.secretKey[:], sendingKey) - } - if !bytes.Equal(responder.chainingKey[:], chainKey) { - t.Fatalf("chaining key mismatch: expected %x, got %x", - responder.chainingKey[:], chainKey) - } - - // Now test as per section "transport-message test" in Test Vectors - // (the transportMessageVectors ciphertexts are from this section of BOLT 8); - // we do slightly greater than 1000 encryption/decryption operations - // to ensure that the key rotation algorithm is operating as expected. - // The starting point for enc/decr is already guaranteed correct from the - // above tests of sendingKey, receivingKey, chainingKey. 
- transportMessageVectors := map[int]string{ - 0: "cf2b30ddf0cf3f80e7c35a6e6730b59fe802473180f396d88a8fb0db8cb" + - "cf25d2f214cf9ea1d95", - 1: "72887022101f0b6753e0c7de21657d35a4cb2a1f5cde2650528bbc8f837" + - "d0f0d7ad833b1a256a1", - 500: "178cb9d7387190fa34db9c2d50027d21793c9bc2d40b1e14dcf30ebeeeb2" + - "20f48364f7a4c68bf8", - 501: "1b186c57d44eb6de4c057c49940d79bb838a145cb528d6e8fd26dbe50a6" + - "0ca2c104b56b60e45bd", - 1000: "4a2f3cc3b5e78ddb83dcb426d9863d9d9a723b0337c89dd0b005d89f8d3" + - "c05c52b76b29b740f09", - 1001: "2ecd8c8a5629d0d02ab457a0fdd0f7b90a192cd46be5ecb6ca570bfc5e2" + - "68338b1a16cf4ef2d36", - } - - // Payload for every message is the string "hello". - payload := []byte("hello") - - var buf bytes.Buffer - - for i := 0; i < 1002; i++ { - err = initiator.WriteMessage(payload) - if err != nil { - t.Fatalf("could not write message %s", payload) - } - _, err = initiator.Flush(&buf) - if err != nil { - t.Fatalf("could not flush message: %v", err) - } - if val, ok := transportMessageVectors[i]; ok { - binaryVal, err := util.DecodeHex(val) - if err != nil { - t.Fatalf("Failed to decode hex string %s", val) - } - if !bytes.Equal(buf.Bytes(), binaryVal) { - t.Fatalf("Ciphertext %x was not equal to expected %s", - buf.String()[:], val) - } - } - - // Responder decrypts the bytes, in every iteration, and - // should always be able to decrypt the same payload message. - plaintext, err := responder.ReadMessage(&buf) - if err != nil { - t.Fatalf("failed to read message in responder: %v", err) - } - - // Ensure decryption succeeded - if !bytes.Equal(plaintext, payload) { - t.Fatalf("Decryption failed to receive plaintext: %s, got %s", - payload, plaintext) - } - - // Clear out the buffer for the next iteration - buf.Reset() - } -} - -// timeoutWriter wraps an io.Writer and throws an iotest.ErrTimeout after -// writing n bytes. 
-type timeoutWriter struct { - w io.Writer - n int64 -} - -func NewTimeoutWriter(w io.Writer, n int64) io.Writer { - return &timeoutWriter{w, n} -} - -func (t *timeoutWriter) Write(p []byte) (int, error) { - n := len(p) - if int64(n) > t.n { - n = int(t.n) - } - n, err := util.Write(t.w, p[:n]) - t.n -= int64(n) - if err == nil && t.n == 0 { - return n, iotest.ErrTimeout - } - return n, er.Native(err) -} - -const payloadSize = 10 - -type flushChunk struct { - errAfter int64 - expN int - expErr error -} - -type flushTest struct { - name string - chunks []flushChunk -} - -var flushTests = []flushTest{ - { - name: "partial header write", - chunks: []flushChunk{ - // Write 18-byte header in two parts, 16 then 2. - { - errAfter: encHeaderSize - 2, - expN: 0, - expErr: iotest.ErrTimeout, - }, - { - errAfter: 2, - expN: 0, - expErr: iotest.ErrTimeout, - }, - // Write payload and MAC in one go. - { - errAfter: -1, - expN: payloadSize, - }, - }, - }, - { - name: "full payload then full mac", - chunks: []flushChunk{ - // Write entire header and entire payload w/o MAC. - { - errAfter: encHeaderSize + payloadSize, - expN: payloadSize, - expErr: iotest.ErrTimeout, - }, - // Write the entire MAC. - { - errAfter: -1, - expN: 0, - }, - }, - }, - { - name: "payload-only, straddle, mac-only", - chunks: []flushChunk{ - // Write header and all but last byte of payload. - { - errAfter: encHeaderSize + payloadSize - 1, - expN: payloadSize - 1, - expErr: iotest.ErrTimeout, - }, - // Write last byte of payload and first byte of MAC. - { - errAfter: 2, - expN: 1, - expErr: iotest.ErrTimeout, - }, - // Write 10 bytes of the MAC. - { - errAfter: 10, - expN: 0, - expErr: iotest.ErrTimeout, - }, - // Write the remaining 5 MAC bytes. - { - errAfter: -1, - expN: 0, - }, - }, - }, -} - -// TestFlush asserts a Machine's ability to handle timeouts during Flush that -// cause partial writes, and that the machine can properly resume writes on -// subsequent calls to Flush. 
-func TestFlush(t *testing.T) { - // Run each test individually, to assert that they pass in isolation. - for _, test := range flushTests { - t.Run(test.name, func(t *testing.T) { - var ( - w bytes.Buffer - b Machine - ) - b.split() - testFlush(t, test, &b, &w) - }) - } - - // Finally, run the tests serially as if all on one connection. - t.Run("flush serial", func(t *testing.T) { - var ( - w bytes.Buffer - b Machine - ) - b.split() - for _, test := range flushTests { - testFlush(t, test, &b, &w) - } - }) -} - -// testFlush buffers a message on the Machine, then flushes it to the io.Writer -// in chunks. Once complete, a final call to flush is made to assert that Write -// is not called again. -func testFlush(t *testing.T, test flushTest, b *Machine, w io.Writer) { - payload := make([]byte, payloadSize) - if err := b.WriteMessage(payload); err != nil { - t.Fatalf("unable to write message: %v", err) - } - - for _, chunk := range test.chunks { - assertFlush(t, b, w, chunk.errAfter, chunk.expN, chunk.expErr) - } - - // We should always be able to call Flush after a message has been - // successfully written, and it should result in a NOP. - assertFlush(t, b, w, 0, 0, nil) -} - -// assertFlush flushes a chunk to the passed io.Writer. If n >= 0, a -// timeoutWriter will be used the flush should stop with iotest.ErrTimeout after -// n bytes. The method asserts that the returned error matches expErr and that -// the number of bytes written by Flush matches expN. 
-func assertFlush(t *testing.T, b *Machine, w io.Writer, n int64, expN int, - expErr error) { - - t.Helper() - - if n >= 0 { - w = NewTimeoutWriter(w, n) - } - nn, err := b.Flush(w) - if er.Wrapped(err) != expErr { - t.Fatalf("expected flush err: %v, got: %v", expErr, err) - } - if nn != expN { - t.Fatalf("expected n: %d, got: %d", expN, nn) - } -} diff --git a/lnd/buffer/buffer_test.go b/lnd/buffer/buffer_test.go deleted file mode 100644 index efda4c88..00000000 --- a/lnd/buffer/buffer_test.go +++ /dev/null @@ -1,44 +0,0 @@ -package buffer_test - -import ( - "bytes" - "testing" - - "github.com/pkt-cash/pktd/lnd/buffer" -) - -// TestRecycleSlice asserts that RecycleSlice always zeros a byte slice. -func TestRecycleSlice(t *testing.T) { - tests := []struct { - name string - slice []byte - }{ - { - name: "length zero", - }, - { - name: "length one", - slice: []byte("a"), - }, - { - name: "length power of two length", - slice: bytes.Repeat([]byte("b"), 16), - }, - { - name: "length non power of two", - slice: bytes.Repeat([]byte("c"), 27), - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - buffer.RecycleSlice(test.slice) - - expSlice := make([]byte, len(test.slice)) - if !bytes.Equal(expSlice, test.slice) { - t.Fatalf("slice not recycled, want: %v, got: %v", - expSlice, test.slice) - } - }) - } -} diff --git a/lnd/buffer/read.go b/lnd/buffer/read.go deleted file mode 100644 index 1b12d20e..00000000 --- a/lnd/buffer/read.go +++ /dev/null @@ -1,19 +0,0 @@ -package buffer - -import ( - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -// ReadSize represents the size of the maximum message that can be read off the -// wire by brontide. The buffer is used to hold the ciphertext while the -// brontide state machine decrypts the message. -const ReadSize = lnwire.MaxMessagePayload + 16 - -// Read is a static byte array sized to the maximum-allowed Lightning message -// size, plus 16 bytes for the MAC. 
-type Read [ReadSize]byte - -// Recycle zeroes the Read, making it fresh for another use. -func (b *Read) Recycle() { - RecycleSlice(b[:]) -} diff --git a/lnd/buffer/utils.go b/lnd/buffer/utils.go deleted file mode 100644 index 40a386a9..00000000 --- a/lnd/buffer/utils.go +++ /dev/null @@ -1,17 +0,0 @@ -package buffer - -// RecycleSlice zeroes byte slice, making it fresh for another use. -// Zeroing the buffer using a logarithmic number of calls to the optimized copy -// method. Benchmarking shows this to be ~30 times faster than a for loop that -// sets each index to 0 for ~65KB buffers use for wire messages. Inspired by: -// https://stackoverflow.com/questions/30614165/is-there-analog-of-memset-in-go -func RecycleSlice(b []byte) { - if len(b) == 0 { - return - } - - b[0] = 0 - for i := 1; i < len(b); i *= 2 { - copy(b[i:], b[:i]) - } -} diff --git a/lnd/buffer/write.go b/lnd/buffer/write.go deleted file mode 100644 index a2bcfc9c..00000000 --- a/lnd/buffer/write.go +++ /dev/null @@ -1,19 +0,0 @@ -package buffer - -import ( - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -// WriteSize represents the size of the maximum plaintext message than can be -// sent using brontide. The buffer does not include extra space for the MAC, as -// that is applied by the Noise protocol after encrypting the plaintext. -const WriteSize = lnwire.MaxMessagePayload - -// Write is static byte array occupying to maximum-allowed plaintext-message -// size. -type Write [WriteSize]byte - -// Recycle zeroes the Write, making it fresh for another use. -func (b *Write) Recycle() { - RecycleSlice(b[:]) -} diff --git a/lnd/build/deployment.go b/lnd/build/deployment.go deleted file mode 100644 index 410f7e96..00000000 --- a/lnd/build/deployment.go +++ /dev/null @@ -1,36 +0,0 @@ -package build - -// DeploymentType is an enum specifying the deployment to compile. -type DeploymentType byte - -const ( - // Development is a deployment that includes extra testing hooks and - // logging configurations. 
- Development DeploymentType = iota - - // Production is a deployment that strips out testing logic and uses - // Default logging. - Production -) - -// String returns a human readable name for a build type. -func (b DeploymentType) String() string { - switch b { - case Development: - return "development" - case Production: - return "production" - default: - return "unknown" - } -} - -// IsProdBuild returns true if this is a production build. -func IsProdBuild() bool { - return Deployment == Production -} - -// IsDevBuild returns true if this is a development build. -func IsDevBuild() bool { - return Deployment == Development -} diff --git a/lnd/build/deployment_dev.go b/lnd/build/deployment_dev.go deleted file mode 100644 index fb2bb2b9..00000000 --- a/lnd/build/deployment_dev.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build dev - -package build - -// Deployment specifies a development build. -const Deployment = Development diff --git a/lnd/build/deployment_prod.go b/lnd/build/deployment_prod.go deleted file mode 100644 index 247f25ae..00000000 --- a/lnd/build/deployment_prod.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build !dev - -package build - -// Deployment specifies a production build. 
-const Deployment = Production diff --git a/lnd/cert/go.sum b/lnd/cert/go.sum deleted file mode 100644 index 331fa698..00000000 --- a/lnd/cert/go.sum +++ /dev/null @@ -1,11 +0,0 @@ -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/lnd/cert/selfsigned.go b/lnd/cert/selfsigned.go deleted file mode 100644 index 207162f5..00000000 --- a/lnd/cert/selfsigned.go +++ /dev/null @@ -1,292 +0,0 @@ -package cert - -import ( - "bytes" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/x509" - "crypto/x509/pkix" - "encoding/pem" - "io/ioutil" - "math/big" - "net" - "os" - "time" - - "github.com/pkt-cash/pktd/btcutil/er" -) - -const ( - // DefaultAutogenValidity is the default validity of a self-signed - // certificate. The value corresponds to 14 months - // (14 months * 30 days * 24 hours). - DefaultAutogenValidity = 14 * 30 * 24 * time.Hour -) - -var ( - // End of ASN.1 time. - endOfTime = time.Date(2049, 12, 31, 23, 59, 59, 0, time.UTC) - - // Max serial number. 
- serialNumberLimit = new(big.Int).Lsh(big.NewInt(1), 128) -) - -// ipAddresses returns the parserd IP addresses to use when creating the TLS -// certificate. If tlsDisableAutofill is true, we don't include interface -// addresses to protect users privacy. -func ipAddresses(tlsExtraIPs []string, tlsDisableAutofill bool) ([]net.IP, er.R) { - // Collect the host's IP addresses, including loopback, in a slice. - ipAddresses := []net.IP{net.ParseIP("127.0.0.1"), net.ParseIP("::1")} - - // addIP appends an IP address only if it isn't already in the slice. - addIP := func(ipAddr net.IP) { - for _, ip := range ipAddresses { - if ip.Equal(ipAddr) { - return - } - } - ipAddresses = append(ipAddresses, ipAddr) - } - - // To protect their privacy, some users might not want to have all - // their network addresses include in the certificate as this could - // leak sensitive information. - if !tlsDisableAutofill { - // Add all the interface IPs that aren't already in the slice. - addrs, err := net.InterfaceAddrs() - if err != nil { - return nil, er.E(err) - } - for _, a := range addrs { - ipAddr, _, err := net.ParseCIDR(a.String()) - if err == nil { - addIP(ipAddr) - } - } - } - - // Add extra IPs to the slice. - for _, ip := range tlsExtraIPs { - ipAddr := net.ParseIP(ip) - if ipAddr != nil { - addIP(ipAddr) - } - } - - return ipAddresses, nil -} - -// dnsNames returns the host and DNS names to use when creating the TLS -// ceftificate. -func dnsNames(tlsExtraDomains []string, tlsDisableAutofill bool) (string, []string) { - // Collect the host's names into a slice. - host, err := os.Hostname() - - // To further protect their privacy, some users might not want - // to have their hostname include in the certificate as this could - // leak sensitive information. - if err != nil || tlsDisableAutofill { - // Nothing much we can do here, other than falling back to - // localhost as fallback. 
A hostname can still be provided with - // the tlsExtraDomain parameter if the problem persists on a - // system. - host = "localhost" - } - - dnsNames := []string{host} - if host != "localhost" { - dnsNames = append(dnsNames, "localhost") - } - dnsNames = append(dnsNames, tlsExtraDomains...) - - // Because we aren't including the hostname in the certificate when - // tlsDisableAutofill is set, we will use the first extra domain - // specified by the user, if it's set, as the Common Name. - if tlsDisableAutofill && len(tlsExtraDomains) > 0 { - host = tlsExtraDomains[0] - } - - // Also add fake hostnames for unix sockets, otherwise hostname - // verification will fail in the client. - dnsNames = append(dnsNames, "unix", "unixpacket") - - // Also add hostnames for 'bufconn' which is the hostname used for the - // in-memory connections used on mobile. - dnsNames = append(dnsNames, "bufconn") - - return host, dnsNames -} - -// IsOutdated returns whether the given certificate is outdated w.r.t. the IPs -// and domains given. The certificate is considered up to date if it was -// created with _exactly_ the IPs and domains given. -func IsOutdated(cert *x509.Certificate, tlsExtraIPs, - tlsExtraDomains []string, tlsDisableAutofill bool) (bool, er.R) { - - // Parse the slice of IP strings. - ips, err := ipAddresses(tlsExtraIPs, tlsDisableAutofill) - if err != nil { - return false, err - } - - // To not consider the certificate outdated if it has duplicate IPs or - // if only the order has changed, we create two maps from the slice of - // IPs to compare. - ips1 := make(map[string]net.IP) - for _, ip := range ips { - ips1[ip.String()] = ip - } - - ips2 := make(map[string]net.IP) - for _, ip := range cert.IPAddresses { - ips2[ip.String()] = ip - } - - // If the certificate has a different number of IP addresses, it is - // definitely out of date. - if len(ips1) != len(ips2) { - return true, nil - } - - // Go through each IP address, and check that they are equal. 
We expect - // both the string representation and the exact IP to match. - for s, ip1 := range ips1 { - // Assert the IP string is found in both sets. - ip2, ok := ips2[s] - if !ok { - return true, nil - } - - // And that the IPs are considered equal. - if !ip1.Equal(ip2) { - return true, nil - } - } - - // Get the full list of DNS names to use. - _, dnsNames := dnsNames(tlsExtraDomains, tlsDisableAutofill) - - // We do the same kind of deduplication for the DNS names. - dns1 := make(map[string]struct{}) - for _, n := range cert.DNSNames { - dns1[n] = struct{}{} - } - - dns2 := make(map[string]struct{}) - for _, n := range dnsNames { - dns2[n] = struct{}{} - } - - // If the number of domains are different, it is out of date. - if len(dns1) != len(dns2) { - return true, nil - } - - // Similarly, check that each DNS name matches what is found in the - // certificate. - for k := range dns1 { - if _, ok := dns2[k]; !ok { - return true, nil - } - } - - // Certificate was up-to-date. - return false, nil -} - -// GenCertPair generates a key/cert pair to the paths provided. The -// auto-generated certificates should *not* be used in production for public -// access as they're self-signed and don't necessarily contain all of the -// desired hostnames for the service. For production/public use, consider a -// real PKI. -// -// This function is adapted from https://github.com/btcsuite/btcd and -// https://github.com/btcsuite/btcutil -func GenCertPair(org, certFile, keyFile string, tlsExtraIPs, - tlsExtraDomains []string, tlsDisableAutofill bool, - certValidity time.Duration) er.R { - - now := time.Now() - validUntil := now.Add(certValidity) - - // Check that the certificate validity isn't past the ASN.1 end of time. - if validUntil.After(endOfTime) { - validUntil = endOfTime - } - - // Generate a serial number that's below the serialNumberLimit. 
- serialNumber, errr := rand.Int(rand.Reader, serialNumberLimit) - if errr != nil { - return er.Errorf("failed to generate serial number: %s", errr) - } - - // Get all DNS names and IP addresses to use when creating the - // certificate. - host, dnsNames := dnsNames(tlsExtraDomains, tlsDisableAutofill) - ipAddresses, err := ipAddresses(tlsExtraIPs, tlsDisableAutofill) - if err != nil { - return err - } - - // Generate a private key for the certificate. - priv, errr := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - if errr != nil { - return er.E(errr) - } - - // Construct the certificate template. - template := x509.Certificate{ - SerialNumber: serialNumber, - Subject: pkix.Name{ - Organization: []string{org}, - CommonName: host, - }, - NotBefore: now.Add(-time.Hour * 24), - NotAfter: validUntil, - - KeyUsage: x509.KeyUsageKeyEncipherment | - x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, - ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, - IsCA: true, // so can sign self. - BasicConstraintsValid: true, - - DNSNames: dnsNames, - IPAddresses: ipAddresses, - } - - derBytes, errr := x509.CreateCertificate(rand.Reader, &template, - &template, &priv.PublicKey, priv) - if errr != nil { - return er.Errorf("failed to create certificate: %v", errr) - } - - certBuf := &bytes.Buffer{} - errr = pem.Encode(certBuf, &pem.Block{Type: "CERTIFICATE", - Bytes: derBytes}) - if errr != nil { - return er.Errorf("failed to encode certificate: %v", errr) - } - - keybytes, errr := x509.MarshalECPrivateKey(priv) - if errr != nil { - return er.Errorf("unable to encode privkey: %v", errr) - } - keyBuf := &bytes.Buffer{} - errr = pem.Encode(keyBuf, &pem.Block{Type: "EC PRIVATE KEY", - Bytes: keybytes}) - if errr != nil { - return er.Errorf("failed to encode private key: %v", errr) - } - - // Write cert and key files. 
- if errr = ioutil.WriteFile(certFile, certBuf.Bytes(), 0644); errr != nil { - return er.E(errr) - } - if errr = ioutil.WriteFile(keyFile, keyBuf.Bytes(), 0600); errr != nil { - os.Remove(certFile) - return er.E(errr) - } - - return nil -} diff --git a/lnd/cert/selfsigned_test.go b/lnd/cert/selfsigned_test.go deleted file mode 100644 index 080bf58d..00000000 --- a/lnd/cert/selfsigned_test.go +++ /dev/null @@ -1,193 +0,0 @@ -package cert_test - -import ( - "io/ioutil" - "testing" - - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/lnd/cert" - "github.com/stretchr/testify/require" -) - -var ( - extraIPs = []string{"1.1.1.1", "123.123.123.1", "199.189.12.12"} - extraDomains = []string{"home", "and", "away"} -) - -// TestIsOutdatedCert checks that we'll consider the TLS certificate outdated -// if the ip addresses or dns names don't match. -func TestIsOutdatedCert(t *testing.T) { - tempDir, errr := ioutil.TempDir("", "certtest") - if errr != nil { - t.Fatal(errr) - } - - certPath := tempDir + "/tls.cert" - keyPath := tempDir + "/tls.key" - - // Generate TLS files with two extra IPs and domains. - err := cert.GenCertPair( - "lnd autogenerated cert", certPath, keyPath, extraIPs[:2], - extraDomains[:2], false, cert.DefaultAutogenValidity, - ) - if err != nil { - t.Fatal(err) - } - - // We'll attempt to check up-to-date status for all variants of 1-3 - // number of IPs and domains. - for numIPs := 1; numIPs <= len(extraIPs); numIPs++ { - for numDomains := 1; numDomains <= len(extraDomains); numDomains++ { - _, parsedCert, errr := cert.LoadCert( - certPath, keyPath, - ) - if errr != nil { - t.Fatal(errr) - } - - // Using the test case's number of IPs and domains, get - // the outdated status of the certificate we created - // above. 
- outdated, err := cert.IsOutdated( - parsedCert, extraIPs[:numIPs], - extraDomains[:numDomains], false, - ) - if err != nil { - t.Fatal(err) - } - - // We expect it to be considered outdated if the IPs or - // domains don't match exactly what we created. - expected := numIPs != 2 || numDomains != 2 - if outdated != expected { - t.Fatalf("expected certificate to be "+ - "outdated=%v, got=%v", expected, - outdated) - } - } - } -} - -// TestIsOutdatedPermutation tests that the order of listed IPs or DNS names, -// nor dulicates in the lists, matter for whether we consider the certificate -// outdated. -func TestIsOutdatedPermutation(t *testing.T) { - tempDir, errr := ioutil.TempDir("", "certtest") - if errr != nil { - t.Fatal(errr) - } - - certPath := tempDir + "/tls.cert" - keyPath := tempDir + "/tls.key" - - // Generate TLS files from the IPs and domains. - err := cert.GenCertPair( - "lnd autogenerated cert", certPath, keyPath, extraIPs[:], - extraDomains[:], false, cert.DefaultAutogenValidity, - ) - if err != nil { - t.Fatal(err) - } - _, parsedCert, errr := cert.LoadCert(certPath, keyPath) - if errr != nil { - t.Fatal(errr) - } - - // If we have duplicate IPs or DNS names listed, that shouldn't matter. - dupIPs := make([]string, len(extraIPs)*2) - for i := range dupIPs { - dupIPs[i] = extraIPs[i/2] - } - - dupDNS := make([]string, len(extraDomains)*2) - for i := range dupDNS { - dupDNS[i] = extraDomains[i/2] - } - - outdated, err := cert.IsOutdated(parsedCert, dupIPs, dupDNS, false) - if err != nil { - t.Fatal(err) - } - - if outdated { - t.Fatalf("did not expect duplicate IPs or DNS names be " + - "considered outdated") - } - - // Similarly, the order of the lists shouldn't matter. 
- revIPs := make([]string, len(extraIPs)) - for i := range revIPs { - revIPs[i] = extraIPs[len(extraIPs)-1-i] - } - - revDNS := make([]string, len(extraDomains)) - for i := range revDNS { - revDNS[i] = extraDomains[len(extraDomains)-1-i] - } - - outdated, err = cert.IsOutdated(parsedCert, revIPs, revDNS, false) - if err != nil { - t.Fatal(err) - } - - if outdated { - t.Fatalf("did not expect reversed IPs or DNS names be " + - "considered outdated") - } -} - -// TestTLSDisableAutofill checks that setting the --tlsdisableautofill flag -// does not add interface ip addresses or hostnames to the cert. -func TestTLSDisableAutofill(t *testing.T) { - tempDir, errr := ioutil.TempDir("", "certtest") - if errr != nil { - t.Fatal(errr) - } - - certPath := tempDir + "/tls.cert" - keyPath := tempDir + "/tls.key" - - // Generate TLS files with two extra IPs and domains and no interface IPs. - err := cert.GenCertPair( - "lnd autogenerated cert", certPath, keyPath, extraIPs[:2], - extraDomains[:2], true, cert.DefaultAutogenValidity, - ) - util.RequireNoErr( - t, err, - "unable to generate tls certificate pair", - ) - - _, parsedCert, errr := cert.LoadCert( - certPath, keyPath, - ) - require.NoError( - t, errr, - "unable to load tls certificate pair", - ) - - // Check if the TLS cert is outdated while still preventing - // interface IPs from being used. Should not be outdated - shouldNotBeOutdated, err := cert.IsOutdated( - parsedCert, extraIPs[:2], - extraDomains[:2], true, - ) - util.RequireNoErr(t, err) - - require.Equal( - t, false, shouldNotBeOutdated, - "TLS Certificate was marked as outdated when it should not be", - ) - - // Check if the TLS cert is outdated while allowing for - // interface IPs to be used. Should report as outdated. 
- shouldBeOutdated, err := cert.IsOutdated( - parsedCert, extraIPs[:2], - extraDomains[:2], false, - ) - util.RequireNoErr(t, err) - - require.Equal( - t, true, shouldBeOutdated, - "TLS Certificate was not marked as outdated when it should be", - ) -} diff --git a/lnd/cert/tls.go b/lnd/cert/tls.go deleted file mode 100644 index a8783158..00000000 --- a/lnd/cert/tls.go +++ /dev/null @@ -1,60 +0,0 @@ -package cert - -import ( - "crypto/tls" - "crypto/x509" -) - -var ( - /* - * tlsCipherSuites is the list of cipher suites we accept for TLS - * connections. These cipher suites fit the following criteria: - * - Don't use outdated algorithms like SHA-1 and 3DES - * - Don't use ECB mode or other insecure symmetric methods - * - Included in the TLS v1.2 suite - * - Are available in the Go 1.7.6 standard library (more are - * available in 1.8.3 and will be added after lnd no longer - * supports 1.7, including suites that support CBC mode) - **/ - tlsCipherSuites = []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, - } -) - -// LoadCert loads a certificate and its corresponding private key from the PEM -// files indicated and returns the certificate in the two formats it is most -// commonly used. -func LoadCert(certPath, keyPath string) (tls.Certificate, *x509.Certificate, - error) { - - // The certData returned here is just a wrapper around the PEM blocks - // loaded from the file. The PEM is not yet fully parsed but a basic - // check is performed that the certificate and private key actually - // belong together. - certData, err := tls.LoadX509KeyPair(certPath, keyPath) - if err != nil { - return tls.Certificate{}, nil, err - } - - // Now parse the the PEM block of the certificate into its x509 data - // structure so it can be examined in more detail. 
- x509Cert, err := x509.ParseCertificate(certData.Certificate[0]) - if err != nil { - return tls.Certificate{}, nil, err - } - - return certData, x509Cert, nil -} - -// TLSConfFromCert returns the default TLS configuration used for a server, -// using the given certificate as identity. -func TLSConfFromCert(certData tls.Certificate) *tls.Config { - return &tls.Config{ - Certificates: []tls.Certificate{certData}, - CipherSuites: tlsCipherSuites, - MinVersion: tls.VersionTLS12, - } -} diff --git a/lnd/chainntnfs/README.md b/lnd/chainntnfs/README.md deleted file mode 100644 index 353dca0c..00000000 --- a/lnd/chainntnfs/README.md +++ /dev/null @@ -1,30 +0,0 @@ -chainntnfs -========== - -[![Build Status](http://img.shields.io/travis/lightningnetwork/lnd.svg)](https://travis-ci.org/lightningnetwork/lnd) -[![MIT licensed](https://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/lightningnetwork/lnd/blob/master/LICENSE) -[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/lightningnetwork/lnd/chainntnfs) - -The chainntnfs package implements a set of interfaces which allow callers to -receive notifications in response to specific on-chain events. The set of -notifications available include: - - * Notifications for each new block connected to the current best chain. - * Notifications once a `txid` has reached a specified number of - confirmations. - * Notifications once a target outpoint (`txid:index`) has been spent. - -These notifications are used within `lnd` in order to properly handle the -workflows for: channel funding, cooperative channel closures, forced channel -closures, channel contract breaches, sweeping time-locked outputs, and finally -pruning the channel graph. - -This package is intentionally general enough to be applicable outside the -specific use cases within `lnd` outlined above. The current sole concrete -implementation of the `ChainNotifier` interface depends on `btcd`. 
- -## Installation and Updating - -```bash -$ go get -u github.com/lightningnetwork/lnd/chainntnfs -``` diff --git a/lnd/chainntnfs/btcdnotify/btcd.go b/lnd/chainntnfs/btcdnotify/btcd.go deleted file mode 100644 index 4fd1dedf..00000000 --- a/lnd/chainntnfs/btcdnotify/btcd.go +++ /dev/null @@ -1,1013 +0,0 @@ -package btcdnotify - -import ( - "sync" - "sync/atomic" - "time" - - "github.com/pkt-cash/pktd/btcjson" - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/chaincfg" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/lnd/chainntnfs" - "github.com/pkt-cash/pktd/lnd/queue" - "github.com/pkt-cash/pktd/pktlog/log" - "github.com/pkt-cash/pktd/rpcclient" - "github.com/pkt-cash/pktd/txscript" - "github.com/pkt-cash/pktd/wire" -) - -const ( - // notifierType uniquely identifies this concrete implementation of the - // ChainNotifier interface. - notifierType = "btcd" -) - -// chainUpdate encapsulates an update to the current main chain. This struct is -// used as an element within an unbounded queue in order to avoid blocking the -// main rpc dispatch rule. -type chainUpdate struct { - blockHash *chainhash.Hash - blockHeight int32 - - // connected is true if this update is a new block and false if it is a - // disconnected block. - connect bool -} - -// txUpdate encapsulates a transaction related notification sent from btcd to -// the registered RPC client. This struct is used as an element within an -// unbounded queue in order to avoid blocking the main rpc dispatch rule. -type txUpdate struct { - tx *btcutil.Tx - details *btcjson.BlockDetails -} - -// TODO(roasbeef): generalize struct below: -// * move chans to config, allow outside callers to handle send conditions - -// BtcdNotifier implements the ChainNotifier interface using btcd's websockets -// notifications. Multiple concurrent clients are supported. All notifications -// are achieved via non-blocking sends on client channels. 
-type BtcdNotifier struct { - epochClientCounter uint64 // To be used atomically. - - start sync.Once - active int32 // To be used atomically. - stopped int32 // To be used atomically. - - chainConn *rpcclient.Client - chainParams *chaincfg.Params - - notificationCancels chan interface{} - notificationRegistry chan interface{} - - txNotifier *chainntnfs.TxNotifier - - blockEpochClients map[uint64]*blockEpochRegistration - - bestBlock chainntnfs.BlockEpoch - - chainUpdates *queue.ConcurrentQueue - txUpdates *queue.ConcurrentQueue - - // spendHintCache is a cache used to query and update the latest height - // hints for an outpoint. Each height hint represents the earliest - // height at which the outpoint could have been spent within the chain. - spendHintCache chainntnfs.SpendHintCache - - // confirmHintCache is a cache used to query the latest height hints for - // a transaction. Each height hint represents the earliest height at - // which the transaction could have confirmed within the chain. - confirmHintCache chainntnfs.ConfirmHintCache - - wg sync.WaitGroup - quit chan struct{} -} - -// Ensure BtcdNotifier implements the ChainNotifier interface at compile time. -var _ chainntnfs.ChainNotifier = (*BtcdNotifier)(nil) - -// New returns a new BtcdNotifier instance. This function assumes the btcd node -// detailed in the passed configuration is already running, and willing to -// accept new websockets clients. 
-func New(config *rpcclient.ConnConfig, chainParams *chaincfg.Params, - spendHintCache chainntnfs.SpendHintCache, - confirmHintCache chainntnfs.ConfirmHintCache) (*BtcdNotifier, er.R) { - - notifier := &BtcdNotifier{ - chainParams: chainParams, - - notificationCancels: make(chan interface{}), - notificationRegistry: make(chan interface{}), - - blockEpochClients: make(map[uint64]*blockEpochRegistration), - - chainUpdates: queue.NewConcurrentQueue(10), - txUpdates: queue.NewConcurrentQueue(10), - - spendHintCache: spendHintCache, - confirmHintCache: confirmHintCache, - - quit: make(chan struct{}), - } - - ntfnCallbacks := &rpcclient.NotificationHandlers{ - OnBlockConnected: notifier.onBlockConnected, - OnBlockDisconnected: notifier.onBlockDisconnected, - OnRedeemingTx: notifier.onRedeemingTx, - } - - // Disable connecting to btcd within the rpcclient.New method. We - // defer establishing the connection to our .Start() method. - config.DisableConnectOnNew = true - config.DisableAutoReconnect = false - chainConn, err := rpcclient.New(config, ntfnCallbacks) - if err != nil { - return nil, err - } - notifier.chainConn = chainConn - - return notifier, nil -} - -// Start connects to the running btcd node over websockets, registers for block -// notifications, and finally launches all related helper goroutines. -func (b *BtcdNotifier) Start() er.R { - var startErr er.R - b.start.Do(func() { - startErr = b.startNotifier() - }) - return startErr -} - -// Started returns true if this instance has been started, and false otherwise. -func (b *BtcdNotifier) Started() bool { - return atomic.LoadInt32(&b.active) != 0 -} - -// Stop shutsdown the BtcdNotifier. -func (b *BtcdNotifier) Stop() er.R { - // Already shutting down? - if atomic.AddInt32(&b.stopped, 1) != 1 { - return nil - } - - // Shutdown the rpc client, this gracefully disconnects from btcd, and - // cleans up all related resources. 
- b.chainConn.Shutdown() - - close(b.quit) - b.wg.Wait() - - b.chainUpdates.Stop() - b.txUpdates.Stop() - - // Notify all pending clients of our shutdown by closing the related - // notification channels. - for _, epochClient := range b.blockEpochClients { - close(epochClient.cancelChan) - epochClient.wg.Wait() - - close(epochClient.epochChan) - } - b.txNotifier.TearDown() - - return nil -} - -func (b *BtcdNotifier) startNotifier() er.R { - // Start our concurrent queues before starting the chain connection, to - // ensure onBlockConnected and onRedeemingTx callbacks won't be - // blocked. - b.chainUpdates.Start() - b.txUpdates.Start() - - // Connect to btcd, and register for notifications on connected, and - // disconnected blocks. - if err := b.chainConn.Connect(20); err != nil { - b.txUpdates.Stop() - b.chainUpdates.Stop() - return err - } - - currentHash, currentHeight, err := b.chainConn.GetBestBlock() - if err != nil { - b.txUpdates.Stop() - b.chainUpdates.Stop() - return err - } - - b.txNotifier = chainntnfs.NewTxNotifier( - uint32(currentHeight), chainntnfs.ReorgSafetyLimit, - b.confirmHintCache, b.spendHintCache, - ) - - b.bestBlock = chainntnfs.BlockEpoch{ - Height: currentHeight, - Hash: currentHash, - } - - if err := b.chainConn.NotifyBlocks(); err != nil { - b.txUpdates.Stop() - b.chainUpdates.Stop() - return err - } - - b.wg.Add(1) - go b.notificationDispatcher() - - // Set the active flag now that we've completed the full - // startup. - atomic.StoreInt32(&b.active, 1) - - return nil -} - -// onBlockConnected implements on OnBlockConnected callback for rpcclient. -// Ingesting a block updates the wallet's internal utxo state based on the -// outputs created and destroyed within each block. -func (b *BtcdNotifier) onBlockConnected(hash *chainhash.Hash, height int32, t time.Time) { - // Append this new chain update to the end of the queue of new chain - // updates. 
- select { - case b.chainUpdates.ChanIn() <- &chainUpdate{ - blockHash: hash, - blockHeight: height, - connect: true, - }: - case <-b.quit: - return - } -} - -// filteredBlock represents a new block which has been connected to the main -// chain. The slice of transactions will only be populated if the block -// includes a transaction that confirmed one of our watched txids, or spends -// one of the outputs currently being watched. -// TODO(halseth): this is currently used for complete blocks. Change to use -// onFilteredBlockConnected and onFilteredBlockDisconnected, making it easier -// to unify with the Neutrino implementation. -type filteredBlock struct { - hash chainhash.Hash - height uint32 - txns []*btcutil.Tx - - // connected is true if this update is a new block and false if it is a - // disconnected block. - connect bool -} - -// onBlockDisconnected implements on OnBlockDisconnected callback for rpcclient. -func (b *BtcdNotifier) onBlockDisconnected(hash *chainhash.Hash, height int32, t time.Time) { - // Append this new chain update to the end of the queue of new chain - // updates. - select { - case b.chainUpdates.ChanIn() <- &chainUpdate{ - blockHash: hash, - blockHeight: height, - connect: false, - }: - case <-b.quit: - return - } -} - -// onRedeemingTx implements on OnRedeemingTx callback for rpcclient. -func (b *BtcdNotifier) onRedeemingTx(tx *btcutil.Tx, details *btcjson.BlockDetails) { - // Append this new transaction update to the end of the queue of new - // chain updates. - select { - case b.txUpdates.ChanIn() <- &txUpdate{tx, details}: - case <-b.quit: - return - } -} - -// notificationDispatcher is the primary goroutine which handles client -// notification registrations, as well as notification dispatches. 
-func (b *BtcdNotifier) notificationDispatcher() { - defer b.wg.Done() - -out: - for { - select { - case cancelMsg := <-b.notificationCancels: - switch msg := cancelMsg.(type) { - case *epochCancel: - log.Infof("Cancelling epoch "+ - "notification, epoch_id=%v", msg.epochID) - - // First, we'll lookup the original - // registration in order to stop the active - // queue goroutine. - reg := b.blockEpochClients[msg.epochID] - reg.epochQueue.Stop() - - // Next, close the cancel channel for this - // specific client, and wait for the client to - // exit. - close(b.blockEpochClients[msg.epochID].cancelChan) - b.blockEpochClients[msg.epochID].wg.Wait() - - // Once the client has exited, we can then - // safely close the channel used to send epoch - // notifications, in order to notify any - // listeners that the intent has been - // canceled. - close(b.blockEpochClients[msg.epochID].epochChan) - delete(b.blockEpochClients, msg.epochID) - } - case registerMsg := <-b.notificationRegistry: - switch msg := registerMsg.(type) { - case *chainntnfs.HistoricalConfDispatch: - // Look up whether the transaction/output script - // has already confirmed in the active chain. - // We'll do this in a goroutine to prevent - // blocking potentially long rescans. - // - // TODO(wilmer): add retry logic if rescan fails? - b.wg.Add(1) - go func() { - defer b.wg.Done() - - confDetails, _, err := b.historicalConfDetails( - msg.ConfRequest, - msg.StartHeight, msg.EndHeight, - ) - if err != nil { - log.Error(err) - return - } - - // If the historical dispatch finished - // without error, we will invoke - // UpdateConfDetails even if none were - // found. This allows the notifier to - // begin safely updating the height hint - // cache at tip, since any pending - // rescans have now completed. 
- err = b.txNotifier.UpdateConfDetails( - msg.ConfRequest, confDetails, - ) - if err != nil { - log.Error(err) - } - }() - - case *blockEpochRegistration: - log.Infof("New block epoch subscription") - - b.blockEpochClients[msg.epochID] = msg - - // If the client did not provide their best - // known block, then we'll immediately dispatch - // a notification for the current tip. - if msg.bestBlock == nil { - b.notifyBlockEpochClient( - msg, b.bestBlock.Height, - b.bestBlock.Hash, - ) - - msg.errorChan <- nil - continue - } - - // Otherwise, we'll attempt to deliver the - // backlog of notifications from their best - // known block. - missedBlocks, err := chainntnfs.GetClientMissedBlocks( - b.chainConn, msg.bestBlock, - b.bestBlock.Height, true, - ) - if err != nil { - msg.errorChan <- err - continue - } - - for _, block := range missedBlocks { - b.notifyBlockEpochClient( - msg, block.Height, block.Hash, - ) - } - - msg.errorChan <- nil - } - - case item := <-b.chainUpdates.ChanOut(): - update := item.(*chainUpdate) - if update.connect { - blockHeader, err := - b.chainConn.GetBlockHeader(update.blockHash) - if err != nil { - log.Errorf("Unable to fetch "+ - "block header: %v", err) - continue - } - - if blockHeader.PrevBlock != *b.bestBlock.Hash { - // Handle the case where the notifier - // missed some blocks from its chain - // backend - log.Infof("Missed blocks, " + - "attempting to catch up") - newBestBlock, missedBlocks, err := - chainntnfs.HandleMissedBlocks( - b.chainConn, - b.txNotifier, - b.bestBlock, - update.blockHeight, - true, - ) - if err != nil { - // Set the bestBlock here in case - // a catch up partially completed. 
- b.bestBlock = newBestBlock - log.Error(err) - continue - } - - for _, block := range missedBlocks { - err := b.handleBlockConnected(block) - if err != nil { - log.Error(err) - continue out - } - } - } - - newBlock := chainntnfs.BlockEpoch{ - Height: update.blockHeight, - Hash: update.blockHash, - } - if err := b.handleBlockConnected(newBlock); err != nil { - log.Error(err) - } - continue - } - - if update.blockHeight != b.bestBlock.Height { - log.Infof("Missed disconnected" + - "blocks, attempting to catch up") - } - - newBestBlock, err := chainntnfs.RewindChain( - b.chainConn, b.txNotifier, b.bestBlock, - update.blockHeight-1, - ) - if err != nil { - log.Errorf("Unable to rewind chain "+ - "from height %d to height %d: %v", - b.bestBlock.Height, update.blockHeight-1, err) - } - - // Set the bestBlock here in case a chain rewind - // partially completed. - b.bestBlock = newBestBlock - - case item := <-b.txUpdates.ChanOut(): - newSpend := item.(*txUpdate) - - // We only care about notifying on confirmed spends, so - // if this is a mempool spend, we can ignore it and wait - // for the spend to appear in on-chain. - if newSpend.details == nil { - continue - } - - err := b.txNotifier.ProcessRelevantSpendTx( - newSpend.tx, uint32(newSpend.details.Height), - ) - if err != nil { - log.Errorf("Unable to process "+ - "transaction %v: %v", - newSpend.tx.Hash(), err) - } - - case <-b.quit: - break out - } - } -} - -// historicalConfDetails looks up whether a confirmation request (txid/output -// script) has already been included in a block in the active chain and, if so, -// returns details about said block. 
-func (b *BtcdNotifier) historicalConfDetails(confRequest chainntnfs.ConfRequest, - startHeight, endHeight uint32) (*chainntnfs.TxConfirmation, - chainntnfs.TxConfStatus, er.R) { - - // If a txid was not provided, then we should dispatch upon seeing the - // script on-chain, so we'll short-circuit straight to scanning manually - // as there doesn't exist a script index to query. - if confRequest.TxID == chainntnfs.ZeroHash { - return b.confDetailsManually( - confRequest, startHeight, endHeight, - ) - } - - // Otherwise, we'll dispatch upon seeing a transaction on-chain with the - // given hash. - // - // We'll first attempt to retrieve the transaction using the node's - // txindex. - txNotFoundErr := "No information available about transaction" - txConf, txStatus, err := chainntnfs.ConfDetailsFromTxIndex( - b.chainConn, confRequest, txNotFoundErr, - ) - - // We'll then check the status of the transaction lookup returned to - // determine whether we should proceed with any fallback methods. - switch { - - // We failed querying the index for the transaction, fall back to - // scanning manually. - case err != nil: - log.Debugf("Unable to determine confirmation of %v "+ - "through the backend's txindex (%v), scanning manually", - confRequest.TxID, err) - - return b.confDetailsManually( - confRequest, startHeight, endHeight, - ) - - // The transaction was found within the node's mempool. - case txStatus == chainntnfs.TxFoundMempool: - - // The transaction was found within the node's txindex. - case txStatus == chainntnfs.TxFoundIndex: - - // The transaction was not found within the node's mempool or txindex. - case txStatus == chainntnfs.TxNotFoundIndex: - - // Unexpected txStatus returned. 
- default: - return nil, txStatus, - er.Errorf("Got unexpected txConfStatus: %v", txStatus) - } - - return txConf, txStatus, nil -} - -// confDetailsManually looks up whether a transaction/output script has already -// been included in a block in the active chain by scanning the chain's blocks -// within the given range. If the transaction/output script is found, its -// confirmation details are returned. Otherwise, nil is returned. -func (b *BtcdNotifier) confDetailsManually(confRequest chainntnfs.ConfRequest, - startHeight, endHeight uint32) (*chainntnfs.TxConfirmation, - chainntnfs.TxConfStatus, er.R) { - - // Begin scanning blocks at every height to determine where the - // transaction was included in. - for height := endHeight; height >= startHeight && height > 0; height-- { - // Ensure we haven't been requested to shut down before - // processing the next height. - select { - case <-b.quit: - return nil, chainntnfs.TxNotFoundManually, - chainntnfs.ErrChainNotifierShuttingDown.Default() - default: - } - - blockHash, err := b.chainConn.GetBlockHash(int64(height)) - if err != nil { - return nil, chainntnfs.TxNotFoundManually, - er.Errorf("unable to get hash from block "+ - "with height %d", height) - } - - // TODO: fetch the neutrino filters instead. - block, err := b.chainConn.GetBlock(blockHash) - if err != nil { - return nil, chainntnfs.TxNotFoundManually, - er.Errorf("unable to get block with hash "+ - "%v: %v", blockHash, err) - } - - // For every transaction in the block, check which one matches - // our request. If we find one that does, we can dispatch its - // confirmation details. 
- for txIndex, tx := range block.Transactions { - if !confRequest.MatchesTx(tx) { - continue - } - - return &chainntnfs.TxConfirmation{ - Tx: tx, - BlockHash: blockHash, - BlockHeight: height, - TxIndex: uint32(txIndex), - }, chainntnfs.TxFoundManually, nil - } - } - - // If we reach here, then we were not able to find the transaction - // within a block, so we avoid returning an error. - return nil, chainntnfs.TxNotFoundManually, nil -} - -// handleBlockConnected applies a chain update for a new block. Any watched -// transactions included this block will processed to either send notifications -// now or after numConfirmations confs. -// TODO(halseth): this is reusing the neutrino notifier implementation, unify -// them. -func (b *BtcdNotifier) handleBlockConnected(epoch chainntnfs.BlockEpoch) er.R { - // First, we'll fetch the raw block as we'll need to gather all the - // transactions to determine whether any are relevant to our registered - // clients. - rawBlock, err := b.chainConn.GetBlock(epoch.Hash) - if err != nil { - return er.Errorf("unable to get block: %v", err) - } - newBlock := &filteredBlock{ - hash: *epoch.Hash, - height: uint32(epoch.Height), - txns: btcutil.NewBlock(rawBlock).Transactions(), - connect: true, - } - - // We'll then extend the txNotifier's height with the information of - // this new block, which will handle all of the notification logic for - // us. - errr := b.txNotifier.ConnectTip( - &newBlock.hash, newBlock.height, newBlock.txns, - ) - if errr != nil { - return er.Errorf("unable to connect tip: %v", errr) - } - - log.Infof("New block: height=%v, sha=%v", epoch.Height, - epoch.Hash) - - // Now that we've guaranteed the new block extends the txNotifier's - // current tip, we'll proceed to dispatch notifications to all of our - // registered clients whom have had notifications fulfilled. Before - // doing so, we'll make sure update our in memory state in order to - // satisfy any client requests based upon the new block. 
- b.bestBlock = epoch - - b.notifyBlockEpochs(epoch.Height, epoch.Hash) - return b.txNotifier.NotifyHeight(uint32(epoch.Height)) -} - -// notifyBlockEpochs notifies all registered block epoch clients of the newly -// connected block to the main chain. -func (b *BtcdNotifier) notifyBlockEpochs(newHeight int32, newSha *chainhash.Hash) { - for _, client := range b.blockEpochClients { - b.notifyBlockEpochClient(client, newHeight, newSha) - } -} - -// notifyBlockEpochClient sends a registered block epoch client a notification -// about a specific block. -func (b *BtcdNotifier) notifyBlockEpochClient(epochClient *blockEpochRegistration, - height int32, sha *chainhash.Hash) { - - epoch := &chainntnfs.BlockEpoch{ - Height: height, - Hash: sha, - } - - select { - case epochClient.epochQueue.ChanIn() <- epoch: - case <-epochClient.cancelChan: - case <-b.quit: - } -} - -// RegisterSpendNtfn registers an intent to be notified once the target -// outpoint/output script has been spent by a transaction on-chain. When -// intending to be notified of the spend of an output script, a nil outpoint -// must be used. The heightHint should represent the earliest height in the -// chain of the transaction that spent the outpoint/output script. -// -// Once a spend of has been detected, the details of the spending event will be -// sent across the 'Spend' channel. -func (b *BtcdNotifier) RegisterSpendNtfn(outpoint *wire.OutPoint, - pkScript []byte, heightHint uint32) (*chainntnfs.SpendEvent, er.R) { - - // Register the conf notification with the TxNotifier. A non-nil value - // for `dispatch` will be returned if we are required to perform a - // manual scan for the confirmation. Otherwise the notifier will begin - // watching at tip for the transaction to confirm. 
- ntfn, errr := b.txNotifier.RegisterSpend(outpoint, pkScript, heightHint) - if errr != nil { - return nil, errr - } - - // We'll then request the backend to notify us when it has detected the - // outpoint/output script as spent. - // - // TODO(wilmer): use LoadFilter API instead. - if outpoint == nil || *outpoint == chainntnfs.ZeroOutPoint { - _, addrs, _, err := txscript.ExtractPkScriptAddrs( - pkScript, b.chainParams, - ) - if err != nil { - return nil, er.Errorf("unable to parse script: %v", err) - } - if err := b.chainConn.NotifyReceived(addrs); err != nil { - return nil, err - } - } else { - ops := []*wire.OutPoint{outpoint} - if err := b.chainConn.NotifySpent(ops); err != nil { - return nil, err - } - } - - // If the txNotifier didn't return any details to perform a historical - // scan of the chain, then we can return early as there's nothing left - // for us to do. - if ntfn.HistoricalDispatch == nil { - return ntfn.Event, nil - } - - // Otherwise, we'll need to dispatch a historical rescan to determine if - // the outpoint was already spent at a previous height. - // - // We'll short-circuit the path when dispatching the spend of a script, - // rather than an outpoint, as there aren't any additional checks we can - // make for scripts. - if outpoint == nil || *outpoint == chainntnfs.ZeroOutPoint { - startHash, err := b.chainConn.GetBlockHash( - int64(ntfn.HistoricalDispatch.StartHeight), - ) - if err != nil { - return nil, err - } - - // TODO(wilmer): add retry logic if rescan fails? 
- _, addrs, _, err := txscript.ExtractPkScriptAddrs( - pkScript, b.chainParams, - ) - if err != nil { - return nil, er.Errorf("unable to parse address: %v", err) - } - - asyncResult := b.chainConn.RescanAsync(startHash, addrs, nil) - go func() { - if rescanErr := asyncResult.Receive(); rescanErr != nil { - log.Errorf("Rescan to determine "+ - "the spend details of %v failed: %v", - ntfn.HistoricalDispatch.SpendRequest, - rescanErr) - } - }() - - return ntfn.Event, nil - } - - // When dispatching spends of outpoints, there are a number of checks we - // can make to start our rescan from a better height or completely avoid - // it. - // - // We'll start by checking the backend's UTXO set to determine whether - // the outpoint has been spent. If it hasn't, we can return to the - // caller as well. - txOut, err := b.chainConn.GetTxOut(&outpoint.Hash, outpoint.Index, true) - if err != nil { - return nil, err - } - if txOut != nil { - // We'll let the txNotifier know the outpoint is still unspent - // in order to begin updating its spend hint. - err := b.txNotifier.UpdateSpendDetails( - ntfn.HistoricalDispatch.SpendRequest, nil, - ) - if err != nil { - return nil, err - } - - return ntfn.Event, nil - } - - // Since the outpoint was spent, as it no longer exists within the UTXO - // set, we'll determine when it happened by scanning the chain. We'll - // begin by fetching the block hash of our starting height. - startHash, err := b.chainConn.GetBlockHash( - int64(ntfn.HistoricalDispatch.StartHeight), - ) - if err != nil { - return nil, er.Errorf("unable to get block hash for height "+ - "%d: %v", ntfn.HistoricalDispatch.StartHeight, err) - } - - // As a minimal optimization, we'll query the backend's transaction - // index (if enabled) to determine if we have a better rescan starting - // height. We can do this as the GetRawTransaction call will return the - // hash of the block it was included in within the chain. 
- tx, err := b.chainConn.GetRawTransactionVerbose(&outpoint.Hash) - if err != nil { - // Avoid returning an error if the transaction was not found to - // proceed with fallback methods. - if !btcjson.ErrRPCNoTxInfo.Is(err) { - return nil, er.Errorf("unable to query for txid %v: %v", - outpoint.Hash, err) - } - } - - // If the transaction index was enabled, we'll use the block's hash to - // retrieve its height and check whether it provides a better starting - // point for our rescan. - if tx != nil { - // If the transaction containing the outpoint hasn't confirmed - // on-chain, then there's no need to perform a rescan. - if tx.BlockHash == "" { - return ntfn.Event, nil - } - - blockHash, err := chainhash.NewHashFromStr(tx.BlockHash) - if err != nil { - return nil, err - } - blockHeader, err := b.chainConn.GetBlockHeaderVerbose(blockHash) - if err != nil { - return nil, er.Errorf("unable to get header for "+ - "block %v: %v", blockHash, err) - } - - if uint32(blockHeader.Height) > ntfn.HistoricalDispatch.StartHeight { - startHash, err = b.chainConn.GetBlockHash( - int64(blockHeader.Height), - ) - if err != nil { - return nil, er.Errorf("unable to get block "+ - "hash for height %d: %v", - blockHeader.Height, err) - } - } - } - - // Now that we've determined the best starting point for our rescan, - // we can go ahead and dispatch it. - // - // In order to ensure that we don't block the caller on what may be a - // long rescan, we'll launch a new goroutine to handle the async result - // of the rescan. We purposefully prevent from adding this goroutine to - // the WaitGroup as we cannot wait for a quit signal due to the - // asyncResult channel not being exposed. - // - // TODO(wilmer): add retry logic if rescan fails? 
- asyncResult := b.chainConn.RescanAsync( - startHash, nil, []*wire.OutPoint{outpoint}, - ) - go func() { - if rescanErr := asyncResult.Receive(); rescanErr != nil { - log.Errorf("Rescan to determine the spend "+ - "details of %v failed: %v", outpoint, rescanErr) - } - }() - - return ntfn.Event, nil -} - -// RegisterConfirmationsNtfn registers an intent to be notified once the target -// txid/output script has reached numConfs confirmations on-chain. When -// intending to be notified of the confirmation of an output script, a nil txid -// must be used. The heightHint should represent the earliest height at which -// the txid/output script could have been included in the chain. -// -// Progress on the number of confirmations left can be read from the 'Updates' -// channel. Once it has reached all of its confirmations, a notification will be -// sent across the 'Confirmed' channel. -func (b *BtcdNotifier) RegisterConfirmationsNtfn(txid *chainhash.Hash, - pkScript []byte, - numConfs, heightHint uint32) (*chainntnfs.ConfirmationEvent, er.R) { - - // Register the conf notification with the TxNotifier. A non-nil value - // for `dispatch` will be returned if we are required to perform a - // manual scan for the confirmation. Otherwise the notifier will begin - // watching at tip for the transaction to confirm. - ntfn, err := b.txNotifier.RegisterConf( - txid, pkScript, numConfs, heightHint, - ) - if err != nil { - return nil, err - } - - if ntfn.HistoricalDispatch == nil { - return ntfn.Event, nil - } - - select { - case b.notificationRegistry <- ntfn.HistoricalDispatch: - return ntfn.Event, nil - case <-b.quit: - return nil, chainntnfs.ErrChainNotifierShuttingDown.Default() - } -} - -// blockEpochRegistration represents a client's intent to receive a -// notification with each newly connected block. 
-type blockEpochRegistration struct { - epochID uint64 - - epochChan chan *chainntnfs.BlockEpoch - - epochQueue *queue.ConcurrentQueue - - bestBlock *chainntnfs.BlockEpoch - - errorChan chan er.R - - cancelChan chan struct{} - - wg sync.WaitGroup -} - -// epochCancel is a message sent to the BtcdNotifier when a client wishes to -// cancel an outstanding epoch notification that has yet to be dispatched. -type epochCancel struct { - epochID uint64 -} - -// RegisterBlockEpochNtfn returns a BlockEpochEvent which subscribes the -// caller to receive notifications, of each new block connected to the main -// chain. Clients have the option of passing in their best known block, which -// the notifier uses to check if they are behind on blocks and catch them up. If -// they do not provide one, then a notification will be dispatched immediately -// for the current tip of the chain upon a successful registration. -func (b *BtcdNotifier) RegisterBlockEpochNtfn( - bestBlock *chainntnfs.BlockEpoch) (*chainntnfs.BlockEpochEvent, er.R) { - - reg := &blockEpochRegistration{ - epochQueue: queue.NewConcurrentQueue(20), - epochChan: make(chan *chainntnfs.BlockEpoch, 20), - cancelChan: make(chan struct{}), - epochID: atomic.AddUint64(&b.epochClientCounter, 1), - bestBlock: bestBlock, - errorChan: make(chan er.R, 1), - } - - reg.epochQueue.Start() - - // Before we send the request to the main goroutine, we'll launch a new - // goroutine to proxy items added to our queue to the client itself. - // This ensures that all notifications are received *in order*. 
- reg.wg.Add(1) - go func() { - defer reg.wg.Done() - - for { - select { - case ntfn := <-reg.epochQueue.ChanOut(): - blockNtfn := ntfn.(*chainntnfs.BlockEpoch) - select { - case reg.epochChan <- blockNtfn: - - case <-reg.cancelChan: - return - - case <-b.quit: - return - } - - case <-reg.cancelChan: - return - - case <-b.quit: - return - } - } - }() - - select { - case <-b.quit: - // As we're exiting before the registration could be sent, - // we'll stop the queue now ourselves. - reg.epochQueue.Stop() - - return nil, er.New("chainntnfs: system interrupt while " + - "attempting to register for block epoch notification.") - case b.notificationRegistry <- reg: - return &chainntnfs.BlockEpochEvent{ - Epochs: reg.epochChan, - Cancel: func() { - cancel := &epochCancel{ - epochID: reg.epochID, - } - - // Submit epoch cancellation to notification dispatcher. - select { - case b.notificationCancels <- cancel: - // Cancellation is being handled, drain - // the epoch channel until it is closed - // before yielding to caller. - for { - select { - case _, ok := <-reg.epochChan: - if !ok { - return - } - case <-b.quit: - return - } - } - case <-b.quit: - } - }, - }, nil - } -} diff --git a/lnd/chainntnfs/btcdnotify/btcd_dev.go b/lnd/chainntnfs/btcdnotify/btcd_dev.go deleted file mode 100644 index 68d5213c..00000000 --- a/lnd/chainntnfs/btcdnotify/btcd_dev.go +++ /dev/null @@ -1,80 +0,0 @@ -// +build dev - -package btcdnotify - -import ( - "time" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/lnd/chainntnfs" -) - -// UnsafeStart starts the notifier with a specified best height and optional -// best hash. Its bestBlock and txNotifier are initialized with bestHeight and -// optionally bestHash. 
The parameter generateBlocks is necessary for the -// bitcoind notifier to ensure we drain all notifications up to syncHeight, -// since if they are generated ahead of UnsafeStart the chainConn may start up -// with an outdated best block and miss sending ntfns. Used for testing. -func (b *BtcdNotifier) UnsafeStart(bestHeight int32, bestHash *chainhash.Hash, - syncHeight int32, generateBlocks func() er.R) er.R { - - // Connect to btcd, and register for notifications on connected, and - // disconnected blocks. - if err := b.chainConn.Connect(20); err != nil { - return err - } - if err := b.chainConn.NotifyBlocks(); err != nil { - return err - } - - b.txNotifier = chainntnfs.NewTxNotifier( - uint32(bestHeight), chainntnfs.ReorgSafetyLimit, - b.confirmHintCache, b.spendHintCache, - ) - - b.chainUpdates.Start() - b.txUpdates.Start() - - if generateBlocks != nil { - // Ensure no block notifications are pending when we start the - // notification dispatcher goroutine. - - // First generate the blocks, then drain the notifications - // for the generated blocks. - if err := generateBlocks(); err != nil { - return err - } - - timeout := time.After(60 * time.Second) - loop: - for { - select { - case ntfn := <-b.chainUpdates.ChanOut(): - lastReceivedNtfn := ntfn.(*chainUpdate) - if lastReceivedNtfn.blockHeight >= syncHeight { - break loop - } - case <-timeout: - return er.Errorf("unable to catch up to height %d", - syncHeight) - } - } - } - - // Run notificationDispatcher after setting the notifier's best block - // to avoid a race condition. 
- b.bestBlock = chainntnfs.BlockEpoch{Height: bestHeight, Hash: bestHash} - if bestHash == nil { - hash, err := b.chainConn.GetBlockHash(int64(bestHeight)) - if err != nil { - return err - } - b.bestBlock.Hash = hash - } - - b.wg.Add(1) - go b.notificationDispatcher() - - return nil -} diff --git a/lnd/chainntnfs/btcdnotify/btcd_test.go b/lnd/chainntnfs/btcdnotify/btcd_test.go deleted file mode 100644 index a8d46bd6..00000000 --- a/lnd/chainntnfs/btcdnotify/btcd_test.go +++ /dev/null @@ -1,249 +0,0 @@ -// +build dev - -package btcdnotify - -import ( - "bytes" - "io/ioutil" - "os" - "testing" - - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/chaincfg/globalcfg" - "github.com/pkt-cash/pktd/integration/rpctest" - "github.com/pkt-cash/pktd/lnd/chainntnfs" - "github.com/pkt-cash/pktd/lnd/channeldb" -) - -var ( - testScript = []byte{ - // OP_HASH160 - 0xA9, - // OP_DATA_20 - 0x14, - // <20-byte hash> - 0xec, 0x6f, 0x7a, 0x5a, 0xa8, 0xf2, 0xb1, 0x0c, 0xa5, 0x15, - 0x04, 0x52, 0x3a, 0x60, 0xd4, 0x03, 0x06, 0xf6, 0x96, 0xcd, - // OP_EQUAL - 0x87, - } -) - -func initHintCache(t *testing.T) *chainntnfs.HeightHintCache { - t.Helper() - - tempDir, errr := ioutil.TempDir("", "kek") - if errr != nil { - t.Fatalf("unable to create temp dir: %v", errr) - } - db, err := channeldb.Open(tempDir) - if err != nil { - t.Fatalf("unable to create db: %v", err) - } - testCfg := chainntnfs.CacheConfig{ - QueryDisable: false, - } - hintCache, err := chainntnfs.NewHeightHintCache(testCfg, db) - if err != nil { - t.Fatalf("unable to create hint cache: %v", err) - } - - return hintCache -} - -// setUpNotifier is a helper function to start a new notifier backed by a btcd -// driver. 
-func setUpNotifier(t *testing.T, h *rpctest.Harness) *BtcdNotifier { - hintCache := initHintCache(t) - - rpcCfg := h.RPCConfig() - notifier, err := New(&rpcCfg, chainntnfs.NetParams, hintCache, hintCache) - if err != nil { - t.Fatalf("unable to create notifier: %v", err) - } - if err := notifier.Start(); err != nil { - t.Fatalf("unable to start notifier: %v", err) - } - - return notifier -} - -// TestHistoricalConfDetailsTxIndex ensures that we correctly retrieve -// historical confirmation details using the backend node's txindex. -// TODO(cjd): DISABLED TEST - needs investigation -func _TestHistoricalConfDetailsTxIndex(t *testing.T) { - t.Parallel() - - harness, tearDown := chainntnfs.NewMiner( - t, []string{"--txindex"}, true, 25, - ) - defer tearDown() - - notifier := setUpNotifier(t, harness) - defer notifier.Stop() - - // A transaction unknown to the node should not be found within the - // txindex even if it is enabled, so we should not proceed with any - // fallback methods. - var unknownHash chainhash.Hash - copy(unknownHash[:], bytes.Repeat([]byte{0x10}, 32)) - unknownConfReq, err := chainntnfs.NewConfRequest(&unknownHash, testScript) - if err != nil { - t.Fatalf("unable to create conf request: %v", err) - } - _, txStatus, err := notifier.historicalConfDetails(unknownConfReq, 0, 0) - if err != nil { - t.Fatalf("unable to retrieve historical conf details: %v", err) - } - - switch txStatus { - case chainntnfs.TxNotFoundIndex: - case chainntnfs.TxNotFoundManually: - t.Fatal("should not have proceeded with fallback method, but did") - default: - t.Fatal("should not have found non-existent transaction, but did") - } - - // Now, we'll create a test transaction and attempt to retrieve its - // confirmation details. 
- txid, pkScript, err := chainntnfs.GetTestTxidAndScript(harness) - if err != nil { - t.Fatalf("unable to create tx: %v", err) - } - if err := chainntnfs.WaitForMempoolTx(harness, txid); err != nil { - t.Fatalf("unable to find tx in the mempool: %v", err) - } - confReq, err := chainntnfs.NewConfRequest(txid, pkScript) - if err != nil { - t.Fatalf("unable to create conf request: %v", err) - } - - // The transaction should be found in the mempool at this point. - _, txStatus, err = notifier.historicalConfDetails(confReq, 0, 0) - if err != nil { - t.Fatalf("unable to retrieve historical conf details: %v", err) - } - - // Since it has yet to be included in a block, it should have been found - // within the mempool. - switch txStatus { - case chainntnfs.TxFoundMempool: - default: - t.Fatalf("should have found the transaction within the "+ - "mempool, but did not: %v", txStatus) - } - - // We'll now confirm this transaction and re-attempt to retrieve its - // confirmation details. - if _, err := harness.Node.Generate(1); err != nil { - t.Fatalf("unable to generate block: %v", err) - } - - _, txStatus, err = notifier.historicalConfDetails(confReq, 0, 0) - if err != nil { - t.Fatalf("unable to retrieve historical conf details: %v", err) - } - - // Since the backend node's txindex is enabled and the transaction has - // confirmed, we should be able to retrieve it using the txindex. - switch txStatus { - case chainntnfs.TxFoundIndex: - default: - t.Fatal("should have found the transaction within the " + - "txindex, but did not") - } -} - -// TestHistoricalConfDetailsNoTxIndex ensures that we correctly retrieve -// historical confirmation details using the set of fallback methods when the -// backend node's txindex is disabled. 
-// TODO(cjd): DISABLED TEST - needs investigation -func _TestHistoricalConfDetailsNoTxIndex(t *testing.T) { - t.Parallel() - - harness, tearDown := chainntnfs.NewMiner(t, nil, true, 25) - defer tearDown() - - notifier := setUpNotifier(t, harness) - defer notifier.Stop() - - // Since the node has its txindex disabled, we fall back to scanning the - // chain manually. A transaction unknown to the network should not be - // found. - var unknownHash chainhash.Hash - copy(unknownHash[:], bytes.Repeat([]byte{0x10}, 32)) - unknownConfReq, err := chainntnfs.NewConfRequest(&unknownHash, testScript) - if err != nil { - t.Fatalf("unable to create conf request: %v", err) - } - _, txStatus, err := notifier.historicalConfDetails(unknownConfReq, 0, 0) - if err != nil { - t.Fatalf("unable to retrieve historical conf details: %v", err) - } - - switch txStatus { - case chainntnfs.TxNotFoundManually: - case chainntnfs.TxNotFoundIndex: - t.Fatal("should have proceeded with fallback method, but did not") - default: - t.Fatal("should not have found non-existent transaction, but did") - } - - // Now, we'll create a test transaction and attempt to retrieve its - // confirmation details. We'll note its broadcast height to use as the - // height hint when manually scanning the chain. 
- _, currentHeight, err := harness.Node.GetBestBlock() - if err != nil { - t.Fatalf("unable to retrieve current height: %v", err) - } - - txid, pkScript, err := chainntnfs.GetTestTxidAndScript(harness) - if err != nil { - t.Fatalf("unable to create tx: %v", err) - } - if err := chainntnfs.WaitForMempoolTx(harness, txid); err != nil { - t.Fatalf("unable to find tx in the mempool: %v", err) - } - confReq, err := chainntnfs.NewConfRequest(txid, pkScript) - if err != nil { - t.Fatalf("unable to create conf request: %v", err) - } - - _, txStatus, err = notifier.historicalConfDetails(confReq, 0, 0) - if err != nil { - t.Fatalf("unable to retrieve historical conf details: %v", err) - } - - // Since it has yet to be included in a block, it should have been found - // within the mempool. - if txStatus != chainntnfs.TxFoundMempool { - t.Fatal("should have found the transaction within the " + - "mempool, but did not") - } - - // We'll now confirm this transaction and re-attempt to retrieve its - // confirmation details. - if _, err := harness.Node.Generate(1); err != nil { - t.Fatalf("unable to generate block: %v", err) - } - - _, txStatus, err = notifier.historicalConfDetails( - confReq, uint32(currentHeight), uint32(currentHeight)+1, - ) - if err != nil { - t.Fatalf("unable to retrieve historical conf details: %v", err) - } - - // Since the backend node's txindex is disabled and the transaction has - // confirmed, we should be able to find it by falling back to scanning - // the chain manually. 
- if txStatus != chainntnfs.TxFoundManually { - t.Fatal("should have found the transaction by manually " + - "scanning the chain, but did not") - } -} - -func TestMain(m *testing.M) { - globalcfg.SelectConfig(globalcfg.BitcoinDefaults()) - os.Exit(m.Run()) -} diff --git a/lnd/chainntnfs/btcdnotify/driver.go b/lnd/chainntnfs/btcdnotify/driver.go deleted file mode 100644 index adfecfbf..00000000 --- a/lnd/chainntnfs/btcdnotify/driver.go +++ /dev/null @@ -1,60 +0,0 @@ -package btcdnotify - -import ( - "fmt" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/chaincfg" - "github.com/pkt-cash/pktd/lnd/chainntnfs" - "github.com/pkt-cash/pktd/rpcclient" -) - -// createNewNotifier creates a new instance of the ChainNotifier interface -// implemented by BtcdNotifier. -func createNewNotifier(args ...interface{}) (chainntnfs.ChainNotifier, er.R) { - if len(args) != 4 { - return nil, er.Errorf("incorrect number of arguments to "+ - ".New(...), expected 4, instead passed %v", len(args)) - } - - config, ok := args[0].(*rpcclient.ConnConfig) - if !ok { - return nil, er.New("first argument to btcdnotify.New " + - "is incorrect, expected a *rpcclient.ConnConfig") - } - - chainParams, ok := args[1].(*chaincfg.Params) - if !ok { - return nil, er.New("second argument to btcdnotify.New " + - "is incorrect, expected a *chaincfg.Params") - } - - spendHintCache, ok := args[2].(chainntnfs.SpendHintCache) - if !ok { - return nil, er.New("third argument to btcdnotify.New " + - "is incorrect, expected a chainntnfs.SpendHintCache") - } - - confirmHintCache, ok := args[3].(chainntnfs.ConfirmHintCache) - if !ok { - return nil, er.New("fourth argument to btcdnotify.New " + - "is incorrect, expected a chainntnfs.ConfirmHintCache") - } - - return New(config, chainParams, spendHintCache, confirmHintCache) -} - -// init registers a driver for the BtcdNotifier concrete implementation of the -// chainntnfs.ChainNotifier interface. -func init() { - // Register the driver. 
- notifier := &chainntnfs.NotifierDriver{ - NotifierType: notifierType, - New: createNewNotifier, - } - - if err := chainntnfs.RegisterNotifier(notifier); err != nil { - panic(fmt.Sprintf("failed to register notifier driver '%s': %v", - notifierType, err)) - } -} diff --git a/lnd/chainntnfs/height_hint_cache.go b/lnd/chainntnfs/height_hint_cache.go deleted file mode 100644 index 3089a90e..00000000 --- a/lnd/chainntnfs/height_hint_cache.go +++ /dev/null @@ -1,325 +0,0 @@ -package chainntnfs - -import ( - "bytes" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/pktlog/log" -) - -var ( - // spendHintBucket is the name of the bucket which houses the height - // hint for outpoints. Each height hint represents the earliest height - // at which its corresponding outpoint could have been spent within. - spendHintBucket = []byte("spend-hints") - - // confirmHintBucket is the name of the bucket which houses the height - // hints for transactions. Each height hint represents the earliest - // height at which its corresponding transaction could have been - // confirmed within. - confirmHintBucket = []byte("confirm-hints") - - Err = er.NewErrorType("lnd.chainntnfs") - - // ErrCorruptedHeightHintCache indicates that the on-disk bucketing - // structure has altered since the height hint cache instance was - // initialized. - ErrCorruptedHeightHintCache = Err.CodeWithDetail("ErrCorruptedHeightHintCache", - "height hint cache has been corrupted") - - // ErrSpendHintNotFound is an error returned when a spend hint for an - // outpoint was not found. - ErrSpendHintNotFound = Err.CodeWithDetail("ErrSpendHintNotFound", - "spend hint not found") - - // ErrConfirmHintNotFound is an error returned when a confirm hint for a - // transaction was not found. 
- ErrConfirmHintNotFound = Err.CodeWithDetail("ErrConfirmHintNotFound", - "confirm hint not found") -) - -// CacheConfig contains the HeightHintCache configuration -type CacheConfig struct { - // QueryDisable prevents reliance on the Height Hint Cache. This is - // necessary to recover from an edge case when the height recorded in - // the cache is higher than the actual height of a spend, causing a - // channel to become "stuck" in a pending close state. - QueryDisable bool -} - -// SpendHintCache is an interface whose duty is to cache spend hints for -// outpoints. A spend hint is defined as the earliest height in the chain at -// which an outpoint could have been spent within. -type SpendHintCache interface { - // CommitSpendHint commits a spend hint for the outpoints to the cache. - CommitSpendHint(height uint32, spendRequests ...SpendRequest) er.R - - // QuerySpendHint returns the latest spend hint for an outpoint. - // ErrSpendHintNotFound is returned if a spend hint does not exist - // within the cache for the outpoint. - QuerySpendHint(spendRequest SpendRequest) (uint32, er.R) - - // PurgeSpendHint removes the spend hint for the outpoints from the - // cache. - PurgeSpendHint(spendRequests ...SpendRequest) er.R -} - -// ConfirmHintCache is an interface whose duty is to cache confirm hints for -// transactions. A confirm hint is defined as the earliest height in the chain -// at which a transaction could have been included in a block. -type ConfirmHintCache interface { - // CommitConfirmHint commits a confirm hint for the transactions to the - // cache. - CommitConfirmHint(height uint32, confRequests ...ConfRequest) er.R - - // QueryConfirmHint returns the latest confirm hint for a transaction - // hash. ErrConfirmHintNotFound is returned if a confirm hint does not - // exist within the cache for the transaction hash. 
- QueryConfirmHint(confRequest ConfRequest) (uint32, er.R) - - // PurgeConfirmHint removes the confirm hint for the transactions from - // the cache. - PurgeConfirmHint(confRequests ...ConfRequest) er.R -} - -// HeightHintCache is an implementation of the SpendHintCache and -// ConfirmHintCache interfaces backed by a channeldb DB instance where the hints -// will be stored. -type HeightHintCache struct { - cfg CacheConfig - db *channeldb.DB -} - -// Compile-time checks to ensure HeightHintCache satisfies the SpendHintCache -// and ConfirmHintCache interfaces. -var _ SpendHintCache = (*HeightHintCache)(nil) -var _ ConfirmHintCache = (*HeightHintCache)(nil) - -// NewHeightHintCache returns a new height hint cache backed by a database. -func NewHeightHintCache(cfg CacheConfig, db *channeldb.DB) (*HeightHintCache, er.R) { - cache := &HeightHintCache{cfg, db} - if err := cache.initBuckets(); err != nil { - return nil, err - } - - return cache, nil -} - -// initBuckets ensures that the primary buckets used by the circuit are -// initialized so that we can assume their existence after startup. -func (c *HeightHintCache) initBuckets() er.R { - return kvdb.Batch(c.db.Backend, func(tx kvdb.RwTx) er.R { - _, err := tx.CreateTopLevelBucket(spendHintBucket) - if err != nil { - return err - } - - _, err = tx.CreateTopLevelBucket(confirmHintBucket) - return err - }) -} - -// CommitSpendHint commits a spend hint for the outpoints to the cache. 
-func (c *HeightHintCache) CommitSpendHint(height uint32, - spendRequests ...SpendRequest) er.R { - - if len(spendRequests) == 0 { - return nil - } - - log.Tracef("Updating spend hint to height %d for %v", height, - spendRequests) - - return kvdb.Batch(c.db.Backend, func(tx kvdb.RwTx) er.R { - spendHints := tx.ReadWriteBucket(spendHintBucket) - if spendHints == nil { - return ErrCorruptedHeightHintCache.Default() - } - - var hint bytes.Buffer - if err := channeldb.WriteElement(&hint, height); err != nil { - return err - } - - for _, spendRequest := range spendRequests { - spendHintKey, err := spendRequest.SpendHintKey() - if err != nil { - return err - } - err = spendHints.Put(spendHintKey, hint.Bytes()) - if err != nil { - return err - } - } - - return nil - }) -} - -// QuerySpendHint returns the latest spend hint for an outpoint. -// ErrSpendHintNotFound is returned if a spend hint does not exist within the -// cache for the outpoint. -func (c *HeightHintCache) QuerySpendHint(spendRequest SpendRequest) (uint32, er.R) { - var hint uint32 - if c.cfg.QueryDisable { - log.Debugf("Ignoring spend height hint for %v (height hint cache "+ - "query disabled)", spendRequest) - return 0, nil - } - err := kvdb.View(c.db, func(tx kvdb.RTx) er.R { - spendHints := tx.ReadBucket(spendHintBucket) - if spendHints == nil { - return ErrCorruptedHeightHintCache.Default() - } - - spendHintKey, err := spendRequest.SpendHintKey() - if err != nil { - return err - } - spendHint := spendHints.Get(spendHintKey) - if spendHint == nil { - return ErrSpendHintNotFound.Default() - } - - return channeldb.ReadElement(bytes.NewReader(spendHint), &hint) - }, func() { - hint = 0 - }) - if err != nil { - return 0, err - } - - return hint, nil -} - -// PurgeSpendHint removes the spend hint for the outpoints from the cache. 
-func (c *HeightHintCache) PurgeSpendHint(spendRequests ...SpendRequest) er.R { - if len(spendRequests) == 0 { - return nil - } - - log.Tracef("Removing spend hints for %v", spendRequests) - - return kvdb.Batch(c.db.Backend, func(tx kvdb.RwTx) er.R { - spendHints := tx.ReadWriteBucket(spendHintBucket) - if spendHints == nil { - return ErrCorruptedHeightHintCache.Default() - } - - for _, spendRequest := range spendRequests { - spendHintKey, err := spendRequest.SpendHintKey() - if err != nil { - return err - } - if err := spendHints.Delete(spendHintKey); err != nil { - return err - } - } - - return nil - }) -} - -// CommitConfirmHint commits a confirm hint for the transactions to the cache. -func (c *HeightHintCache) CommitConfirmHint(height uint32, - confRequests ...ConfRequest) er.R { - - if len(confRequests) == 0 { - return nil - } - - log.Tracef("Updating confirm hints to height %d for %v", height, - confRequests) - - return kvdb.Batch(c.db.Backend, func(tx kvdb.RwTx) er.R { - confirmHints := tx.ReadWriteBucket(confirmHintBucket) - if confirmHints == nil { - return ErrCorruptedHeightHintCache.Default() - } - - var hint bytes.Buffer - if err := channeldb.WriteElement(&hint, height); err != nil { - return err - } - - for _, confRequest := range confRequests { - confHintKey, err := confRequest.ConfHintKey() - if err != nil { - return err - } - err = confirmHints.Put(confHintKey, hint.Bytes()) - if err != nil { - return err - } - } - - return nil - }) -} - -// QueryConfirmHint returns the latest confirm hint for a transaction hash. -// ErrConfirmHintNotFound is returned if a confirm hint does not exist within -// the cache for the transaction hash. 
-func (c *HeightHintCache) QueryConfirmHint(confRequest ConfRequest) (uint32, er.R) { - var hint uint32 - if c.cfg.QueryDisable { - log.Debugf("Ignoring confirmation height hint for %v (height hint "+ - "cache query disabled)", confRequest) - return 0, nil - } - err := kvdb.View(c.db, func(tx kvdb.RTx) er.R { - confirmHints := tx.ReadBucket(confirmHintBucket) - if confirmHints == nil { - return ErrCorruptedHeightHintCache.Default() - } - - confHintKey, err := confRequest.ConfHintKey() - if err != nil { - return err - } - confirmHint := confirmHints.Get(confHintKey) - if confirmHint == nil { - return ErrConfirmHintNotFound.Default() - } - - return channeldb.ReadElement(bytes.NewReader(confirmHint), &hint) - }, func() { - hint = 0 - }) - if err != nil { - return 0, err - } - - return hint, nil -} - -// PurgeConfirmHint removes the confirm hint for the transactions from the -// cache. -func (c *HeightHintCache) PurgeConfirmHint(confRequests ...ConfRequest) er.R { - if len(confRequests) == 0 { - return nil - } - - log.Tracef("Removing confirm hints for %v", confRequests) - - return kvdb.Batch(c.db.Backend, func(tx kvdb.RwTx) er.R { - confirmHints := tx.ReadWriteBucket(confirmHintBucket) - if confirmHints == nil { - return ErrCorruptedHeightHintCache.Default() - } - - for _, confRequest := range confRequests { - confHintKey, err := confRequest.ConfHintKey() - if err != nil { - return err - } - if err := confirmHints.Delete(confHintKey); err != nil { - return err - } - } - - return nil - }) -} diff --git a/lnd/chainntnfs/height_hint_cache_test.go b/lnd/chainntnfs/height_hint_cache_test.go deleted file mode 100644 index eafdc47e..00000000 --- a/lnd/chainntnfs/height_hint_cache_test.go +++ /dev/null @@ -1,203 +0,0 @@ -package chainntnfs - -import ( - "bytes" - "io/ioutil" - "testing" - - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/wire" - "github.com/stretchr/testify/require" -) - -func 
initHintCache(t *testing.T) *HeightHintCache { - t.Helper() - - defaultCfg := CacheConfig{ - QueryDisable: false, - } - - return initHintCacheWithConfig(t, defaultCfg) -} - -func initHintCacheWithConfig(t *testing.T, cfg CacheConfig) *HeightHintCache { - t.Helper() - - tempDir, errr := ioutil.TempDir("", "kek") - if errr != nil { - t.Fatalf("unable to create temp dir: %v", errr) - } - db, err := channeldb.Open(tempDir) - if err != nil { - t.Fatalf("unable to create db: %v", err) - } - hintCache, err := NewHeightHintCache(cfg, db) - if err != nil { - t.Fatalf("unable to create hint cache: %v", err) - } - - return hintCache -} - -// TestHeightHintCacheConfirms ensures that the height hint cache properly -// caches confirm hints for transactions. -func TestHeightHintCacheConfirms(t *testing.T) { - t.Parallel() - - hintCache := initHintCache(t) - - // Querying for a transaction hash not found within the cache should - // return an error indication so. - var unknownHash chainhash.Hash - copy(unknownHash[:], bytes.Repeat([]byte{0x01}, 32)) - unknownConfRequest := ConfRequest{TxID: unknownHash} - _, err := hintCache.QueryConfirmHint(unknownConfRequest) - if !ErrConfirmHintNotFound.Is(err) { - t.Fatalf("expected ErrConfirmHintNotFound, got: %v", err) - } - - // Now, we'll create some transaction hashes and commit them to the - // cache with the same confirm hint. - const height = 100 - const numHashes = 5 - confRequests := make([]ConfRequest, numHashes) - for i := 0; i < numHashes; i++ { - var txHash chainhash.Hash - copy(txHash[:], bytes.Repeat([]byte{byte(i + 1)}, 32)) - confRequests[i] = ConfRequest{TxID: txHash} - } - - err = hintCache.CommitConfirmHint(height, confRequests...) - if err != nil { - t.Fatalf("unable to add entries to cache: %v", err) - } - - // With the hashes committed, we'll now query the cache to ensure that - // we're able to properly retrieve the confirm hints. 
- for _, confRequest := range confRequests { - confirmHint, err := hintCache.QueryConfirmHint(confRequest) - if err != nil { - t.Fatalf("unable to query for hint of %v: %v", confRequest, err) - } - if confirmHint != height { - t.Fatalf("expected confirm hint %d, got %d", height, - confirmHint) - } - } - - // We'll also attempt to purge all of them in a single database - // transaction. - if err := hintCache.PurgeConfirmHint(confRequests...); err != nil { - t.Fatalf("unable to remove confirm hints: %v", err) - } - - // Finally, we'll attempt to query for each hash. We should expect not - // to find a hint for any of them. - for _, confRequest := range confRequests { - _, err := hintCache.QueryConfirmHint(confRequest) - if !ErrConfirmHintNotFound.Is(err) { - t.Fatalf("expected ErrConfirmHintNotFound, got :%v", err) - } - } -} - -// TestHeightHintCacheSpends ensures that the height hint cache properly caches -// spend hints for outpoints. -func TestHeightHintCacheSpends(t *testing.T) { - t.Parallel() - - hintCache := initHintCache(t) - - // Querying for an outpoint not found within the cache should return an - // error indication so. - unknownOutPoint := wire.OutPoint{Index: 1} - unknownSpendRequest := SpendRequest{OutPoint: unknownOutPoint} - _, err := hintCache.QuerySpendHint(unknownSpendRequest) - if !ErrSpendHintNotFound.Is(err) { - t.Fatalf("expected ErrSpendHintNotFound, got: %v", err) - } - - // Now, we'll create some outpoints and commit them to the cache with - // the same spend hint. - const height = 100 - const numOutpoints = 5 - spendRequests := make([]SpendRequest, numOutpoints) - for i := uint32(0); i < numOutpoints; i++ { - spendRequests[i] = SpendRequest{ - OutPoint: wire.OutPoint{Index: i + 1}, - } - } - - err = hintCache.CommitSpendHint(height, spendRequests...) 
- if err != nil { - t.Fatalf("unable to add entries to cache: %v", err) - } - - // With the outpoints committed, we'll now query the cache to ensure - // that we're able to properly retrieve the confirm hints. - for _, spendRequest := range spendRequests { - spendHint, err := hintCache.QuerySpendHint(spendRequest) - if err != nil { - t.Fatalf("unable to query for hint: %v", err) - } - if spendHint != height { - t.Fatalf("expected spend hint %d, got %d", height, - spendHint) - } - } - - // We'll also attempt to purge all of them in a single database - // transaction. - if err := hintCache.PurgeSpendHint(spendRequests...); err != nil { - t.Fatalf("unable to remove spend hint: %v", err) - } - - // Finally, we'll attempt to query for each outpoint. We should expect - // not to find a hint for any of them. - for _, spendRequest := range spendRequests { - _, err = hintCache.QuerySpendHint(spendRequest) - if !ErrSpendHintNotFound.Is(err) { - t.Fatalf("expected ErrSpendHintNotFound, got: %v", err) - } - } -} - -// TestQueryDisable asserts querying for confirmation or spend hints always -// return height zero when QueryDisabled is set to true in the CacheConfig. -func TestQueryDisable(t *testing.T) { - cfg := CacheConfig{ - QueryDisable: true, - } - - hintCache := initHintCacheWithConfig(t, cfg) - - // Insert a new confirmation hint with a non-zero height. - const confHeight = 100 - confRequest := ConfRequest{ - TxID: chainhash.Hash{0x01, 0x02, 0x03}, - } - err := hintCache.CommitConfirmHint(confHeight, confRequest) - require.Nil(t, err) - - // Query for the confirmation hint, which should return zero. - cachedConfHeight, err := hintCache.QueryConfirmHint(confRequest) - require.Nil(t, err) - require.Equal(t, uint32(0), cachedConfHeight) - - // Insert a new spend hint with a non-zero height. 
- const spendHeight = 200 - spendRequest := SpendRequest{ - OutPoint: wire.OutPoint{ - Hash: chainhash.Hash{0x4, 0x05, 0x06}, - Index: 42, - }, - } - err = hintCache.CommitSpendHint(spendHeight, spendRequest) - require.Nil(t, err) - - // Query for the spend hint, which should return zero. - cachedSpendHeight, err := hintCache.QuerySpendHint(spendRequest) - require.Nil(t, err) - require.Equal(t, uint32(0), cachedSpendHeight) -} diff --git a/lnd/chainntnfs/interface.go b/lnd/chainntnfs/interface.go deleted file mode 100644 index c60f6e38..00000000 --- a/lnd/chainntnfs/interface.go +++ /dev/null @@ -1,704 +0,0 @@ -package chainntnfs - -import ( - "bytes" - "fmt" - "strings" - "sync" - - "github.com/pkt-cash/pktd/btcjson" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/pktlog/log" - "github.com/pkt-cash/pktd/wire" -) - -var ( - // ErrChainNotifierShuttingDown is used when we are trying to - // measure a spend notification when notifier is already stopped. - ErrChainNotifierShuttingDown = Err.CodeWithDetail("ErrChainNotifierShuttingDown", "chain notifier shutting down") -) - -// TxConfStatus denotes the status of a transaction's lookup. -type TxConfStatus uint8 - -const ( - // TxFoundMempool denotes that the transaction was found within the - // backend node's mempool. - TxFoundMempool TxConfStatus = iota - - // TxFoundIndex denotes that the transaction was found within the - // backend node's txindex. - TxFoundIndex - - // TxNotFoundIndex denotes that the transaction was not found within the - // backend node's txindex. - TxNotFoundIndex - - // TxFoundManually denotes that the transaction was found within the - // chain by scanning for it manually. - TxFoundManually - - // TxNotFoundManually denotes that the transaction was not found within - // the chain by scanning for it manually. 
- TxNotFoundManually -) - -// String returns the string representation of the TxConfStatus. -func (t TxConfStatus) String() string { - switch t { - case TxFoundMempool: - return "TxFoundMempool" - - case TxFoundIndex: - return "TxFoundIndex" - - case TxNotFoundIndex: - return "TxNotFoundIndex" - - case TxFoundManually: - return "TxFoundManually" - - case TxNotFoundManually: - return "TxNotFoundManually" - - default: - return "unknown" - } -} - -// ChainNotifier represents a trusted source to receive notifications concerning -// targeted events on the Bitcoin blockchain. The interface specification is -// intentionally general in order to support a wide array of chain notification -// implementations such as: btcd's websockets notifications, Bitcoin Core's -// ZeroMQ notifications, various Bitcoin API services, Electrum servers, etc. -// -// Concrete implementations of ChainNotifier should be able to support multiple -// concurrent client requests, as well as multiple concurrent notification events. -type ChainNotifier interface { - // RegisterConfirmationsNtfn registers an intent to be notified once - // txid reaches numConfs confirmations. We also pass in the pkScript as - // the default light client instead needs to match on scripts created in - // the block. If a nil txid is passed in, then not only should we match - // on the script, but we should also dispatch once the transaction - // containing the script reaches numConfs confirmations. This can be - // useful in instances where we only know the script in advance, but not - // the transaction containing it. - // - // The returned ConfirmationEvent should properly notify the client once - // the specified number of confirmations has been reached for the txid, - // as well as if the original tx gets re-org'd out of the mainchain. The - // heightHint parameter is provided as a convenience to light clients. 
- // It heightHint denotes the earliest height in the blockchain in which - // the target txid _could_ have been included in the chain. This can be - // used to bound the search space when checking to see if a notification - // can immediately be dispatched due to historical data. - // - // NOTE: Dispatching notifications to multiple clients subscribed to - // the same (txid, numConfs) tuple MUST be supported. - RegisterConfirmationsNtfn(txid *chainhash.Hash, pkScript []byte, - numConfs, heightHint uint32) (*ConfirmationEvent, er.R) - - // RegisterSpendNtfn registers an intent to be notified once the target - // outpoint is successfully spent within a transaction. The script that - // the outpoint creates must also be specified. This allows this - // interface to be implemented by BIP 158-like filtering. If a nil - // outpoint is passed in, then not only should we match on the script, - // but we should also dispatch once a transaction spends the output - // containing said script. This can be useful in instances where we only - // know the script in advance, but not the outpoint itself. - // - // The returned SpendEvent will receive a send on the 'Spend' - // transaction once a transaction spending the input is detected on the - // blockchain. The heightHint parameter is provided as a convenience to - // light clients. It denotes the earliest height in the blockchain in - // which the target output could have been spent. - // - // NOTE: The notification should only be triggered when the spending - // transaction receives a single confirmation. - // - // NOTE: Dispatching notifications to multiple clients subscribed to a - // spend of the same outpoint MUST be supported. - RegisterSpendNtfn(outpoint *wire.OutPoint, pkScript []byte, - heightHint uint32) (*SpendEvent, er.R) - - // RegisterBlockEpochNtfn registers an intent to be notified of each - // new block connected to the tip of the main chain. 
The returned - // BlockEpochEvent struct contains a channel which will be sent upon - // for each new block discovered. - // - // Clients have the option of passing in their best known block. - // If they specify a block, the ChainNotifier checks whether the client - // is behind on blocks. If they are, the ChainNotifier sends a backlog - // of block notifications for the missed blocks. If they do not provide - // one, then a notification will be dispatched immediately for the - // current tip of the chain upon a successful registration. - RegisterBlockEpochNtfn(*BlockEpoch) (*BlockEpochEvent, er.R) - - // Start the ChainNotifier. Once started, the implementation should be - // ready, and able to receive notification registrations from clients. - Start() er.R - - // Started returns true if this instance has been started, and false otherwise. - Started() bool - - // Stops the concrete ChainNotifier. Once stopped, the ChainNotifier - // should disallow any future requests from potential clients. - // Additionally, all pending client notifications will be canceled - // by closing the related channels on the *Event's. - Stop() er.R -} - -// TxConfirmation carries some additional block-level details of the exact -// block that specified transactions was confirmed within. -type TxConfirmation struct { - // BlockHash is the hash of the block that confirmed the original - // transition. - BlockHash *chainhash.Hash - - // BlockHeight is the height of the block in which the transaction was - // confirmed within. - BlockHeight uint32 - - // TxIndex is the index within the block of the ultimate confirmed - // transaction. - TxIndex uint32 - - // Tx is the transaction for which the notification was requested for. - Tx *wire.MsgTx -} - -// ConfirmationEvent encapsulates a confirmation notification. 
With this struct, -// callers can be notified of: the instance the target txid reaches the targeted -// number of confirmations, how many confirmations are left for the target txid -// to be fully confirmed at every new block height, and also in the event that -// the original txid becomes disconnected from the blockchain as a result of a -// re-org. -// -// Once the txid reaches the specified number of confirmations, the 'Confirmed' -// channel will be sent upon fulfilling the notification. -// -// If the event that the original transaction becomes re-org'd out of the main -// chain, the 'NegativeConf' will be sent upon with a value representing the -// depth of the re-org. -// -// NOTE: If the caller wishes to cancel their registered spend notification, -// the Cancel closure MUST be called. -type ConfirmationEvent struct { - // Confirmed is a channel that will be sent upon once the transaction - // has been fully confirmed. The struct sent will contain all the - // details of the channel's confirmation. - // - // NOTE: This channel must be buffered. - Confirmed chan *TxConfirmation - - // Updates is a channel that will sent upon, at every incremental - // confirmation, how many confirmations are left to declare the - // transaction as fully confirmed. - // - // NOTE: This channel must be buffered with the number of required - // confirmations. - Updates chan uint32 - - // NegativeConf is a channel that will be sent upon if the transaction - // confirms, but is later reorged out of the chain. The integer sent - // through the channel represents the reorg depth. - // - // NOTE: This channel must be buffered. - NegativeConf chan int32 - - // Done is a channel that gets sent upon once the confirmation request - // is no longer under the risk of being reorged out of the chain. - // - // NOTE: This channel must be buffered. 
- Done chan struct{} - - // Cancel is a closure that should be executed by the caller in the case - // that they wish to prematurely abandon their registered confirmation - // notification. - Cancel func() -} - -// NewConfirmationEvent constructs a new ConfirmationEvent with newly opened -// channels. -func NewConfirmationEvent(numConfs uint32, cancel func()) *ConfirmationEvent { - return &ConfirmationEvent{ - Confirmed: make(chan *TxConfirmation, 1), - Updates: make(chan uint32, numConfs), - NegativeConf: make(chan int32, 1), - Done: make(chan struct{}, 1), - Cancel: cancel, - } -} - -// SpendDetail contains details pertaining to a spent output. This struct itself -// is the spentness notification. It includes the original outpoint which triggered -// the notification, the hash of the transaction spending the output, the -// spending transaction itself, and finally the input index which spent the -// target output. -type SpendDetail struct { - SpentOutPoint *wire.OutPoint - SpenderTxHash *chainhash.Hash - SpendingTx *wire.MsgTx - SpenderInputIndex uint32 - SpendingHeight int32 -} - -// String returns a string representation of SpendDetail. -func (s *SpendDetail) String() string { - return fmt.Sprintf("%v[%d] spending %v at height=%v", s.SpenderTxHash, - s.SpenderInputIndex, s.SpentOutPoint, s.SpendingHeight) -} - -// SpendEvent encapsulates a spentness notification. Its only field 'Spend' will -// be sent upon once the target output passed into RegisterSpendNtfn has been -// spent on the blockchain. -// -// NOTE: If the caller wishes to cancel their registered spend notification, -// the Cancel closure MUST be called. -type SpendEvent struct { - // Spend is a receive only channel which will be sent upon once the - // target outpoint has been spent. - // - // NOTE: This channel must be buffered. 
- Spend chan *SpendDetail - - // Reorg is a channel that will be sent upon once we detect the spending - // transaction of the outpoint in question has been reorged out of the - // chain. - // - // NOTE: This channel must be buffered. - Reorg chan struct{} - - // Done is a channel that gets sent upon once the confirmation request - // is no longer under the risk of being reorged out of the chain. - // - // NOTE: This channel must be buffered. - Done chan struct{} - - // Cancel is a closure that should be executed by the caller in the case - // that they wish to prematurely abandon their registered spend - // notification. - Cancel func() -} - -// NewSpendEvent constructs a new SpendEvent with newly opened channels. -func NewSpendEvent(cancel func()) *SpendEvent { - return &SpendEvent{ - Spend: make(chan *SpendDetail, 1), - Reorg: make(chan struct{}, 1), - Done: make(chan struct{}, 1), - Cancel: cancel, - } -} - -// BlockEpoch represents metadata concerning each new block connected to the -// main chain. -type BlockEpoch struct { - // Hash is the block hash of the latest block to be added to the tip of - // the main chain. - Hash *chainhash.Hash - - // Height is the height of the latest block to be added to the tip of - // the main chain. - Height int32 -} - -// BlockEpochEvent encapsulates an on-going stream of block epoch -// notifications. Its only field 'Epochs' will be sent upon for each new block -// connected to the main-chain. -// -// NOTE: If the caller wishes to cancel their registered block epoch -// notification, the Cancel closure MUST be called. -type BlockEpochEvent struct { - // Epochs is a receive only channel that will be sent upon each time a - // new block is connected to the end of the main chain. - // - // NOTE: This channel must be buffered. - Epochs <-chan *BlockEpoch - - // Cancel is a closure that should be executed by the caller in the case - // that they wish to abandon their registered block epochs notification. 
- Cancel func() -} - -// NotifierDriver represents a "driver" for a particular interface. A driver is -// identified by a globally unique string identifier along with a 'New()' -// method which is responsible for initializing a particular ChainNotifier -// concrete implementation. -type NotifierDriver struct { - // NotifierType is a string which uniquely identifies the ChainNotifier - // that this driver, drives. - NotifierType string - - // New creates a new instance of a concrete ChainNotifier - // implementation given a variadic set up arguments. The function takes - // a variadic number of interface parameters in order to provide - // initialization flexibility, thereby accommodating several potential - // ChainNotifier implementations. - New func(args ...interface{}) (ChainNotifier, er.R) -} - -var ( - notifiers = make(map[string]*NotifierDriver) - registerMtx sync.Mutex -) - -// RegisteredNotifiers returns a slice of all currently registered notifiers. -// -// NOTE: This function is safe for concurrent access. -func RegisteredNotifiers() []*NotifierDriver { - registerMtx.Lock() - defer registerMtx.Unlock() - - drivers := make([]*NotifierDriver, 0, len(notifiers)) - for _, driver := range notifiers { - drivers = append(drivers, driver) - } - - return drivers -} - -// RegisterNotifier registers a NotifierDriver which is capable of driving a -// concrete ChainNotifier interface. In the case that this driver has already -// been registered, an error is returned. -// -// NOTE: This function is safe for concurrent access. -func RegisterNotifier(driver *NotifierDriver) er.R { - registerMtx.Lock() - defer registerMtx.Unlock() - - if _, ok := notifiers[driver.NotifierType]; ok { - return er.Errorf("notifier already registered") - } - - notifiers[driver.NotifierType] = driver - - return nil -} - -// SupportedNotifiers returns a slice of strings that represent the database -// drivers that have been registered and are therefore supported. 
-// -// NOTE: This function is safe for concurrent access. -func SupportedNotifiers() []string { - registerMtx.Lock() - defer registerMtx.Unlock() - - supportedNotifiers := make([]string, 0, len(notifiers)) - for driverName := range notifiers { - supportedNotifiers = append(supportedNotifiers, driverName) - } - - return supportedNotifiers -} - -// ChainConn enables notifiers to pass in their chain backend to interface -// functions that require it. -type ChainConn interface { - // GetBlockHeader returns the block header for a hash. - GetBlockHeader(blockHash *chainhash.Hash) (*wire.BlockHeader, er.R) - - // GetBlockHeaderVerbose returns the verbose block header for a hash. - GetBlockHeaderVerbose(blockHash *chainhash.Hash) ( - *btcjson.GetBlockHeaderVerboseResult, er.R) - - // GetBlockHash returns the hash from a block height. - GetBlockHash(blockHeight int64) (*chainhash.Hash, er.R) -} - -// GetCommonBlockAncestorHeight takes in: -// (1) the hash of a block that has been reorged out of the main chain -// (2) the hash of the block of the same height from the main chain -// It returns the height of the nearest common ancestor between the two hashes, -// or an error -func GetCommonBlockAncestorHeight(chainConn ChainConn, reorgHash, - chainHash chainhash.Hash) (int32, er.R) { - - for reorgHash != chainHash { - reorgHeader, err := chainConn.GetBlockHeader(&reorgHash) - if err != nil { - return 0, er.Errorf("unable to get header for hash=%v: %v", - reorgHash, err) - } - chainHeader, err := chainConn.GetBlockHeader(&chainHash) - if err != nil { - return 0, er.Errorf("unable to get header for hash=%v: %v", - chainHash, err) - } - reorgHash = reorgHeader.PrevBlock - chainHash = chainHeader.PrevBlock - } - - verboseHeader, err := chainConn.GetBlockHeaderVerbose(&chainHash) - if err != nil { - return 0, er.Errorf("unable to get verbose header for hash=%v: %v", - chainHash, err) - } - - return verboseHeader.Height, nil -} - -// GetClientMissedBlocks uses a client's best block 
to determine what blocks -// it missed being notified about, and returns them in a slice. Its -// backendStoresReorgs parameter tells it whether or not the notifier's -// chainConn stores information about blocks that have been reorged out of the -// chain, which allows GetClientMissedBlocks to find out whether the client's -// best block has been reorged out of the chain, rewind to the common ancestor -// and return blocks starting right after the common ancestor. -func GetClientMissedBlocks(chainConn ChainConn, clientBestBlock *BlockEpoch, - notifierBestHeight int32, backendStoresReorgs bool) ([]BlockEpoch, er.R) { - - startingHeight := clientBestBlock.Height - if backendStoresReorgs { - // If a reorg causes the client's best hash to be incorrect, - // retrieve the closest common ancestor and dispatch - // notifications from there. - hashAtBestHeight, err := chainConn.GetBlockHash( - int64(clientBestBlock.Height)) - if err != nil { - return nil, er.Errorf("unable to find blockhash for "+ - "height=%d: %v", clientBestBlock.Height, err) - } - - startingHeight, err = GetCommonBlockAncestorHeight( - chainConn, *clientBestBlock.Hash, *hashAtBestHeight, - ) - if err != nil { - return nil, er.Errorf("unable to find common ancestor: "+ - "%v", err) - } - } - - // We want to start dispatching historical notifications from the block - // right after the client's best block, to avoid a redundant notification. - missedBlocks, err := getMissedBlocks( - chainConn, startingHeight+1, notifierBestHeight+1, - ) - if err != nil { - return nil, er.Errorf("unable to get missed blocks: %v", err) - } - - return missedBlocks, nil -} - -// RewindChain handles internal state updates for the notifier's TxNotifier. It -// has no effect if given a height greater than or equal to our current best -// known height. It returns the new best block for the notifier. 
-func RewindChain(chainConn ChainConn, txNotifier *TxNotifier, - currBestBlock BlockEpoch, targetHeight int32) (BlockEpoch, er.R) { - - newBestBlock := BlockEpoch{ - Height: currBestBlock.Height, - Hash: currBestBlock.Hash, - } - - for height := currBestBlock.Height; height > targetHeight; height-- { - hash, err := chainConn.GetBlockHash(int64(height - 1)) - if err != nil { - return newBestBlock, er.Errorf("unable to "+ - "find blockhash for disconnected height=%d: %v", - height, err) - } - - log.Infof("Block disconnected from main chain: "+ - "height=%v, sha=%v", height, newBestBlock.Hash) - - err = txNotifier.DisconnectTip(uint32(height)) - if err != nil { - return newBestBlock, er.Errorf("unable to "+ - " disconnect tip for height=%d: %v", - height, err) - } - newBestBlock.Height = height - 1 - newBestBlock.Hash = hash - } - return newBestBlock, nil -} - -// HandleMissedBlocks is called when the chain backend for a notifier misses a -// series of blocks, handling a reorg if necessary. Its backendStoresReorgs -// parameter tells it whether or not the notifier's chainConn stores -// information about blocks that have been reorged out of the chain, which allows -// HandleMissedBlocks to check whether the notifier's best block has been -// reorged out, and rewind the chain accordingly. It returns the best block for -// the notifier and a slice of the missed blocks. The new best block needs to be -// returned in case a chain rewind occurs and partially completes before -// erroring. In the case where there is no rewind, the notifier's -// current best block is returned. 
-func HandleMissedBlocks(chainConn ChainConn, txNotifier *TxNotifier, - currBestBlock BlockEpoch, newHeight int32, - backendStoresReorgs bool) (BlockEpoch, []BlockEpoch, er.R) { - - startingHeight := currBestBlock.Height - - if backendStoresReorgs { - // If a reorg causes our best hash to be incorrect, rewind the - // chain so our best block is set to the closest common - // ancestor, then dispatch notifications from there. - hashAtBestHeight, err := - chainConn.GetBlockHash(int64(currBestBlock.Height)) - if err != nil { - return currBestBlock, nil, er.Errorf("unable to find "+ - "blockhash for height=%d: %v", - currBestBlock.Height, err) - } - - startingHeight, err = GetCommonBlockAncestorHeight( - chainConn, *currBestBlock.Hash, *hashAtBestHeight, - ) - if err != nil { - return currBestBlock, nil, er.Errorf("unable to find "+ - "common ancestor: %v", err) - } - - currBestBlock, err = RewindChain(chainConn, txNotifier, - currBestBlock, startingHeight) - if err != nil { - return currBestBlock, nil, er.Errorf("unable to "+ - "rewind chain: %v", err) - } - } - - // We want to start dispatching historical notifications from the block - // right after our best block, to avoid a redundant notification. - missedBlocks, err := getMissedBlocks(chainConn, startingHeight+1, newHeight) - if err != nil { - return currBestBlock, nil, er.Errorf("unable to get missed "+ - "blocks: %v", err) - } - - return currBestBlock, missedBlocks, nil -} - -// getMissedBlocks returns a slice of blocks: [startingHeight, endingHeight) -// fetched from the chain. 
-func getMissedBlocks(chainConn ChainConn, startingHeight, - endingHeight int32) ([]BlockEpoch, er.R) { - - numMissedBlocks := endingHeight - startingHeight - if numMissedBlocks < 0 { - return nil, er.Errorf("starting height %d is greater than "+ - "ending height %d", startingHeight, endingHeight) - } - - missedBlocks := make([]BlockEpoch, 0, numMissedBlocks) - for height := startingHeight; height < endingHeight; height++ { - hash, err := chainConn.GetBlockHash(int64(height)) - if err != nil { - return nil, er.Errorf("unable to find blockhash for "+ - "height=%d: %v", height, err) - } - missedBlocks = append(missedBlocks, - BlockEpoch{Hash: hash, Height: height}) - } - - return missedBlocks, nil -} - -// TxIndexConn abstracts an RPC backend with txindex enabled. -type TxIndexConn interface { - // GetRawTransactionVerbose returns the transaction identified by the - // passed chain hash, and returns additional information such as the - // block that the transaction confirmed. - GetRawTransactionVerbose(*chainhash.Hash) (*btcjson.TxRawResult, er.R) - - // GetBlockVerbose returns the block identified by the chain hash along - // with additional information such as the block's height in the chain. - GetBlockVerbose(*chainhash.Hash) (*btcjson.GetBlockVerboseResult, er.R) -} - -// ConfDetailsFromTxIndex looks up whether a transaction is already included in -// a block in the active chain by using the backend node's transaction index. -// If the transaction is found its TxConfStatus is returned. If it was found in -// the mempool this will be TxFoundMempool, if it is found in a block this will -// be TxFoundIndex. Otherwise TxNotFoundIndex is returned. If the tx is found -// in a block its confirmation details are also returned. 
-func ConfDetailsFromTxIndex(chainConn TxIndexConn, r ConfRequest, - txNotFoundErr string) (*TxConfirmation, TxConfStatus, er.R) { - - // If the transaction has some or all of its confirmations required, - // then we may be able to dispatch it immediately. - rawTxRes, err := chainConn.GetRawTransactionVerbose(&r.TxID) - if err != nil { - // If the transaction lookup was successful, but it wasn't found - // within the index itself, then we can exit early. We'll also - // need to look at the error message returned as the error code - // is used for multiple errors. - if btcjson.ErrRPCNoTxInfo.Is(err) && - strings.Contains(err.Message(), txNotFoundErr) { - return nil, TxNotFoundIndex, nil - } - - return nil, TxNotFoundIndex, - er.Errorf("unable to query for txid %v: %v", - r.TxID, err) - } - - // Deserialize the hex-encoded transaction to include it in the - // confirmation details. - rawTx, err := util.DecodeHex(rawTxRes.Hex) - if err != nil { - return nil, TxNotFoundIndex, - er.Errorf("unable to deserialize tx %v: %v", - r.TxID, err) - } - var tx wire.MsgTx - if err := tx.Deserialize(bytes.NewReader(rawTx)); err != nil { - return nil, TxNotFoundIndex, - er.Errorf("unable to deserialize tx %v: %v", - r.TxID, err) - } - - // Ensure the transaction matches our confirmation request in terms of - // txid and pkscript. - if !r.MatchesTx(&tx) { - return nil, TxNotFoundIndex, - er.Errorf("unable to locate tx %v", r.TxID) - } - - // Make sure we actually retrieved a transaction that is included in a - // block. If not, the transaction must be unconfirmed (in the mempool), - // and we'll return TxFoundMempool together with a nil TxConfirmation. - if rawTxRes.BlockHash == "" { - return nil, TxFoundMempool, nil - } - - // As we need to fully populate the returned TxConfirmation struct, - // grab the block in which the transaction was confirmed so we can - // locate its exact index within the block. 
- blockHash, err := chainhash.NewHashFromStr(rawTxRes.BlockHash) - if err != nil { - return nil, TxNotFoundIndex, - er.Errorf("unable to get block hash %v for "+ - "historical dispatch: %v", rawTxRes.BlockHash, err) - } - block, err := chainConn.GetBlockVerbose(blockHash) - if err != nil { - return nil, TxNotFoundIndex, - er.Errorf("unable to get block with hash %v for "+ - "historical dispatch: %v", blockHash, err) - } - - // If the block was obtained, locate the transaction's index within the - // block so we can give the subscriber full confirmation details. - txidStr := r.TxID.String() - for txIndex, txHash := range block.Tx { - if txHash != txidStr { - continue - } - - return &TxConfirmation{ - Tx: &tx, - BlockHash: blockHash, - BlockHeight: uint32(block.Height), - TxIndex: uint32(txIndex), - }, TxFoundIndex, nil - } - - // We return an error because we should have found the transaction - // within the block, but didn't. - return nil, TxNotFoundIndex, er.Errorf("unable to locate "+ - "tx %v in block %v", r.TxID, blockHash) -} diff --git a/lnd/chainntnfs/interface_dev.go b/lnd/chainntnfs/interface_dev.go deleted file mode 100644 index c020614f..00000000 --- a/lnd/chainntnfs/interface_dev.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build dev - -package chainntnfs - -import ( - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/chaincfg/chainhash" -) - -// TestChainNotifier enables the use of methods that are only present during -// testing for ChainNotifiers. -type TestChainNotifier interface { - ChainNotifier - - // UnsafeStart enables notifiers to start up with a specific best block. - // Used for testing. 
- UnsafeStart(int32, *chainhash.Hash, int32, func() er.R) er.R -} diff --git a/lnd/chainntnfs/interface_test.go b/lnd/chainntnfs/interface_test.go deleted file mode 100644 index cae7136e..00000000 --- a/lnd/chainntnfs/interface_test.go +++ /dev/null @@ -1,2025 +0,0 @@ -// +build dev - -package chainntnfs_test - -import ( - "bytes" - "fmt" - "io/ioutil" - "log" - "os" - "sync" - "testing" - "time" - - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/chaincfg/globalcfg" - "github.com/pkt-cash/pktd/integration/rpctest" - "github.com/pkt-cash/pktd/lnd/chainntnfs" - "github.com/pkt-cash/pktd/lnd/chainntnfs/btcdnotify" - "github.com/pkt-cash/pktd/lnd/chainntnfs/neutrinonotify" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/neutrino" - _ "github.com/pkt-cash/pktd/pktwallet/walletdb/bdb" // Required to auto-register the boltdb walletdb implementation. - "github.com/pkt-cash/pktd/rpcclient" - "github.com/pkt-cash/pktd/wire" -) - -func testSingleConfirmationNotification(miner *rpctest.Harness, - notifier chainntnfs.TestChainNotifier, scriptDispatch bool, t *testing.T) { - - // We'd like to test the case of being notified once a txid reaches - // a *single* confirmation. - // - // So first, let's send some coins to "ourself", obtaining a txid. - // We're spending from a coinbase output here, so we use the dedicated - // function. - txid, pkScript, err := chainntnfs.GetTestTxidAndScript(miner) - if err != nil { - t.Fatalf("unable to create test tx: %v", err) - } - if err := chainntnfs.WaitForMempoolTx(miner, txid); err != nil { - t.Fatalf("tx not relayed to miner: %v", err) - } - - _, currentHeight, err := miner.Node.GetBestBlock() - if err != nil { - t.Fatalf("unable to get current height: %v", err) - } - - // Now that we have a txid, register a confirmation notification with - // the chainntfn source. 
- numConfs := uint32(1) - var confIntent *chainntnfs.ConfirmationEvent - if scriptDispatch { - confIntent, err = notifier.RegisterConfirmationsNtfn( - nil, pkScript, numConfs, uint32(currentHeight), - ) - } else { - confIntent, err = notifier.RegisterConfirmationsNtfn( - txid, pkScript, numConfs, uint32(currentHeight), - ) - } - if err != nil { - t.Fatalf("unable to register ntfn: %v", err) - } - - // Now generate a single block, the transaction should be included which - // should trigger a notification event. - blockHash, err := miner.Node.Generate(1) - if err != nil { - t.Fatalf("unable to generate single block: %v", err) - } - - select { - case confInfo := <-confIntent.Confirmed: - if !confInfo.BlockHash.IsEqual(blockHash[0]) { - t.Fatalf("mismatched block hashes: expected %v, got %v", - blockHash[0], confInfo.BlockHash) - } - - // Finally, we'll verify that the tx index returned is the exact same - // as the tx index of the transaction within the block itself. - msgBlock, err := miner.Node.GetBlock(blockHash[0]) - if err != nil { - t.Fatalf("unable to fetch block: %v", err) - } - - block := btcutil.NewBlock(msgBlock) - specifiedTxHash, err := block.TxHash(int(confInfo.TxIndex)) - if err != nil { - t.Fatalf("unable to index into block: %v", err) - } - - if !specifiedTxHash.IsEqual(txid) { - t.Fatalf("mismatched tx indexes: expected %v, got %v", - txid, specifiedTxHash) - } - case <-time.After(20 * time.Second): - t.Fatalf("confirmation notification never received") - } -} - -func testMultiConfirmationNotification(miner *rpctest.Harness, - notifier chainntnfs.TestChainNotifier, scriptDispatch bool, t *testing.T) { - - // We'd like to test the case of being notified once a txid reaches - // N confirmations, where N > 1. - // - // Again, we'll begin by creating a fresh transaction, so we can obtain - // a fresh txid. 
- txid, pkScript, err := chainntnfs.GetTestTxidAndScript(miner) - if err != nil { - t.Fatalf("unable to create test addr: %v", err) - } - if err := chainntnfs.WaitForMempoolTx(miner, txid); err != nil { - t.Fatalf("tx not relayed to miner: %v", err) - } - - _, currentHeight, err := miner.Node.GetBestBlock() - if err != nil { - t.Fatalf("unable to get current height: %v", err) - } - - numConfs := uint32(6) - var confIntent *chainntnfs.ConfirmationEvent - if scriptDispatch { - confIntent, err = notifier.RegisterConfirmationsNtfn( - nil, pkScript, numConfs, uint32(currentHeight), - ) - } else { - confIntent, err = notifier.RegisterConfirmationsNtfn( - txid, pkScript, numConfs, uint32(currentHeight), - ) - } - if err != nil { - t.Fatalf("unable to register ntfn: %v", err) - } - - // Now generate a six blocks. The transaction should be included in the - // first block, which will be built upon by the other 5 blocks. - if _, err := miner.Node.Generate(6); err != nil { - t.Fatalf("unable to generate single block: %v", err) - } - - // TODO(roasbeef): reduce all timeouts after neutrino sync tightended - // up - - select { - case <-confIntent.Confirmed: - break - case <-time.After(20 * time.Second): - t.Fatalf("confirmation notification never received") - } -} - -func testBatchConfirmationNotification(miner *rpctest.Harness, - notifier chainntnfs.TestChainNotifier, scriptDispatch bool, t *testing.T) { - - // We'd like to test a case of serving notifications to multiple - // clients, each requesting to be notified once a txid receives - // various numbers of confirmations. 
- confSpread := [6]uint32{1, 2, 3, 6, 20, 22} - confIntents := make([]*chainntnfs.ConfirmationEvent, len(confSpread)) - - _, currentHeight, err := miner.Node.GetBestBlock() - if err != nil { - t.Fatalf("unable to get current height: %v", err) - } - - // Create a new txid spending miner coins for each confirmation entry - // in confSpread, we collect each conf intent into a slice so we can - // verify they're each notified at the proper number of confirmations - // below. - for i, numConfs := range confSpread { - txid, pkScript, err := chainntnfs.GetTestTxidAndScript(miner) - if err != nil { - t.Fatalf("unable to create test addr: %v", err) - } - var confIntent *chainntnfs.ConfirmationEvent - if scriptDispatch { - confIntent, err = notifier.RegisterConfirmationsNtfn( - nil, pkScript, numConfs, uint32(currentHeight), - ) - } else { - confIntent, err = notifier.RegisterConfirmationsNtfn( - txid, pkScript, numConfs, uint32(currentHeight), - ) - } - if err != nil { - t.Fatalf("unable to register ntfn: %v", err) - } - confIntents[i] = confIntent - if err := chainntnfs.WaitForMempoolTx(miner, txid); err != nil { - t.Fatalf("tx not relayed to miner: %v", err) - } - - } - - initialConfHeight := uint32(currentHeight + 1) - - // Now, for each confirmation intent, generate the delta number of blocks - // needed to trigger the confirmation notification. A goroutine is - // spawned in order to verify the proper notification is triggered. - for i, numConfs := range confSpread { - var blocksToGen uint32 - - // If this is the last instance, manually index to generate the - // proper block delta in order to avoid a panic. - if i == len(confSpread)-1 { - blocksToGen = confSpread[len(confSpread)-1] - confSpread[len(confSpread)-2] - } else { - blocksToGen = confSpread[i+1] - confSpread[i] - } - - // Generate the number of blocks necessary to trigger this - // current confirmation notification. 
- if _, err := miner.Node.Generate(blocksToGen); err != nil { - t.Fatalf("unable to generate single block: %v", err) - } - - select { - case conf := <-confIntents[i].Confirmed: - // All of the notifications above were originally - // confirmed in the same block. The returned - // notification should list the initial confirmation - // height rather than the height they were _fully_ - // confirmed. - if conf.BlockHeight != initialConfHeight { - t.Fatalf("notification has incorrect initial "+ - "conf height: expected %v, got %v", - initialConfHeight, conf.BlockHeight) - } - continue - case <-time.After(20 * time.Second): - t.Fatalf("confirmation notification never received: %v", numConfs) - } - } -} - -func checkNotificationFields(ntfn *chainntnfs.SpendDetail, - outpoint *wire.OutPoint, spenderSha *chainhash.Hash, - height int32, t *testing.T) { - - t.Helper() - - if *ntfn.SpentOutPoint != *outpoint { - t.Fatalf("ntfn includes wrong output, reports "+ - "%v instead of %v", - ntfn.SpentOutPoint, outpoint) - } - if !bytes.Equal(ntfn.SpenderTxHash[:], spenderSha[:]) { - t.Fatalf("ntfn includes wrong spender tx sha, "+ - "reports %v instead of %v", - ntfn.SpenderTxHash[:], spenderSha[:]) - } - if ntfn.SpenderInputIndex != 0 { - t.Fatalf("ntfn includes wrong spending input "+ - "index, reports %v, should be %v", - ntfn.SpenderInputIndex, 0) - } - if ntfn.SpendingHeight != height { - t.Fatalf("ntfn has wrong spending height: "+ - "expected %v, got %v", height, - ntfn.SpendingHeight) - } -} - -func testSpendNotification(miner *rpctest.Harness, - notifier chainntnfs.TestChainNotifier, scriptDispatch bool, t *testing.T) { - - // We'd like to test the spend notifications for all ChainNotifier - // concrete implementations. - // - // To do so, we first create a new output to our test target address. 
- outpoint, output, privKey := chainntnfs.CreateSpendableOutput(t, miner) - - _, currentHeight, err := miner.Node.GetBestBlock() - if err != nil { - t.Fatalf("unable to get current height: %v", err) - } - - // Now that we have an output index and the pkScript, register for a - // spentness notification for the newly created output with multiple - // clients in order to ensure the implementation can support - // multi-client spend notifications. - const numClients = 5 - spendClients := make([]*chainntnfs.SpendEvent, numClients) - for i := 0; i < numClients; i++ { - var spentIntent *chainntnfs.SpendEvent - if scriptDispatch { - spentIntent, err = notifier.RegisterSpendNtfn( - nil, output.PkScript, uint32(currentHeight), - ) - } else { - spentIntent, err = notifier.RegisterSpendNtfn( - outpoint, output.PkScript, uint32(currentHeight), - ) - } - if err != nil { - t.Fatalf("unable to register for spend ntfn: %v", err) - } - - spendClients[i] = spentIntent - } - - // Next, create a new transaction spending that output. - spendingTx := chainntnfs.CreateSpendTx(t, outpoint, output, privKey) - - // Broadcast our spending transaction. - spenderSha, err := miner.Node.SendRawTransaction(spendingTx, true) - if err != nil { - t.Fatalf("unable to broadcast tx: %v", err) - } - - if err := chainntnfs.WaitForMempoolTx(miner, spenderSha); err != nil { - t.Fatalf("tx not relayed to miner: %v", err) - } - - // Make sure notifications are not yet sent. We launch a go routine for - // all the spend clients, such that we can wait for them all in - // parallel. 
- mempoolSpendTimeout := 2 * chainntnfs.TrickleInterval - mempoolSpends := make(chan *chainntnfs.SpendDetail, numClients) - for _, c := range spendClients { - go func(client *chainntnfs.SpendEvent) { - select { - case s := <-client.Spend: - mempoolSpends <- s - case <-time.After(mempoolSpendTimeout): - } - }(c) - } - - select { - case <-mempoolSpends: - t.Fatalf("did not expect to get notification before " + - "block was mined") - case <-time.After(mempoolSpendTimeout): - } - - // Make sure registering a client after the tx is in the mempool still - // doesn't trigger a notification. - var spentIntent *chainntnfs.SpendEvent - if scriptDispatch { - spentIntent, err = notifier.RegisterSpendNtfn( - nil, output.PkScript, uint32(currentHeight), - ) - } else { - spentIntent, err = notifier.RegisterSpendNtfn( - outpoint, output.PkScript, uint32(currentHeight), - ) - } - if err != nil { - t.Fatalf("unable to register for spend ntfn: %v", err) - } - - select { - case <-spentIntent.Spend: - t.Fatalf("did not expect to get notification before " + - "block was mined") - case <-time.After(mempoolSpendTimeout): - } - spendClients = append(spendClients, spentIntent) - - // Now we mine a single block, which should include our spend. The - // notification should also be sent off. - if _, err := miner.Node.Generate(1); err != nil { - t.Fatalf("unable to generate single block: %v", err) - } - - _, currentHeight, err = miner.Node.GetBestBlock() - if err != nil { - t.Fatalf("unable to get current height: %v", err) - } - - for _, c := range spendClients { - select { - case ntfn := <-c.Spend: - // We've received the spend nftn. So now verify all the - // fields have been set properly. 
- checkNotificationFields(ntfn, outpoint, spenderSha, - currentHeight, t) - case <-time.After(30 * time.Second): - t.Fatalf("spend ntfn never received") - } - } -} - -func testBlockEpochNotification(miner *rpctest.Harness, - notifier chainntnfs.TestChainNotifier, t *testing.T) { - - // We'd like to test the case of multiple registered clients receiving - // block epoch notifications. - - const numBlocks = 10 - const numNtfns = numBlocks + 1 - const numClients = 5 - var wg sync.WaitGroup - - // Create numClients clients which will listen for block notifications. We - // expect each client to receive 11 notifications, one for the current - // tip of the chain, and one for each of the ten blocks we generate - // below. So we'll use a WaitGroup to synchronize the test. - for i := 0; i < numClients; i++ { - epochClient, err := notifier.RegisterBlockEpochNtfn(nil) - if err != nil { - t.Fatalf("unable to register for epoch notification") - } - - wg.Add(numNtfns) - go func() { - for i := 0; i < numNtfns; i++ { - <-epochClient.Epochs - wg.Done() - } - }() - } - - epochsSent := make(chan struct{}) - go func() { - wg.Wait() - close(epochsSent) - }() - - // Now generate 10 blocks, the clients above should each receive 10 - // notifications, thereby unblocking the goroutine above. - if _, err := miner.Node.Generate(numBlocks); err != nil { - t.Fatalf("unable to generate blocks: %v", err) - } - - select { - case <-epochsSent: - case <-time.After(30 * time.Second): - t.Fatalf("all notifications not sent") - } -} - -func testMultiClientConfirmationNotification(miner *rpctest.Harness, - notifier chainntnfs.TestChainNotifier, scriptDispatch bool, t *testing.T) { - - // We'd like to test the case of a multiple clients registered to - // receive a confirmation notification for the same transaction. 
- txid, pkScript, err := chainntnfs.GetTestTxidAndScript(miner) - if err != nil { - t.Fatalf("unable to create test tx: %v", err) - } - if err := chainntnfs.WaitForMempoolTx(miner, txid); err != nil { - t.Fatalf("tx not relayed to miner: %v", err) - } - - var wg sync.WaitGroup - const ( - numConfsClients = 5 - numConfs = 1 - ) - - _, currentHeight, err := miner.Node.GetBestBlock() - if err != nil { - t.Fatalf("unable to get current height: %v", err) - } - - // Register for a conf notification for the above generated txid with - // numConfsClients distinct clients. - for i := 0; i < numConfsClients; i++ { - var confClient *chainntnfs.ConfirmationEvent - if scriptDispatch { - confClient, err = notifier.RegisterConfirmationsNtfn( - nil, pkScript, numConfs, uint32(currentHeight), - ) - } else { - confClient, err = notifier.RegisterConfirmationsNtfn( - txid, pkScript, numConfs, uint32(currentHeight), - ) - } - if err != nil { - t.Fatalf("unable to register for confirmation: %v", err) - } - - wg.Add(1) - go func() { - <-confClient.Confirmed - wg.Done() - }() - } - - confsSent := make(chan struct{}) - go func() { - wg.Wait() - close(confsSent) - }() - - // Finally, generate a single block which should trigger the unblocking - // of all numConfsClients blocked on the channel read above. - if _, err := miner.Node.Generate(1); err != nil { - t.Fatalf("unable to generate block: %v", err) - } - - select { - case <-confsSent: - case <-time.After(30 * time.Second): - t.Fatalf("all confirmation notifications not sent") - } -} - -// Tests the case in which a confirmation notification is requested for a -// transaction that has already been included in a block. In this case, the -// confirmation notification should be dispatched immediately. -func testTxConfirmedBeforeNtfnRegistration(miner *rpctest.Harness, - notifier chainntnfs.TestChainNotifier, scriptDispatch bool, t *testing.T) { - - // First, let's send some coins to "ourself", obtaining a txid. 
We're - // spending from a coinbase output here, so we use the dedicated - // function. - txid3, pkScript3, err := chainntnfs.GetTestTxidAndScript(miner) - if err != nil { - t.Fatalf("unable to create test tx: %v", err) - } - if err := chainntnfs.WaitForMempoolTx(miner, txid3); err != nil { - t.Fatalf("tx not relayed to miner: %v", err) - } - - // Generate another block containing tx 3, but we won't register conf - // notifications for this tx until much later. The notifier must check - // older blocks when the confirmation event is registered below to ensure - // that the TXID hasn't already been included in the chain, otherwise the - // notification will never be sent. - _, err = miner.Node.Generate(1) - if err != nil { - t.Fatalf("unable to generate block: %v", err) - } - - txid1, pkScript1, err := chainntnfs.GetTestTxidAndScript(miner) - if err != nil { - t.Fatalf("unable to create test tx: %v", err) - } - if err := chainntnfs.WaitForMempoolTx(miner, txid1); err != nil { - t.Fatalf("tx not relayed to miner: %v", err) - } - - txid2, pkScript2, err := chainntnfs.GetTestTxidAndScript(miner) - if err != nil { - t.Fatalf("unable to create test tx: %v", err) - } - if err := chainntnfs.WaitForMempoolTx(miner, txid2); err != nil { - t.Fatalf("tx not relayed to miner: %v", err) - } - - _, currentHeight, err := miner.Node.GetBestBlock() - if err != nil { - t.Fatalf("unable to get current height: %v", err) - } - - // Now generate another block containing txs 1 & 2. - blockHash, err := miner.Node.Generate(1) - if err != nil { - t.Fatalf("unable to generate block: %v", err) - } - - // Register a confirmation notification with the chainntfn source for tx2, - // which is included in the last block. The height hint is the height before - // the block is included. This notification should fire immediately since - // only 1 confirmation is required. 
- var ntfn1 *chainntnfs.ConfirmationEvent - if scriptDispatch { - ntfn1, err = notifier.RegisterConfirmationsNtfn( - nil, pkScript1, 1, uint32(currentHeight), - ) - } else { - ntfn1, err = notifier.RegisterConfirmationsNtfn( - txid1, pkScript1, 1, uint32(currentHeight), - ) - } - if err != nil { - t.Fatalf("unable to register ntfn: %v", err) - } - - select { - case confInfo := <-ntfn1.Confirmed: - // Finally, we'll verify that the tx index returned is the exact same - // as the tx index of the transaction within the block itself. - msgBlock, err := miner.Node.GetBlock(blockHash[0]) - if err != nil { - t.Fatalf("unable to fetch block: %v", err) - } - block := btcutil.NewBlock(msgBlock) - specifiedTxHash, err := block.TxHash(int(confInfo.TxIndex)) - if err != nil { - t.Fatalf("unable to index into block: %v", err) - } - if !specifiedTxHash.IsEqual(txid1) { - t.Fatalf("mismatched tx indexes: expected %v, got %v", - txid1, specifiedTxHash) - } - - // We'll also ensure that the block height has been set - // properly. - if confInfo.BlockHeight != uint32(currentHeight+1) { - t.Fatalf("incorrect block height: expected %v, got %v", - confInfo.BlockHeight, currentHeight) - } - break - case <-time.After(20 * time.Second): - t.Fatalf("confirmation notification never received") - } - - // Register a confirmation notification for tx2, requiring 3 confirmations. - // This transaction is only partially confirmed, so the notification should - // not fire yet. - var ntfn2 *chainntnfs.ConfirmationEvent - if scriptDispatch { - ntfn2, err = notifier.RegisterConfirmationsNtfn( - nil, pkScript2, 3, uint32(currentHeight), - ) - } else { - ntfn2, err = notifier.RegisterConfirmationsNtfn( - txid2, pkScript2, 3, uint32(currentHeight), - ) - } - if err != nil { - t.Fatalf("unable to register ntfn: %v", err) - } - - // Fully confirm tx3. 
- _, err = miner.Node.Generate(2) - if err != nil { - t.Fatalf("unable to generate block: %v", err) - } - - select { - case <-ntfn2.Confirmed: - case <-time.After(10 * time.Second): - t.Fatalf("confirmation notification never received") - } - - select { - case <-ntfn1.Confirmed: - t.Fatalf("received multiple confirmations for tx") - case <-time.After(1 * time.Second): - } - - // Finally register a confirmation notification for tx3, requiring 1 - // confirmation. Ensure that conf notifications do not refire on txs - // 1 or 2. - var ntfn3 *chainntnfs.ConfirmationEvent - if scriptDispatch { - ntfn3, err = notifier.RegisterConfirmationsNtfn( - nil, pkScript3, 1, uint32(currentHeight-1), - ) - } else { - ntfn3, err = notifier.RegisterConfirmationsNtfn( - txid3, pkScript3, 1, uint32(currentHeight-1), - ) - } - if err != nil { - t.Fatalf("unable to register ntfn: %v", err) - } - - // We'll also register for a confirmation notification with the pkscript - // of a different transaction. This notification shouldn't fire since we - // match on both txid and pkscript. - var ntfn4 *chainntnfs.ConfirmationEvent - ntfn4, err = notifier.RegisterConfirmationsNtfn( - txid3, pkScript2, 1, uint32(currentHeight-1), - ) - if err != nil { - t.Fatalf("unable to register ntfn: %v", err) - } - - select { - case <-ntfn3.Confirmed: - case <-time.After(10 * time.Second): - t.Fatalf("confirmation notification never received") - } - - select { - case <-ntfn4.Confirmed: - t.Fatalf("confirmation notification received") - case <-time.After(5 * time.Second): - } - - time.Sleep(1 * time.Second) - - select { - case <-ntfn1.Confirmed: - t.Fatalf("received multiple confirmations for tx") - default: - } - - select { - case <-ntfn2.Confirmed: - t.Fatalf("received multiple confirmations for tx") - default: - } -} - -// Test the case of a notification consumer having forget or being delayed in -// checking for a confirmation. 
This should not cause the notifier to stop -// working -func testLazyNtfnConsumer(miner *rpctest.Harness, - notifier chainntnfs.TestChainNotifier, scriptDispatch bool, t *testing.T) { - - // Create a transaction to be notified about. We'll register for - // notifications on this transaction but won't be prompt in checking them - txid, pkScript, err := chainntnfs.GetTestTxidAndScript(miner) - if err != nil { - t.Fatalf("unable to create test tx: %v", err) - } - if err := chainntnfs.WaitForMempoolTx(miner, txid); err != nil { - t.Fatalf("tx not relayed to miner: %v", err) - } - - _, currentHeight, err := miner.Node.GetBestBlock() - if err != nil { - t.Fatalf("unable to get current height: %v", err) - } - - numConfs := uint32(3) - - // Add a block right before registering, this makes race conditions - // between the historical dispatcher and the normal dispatcher more obvious - if _, err := miner.Node.Generate(1); err != nil { - t.Fatalf("unable to generate blocks: %v", err) - } - - var firstConfIntent *chainntnfs.ConfirmationEvent - if scriptDispatch { - firstConfIntent, err = notifier.RegisterConfirmationsNtfn( - nil, pkScript, numConfs, uint32(currentHeight), - ) - } else { - firstConfIntent, err = notifier.RegisterConfirmationsNtfn( - txid, pkScript, numConfs, uint32(currentHeight), - ) - } - if err != nil { - t.Fatalf("unable to register ntfn: %v", err) - } - - // Generate another 2 blocks, this should dispatch the confirm notification - if _, err := miner.Node.Generate(2); err != nil { - t.Fatalf("unable to generate blocks: %v", err) - } - - // Now make another transaction, just because we haven't checked to see - // if the first transaction has confirmed doesn't mean that we shouldn't - // be able to see if this transaction confirms first - txid, pkScript, err = chainntnfs.GetTestTxidAndScript(miner) - if err != nil { - t.Fatalf("unable to create test tx: %v", err) - } - if err := chainntnfs.WaitForMempoolTx(miner, txid); err != nil { - t.Fatalf("tx not relayed 
to miner: %v", err) - } - - _, currentHeight, err = miner.Node.GetBestBlock() - if err != nil { - t.Fatalf("unable to get current height: %v", err) - } - - numConfs = 1 - var secondConfIntent *chainntnfs.ConfirmationEvent - if scriptDispatch { - secondConfIntent, err = notifier.RegisterConfirmationsNtfn( - nil, pkScript, numConfs, uint32(currentHeight), - ) - } else { - secondConfIntent, err = notifier.RegisterConfirmationsNtfn( - txid, pkScript, numConfs, uint32(currentHeight), - ) - } - if err != nil { - t.Fatalf("unable to register ntfn: %v", err) - } - - if _, err := miner.Node.Generate(1); err != nil { - t.Fatalf("unable to generate blocks: %v", err) - } - - select { - case <-secondConfIntent.Confirmed: - // Successfully receive the second notification - break - case <-time.After(30 * time.Second): - t.Fatalf("Second confirmation notification never received") - } - - // Make sure the first tx confirmed successfully - select { - case <-firstConfIntent.Confirmed: - break - case <-time.After(30 * time.Second): - t.Fatalf("First confirmation notification never received") - } -} - -// Tests the case in which a spend notification is requested for a spend that -// has already been included in a block. In this case, the spend notification -// should be dispatched immediately. -func testSpendBeforeNtfnRegistration(miner *rpctest.Harness, - notifier chainntnfs.TestChainNotifier, scriptDispatch bool, t *testing.T) { - - // We'd like to test the spend notifications for all ChainNotifier - // concrete implementations. - // - // To do so, we first create a new output to our test target address. - outpoint, output, privKey := chainntnfs.CreateSpendableOutput(t, miner) - - _, heightHint, err := miner.Node.GetBestBlock() - if err != nil { - t.Fatalf("unable to get current height: %v", err) - } - - // We'll then spend this output and broadcast the spend transaction. 
- spendingTx := chainntnfs.CreateSpendTx(t, outpoint, output, privKey) - spenderSha, err := miner.Node.SendRawTransaction(spendingTx, true) - if err != nil { - t.Fatalf("unable to broadcast tx: %v", err) - } - if err := chainntnfs.WaitForMempoolTx(miner, spenderSha); err != nil { - t.Fatalf("tx not relayed to miner: %v", err) - } - - // We create an epoch client we can use to make sure the notifier is - // caught up to the mining node's chain. - epochClient, err := notifier.RegisterBlockEpochNtfn(nil) - if err != nil { - t.Fatalf("unable to register for block epoch: %v", err) - } - - // Now we mine an additional block, which should include our spend. - if _, err := miner.Node.Generate(1); err != nil { - t.Fatalf("unable to generate single block: %v", err) - } - _, spendHeight, err := miner.Node.GetBestBlock() - if err != nil { - t.Fatalf("unable to get current height: %v", err) - } - - // checkSpends registers two clients to be notified of a spend that has - // already happened. The notifier should dispatch a spend notification - // immediately. - checkSpends := func() { - t.Helper() - - const numClients = 2 - spendClients := make([]*chainntnfs.SpendEvent, numClients) - for i := 0; i < numClients; i++ { - var spentIntent *chainntnfs.SpendEvent - if scriptDispatch { - spentIntent, err = notifier.RegisterSpendNtfn( - nil, output.PkScript, uint32(heightHint), - ) - } else { - spentIntent, err = notifier.RegisterSpendNtfn( - outpoint, output.PkScript, - uint32(heightHint), - ) - } - if err != nil { - t.Fatalf("unable to register for spend ntfn: %v", - err) - } - - spendClients[i] = spentIntent - } - - for _, client := range spendClients { - select { - case ntfn := <-client.Spend: - // We've received the spend nftn. So now verify - // all the fields have been set properly. 
- checkNotificationFields( - ntfn, outpoint, spenderSha, spendHeight, t, - ) - case <-time.After(30 * time.Second): - t.Fatalf("spend ntfn never received") - } - } - } - - // Wait for the notifier to have caught up to the mined block. - select { - case _, ok := <-epochClient.Epochs: - if !ok { - t.Fatalf("epoch channel was closed") - } - case <-time.After(15 * time.Second): - t.Fatalf("did not receive block epoch") - } - - // Check that the spend clients gets immediately notified for the spend - // in the previous block. - checkSpends() - - // Bury the spend even deeper, and do the same check. - const numBlocks = 10 - if _, err := miner.Node.Generate(numBlocks); err != nil { - t.Fatalf("unable to generate single block: %v", err) - } - - // Wait for the notifier to have caught up with the new blocks. - for i := 0; i < numBlocks; i++ { - select { - case _, ok := <-epochClient.Epochs: - if !ok { - t.Fatalf("epoch channel was closed") - } - case <-time.After(15 * time.Second): - t.Fatalf("did not receive block epoch") - } - } - - // The clients should still be notified immediately. - checkSpends() -} - -func testCancelSpendNtfn(node *rpctest.Harness, - notifier chainntnfs.TestChainNotifier, scriptDispatch bool, t *testing.T) { - - // We'd like to test that once a spend notification is registered, it - // can be canceled before the notification is dispatched. - - // First, we'll start by creating a new output that we can spend - // ourselves. - outpoint, output, privKey := chainntnfs.CreateSpendableOutput(t, node) - - _, currentHeight, err := node.Node.GetBestBlock() - if err != nil { - t.Fatalf("unable to get current height: %v", err) - } - - // Create two clients that each registered to the spend notification. - // We'll cancel the notification for the first client and leave the - // notification for the second client enabled. 
- const numClients = 2 - spendClients := make([]*chainntnfs.SpendEvent, numClients) - for i := 0; i < numClients; i++ { - var spentIntent *chainntnfs.SpendEvent - if scriptDispatch { - spentIntent, err = notifier.RegisterSpendNtfn( - nil, output.PkScript, uint32(currentHeight), - ) - } else { - spentIntent, err = notifier.RegisterSpendNtfn( - outpoint, output.PkScript, uint32(currentHeight), - ) - } - if err != nil { - t.Fatalf("unable to register for spend ntfn: %v", err) - } - - spendClients[i] = spentIntent - } - - // Next, create a new transaction spending that output. - spendingTx := chainntnfs.CreateSpendTx(t, outpoint, output, privKey) - - // Before we broadcast the spending transaction, we'll cancel the - // notification of the first client. - spendClients[1].Cancel() - - // Broadcast our spending transaction. - spenderSha, err := node.Node.SendRawTransaction(spendingTx, true) - if err != nil { - t.Fatalf("unable to broadcast tx: %v", err) - } - - if err := chainntnfs.WaitForMempoolTx(node, spenderSha); err != nil { - t.Fatalf("tx not relayed to miner: %v", err) - } - - // Now we mine a single block, which should include our spend. The - // notification should also be sent off. - if _, err := node.Node.Generate(1); err != nil { - t.Fatalf("unable to generate single block: %v", err) - } - - // The spend notification for the first client should have been - // dispatched. - select { - case ntfn := <-spendClients[0].Spend: - // We've received the spend nftn. So now verify all the - // fields have been set properly. 
- if *ntfn.SpentOutPoint != *outpoint { - t.Fatalf("ntfn includes wrong output, reports "+ - "%v instead of %v", - ntfn.SpentOutPoint, outpoint) - } - if !bytes.Equal(ntfn.SpenderTxHash[:], spenderSha[:]) { - t.Fatalf("ntfn includes wrong spender tx sha, "+ - "reports %v instead of %v", - ntfn.SpenderTxHash[:], spenderSha[:]) - } - if ntfn.SpenderInputIndex != 0 { - t.Fatalf("ntfn includes wrong spending input "+ - "index, reports %v, should be %v", - ntfn.SpenderInputIndex, 0) - } - case <-time.After(20 * time.Second): - t.Fatalf("spend ntfn never received") - } - - // However, the spend notification of the second client should NOT have - // been dispatched. - select { - case _, ok := <-spendClients[1].Spend: - if ok { - t.Fatalf("spend ntfn should have been canceled") - } - case <-time.After(20 * time.Second): - t.Fatalf("spend ntfn never canceled") - } -} - -func testCancelEpochNtfn(node *rpctest.Harness, - notifier chainntnfs.TestChainNotifier, t *testing.T) { - - // We'd like to ensure that once a client cancels their block epoch - // notifications, no further notifications are sent over the channel - // if/when new blocks come in. - const numClients = 2 - - epochClients := make([]*chainntnfs.BlockEpochEvent, numClients) - for i := 0; i < numClients; i++ { - epochClient, err := notifier.RegisterBlockEpochNtfn(nil) - if err != nil { - t.Fatalf("unable to register for epoch notification") - } - epochClients[i] = epochClient - } - - // Now before we mine any blocks, cancel the notification for the first - // epoch client. - epochClients[0].Cancel() - - // Now mine a single block, this should trigger the logic to dispatch - // epoch notifications. - if _, err := node.Node.Generate(1); err != nil { - t.Fatalf("unable to generate blocks: %v", err) - } - - // The epoch notification for the first client shouldn't have been - // dispatched. 
- select { - case _, ok := <-epochClients[0].Epochs: - if ok { - t.Fatalf("epoch notification should have been canceled") - } - case <-time.After(2 * time.Second): - t.Fatalf("epoch notification not sent") - } - - // However, the epoch notification for the second client should have - // been dispatched as normal. - select { - case _, ok := <-epochClients[1].Epochs: - if !ok { - t.Fatalf("epoch was canceled") - } - case <-time.After(20 * time.Second): - t.Fatalf("epoch notification not sent") - } -} - -func testReorgConf(miner *rpctest.Harness, - notifier chainntnfs.TestChainNotifier, scriptDispatch bool, t *testing.T) { - - // Set up a new miner that we can use to cause a reorg. - miner2, err := rpctest.New(chainntnfs.NetParams, nil, []string{"--txindex"}) - if err != nil { - t.Fatalf("unable to create mining node: %v", err) - } - if err := miner2.SetUp(false, 0); err != nil { - t.Fatalf("unable to set up mining node: %v", err) - } - defer miner2.TearDown() - - // We start by connecting the new miner to our original miner, - // such that it will sync to our original chain. - if err := rpctest.ConnectNode(miner, miner2); err != nil { - t.Fatalf("unable to connect harnesses: %v", err) - } - nodeSlice := []*rpctest.Harness{miner, miner2} - if err := rpctest.JoinNodes(nodeSlice, rpctest.Blocks); err != nil { - t.Fatalf("unable to join node on blocks: %v", err) - } - - // The two should be on the same blockheight. - _, nodeHeight1, err := miner.Node.GetBestBlock() - if err != nil { - t.Fatalf("unable to get current blockheight %v", err) - } - - _, nodeHeight2, err := miner2.Node.GetBestBlock() - if err != nil { - t.Fatalf("unable to get current blockheight %v", err) - } - - if nodeHeight1 != nodeHeight2 { - t.Fatalf("expected both miners to be on the same height: %v vs %v", - nodeHeight1, nodeHeight2) - } - - // We disconnect the two nodes, such that we can start mining on them - // individually without the other one learning about the new blocks. 
- err = miner.Node.AddNode(miner2.P2PAddress(), rpcclient.ANRemove) - if err != nil { - t.Fatalf("unable to remove node: %v", err) - } - - txid, pkScript, err := chainntnfs.GetTestTxidAndScript(miner) - if err != nil { - t.Fatalf("unable to create test tx: %v", err) - } - if err := chainntnfs.WaitForMempoolTx(miner, txid); err != nil { - t.Fatalf("tx not relayed to miner: %v", err) - } - - _, currentHeight, err := miner.Node.GetBestBlock() - if err != nil { - t.Fatalf("unable to get current height: %v", err) - } - - // Now that we have a txid, register a confirmation notification with - // the chainntfn source. - numConfs := uint32(2) - var confIntent *chainntnfs.ConfirmationEvent - if scriptDispatch { - confIntent, err = notifier.RegisterConfirmationsNtfn( - nil, pkScript, numConfs, uint32(currentHeight), - ) - } else { - confIntent, err = notifier.RegisterConfirmationsNtfn( - txid, pkScript, numConfs, uint32(currentHeight), - ) - } - if err != nil { - t.Fatalf("unable to register ntfn: %v", err) - } - - // Now generate a single block, the transaction should be included. - _, err = miner.Node.Generate(1) - if err != nil { - t.Fatalf("unable to generate single block: %v", err) - } - - // Transaction only has one confirmation, and the notification is registered - // with 2 confirmations, so we should not be notified yet. - select { - case <-confIntent.Confirmed: - t.Fatal("tx was confirmed unexpectedly") - case <-time.After(1 * time.Second): - } - - // Reorganize transaction out of the chain by generating a longer fork - // from the other miner. The transaction is not included in this fork. - miner2.Node.Generate(2) - - // Reconnect nodes to reach consensus on the longest chain. miner2's chain - // should win and become active on miner1. 
- if err := rpctest.ConnectNode(miner, miner2); err != nil { - t.Fatalf("unable to connect harnesses: %v", err) - } - nodeSlice = []*rpctest.Harness{miner, miner2} - if err := rpctest.JoinNodes(nodeSlice, rpctest.Blocks); err != nil { - t.Fatalf("unable to join node on blocks: %v", err) - } - - _, nodeHeight1, err = miner.Node.GetBestBlock() - if err != nil { - t.Fatalf("unable to get current blockheight %v", err) - } - - _, nodeHeight2, err = miner2.Node.GetBestBlock() - if err != nil { - t.Fatalf("unable to get current blockheight %v", err) - } - - if nodeHeight1 != nodeHeight2 { - t.Fatalf("expected both miners to be on the same height: %v vs %v", - nodeHeight1, nodeHeight2) - } - - // Even though there is one block above the height of the block that the - // transaction was included in, it is not the active chain so the - // notification should not be sent. - select { - case <-confIntent.Confirmed: - t.Fatal("tx was confirmed unexpectedly") - case <-time.After(1 * time.Second): - } - - // Now confirm the transaction on the longest chain and verify that we - // receive the notification. - tx, err := miner.Node.GetRawTransaction(txid) - if err != nil { - t.Fatalf("unable to get raw tx: %v", err) - } - - txid, err = miner2.Node.SendRawTransaction(tx.MsgTx(), false) - if err != nil { - t.Fatalf("unable to get send tx: %v", err) - } - if err := chainntnfs.WaitForMempoolTx(miner, txid); err != nil { - t.Fatalf("tx not relayed to miner: %v", err) - } - - _, err = miner.Node.Generate(3) - if err != nil { - t.Fatalf("unable to generate single block: %v", err) - } - - select { - case <-confIntent.Confirmed: - case <-time.After(20 * time.Second): - t.Fatalf("confirmation notification never received") - } -} - -// testReorgSpend ensures that the different ChainNotifier implementations -// correctly handle outpoints whose spending transaction has been reorged out of -// the chain. 
-func testReorgSpend(miner *rpctest.Harness, - notifier chainntnfs.TestChainNotifier, scriptDispatch bool, t *testing.T) { - - // We'll start by creating an output and registering a spend - // notification for it. - outpoint, output, privKey := chainntnfs.CreateSpendableOutput(t, miner) - _, heightHint, err := miner.Node.GetBestBlock() - if err != nil { - t.Fatalf("unable to retrieve current height: %v", err) - } - - var spendIntent *chainntnfs.SpendEvent - if scriptDispatch { - spendIntent, err = notifier.RegisterSpendNtfn( - nil, output.PkScript, uint32(heightHint), - ) - } else { - spendIntent, err = notifier.RegisterSpendNtfn( - outpoint, output.PkScript, uint32(heightHint), - ) - } - if err != nil { - t.Fatalf("unable to register for spend: %v", err) - } - - // Set up a new miner that we can use to cause a reorg. - miner2, err := rpctest.New(chainntnfs.NetParams, nil, []string{"--txindex"}) - if err != nil { - t.Fatalf("unable to create mining node: %v", err) - } - if err := miner2.SetUp(false, 0); err != nil { - t.Fatalf("unable to set up mining node: %v", err) - } - defer miner2.TearDown() - - // We start by connecting the new miner to our original miner, in order - // to have a consistent view of the chain from both miners. They should - // be on the same block height. 
- if err := rpctest.ConnectNode(miner, miner2); err != nil { - t.Fatalf("unable to connect miners: %v", err) - } - nodeSlice := []*rpctest.Harness{miner, miner2} - if err := rpctest.JoinNodes(nodeSlice, rpctest.Blocks); err != nil { - t.Fatalf("unable to sync miners: %v", err) - } - _, minerHeight1, err := miner.Node.GetBestBlock() - if err != nil { - t.Fatalf("unable to get miner1's current height: %v", err) - } - _, minerHeight2, err := miner2.Node.GetBestBlock() - if err != nil { - t.Fatalf("unable to get miner2's current height: %v", err) - } - if minerHeight1 != minerHeight2 { - t.Fatalf("expected both miners to be on the same height: "+ - "%v vs %v", minerHeight1, minerHeight2) - } - - // We disconnect the two nodes, such that we can start mining on them - // individually without the other one learning about the new blocks. - err = miner.Node.AddNode(miner2.P2PAddress(), rpcclient.ANRemove) - if err != nil { - t.Fatalf("unable to disconnect miners: %v", err) - } - - // Craft the spending transaction for the outpoint created above and - // confirm it under the chain of the original miner. - spendTx := chainntnfs.CreateSpendTx(t, outpoint, output, privKey) - spendTxHash, err := miner.Node.SendRawTransaction(spendTx, true) - if err != nil { - t.Fatalf("unable to broadcast spend tx: %v", err) - } - if err := chainntnfs.WaitForMempoolTx(miner, spendTxHash); err != nil { - t.Fatalf("spend tx not relayed to miner: %v", err) - } - const numBlocks = 1 - if _, err := miner.Node.Generate(numBlocks); err != nil { - t.Fatalf("unable to generate blocks: %v", err) - } - _, spendHeight, err := miner.Node.GetBestBlock() - if err != nil { - t.Fatalf("unable to get spend height: %v", err) - } - - // We should see a spend notification dispatched with the correct spend - // details. 
- select { - case spendDetails := <-spendIntent.Spend: - checkNotificationFields( - spendDetails, outpoint, spendTxHash, spendHeight, t, - ) - case <-time.After(5 * time.Second): - t.Fatal("expected spend notification to be dispatched") - } - - // Now, with the other miner, we'll generate one more block than the - // other miner and connect them to cause a reorg. - if _, err := miner2.Node.Generate(numBlocks + 1); err != nil { - t.Fatalf("unable to generate blocks: %v", err) - } - if err := rpctest.ConnectNode(miner, miner2); err != nil { - t.Fatalf("unable to connect miners: %v", err) - } - nodeSlice = []*rpctest.Harness{miner2, miner} - if err := rpctest.JoinNodes(nodeSlice, rpctest.Blocks); err != nil { - t.Fatalf("unable to sync miners: %v", err) - } - _, minerHeight1, err = miner.Node.GetBestBlock() - if err != nil { - t.Fatalf("unable to get miner1's current height: %v", err) - } - _, minerHeight2, err = miner2.Node.GetBestBlock() - if err != nil { - t.Fatalf("unable to get miner2's current height: %v", err) - } - if minerHeight1 != minerHeight2 { - t.Fatalf("expected both miners to be on the same height: "+ - "%v vs %v", minerHeight1, minerHeight2) - } - - // We should receive a reorg notification. - select { - case _, ok := <-spendIntent.Reorg: - if !ok { - t.Fatal("unexpected reorg channel closed") - } - case <-time.After(5 * time.Second): - t.Fatal("expected to receive reorg notification") - } - - // Now that both miners are on the same chain, we'll confirm the - // spending transaction of the outpoint and receive a notification for - // it. 
- if _, err = miner2.Node.SendRawTransaction(spendTx, true); err != nil { - t.Fatalf("unable to broadcast spend tx: %v", err) - } - if err := chainntnfs.WaitForMempoolTx(miner, spendTxHash); err != nil { - t.Fatalf("tx not relayed to miner: %v", err) - } - if _, err := miner.Node.Generate(numBlocks); err != nil { - t.Fatalf("unable to generate single block: %v", err) - } - _, spendHeight, err = miner.Node.GetBestBlock() - if err != nil { - t.Fatalf("unable to retrieve current height: %v", err) - } - - select { - case spendDetails := <-spendIntent.Spend: - checkNotificationFields( - spendDetails, outpoint, spendTxHash, spendHeight, t, - ) - case <-time.After(5 * time.Second): - t.Fatal("expected spend notification to be dispatched") - } -} - -// testCatchUpClientOnMissedBlocks tests the case of multiple registered client -// receiving historical block epoch notifications due to their best known block -// being out of date. -func testCatchUpClientOnMissedBlocks(miner *rpctest.Harness, - notifier chainntnfs.TestChainNotifier, t *testing.T) { - - const numBlocks = 10 - const numClients = 5 - var wg sync.WaitGroup - - outdatedHash, outdatedHeight, err := miner.Node.GetBestBlock() - if err != nil { - t.Fatalf("unable to retrieve current height: %v", err) - } - - // This function is used by UnsafeStart to ensure all notifications - // are fully drained before clients register for notifications. - generateBlocks := func() er.R { - _, err = miner.Node.Generate(numBlocks) - return err - } - - // We want to ensure that when a client registers for block notifications, - // the notifier's best block is at the tip of the chain. If it isn't, the - // client may not receive all historical notifications. 
- bestHeight := outdatedHeight + numBlocks - err = notifier.UnsafeStart(bestHeight, nil, bestHeight, generateBlocks) - if err != nil { - t.Fatalf("unable to unsafe start the notifier: %v", err) - } - defer notifier.Stop() - - // Create numClients clients whose best known block is 10 blocks behind - // the tip of the chain. We expect each client to receive numBlocks - // notifications, 1 for each block they're behind. - clients := make([]*chainntnfs.BlockEpochEvent, 0, numClients) - outdatedBlock := &chainntnfs.BlockEpoch{ - Height: outdatedHeight, Hash: outdatedHash, - } - for i := 0; i < numClients; i++ { - epochClient, err := notifier.RegisterBlockEpochNtfn(outdatedBlock) - if err != nil { - t.Fatalf("unable to register for epoch notification: %v", err) - } - clients = append(clients, epochClient) - } - for expectedHeight := outdatedHeight + 1; expectedHeight <= - bestHeight; expectedHeight++ { - - for _, epochClient := range clients { - select { - case block := <-epochClient.Epochs: - if block.Height != expectedHeight { - t.Fatalf("received block of height: %d, "+ - "expected: %d", block.Height, - expectedHeight) - } - case <-time.After(20 * time.Second): - t.Fatalf("did not receive historical notification "+ - "for height %d", expectedHeight) - } - - } - } - - // Finally, ensure that an extra block notification wasn't received. 
- anyExtras := make(chan struct{}, len(clients)) - for _, epochClient := range clients { - wg.Add(1) - go func(epochClient *chainntnfs.BlockEpochEvent) { - defer wg.Done() - select { - case <-epochClient.Epochs: - anyExtras <- struct{}{} - case <-time.After(5 * time.Second): - } - }(epochClient) - } - - wg.Wait() - close(anyExtras) - - var extraCount int - for range anyExtras { - extraCount++ - } - - if extraCount > 0 { - t.Fatalf("received %d unexpected block notification", extraCount) - } -} - -// testCatchUpOnMissedBlocks the case of multiple registered clients receiving -// historical block epoch notifications due to the notifier's best known block -// being out of date. -func testCatchUpOnMissedBlocks(miner *rpctest.Harness, - notifier chainntnfs.TestChainNotifier, t *testing.T) { - - const numBlocks = 10 - const numClients = 5 - var wg sync.WaitGroup - - _, bestHeight, err := miner.Node.GetBestBlock() - if err != nil { - t.Fatalf("unable to get current blockheight %v", err) - } - - // This function is used by UnsafeStart to ensure all notifications - // are fully drained before clients register for notifications. - generateBlocks := func() er.R { - _, err = miner.Node.Generate(numBlocks) - return err - } - - // Next, start the notifier with outdated best block information. - err = notifier.UnsafeStart( - bestHeight, nil, bestHeight+numBlocks, generateBlocks, - ) - if err != nil { - t.Fatalf("unable to unsafe start the notifier: %v", err) - } - defer notifier.Stop() - - // Create numClients clients who will listen for block notifications. - clients := make([]*chainntnfs.BlockEpochEvent, 0, numClients) - for i := 0; i < numClients; i++ { - epochClient, err := notifier.RegisterBlockEpochNtfn(nil) - if err != nil { - t.Fatalf("unable to register for epoch notification: %v", err) - } - - // Drain the notification dispatched upon registration as we're - // not interested in it. 
- select { - case <-epochClient.Epochs: - case <-time.After(5 * time.Second): - t.Fatal("expected to receive epoch for current block " + - "upon registration") - } - - clients = append(clients, epochClient) - } - - // Generate a single block to trigger the backlog of historical - // notifications for the previously mined blocks. - if _, err := miner.Node.Generate(1); err != nil { - t.Fatalf("unable to generate blocks: %v", err) - } - - // We expect each client to receive numBlocks + 1 notifications, 1 for - // each block that the notifier has missed out on. - for expectedHeight := bestHeight + 1; expectedHeight <= - bestHeight+numBlocks+1; expectedHeight++ { - - for _, epochClient := range clients { - select { - case block := <-epochClient.Epochs: - if block.Height != expectedHeight { - t.Fatalf("received block of height: %d, "+ - "expected: %d", block.Height, - expectedHeight) - } - case <-time.After(20 * time.Second): - t.Fatalf("did not receive historical notification "+ - "for height %d", expectedHeight) - } - } - } - - // Finally, ensure that an extra block notification wasn't received. - anyExtras := make(chan struct{}, len(clients)) - for _, epochClient := range clients { - wg.Add(1) - go func(epochClient *chainntnfs.BlockEpochEvent) { - defer wg.Done() - select { - case <-epochClient.Epochs: - anyExtras <- struct{}{} - case <-time.After(5 * time.Second): - } - }(epochClient) - } - - wg.Wait() - close(anyExtras) - - var extraCount int - for range anyExtras { - extraCount++ - } - - if extraCount > 0 { - t.Fatalf("received %d unexpected block notification", extraCount) - } -} - -// testCatchUpOnMissedBlocks tests that a client will still receive all valid -// block notifications in the case where a notifier's best block has been reorged -// out of the chain. 
-func testCatchUpOnMissedBlocksWithReorg(miner1 *rpctest.Harness, - notifier chainntnfs.TestChainNotifier, t *testing.T) { - - // If this is the neutrino notifier, then we'll skip this test for now - // as we're missing functionality required to ensure the test passes - // reliably. - if _, ok := notifier.(*neutrinonotify.NeutrinoNotifier); ok { - t.Skip("skipping re-org test for neutrino") - } - - const numBlocks = 10 - const numClients = 5 - var wg sync.WaitGroup - - // Set up a new miner that we can use to cause a reorg. - miner2, err := rpctest.New(chainntnfs.NetParams, nil, []string{"--txindex"}) - if err != nil { - t.Fatalf("unable to create mining node: %v", err) - } - if err := miner2.SetUp(false, 0); err != nil { - t.Fatalf("unable to set up mining node: %v", err) - } - defer miner2.TearDown() - - // We start by connecting the new miner to our original miner, - // such that it will sync to our original chain. - if err := rpctest.ConnectNode(miner1, miner2); err != nil { - t.Fatalf("unable to connect harnesses: %v", err) - } - nodeSlice := []*rpctest.Harness{miner1, miner2} - if err := rpctest.JoinNodes(nodeSlice, rpctest.Blocks); err != nil { - t.Fatalf("unable to join node on blocks: %v", err) - } - - // The two should be on the same blockheight. - _, nodeHeight1, err := miner1.Node.GetBestBlock() - if err != nil { - t.Fatalf("unable to get current blockheight %v", err) - } - - _, nodeHeight2, err := miner2.Node.GetBestBlock() - if err != nil { - t.Fatalf("unable to get current blockheight %v", err) - } - - if nodeHeight1 != nodeHeight2 { - t.Fatalf("expected both miners to be on the same height: %v vs %v", - nodeHeight1, nodeHeight2) - } - - // We disconnect the two nodes, such that we can start mining on them - // individually without the other one learning about the new blocks. 
- err = miner1.Node.AddNode(miner2.P2PAddress(), rpcclient.ANRemove) - if err != nil { - t.Fatalf("unable to remove node: %v", err) - } - - // Now mine on each chain separately - blocks, err := miner1.Node.Generate(numBlocks) - if err != nil { - t.Fatalf("unable to generate single block: %v", err) - } - - // We generate an extra block on miner 2's chain to ensure it is the - // longer chain. - _, err = miner2.Node.Generate(numBlocks + 1) - if err != nil { - t.Fatalf("unable to generate single block: %v", err) - } - - // Sync the two chains to ensure they will sync to miner2's chain. - if err := rpctest.ConnectNode(miner1, miner2); err != nil { - t.Fatalf("unable to connect harnesses: %v", err) - } - nodeSlice = []*rpctest.Harness{miner1, miner2} - if err := rpctest.JoinNodes(nodeSlice, rpctest.Blocks); err != nil { - t.Fatalf("unable to join node on blocks: %v", err) - } - - // The two should be on the same block hash. - timeout := time.After(10 * time.Second) - for { - nodeHash1, _, err := miner1.Node.GetBestBlock() - if err != nil { - t.Fatalf("unable to get current block hash: %v", err) - } - - nodeHash2, _, err := miner2.Node.GetBestBlock() - if err != nil { - t.Fatalf("unable to get current block hash: %v", err) - } - - if *nodeHash1 == *nodeHash2 { - break - } - select { - case <-timeout: - t.Fatalf("Unable to sync two chains") - case <-time.After(50 * time.Millisecond): - continue - } - } - - // Next, start the notifier with outdated best block information. - // We set the notifier's best block to be the last block mined on the - // shorter chain, to test that the notifier correctly rewinds to - // the common ancestor between the two chains. - syncHeight := nodeHeight1 + numBlocks + 1 - err = notifier.UnsafeStart( - nodeHeight1+numBlocks, blocks[numBlocks-1], syncHeight, nil, - ) - if err != nil { - t.Fatalf("Unable to unsafe start the notifier: %v", err) - } - defer notifier.Stop() - - // Create numClients clients who will listen for block notifications. 
- clients := make([]*chainntnfs.BlockEpochEvent, 0, numClients) - for i := 0; i < numClients; i++ { - epochClient, err := notifier.RegisterBlockEpochNtfn(nil) - if err != nil { - t.Fatalf("unable to register for epoch notification: %v", err) - } - - // Drain the notification dispatched upon registration as we're - // not interested in it. - select { - case <-epochClient.Epochs: - case <-time.After(5 * time.Second): - t.Fatal("expected to receive epoch for current block " + - "upon registration") - } - - clients = append(clients, epochClient) - } - - // Generate a single block, which should trigger the notifier to rewind - // to the common ancestor and dispatch notifications from there. - _, err = miner2.Node.Generate(1) - if err != nil { - t.Fatalf("unable to generate single block: %v", err) - } - - // If the chain backend to the notifier stores information about reorged - // blocks, the notifier is able to rewind the chain to the common - // ancestor between the chain tip and its outdated best known block. - // In this case, the client is expected to receive numBlocks + 2 - // notifications, 1 for each block the notifier has missed out on from - // the longer chain. - // - // If the chain backend does not store information about reorged blocks, - // the notifier has no way of knowing where to rewind to and therefore - // the client is only expected to receive notifications for blocks - // whose height is greater than the notifier's best known height: 2 - // notifications, in this case. 
- var startingHeight int32 - switch notifier.(type) { - case *neutrinonotify.NeutrinoNotifier: - startingHeight = nodeHeight1 + numBlocks + 1 - default: - startingHeight = nodeHeight1 + 1 - } - - for expectedHeight := startingHeight; expectedHeight <= - nodeHeight1+numBlocks+2; expectedHeight++ { - - for _, epochClient := range clients { - select { - case block := <-epochClient.Epochs: - if block.Height != expectedHeight { - t.Fatalf("received block of height: %d, "+ - "expected: %d", block.Height, - expectedHeight) - } - case <-time.After(20 * time.Second): - t.Fatalf("did not receive historical notification "+ - "for height %d", expectedHeight) - } - } - } - - // Finally, ensure that an extra block notification wasn't received. - anyExtras := make(chan struct{}, len(clients)) - for _, epochClient := range clients { - wg.Add(1) - go func(epochClient *chainntnfs.BlockEpochEvent) { - defer wg.Done() - select { - case <-epochClient.Epochs: - anyExtras <- struct{}{} - case <-time.After(5 * time.Second): - } - }(epochClient) - } - - wg.Wait() - close(anyExtras) - - var extraCount int - for range anyExtras { - extraCount++ - } - - if extraCount > 0 { - t.Fatalf("received %d unexpected block notification", extraCount) - } -} - -type txNtfnTestCase struct { - name string - test func(node *rpctest.Harness, notifier chainntnfs.TestChainNotifier, - scriptDispatch bool, t *testing.T) -} - -type blockNtfnTestCase struct { - name string - test func(node *rpctest.Harness, notifier chainntnfs.TestChainNotifier, - t *testing.T) -} - -type blockCatchupTestCase struct { - name string - test func(node *rpctest.Harness, notifier chainntnfs.TestChainNotifier, - t *testing.T) -} - -var txNtfnTests = []txNtfnTestCase{ - { - name: "single conf ntfn", - test: testSingleConfirmationNotification, - }, - { - name: "multi conf ntfn", - test: testMultiConfirmationNotification, - }, - { - name: "batch conf ntfn", - test: testBatchConfirmationNotification, - }, - { - name: "multi client conf", - 
test: testMultiClientConfirmationNotification, - }, - { - name: "lazy ntfn consumer", - test: testLazyNtfnConsumer, - }, - { - name: "historical conf dispatch", - test: testTxConfirmedBeforeNtfnRegistration, - }, - { - name: "reorg conf", - test: testReorgConf, - }, - { - name: "spend ntfn", - test: testSpendNotification, - }, - { - name: "historical spend dispatch", - test: testSpendBeforeNtfnRegistration, - }, - { - name: "reorg spend", - test: testReorgSpend, - }, - { - name: "cancel spend ntfn", - test: testCancelSpendNtfn, - }, -} - -var blockNtfnTests = []blockNtfnTestCase{ - { - name: "block epoch", - test: testBlockEpochNotification, - }, - { - name: "cancel epoch ntfn", - test: testCancelEpochNtfn, - }, -} - -var blockCatchupTests = []blockCatchupTestCase{ - { - name: "catch up client on historical block epoch ntfns", - test: testCatchUpClientOnMissedBlocks, - }, - { - name: "test catch up on missed blocks", - test: testCatchUpOnMissedBlocks, - }, - { - name: "test catch up on missed blocks w/ reorged best block", - test: testCatchUpOnMissedBlocksWithReorg, - }, -} - -// TestInterfaces tests all registered interfaces with a unified set of tests -// which exercise each of the required methods found within the ChainNotifier -// interface. -// -// NOTE: In the future, when additional implementations of the ChainNotifier -// interface have been implemented, in order to ensure the new concrete -// implementation is automatically tested, two steps must be undertaken. First, -// one needs add a "non-captured" (_) import from the new sub-package. This -// import should trigger an init() method within the package which registers -// the interface. Second, an additional case in the switch within the main loop -// below needs to be added which properly initializes the interface. 
-// TODO(cjd): DISABLED TEST - our neutrino not working with sha256 chains yet -func _TestInterfaces(t *testing.T) { - // Initialize the harness around a btcd node which will serve as our - // dedicated miner to generate blocks, cause re-orgs, etc. We'll set up - // this node with a chain length of 125, so we have plenty of BTC to - // play around with. - miner, tearDown := chainntnfs.NewMiner(t, nil, true, 25) - defer tearDown() - - rpcConfig := miner.RPCConfig() - p2pAddr := miner.P2PAddress() - - log.Printf("Running %v ChainNotifier interface tests", - 2*len(txNtfnTests)+len(blockNtfnTests)+len(blockCatchupTests)) - - for _, notifierDriver := range chainntnfs.RegisteredNotifiers() { - // Initialize a height hint cache for each notifier. - tempDir, errr := ioutil.TempDir("", "channeldb") - if errr != nil { - t.Fatalf("unable to create temp dir: %v", errr) - } - db, err := channeldb.Open(tempDir) - if err != nil { - t.Fatalf("unable to create db: %v", err) - } - testCfg := chainntnfs.CacheConfig{ - QueryDisable: false, - } - hintCache, err := chainntnfs.NewHeightHintCache(testCfg, db) - if err != nil { - t.Fatalf("unable to create height hint cache: %v", err) - } - - var ( - cleanUp func() - newNotifier func() (chainntnfs.TestChainNotifier, er.R) - notifierType = notifierDriver.NotifierType - ) - - switch notifierType { - case "btcd": - newNotifier = func() (chainntnfs.TestChainNotifier, er.R) { - return btcdnotify.New( - &rpcConfig, chainntnfs.NetParams, - hintCache, hintCache, - ) - } - - case "neutrino": - var spvNode *neutrino.ChainService - spvNode, cleanUp = chainntnfs.NewNeutrinoBackend( - t, p2pAddr, - ) - newNotifier = func() (chainntnfs.TestChainNotifier, er.R) { - return neutrinonotify.New( - spvNode, hintCache, hintCache, - ), nil - } - } - - log.Printf("Running ChainNotifier interface tests for: %v", - notifierType) - - notifier, err := newNotifier() - if err != nil { - t.Fatalf("unable to create %v notifier: %v", - notifierType, err) - } - if err := 
notifier.Start(); err != nil { - t.Fatalf("unable to start notifier %v: %v", - notifierType, err) - } - - for _, txNtfnTest := range txNtfnTests { - for _, scriptDispatch := range []bool{false, true} { - testName := fmt.Sprintf("%v %v", notifierType, - txNtfnTest.name) - if scriptDispatch { - testName += " with script dispatch" - } - success := t.Run(testName, func(t *testing.T) { - txNtfnTest.test( - miner, notifier, scriptDispatch, - t, - ) - }) - if !success { - break - } - } - } - - for _, blockNtfnTest := range blockNtfnTests { - testName := fmt.Sprintf("%v %v", notifierType, - blockNtfnTest.name) - success := t.Run(testName, func(t *testing.T) { - blockNtfnTest.test(miner, notifier, t) - }) - if !success { - break - } - } - - notifier.Stop() - - // Run catchup tests separately since they require restarting - // the notifier every time. - for _, blockCatchupTest := range blockCatchupTests { - notifier, err = newNotifier() - if err != nil { - t.Fatalf("unable to create %v notifier: %v", - notifierType, err) - } - - testName := fmt.Sprintf("%v %v", notifierType, - blockCatchupTest.name) - - success := t.Run(testName, func(t *testing.T) { - blockCatchupTest.test(miner, notifier, t) - }) - if !success { - break - } - } - - if cleanUp != nil { - cleanUp() - } - } -} - -func TestMain(m *testing.M) { - globalcfg.SelectConfig(globalcfg.BitcoinDefaults()) - os.Exit(m.Run()) -} diff --git a/lnd/chainntnfs/neutrinonotify/driver.go b/lnd/chainntnfs/neutrinonotify/driver.go deleted file mode 100644 index ae8dafad..00000000 --- a/lnd/chainntnfs/neutrinonotify/driver.go +++ /dev/null @@ -1,53 +0,0 @@ -package neutrinonotify - -import ( - "fmt" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/chainntnfs" - "github.com/pkt-cash/pktd/neutrino" -) - -// createNewNotifier creates a new instance of the ChainNotifier interface -// implemented by NeutrinoNotifier. 
-func createNewNotifier(args ...interface{}) (chainntnfs.ChainNotifier, er.R) { - if len(args) != 3 { - return nil, er.Errorf("incorrect number of arguments to "+ - ".New(...), expected 3, instead passed %v", len(args)) - } - - config, ok := args[0].(*neutrino.ChainService) - if !ok { - return nil, er.New("first argument to neutrinonotify.New " + - "is incorrect, expected a *neutrino.ChainService") - } - - spendHintCache, ok := args[1].(chainntnfs.SpendHintCache) - if !ok { - return nil, er.New("second argument to neutrinonotify.New " + - "is incorrect, expected a chainntfs.SpendHintCache") - } - - confirmHintCache, ok := args[2].(chainntnfs.ConfirmHintCache) - if !ok { - return nil, er.New("third argument to neutrinonotify.New " + - "is incorrect, expected a chainntfs.ConfirmHintCache") - } - - return New(config, spendHintCache, confirmHintCache), nil -} - -// init registers a driver for the NeutrinoNotify concrete implementation of -// the chainntnfs.ChainNotifier interface. -func init() { - // Register the driver. 
- notifier := &chainntnfs.NotifierDriver{ - NotifierType: notifierType, - New: createNewNotifier, - } - - if err := chainntnfs.RegisterNotifier(notifier); err != nil { - panic(fmt.Sprintf("failed to register notifier driver '%s': %v", - notifierType, err)) - } -} diff --git a/lnd/chainntnfs/neutrinonotify/neutrino.go b/lnd/chainntnfs/neutrinonotify/neutrino.go deleted file mode 100644 index 3ece4e8e..00000000 --- a/lnd/chainntnfs/neutrinonotify/neutrino.go +++ /dev/null @@ -1,1038 +0,0 @@ -package neutrinonotify - -import ( - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/pkt-cash/pktd/btcjson" - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/gcs/builder" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/lnd/chainntnfs" - "github.com/pkt-cash/pktd/lnd/queue" - "github.com/pkt-cash/pktd/neutrino" - "github.com/pkt-cash/pktd/pktlog/log" - "github.com/pkt-cash/pktd/pktwallet/waddrmgr" - "github.com/pkt-cash/pktd/rpcclient" - "github.com/pkt-cash/pktd/txscript" - "github.com/pkt-cash/pktd/wire" -) - -const ( - // notifierType uniquely identifies this concrete implementation of the - // ChainNotifier interface. - notifierType = "neutrino" -) - -// NeutrinoNotifier is a version of ChainNotifier that's backed by the neutrino -// Bitcoin light client. Unlike other implementations, this implementation -// speaks directly to the p2p network. As a result, this implementation of the -// ChainNotifier interface is much more light weight that other implementation -// which rely of receiving notification over an RPC interface backed by a -// running full node. -// -// TODO(roasbeef): heavily consolidate with NeutrinoNotifier code -// * maybe combine into single package? -type NeutrinoNotifier struct { - epochClientCounter uint64 // To be used atomically. - - start sync.Once - active int32 // To be used atomically. - stopped int32 // To be used atomically. 
- - bestBlockMtx sync.RWMutex - bestBlock chainntnfs.BlockEpoch - - p2pNode *neutrino.ChainService - chainView *neutrino.Rescan - - chainConn *NeutrinoChainConn - - notificationCancels chan interface{} - notificationRegistry chan interface{} - - txNotifier *chainntnfs.TxNotifier - - blockEpochClients map[uint64]*blockEpochRegistration - - rescanErr <-chan er.R - - chainUpdates *queue.ConcurrentQueue - txUpdates *queue.ConcurrentQueue - - // spendHintCache is a cache used to query and update the latest height - // hints for an outpoint. Each height hint represents the earliest - // height at which the outpoint could have been spent within the chain. - spendHintCache chainntnfs.SpendHintCache - - // confirmHintCache is a cache used to query the latest height hints for - // a transaction. Each height hint represents the earliest height at - // which the transaction could have confirmed within the chain. - confirmHintCache chainntnfs.ConfirmHintCache - - wg sync.WaitGroup - quit chan struct{} -} - -// Ensure NeutrinoNotifier implements the ChainNotifier interface at compile time. -var _ chainntnfs.ChainNotifier = (*NeutrinoNotifier)(nil) - -// New creates a new instance of the NeutrinoNotifier concrete implementation -// of the ChainNotifier interface. -// -// NOTE: The passed neutrino node should already be running and active before -// being passed into this function. 
-func New(node *neutrino.ChainService, spendHintCache chainntnfs.SpendHintCache, - confirmHintCache chainntnfs.ConfirmHintCache) *NeutrinoNotifier { - - return &NeutrinoNotifier{ - notificationCancels: make(chan interface{}), - notificationRegistry: make(chan interface{}), - - blockEpochClients: make(map[uint64]*blockEpochRegistration), - - p2pNode: node, - chainConn: &NeutrinoChainConn{node}, - - rescanErr: make(chan er.R), - - chainUpdates: queue.NewConcurrentQueue(10), - txUpdates: queue.NewConcurrentQueue(10), - - spendHintCache: spendHintCache, - confirmHintCache: confirmHintCache, - - quit: make(chan struct{}), - } -} - -// Start contacts the running neutrino light client and kicks off an initial -// empty rescan. -func (n *NeutrinoNotifier) Start() er.R { - var startErr er.R - n.start.Do(func() { - startErr = n.startNotifier() - }) - return startErr -} - -// Stop shuts down the NeutrinoNotifier. -func (n *NeutrinoNotifier) Stop() er.R { - // Already shutting down? - if atomic.AddInt32(&n.stopped, 1) != 1 { - return nil - } - - close(n.quit) - n.wg.Wait() - - n.chainUpdates.Stop() - n.txUpdates.Stop() - - // Notify all pending clients of our shutdown by closing the related - // notification channels. - for _, epochClient := range n.blockEpochClients { - close(epochClient.cancelChan) - epochClient.wg.Wait() - - close(epochClient.epochChan) - } - n.txNotifier.TearDown() - - return nil -} - -// Started returns true if this instance has been started, and false otherwise. -func (n *NeutrinoNotifier) Started() bool { - return atomic.LoadInt32(&n.active) != 0 -} - -func (n *NeutrinoNotifier) startNotifier() er.R { - // Start our concurrent queues before starting the rescan, to ensure - // onFilteredBlockConnected and onRelavantTx callbacks won't be - // blocked. - n.chainUpdates.Start() - n.txUpdates.Start() - - // First, we'll obtain the latest block height of the p2p node. We'll - // start the auto-rescan from this point. 
Once a caller actually wishes - // to register a chain view, the rescan state will be rewound - // accordingly. - startingPoint, err := n.p2pNode.BestBlock() - if err != nil { - n.txUpdates.Stop() - n.chainUpdates.Stop() - return err - } - n.bestBlock.Hash = &startingPoint.Hash - n.bestBlock.Height = startingPoint.Height - - n.txNotifier = chainntnfs.NewTxNotifier( - uint32(n.bestBlock.Height), chainntnfs.ReorgSafetyLimit, - n.confirmHintCache, n.spendHintCache, - ) - - // Next, we'll create our set of rescan options. Currently it's - // required that a user MUST set an addr/outpoint/txid when creating a - // rescan. To get around this, we'll add a "zero" outpoint, that won't - // actually be matched. - var zeroInput neutrino.InputWithScript - rescanOptions := []neutrino.RescanOption{ - neutrino.StartBlock(startingPoint), - neutrino.QuitChan(n.quit), - neutrino.NotificationHandlers( - rpcclient.NotificationHandlers{ - OnFilteredBlockConnected: n.onFilteredBlockConnected, - OnFilteredBlockDisconnected: n.onFilteredBlockDisconnected, - OnRedeemingTx: n.onRelevantTx, - }, - ), - neutrino.WatchInputs(zeroInput), - } - - // Finally, we'll create our rescan struct, start it, and launch all - // the goroutines we need to operate this ChainNotifier instance. - n.chainView = neutrino.NewRescan( - &neutrino.RescanChainSource{ - ChainService: n.p2pNode, - }, - rescanOptions..., - ) - n.rescanErr = n.chainView.Start() - - n.wg.Add(1) - go n.notificationDispatcher() - - // Set the active flag now that we've completed the full - // startup. - atomic.StoreInt32(&n.active, 1) - - return nil -} - -// filteredBlock represents a new block which has been connected to the main -// chain. The slice of transactions will only be populated if the block -// includes a transaction that confirmed one of our watched txids, or spends -// one of the outputs currently being watched. 
-type filteredBlock struct { - hash chainhash.Hash - height uint32 - txns []*btcutil.Tx - - // connected is true if this update is a new block and false if it is a - // disconnected block. - connect bool -} - -// rescanFilterUpdate represents a request that will be sent to the -// notificaionRegistry in order to prevent race conditions between the filter -// update and new block notifications. -type rescanFilterUpdate struct { - updateOptions []neutrino.UpdateOption - errChan chan er.R -} - -// onFilteredBlockConnected is a callback which is executed each a new block is -// connected to the end of the main chain. -func (n *NeutrinoNotifier) onFilteredBlockConnected(height int32, - header *wire.BlockHeader, txns []*btcutil.Tx) { - - // Append this new chain update to the end of the queue of new chain - // updates. - select { - case n.chainUpdates.ChanIn() <- &filteredBlock{ - hash: header.BlockHash(), - height: uint32(height), - txns: txns, - connect: true, - }: - case <-n.quit: - } -} - -// onFilteredBlockDisconnected is a callback which is executed each time a new -// block has been disconnected from the end of the mainchain due to a re-org. -func (n *NeutrinoNotifier) onFilteredBlockDisconnected(height int32, - header *wire.BlockHeader) { - - // Append this new chain update to the end of the queue of new chain - // disconnects. - select { - case n.chainUpdates.ChanIn() <- &filteredBlock{ - hash: header.BlockHash(), - height: uint32(height), - connect: false, - }: - case <-n.quit: - } -} - -// relevantTx represents a relevant transaction to the notifier that fulfills -// any outstanding spend requests. -type relevantTx struct { - tx *btcutil.Tx - details *btcjson.BlockDetails -} - -// onRelevantTx is a callback that proxies relevant transaction notifications -// from the backend to the notifier's main event handler. 
-func (n *NeutrinoNotifier) onRelevantTx(tx *btcutil.Tx, details *btcjson.BlockDetails) { - select { - case n.txUpdates.ChanIn() <- &relevantTx{tx, details}: - case <-n.quit: - } -} - -// notificationDispatcher is the primary goroutine which handles client -// notification registrations, as well as notification dispatches. -func (n *NeutrinoNotifier) notificationDispatcher() { - defer n.wg.Done() -out: - for { - select { - case cancelMsg := <-n.notificationCancels: - switch msg := cancelMsg.(type) { - case *epochCancel: - log.Infof("Cancelling epoch "+ - "notification, epoch_id=%v", msg.epochID) - - // First, we'll lookup the original - // registration in order to stop the active - // queue goroutine. - reg := n.blockEpochClients[msg.epochID] - reg.epochQueue.Stop() - - // Next, close the cancel channel for this - // specific client, and wait for the client to - // exit. - close(n.blockEpochClients[msg.epochID].cancelChan) - n.blockEpochClients[msg.epochID].wg.Wait() - - // Once the client has exited, we can then - // safely close the channel used to send epoch - // notifications, in order to notify any - // listeners that the intent has been - // canceled. - close(n.blockEpochClients[msg.epochID].epochChan) - delete(n.blockEpochClients, msg.epochID) - } - - case registerMsg := <-n.notificationRegistry: - switch msg := registerMsg.(type) { - case *chainntnfs.HistoricalConfDispatch: - // We'll start a historical rescan chain of the - // chain asynchronously to prevent blocking - // potentially long rescans. - n.wg.Add(1) - go func() { - defer n.wg.Done() - - confDetails, err := n.historicalConfDetails( - msg.ConfRequest, - msg.StartHeight, msg.EndHeight, - ) - if err != nil { - log.Error(err) - return - } - - // If the historical dispatch finished - // without error, we will invoke - // UpdateConfDetails even if none were - // found. 
This allows the notifier to - // begin safely updating the height hint - // cache at tip, since any pending - // rescans have now completed. - err = n.txNotifier.UpdateConfDetails( - msg.ConfRequest, confDetails, - ) - if err != nil { - log.Error(err) - } - }() - - case *blockEpochRegistration: - log.Infof("New block epoch subscription") - - n.blockEpochClients[msg.epochID] = msg - - // If the client did not provide their best - // known block, then we'll immediately dispatch - // a notification for the current tip. - if msg.bestBlock == nil { - n.notifyBlockEpochClient( - msg, n.bestBlock.Height, - n.bestBlock.Hash, - ) - - msg.errorChan <- nil - continue - } - - // Otherwise, we'll attempt to deliver the - // backlog of notifications from their best - // known block. - n.bestBlockMtx.Lock() - bestHeight := n.bestBlock.Height - n.bestBlockMtx.Unlock() - - missedBlocks, err := chainntnfs.GetClientMissedBlocks( - n.chainConn, msg.bestBlock, bestHeight, - false, - ) - if err != nil { - msg.errorChan <- err - continue - } - - for _, block := range missedBlocks { - n.notifyBlockEpochClient( - msg, block.Height, block.Hash, - ) - } - - msg.errorChan <- nil - - case *rescanFilterUpdate: - err := n.chainView.Update(msg.updateOptions...) - if err != nil { - log.Errorf("Unable to "+ - "update rescan filter: %v", err) - } - msg.errChan <- err - } - - case item := <-n.chainUpdates.ChanOut(): - update := item.(*filteredBlock) - if update.connect { - n.bestBlockMtx.Lock() - // Since neutrino has no way of knowing what - // height to rewind to in the case of a reorged - // best known height, there is no point in - // checking that the previous hash matches the - // the hash from our best known height the way - // the other notifiers do when they receive - // a new connected block. Therefore, we just - // compare the heights. 
- if update.height != uint32(n.bestBlock.Height+1) { - // Handle the case where the notifier - // missed some blocks from its chain - // backend - log.Infof("Missed blocks, " + - "attempting to catch up") - - _, missedBlocks, err := - chainntnfs.HandleMissedBlocks( - n.chainConn, - n.txNotifier, - n.bestBlock, - int32(update.height), - false, - ) - if err != nil { - log.Error(err) - n.bestBlockMtx.Unlock() - continue - } - - for _, block := range missedBlocks { - filteredBlock, err := - n.getFilteredBlock(block) - if err != nil { - log.Error(err) - n.bestBlockMtx.Unlock() - continue out - } - err = n.handleBlockConnected(filteredBlock) - if err != nil { - log.Error(err) - n.bestBlockMtx.Unlock() - continue out - } - } - - } - - err := n.handleBlockConnected(update) - if err != nil { - log.Error(err) - } - - n.bestBlockMtx.Unlock() - continue - } - - n.bestBlockMtx.Lock() - if update.height != uint32(n.bestBlock.Height) { - log.Infof("Missed disconnected " + - "blocks, attempting to catch up") - } - newBestBlock, err := chainntnfs.RewindChain( - n.chainConn, n.txNotifier, n.bestBlock, - int32(update.height-1), - ) - if err != nil { - log.Errorf("Unable to rewind chain "+ - "from height %d to height %d: %v", - n.bestBlock.Height, update.height-1, err) - } - - // Set the bestHeight here in case a chain rewind - // partially completed. - n.bestBlock = newBestBlock - n.bestBlockMtx.Unlock() - - case txUpdate := <-n.txUpdates.ChanOut(): - // A new relevant transaction notification has been - // received from the backend. We'll attempt to process - // it to determine if it fulfills any outstanding - // confirmation and/or spend requests and dispatch - // notifications for them. 
- update := txUpdate.(*relevantTx) - err := n.txNotifier.ProcessRelevantSpendTx( - update.tx, uint32(update.details.Height), - ) - if err != nil { - log.Errorf("Unable to process "+ - "transaction %v: %v", update.tx.Hash(), - err) - } - - case err := <-n.rescanErr: - log.Errorf("Error during rescan: %v", err) - - case <-n.quit: - return - - } - } -} - -// historicalConfDetails looks up whether a confirmation request (txid/output -// script) has already been included in a block in the active chain and, if so, -// returns details about said block. -func (n *NeutrinoNotifier) historicalConfDetails(confRequest chainntnfs.ConfRequest, - startHeight, endHeight uint32) (*chainntnfs.TxConfirmation, er.R) { - - // Starting from the height hint, we'll walk forwards in the chain to - // see if this transaction/output script has already been confirmed. - for scanHeight := endHeight; scanHeight >= startHeight && scanHeight > 0; scanHeight-- { - // Ensure we haven't been requested to shut down before - // processing the next height. - select { - case <-n.quit: - return nil, chainntnfs.ErrChainNotifierShuttingDown.Default() - default: - } - - // First, we'll fetch the block header for this height so we - // can compute the current block hash. - blockHash, err := n.p2pNode.GetBlockHash(int64(scanHeight)) - if err != nil { - return nil, er.Errorf("unable to get header for height=%v: %v", - scanHeight, err) - } - - // With the hash computed, we can now fetch the basic filter for this - // height. Since the range of required items is known we avoid - // roundtrips by requesting a batched response and save bandwidth by - // limiting the max number of items per batch. Since neutrino populates - // its underline filters cache with the batch response, the next call - // will execute a network query only once per batch and not on every - // iteration. 
- regFilter, err := n.p2pNode.GetCFilter( - *blockHash, wire.GCSFilterRegular, - neutrino.NumRetries(5), - neutrino.OptimisticReverseBatch(), - // TODO(cjd): Maybe we want to implement MaxBatchSize in neutrino? - //neutrino.MaxBatchSize(int64(scanHeight-startHeight+1)), - ) - if err != nil { - return nil, er.Errorf("unable to retrieve regular filter for "+ - "height=%v: %v", scanHeight, err) - } - - // In the case that the filter exists, we'll attempt to see if - // any element in it matches our target public key script. - key := builder.DeriveKey(blockHash) - match, err := regFilter.Match(key, confRequest.PkScript.Script()) - if err != nil { - return nil, er.Errorf("unable to query filter: %v", err) - } - - // If there's no match, then we can continue forward to the - // next block. - if !match { - continue - } - - // In the case that we do have a match, we'll fetch the block - // from the network so we can find the positional data required - // to send the proper response. - block, err := n.p2pNode.GetBlock(*blockHash) - if err != nil { - return nil, er.Errorf("unable to get block from network: %v", err) - } - - // For every transaction in the block, check which one matches - // our request. If we find one that does, we can dispatch its - // confirmation details. - for i, tx := range block.Transactions() { - if !confRequest.MatchesTx(tx.MsgTx()) { - continue - } - - return &chainntnfs.TxConfirmation{ - Tx: tx.MsgTx(), - BlockHash: blockHash, - BlockHeight: scanHeight, - TxIndex: uint32(i), - }, nil - } - } - - return nil, nil -} - -// handleBlockConnected applies a chain update for a new block. Any watched -// transactions included this block will processed to either send notifications -// now or after numConfirmations confs. -// -// NOTE: This method must be called with the bestBlockMtx lock held. 
-func (n *NeutrinoNotifier) handleBlockConnected(newBlock *filteredBlock) er.R { - // We'll extend the txNotifier's height with the information of this new - // block, which will handle all of the notification logic for us. - err := n.txNotifier.ConnectTip( - &newBlock.hash, newBlock.height, newBlock.txns, - ) - if err != nil { - return er.Errorf("unable to connect tip: %v", err) - } - - log.Infof("New block: height=%v, sha=%v", newBlock.height, - newBlock.hash) - - // Now that we've guaranteed the new block extends the txNotifier's - // current tip, we'll proceed to dispatch notifications to all of our - // registered clients whom have had notifications fulfilled. Before - // doing so, we'll make sure update our in memory state in order to - // satisfy any client requests based upon the new block. - n.bestBlock.Hash = &newBlock.hash - n.bestBlock.Height = int32(newBlock.height) - - n.notifyBlockEpochs(int32(newBlock.height), &newBlock.hash) - return n.txNotifier.NotifyHeight(newBlock.height) -} - -// getFilteredBlock is a utility to retrieve the full filtered block from a block epoch. -func (n *NeutrinoNotifier) getFilteredBlock(epoch chainntnfs.BlockEpoch) (*filteredBlock, er.R) { - rawBlock, err := n.p2pNode.GetBlock(*epoch.Hash) - if err != nil { - return nil, er.Errorf("unable to get block: %v", err) - } - - txns := rawBlock.Transactions() - - block := &filteredBlock{ - hash: *epoch.Hash, - height: uint32(epoch.Height), - txns: txns, - connect: true, - } - return block, nil -} - -// notifyBlockEpochs notifies all registered block epoch clients of the newly -// connected block to the main chain. -func (n *NeutrinoNotifier) notifyBlockEpochs(newHeight int32, newSha *chainhash.Hash) { - for _, client := range n.blockEpochClients { - n.notifyBlockEpochClient(client, newHeight, newSha) - } -} - -// notifyBlockEpochClient sends a registered block epoch client a notification -// about a specific block. 
-func (n *NeutrinoNotifier) notifyBlockEpochClient(epochClient *blockEpochRegistration, - height int32, sha *chainhash.Hash) { - - epoch := &chainntnfs.BlockEpoch{ - Height: height, - Hash: sha, - } - - select { - case epochClient.epochQueue.ChanIn() <- epoch: - case <-epochClient.cancelChan: - case <-n.quit: - } -} - -// RegisterSpendNtfn registers an intent to be notified once the target -// outpoint/output script has been spent by a transaction on-chain. When -// intending to be notified of the spend of an output script, a nil outpoint -// must be used. The heightHint should represent the earliest height in the -// chain of the transaction that spent the outpoint/output script. -// -// Once a spend of has been detected, the details of the spending event will be -// sent across the 'Spend' channel. -func (n *NeutrinoNotifier) RegisterSpendNtfn(outpoint *wire.OutPoint, - pkScript []byte, heightHint uint32) (*chainntnfs.SpendEvent, er.R) { - - // Register the conf notification with the TxNotifier. A non-nil value - // for `dispatch` will be returned if we are required to perform a - // manual scan for the confirmation. Otherwise the notifier will begin - // watching at tip for the transaction to confirm. - ntfn, err := n.txNotifier.RegisterSpend(outpoint, pkScript, heightHint) - if err != nil { - return nil, err - } - - // To determine whether this outpoint has been spent on-chain, we'll - // update our filter to watch for the transaction at tip and we'll also - // dispatch a historical rescan to determine if it has been spent in the - // past. - // - // We'll update our filter first to ensure we can immediately detect the - // spend at tip. 
- if outpoint == nil { - outpoint = &chainntnfs.ZeroOutPoint - } - inputToWatch := neutrino.InputWithScript{ - OutPoint: *outpoint, - PkScript: pkScript, - } - updateOptions := []neutrino.UpdateOption{ - neutrino.AddInputs(inputToWatch), - neutrino.DisableDisconnectedNtfns(true), - } - - // We'll use the txNotifier's tip as the starting point of our filter - // update. In the case of an output script spend request, we'll check if - // we should perform a historical rescan and start from there, as we - // cannot do so with GetUtxo since it matches outpoints. - rewindHeight := ntfn.Height - if ntfn.HistoricalDispatch != nil && *outpoint == chainntnfs.ZeroOutPoint { - rewindHeight = ntfn.HistoricalDispatch.StartHeight - } - updateOptions = append(updateOptions, neutrino.Rewind(rewindHeight)) - - errChan := make(chan er.R, 1) - select { - case n.notificationRegistry <- &rescanFilterUpdate{ - updateOptions: updateOptions, - errChan: errChan, - }: - case <-n.quit: - return nil, chainntnfs.ErrChainNotifierShuttingDown.Default() - } - - select { - case err = <-errChan: - case <-n.quit: - return nil, chainntnfs.ErrChainNotifierShuttingDown.Default() - } - if err != nil { - return nil, er.Errorf("unable to update filter: %v", err) - } - - // If the txNotifier didn't return any details to perform a historical - // scan of the chain, or if we already performed one like in the case of - // output script spend requests, then we can return early as there's - // nothing left for us to do. - if ntfn.HistoricalDispatch == nil || *outpoint == chainntnfs.ZeroOutPoint { - return ntfn.Event, nil - } - - // With the filter updated, we'll dispatch our historical rescan to - // ensure we detect the spend if it happened in the past. - n.wg.Add(1) - go func() { - defer n.wg.Done() - - // We'll ensure that neutrino is caught up to the starting - // height before we attempt to fetch the UTXO from the chain. - // If we're behind, then we may miss a notification dispatch. 
- for { - n.bestBlockMtx.RLock() - currentHeight := uint32(n.bestBlock.Height) - n.bestBlockMtx.RUnlock() - - if currentHeight >= ntfn.HistoricalDispatch.StartHeight { - break - } - - select { - case <-time.After(time.Millisecond * 200): - case <-n.quit: - return - } - } - - spendReport, err := n.p2pNode.GetUtxo( - neutrino.WatchInputs(inputToWatch), - neutrino.StartBlock(&waddrmgr.BlockStamp{ - Height: int32(ntfn.HistoricalDispatch.StartHeight), - }), - neutrino.EndBlock(&waddrmgr.BlockStamp{ - Height: int32(ntfn.HistoricalDispatch.EndHeight), - }), - neutrino.QuitChan(n.quit), - ) - if err != nil && !strings.Contains(err.String(), "not found") { - log.Errorf("Failed getting UTXO: %v", err) - return - } - - // If a spend report was returned, and the transaction is present, then - // this means that the output is already spent. - var spendDetails *chainntnfs.SpendDetail - if spendReport != nil && spendReport.SpendingTx != nil { - spendingTxHash := spendReport.SpendingTx.TxHash() - spendDetails = &chainntnfs.SpendDetail{ - SpentOutPoint: outpoint, - SpenderTxHash: &spendingTxHash, - SpendingTx: spendReport.SpendingTx, - SpenderInputIndex: spendReport.SpendingInputIndex, - SpendingHeight: int32(spendReport.SpendingTxHeight), - } - } - - // Finally, no matter whether the rescan found a spend in the past or - // not, we'll mark our historical rescan as complete to ensure the - // outpoint's spend hint gets updated upon connected/disconnected - // blocks. - errr := n.txNotifier.UpdateSpendDetails( - ntfn.HistoricalDispatch.SpendRequest, spendDetails, - ) - if errr != nil { - log.Errorf("Failed to update spend details: %v", errr) - return - } - }() - - return ntfn.Event, nil -} - -// RegisterConfirmationsNtfn registers an intent to be notified once the target -// txid/output script has reached numConfs confirmations on-chain. When -// intending to be notified of the confirmation of an output script, a nil txid -// must be used. 
The heightHint should represent the earliest height at which -// the txid/output script could have been included in the chain. -// -// Progress on the number of confirmations left can be read from the 'Updates' -// channel. Once it has reached all of its confirmations, a notification will be -// sent across the 'Confirmed' channel. -func (n *NeutrinoNotifier) RegisterConfirmationsNtfn(txid *chainhash.Hash, - pkScript []byte, - numConfs, heightHint uint32) (*chainntnfs.ConfirmationEvent, er.R) { - - // Register the conf notification with the TxNotifier. A non-nil value - // for `dispatch` will be returned if we are required to perform a - // manual scan for the confirmation. Otherwise the notifier will begin - // watching at tip for the transaction to confirm. - ntfn, err := n.txNotifier.RegisterConf( - txid, pkScript, numConfs, heightHint, - ) - if err != nil { - return nil, err - } - - // To determine whether this transaction has confirmed on-chain, we'll - // update our filter to watch for the transaction at tip and we'll also - // dispatch a historical rescan to determine if it has confirmed in the - // past. - // - // We'll update our filter first to ensure we can immediately detect the - // confirmation at tip. To do so, we'll map the script into an address - // type so we can instruct neutrino to match if the transaction - // containing the script is found in a block. - params := n.p2pNode.ChainParams() - _, addrs, _, err := txscript.ExtractPkScriptAddrs(pkScript, ¶ms) - if err != nil { - return nil, er.Errorf("unable to extract script: %v", err) - } - - // We'll send the filter update request to the notifier's main event - // handler and wait for its response. 
- errChan := make(chan er.R, 1) - select { - case n.notificationRegistry <- &rescanFilterUpdate{ - updateOptions: []neutrino.UpdateOption{ - neutrino.AddAddrs(addrs...), - neutrino.Rewind(ntfn.Height), - neutrino.DisableDisconnectedNtfns(true), - }, - errChan: errChan, - }: - case <-n.quit: - return nil, chainntnfs.ErrChainNotifierShuttingDown.Default() - } - - select { - case err = <-errChan: - case <-n.quit: - return nil, chainntnfs.ErrChainNotifierShuttingDown.Default() - } - if err != nil { - return nil, er.Errorf("unable to update filter: %v", err) - } - - // If a historical rescan was not requested by the txNotifier, then we - // can return to the caller. - if ntfn.HistoricalDispatch == nil { - return ntfn.Event, nil - } - - // Finally, with the filter updated, we can dispatch the historical - // rescan to ensure we can detect if the event happened in the past. - select { - case n.notificationRegistry <- ntfn.HistoricalDispatch: - case <-n.quit: - return nil, chainntnfs.ErrChainNotifierShuttingDown.Default() - } - - return ntfn.Event, nil -} - -// blockEpochRegistration represents a client's intent to receive a -// notification with each newly connected block. -type blockEpochRegistration struct { - epochID uint64 - - epochChan chan *chainntnfs.BlockEpoch - - epochQueue *queue.ConcurrentQueue - - cancelChan chan struct{} - - bestBlock *chainntnfs.BlockEpoch - - errorChan chan er.R - - wg sync.WaitGroup -} - -// epochCancel is a message sent to the NeutrinoNotifier when a client wishes -// to cancel an outstanding epoch notification that has yet to be dispatched. -type epochCancel struct { - epochID uint64 -} - -// RegisterBlockEpochNtfn returns a BlockEpochEvent which subscribes the -// caller to receive notifications, of each new block connected to the main -// chain. Clients have the option of passing in their best known block, which -// the notifier uses to check if they are behind on blocks and catch them up. 
If -// they do not provide one, then a notification will be dispatched immediately -// for the current tip of the chain upon a successful registration. -func (n *NeutrinoNotifier) RegisterBlockEpochNtfn( - bestBlock *chainntnfs.BlockEpoch) (*chainntnfs.BlockEpochEvent, er.R) { - - reg := &blockEpochRegistration{ - epochQueue: queue.NewConcurrentQueue(20), - epochChan: make(chan *chainntnfs.BlockEpoch, 20), - cancelChan: make(chan struct{}), - epochID: atomic.AddUint64(&n.epochClientCounter, 1), - bestBlock: bestBlock, - errorChan: make(chan er.R, 1), - } - reg.epochQueue.Start() - - // Before we send the request to the main goroutine, we'll launch a new - // goroutine to proxy items added to our queue to the client itself. - // This ensures that all notifications are received *in order*. - reg.wg.Add(1) - go func() { - defer reg.wg.Done() - - for { - select { - case ntfn := <-reg.epochQueue.ChanOut(): - blockNtfn := ntfn.(*chainntnfs.BlockEpoch) - select { - case reg.epochChan <- blockNtfn: - - case <-reg.cancelChan: - return - - case <-n.quit: - return - } - - case <-reg.cancelChan: - return - - case <-n.quit: - return - } - } - }() - - select { - case <-n.quit: - // As we're exiting before the registration could be sent, - // we'll stop the queue now ourselves. - reg.epochQueue.Stop() - - return nil, er.New("chainntnfs: system interrupt while " + - "attempting to register for block epoch notification.") - case n.notificationRegistry <- reg: - return &chainntnfs.BlockEpochEvent{ - Epochs: reg.epochChan, - Cancel: func() { - cancel := &epochCancel{ - epochID: reg.epochID, - } - - // Submit epoch cancellation to notification dispatcher. - select { - case n.notificationCancels <- cancel: - // Cancellation is being handled, drain the epoch channel until it is - // closed before yielding to caller. 
- for { - select { - case _, ok := <-reg.epochChan: - if !ok { - return - } - case <-n.quit: - return - } - } - case <-n.quit: - } - }, - }, nil - } -} - -// NeutrinoChainConn is a wrapper around neutrino's chain backend in order -// to satisfy the chainntnfs.ChainConn interface. -type NeutrinoChainConn struct { - p2pNode *neutrino.ChainService -} - -// GetBlockHeader returns the block header for a hash. -func (n *NeutrinoChainConn) GetBlockHeader(blockHash *chainhash.Hash) (*wire.BlockHeader, er.R) { - return n.p2pNode.GetBlockHeader(blockHash) -} - -// GetBlockHeaderVerbose returns a verbose block header result for a hash. This -// result only contains the height with a nil hash. -func (n *NeutrinoChainConn) GetBlockHeaderVerbose(blockHash *chainhash.Hash) ( - *btcjson.GetBlockHeaderVerboseResult, er.R) { - - height, err := n.p2pNode.GetBlockHeight(blockHash) - if err != nil { - return nil, err - } - // Since only the height is used from the result, leave the hash nil. - return &btcjson.GetBlockHeaderVerboseResult{Height: int32(height)}, nil -} - -// GetBlockHash returns the hash from a block height. -func (n *NeutrinoChainConn) GetBlockHash(blockHeight int64) (*chainhash.Hash, er.R) { - return n.p2pNode.GetBlockHash(blockHeight) -} diff --git a/lnd/chainntnfs/neutrinonotify/neutrino_dev.go b/lnd/chainntnfs/neutrinonotify/neutrino_dev.go deleted file mode 100644 index fa987def..00000000 --- a/lnd/chainntnfs/neutrinonotify/neutrino_dev.go +++ /dev/null @@ -1,104 +0,0 @@ -// +build dev - -package neutrinonotify - -import ( - "time" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/lnd/chainntnfs" - "github.com/pkt-cash/pktd/neutrino" - "github.com/pkt-cash/pktd/rpcclient" -) - -// UnsafeStart starts the notifier with a specified best height and optional -// best hash. Its bestHeight, txNotifier and neutrino node are initialized with -// bestHeight. 
The parameter generateBlocks is necessary for the bitcoind -// notifier to ensure we drain all notifications up to syncHeight, since if they -// are generated ahead of UnsafeStart the chainConn may start up with an -// outdated best block and miss sending ntfns. Used for testing. -func (n *NeutrinoNotifier) UnsafeStart(bestHeight int32, - bestHash *chainhash.Hash, syncHeight int32, - generateBlocks func() er.R) er.R { - - // We'll obtain the latest block height of the p2p node. We'll - // start the auto-rescan from this point. Once a caller actually wishes - // to register a chain view, the rescan state will be rewound - // accordingly. - startingPoint, err := n.p2pNode.BestBlock() - if err != nil { - return err - } - - // Next, we'll create our set of rescan options. Currently it's - // required that a user MUST set an addr/outpoint/txid when creating a - // rescan. To get around this, we'll add a "zero" outpoint, that won't - // actually be matched. - var zeroInput neutrino.InputWithScript - rescanOptions := []neutrino.RescanOption{ - neutrino.StartBlock(startingPoint), - neutrino.QuitChan(n.quit), - neutrino.NotificationHandlers( - rpcclient.NotificationHandlers{ - OnFilteredBlockConnected: n.onFilteredBlockConnected, - OnFilteredBlockDisconnected: n.onFilteredBlockDisconnected, - }, - ), - neutrino.WatchInputs(zeroInput), - } - - n.txNotifier = chainntnfs.NewTxNotifier( - uint32(bestHeight), chainntnfs.ReorgSafetyLimit, - n.confirmHintCache, n.spendHintCache, - ) - - // Finally, we'll create our rescan struct, start it, and launch all - // the goroutines we need to operate this ChainNotifier instance. - n.chainView = neutrino.NewRescan( - &neutrino.RescanChainSource{ - ChainService: n.p2pNode, - }, - rescanOptions..., - ) - n.rescanErr = n.chainView.Start() - - n.chainUpdates.Start() - n.txUpdates.Start() - - if generateBlocks != nil { - // Ensure no block notifications are pending when we start the - // notification dispatcher goroutine. 
- - // First generate the blocks, then drain the notifications - // for the generated blocks. - if err := generateBlocks(); err != nil { - return err - } - - timeout := time.After(60 * time.Second) - loop: - for { - select { - case ntfn := <-n.chainUpdates.ChanOut(): - lastReceivedNtfn := ntfn.(*filteredBlock) - if lastReceivedNtfn.height >= uint32(syncHeight) { - break loop - } - case <-timeout: - return er.Errorf("unable to catch up to height %d", - syncHeight) - } - } - } - - // Run notificationDispatcher after setting the notifier's best height - // to avoid a race condition. - n.bestBlock.Hash = bestHash - n.bestBlock.Height = bestHeight - - n.wg.Add(1) - go n.notificationDispatcher() - - return nil -} diff --git a/lnd/chainntnfs/test_utils.go b/lnd/chainntnfs/test_utils.go deleted file mode 100644 index 33be12ad..00000000 --- a/lnd/chainntnfs/test_utils.go +++ /dev/null @@ -1,233 +0,0 @@ -// +build dev - -package chainntnfs - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "testing" - "time" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcjson" - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/chaincfg" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/integration/rpctest" - "github.com/pkt-cash/pktd/neutrino" - "github.com/pkt-cash/pktd/pktwallet/walletdb" - "github.com/pkt-cash/pktd/txscript" - "github.com/pkt-cash/pktd/txscript/params" - "github.com/pkt-cash/pktd/wire" -) - -var ( - // TrickleInterval is the interval at which the miner should trickle - // transactions to its peers. We'll set it small to ensure the miner - // propagates transactions quickly in the tests. - TrickleInterval = 10 * time.Millisecond -) - -var ( - NetParams = &chaincfg.RegressionNetParams -) - -// randPubKeyHashScript generates a P2PKH script that pays to the public key of -// a randomly-generated private key. 
-func randPubKeyHashScript() ([]byte, *btcec.PrivateKey, er.R) { - privKey, err := btcec.NewPrivateKey(btcec.S256()) - if err != nil { - return nil, nil, err - } - - pubKeyHash := btcutil.Hash160(privKey.PubKey().SerializeCompressed()) - addrScript, err := btcutil.NewAddressPubKeyHash(pubKeyHash, NetParams) - if err != nil { - return nil, nil, err - } - - pkScript, err := txscript.PayToAddrScript(addrScript) - if err != nil { - return nil, nil, err - } - - return pkScript, privKey, nil -} - -// GetTestTxidAndScript generate a new test transaction and returns its txid and -// the script of the output being generated. -func GetTestTxidAndScript(h *rpctest.Harness) (*chainhash.Hash, []byte, er.R) { - pkScript, _, err := randPubKeyHashScript() - if err != nil { - return nil, nil, er.Errorf("unable to generate pkScript: %v", err) - } - output := &wire.TxOut{Value: 2e8, PkScript: pkScript} - txid, err := h.SendOutputs([]*wire.TxOut{output}, 10) - if err != nil { - return nil, nil, err - } - - return txid, pkScript, nil -} - -// WaitForMempoolTx waits for the txid to be seen in the miner's mempool. -func WaitForMempoolTx(miner *rpctest.Harness, txid *chainhash.Hash) er.R { - timeout := time.After(10 * time.Second) - trickle := time.After(2 * TrickleInterval) - for { - // Check for the harness' knowledge of the txid. - tx, err := miner.Node.GetRawTransaction(txid) - if err != nil { - if btcjson.ErrRPCNoTxInfo.Is(err) { - continue - } - return err - } - - if tx != nil && tx.Hash().IsEqual(txid) { - break - } - - select { - case <-time.After(100 * time.Millisecond): - case <-timeout: - return er.New("timed out waiting for tx") - } - } - - // To ensure any transactions propagate from the miner to the peers - // before returning, ensure we have waited for at least - // 2*trickleInterval before returning. - select { - case <-trickle: - case <-timeout: - return er.New("timeout waiting for trickle interval. 
" + - "Trickle interval to large?") - } - - return nil -} - -// CreateSpendableOutput creates and returns an output that can be spent later -// on. -func CreateSpendableOutput(t *testing.T, - miner *rpctest.Harness) (*wire.OutPoint, *wire.TxOut, *btcec.PrivateKey) { - - t.Helper() - - // Create a transaction that only has one output, the one destined for - // the recipient. - pkScript, privKey, err := randPubKeyHashScript() - if err != nil { - t.Fatalf("unable to generate pkScript: %v", err) - } - output := &wire.TxOut{Value: 2e8, PkScript: pkScript} - txid, err := miner.SendOutputsWithoutChange([]*wire.TxOut{output}, 10) - if err != nil { - t.Fatalf("unable to create tx: %v", err) - } - - // Mine the transaction to mark the output as spendable. - if err := WaitForMempoolTx(miner, txid); err != nil { - t.Fatalf("tx not relayed to miner: %v", err) - } - if _, err := miner.Node.Generate(1); err != nil { - t.Fatalf("unable to generate single block: %v", err) - } - - return wire.NewOutPoint(txid, 0), output, privKey -} - -// CreateSpendTx creates a transaction spending the specified output. -func CreateSpendTx(t *testing.T, prevOutPoint *wire.OutPoint, - prevOutput *wire.TxOut, privKey *btcec.PrivateKey) *wire.MsgTx { - - t.Helper() - - spendingTx := wire.NewMsgTx(1) - spendingTx.AddTxIn(&wire.TxIn{PreviousOutPoint: *prevOutPoint}) - spendingTx.AddTxOut(&wire.TxOut{Value: 1e8, PkScript: prevOutput.PkScript}) - - sigScript, err := txscript.SignatureScript( - spendingTx, 0, prevOutput.PkScript, params.SigHashAll, - privKey, true, - ) - if err != nil { - t.Fatalf("unable to sign tx: %v", err) - } - spendingTx.TxIn[0].SignatureScript = sigScript - - return spendingTx -} - -// NewMiner spawns testing harness backed by a btcd node that can serve as a -// miner. -func NewMiner(t *testing.T, extraArgs []string, createChain bool, - spendableOutputs uint32) (*rpctest.Harness, func()) { - - t.Helper() - - // Add the trickle interval argument to the extra args. 
- trickle := fmt.Sprintf("--trickleinterval=%v", TrickleInterval) - extraArgs = append(extraArgs, trickle, "--tls") - - node, err := rpctest.New(NetParams, nil, extraArgs) - if err != nil { - t.Fatalf("unable to create backend node: %v", err) - } - if err := node.SetUp(createChain, spendableOutputs); err != nil { - node.TearDown() - t.Fatalf("unable to set up backend node: %v", err) - } - - return node, func() { node.TearDown() } -} - -// NewNeutrinoBackend spawns a new neutrino node that connects to a miner at -// the specified address. -func NewNeutrinoBackend(t *testing.T, minerAddr string) (*neutrino.ChainService, func()) { - t.Helper() - - spvDir, errr := ioutil.TempDir("", "neutrino") - if errr != nil { - t.Fatalf("unable to create temp dir: %v", errr) - } - - dbName := filepath.Join(spvDir, "neutrino.db") - spvDatabase, err := walletdb.Create("bdb", dbName, true) - if err != nil { - os.RemoveAll(spvDir) - t.Fatalf("unable to create walletdb: %v", err) - } - - // Create an instance of neutrino connected to the running btcd - // instance. - spvConfig := neutrino.Config{ - DataDir: spvDir, - Database: spvDatabase, - ChainParams: *NetParams, - ConnectPeers: []string{minerAddr}, - } - spvNode, err := neutrino.NewChainService(spvConfig) - if err != nil { - os.RemoveAll(spvDir) - spvDatabase.Close() - t.Fatalf("unable to create neutrino: %v", err) - } - - // We'll also wait for the instance to sync up fully to the chain - // generated by the btcd instance. 
- spvNode.Start() - for !spvNode.IsCurrent() { - time.Sleep(time.Millisecond * 100) - } - - return spvNode, func() { - spvNode.Stop() - spvDatabase.Close() - os.RemoveAll(spvDir) - } -} diff --git a/lnd/chainntnfs/txnotifier.go b/lnd/chainntnfs/txnotifier.go deleted file mode 100644 index b339c9cb..00000000 --- a/lnd/chainntnfs/txnotifier.go +++ /dev/null @@ -1,1979 +0,0 @@ -package chainntnfs - -import ( - "bytes" - "fmt" - "sync" - "sync/atomic" - - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/pktlog/log" - "github.com/pkt-cash/pktd/txscript" - "github.com/pkt-cash/pktd/wire" -) - -const ( - // ReorgSafetyLimit is the chain depth beyond which it is assumed a - // block will not be reorganized out of the chain. This is used to - // determine when to prune old confirmation requests so that reorgs are - // handled correctly. The average number of blocks in a day is a - // reasonable value to use. - ReorgSafetyLimit = 144 - - // MaxNumConfs is the maximum number of confirmations that can be - // requested on a transaction. - MaxNumConfs = ReorgSafetyLimit -) - -var ( - // ZeroHash is the value that should be used as the txid when - // registering for the confirmation of a script on-chain. This allows - // the notifier to match _and_ dispatch upon the inclusion of the script - // on-chain, rather than the txid. - ZeroHash chainhash.Hash - - // ZeroOutPoint is the value that should be used as the outpoint when - // registering for the spend of a script on-chain. This allows the - // notifier to match _and_ dispatch upon detecting the spend of the - // script on-chain, rather than the outpoint. - ZeroOutPoint wire.OutPoint -) - -var ( - // ErrTxNotifierExiting is an error returned when attempting to interact - // with the TxNotifier but it been shut down. 
- ErrTxNotifierExiting = Err.CodeWithDetail("ErrTxNotifierExiting", "TxNotifier is exiting") - - // ErrNoScript is an error returned when a confirmation/spend - // registration is attempted without providing an accompanying output - // script. - ErrNoScript = Err.CodeWithDetail("ErrNoScript", "an output script must be provided") - - // ErrNoHeightHint is an error returned when a confirmation/spend - // registration is attempted without providing an accompanying height - // hint. - ErrNoHeightHint = Err.CodeWithDetail("ErrNoHeightHint", - "a height hint greater than 0 must be provided") - - // ErrNumConfsOutOfRange is an error returned when a confirmation/spend - // registration is attempted and the number of confirmations provided is - // out of range. - ErrNumConfsOutOfRange = Err.CodeWithDetail("ErrNumConfsOutOfRange", - fmt.Sprintf("number of confirmations must be "+ - "between %d and %d", 1, MaxNumConfs)) -) - -// rescanState indicates the progression of a registration before the notifier -// can begin dispatching confirmations at tip. -type rescanState byte - -const ( - // rescanNotStarted is the initial state, denoting that a historical - // dispatch may be required. - rescanNotStarted rescanState = iota - - // rescanPending indicates that a dispatch has already been made, and we - // are waiting for its completion. No other rescans should be dispatched - // while in this state. - rescanPending - - // rescanComplete signals either that a rescan was dispatched and has - // completed, or that we began watching at tip immediately. In either - // case, the notifier can only dispatch notifications from tip when in - // this state. - rescanComplete -) - -// confNtfnSet holds all known, registered confirmation notifications for a -// txid/output script. If duplicates notifications are requested, only one -// historical dispatch will be spawned to ensure redundant scans are not -// permitted. 
A single conf detail will be constructed and dispatched to all -// interested -// clients. -type confNtfnSet struct { - // ntfns keeps tracks of all the active client notification requests for - // a transaction/output script - ntfns map[uint64]*ConfNtfn - - // rescanStatus represents the current rescan state for the - // transaction/output script. - rescanStatus rescanState - - // details serves as a cache of the confirmation details of a - // transaction that we'll use to determine if a transaction/output - // script has already confirmed at the time of registration. - // details is also used to make sure that in case of an address reuse - // (funds sent to a previously confirmed script) no additional - // notification is registered which would lead to an inconsistent state. - details *TxConfirmation -} - -// newConfNtfnSet constructs a fresh confNtfnSet for a group of clients -// interested in a notification for a particular txid. -func newConfNtfnSet() *confNtfnSet { - return &confNtfnSet{ - ntfns: make(map[uint64]*ConfNtfn), - rescanStatus: rescanNotStarted, - } -} - -// spendNtfnSet holds all known, registered spend notifications for a spend -// request (outpoint/output script). If duplicate notifications are requested, -// only one historical dispatch will be spawned to ensure redundant scans are -// not permitted. -type spendNtfnSet struct { - // ntfns keeps tracks of all the active client notification requests for - // an outpoint/output script. - ntfns map[uint64]*SpendNtfn - - // rescanStatus represents the current rescan state for the spend - // request (outpoint/output script). - rescanStatus rescanState - - // details serves as a cache of the spend details for an outpoint/output - // script that we'll use to determine if it has already been spent at - // the time of registration. - details *SpendDetail -} - -// newSpendNtfnSet constructs a new spend notification set. 
-func newSpendNtfnSet() *spendNtfnSet { - return &spendNtfnSet{ - ntfns: make(map[uint64]*SpendNtfn), - rescanStatus: rescanNotStarted, - } -} - -// ConfRequest encapsulates a request for a confirmation notification of either -// a txid or output script. -type ConfRequest struct { - // TxID is the hash of the transaction for which confirmation - // notifications are requested. If set to a zero hash, then a - // confirmation notification will be dispatched upon inclusion of the - // _script_, rather than the txid. - TxID chainhash.Hash - - // PkScript is the public key script of an outpoint created in this - // transaction. - PkScript txscript.PkScript -} - -// NewConfRequest creates a request for a confirmation notification of either a -// txid or output script. A nil txid or an allocated ZeroHash can be used to -// dispatch the confirmation notification on the script. -func NewConfRequest(txid *chainhash.Hash, pkScript []byte) (ConfRequest, er.R) { - var r ConfRequest - outputScript, err := txscript.ParsePkScript(pkScript) - if err != nil { - return r, err - } - - // We'll only set a txid for which we'll dispatch a confirmation - // notification on this request if one was provided. Otherwise, we'll - // default to dispatching on the confirmation of the script instead. - if txid != nil { - r.TxID = *txid - } - r.PkScript = outputScript - - return r, nil -} - -// String returns the string representation of the ConfRequest. -func (r ConfRequest) String() string { - if r.TxID != ZeroHash { - return fmt.Sprintf("txid=%v", r.TxID) - } - return fmt.Sprintf("script=%v", r.PkScript) -} - -// ConfHintKey returns the key that will be used to index the confirmation -// request's hint within the height hint cache. 
-func (r ConfRequest) ConfHintKey() ([]byte, er.R) { - if r.TxID == ZeroHash { - return r.PkScript.Script(), nil - } - - var txid bytes.Buffer - if err := channeldb.WriteElement(&txid, r.TxID); err != nil { - return nil, err - } - - return txid.Bytes(), nil -} - -// MatchesTx determines whether the given transaction satisfies the confirmation -// request. If the confirmation request is for a script, then we'll check all of -// the outputs of the transaction to determine if it matches. Otherwise, we'll -// match on the txid. -func (r ConfRequest) MatchesTx(tx *wire.MsgTx) bool { - scriptMatches := func() bool { - pkScript := r.PkScript.Script() - for _, txOut := range tx.TxOut { - if bytes.Equal(txOut.PkScript, pkScript) { - return true - } - } - - return false - } - - if r.TxID != ZeroHash { - return r.TxID == tx.TxHash() && scriptMatches() - } - - return scriptMatches() -} - -// ConfNtfn represents a notifier client's request to receive a notification -// once the target transaction/output script gets sufficient confirmations. The -// client is asynchronously notified via the ConfirmationEvent channels. -type ConfNtfn struct { - // ConfID uniquely identifies the confirmation notification request for - // the specified transaction/output script. - ConfID uint64 - - // ConfRequest represents either the txid or script we should detect - // inclusion of within the chain. - ConfRequest - - // NumConfirmations is the number of confirmations after which the - // notification is to be sent. - NumConfirmations uint32 - - // Event contains references to the channels that the notifications are to - // be sent over. - Event *ConfirmationEvent - - // HeightHint is the minimum height in the chain that we expect to find - // this txid. - HeightHint uint32 - - // dispatched is false if the confirmed notification has not been sent yet. - dispatched bool -} - -// HistoricalConfDispatch parameterizes a manual rescan for a particular -// transaction/output script. 
The parameters include the start and end block -// heights specifying the range of blocks to scan. -type HistoricalConfDispatch struct { - // ConfRequest represents either the txid or script we should detect - // inclusion of within the chain. - ConfRequest - - // StartHeight specifies the block height at which to begin the - // historical rescan. - StartHeight uint32 - - // EndHeight specifies the last block height (inclusive) that the - // historical scan should consider. - EndHeight uint32 -} - -// ConfRegistration encompasses all of the information required for callers to -// retrieve details about a confirmation event. -type ConfRegistration struct { - // Event contains references to the channels that the notifications are - // to be sent over. - Event *ConfirmationEvent - - // HistoricalDispatch, if non-nil, signals to the client who registered - // the notification that they are responsible for attempting to manually - // rescan blocks for the txid/output script between the start and end - // heights. - HistoricalDispatch *HistoricalConfDispatch - - // Height is the height of the TxNotifier at the time the confirmation - // notification was registered. This can be used so that backends can - // request to be notified of confirmations from this point forwards. - Height uint32 -} - -// SpendRequest encapsulates a request for a spend notification of either an -// outpoint or output script. -type SpendRequest struct { - // OutPoint is the outpoint for which a client has requested a spend - // notification for. If set to a zero outpoint, then a spend - // notification will be dispatched upon detecting the spend of the - // _script_, rather than the outpoint. - OutPoint wire.OutPoint - - // PkScript is the script of the outpoint. If a zero outpoint is set, - // then this can be an arbitrary script. - PkScript txscript.PkScript -} - -// NewSpendRequest creates a request for a spend notification of either an -// outpoint or output script. 
A nil outpoint or an allocated ZeroOutPoint can be -// used to dispatch the confirmation notification on the script. -func NewSpendRequest(op *wire.OutPoint, pkScript []byte) (SpendRequest, er.R) { - var r SpendRequest - outputScript, err := txscript.ParsePkScript(pkScript) - if err != nil { - return r, err - } - - // We'll only set an outpoint for which we'll dispatch a spend - // notification on this request if one was provided. Otherwise, we'll - // default to dispatching on the spend of the script instead. - if op != nil { - r.OutPoint = *op - } - r.PkScript = outputScript - - return r, nil -} - -// String returns the string representation of the SpendRequest. -func (r SpendRequest) String() string { - if r.OutPoint != ZeroOutPoint { - return fmt.Sprintf("outpoint=%v, script=%v", r.OutPoint, - r.PkScript) - } - return fmt.Sprintf("outpoint=, script=%v", r.PkScript) -} - -// SpendHintKey returns the key that will be used to index the spend request's -// hint within the height hint cache. -func (r SpendRequest) SpendHintKey() ([]byte, er.R) { - if r.OutPoint == ZeroOutPoint { - return r.PkScript.Script(), nil - } - - var outpoint bytes.Buffer - err := channeldb.WriteElement(&outpoint, r.OutPoint) - if err != nil { - return nil, err - } - - return outpoint.Bytes(), nil -} - -// MatchesTx determines whether the given transaction satisfies the spend -// request. If the spend request is for an outpoint, then we'll check all of -// the outputs being spent by the inputs of the transaction to determine if it -// matches. Otherwise, we'll need to match on the output script being spent, so -// we'll recompute it for each input of the transaction to determine if it -// matches. 
-func (r SpendRequest) MatchesTx(tx *wire.MsgTx) (bool, uint32, er.R) { - if r.OutPoint != ZeroOutPoint { - for i, txIn := range tx.TxIn { - if txIn.PreviousOutPoint == r.OutPoint { - return true, uint32(i), nil - } - } - - return false, 0, nil - } - - for i, txIn := range tx.TxIn { - pkScript, err := txscript.ComputePkScript( - txIn.SignatureScript, txIn.Witness, - ) - if txscript.ErrUnsupportedScriptType.Is(err) { - continue - } - if err != nil { - return false, 0, err - } - - if bytes.Equal(pkScript.Script(), r.PkScript.Script()) { - return true, uint32(i), nil - } - } - - return false, 0, nil -} - -// SpendNtfn represents a client's request to receive a notification once an -// outpoint/output script has been spent on-chain. The client is asynchronously -// notified via the SpendEvent channels. -type SpendNtfn struct { - // SpendID uniquely identies the spend notification request for the - // specified outpoint/output script. - SpendID uint64 - - // SpendRequest represents either the outpoint or script we should - // detect the spend of. - SpendRequest - - // Event contains references to the channels that the notifications are - // to be sent over. - Event *SpendEvent - - // HeightHint is the earliest height in the chain that we expect to find - // the spending transaction of the specified outpoint/output script. - // This value will be overridden by the spend hint cache if it contains - // an entry for it. - HeightHint uint32 - - // dispatched signals whether a spend notification has been disptached - // to the client. - dispatched bool -} - -// HistoricalSpendDispatch parameterizes a manual rescan to determine the -// spending details (if any) of an outpoint/output script. The parameters -// include the start and end block heights specifying the range of blocks to -// scan. -type HistoricalSpendDispatch struct { - // SpendRequest represents either the outpoint or script we should - // detect the spend of. 
- SpendRequest - - // StartHeight specified the block height at which to begin the - // historical rescan. - StartHeight uint32 - - // EndHeight specifies the last block height (inclusive) that the - // historical rescan should consider. - EndHeight uint32 -} - -// SpendRegistration encompasses all of the information required for callers to -// retrieve details about a spend event. -type SpendRegistration struct { - // Event contains references to the channels that the notifications are - // to be sent over. - Event *SpendEvent - - // HistoricalDispatch, if non-nil, signals to the client who registered - // the notification that they are responsible for attempting to manually - // rescan blocks for the txid/output script between the start and end - // heights. - HistoricalDispatch *HistoricalSpendDispatch - - // Height is the height of the TxNotifier at the time the spend - // notification was registered. This can be used so that backends can - // request to be notified of spends from this point forwards. - Height uint32 -} - -// TxNotifier is a struct responsible for delivering transaction notifications -// to subscribers. These notifications can be of two different types: -// transaction/output script confirmations and/or outpoint/output script spends. -// The TxNotifier will watch the blockchain as new blocks come in, in order to -// satisfy its client requests. -type TxNotifier struct { - confClientCounter uint64 // To be used atomically. - spendClientCounter uint64 // To be used atomically. - - // currentHeight is the height of the tracked blockchain. It is used to - // determine the number of confirmations a tx has and ensure blocks are - // connected and disconnected in order. - currentHeight uint32 - - // reorgSafetyLimit is the chain depth beyond which it is assumed a - // block will not be reorganized out of the chain. This is used to - // determine when to prune old notification requests so that reorgs are - // handled correctly. 
The coinbase maturity period is a reasonable value - // to use. - reorgSafetyLimit uint32 - - // reorgDepth is the depth of a chain organization that this system is - // being informed of. This is incremented as long as a sequence of - // blocks are disconnected without being interrupted by a new block. - reorgDepth uint32 - - // confNotifications is an index of confirmation notification requests - // by transaction hash/output script. - confNotifications map[ConfRequest]*confNtfnSet - - // confsByInitialHeight is an index of watched transactions/output - // scripts by the height that they are included at in the chain. This - // is tracked so that incorrect notifications are not sent if a - // transaction/output script is reorged out of the chain and so that - // negative confirmations can be recognized. - confsByInitialHeight map[uint32]map[ConfRequest]struct{} - - // ntfnsByConfirmHeight is an index of notification requests by the - // height at which the transaction/output script will have sufficient - // confirmations. - ntfnsByConfirmHeight map[uint32]map[*ConfNtfn]struct{} - - // spendNotifications is an index of all active notification requests - // per outpoint/output script. - spendNotifications map[SpendRequest]*spendNtfnSet - - // spendsByHeight is an index that keeps tracks of the spending height - // of outpoints/output scripts we are currently tracking notifications - // for. This is used in order to recover from spending transactions - // being reorged out of the chain. - spendsByHeight map[uint32]map[SpendRequest]struct{} - - // confirmHintCache is a cache used to maintain the latest height hints - // for transactions/output scripts. Each height hint represents the - // earliest height at which they scripts could have been confirmed - // within the chain. - confirmHintCache ConfirmHintCache - - // spendHintCache is a cache used to maintain the latest height hints - // for outpoints/output scripts. 
Each height hint represents the - // earliest height at which they could have been spent within the chain. - spendHintCache SpendHintCache - - // quit is closed in order to signal that the notifier is gracefully - // exiting. - quit chan struct{} - - sync.Mutex -} - -// NewTxNotifier creates a TxNotifier. The current height of the blockchain is -// accepted as a parameter. The different hint caches (confirm and spend) are -// used as an optimization in order to retrieve a better starting point when -// dispatching a recan for a historical event in the chain. -func NewTxNotifier(startHeight uint32, reorgSafetyLimit uint32, - confirmHintCache ConfirmHintCache, - spendHintCache SpendHintCache) *TxNotifier { - - return &TxNotifier{ - currentHeight: startHeight, - reorgSafetyLimit: reorgSafetyLimit, - confNotifications: make(map[ConfRequest]*confNtfnSet), - confsByInitialHeight: make(map[uint32]map[ConfRequest]struct{}), - ntfnsByConfirmHeight: make(map[uint32]map[*ConfNtfn]struct{}), - spendNotifications: make(map[SpendRequest]*spendNtfnSet), - spendsByHeight: make(map[uint32]map[SpendRequest]struct{}), - confirmHintCache: confirmHintCache, - spendHintCache: spendHintCache, - quit: make(chan struct{}), - } -} - -// newConfNtfn validates all of the parameters required to successfully create -// and register a confirmation notification. -func (n *TxNotifier) newConfNtfn(txid *chainhash.Hash, - pkScript []byte, numConfs, heightHint uint32) (*ConfNtfn, er.R) { - - // An accompanying output script must always be provided. - if len(pkScript) == 0 { - return nil, ErrNoScript.Default() - } - - // Enforce that we will not dispatch confirmations beyond the reorg - // safety limit. - if numConfs == 0 || numConfs > n.reorgSafetyLimit { - return nil, ErrNumConfsOutOfRange.Default() - } - - // A height hint must be provided to prevent scanning from the genesis - // block. 
- if heightHint == 0 { - return nil, ErrNoHeightHint.Default() - } - - // Ensure the output script is of a supported type. - confRequest, err := NewConfRequest(txid, pkScript) - if err != nil { - return nil, err - } - - confID := atomic.AddUint64(&n.confClientCounter, 1) - return &ConfNtfn{ - ConfID: confID, - ConfRequest: confRequest, - NumConfirmations: numConfs, - Event: NewConfirmationEvent(numConfs, func() { - n.CancelConf(confRequest, confID) - }), - HeightHint: heightHint, - }, nil -} - -// RegisterConf handles a new confirmation notification request. The client will -// be notified when the transaction/output script gets a sufficient number of -// confirmations in the blockchain. -// -// NOTE: If the transaction/output script has already been included in a block -// on the chain, the confirmation details must be provided with the -// UpdateConfDetails method, otherwise we will wait for the transaction/output -// script to confirm even though it already has. -func (n *TxNotifier) RegisterConf(txid *chainhash.Hash, pkScript []byte, - numConfs, heightHint uint32) (*ConfRegistration, er.R) { - - select { - case <-n.quit: - return nil, ErrTxNotifierExiting.Default() - default: - } - - // We'll start by performing a series of validation checks. - ntfn, err := n.newConfNtfn(txid, pkScript, numConfs, heightHint) - if err != nil { - return nil, err - } - - // Before proceeding to register the notification, we'll query our - // height hint cache to determine whether a better one exists. - // - // TODO(conner): verify that all submitted height hints are identical. 
- startHeight := ntfn.HeightHint - hint, err := n.confirmHintCache.QueryConfirmHint(ntfn.ConfRequest) - if err == nil { - if hint > startHeight { - log.Debugf("Using height hint %d retrieved from cache "+ - "for %v instead of %d", hint, ntfn.ConfRequest, - startHeight) - startHeight = hint - } - } else if !ErrConfirmHintNotFound.Is(err) { - log.Errorf("Unable to query confirm hint for %v: %v", - ntfn.ConfRequest, err) - } - - log.Infof("New confirmation subscription: conf_id=%d, %v, "+ - "num_confs=%v height_hint=%d", ntfn.ConfID, ntfn.ConfRequest, - numConfs, startHeight) - - n.Lock() - defer n.Unlock() - - confSet, ok := n.confNotifications[ntfn.ConfRequest] - if !ok { - // If this is the first registration for this request, construct - // a confSet to coalesce all notifications for the same request. - confSet = newConfNtfnSet() - n.confNotifications[ntfn.ConfRequest] = confSet - } - confSet.ntfns[ntfn.ConfID] = ntfn - - switch confSet.rescanStatus { - - // A prior rescan has already completed and we are actively watching at - // tip for this request. - case rescanComplete: - // If the confirmation details for this set of notifications has - // already been found, we'll attempt to deliver them immediately - // to this client. - log.Debugf("Attempting to dispatch confirmation for %v on "+ - "registration since rescan has finished", - ntfn.ConfRequest) - - err := n.dispatchConfDetails(ntfn, confSet.details) - if err != nil { - return nil, err - } - - return &ConfRegistration{ - Event: ntfn.Event, - HistoricalDispatch: nil, - Height: n.currentHeight, - }, nil - - // A rescan is already in progress, return here to prevent dispatching - // another. When the rescan returns, this notification's details will be - // updated as well. 
- case rescanPending: - log.Debugf("Waiting for pending rescan to finish before "+ - "notifying %v at tip", ntfn.ConfRequest) - - return &ConfRegistration{ - Event: ntfn.Event, - HistoricalDispatch: nil, - Height: n.currentHeight, - }, nil - - // If no rescan has been dispatched, attempt to do so now. - case rescanNotStarted: - } - - // If the provided or cached height hint indicates that the - // transaction with the given txid/output script is to be confirmed at a - // height greater than the notifier's current height, we'll refrain from - // spawning a historical dispatch. - if startHeight > n.currentHeight { - log.Debugf("Height hint is above current height, not "+ - "dispatching historical confirmation rescan for %v", - ntfn.ConfRequest) - - // Set the rescan status to complete, which will allow the - // notifier to start delivering messages for this set - // immediately. - confSet.rescanStatus = rescanComplete - return &ConfRegistration{ - Event: ntfn.Event, - HistoricalDispatch: nil, - Height: n.currentHeight, - }, nil - } - - log.Debugf("Dispatching historical confirmation rescan for %v", - ntfn.ConfRequest) - - // Construct the parameters for historical dispatch, scanning the range - // of blocks between our best known height hint and the notifier's - // current height. The notifier will begin also watching for - // confirmations at tip starting with the next block. - dispatch := &HistoricalConfDispatch{ - ConfRequest: ntfn.ConfRequest, - StartHeight: startHeight, - EndHeight: n.currentHeight, - } - - // Set this confSet's status to pending, ensuring subsequent - // registrations don't also attempt a dispatch. - confSet.rescanStatus = rescanPending - - return &ConfRegistration{ - Event: ntfn.Event, - HistoricalDispatch: dispatch, - Height: n.currentHeight, - }, nil -} - -// CancelConf cancels an existing request for a spend notification of an -// outpoint/output script. The request is identified by its spend ID. 
-func (n *TxNotifier) CancelConf(confRequest ConfRequest, confID uint64) { - select { - case <-n.quit: - return - default: - } - - n.Lock() - defer n.Unlock() - - confSet, ok := n.confNotifications[confRequest] - if !ok { - return - } - ntfn, ok := confSet.ntfns[confID] - if !ok { - return - } - - log.Infof("Canceling confirmation notification: conf_id=%d, %v", confID, - confRequest) - - // We'll close all the notification channels to let the client know - // their cancel request has been fulfilled. - close(ntfn.Event.Confirmed) - close(ntfn.Event.Updates) - close(ntfn.Event.NegativeConf) - - // Finally, we'll clean up any lingering references to this - // notification. - delete(confSet.ntfns, confID) - - // Remove the queued confirmation notification if the transaction has - // already confirmed, but hasn't met its required number of - // confirmations. - if confSet.details != nil { - confHeight := confSet.details.BlockHeight + - ntfn.NumConfirmations - 1 - delete(n.ntfnsByConfirmHeight[confHeight], ntfn) - } -} - -// UpdateConfDetails attempts to update the confirmation details for an active -// notification within the notifier. This should only be used in the case of a -// transaction/output script that has confirmed before the notifier's current -// height. -// -// NOTE: The notification should be registered first to ensure notifications are -// dispatched correctly. -func (n *TxNotifier) UpdateConfDetails(confRequest ConfRequest, - details *TxConfirmation) er.R { - - select { - case <-n.quit: - return ErrTxNotifierExiting.Default() - default: - } - - // Ensure we hold the lock throughout handling the notification to - // prevent the notifier from advancing its height underneath us. - n.Lock() - defer n.Unlock() - - // First, we'll determine whether we have an active confirmation - // notification for the given txid/script. 
- confSet, ok := n.confNotifications[confRequest] - if !ok { - return er.Errorf("confirmation notification for %v not found", - confRequest) - } - - // If the confirmation details were already found at tip, all existing - // notifications will have been dispatched or queued for dispatch. We - // can exit early to avoid sending too many notifications on the - // buffered channels. - if confSet.details != nil { - return nil - } - - // The historical dispatch has been completed for this confSet. We'll - // update the rescan status and cache any details that were found. If - // the details are nil, that implies we did not find them and will - // continue to watch for them at tip. - confSet.rescanStatus = rescanComplete - - // The notifier has yet to reach the height at which the - // transaction/output script was included in a block, so we should defer - // until handling it then within ConnectTip. - if details == nil { - log.Debugf("Confirmation details for %v not found during "+ - "historical dispatch, waiting to dispatch at tip", - confRequest) - - // We'll commit the current height as the confirm hint to - // prevent another potentially long rescan if we restart before - // a new block comes in. - err := n.confirmHintCache.CommitConfirmHint( - n.currentHeight, confRequest, - ) - if err != nil { - // The error is not fatal as this is an optimistic - // optimization, so we'll avoid returning an error. - log.Debugf("Unable to update confirm hint to %d for "+ - "%v: %v", n.currentHeight, confRequest, err) - } - - return nil - } - - if details.BlockHeight > n.currentHeight { - log.Debugf("Confirmation details for %v found above current "+ - "height, waiting to dispatch at tip", confRequest) - - return nil - } - - log.Debugf("Updating confirmation details for %v", confRequest) - - err := n.confirmHintCache.CommitConfirmHint( - details.BlockHeight, confRequest, - ) - if err != nil { - // The error is not fatal, so we should not return an error to - // the caller. 
- log.Errorf("Unable to update confirm hint to %d for %v: %v", - details.BlockHeight, confRequest, err) - } - - // Cache the details found in the rescan and attempt to dispatch any - // notifications that have not yet been delivered. - confSet.details = details - for _, ntfn := range confSet.ntfns { - err = n.dispatchConfDetails(ntfn, details) - if err != nil { - return err - } - } - - return nil -} - -// dispatchConfDetails attempts to cache and dispatch details to a particular -// client if the transaction/output script has sufficiently confirmed. If the -// provided details are nil, this method will be a no-op. -func (n *TxNotifier) dispatchConfDetails( - ntfn *ConfNtfn, details *TxConfirmation) er.R { - - // If no details are provided, return early as we can't dispatch. - if details == nil { - log.Debugf("Unable to dispatch %v, no details provided", - ntfn.ConfRequest) - - return nil - } - - // Now, we'll examine whether the transaction/output script of this - // request has reached its required number of confirmations. If it has, - // we'll dispatch a confirmation notification to the caller. - confHeight := details.BlockHeight + ntfn.NumConfirmations - 1 - if confHeight <= n.currentHeight { - log.Infof("Dispatching %v confirmation notification for %v", - ntfn.NumConfirmations, ntfn.ConfRequest) - - // We'll send a 0 value to the Updates channel, - // indicating that the transaction/output script has already - // been confirmed. - select { - case ntfn.Event.Updates <- 0: - case <-n.quit: - return ErrTxNotifierExiting.Default() - } - - select { - case ntfn.Event.Confirmed <- details: - ntfn.dispatched = true - case <-n.quit: - return ErrTxNotifierExiting.Default() - } - } else { - log.Debugf("Queueing %v confirmation notification for %v at tip ", - ntfn.NumConfirmations, ntfn.ConfRequest) - - // Otherwise, we'll keep track of the notification - // request by the height at which we should dispatch the - // confirmation notification. 
- ntfnSet, exists := n.ntfnsByConfirmHeight[confHeight] - if !exists { - ntfnSet = make(map[*ConfNtfn]struct{}) - n.ntfnsByConfirmHeight[confHeight] = ntfnSet - } - ntfnSet[ntfn] = struct{}{} - - // We'll also send an update to the client of how many - // confirmations are left for the transaction/output script to - // be confirmed. - numConfsLeft := confHeight - n.currentHeight - select { - case ntfn.Event.Updates <- numConfsLeft: - case <-n.quit: - return ErrTxNotifierExiting.Default() - } - } - - // As a final check, we'll also watch the transaction/output script if - // it's still possible for it to get reorged out of the chain. - reorgSafeHeight := details.BlockHeight + n.reorgSafetyLimit - if reorgSafeHeight > n.currentHeight { - txSet, exists := n.confsByInitialHeight[details.BlockHeight] - if !exists { - txSet = make(map[ConfRequest]struct{}) - n.confsByInitialHeight[details.BlockHeight] = txSet - } - txSet[ntfn.ConfRequest] = struct{}{} - } - - return nil -} - -// newSpendNtfn validates all of the parameters required to successfully create -// and register a spend notification. -func (n *TxNotifier) newSpendNtfn(outpoint *wire.OutPoint, - pkScript []byte, heightHint uint32) (*SpendNtfn, er.R) { - - // An accompanying output script must always be provided. - if len(pkScript) == 0 { - return nil, ErrNoScript.Default() - } - - // A height hint must be provided to prevent scanning from the genesis - // block. - if heightHint == 0 { - return nil, ErrNoHeightHint.Default() - } - - // Ensure the output script is of a supported type. - spendRequest, err := NewSpendRequest(outpoint, pkScript) - if err != nil { - return nil, err - } - - spendID := atomic.AddUint64(&n.spendClientCounter, 1) - return &SpendNtfn{ - SpendID: spendID, - SpendRequest: spendRequest, - Event: NewSpendEvent(func() { - n.CancelSpend(spendRequest, spendID) - }), - HeightHint: heightHint, - }, nil -} - -// RegisterSpend handles a new spend notification request. 
The client will be -// notified once the outpoint/output script is detected as spent within the -// chain. -// -// NOTE: If the outpoint/output script has already been spent within the chain -// before the notifier's current tip, the spend details must be provided with -// the UpdateSpendDetails method, otherwise we will wait for the outpoint/output -// script to be spent at tip, even though it already has. -func (n *TxNotifier) RegisterSpend(outpoint *wire.OutPoint, pkScript []byte, - heightHint uint32) (*SpendRegistration, er.R) { - - select { - case <-n.quit: - return nil, ErrTxNotifierExiting.Default() - default: - } - - // We'll start by performing a series of validation checks. - ntfn, err := n.newSpendNtfn(outpoint, pkScript, heightHint) - if err != nil { - return nil, err - } - - // Before proceeding to register the notification, we'll query our spend - // hint cache to determine whether a better one exists. - startHeight := ntfn.HeightHint - hint, err := n.spendHintCache.QuerySpendHint(ntfn.SpendRequest) - if err == nil { - if hint > startHeight { - log.Debugf("Using height hint %d retrieved from cache "+ - "for %v instead of %d", hint, ntfn.SpendRequest, - startHeight) - startHeight = hint - } - } else if !ErrSpendHintNotFound.Is(err) { - log.Errorf("Unable to query spend hint for %v: %v", - ntfn.SpendRequest, err) - } - - n.Lock() - defer n.Unlock() - - log.Infof("New spend subscription: spend_id=%d, %v, height_hint=%d", - ntfn.SpendID, ntfn.SpendRequest, startHeight) - - // Keep track of the notification request so that we can properly - // dispatch a spend notification later on. - spendSet, ok := n.spendNotifications[ntfn.SpendRequest] - if !ok { - // If this is the first registration for the request, we'll - // construct a spendNtfnSet to coalesce all notifications. 
- spendSet = newSpendNtfnSet() - n.spendNotifications[ntfn.SpendRequest] = spendSet - } - spendSet.ntfns[ntfn.SpendID] = ntfn - - // We'll now let the caller know whether a historical rescan is needed - // depending on the current rescan status. - switch spendSet.rescanStatus { - - // If the spending details for this request have already been determined - // and cached, then we can use them to immediately dispatch the spend - // notification to the client. - case rescanComplete: - log.Debugf("Attempting to dispatch spend for %v on "+ - "registration since rescan has finished", - ntfn.SpendRequest) - - err := n.dispatchSpendDetails(ntfn, spendSet.details) - if err != nil { - return nil, err - } - - return &SpendRegistration{ - Event: ntfn.Event, - HistoricalDispatch: nil, - Height: n.currentHeight, - }, nil - - // If there is an active rescan to determine whether the request has - // been spent, then we won't trigger another one. - case rescanPending: - log.Debugf("Waiting for pending rescan to finish before "+ - "notifying %v at tip", ntfn.SpendRequest) - - return &SpendRegistration{ - Event: ntfn.Event, - HistoricalDispatch: nil, - Height: n.currentHeight, - }, nil - - // Otherwise, we'll fall through and let the caller know that a rescan - // should be dispatched to determine whether the request has already - // been spent. - case rescanNotStarted: - } - - // However, if the spend hint, either provided by the caller or - // retrieved from the cache, is found to be at a later height than the - // TxNotifier is aware of, then we'll refrain from dispatching a - // historical rescan and wait for the spend to come in at tip. - if startHeight > n.currentHeight { - log.Debugf("Spend hint of %d for %v is above current height %d", - startHeight, ntfn.SpendRequest, n.currentHeight) - - // We'll also set the rescan status as complete to ensure that - // spend hints for this request get updated upon - // connected/disconnected blocks. 
- spendSet.rescanStatus = rescanComplete - return &SpendRegistration{ - Event: ntfn.Event, - HistoricalDispatch: nil, - Height: n.currentHeight, - }, nil - } - - // We'll set the rescan status to pending to ensure subsequent - // notifications don't also attempt a historical dispatch. - spendSet.rescanStatus = rescanPending - - log.Infof("Dispatching historical spend rescan for %v, start=%d, "+ - "end=%d", ntfn.SpendRequest, startHeight, n.currentHeight) - - return &SpendRegistration{ - Event: ntfn.Event, - HistoricalDispatch: &HistoricalSpendDispatch{ - SpendRequest: ntfn.SpendRequest, - StartHeight: startHeight, - EndHeight: n.currentHeight, - }, - Height: n.currentHeight, - }, nil -} - -// CancelSpend cancels an existing request for a spend notification of an -// outpoint/output script. The request is identified by its spend ID. -func (n *TxNotifier) CancelSpend(spendRequest SpendRequest, spendID uint64) { - select { - case <-n.quit: - return - default: - } - - n.Lock() - defer n.Unlock() - - spendSet, ok := n.spendNotifications[spendRequest] - if !ok { - return - } - ntfn, ok := spendSet.ntfns[spendID] - if !ok { - return - } - - log.Infof("Canceling spend notification: spend_id=%d, %v", spendID, - spendRequest) - - // We'll close all the notification channels to let the client know - // their cancel request has been fulfilled. - close(ntfn.Event.Spend) - close(ntfn.Event.Reorg) - close(ntfn.Event.Done) - delete(spendSet.ntfns, spendID) -} - -// ProcessRelevantSpendTx processes a transaction provided externally. This will -// check whether the transaction is relevant to the notifier if it spends any -// outpoints/output scripts for which we currently have registered notifications -// for. If it is relevant, spend notifications will be dispatched to the caller. 
-func (n *TxNotifier) ProcessRelevantSpendTx(tx *btcutil.Tx, - blockHeight uint32) er.R { - - select { - case <-n.quit: - return ErrTxNotifierExiting.Default() - default: - } - - // Ensure we hold the lock throughout handling the notification to - // prevent the notifier from advancing its height underneath us. - n.Lock() - defer n.Unlock() - - // We'll use a channel to coalesce all the spend requests that this - // transaction fulfills. - type spend struct { - request *SpendRequest - details *SpendDetail - } - - // We'll set up the onSpend filter callback to gather all the fulfilled - // spends requests within this transaction. - var spends []spend - onSpend := func(request SpendRequest, details *SpendDetail) { - spends = append(spends, spend{&request, details}) - } - n.filterTx(tx, nil, blockHeight, nil, onSpend) - - // After the transaction has been filtered, we can finally dispatch - // notifications for each request. - for _, spend := range spends { - err := n.updateSpendDetails(*spend.request, spend.details) - if err != nil { - return err - } - } - - return nil -} - -// UpdateSpendDetails attempts to update the spend details for all active spend -// notification requests for an outpoint/output script. This method should be -// used once a historical scan of the chain has finished. If the historical scan -// did not find a spending transaction for it, the spend details may be nil. -// -// NOTE: A notification request for the outpoint/output script must be -// registered first to ensure notifications are delivered. -func (n *TxNotifier) UpdateSpendDetails(spendRequest SpendRequest, - details *SpendDetail) er.R { - - select { - case <-n.quit: - return ErrTxNotifierExiting.Default() - default: - } - - // Ensure we hold the lock throughout handling the notification to - // prevent the notifier from advancing its height underneath us. 
- n.Lock() - defer n.Unlock() - - return n.updateSpendDetails(spendRequest, details) -} - -// updateSpendDetails attempts to update the spend details for all active spend -// notification requests for an outpoint/output script. This method should be -// used once a historical scan of the chain has finished. If the historical scan -// did not find a spending transaction for it, the spend details may be nil. -// -// NOTE: This method must be called with the TxNotifier's lock held. -func (n *TxNotifier) updateSpendDetails(spendRequest SpendRequest, - details *SpendDetail) er.R { - - // Mark the ongoing historical rescan for this request as finished. This - // will allow us to update the spend hints for it at tip. - spendSet, ok := n.spendNotifications[spendRequest] - if !ok { - return er.Errorf("spend notification for %v not found", - spendRequest) - } - - // If the spend details have already been found either at tip, then the - // notifications should have already been dispatched, so we can exit - // early to prevent sending duplicate notifications. - if spendSet.details != nil { - return nil - } - - // Since the historical rescan has completed for this request, we'll - // mark its rescan status as complete in order to ensure that the - // TxNotifier can properly update its spend hints upon - // connected/disconnected blocks. - spendSet.rescanStatus = rescanComplete - - // If the historical rescan was not able to find a spending transaction - // for this request, then we can track the spend at tip. - if details == nil { - // We'll commit the current height as the spend hint to prevent - // another potentially long rescan if we restart before a new - // block comes in. - err := n.spendHintCache.CommitSpendHint( - n.currentHeight, spendRequest, - ) - if err != nil { - // The error is not fatal as this is an optimistic - // optimization, so we'll avoid returning an error. 
- log.Debugf("Unable to update spend hint to %d for %v: %v", - n.currentHeight, spendRequest, err) - } - - log.Debugf("Updated spend hint to height=%v for unconfirmed "+ - "spend request %v", n.currentHeight, spendRequest) - return nil - } - - // If the historical rescan found the spending transaction for this - // request, but it's at a later height than the notifier (this can - // happen due to latency with the backend during a reorg), then we'll - // defer handling the notification until the notifier has caught up to - // such height. - if uint32(details.SpendingHeight) > n.currentHeight { - return nil - } - - // Now that we've determined the request has been spent, we'll commit - // its spending height as its hint in the cache and dispatch - // notifications to all of its respective clients. - err := n.spendHintCache.CommitSpendHint( - uint32(details.SpendingHeight), spendRequest, - ) - if err != nil { - // The error is not fatal as this is an optimistic optimization, - // so we'll avoid returning an error. - log.Debugf("Unable to update spend hint to %d for %v: %v", - details.SpendingHeight, spendRequest, err) - } - - log.Debugf("Updated spend hint to height=%v for confirmed spend "+ - "request %v", details.SpendingHeight, spendRequest) - - spendSet.details = details - for _, ntfn := range spendSet.ntfns { - err := n.dispatchSpendDetails(ntfn, spendSet.details) - if err != nil { - return err - } - } - - return nil -} - -// dispatchSpendDetails dispatches a spend notification to the client. -// -// NOTE: This must be called with the TxNotifier's lock held. -func (n *TxNotifier) dispatchSpendDetails(ntfn *SpendNtfn, details *SpendDetail) er.R { - // If there are no spend details to dispatch or if the notification has - // already been dispatched, then we can skip dispatching to this client. 
- if details == nil || ntfn.dispatched { - log.Debugf("Skipping dispatch of spend details(%v) for "+ - "request %v, dispatched=%v", details, ntfn.SpendRequest, - ntfn.dispatched) - return nil - } - - log.Infof("Dispatching confirmed spend notification for %v at "+ - "current height=%d: %v", ntfn.SpendRequest, n.currentHeight, - details) - - select { - case ntfn.Event.Spend <- details: - ntfn.dispatched = true - case <-n.quit: - return ErrTxNotifierExiting.Default() - } - - return nil -} - -// ConnectTip handles a new block extending the current chain. It will go -// through every transaction and determine if it is relevant to any of its -// clients. A transaction can be relevant in either of the following two ways: -// -// 1. One of the inputs in the transaction spends an outpoint/output script -// for which we currently have an active spend registration for. -// -// 2. The transaction has a txid or output script for which we currently have -// an active confirmation registration for. -// -// In the event that the transaction is relevant, a confirmation/spend -// notification will be queued for dispatch to the relevant clients. -// Confirmation notifications will only be dispatched for transactions/output -// scripts that have met the required number of confirmations required by the -// client. -// -// NOTE: In order to actually dispatch the relevant transaction notifications to -// clients, NotifyHeight must be called with the same block height in order to -// maintain correctness. 
-func (n *TxNotifier) ConnectTip(blockHash *chainhash.Hash, blockHeight uint32, - txns []*btcutil.Tx) er.R { - - select { - case <-n.quit: - return ErrTxNotifierExiting.Default() - default: - } - - n.Lock() - defer n.Unlock() - - if blockHeight != n.currentHeight+1 { - return er.Errorf("received blocks out of order: "+ - "current height=%d, new height=%d", - n.currentHeight, blockHeight) - } - n.currentHeight++ - n.reorgDepth = 0 - - // First, we'll iterate over all the transactions found in this block to - // determine if it includes any relevant transactions to the TxNotifier. - log.Debugf("Filtering %d txns for %d spend requests at height %d", - len(txns), len(n.spendNotifications), blockHeight) - for _, tx := range txns { - n.filterTx( - tx, blockHash, blockHeight, n.handleConfDetailsAtTip, - n.handleSpendDetailsAtTip, - ) - } - - // Now that we've determined which requests were confirmed and spent - // within the new block, we can update their entries in their respective - // caches, along with all of our unconfirmed and unspent requests. - n.updateHints(blockHeight) - - // Finally, we'll clear the entries from our set of notifications for - // requests that are no longer under the risk of being reorged out of - // the chain. 
- if blockHeight >= n.reorgSafetyLimit { - matureBlockHeight := blockHeight - n.reorgSafetyLimit - for confRequest := range n.confsByInitialHeight[matureBlockHeight] { - confSet := n.confNotifications[confRequest] - for _, ntfn := range confSet.ntfns { - select { - case ntfn.Event.Done <- struct{}{}: - case <-n.quit: - return ErrTxNotifierExiting.Default() - } - } - - delete(n.confNotifications, confRequest) - } - delete(n.confsByInitialHeight, matureBlockHeight) - - for spendRequest := range n.spendsByHeight[matureBlockHeight] { - spendSet := n.spendNotifications[spendRequest] - for _, ntfn := range spendSet.ntfns { - select { - case ntfn.Event.Done <- struct{}{}: - case <-n.quit: - return ErrTxNotifierExiting.Default() - } - } - - log.Debugf("Deleting mature spend request %v at "+ - "height=%d", spendRequest, blockHeight) - delete(n.spendNotifications, spendRequest) - } - delete(n.spendsByHeight, matureBlockHeight) - } - - return nil -} - -// filterTx determines whether the transaction spends or confirms any -// outstanding pending requests. The onConf and onSpend callbacks can be used to -// retrieve all the requests fulfilled by this transaction as they occur. -func (n *TxNotifier) filterTx(tx *btcutil.Tx, blockHash *chainhash.Hash, - blockHeight uint32, onConf func(ConfRequest, *TxConfirmation), - onSpend func(SpendRequest, *SpendDetail)) { - - // In order to determine if this transaction is relevant to the - // notifier, we'll check its inputs for any outstanding spend - // requests. - txHash := tx.Hash() - if onSpend != nil { - // notifyDetails is a helper closure that will construct the - // spend details of a request and hand them off to the onSpend - // callback. 
- notifyDetails := func(spendRequest SpendRequest, - prevOut wire.OutPoint, inputIdx uint32) { - - log.Debugf("Found spend of %v: spend_tx=%v, "+ - "block_height=%d", spendRequest, txHash, - blockHeight) - - onSpend(spendRequest, &SpendDetail{ - SpentOutPoint: &prevOut, - SpenderTxHash: txHash, - SpendingTx: tx.MsgTx(), - SpenderInputIndex: inputIdx, - SpendingHeight: int32(blockHeight), - }) - } - - for i, txIn := range tx.MsgTx().TxIn { - // We'll re-derive the script of the output being spent - // to determine if the inputs spends any registered - // requests. - prevOut := txIn.PreviousOutPoint - pkScript, err := txscript.ComputePkScript( - txIn.SignatureScript, txIn.Witness, - ) - if err != nil { - continue - } - spendRequest := SpendRequest{ - OutPoint: prevOut, - PkScript: pkScript, - } - - // If we have any, we'll record their spend height so - // that notifications get dispatched to the respective - // clients. - if _, ok := n.spendNotifications[spendRequest]; ok { - notifyDetails(spendRequest, prevOut, uint32(i)) - } - spendRequest.OutPoint = ZeroOutPoint - if _, ok := n.spendNotifications[spendRequest]; ok { - notifyDetails(spendRequest, prevOut, uint32(i)) - } - } - } - - // We'll also check its outputs to determine if there are any - // outstanding confirmation requests. - if onConf != nil { - // notifyDetails is a helper closure that will construct the - // confirmation details of a request and hand them off to the - // onConf callback. - notifyDetails := func(confRequest ConfRequest) { - log.Debugf("Found initial confirmation of %v: "+ - "height=%d, hash=%v", confRequest, - blockHeight, blockHash) - - details := &TxConfirmation{ - Tx: tx.MsgTx(), - BlockHash: blockHash, - BlockHeight: blockHeight, - TxIndex: uint32(tx.Index()), - } - - onConf(confRequest, details) - } - - for _, txOut := range tx.MsgTx().TxOut { - // We'll parse the script of the output to determine if - // we have any registered requests for it or the - // transaction itself. 
- pkScript, err := txscript.ParsePkScript(txOut.PkScript) - if err != nil { - continue - } - confRequest := ConfRequest{ - TxID: *txHash, - PkScript: pkScript, - } - - // If we have any, we'll record their confirmed height - // so that notifications get dispatched when they - // reaches the clients' desired number of confirmations. - if _, ok := n.confNotifications[confRequest]; ok { - notifyDetails(confRequest) - } - confRequest.TxID = ZeroHash - if _, ok := n.confNotifications[confRequest]; ok { - notifyDetails(confRequest) - } - } - } -} - -// handleConfDetailsAtTip tracks the confirmation height of the txid/output -// script in order to properly dispatch a confirmation notification after -// meeting each request's desired number of confirmations for all current and -// future registered clients. -func (n *TxNotifier) handleConfDetailsAtTip(confRequest ConfRequest, - details *TxConfirmation) { - - // TODO(wilmer): cancel pending historical rescans if any? - confSet := n.confNotifications[confRequest] - - // If we already have details for this request, we don't want to add it - // again since we have already dispatched notifications for it. - if confSet.details != nil { - log.Warnf("Ignoring address reuse for %s at height %d.", - confRequest, details.BlockHeight) - return - } - - confSet.rescanStatus = rescanComplete - confSet.details = details - - for _, ntfn := range confSet.ntfns { - // In the event that this notification was aware that the - // transaction/output script was reorged out of the chain, we'll - // consume the reorg notification if it hasn't been done yet - // already. - select { - case <-ntfn.Event.NegativeConf: - default: - } - - // We'll note this client's required number of confirmations so - // that we can notify them when expected. 
- confHeight := details.BlockHeight + ntfn.NumConfirmations - 1 - ntfnSet, exists := n.ntfnsByConfirmHeight[confHeight] - if !exists { - ntfnSet = make(map[*ConfNtfn]struct{}) - n.ntfnsByConfirmHeight[confHeight] = ntfnSet - } - ntfnSet[ntfn] = struct{}{} - } - - // We'll also note the initial confirmation height in order to correctly - // handle dispatching notifications when the transaction/output script - // gets reorged out of the chain. - txSet, exists := n.confsByInitialHeight[details.BlockHeight] - if !exists { - txSet = make(map[ConfRequest]struct{}) - n.confsByInitialHeight[details.BlockHeight] = txSet - } - txSet[confRequest] = struct{}{} -} - -// handleSpendDetailsAtTip tracks the spend height of the outpoint/output script -// in order to properly dispatch a spend notification for all current and future -// registered clients. -func (n *TxNotifier) handleSpendDetailsAtTip(spendRequest SpendRequest, - details *SpendDetail) { - - // TODO(wilmer): cancel pending historical rescans if any? - spendSet := n.spendNotifications[spendRequest] - spendSet.rescanStatus = rescanComplete - spendSet.details = details - - for _, ntfn := range spendSet.ntfns { - // In the event that this notification was aware that the - // spending transaction of its outpoint/output script was - // reorged out of the chain, we'll consume the reorg - // notification if it hasn't been done yet already. - select { - case <-ntfn.Event.Reorg: - default: - } - } - - // We'll note the spending height of the request in order to correctly - // handle dispatching notifications when the spending transactions gets - // reorged out of the chain. 
- spendHeight := uint32(details.SpendingHeight) - opSet, exists := n.spendsByHeight[spendHeight] - if !exists { - opSet = make(map[SpendRequest]struct{}) - n.spendsByHeight[spendHeight] = opSet - } - opSet[spendRequest] = struct{}{} - - log.Debugf("Spend request %v spent at tip=%d", spendRequest, - spendHeight) -} - -// NotifyHeight dispatches confirmation and spend notifications to the clients -// who registered for a notification which has been fulfilled at the passed -// height. -func (n *TxNotifier) NotifyHeight(height uint32) er.R { - n.Lock() - defer n.Unlock() - - // First, we'll dispatch an update to all of the notification clients - // for our watched requests with the number of confirmations left at - // this new height. - for _, confRequests := range n.confsByInitialHeight { - for confRequest := range confRequests { - confSet := n.confNotifications[confRequest] - for _, ntfn := range confSet.ntfns { - txConfHeight := confSet.details.BlockHeight + - ntfn.NumConfirmations - 1 - numConfsLeft := txConfHeight - height - - // Since we don't clear notifications until - // transactions/output scripts are no longer - // under the risk of being reorganized out of - // the chain, we'll skip sending updates for - // those that have already been confirmed. - if int32(numConfsLeft) < 0 { - continue - } - - select { - case ntfn.Event.Updates <- numConfsLeft: - case <-n.quit: - return ErrTxNotifierExiting.Default() - } - } - } - } - - // Then, we'll dispatch notifications for all the requests that have - // become confirmed at this new block height. 
- for ntfn := range n.ntfnsByConfirmHeight[height] { - confSet := n.confNotifications[ntfn.ConfRequest] - - log.Infof("Dispatching %v confirmation notification for %v", - ntfn.NumConfirmations, ntfn.ConfRequest) - - select { - case ntfn.Event.Confirmed <- confSet.details: - ntfn.dispatched = true - case <-n.quit: - return ErrTxNotifierExiting.Default() - } - } - delete(n.ntfnsByConfirmHeight, height) - - // Finally, we'll dispatch spend notifications for all the requests that - // were spent at this new block height. - for spendRequest := range n.spendsByHeight[height] { - spendSet := n.spendNotifications[spendRequest] - for _, ntfn := range spendSet.ntfns { - err := n.dispatchSpendDetails(ntfn, spendSet.details) - if err != nil { - return err - } - } - } - - return nil -} - -// DisconnectTip handles the tip of the current chain being disconnected during -// a chain reorganization. If any watched requests were included in this block, -// internal structures are updated to ensure confirmation/spend notifications -// are consumed (if not already), and reorg notifications are dispatched -// instead. Confirmation/spend notifications will be dispatched again upon block -// inclusion. -func (n *TxNotifier) DisconnectTip(blockHeight uint32) er.R { - select { - case <-n.quit: - return ErrTxNotifierExiting.Default() - default: - } - - n.Lock() - defer n.Unlock() - - if blockHeight != n.currentHeight { - return er.Errorf("received blocks out of order: "+ - "current height=%d, disconnected height=%d", - n.currentHeight, blockHeight) - } - n.currentHeight-- - n.reorgDepth++ - - // With the block disconnected, we'll update the confirm and spend hints - // for our notification requests to reflect the new height, except for - // those that have confirmed/spent at previous heights. 
- n.updateHints(blockHeight) - - // We'll go through all of our watched confirmation requests and attempt - // to drain their notification channels to ensure sending notifications - // to the clients is always non-blocking. - for initialHeight, txHashes := range n.confsByInitialHeight { - for txHash := range txHashes { - // If the transaction/output script has been reorged out - // of the chain, we'll make sure to remove the cached - // confirmation details to prevent notifying clients - // with old information. - confSet := n.confNotifications[txHash] - if initialHeight == blockHeight { - confSet.details = nil - } - - for _, ntfn := range confSet.ntfns { - // First, we'll attempt to drain an update - // from each notification to ensure sends to the - // Updates channel are always non-blocking. - select { - case <-ntfn.Event.Updates: - case <-n.quit: - return ErrTxNotifierExiting.Default() - default: - } - - // Then, we'll check if the current - // transaction/output script was included in the - // block currently being disconnected. If it - // was, we'll need to dispatch a reorg - // notification to the client. - if initialHeight == blockHeight { - err := n.dispatchConfReorg( - ntfn, blockHeight, - ) - if err != nil { - return err - } - } - } - } - } - - // We'll also go through our watched spend requests and attempt to drain - // their dispatched notifications to ensure dispatching notifications to - // clients later on is always non-blocking. We're only interested in - // requests whose spending transaction was included at the height being - // disconnected. - for op := range n.spendsByHeight[blockHeight] { - // Since the spending transaction is being reorged out of the - // chain, we'll need to clear out the spending details of the - // request. - spendSet := n.spendNotifications[op] - spendSet.details = nil - - // For all requests which have had a spend notification - // dispatched, we'll attempt to drain it and send a reorg - // notification instead. 
- for _, ntfn := range spendSet.ntfns { - if err := n.dispatchSpendReorg(ntfn); err != nil { - return err - } - } - } - - // Finally, we can remove the requests that were confirmed and/or spent - // at the height being disconnected. We'll still continue to track them - // until they have been confirmed/spent and are no longer under the risk - // of being reorged out of the chain again. - delete(n.confsByInitialHeight, blockHeight) - delete(n.spendsByHeight, blockHeight) - - return nil -} - -// updateHints attempts to update the confirm and spend hints for all relevant -// requests respectively. The height parameter is used to determine which -// requests we should update based on whether a new block is being -// connected/disconnected. -// -// NOTE: This must be called with the TxNotifier's lock held and after its -// height has already been reflected by a block being connected/disconnected. -func (n *TxNotifier) updateHints(height uint32) { - // TODO(wilmer): update under one database transaction. - // - // To update the height hint for all the required confirmation requests - // under one database transaction, we'll gather the set of unconfirmed - // requests along with the ones that confirmed at the height being - // connected/disconnected. - confRequests := n.unconfirmedRequests() - for confRequest := range n.confsByInitialHeight[height] { - confRequests = append(confRequests, confRequest) - } - err := n.confirmHintCache.CommitConfirmHint( - n.currentHeight, confRequests..., - ) - if err != nil { - // The error is not fatal as this is an optimistic optimization, - // so we'll avoid returning an error. - log.Debugf("Unable to update confirm hints to %d for "+ - "%v: %v", n.currentHeight, confRequests, err) - } - - // Similarly, to update the height hint for all the required spend - // requests under one database transaction, we'll gather the set of - // unspent requests along with the ones that were spent at the height - // being connected/disconnected. 
- spendRequests := n.unspentRequests() - for spendRequest := range n.spendsByHeight[height] { - spendRequests = append(spendRequests, spendRequest) - } - err = n.spendHintCache.CommitSpendHint(n.currentHeight, spendRequests...) - if err != nil { - // The error is not fatal as this is an optimistic optimization, - // so we'll avoid returning an error. - log.Debugf("Unable to update spend hints to %d for "+ - "%v: %v", n.currentHeight, spendRequests, err) - } -} - -// unconfirmedRequests returns the set of confirmation requests that are -// still seen as unconfirmed by the TxNotifier. -// -// NOTE: This method must be called with the TxNotifier's lock held. -func (n *TxNotifier) unconfirmedRequests() []ConfRequest { - var unconfirmed []ConfRequest - for confRequest, confNtfnSet := range n.confNotifications { - // If the notification is already aware of its confirmation - // details, or it's in the process of learning them, we'll skip - // it as we can't yet determine if it's confirmed or not. - if confNtfnSet.rescanStatus != rescanComplete || - confNtfnSet.details != nil { - continue - } - - unconfirmed = append(unconfirmed, confRequest) - } - - return unconfirmed -} - -// unspentRequests returns the set of spend requests that are still seen as -// unspent by the TxNotifier. -// -// NOTE: This method must be called with the TxNotifier's lock held. -func (n *TxNotifier) unspentRequests() []SpendRequest { - var unspent []SpendRequest - for spendRequest, spendNtfnSet := range n.spendNotifications { - // If the notification is already aware of its spend details, or - // it's in the process of learning them, we'll skip it as we - // can't yet determine if it's unspent or not. - if spendNtfnSet.rescanStatus != rescanComplete || - spendNtfnSet.details != nil { - continue - } - - unspent = append(unspent, spendRequest) - } - - return unspent -} - -// dispatchConfReorg dispatches a reorg notification to the client if the -// confirmation notification was already delivered. 
-// -// NOTE: This must be called with the TxNotifier's lock held. -func (n *TxNotifier) dispatchConfReorg(ntfn *ConfNtfn, - heightDisconnected uint32) er.R { - - // If the request's confirmation notification has yet to be dispatched, - // we'll need to clear its entry within the ntfnsByConfirmHeight index - // to prevent from notifying the client once the notifier reaches the - // confirmation height. - if !ntfn.dispatched { - confHeight := heightDisconnected + ntfn.NumConfirmations - 1 - ntfnSet, exists := n.ntfnsByConfirmHeight[confHeight] - if exists { - delete(ntfnSet, ntfn) - } - return nil - } - - // Otherwise, the entry within the ntfnsByConfirmHeight has already been - // deleted, so we'll attempt to drain the confirmation notification to - // ensure sends to the Confirmed channel are always non-blocking. - select { - case <-ntfn.Event.Confirmed: - case <-n.quit: - return ErrTxNotifierExiting.Default() - default: - } - - ntfn.dispatched = false - - // Send a negative confirmation notification to the client indicating - // how many blocks have been disconnected successively. - select { - case ntfn.Event.NegativeConf <- int32(n.reorgDepth): - case <-n.quit: - return ErrTxNotifierExiting.Default() - } - - return nil -} - -// dispatchSpendReorg dispatches a reorg notification to the client if a spend -// notiification was already delivered. -// -// NOTE: This must be called with the TxNotifier's lock held. -func (n *TxNotifier) dispatchSpendReorg(ntfn *SpendNtfn) er.R { - if !ntfn.dispatched { - return nil - } - - // Attempt to drain the spend notification to ensure sends to the Spend - // channel are always non-blocking. - select { - case <-ntfn.Event.Spend: - default: - } - - // Send a reorg notification to the client in order for them to - // correctly handle reorgs. 
- select { - case ntfn.Event.Reorg <- struct{}{}: - case <-n.quit: - return ErrTxNotifierExiting.Default() - } - - ntfn.dispatched = false - - return nil -} - -// TearDown is to be called when the owner of the TxNotifier is exiting. This -// closes the event channels of all registered notifications that have not been -// dispatched yet. -func (n *TxNotifier) TearDown() { - close(n.quit) - - n.Lock() - defer n.Unlock() - - for _, confSet := range n.confNotifications { - for confID, ntfn := range confSet.ntfns { - close(ntfn.Event.Confirmed) - close(ntfn.Event.Updates) - close(ntfn.Event.NegativeConf) - close(ntfn.Event.Done) - delete(confSet.ntfns, confID) - } - } - - for _, spendSet := range n.spendNotifications { - for spendID, ntfn := range spendSet.ntfns { - close(ntfn.Event.Spend) - close(ntfn.Event.Reorg) - close(ntfn.Event.Done) - delete(spendSet.ntfns, spendID) - } - } -} diff --git a/lnd/chainntnfs/txnotifier_test.go b/lnd/chainntnfs/txnotifier_test.go deleted file mode 100644 index ef0dd4d4..00000000 --- a/lnd/chainntnfs/txnotifier_test.go +++ /dev/null @@ -1,2674 +0,0 @@ -package chainntnfs_test - -import ( - "bytes" - "sync" - "testing" - "time" - - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/lnd/chainntnfs" - "github.com/pkt-cash/pktd/wire" -) - -var ( - testRawScript = []byte{ - // OP_HASH160 - 0xa9, - // OP_DATA_20 - 0x14, - // <20-byte script hash> - 0x90, 0x1c, 0x86, 0x94, 0xc0, 0x3f, 0xaf, 0xd5, - 0x52, 0x28, 0x10, 0xe0, 0x33, 0x0f, 0x26, 0xe6, - 0x7a, 0x85, 0x33, 0xcd, - // OP_EQUAL - 0x87, - } - testSigScript = []byte{ - // OP_DATA_16 - 0x16, - // <22-byte redeem script> - 0x00, 0x14, 0x1d, 0x7c, 0xd6, 0xc7, 0x5c, 0x2e, - 0x86, 0xf4, 0xcb, 0xf9, 0x8e, 0xae, 0xd2, 0x21, - 0xb3, 0x0b, 0xd9, 0xa0, 0xb9, 0x28, - } -) - -type mockHintCache struct { - mu sync.Mutex - confHints map[chainntnfs.ConfRequest]uint32 - spendHints 
map[chainntnfs.SpendRequest]uint32 -} - -var _ chainntnfs.SpendHintCache = (*mockHintCache)(nil) -var _ chainntnfs.ConfirmHintCache = (*mockHintCache)(nil) - -func (c *mockHintCache) CommitSpendHint(heightHint uint32, - spendRequests ...chainntnfs.SpendRequest) er.R { - - c.mu.Lock() - defer c.mu.Unlock() - - for _, spendRequest := range spendRequests { - c.spendHints[spendRequest] = heightHint - } - - return nil -} - -func (c *mockHintCache) QuerySpendHint(spendRequest chainntnfs.SpendRequest) (uint32, er.R) { - c.mu.Lock() - defer c.mu.Unlock() - - hint, ok := c.spendHints[spendRequest] - if !ok { - return 0, chainntnfs.ErrSpendHintNotFound.Default() - } - - return hint, nil -} - -func (c *mockHintCache) PurgeSpendHint(spendRequests ...chainntnfs.SpendRequest) er.R { - c.mu.Lock() - defer c.mu.Unlock() - - for _, spendRequest := range spendRequests { - delete(c.spendHints, spendRequest) - } - - return nil -} - -func (c *mockHintCache) CommitConfirmHint(heightHint uint32, - confRequests ...chainntnfs.ConfRequest) er.R { - - c.mu.Lock() - defer c.mu.Unlock() - - for _, confRequest := range confRequests { - c.confHints[confRequest] = heightHint - } - - return nil -} - -func (c *mockHintCache) QueryConfirmHint(confRequest chainntnfs.ConfRequest) (uint32, er.R) { - c.mu.Lock() - defer c.mu.Unlock() - - hint, ok := c.confHints[confRequest] - if !ok { - return 0, chainntnfs.ErrConfirmHintNotFound.Default() - } - - return hint, nil -} - -func (c *mockHintCache) PurgeConfirmHint(confRequests ...chainntnfs.ConfRequest) er.R { - c.mu.Lock() - defer c.mu.Unlock() - - for _, confRequest := range confRequests { - delete(c.confHints, confRequest) - } - - return nil -} - -func newMockHintCache() *mockHintCache { - return &mockHintCache{ - confHints: make(map[chainntnfs.ConfRequest]uint32), - spendHints: make(map[chainntnfs.SpendRequest]uint32), - } -} - -// TestTxNotifierRegistrationValidation ensures that we are not able to register -// requests with invalid parameters. 
-func TestTxNotifierRegistrationValidation(t *testing.T) { - t.Parallel() - - testCases := []struct { - name string - pkScript []byte - numConfs uint32 - heightHint uint32 - checkSpend bool - err *er.ErrorCode - }{ - { - name: "empty output script", - pkScript: nil, - numConfs: 1, - heightHint: 1, - checkSpend: true, - err: chainntnfs.ErrNoScript, - }, - { - name: "zero num confs", - pkScript: testRawScript, - numConfs: 0, - heightHint: 1, - err: chainntnfs.ErrNumConfsOutOfRange, - }, - { - name: "exceed max num confs", - pkScript: testRawScript, - numConfs: chainntnfs.MaxNumConfs + 1, - heightHint: 1, - err: chainntnfs.ErrNumConfsOutOfRange, - }, - { - name: "empty height hint", - pkScript: testRawScript, - numConfs: 1, - heightHint: 0, - checkSpend: true, - err: chainntnfs.ErrNoHeightHint, - }, - } - - for _, testCase := range testCases { - testCase := testCase - t.Run(testCase.name, func(t *testing.T) { - hintCache := newMockHintCache() - n := chainntnfs.NewTxNotifier( - 10, chainntnfs.ReorgSafetyLimit, hintCache, hintCache, - ) - - _, err := n.RegisterConf( - &chainntnfs.ZeroHash, testCase.pkScript, - testCase.numConfs, testCase.heightHint, - ) - if testCase.err == nil && err == nil { - } else if testCase.err == nil || !testCase.err.Is(err) { - t.Fatalf("conf registration expected error "+ - "\"%v\", got \"%v\"", testCase.err, err) - } - - if !testCase.checkSpend { - return - } - - _, err = n.RegisterSpend( - &chainntnfs.ZeroOutPoint, testCase.pkScript, - testCase.heightHint, - ) - if testCase.err == nil && err == nil { - } else if testCase.err == nil || !testCase.err.Is(err) { - t.Fatalf("spend registration expected error "+ - "\"%v\", got \"%v\"", testCase.err, err) - } - }) - } -} - -// TestTxNotifierFutureConfDispatch tests that the TxNotifier dispatches -// registered notifications when a transaction confirms after registration. 
-func TestTxNotifierFutureConfDispatch(t *testing.T) { - t.Parallel() - - const ( - tx1NumConfs uint32 = 1 - tx2NumConfs uint32 = 2 - ) - - hintCache := newMockHintCache() - n := chainntnfs.NewTxNotifier( - 10, chainntnfs.ReorgSafetyLimit, hintCache, hintCache, - ) - - // Create the test transactions and register them with the TxNotifier - // before including them in a block to receive future - // notifications. - tx1 := wire.MsgTx{Version: 1} - tx1.AddTxOut(&wire.TxOut{PkScript: testRawScript}) - tx1Hash := tx1.TxHash() - ntfn1, err := n.RegisterConf(&tx1Hash, testRawScript, tx1NumConfs, 1) - if err != nil { - t.Fatalf("unable to register ntfn: %v", err) - } - - tx2 := wire.MsgTx{Version: 2} - tx2.AddTxOut(&wire.TxOut{PkScript: testRawScript}) - tx2Hash := tx2.TxHash() - ntfn2, err := n.RegisterConf(&tx2Hash, testRawScript, tx2NumConfs, 1) - if err != nil { - t.Fatalf("unable to register ntfn: %v", err) - } - - // We should not receive any notifications from both transactions - // since they have not been included in a block yet. - select { - case <-ntfn1.Event.Updates: - t.Fatal("Received unexpected confirmation update for tx1") - case txConf := <-ntfn1.Event.Confirmed: - t.Fatalf("Received unexpected confirmation for tx1: %v", txConf) - default: - } - - select { - case <-ntfn2.Event.Updates: - t.Fatal("Received unexpected confirmation update for tx2") - case txConf := <-ntfn2.Event.Confirmed: - t.Fatalf("Received unexpected confirmation for tx2: %v", txConf) - default: - } - - // Include the transactions in a block and add it to the TxNotifier. - // This should confirm tx1, but not tx2. 
- block1 := btcutil.NewBlock(&wire.MsgBlock{ - Transactions: []*wire.MsgTx{&tx1, &tx2}, - }) - - err = n.ConnectTip(block1.Hash(), 11, block1.Transactions()) - if err != nil { - t.Fatalf("Failed to connect block: %v", err) - } - if err := n.NotifyHeight(11); err != nil { - t.Fatalf("unable to dispatch notifications: %v", err) - } - - // We should only receive one update for tx1 since it only requires - // one confirmation and it already met it. - select { - case numConfsLeft := <-ntfn1.Event.Updates: - const expected = 0 - if numConfsLeft != expected { - t.Fatalf("Received incorrect confirmation update: tx1 "+ - "expected %d confirmations left, got %d", - expected, numConfsLeft) - } - default: - t.Fatal("Expected confirmation update for tx1") - } - - // A confirmation notification for this tranaction should be dispatched, - // as it only required one confirmation. - select { - case txConf := <-ntfn1.Event.Confirmed: - expectedConf := chainntnfs.TxConfirmation{ - BlockHash: block1.Hash(), - BlockHeight: 11, - TxIndex: 0, - Tx: &tx1, - } - assertConfDetails(t, txConf, &expectedConf) - default: - t.Fatalf("Expected confirmation for tx1") - } - - // We should only receive one update for tx2 since it only has one - // confirmation so far and it requires two. - select { - case numConfsLeft := <-ntfn2.Event.Updates: - const expected = 1 - if numConfsLeft != expected { - t.Fatalf("Received incorrect confirmation update: tx2 "+ - "expected %d confirmations left, got %d", - expected, numConfsLeft) - } - default: - t.Fatal("Expected confirmation update for tx2") - } - - // A confirmation notification for tx2 should not be dispatched yet, as - // it requires one more confirmation. - select { - case txConf := <-ntfn2.Event.Confirmed: - t.Fatalf("Received unexpected confirmation for tx2: %v", txConf) - default: - } - - // Create a new block and add it to the TxNotifier at the next height. - // This should confirm tx2. 
- block2 := btcutil.NewBlock(&wire.MsgBlock{}) - err = n.ConnectTip(block2.Hash(), 12, block2.Transactions()) - if err != nil { - t.Fatalf("Failed to connect block: %v", err) - } - if err := n.NotifyHeight(12); err != nil { - t.Fatalf("unable to dispatch notifications: %v", err) - } - - // We should not receive any event notifications for tx1 since it has - // already been confirmed. - select { - case <-ntfn1.Event.Updates: - t.Fatal("Received unexpected confirmation update for tx1") - case txConf := <-ntfn1.Event.Confirmed: - t.Fatalf("Received unexpected confirmation for tx1: %v", txConf) - default: - } - - // We should only receive one update since the last at the new height, - // indicating how many confirmations are still left. - select { - case numConfsLeft := <-ntfn2.Event.Updates: - const expected = 0 - if numConfsLeft != expected { - t.Fatalf("Received incorrect confirmation update: tx2 "+ - "expected %d confirmations left, got %d", - expected, numConfsLeft) - } - default: - t.Fatal("Expected confirmation update for tx2") - } - - // A confirmation notification for tx2 should be dispatched, since it - // now meets its required number of confirmations. - select { - case txConf := <-ntfn2.Event.Confirmed: - expectedConf := chainntnfs.TxConfirmation{ - BlockHash: block1.Hash(), - BlockHeight: 11, - TxIndex: 1, - Tx: &tx2, - } - assertConfDetails(t, txConf, &expectedConf) - default: - t.Fatalf("Expected confirmation for tx2") - } -} - -// TestTxNotifierHistoricalConfDispatch tests that the TxNotifier dispatches -// registered notifications when the transaction is confirmed before -// registration. 
-func TestTxNotifierHistoricalConfDispatch(t *testing.T) { - t.Parallel() - - const ( - tx1NumConfs uint32 = 1 - tx2NumConfs uint32 = 3 - ) - - var ( - tx1 = wire.MsgTx{Version: 1} - tx2 = wire.MsgTx{Version: 2} - tx3 = wire.MsgTx{Version: 3} - ) - - hintCache := newMockHintCache() - n := chainntnfs.NewTxNotifier( - 10, chainntnfs.ReorgSafetyLimit, hintCache, hintCache, - ) - - // Create the test transactions at a height before the TxNotifier's - // starting height so that they are confirmed once registering them. - tx1Hash := tx1.TxHash() - ntfn1, err := n.RegisterConf(&tx1Hash, testRawScript, tx1NumConfs, 1) - if err != nil { - t.Fatalf("unable to register ntfn: %v", err) - } - - tx2Hash := tx2.TxHash() - ntfn2, err := n.RegisterConf(&tx2Hash, testRawScript, tx2NumConfs, 1) - if err != nil { - t.Fatalf("unable to register ntfn: %v", err) - } - - // Update tx1 with its confirmation details. We should only receive one - // update since it only requires one confirmation and it already met it. - txConf1 := chainntnfs.TxConfirmation{ - BlockHash: &chainntnfs.ZeroHash, - BlockHeight: 9, - TxIndex: 1, - Tx: &tx1, - } - err = n.UpdateConfDetails(ntfn1.HistoricalDispatch.ConfRequest, &txConf1) - if err != nil { - t.Fatalf("unable to update conf details: %v", err) - } - select { - case numConfsLeft := <-ntfn1.Event.Updates: - const expected = 0 - if numConfsLeft != expected { - t.Fatalf("Received incorrect confirmation update: tx1 "+ - "expected %d confirmations left, got %d", - expected, numConfsLeft) - } - default: - t.Fatal("Expected confirmation update for tx1") - } - - // A confirmation notification for tx1 should also be dispatched. - select { - case txConf := <-ntfn1.Event.Confirmed: - assertConfDetails(t, txConf, &txConf1) - default: - t.Fatalf("Expected confirmation for tx1") - } - - // Update tx2 with its confirmation details. 
This should not trigger a - // confirmation notification since it hasn't reached its required number - // of confirmations, but we should receive a confirmation update - // indicating how many confirmation are left. - txConf2 := chainntnfs.TxConfirmation{ - BlockHash: &chainntnfs.ZeroHash, - BlockHeight: 9, - TxIndex: 2, - Tx: &tx2, - } - err = n.UpdateConfDetails(ntfn2.HistoricalDispatch.ConfRequest, &txConf2) - if err != nil { - t.Fatalf("unable to update conf details: %v", err) - } - select { - case numConfsLeft := <-ntfn2.Event.Updates: - const expected = 1 - if numConfsLeft != expected { - t.Fatalf("Received incorrect confirmation update: tx2 "+ - "expected %d confirmations left, got %d", - expected, numConfsLeft) - } - default: - t.Fatal("Expected confirmation update for tx2") - } - - select { - case txConf := <-ntfn2.Event.Confirmed: - t.Fatalf("Received unexpected confirmation for tx2: %v", txConf) - default: - } - - // Create a new block and add it to the TxNotifier at the next height. - // This should confirm tx2. - block := btcutil.NewBlock(&wire.MsgBlock{ - Transactions: []*wire.MsgTx{&tx3}, - }) - - err = n.ConnectTip(block.Hash(), 11, block.Transactions()) - if err != nil { - t.Fatalf("Failed to connect block: %v", err) - } - if err := n.NotifyHeight(11); err != nil { - t.Fatalf("unable to dispatch notifications: %v", err) - } - - // We should not receive any event notifications for tx1 since it has - // already been confirmed. - select { - case <-ntfn1.Event.Updates: - t.Fatal("Received unexpected confirmation update for tx1") - case txConf := <-ntfn1.Event.Confirmed: - t.Fatalf("Received unexpected confirmation for tx1: %v", txConf) - default: - } - - // We should only receive one update for tx2 since the last one, - // indicating how many confirmations are still left. 
- select { - case numConfsLeft := <-ntfn2.Event.Updates: - const expected = 0 - if numConfsLeft != expected { - t.Fatalf("Received incorrect confirmation update: tx2 "+ - "expected %d confirmations left, got %d", - expected, numConfsLeft) - } - default: - t.Fatal("Expected confirmation update for tx2") - } - - // A confirmation notification for tx2 should be dispatched, as it met - // its required number of confirmations. - select { - case txConf := <-ntfn2.Event.Confirmed: - assertConfDetails(t, txConf, &txConf2) - default: - t.Fatalf("Expected confirmation for tx2") - } -} - -// TestTxNotifierFutureSpendDispatch tests that the TxNotifier dispatches -// registered notifications when an outpoint is spent after registration. -func TestTxNotifierFutureSpendDispatch(t *testing.T) { - t.Parallel() - - hintCache := newMockHintCache() - n := chainntnfs.NewTxNotifier( - 10, chainntnfs.ReorgSafetyLimit, hintCache, hintCache, - ) - - // We'll start off by registering for a spend notification of an - // outpoint. - op := wire.OutPoint{Index: 1} - ntfn, err := n.RegisterSpend(&op, testRawScript, 1) - if err != nil { - t.Fatalf("unable to register spend ntfn: %v", err) - } - - // We should not receive a notification as the outpoint has not been - // spent yet. - select { - case <-ntfn.Event.Spend: - t.Fatal("received unexpected spend notification") - default: - } - - // Construct the details of the spending transaction of the outpoint - // above. We'll include it in the next block, which should trigger a - // spend notification. 
- spendTx := wire.NewMsgTx(2) - spendTx.AddTxIn(&wire.TxIn{ - PreviousOutPoint: op, - SignatureScript: testSigScript, - }) - spendTxHash := spendTx.TxHash() - block := btcutil.NewBlock(&wire.MsgBlock{ - Transactions: []*wire.MsgTx{spendTx}, - }) - err = n.ConnectTip(block.Hash(), 11, block.Transactions()) - if err != nil { - t.Fatalf("unable to connect block: %v", err) - } - if err := n.NotifyHeight(11); err != nil { - t.Fatalf("unable to dispatch notifications: %v", err) - } - - expectedSpendDetails := &chainntnfs.SpendDetail{ - SpentOutPoint: &op, - SpenderTxHash: &spendTxHash, - SpendingTx: spendTx, - SpenderInputIndex: 0, - SpendingHeight: 11, - } - - // Ensure that the details of the notification match as expected. - select { - case spendDetails := <-ntfn.Event.Spend: - assertSpendDetails(t, spendDetails, expectedSpendDetails) - default: - t.Fatal("expected to receive spend details") - } - - // Finally, we'll ensure that if the spending transaction has also been - // spent, then we don't receive another spend notification. - prevOut := wire.OutPoint{Hash: spendTxHash, Index: 0} - spendOfSpend := wire.NewMsgTx(2) - spendOfSpend.AddTxIn(&wire.TxIn{ - PreviousOutPoint: prevOut, - SignatureScript: testSigScript, - }) - block = btcutil.NewBlock(&wire.MsgBlock{ - Transactions: []*wire.MsgTx{spendOfSpend}, - }) - err = n.ConnectTip(block.Hash(), 12, block.Transactions()) - if err != nil { - t.Fatalf("unable to connect block: %v", err) - } - if err := n.NotifyHeight(12); err != nil { - t.Fatalf("unable to dispatch notifications: %v", err) - } - - select { - case <-ntfn.Event.Spend: - t.Fatal("received unexpected spend notification") - default: - } -} - -// TestTxNotifierFutureConfDispatchReuseSafe tests that the notifier does not -// misbehave even if two confirmation requests for the same script are issued -// at different block heights (which means funds are being sent to the same -// script multiple times). 
-func TestTxNotifierFutureConfDispatchReuseSafe(t *testing.T) { - t.Parallel() - - currentBlock := uint32(10) - hintCache := newMockHintCache() - n := chainntnfs.NewTxNotifier( - currentBlock, 2, hintCache, hintCache, - ) - - // We'll register a TX that sends to our test script and put it into a - // block. Additionally we register a notification request for just the - // script which should also be confirmed with that block. - tx1 := wire.MsgTx{Version: 1} - tx1.AddTxOut(&wire.TxOut{PkScript: testRawScript}) - tx1Hash := tx1.TxHash() - ntfn1, err := n.RegisterConf(&tx1Hash, testRawScript, 1, 1) - if err != nil { - t.Fatalf("unable to register ntfn: %v", err) - } - scriptNtfn1, err := n.RegisterConf(nil, testRawScript, 1, 1) - if err != nil { - t.Fatalf("unable to register ntfn: %v", err) - } - block := btcutil.NewBlock(&wire.MsgBlock{ - Transactions: []*wire.MsgTx{&tx1}, - }) - currentBlock++ - err = n.ConnectTip(block.Hash(), currentBlock, block.Transactions()) - if err != nil { - t.Fatalf("unable to connect block: %v", err) - } - if err := n.NotifyHeight(currentBlock); err != nil { - t.Fatalf("unable to dispatch notifications: %v", err) - } - - // Expect an update and confirmation of TX 1 at this point. We save the - // confirmation details because we expect to receive the same details - // for all further registrations. - var confDetails *chainntnfs.TxConfirmation - select { - case <-ntfn1.Event.Updates: - default: - t.Fatal("expected update of TX 1") - } - select { - case confDetails = <-ntfn1.Event.Confirmed: - if confDetails.BlockHeight != currentBlock { - t.Fatalf("expected TX to be confirmed in latest block") - } - default: - t.Fatal("expected confirmation of TX 1") - } - - // The notification for the script should also have received a - // confirmation. 
- select { - case <-scriptNtfn1.Event.Updates: - default: - t.Fatal("expected update of script ntfn") - } - select { - case details := <-scriptNtfn1.Event.Confirmed: - assertConfDetails(t, details, confDetails) - default: - t.Fatal("expected update of script ntfn") - } - - // Now register a second TX that spends to two outputs with the same - // script so we have a different TXID. And again register a confirmation - // for just the script. - tx2 := wire.MsgTx{Version: 1} - tx2.AddTxOut(&wire.TxOut{PkScript: testRawScript}) - tx2.AddTxOut(&wire.TxOut{PkScript: testRawScript}) - tx2Hash := tx2.TxHash() - ntfn2, err := n.RegisterConf(&tx2Hash, testRawScript, 1, 1) - if err != nil { - t.Fatalf("unable to register ntfn: %v", err) - } - scriptNtfn2, err := n.RegisterConf(nil, testRawScript, 1, 1) - if err != nil { - t.Fatalf("unable to register ntfn: %v", err) - } - block2 := btcutil.NewBlock(&wire.MsgBlock{ - Transactions: []*wire.MsgTx{&tx2}, - }) - currentBlock++ - err = n.ConnectTip(block2.Hash(), currentBlock, block2.Transactions()) - if err != nil { - t.Fatalf("unable to connect block: %v", err) - } - if err := n.NotifyHeight(currentBlock); err != nil { - t.Fatalf("unable to dispatch notifications: %v", err) - } - - // Transaction 2 should get a confirmation here too. Since it was - // a different TXID we wouldn't get the cached details here but the TX - // should be confirmed right away still. - select { - case <-ntfn2.Event.Updates: - default: - t.Fatal("expected update of TX 2") - } - select { - case details := <-ntfn2.Event.Confirmed: - if details.BlockHeight != currentBlock { - t.Fatalf("expected TX to be confirmed in latest block") - } - default: - t.Fatal("expected update of TX 2") - } - - // The second notification for the script should also have received a - // confirmation. Since it's the same script, we expect to get the cached - // details from the first TX back immediately. 
Nothing should be - // registered at the notifier for the current block height for that - // script any more. - select { - case <-scriptNtfn2.Event.Updates: - default: - t.Fatal("expected update of script ntfn") - } - select { - case details := <-scriptNtfn2.Event.Confirmed: - assertConfDetails(t, details, confDetails) - default: - t.Fatal("expected update of script ntfn") - } - - // Finally, mine a few empty blocks and expect both TXs to be confirmed. - for currentBlock < 15 { - block := btcutil.NewBlock(&wire.MsgBlock{}) - currentBlock++ - err = n.ConnectTip( - block.Hash(), currentBlock, block.Transactions(), - ) - if err != nil { - t.Fatalf("unable to connect block: %v", err) - } - if err := n.NotifyHeight(currentBlock); err != nil { - t.Fatalf("unable to dispatch notifications: %v", err) - } - } - - // Events for both confirmation requests should have been dispatched. - select { - case <-ntfn1.Event.Done: - default: - t.Fatal("expected notifications for TX 1 to be done") - } - select { - case <-ntfn2.Event.Done: - default: - t.Fatal("expected notifications for TX 2 to be done") - } -} - -// TestTxNotifierHistoricalSpendDispatch tests that the TxNotifier dispatches -// registered notifications when an outpoint is spent before registration. -func TestTxNotifierHistoricalSpendDispatch(t *testing.T) { - t.Parallel() - - const startingHeight = 10 - - hintCache := newMockHintCache() - n := chainntnfs.NewTxNotifier( - startingHeight, chainntnfs.ReorgSafetyLimit, hintCache, - hintCache, - ) - - // We'll start by constructing the spending details of the outpoint - // below. 
- spentOutpoint := wire.OutPoint{Index: 1} - spendTx := wire.NewMsgTx(2) - spendTx.AddTxIn(&wire.TxIn{ - PreviousOutPoint: spentOutpoint, - SignatureScript: testSigScript, - }) - spendTxHash := spendTx.TxHash() - - expectedSpendDetails := &chainntnfs.SpendDetail{ - SpentOutPoint: &spentOutpoint, - SpenderTxHash: &spendTxHash, - SpendingTx: spendTx, - SpenderInputIndex: 0, - SpendingHeight: startingHeight - 1, - } - - // We'll register for a spend notification of the outpoint and ensure - // that a notification isn't dispatched. - ntfn, err := n.RegisterSpend(&spentOutpoint, testRawScript, 1) - if err != nil { - t.Fatalf("unable to register spend ntfn: %v", err) - } - - select { - case <-ntfn.Event.Spend: - t.Fatal("received unexpected spend notification") - default: - } - - // Because we're interested in testing the case of a historical spend, - // we'll hand off the spending details of the outpoint to the notifier - // as it is not possible for it to view historical events in the chain. - // By doing this, we replicate the functionality of the ChainNotifier. - err = n.UpdateSpendDetails( - ntfn.HistoricalDispatch.SpendRequest, expectedSpendDetails, - ) - if err != nil { - t.Fatalf("unable to update spend details: %v", err) - } - - // Now that we have the spending details, we should receive a spend - // notification. We'll ensure that the details match as intended. - select { - case spendDetails := <-ntfn.Event.Spend: - assertSpendDetails(t, spendDetails, expectedSpendDetails) - default: - t.Fatalf("expected to receive spend details") - } - - // Finally, we'll ensure that if the spending transaction has also been - // spent, then we don't receive another spend notification. 
- prevOut := wire.OutPoint{Hash: spendTxHash, Index: 0} - spendOfSpend := wire.NewMsgTx(2) - spendOfSpend.AddTxIn(&wire.TxIn{ - PreviousOutPoint: prevOut, - SignatureScript: testSigScript, - }) - block := btcutil.NewBlock(&wire.MsgBlock{ - Transactions: []*wire.MsgTx{spendOfSpend}, - }) - err = n.ConnectTip(block.Hash(), startingHeight+1, block.Transactions()) - if err != nil { - t.Fatalf("unable to connect block: %v", err) - } - if err := n.NotifyHeight(startingHeight + 1); err != nil { - t.Fatalf("unable to dispatch notifications: %v", err) - } - - select { - case <-ntfn.Event.Spend: - t.Fatal("received unexpected spend notification") - default: - } -} - -// TestTxNotifierMultipleHistoricalRescans ensures that we don't attempt to -// request multiple historical confirmation rescans per transactions. -func TestTxNotifierMultipleHistoricalConfRescans(t *testing.T) { - t.Parallel() - - const startingHeight = 10 - hintCache := newMockHintCache() - n := chainntnfs.NewTxNotifier( - startingHeight, chainntnfs.ReorgSafetyLimit, hintCache, - hintCache, - ) - - // The first registration for a transaction in the notifier should - // request a historical confirmation rescan as it does not have a - // historical view of the chain. - ntfn1, err := n.RegisterConf(&chainntnfs.ZeroHash, testRawScript, 1, 1) - if err != nil { - t.Fatalf("unable to register spend ntfn: %v", err) - } - if ntfn1.HistoricalDispatch == nil { - t.Fatal("expected to receive historical dispatch request") - } - - // We'll register another confirmation notification for the same - // transaction. This should not request a historical confirmation rescan - // since the first one is still pending. 
- ntfn2, err := n.RegisterConf(&chainntnfs.ZeroHash, testRawScript, 1, 1) - if err != nil { - t.Fatalf("unable to register spend ntfn: %v", err) - } - if ntfn2.HistoricalDispatch != nil { - t.Fatal("received unexpected historical rescan request") - } - - // Finally, we'll mark the ongoing historical rescan as complete and - // register another notification. We should also expect not to see a - // historical rescan request since the confirmation details should be - // cached. - confDetails := &chainntnfs.TxConfirmation{ - BlockHeight: startingHeight - 1, - } - err = n.UpdateConfDetails(ntfn1.HistoricalDispatch.ConfRequest, confDetails) - if err != nil { - t.Fatalf("unable to update conf details: %v", err) - } - - ntfn3, err := n.RegisterConf(&chainntnfs.ZeroHash, testRawScript, 1, 1) - if err != nil { - t.Fatalf("unable to register spend ntfn: %v", err) - } - if ntfn3.HistoricalDispatch != nil { - t.Fatal("received unexpected historical rescan request") - } -} - -// TestTxNotifierMultipleHistoricalRescans ensures that we don't attempt to -// request multiple historical spend rescans per outpoints. -func TestTxNotifierMultipleHistoricalSpendRescans(t *testing.T) { - t.Parallel() - - const startingHeight = 10 - hintCache := newMockHintCache() - n := chainntnfs.NewTxNotifier( - startingHeight, chainntnfs.ReorgSafetyLimit, hintCache, - hintCache, - ) - - // The first registration for an outpoint in the notifier should request - // a historical spend rescan as it does not have a historical view of - // the chain. - op := wire.OutPoint{Index: 1} - ntfn1, err := n.RegisterSpend(&op, testRawScript, 1) - if err != nil { - t.Fatalf("unable to register spend ntfn: %v", err) - } - if ntfn1.HistoricalDispatch == nil { - t.Fatal("expected to receive historical dispatch request") - } - - // We'll register another spend notification for the same outpoint. This - // should not request a historical spend rescan since the first one is - // still pending. 
- ntfn2, err := n.RegisterSpend(&op, testRawScript, 1) - if err != nil { - t.Fatalf("unable to register spend ntfn: %v", err) - } - if ntfn2.HistoricalDispatch != nil { - t.Fatal("received unexpected historical rescan request") - } - - // Finally, we'll mark the ongoing historical rescan as complete and - // register another notification. We should also expect not to see a - // historical rescan request since the confirmation details should be - // cached. - spendDetails := &chainntnfs.SpendDetail{ - SpentOutPoint: &op, - SpenderTxHash: &chainntnfs.ZeroHash, - SpendingTx: wire.NewMsgTx(2), - SpenderInputIndex: 0, - SpendingHeight: startingHeight - 1, - } - err = n.UpdateSpendDetails( - ntfn1.HistoricalDispatch.SpendRequest, spendDetails, - ) - if err != nil { - t.Fatalf("unable to update spend details: %v", err) - } - - ntfn3, err := n.RegisterSpend(&op, testRawScript, 1) - if err != nil { - t.Fatalf("unable to register spend ntfn: %v", err) - } - if ntfn3.HistoricalDispatch != nil { - t.Fatal("received unexpected historical rescan request") - } -} - -// TestTxNotifierMultipleHistoricalNtfns ensures that the TxNotifier will only -// request one rescan for a transaction/outpoint when having multiple client -// registrations. Once the rescan has completed and retrieved the -// confirmation/spend details, a notification should be dispatched to _all_ -// clients. -func TestTxNotifierMultipleHistoricalNtfns(t *testing.T) { - t.Parallel() - - const ( - numNtfns = 5 - startingHeight = 10 - ) - - hintCache := newMockHintCache() - n := chainntnfs.NewTxNotifier( - startingHeight, chainntnfs.ReorgSafetyLimit, hintCache, - hintCache, - ) - - var txid chainhash.Hash - copy(txid[:], bytes.Repeat([]byte{0x01}, 32)) - - // We'll start off by registered 5 clients for a confirmation - // notification on the same transaction. 
- confNtfns := make([]*chainntnfs.ConfRegistration, numNtfns) - for i := uint64(0); i < numNtfns; i++ { - ntfn, err := n.RegisterConf(&txid, testRawScript, 1, 1) - if err != nil { - t.Fatalf("unable to register conf ntfn #%d: %v", i, err) - } - confNtfns[i] = ntfn - } - - // Ensure none of them have received the confirmation details. - for i, ntfn := range confNtfns { - select { - case <-ntfn.Event.Confirmed: - t.Fatalf("request #%d received unexpected confirmation "+ - "notification", i) - default: - } - } - - // We'll assume a historical rescan was dispatched and found the - // following confirmation details. We'll let the notifier know so that - // it can stop watching at tip. - expectedConfDetails := &chainntnfs.TxConfirmation{ - BlockHeight: startingHeight - 1, - Tx: wire.NewMsgTx(1), - } - err := n.UpdateConfDetails( - confNtfns[0].HistoricalDispatch.ConfRequest, expectedConfDetails, - ) - if err != nil { - t.Fatalf("unable to update conf details: %v", err) - } - - // With the confirmation details retrieved, each client should now have - // been notified of the confirmation. - for i, ntfn := range confNtfns { - select { - case confDetails := <-ntfn.Event.Confirmed: - assertConfDetails(t, confDetails, expectedConfDetails) - default: - t.Fatalf("request #%d expected to received "+ - "confirmation notification", i) - } - } - - // In order to ensure that the confirmation details are properly cached, - // we'll register another client for the same transaction. We should not - // see a historical rescan request and the confirmation notification - // should come through immediately. 
- extraConfNtfn, err := n.RegisterConf(&txid, testRawScript, 1, 1) - if err != nil { - t.Fatalf("unable to register conf ntfn: %v", err) - } - if extraConfNtfn.HistoricalDispatch != nil { - t.Fatal("received unexpected historical rescan request") - } - - select { - case confDetails := <-extraConfNtfn.Event.Confirmed: - assertConfDetails(t, confDetails, expectedConfDetails) - default: - t.Fatal("expected to receive spend notification") - } - - // Similarly, we'll do the same thing but for spend notifications. - op := wire.OutPoint{Index: 1} - spendNtfns := make([]*chainntnfs.SpendRegistration, numNtfns) - for i := uint64(0); i < numNtfns; i++ { - ntfn, err := n.RegisterSpend(&op, testRawScript, 1) - if err != nil { - t.Fatalf("unable to register spend ntfn #%d: %v", i, err) - } - spendNtfns[i] = ntfn - } - - // Ensure none of them have received the spend details. - for i, ntfn := range spendNtfns { - select { - case <-ntfn.Event.Spend: - t.Fatalf("request #%d received unexpected spend "+ - "notification", i) - default: - } - } - - // We'll assume a historical rescan was dispatched and found the - // following spend details. We'll let the notifier know so that it can - // stop watching at tip. - expectedSpendDetails := &chainntnfs.SpendDetail{ - SpentOutPoint: &op, - SpenderTxHash: &chainntnfs.ZeroHash, - SpendingTx: wire.NewMsgTx(2), - SpenderInputIndex: 0, - SpendingHeight: startingHeight - 1, - } - err = n.UpdateSpendDetails( - spendNtfns[0].HistoricalDispatch.SpendRequest, expectedSpendDetails, - ) - if err != nil { - t.Fatalf("unable to update spend details: %v", err) - } - - // With the spend details retrieved, each client should now have been - // notified of the spend. 
- for i, ntfn := range spendNtfns { - select { - case spendDetails := <-ntfn.Event.Spend: - assertSpendDetails(t, spendDetails, expectedSpendDetails) - default: - t.Fatalf("request #%d expected to received spend "+ - "notification", i) - } - } - - // Finally, in order to ensure that the spend details are properly - // cached, we'll register another client for the same outpoint. We - // should not see a historical rescan request and the spend notification - // should come through immediately. - extraSpendNtfn, err := n.RegisterSpend(&op, testRawScript, 1) - if err != nil { - t.Fatalf("unable to register spend ntfn: %v", err) - } - if extraSpendNtfn.HistoricalDispatch != nil { - t.Fatal("received unexpected historical rescan request") - } - - select { - case spendDetails := <-extraSpendNtfn.Event.Spend: - assertSpendDetails(t, spendDetails, expectedSpendDetails) - default: - t.Fatal("expected to receive spend notification") - } -} - -// TestTxNotifierCancelConf ensures that a confirmation notification after a -// client has canceled their intent to receive one. -func TestTxNotifierCancelConf(t *testing.T) { - t.Parallel() - - const startingHeight = 10 - hintCache := newMockHintCache() - n := chainntnfs.NewTxNotifier(startingHeight, 100, hintCache, hintCache) - - // We'll register four notification requests. The last three will be - // canceled. 
- tx1 := wire.NewMsgTx(1) - tx1.AddTxOut(&wire.TxOut{PkScript: testRawScript}) - tx1Hash := tx1.TxHash() - ntfn1, err := n.RegisterConf(&tx1Hash, testRawScript, 1, 1) - if err != nil { - t.Fatalf("unable to register spend ntfn: %v", err) - } - - tx2 := wire.NewMsgTx(2) - tx2.AddTxOut(&wire.TxOut{PkScript: testRawScript}) - tx2Hash := tx2.TxHash() - ntfn2, err := n.RegisterConf(&tx2Hash, testRawScript, 1, 1) - if err != nil { - t.Fatalf("unable to register spend ntfn: %v", err) - } - ntfn3, err := n.RegisterConf(&tx2Hash, testRawScript, 1, 1) - if err != nil { - t.Fatalf("unable to register spend ntfn: %v", err) - } - - // This request will have a three block num confs. - ntfn4, err := n.RegisterConf(&tx2Hash, testRawScript, 3, 1) - if err != nil { - t.Fatalf("unable to register spend ntfn: %v", err) - } - - // Extend the chain with a block that will confirm both transactions. - // This will queue confirmation notifications to dispatch once their - // respective heights have been met. - block := btcutil.NewBlock(&wire.MsgBlock{ - Transactions: []*wire.MsgTx{tx1, tx2}, - }) - tx1ConfDetails := &chainntnfs.TxConfirmation{ - BlockHeight: startingHeight + 1, - BlockHash: block.Hash(), - TxIndex: 0, - Tx: tx1, - } - - // Cancel the second notification before connecting the block. - ntfn2.Event.Cancel() - - err = n.ConnectTip(block.Hash(), startingHeight+1, block.Transactions()) - if err != nil { - t.Fatalf("unable to connect block: %v", err) - } - - // Cancel the third notification before notifying to ensure its queued - // confirmation notification gets removed as well. - ntfn3.Event.Cancel() - - if err := n.NotifyHeight(startingHeight + 1); err != nil { - t.Fatalf("unable to dispatch notifications: %v", err) - } - - // The first request should still be active, so we should receive a - // confirmation notification with the correct details. 
- select { - case confDetails := <-ntfn1.Event.Confirmed: - assertConfDetails(t, confDetails, tx1ConfDetails) - default: - t.Fatalf("expected to receive confirmation notification") - } - - // The second and third, however, should not have. The event's Confirmed - // channel must have also been closed to indicate the caller that the - // TxNotifier can no longer fulfill their canceled request. - select { - case _, ok := <-ntfn2.Event.Confirmed: - if ok { - t.Fatal("expected Confirmed channel to be closed") - } - default: - t.Fatal("expected Confirmed channel to be closed") - } - select { - case _, ok := <-ntfn3.Event.Confirmed: - if ok { - t.Fatal("expected Confirmed channel to be closed") - } - default: - t.Fatal("expected Confirmed channel to be closed") - } - - // Connect yet another block. - block1 := btcutil.NewBlock(&wire.MsgBlock{ - Transactions: []*wire.MsgTx{}, - }) - - err = n.ConnectTip(block1.Hash(), startingHeight+2, block1.Transactions()) - if err != nil { - t.Fatalf("unable to connect block: %v", err) - } - - if err := n.NotifyHeight(startingHeight + 2); err != nil { - t.Fatalf("unable to dispatch notifications: %v", err) - } - - // Since neither it reached the set confirmation height or was - // canceled, nothing should happen to ntfn4 in this block. - select { - case <-ntfn4.Event.Confirmed: - t.Fatal("expected nothing to happen") - case <-time.After(10 * time.Millisecond): - } - - // Now cancel the notification. - ntfn4.Event.Cancel() - select { - case _, ok := <-ntfn4.Event.Confirmed: - if ok { - t.Fatal("expected Confirmed channel to be closed") - } - default: - t.Fatal("expected Confirmed channel to be closed") - } - - // Finally, confirm a block that would trigger ntfn4 confirmation - // hadn't it already been canceled. 
- block2 := btcutil.NewBlock(&wire.MsgBlock{ - Transactions: []*wire.MsgTx{}, - }) - - err = n.ConnectTip(block2.Hash(), startingHeight+3, block2.Transactions()) - if err != nil { - t.Fatalf("unable to connect block: %v", err) - } - - if err := n.NotifyHeight(startingHeight + 3); err != nil { - t.Fatalf("unable to dispatch notifications: %v", err) - } -} - -// TestTxNotifierCancelSpend ensures that a spend notification after a client -// has canceled their intent to receive one. -func TestTxNotifierCancelSpend(t *testing.T) { - t.Parallel() - - const startingHeight = 10 - hintCache := newMockHintCache() - n := chainntnfs.NewTxNotifier( - startingHeight, chainntnfs.ReorgSafetyLimit, hintCache, - hintCache, - ) - - // We'll register two notification requests. Only the second one will be - // canceled. - op1 := wire.OutPoint{Index: 1} - ntfn1, err := n.RegisterSpend(&op1, testRawScript, 1) - if err != nil { - t.Fatalf("unable to register spend ntfn: %v", err) - } - - op2 := wire.OutPoint{Index: 2} - ntfn2, err := n.RegisterSpend(&op2, testRawScript, 1) - if err != nil { - t.Fatalf("unable to register spend ntfn: %v", err) - } - - // Construct the spending details of the outpoint and create a dummy - // block containing it. - spendTx := wire.NewMsgTx(2) - spendTx.AddTxIn(&wire.TxIn{ - PreviousOutPoint: op1, - SignatureScript: testSigScript, - }) - spendTxHash := spendTx.TxHash() - expectedSpendDetails := &chainntnfs.SpendDetail{ - SpentOutPoint: &op1, - SpenderTxHash: &spendTxHash, - SpendingTx: spendTx, - SpenderInputIndex: 0, - SpendingHeight: startingHeight + 1, - } - - block := btcutil.NewBlock(&wire.MsgBlock{ - Transactions: []*wire.MsgTx{spendTx}, - }) - - // Before extending the notifier's tip with the dummy block above, we'll - // cancel the second request. 
- n.CancelSpend(ntfn2.HistoricalDispatch.SpendRequest, 2) - - err = n.ConnectTip(block.Hash(), startingHeight+1, block.Transactions()) - if err != nil { - t.Fatalf("unable to connect block: %v", err) - } - if err := n.NotifyHeight(startingHeight + 1); err != nil { - t.Fatalf("unable to dispatch notifications: %v", err) - } - - // The first request should still be active, so we should receive a - // spend notification with the correct spending details. - select { - case spendDetails := <-ntfn1.Event.Spend: - assertSpendDetails(t, spendDetails, expectedSpendDetails) - default: - t.Fatalf("expected to receive spend notification") - } - - // The second one, however, should not have. The event's Spend channel - // must have also been closed to indicate the caller that the TxNotifier - // can no longer fulfill their canceled request. - select { - case _, ok := <-ntfn2.Event.Spend: - if ok { - t.Fatal("expected Spend channel to be closed") - } - default: - t.Fatal("expected Spend channel to be closed") - } -} - -// TestTxNotifierConfReorg ensures that clients are notified of a reorg when a -// transaction for which they registered a confirmation notification has been -// reorged out of the chain. -func TestTxNotifierConfReorg(t *testing.T) { - t.Parallel() - - const ( - tx1NumConfs uint32 = 2 - tx2NumConfs uint32 = 1 - tx3NumConfs uint32 = 2 - ) - - hintCache := newMockHintCache() - n := chainntnfs.NewTxNotifier( - 7, chainntnfs.ReorgSafetyLimit, hintCache, hintCache, - ) - - // Tx 1 will be confirmed in block 9 and requires 2 confs. 
- tx1 := wire.MsgTx{Version: 1} - tx1.AddTxOut(&wire.TxOut{PkScript: testRawScript}) - tx1Hash := tx1.TxHash() - ntfn1, err := n.RegisterConf(&tx1Hash, testRawScript, tx1NumConfs, 1) - if err != nil { - t.Fatalf("unable to register ntfn: %v", err) - } - - err = n.UpdateConfDetails(ntfn1.HistoricalDispatch.ConfRequest, nil) - if err != nil { - t.Fatalf("unable to deliver conf details: %v", err) - } - - // Tx 2 will be confirmed in block 10 and requires 1 conf. - tx2 := wire.MsgTx{Version: 2} - tx2.AddTxOut(&wire.TxOut{PkScript: testRawScript}) - tx2Hash := tx2.TxHash() - ntfn2, err := n.RegisterConf(&tx2Hash, testRawScript, tx2NumConfs, 1) - if err != nil { - t.Fatalf("unable to register ntfn: %v", err) - } - - err = n.UpdateConfDetails(ntfn2.HistoricalDispatch.ConfRequest, nil) - if err != nil { - t.Fatalf("unable to deliver conf details: %v", err) - } - - // Tx 3 will be confirmed in block 10 and requires 2 confs. - tx3 := wire.MsgTx{Version: 3} - tx3.AddTxOut(&wire.TxOut{PkScript: testRawScript}) - tx3Hash := tx3.TxHash() - ntfn3, err := n.RegisterConf(&tx3Hash, testRawScript, tx3NumConfs, 1) - if err != nil { - t.Fatalf("unable to register ntfn: %v", err) - } - - err = n.UpdateConfDetails(ntfn3.HistoricalDispatch.ConfRequest, nil) - if err != nil { - t.Fatalf("unable to deliver conf details: %v", err) - } - - // Sync chain to block 10. Txs 1 & 2 should be confirmed. 
- block1 := btcutil.NewBlock(&wire.MsgBlock{ - Transactions: []*wire.MsgTx{&tx1}, - }) - if err := n.ConnectTip(nil, 8, block1.Transactions()); err != nil { - t.Fatalf("Failed to connect block: %v", err) - } - if err := n.NotifyHeight(8); err != nil { - t.Fatalf("unable to dispatch notifications: %v", err) - } - if err := n.ConnectTip(nil, 9, nil); err != nil { - t.Fatalf("Failed to connect block: %v", err) - } - if err := n.NotifyHeight(9); err != nil { - t.Fatalf("unable to dispatch notifications: %v", err) - } - - block2 := btcutil.NewBlock(&wire.MsgBlock{ - Transactions: []*wire.MsgTx{&tx2, &tx3}, - }) - if err := n.ConnectTip(nil, 10, block2.Transactions()); err != nil { - t.Fatalf("Failed to connect block: %v", err) - } - if err := n.NotifyHeight(10); err != nil { - t.Fatalf("unable to dispatch notifications: %v", err) - } - - // We should receive two updates for tx1 since it requires two - // confirmations and it has already met them. - for i := 0; i < 2; i++ { - select { - case <-ntfn1.Event.Updates: - default: - t.Fatal("Expected confirmation update for tx1") - } - } - - // A confirmation notification for tx1 should be dispatched, as it met - // its required number of confirmations. - select { - case <-ntfn1.Event.Confirmed: - default: - t.Fatalf("Expected confirmation for tx1") - } - - // We should only receive one update for tx2 since it only requires - // one confirmation and it already met it. - select { - case <-ntfn2.Event.Updates: - default: - t.Fatal("Expected confirmation update for tx2") - } - - // A confirmation notification for tx2 should be dispatched, as it met - // its required number of confirmations. - select { - case <-ntfn2.Event.Confirmed: - default: - t.Fatalf("Expected confirmation for tx2") - } - - // We should only receive one update for tx3 since it only has one - // confirmation so far and it requires two. 
- select { - case <-ntfn3.Event.Updates: - default: - t.Fatal("Expected confirmation update for tx3") - } - - // A confirmation notification for tx3 should not be dispatched yet, as - // it requires one more confirmation. - select { - case txConf := <-ntfn3.Event.Confirmed: - t.Fatalf("Received unexpected confirmation for tx3: %v", txConf) - default: - } - - // The block that included tx2 and tx3 is disconnected and two next - // blocks without them are connected. - if err := n.DisconnectTip(10); err != nil { - t.Fatalf("Failed to connect block: %v", err) - } - - if err := n.ConnectTip(nil, 10, nil); err != nil { - t.Fatalf("Failed to connect block: %v", err) - } - if err := n.NotifyHeight(10); err != nil { - t.Fatalf("unable to dispatch notifications: %v", err) - } - - if err := n.ConnectTip(nil, 11, nil); err != nil { - t.Fatalf("Failed to connect block: %v", err) - } - if err := n.NotifyHeight(11); err != nil { - t.Fatalf("unable to dispatch notifications: %v", err) - } - - select { - case reorgDepth := <-ntfn2.Event.NegativeConf: - if reorgDepth != 1 { - t.Fatalf("Incorrect value for negative conf notification: "+ - "expected %d, got %d", 1, reorgDepth) - } - default: - t.Fatalf("Expected negative conf notification for tx1") - } - - // We should not receive any event notifications from all of the - // transactions because tx1 has already been confirmed and tx2 and tx3 - // have not been included in the chain since the reorg. 
- select { - case <-ntfn1.Event.Updates: - t.Fatal("Received unexpected confirmation update for tx1") - case txConf := <-ntfn1.Event.Confirmed: - t.Fatalf("Received unexpected confirmation for tx1: %v", txConf) - default: - } - - select { - case <-ntfn2.Event.Updates: - t.Fatal("Received unexpected confirmation update for tx2") - case txConf := <-ntfn2.Event.Confirmed: - t.Fatalf("Received unexpected confirmation for tx2: %v", txConf) - default: - } - - select { - case <-ntfn3.Event.Updates: - t.Fatal("Received unexpected confirmation update for tx3") - case txConf := <-ntfn3.Event.Confirmed: - t.Fatalf("Received unexpected confirmation for tx3: %v", txConf) - default: - } - - // Now transactions 2 & 3 are re-included in a new block. - block3 := btcutil.NewBlock(&wire.MsgBlock{ - Transactions: []*wire.MsgTx{&tx2, &tx3}, - }) - block4 := btcutil.NewBlock(&wire.MsgBlock{}) - - err = n.ConnectTip(block3.Hash(), 12, block3.Transactions()) - if err != nil { - t.Fatalf("Failed to connect block: %v", err) - } - if err := n.NotifyHeight(12); err != nil { - t.Fatalf("unable to dispatch notifications: %v", err) - } - - err = n.ConnectTip(block4.Hash(), 13, block4.Transactions()) - if err != nil { - t.Fatalf("Failed to connect block: %v", err) - } - if err := n.NotifyHeight(13); err != nil { - t.Fatalf("unable to dispatch notifications: %v", err) - } - - // We should only receive one update for tx2 since it only requires - // one confirmation and it already met it. - select { - case numConfsLeft := <-ntfn2.Event.Updates: - const expected = 0 - if numConfsLeft != expected { - t.Fatalf("Received incorrect confirmation update: tx2 "+ - "expected %d confirmations left, got %d", - expected, numConfsLeft) - } - default: - t.Fatal("Expected confirmation update for tx2") - } - - // A confirmation notification for tx2 should be dispatched, as it met - // its required number of confirmations. 
- select { - case txConf := <-ntfn2.Event.Confirmed: - expectedConf := chainntnfs.TxConfirmation{ - BlockHash: block3.Hash(), - BlockHeight: 12, - TxIndex: 0, - Tx: &tx2, - } - assertConfDetails(t, txConf, &expectedConf) - default: - t.Fatalf("Expected confirmation for tx2") - } - - // We should receive two updates for tx3 since it requires two - // confirmations and it has already met them. - for i := uint32(1); i <= 2; i++ { - select { - case numConfsLeft := <-ntfn3.Event.Updates: - expected := tx3NumConfs - i - if numConfsLeft != expected { - t.Fatalf("Received incorrect confirmation update: tx3 "+ - "expected %d confirmations left, got %d", - expected, numConfsLeft) - } - default: - t.Fatal("Expected confirmation update for tx2") - } - } - - // A confirmation notification for tx3 should be dispatched, as it met - // its required number of confirmations. - select { - case txConf := <-ntfn3.Event.Confirmed: - expectedConf := chainntnfs.TxConfirmation{ - BlockHash: block3.Hash(), - BlockHeight: 12, - TxIndex: 1, - Tx: &tx3, - } - assertConfDetails(t, txConf, &expectedConf) - default: - t.Fatalf("Expected confirmation for tx3") - } -} - -// TestTxNotifierSpendReorg ensures that clients are notified of a reorg when -// the spending transaction of an outpoint for which they registered a spend -// notification for has been reorged out of the chain. -func TestTxNotifierSpendReorg(t *testing.T) { - t.Parallel() - - const startingHeight = 10 - hintCache := newMockHintCache() - n := chainntnfs.NewTxNotifier( - startingHeight, chainntnfs.ReorgSafetyLimit, hintCache, - hintCache, - ) - - // We'll have two outpoints that will be spent throughout the test. The - // first will be spent and will not experience a reorg, while the second - // one will. 
- op1 := wire.OutPoint{Index: 1} - spendTx1 := wire.NewMsgTx(2) - spendTx1.AddTxIn(&wire.TxIn{ - PreviousOutPoint: op1, - SignatureScript: testSigScript, - }) - spendTxHash1 := spendTx1.TxHash() - expectedSpendDetails1 := &chainntnfs.SpendDetail{ - SpentOutPoint: &op1, - SpenderTxHash: &spendTxHash1, - SpendingTx: spendTx1, - SpenderInputIndex: 0, - SpendingHeight: startingHeight + 1, - } - - op2 := wire.OutPoint{Index: 2} - spendTx2 := wire.NewMsgTx(2) - spendTx2.AddTxIn(&wire.TxIn{ - PreviousOutPoint: chainntnfs.ZeroOutPoint, - SignatureScript: testSigScript, - }) - spendTx2.AddTxIn(&wire.TxIn{ - PreviousOutPoint: op2, - SignatureScript: testSigScript, - }) - spendTxHash2 := spendTx2.TxHash() - - // The second outpoint will experience a reorg and get re-spent at a - // different height, so we'll need to construct the spend details for - // before and after the reorg. - expectedSpendDetails2BeforeReorg := chainntnfs.SpendDetail{ - SpentOutPoint: &op2, - SpenderTxHash: &spendTxHash2, - SpendingTx: spendTx2, - SpenderInputIndex: 1, - SpendingHeight: startingHeight + 2, - } - - // The spend details after the reorg will be exactly the same, except - // for the spend confirming at the next height. - expectedSpendDetails2AfterReorg := expectedSpendDetails2BeforeReorg - expectedSpendDetails2AfterReorg.SpendingHeight++ - - // We'll register for a spend notification for each outpoint above. - ntfn1, err := n.RegisterSpend(&op1, testRawScript, 1) - if err != nil { - t.Fatalf("unable to register spend ntfn: %v", err) - } - - ntfn2, err := n.RegisterSpend(&op2, testRawScript, 1) - if err != nil { - t.Fatalf("unable to register spend ntfn: %v", err) - } - - // We'll extend the chain by connecting a new block at tip. This block - // will only contain the spending transaction of the first outpoint. 
- block1 := btcutil.NewBlock(&wire.MsgBlock{ - Transactions: []*wire.MsgTx{spendTx1}, - }) - err = n.ConnectTip(block1.Hash(), startingHeight+1, block1.Transactions()) - if err != nil { - t.Fatalf("unable to connect block: %v", err) - } - if err := n.NotifyHeight(startingHeight + 1); err != nil { - t.Fatalf("unable to dispatch notifications: %v", err) - } - - // We should receive a spend notification for the first outpoint with - // its correct spending details. - select { - case spendDetails := <-ntfn1.Event.Spend: - assertSpendDetails(t, spendDetails, expectedSpendDetails1) - default: - t.Fatal("expected to receive spend details") - } - - // We should not, however, receive one for the second outpoint as it has - // yet to be spent. - select { - case <-ntfn2.Event.Spend: - t.Fatal("received unexpected spend notification") - default: - } - - // Now, we'll extend the chain again, this time with a block containing - // the spending transaction of the second outpoint. - block2 := btcutil.NewBlock(&wire.MsgBlock{ - Transactions: []*wire.MsgTx{spendTx2}, - }) - err = n.ConnectTip(block2.Hash(), startingHeight+2, block2.Transactions()) - if err != nil { - t.Fatalf("unable to connect block: %v", err) - } - if err := n.NotifyHeight(startingHeight + 2); err != nil { - t.Fatalf("unable to dispatch notifications: %v", err) - } - - // We should not receive another spend notification for the first - // outpoint. - select { - case <-ntfn1.Event.Spend: - t.Fatal("received unexpected spend notification") - default: - } - - // We should receive one for the second outpoint. - select { - case spendDetails := <-ntfn2.Event.Spend: - assertSpendDetails( - t, spendDetails, &expectedSpendDetails2BeforeReorg, - ) - default: - t.Fatal("expected to receive spend details") - } - - // Now, to replicate a chain reorg, we'll disconnect the block that - // contained the spending transaction of the second outpoint. 
- if err := n.DisconnectTip(startingHeight + 2); err != nil { - t.Fatalf("unable to disconnect block: %v", err) - } - - // No notifications should be dispatched for the first outpoint as it - // was spent at a previous height. - select { - case <-ntfn1.Event.Spend: - t.Fatal("received unexpected spend notification") - case <-ntfn1.Event.Reorg: - t.Fatal("received unexpected spend reorg notification") - default: - } - - // We should receive a reorg notification for the second outpoint. - select { - case <-ntfn2.Event.Spend: - t.Fatal("received unexpected spend notification") - case <-ntfn2.Event.Reorg: - default: - t.Fatal("expected spend reorg notification") - } - - // We'll now extend the chain with an empty block, to ensure that we can - // properly detect when an outpoint has been re-spent at a later height. - emptyBlock := btcutil.NewBlock(&wire.MsgBlock{}) - err = n.ConnectTip( - emptyBlock.Hash(), startingHeight+2, emptyBlock.Transactions(), - ) - if err != nil { - t.Fatalf("unable to disconnect block: %v", err) - } - if err := n.NotifyHeight(startingHeight + 2); err != nil { - t.Fatalf("unable to dispatch notifications: %v", err) - } - - // We shouldn't receive notifications for either of the outpoints. - select { - case <-ntfn1.Event.Spend: - t.Fatal("received unexpected spend notification") - case <-ntfn1.Event.Reorg: - t.Fatal("received unexpected spend reorg notification") - case <-ntfn2.Event.Spend: - t.Fatal("received unexpected spend notification") - case <-ntfn2.Event.Reorg: - t.Fatal("received unexpected spend reorg notification") - default: - } - - // Finally, extend the chain with another block containing the same - // spending transaction of the second outpoint. 
- err = n.ConnectTip( - block2.Hash(), startingHeight+3, block2.Transactions(), - ) - if err != nil { - t.Fatalf("unable to connect block: %v", err) - } - if err := n.NotifyHeight(startingHeight + 3); err != nil { - t.Fatalf("unable to dispatch notifications: %v", err) - } - - // We should now receive a spend notification once again for the second - // outpoint containing the new spend details. - select { - case spendDetails := <-ntfn2.Event.Spend: - assertSpendDetails( - t, spendDetails, &expectedSpendDetails2AfterReorg, - ) - default: - t.Fatalf("expected to receive spend notification") - } - - // Once again, we should not receive one for the first outpoint. - select { - case <-ntfn1.Event.Spend: - t.Fatal("received unexpected spend notification") - default: - } -} - -// TestTxNotifierConfirmHintCache ensures that the height hints for transactions -// are kept track of correctly with each new block connected/disconnected. This -// test also asserts that the height hints are not updated until the simulated -// historical dispatches have returned, and we know the transactions aren't -// already in the chain. -func TestTxNotifierConfirmHintCache(t *testing.T) { - t.Parallel() - - const ( - startingHeight = 200 - txDummyHeight = 201 - tx1Height = 202 - tx2Height = 203 - ) - - // Initialize our TxNotifier instance backed by a height hint cache. - hintCache := newMockHintCache() - n := chainntnfs.NewTxNotifier( - startingHeight, chainntnfs.ReorgSafetyLimit, hintCache, - hintCache, - ) - - // Create two test transactions and register them for notifications. 
- tx1 := wire.MsgTx{Version: 1} - tx1.AddTxOut(&wire.TxOut{PkScript: testRawScript}) - tx1Hash := tx1.TxHash() - ntfn1, err := n.RegisterConf(&tx1Hash, testRawScript, 1, 1) - if err != nil { - t.Fatalf("unable to register tx1: %v", err) - } - - tx2 := wire.MsgTx{Version: 2} - tx2.AddTxOut(&wire.TxOut{PkScript: testRawScript}) - tx2Hash := tx2.TxHash() - ntfn2, err := n.RegisterConf(&tx2Hash, testRawScript, 2, 1) - if err != nil { - t.Fatalf("unable to register tx2: %v", err) - } - - // Both transactions should not have a height hint set, as RegisterConf - // should not alter the cache state. - _, err = hintCache.QueryConfirmHint(ntfn1.HistoricalDispatch.ConfRequest) - if !chainntnfs.ErrConfirmHintNotFound.Is(err) { - t.Fatalf("unexpected error when querying for height hint "+ - "want: %v, got %v", - chainntnfs.ErrConfirmHintNotFound, err) - } - - _, err = hintCache.QueryConfirmHint(ntfn2.HistoricalDispatch.ConfRequest) - if !chainntnfs.ErrConfirmHintNotFound.Is(err) { - t.Fatalf("unexpected error when querying for height hint "+ - "want: %v, got %v", - chainntnfs.ErrConfirmHintNotFound, err) - } - - // Create a new block that will include the dummy transaction and extend - // the chain. - txDummy := wire.MsgTx{Version: 3} - block1 := btcutil.NewBlock(&wire.MsgBlock{ - Transactions: []*wire.MsgTx{&txDummy}, - }) - - err = n.ConnectTip(block1.Hash(), txDummyHeight, block1.Transactions()) - if err != nil { - t.Fatalf("Failed to connect block: %v", err) - } - if err := n.NotifyHeight(txDummyHeight); err != nil { - t.Fatalf("unable to dispatch notifications: %v", err) - } - - // Since UpdateConfDetails has not been called for either transaction, - // the height hints should remain unchanged. This simulates blocks - // confirming while the historical dispatch is processing the - // registration. 
- hint, err := hintCache.QueryConfirmHint(ntfn1.HistoricalDispatch.ConfRequest) - if !chainntnfs.ErrConfirmHintNotFound.Is(err) { - t.Fatalf("unexpected error when querying for height hint "+ - "want: %v, got %v", - chainntnfs.ErrConfirmHintNotFound, err) - } - - hint, err = hintCache.QueryConfirmHint(ntfn2.HistoricalDispatch.ConfRequest) - if !chainntnfs.ErrConfirmHintNotFound.Is(err) { - t.Fatalf("unexpected error when querying for height hint "+ - "want: %v, got %v", - chainntnfs.ErrConfirmHintNotFound, err) - } - - // Now, update the conf details reporting that the neither txn was found - // in the historical dispatch. - err = n.UpdateConfDetails(ntfn1.HistoricalDispatch.ConfRequest, nil) - if err != nil { - t.Fatalf("unable to update conf details: %v", err) - } - err = n.UpdateConfDetails(ntfn2.HistoricalDispatch.ConfRequest, nil) - if err != nil { - t.Fatalf("unable to update conf details: %v", err) - } - - // We'll create another block that will include the first transaction - // and extend the chain. - block2 := btcutil.NewBlock(&wire.MsgBlock{ - Transactions: []*wire.MsgTx{&tx1}, - }) - - err = n.ConnectTip(block2.Hash(), tx1Height, block2.Transactions()) - if err != nil { - t.Fatalf("Failed to connect block: %v", err) - } - if err := n.NotifyHeight(tx1Height); err != nil { - t.Fatalf("unable to dispatch notifications: %v", err) - } - - // Now that both notifications are waiting at tip for confirmations, - // they should have their height hints updated to the latest block - // height. 
- hint, err = hintCache.QueryConfirmHint(ntfn1.HistoricalDispatch.ConfRequest) - if err != nil { - t.Fatalf("unable to query for hint: %v", err) - } - if hint != tx1Height { - t.Fatalf("expected hint %d, got %d", - tx1Height, hint) - } - - hint, err = hintCache.QueryConfirmHint(ntfn2.HistoricalDispatch.ConfRequest) - if err != nil { - t.Fatalf("unable to query for hint: %v", err) - } - if hint != tx1Height { - t.Fatalf("expected hint %d, got %d", - tx2Height, hint) - } - - // Next, we'll create another block that will include the second - // transaction and extend the chain. - block3 := btcutil.NewBlock(&wire.MsgBlock{ - Transactions: []*wire.MsgTx{&tx2}, - }) - - err = n.ConnectTip(block3.Hash(), tx2Height, block3.Transactions()) - if err != nil { - t.Fatalf("Failed to connect block: %v", err) - } - if err := n.NotifyHeight(tx2Height); err != nil { - t.Fatalf("unable to dispatch notifications: %v", err) - } - - // The height hint for the first transaction should remain the same. - hint, err = hintCache.QueryConfirmHint(ntfn1.HistoricalDispatch.ConfRequest) - if err != nil { - t.Fatalf("unable to query for hint: %v", err) - } - if hint != tx1Height { - t.Fatalf("expected hint %d, got %d", - tx1Height, hint) - } - - // The height hint for the second transaction should now be updated to - // reflect its confirmation. - hint, err = hintCache.QueryConfirmHint(ntfn2.HistoricalDispatch.ConfRequest) - if err != nil { - t.Fatalf("unable to query for hint: %v", err) - } - if hint != tx2Height { - t.Fatalf("expected hint %d, got %d", - tx2Height, hint) - } - - // Finally, we'll attempt do disconnect the last block in order to - // simulate a chain reorg. - if err := n.DisconnectTip(tx2Height); err != nil { - t.Fatalf("Failed to disconnect block: %v", err) - } - - // This should update the second transaction's height hint within the - // cache to the previous height. 
- hint, err = hintCache.QueryConfirmHint(ntfn2.HistoricalDispatch.ConfRequest) - if err != nil { - t.Fatalf("unable to query for hint: %v", err) - } - if hint != tx1Height { - t.Fatalf("expected hint %d, got %d", - tx1Height, hint) - } - - // The first transaction's height hint should remain at the original - // confirmation height. - hint, err = hintCache.QueryConfirmHint(ntfn2.HistoricalDispatch.ConfRequest) - if err != nil { - t.Fatalf("unable to query for hint: %v", err) - } - if hint != tx1Height { - t.Fatalf("expected hint %d, got %d", - tx1Height, hint) - } -} - -// TestTxNotifierSpendHintCache ensures that the height hints for outpoints are -// kept track of correctly with each new block connected/disconnected. This test -// also asserts that the height hints are not updated until the simulated -// historical dispatches have returned, and we know the outpoints haven't -// already been spent in the chain. -func TestTxNotifierSpendHintCache(t *testing.T) { - t.Parallel() - - const ( - startingHeight = 200 - dummyHeight = 201 - op1Height = 202 - op2Height = 203 - ) - - // Intiialize our TxNotifier instance backed by a height hint cache. - hintCache := newMockHintCache() - n := chainntnfs.NewTxNotifier( - startingHeight, chainntnfs.ReorgSafetyLimit, hintCache, - hintCache, - ) - - // Create two test outpoints and register them for spend notifications. - op1 := wire.OutPoint{Index: 1} - ntfn1, err := n.RegisterSpend(&op1, testRawScript, 1) - if err != nil { - t.Fatalf("unable to register spend for op1: %v", err) - } - op2 := wire.OutPoint{Index: 2} - ntfn2, err := n.RegisterSpend(&op2, testRawScript, 1) - if err != nil { - t.Fatalf("unable to register spend for op2: %v", err) - } - - // Both outpoints should not have a spend hint set upon registration, as - // we must first determine whether they have already been spent in the - // chain. 
- _, err = hintCache.QuerySpendHint(ntfn1.HistoricalDispatch.SpendRequest) - if !chainntnfs.ErrSpendHintNotFound.Is(err) { - t.Fatalf("unexpected error when querying for height hint "+ - "expected: %v, got %v", chainntnfs.ErrSpendHintNotFound, - err) - } - _, err = hintCache.QuerySpendHint(ntfn2.HistoricalDispatch.SpendRequest) - if !chainntnfs.ErrSpendHintNotFound.Is(err) { - t.Fatalf("unexpected error when querying for height hint "+ - "expected: %v, got %v", chainntnfs.ErrSpendHintNotFound, - err) - } - - // Create a new empty block and extend the chain. - emptyBlock := btcutil.NewBlock(&wire.MsgBlock{}) - err = n.ConnectTip( - emptyBlock.Hash(), dummyHeight, emptyBlock.Transactions(), - ) - if err != nil { - t.Fatalf("unable to connect block: %v", err) - } - if err := n.NotifyHeight(dummyHeight); err != nil { - t.Fatalf("unable to dispatch notifications: %v", err) - } - - // Since we haven't called UpdateSpendDetails on any of the test - // outpoints, this implies that there is a still a pending historical - // rescan for them, so their spend hints should not be created/updated. - _, err = hintCache.QuerySpendHint(ntfn1.HistoricalDispatch.SpendRequest) - if !chainntnfs.ErrSpendHintNotFound.Is(err) { - t.Fatalf("unexpected error when querying for height hint "+ - "expected: %v, got %v", chainntnfs.ErrSpendHintNotFound, - err) - } - _, err = hintCache.QuerySpendHint(ntfn2.HistoricalDispatch.SpendRequest) - if !chainntnfs.ErrSpendHintNotFound.Is(err) { - t.Fatalf("unexpected error when querying for height hint "+ - "expected: %v, got %v", chainntnfs.ErrSpendHintNotFound, - err) - } - - // Now, we'll simulate that their historical rescans have finished by - // calling UpdateSpendDetails. This should allow their spend hints to be - // updated upon every block connected/disconnected. 
- err = n.UpdateSpendDetails(ntfn1.HistoricalDispatch.SpendRequest, nil) - if err != nil { - t.Fatalf("unable to update spend details: %v", err) - } - err = n.UpdateSpendDetails(ntfn2.HistoricalDispatch.SpendRequest, nil) - if err != nil { - t.Fatalf("unable to update spend details: %v", err) - } - - // We'll create a new block that only contains the spending transaction - // of the first outpoint. - spendTx1 := wire.NewMsgTx(2) - spendTx1.AddTxIn(&wire.TxIn{ - PreviousOutPoint: op1, - SignatureScript: testSigScript, - }) - block1 := btcutil.NewBlock(&wire.MsgBlock{ - Transactions: []*wire.MsgTx{spendTx1}, - }) - err = n.ConnectTip(block1.Hash(), op1Height, block1.Transactions()) - if err != nil { - t.Fatalf("unable to connect block: %v", err) - } - if err := n.NotifyHeight(op1Height); err != nil { - t.Fatalf("unable to dispatch notifications: %v", err) - } - - // Both outpoints should have their spend hints reflect the height of - // the new block being connected due to the first outpoint being spent - // at this height, and the second outpoint still being unspent. - op1Hint, err := hintCache.QuerySpendHint(ntfn1.HistoricalDispatch.SpendRequest) - if err != nil { - t.Fatalf("unable to query for spend hint of op1: %v", err) - } - if op1Hint != op1Height { - t.Fatalf("expected hint %d, got %d", op1Height, op1Hint) - } - op2Hint, err := hintCache.QuerySpendHint(ntfn2.HistoricalDispatch.SpendRequest) - if err != nil { - t.Fatalf("unable to query for spend hint of op2: %v", err) - } - if op2Hint != op1Height { - t.Fatalf("expected hint %d, got %d", op1Height, op2Hint) - } - - // Then, we'll create another block that spends the second outpoint. 
- spendTx2 := wire.NewMsgTx(2) - spendTx2.AddTxIn(&wire.TxIn{ - PreviousOutPoint: op2, - SignatureScript: testSigScript, - }) - block2 := btcutil.NewBlock(&wire.MsgBlock{ - Transactions: []*wire.MsgTx{spendTx2}, - }) - err = n.ConnectTip(block2.Hash(), op2Height, block2.Transactions()) - if err != nil { - t.Fatalf("unable to connect block: %v", err) - } - if err := n.NotifyHeight(op2Height); err != nil { - t.Fatalf("unable to dispatch notifications: %v", err) - } - - // Only the second outpoint should have its spend hint updated due to - // being spent within the new block. The first outpoint's spend hint - // should remain the same as it's already been spent before. - op1Hint, err = hintCache.QuerySpendHint(ntfn1.HistoricalDispatch.SpendRequest) - if err != nil { - t.Fatalf("unable to query for spend hint of op1: %v", err) - } - if op1Hint != op1Height { - t.Fatalf("expected hint %d, got %d", op1Height, op1Hint) - } - op2Hint, err = hintCache.QuerySpendHint(ntfn2.HistoricalDispatch.SpendRequest) - if err != nil { - t.Fatalf("unable to query for spend hint of op2: %v", err) - } - if op2Hint != op2Height { - t.Fatalf("expected hint %d, got %d", op2Height, op2Hint) - } - - // Finally, we'll attempt do disconnect the last block in order to - // simulate a chain reorg. - if err := n.DisconnectTip(op2Height); err != nil { - t.Fatalf("unable to disconnect block: %v", err) - } - - // This should update the second outpoint's spend hint within the cache - // to the previous height, as that's where its spending transaction was - // included in within the chain. The first outpoint's spend hint should - // remain the same. 
- op1Hint, err = hintCache.QuerySpendHint(ntfn1.HistoricalDispatch.SpendRequest) - if err != nil { - t.Fatalf("unable to query for spend hint of op1: %v", err) - } - if op1Hint != op1Height { - t.Fatalf("expected hint %d, got %d", op1Height, op1Hint) - } - op2Hint, err = hintCache.QuerySpendHint(ntfn2.HistoricalDispatch.SpendRequest) - if err != nil { - t.Fatalf("unable to query for spend hint of op2: %v", err) - } - if op2Hint != op1Height { - t.Fatalf("expected hint %d, got %d", op1Height, op2Hint) - } -} - -// TestTxNotifierSpendHinthistoricalRescan checks that the height hints and -// spend notifications behave as expected when a spend is found at tip during a -// historical rescan. -func TestTxNotifierSpendDuringHistoricalRescan(t *testing.T) { - t.Parallel() - - const ( - startingHeight = 200 - reorgSafety = 10 - ) - - // Intiialize our TxNotifier instance backed by a height hint cache. - hintCache := newMockHintCache() - n := chainntnfs.NewTxNotifier( - startingHeight, reorgSafety, hintCache, hintCache, - ) - - // Create a test outpoint and register it for spend notifications. - op1 := wire.OutPoint{Index: 1} - ntfn1, err := n.RegisterSpend(&op1, testRawScript, 1) - if err != nil { - t.Fatalf("unable to register spend for op1: %v", err) - } - - // A historical rescan should be initiated from the height hint to the - // current height. - if ntfn1.HistoricalDispatch.StartHeight != 1 { - t.Fatalf("expected historical dispatch to start at height hint") - } - - if ntfn1.HistoricalDispatch.EndHeight != startingHeight { - t.Fatalf("expected historical dispatch to end at current height") - } - - // It should not have a spend hint set upon registration, as we must - // first determine whether it has already been spent in the chain. 
- _, err = hintCache.QuerySpendHint(ntfn1.HistoricalDispatch.SpendRequest) - if !chainntnfs.ErrSpendHintNotFound.Is(err) { - t.Fatalf("unexpected error when querying for height hint "+ - "expected: %v, got %v", chainntnfs.ErrSpendHintNotFound, - err) - } - - // Create a new empty block and extend the chain. - height := uint32(startingHeight) + 1 - emptyBlock := btcutil.NewBlock(&wire.MsgBlock{}) - err = n.ConnectTip( - emptyBlock.Hash(), height, emptyBlock.Transactions(), - ) - if err != nil { - t.Fatalf("unable to connect block: %v", err) - } - if err := n.NotifyHeight(height); err != nil { - t.Fatalf("unable to dispatch notifications: %v", err) - } - - // Since we haven't called UpdateSpendDetails yet, there should be no - // spend hint found. - _, err = hintCache.QuerySpendHint(ntfn1.HistoricalDispatch.SpendRequest) - if !chainntnfs.ErrSpendHintNotFound.Is(err) { - t.Fatalf("unexpected error when querying for height hint "+ - "expected: %v, got %v", chainntnfs.ErrSpendHintNotFound, - err) - } - - // Simulate a bunch of blocks being mined while the historical rescan - // is still in progress. We make sure to not mine more than reorgSafety - // blocks after the spend, since it will be forgotten then. - var spendHeight uint32 - for i := 0; i < reorgSafety; i++ { - height++ - - // Let the outpoint we are watching be spent midway. - var block *btcutil.Block - if i == 5 { - // We'll create a new block that only contains the - // spending transaction of the outpoint. - spendTx1 := wire.NewMsgTx(2) - spendTx1.AddTxIn(&wire.TxIn{ - PreviousOutPoint: op1, - SignatureScript: testSigScript, - }) - block = btcutil.NewBlock(&wire.MsgBlock{ - Transactions: []*wire.MsgTx{spendTx1}, - }) - spendHeight = height - } else { - // Otherwise we just create an empty block. 
- block = btcutil.NewBlock(&wire.MsgBlock{}) - } - - err = n.ConnectTip( - block.Hash(), height, block.Transactions(), - ) - if err != nil { - t.Fatalf("unable to connect block: %v", err) - } - if err := n.NotifyHeight(height); err != nil { - t.Fatalf("unable to dispatch notifications: %v", err) - } - } - - // Check that the height hint was set to the spending block. - op1Hint, err := hintCache.QuerySpendHint( - ntfn1.HistoricalDispatch.SpendRequest, - ) - if err != nil { - t.Fatalf("unable to query for spend hint of op1: %v", err) - } - if op1Hint != spendHeight { - t.Fatalf("expected hint %d, got %d", spendHeight, op1Hint) - } - - // We should be getting notified about the spend at this point. - select { - case <-ntfn1.Event.Spend: - default: - t.Fatal("expected to receive spend notification") - } - - // Now, we'll simulate that the historical rescan finished by - // calling UpdateSpendDetails. Since a the spend actually happened at - // tip while the rescan was in progress, the height hint should not be - // updated to the latest height, but stay at the spend height. - err = n.UpdateSpendDetails(ntfn1.HistoricalDispatch.SpendRequest, nil) - if err != nil { - t.Fatalf("unable to update spend details: %v", err) - } - - op1Hint, err = hintCache.QuerySpendHint( - ntfn1.HistoricalDispatch.SpendRequest, - ) - if err != nil { - t.Fatalf("unable to query for spend hint of op1: %v", err) - } - if op1Hint != spendHeight { - t.Fatalf("expected hint %d, got %d", spendHeight, op1Hint) - } - - // Then, we'll create another block that spends a second outpoint. 
- op2 := wire.OutPoint{Index: 2} - spendTx2 := wire.NewMsgTx(2) - spendTx2.AddTxIn(&wire.TxIn{ - PreviousOutPoint: op2, - SignatureScript: testSigScript, - }) - height++ - block2 := btcutil.NewBlock(&wire.MsgBlock{ - Transactions: []*wire.MsgTx{spendTx2}, - }) - err = n.ConnectTip(block2.Hash(), height, block2.Transactions()) - if err != nil { - t.Fatalf("unable to connect block: %v", err) - } - if err := n.NotifyHeight(height); err != nil { - t.Fatalf("unable to dispatch notifications: %v", err) - } - - // The outpoint's spend hint should remain the same as it's already - // been spent before. - op1Hint, err = hintCache.QuerySpendHint(ntfn1.HistoricalDispatch.SpendRequest) - if err != nil { - t.Fatalf("unable to query for spend hint of op1: %v", err) - } - if op1Hint != spendHeight { - t.Fatalf("expected hint %d, got %d", spendHeight, op1Hint) - } - - // Now mine enough blocks for the spend notification to be forgotten. - for i := 0; i < 2*reorgSafety; i++ { - height++ - block := btcutil.NewBlock(&wire.MsgBlock{}) - - err := n.ConnectTip( - block.Hash(), height, block.Transactions(), - ) - if err != nil { - t.Fatalf("unable to connect block: %v", err) - } - if err := n.NotifyHeight(height); err != nil { - t.Fatalf("unable to dispatch notifications: %v", err) - } - } - - // Attempting to update spend details at this point should fail, since - // the spend request should be removed. This is to ensure the height - // hint won't be overwritten if the historical rescan finishes after - // the spend request has been notified and removed because it has - // matured. - err = n.UpdateSpendDetails(ntfn1.HistoricalDispatch.SpendRequest, nil) - if err == nil { - t.Fatalf("expcted updating spend details to fail") - } - - // Finally, check that the height hint is still there, unchanged. 
- op1Hint, err = hintCache.QuerySpendHint(ntfn1.HistoricalDispatch.SpendRequest) - if err != nil { - t.Fatalf("unable to query for spend hint of op1: %v", err) - } - if op1Hint != spendHeight { - t.Fatalf("expected hint %d, got %d", spendHeight, op1Hint) - } -} - -// TestTxNotifierNtfnDone ensures that a notification is sent to registered -// clients through the Done channel once the notification request is no longer -// under the risk of being reorged out of the chain. -func TestTxNotifierNtfnDone(t *testing.T) { - t.Parallel() - - hintCache := newMockHintCache() - const reorgSafetyLimit = 100 - n := chainntnfs.NewTxNotifier(10, reorgSafetyLimit, hintCache, hintCache) - - // We'll start by creating two notification requests: one confirmation - // and one spend. - confNtfn, err := n.RegisterConf(&chainntnfs.ZeroHash, testRawScript, 1, 1) - if err != nil { - t.Fatalf("unable to register conf ntfn: %v", err) - } - spendNtfn, err := n.RegisterSpend(&chainntnfs.ZeroOutPoint, testRawScript, 1) - if err != nil { - t.Fatalf("unable to register spend: %v", err) - } - - // We'll create two transactions that will satisfy the notification - // requests above and include them in the next block of the chain. - tx := wire.NewMsgTx(1) - tx.AddTxOut(&wire.TxOut{PkScript: testRawScript}) - spendTx := wire.NewMsgTx(1) - spendTx.AddTxIn(&wire.TxIn{ - PreviousOutPoint: wire.OutPoint{Index: 1}, - SignatureScript: testSigScript, - }) - block := btcutil.NewBlock(&wire.MsgBlock{ - Transactions: []*wire.MsgTx{tx, spendTx}, - }) - - err = n.ConnectTip(block.Hash(), 11, block.Transactions()) - if err != nil { - t.Fatalf("unable to connect block: %v", err) - } - if err := n.NotifyHeight(11); err != nil { - t.Fatalf("unable to dispatch notifications: %v", err) - } - - // With the chain extended, we should see notifications dispatched for - // both requests. 
- select { - case <-confNtfn.Event.Confirmed: - default: - t.Fatal("expected to receive confirmation notification") - } - - select { - case <-spendNtfn.Event.Spend: - default: - t.Fatal("expected to receive spend notification") - } - - // The done notifications should not be dispatched yet as the requests - // are still under the risk of being reorged out the chain. - select { - case <-confNtfn.Event.Done: - t.Fatal("received unexpected done notification for confirmation") - case <-spendNtfn.Event.Done: - t.Fatal("received unexpected done notification for spend") - default: - } - - // Now, we'll disconnect the block at tip to simulate a reorg. The reorg - // notifications should be dispatched to the respective clients. - if err := n.DisconnectTip(11); err != nil { - t.Fatalf("unable to disconnect block: %v", err) - } - - select { - case <-confNtfn.Event.NegativeConf: - default: - t.Fatal("expected to receive reorg notification for confirmation") - } - - select { - case <-spendNtfn.Event.Reorg: - default: - t.Fatal("expected to receive reorg notification for spend") - } - - // We'll reconnect the block that satisfies both of these requests. - // We should see notifications dispatched for both once again. - err = n.ConnectTip(block.Hash(), 11, block.Transactions()) - if err != nil { - t.Fatalf("unable to connect block: %v", err) - } - if err := n.NotifyHeight(11); err != nil { - t.Fatalf("unable to dispatch notifications: %v", err) - } - - select { - case <-confNtfn.Event.Confirmed: - default: - t.Fatal("expected to receive confirmation notification") - } - - select { - case <-spendNtfn.Event.Spend: - default: - t.Fatal("expected to receive spend notification") - } - - // Finally, we'll extend the chain with blocks until the requests are no - // longer under the risk of being reorged out of the chain. We should - // expect the done notifications to be dispatched. 
- nextHeight := uint32(12) - for i := nextHeight; i < nextHeight+reorgSafetyLimit; i++ { - dummyBlock := btcutil.NewBlock(&wire.MsgBlock{}) - if err := n.ConnectTip(dummyBlock.Hash(), i, nil); err != nil { - t.Fatalf("unable to connect block: %v", err) - } - } - - select { - case <-confNtfn.Event.Done: - default: - t.Fatal("expected to receive done notification for confirmation") - } - - select { - case <-spendNtfn.Event.Done: - default: - t.Fatal("expected to receive done notification for spend") - } -} - -// TestTxNotifierTearDown ensures that the TxNotifier properly alerts clients -// that it is shutting down and will be unable to deliver notifications. -func TestTxNotifierTearDown(t *testing.T) { - t.Parallel() - - hintCache := newMockHintCache() - n := chainntnfs.NewTxNotifier( - 10, chainntnfs.ReorgSafetyLimit, hintCache, hintCache, - ) - - // To begin the test, we'll register for a confirmation and spend - // notification. - confNtfn, err := n.RegisterConf(&chainntnfs.ZeroHash, testRawScript, 1, 1) - if err != nil { - t.Fatalf("unable to register conf ntfn: %v", err) - } - spendNtfn, err := n.RegisterSpend(&chainntnfs.ZeroOutPoint, testRawScript, 1) - if err != nil { - t.Fatalf("unable to register spend ntfn: %v", err) - } - - // With the notifications registered, we'll now tear down the notifier. - // The notification channels should be closed for notifications, whether - // they have been dispatched or not, so we should not expect to receive - // any more updates. 
- n.TearDown() - - select { - case _, ok := <-confNtfn.Event.Confirmed: - if ok { - t.Fatal("expected closed Confirmed channel for conf ntfn") - } - case _, ok := <-confNtfn.Event.Updates: - if ok { - t.Fatal("expected closed Updates channel for conf ntfn") - } - case _, ok := <-confNtfn.Event.NegativeConf: - if ok { - t.Fatal("expected closed NegativeConf channel for conf ntfn") - } - case _, ok := <-spendNtfn.Event.Spend: - if ok { - t.Fatal("expected closed Spend channel for spend ntfn") - } - case _, ok := <-spendNtfn.Event.Reorg: - if ok { - t.Fatalf("expected closed Reorg channel for spend ntfn") - } - default: - t.Fatalf("expected closed notification channels for all ntfns") - } - - // Now that the notifier is torn down, we should no longer be able to - // register notification requests. - _, err = n.RegisterConf(&chainntnfs.ZeroHash, testRawScript, 1, 1) - if err == nil { - t.Fatal("expected confirmation registration to fail") - } - _, err = n.RegisterSpend(&chainntnfs.ZeroOutPoint, testRawScript, 1) - if err == nil { - t.Fatal("expected spend registration to fail") - } -} - -func assertConfDetails(t *testing.T, result, expected *chainntnfs.TxConfirmation) { - t.Helper() - - if result.BlockHeight != expected.BlockHeight { - t.Fatalf("Incorrect block height in confirmation details: "+ - "expected %d, got %d", expected.BlockHeight, - result.BlockHeight) - } - if !result.BlockHash.IsEqual(expected.BlockHash) { - t.Fatalf("Incorrect block hash in confirmation details: "+ - "expected %d, got %d", expected.BlockHash, - result.BlockHash) - } - if result.TxIndex != expected.TxIndex { - t.Fatalf("Incorrect tx index in confirmation details: "+ - "expected %d, got %d", expected.TxIndex, result.TxIndex) - } - if result.Tx.TxHash() != expected.Tx.TxHash() { - t.Fatalf("expected tx hash %v, got %v", expected.Tx.TxHash(), - result.Tx.TxHash()) - } -} - -func assertSpendDetails(t *testing.T, result, expected *chainntnfs.SpendDetail) { - t.Helper() - - if 
*result.SpentOutPoint != *expected.SpentOutPoint { - t.Fatalf("expected spent outpoint %v, got %v", - expected.SpentOutPoint, result.SpentOutPoint) - } - if !result.SpenderTxHash.IsEqual(expected.SpenderTxHash) { - t.Fatalf("expected spender tx hash %v, got %v", - expected.SpenderTxHash, result.SpenderTxHash) - } - if result.SpenderInputIndex != expected.SpenderInputIndex { - t.Fatalf("expected spender input index %d, got %d", - expected.SpenderInputIndex, result.SpenderInputIndex) - } - if result.SpendingHeight != expected.SpendingHeight { - t.Fatalf("expected spending height %d, got %d", - expected.SpendingHeight, result.SpendingHeight) - } -} diff --git a/lnd/chainreg/chaincode.go b/lnd/chainreg/chaincode.go deleted file mode 100644 index 5a75865d..00000000 --- a/lnd/chainreg/chaincode.go +++ /dev/null @@ -1,29 +0,0 @@ -package chainreg - -// ChainCode is an enum-like structure for keeping track of the chains -// currently supported within lnd. -type ChainCode uint32 - -const ( - // BitcoinChain is Bitcoin's chain. - BitcoinChain ChainCode = iota - - // LitecoinChain is Litecoin's chain. - LitecoinChain - - PktChain -) - -// String returns a string representation of the target ChainCode. 
-func (c ChainCode) String() string { - switch c { - case BitcoinChain: - return "bitcoin" - case LitecoinChain: - return "litecoin" - case PktChain: - return "pkt" - default: - return "kekcoin" - } -} diff --git a/lnd/chainreg/chainparams.go b/lnd/chainreg/chainparams.go deleted file mode 100644 index 69e732c9..00000000 --- a/lnd/chainreg/chainparams.go +++ /dev/null @@ -1,153 +0,0 @@ -package chainreg - -import ( - litecoinCfg "github.com/ltcsuite/ltcd/chaincfg" - litecoinWire "github.com/ltcsuite/ltcd/wire" - "github.com/pkt-cash/pktd/chaincfg" - bitcoinCfg "github.com/pkt-cash/pktd/chaincfg" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/lnd/keychain" - "github.com/pkt-cash/pktd/wire/protocol" -) - -// BitcoinNetParams couples the p2p parameters of a network with the -// corresponding RPC port of a daemon running on the particular network. -type BitcoinNetParams struct { - *bitcoinCfg.Params - RPCPort string - CoinType uint32 -} - -// LitecoinNetParams couples the p2p parameters of a network with the -// corresponding RPC port of a daemon running on the particular network. -type LitecoinNetParams struct { - *litecoinCfg.Params - RPCPort string - CoinType uint32 -} - -// BitcoinTestNetParams contains parameters specific to the 3rd version of the -// test network. -var BitcoinTestNetParams = BitcoinNetParams{ - Params: &bitcoinCfg.TestNet3Params, - RPCPort: "18334", - CoinType: keychain.CoinTypeTestnet, -} - -// BitcoinMainNetParams contains parameters specific to the current Bitcoin -// mainnet. -var BitcoinMainNetParams = BitcoinNetParams{ - Params: &bitcoinCfg.MainNetParams, - RPCPort: "8334", - CoinType: keychain.CoinTypeBitcoin, -} - -// BitcoinSimNetParams contains parameters specific to the simulation test -// network. 
-var BitcoinSimNetParams = BitcoinNetParams{ - Params: &bitcoinCfg.SimNetParams, - RPCPort: "18556", - CoinType: keychain.CoinTypeTestnet, -} - -// LitecoinSimNetParams contains parameters specific to the simulation test -// network. -var LitecoinSimNetParams = LitecoinNetParams{ - Params: &litecoinCfg.TestNet4Params, - RPCPort: "18556", - CoinType: keychain.CoinTypeTestnet, -} - -// LitecoinTestNetParams contains parameters specific to the 4th version of the -// test network. -var LitecoinTestNetParams = LitecoinNetParams{ - Params: &litecoinCfg.TestNet4Params, - RPCPort: "19334", - CoinType: keychain.CoinTypeTestnet, -} - -// LitecoinMainNetParams contains the parameters specific to the current -// Litecoin mainnet. -var LitecoinMainNetParams = LitecoinNetParams{ - Params: &litecoinCfg.MainNetParams, - RPCPort: "9334", - CoinType: keychain.CoinTypeLitecoin, -} - -// LitecoinRegTestNetParams contains parameters specific to a local litecoin -// regtest network. -var LitecoinRegTestNetParams = LitecoinNetParams{ - Params: &litecoinCfg.RegressionNetParams, - RPCPort: "18334", - CoinType: keychain.CoinTypeTestnet, -} - -// BitcoinRegTestNetParams contains parameters specific to a local bitcoin -// regtest network. -var BitcoinRegTestNetParams = BitcoinNetParams{ - Params: &bitcoinCfg.RegressionNetParams, - RPCPort: "18334", - CoinType: keychain.CoinTypeTestnet, -} - -// BitcoinMainNetParams contains parameters specific to the current Bitcoin -// mainnet. -var PktMainNetParams = BitcoinNetParams{ - Params: &bitcoinCfg.PktMainNetParams, - RPCPort: "8334", - CoinType: keychain.CoinTypeBitcoin, -} - -// ApplyLitecoinParams applies the relevant chain configuration parameters that -// differ for litecoin to the chain parameters typed for btcsuite derivation. -// This function is used in place of using something like interface{} to -// abstract over _which_ chain (or fork) the parameters are for. 
-func ApplyLitecoinParams(params *BitcoinNetParams, - litecoinParams *LitecoinNetParams) { - - params.Name = litecoinParams.Name - params.Net = protocol.BitcoinNet(litecoinParams.Net) - params.DefaultPort = litecoinParams.DefaultPort - params.CoinbaseMaturity = litecoinParams.CoinbaseMaturity - - copy(params.GenesisHash[:], litecoinParams.GenesisHash[:]) - - // Address encoding magics - params.PubKeyHashAddrID = litecoinParams.PubKeyHashAddrID - params.ScriptHashAddrID = litecoinParams.ScriptHashAddrID - params.PrivateKeyID = litecoinParams.PrivateKeyID - params.WitnessPubKeyHashAddrID = litecoinParams.WitnessPubKeyHashAddrID - params.WitnessScriptHashAddrID = litecoinParams.WitnessScriptHashAddrID - params.Bech32HRPSegwit = litecoinParams.Bech32HRPSegwit - - copy(params.HDPrivateKeyID[:], litecoinParams.HDPrivateKeyID[:]) - copy(params.HDPublicKeyID[:], litecoinParams.HDPublicKeyID[:]) - - params.HDCoinType = litecoinParams.HDCoinType - - checkPoints := make([]chaincfg.Checkpoint, len(litecoinParams.Checkpoints)) - for i := 0; i < len(litecoinParams.Checkpoints); i++ { - var chainHash chainhash.Hash - copy(chainHash[:], litecoinParams.Checkpoints[i].Hash[:]) - - checkPoints[i] = chaincfg.Checkpoint{ - Height: litecoinParams.Checkpoints[i].Height, - Hash: &chainHash, - } - } - params.Checkpoints = checkPoints - - params.RPCPort = litecoinParams.RPCPort - params.CoinType = litecoinParams.CoinType -} - -// IsTestnet tests if the givern params correspond to a testnet -// parameter configuration. 
-func IsTestnet(params *BitcoinNetParams) bool { - switch params.Params.Net { - case protocol.BitcoinNet(litecoinWire.TestNet4): - return true - default: - return false - } -} diff --git a/lnd/chainreg/chainregistry.go b/lnd/chainreg/chainregistry.go deleted file mode 100644 index a8b611d1..00000000 --- a/lnd/chainreg/chainregistry.go +++ /dev/null @@ -1,751 +0,0 @@ -package chainreg - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "strings" - "sync" - "time" - - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/lnd/chainntnfs" - "github.com/pkt-cash/pktd/lnd/chainntnfs/btcdnotify" - "github.com/pkt-cash/pktd/lnd/chainntnfs/neutrinonotify" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/htlcswitch" - "github.com/pkt-cash/pktd/lnd/input" - "github.com/pkt-cash/pktd/lnd/keychain" - "github.com/pkt-cash/pktd/lnd/lncfg" - "github.com/pkt-cash/pktd/lnd/lnwallet" - "github.com/pkt-cash/pktd/lnd/lnwallet/btcwallet" - "github.com/pkt-cash/pktd/lnd/lnwallet/chainfee" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/routing/chainview" - "github.com/pkt-cash/pktd/neutrino" - "github.com/pkt-cash/pktd/pktlog/log" - "github.com/pkt-cash/pktd/pktwallet/chain" - "github.com/pkt-cash/pktd/pktwallet/wallet" - "github.com/pkt-cash/pktd/rpcclient" -) - -// Config houses necessary fields that a chainControl instance needs to -// function. -type Config struct { - // Bitcoin defines settings for the Bitcoin chain. - Bitcoin *lncfg.Chain - - // Litecoin defines settings for the Litecoin chain. - Litecoin *lncfg.Chain - - Pkt *lncfg.Chain - - // PrimaryChain is a function that returns our primary chain via its - // ChainCode. - PrimaryChain func() ChainCode - - // HeightHintCacheQueryDisable is a boolean that disables height hint - // queries if true. 
- HeightHintCacheQueryDisable bool - - // NeutrinoMode defines settings for connecting to a neutrino light-client. - NeutrinoMode *lncfg.Neutrino - - // BitcoindMode defines settings for connecting to a bitcoind node. - BitcoindMode *lncfg.Bitcoind - - // LitecoindMode defines settings for connecting to a litecoind node. - LitecoindMode *lncfg.Bitcoind - - // BtcdMode defines settings for connecting to a btcd node. - BtcdMode *lncfg.Btcd - - // LtcdMode defines settings for connecting to an ltcd node. - LtcdMode *lncfg.Btcd - - // LocalChanDB is a pointer to the local backing channel database. - LocalChanDB *channeldb.DB - - // RemoteChanDB is a pointer to the remote backing channel database. - RemoteChanDB *channeldb.DB - - // PrivateWalletPw is the private wallet password to the underlying - // btcwallet instance. - PrivateWalletPw []byte - - // PublicWalletPw is the public wallet password to the underlying btcwallet - // instance. - PublicWalletPw []byte - - // Birthday specifies the time the wallet was initially created. - Birthday time.Time - - // RecoveryWindow specifies the address look-ahead for which to scan when - // restoring a wallet. - RecoveryWindow uint32 - - // Wallet is a pointer to the backing wallet instance. - Wallet *wallet.Wallet - - // NeutrinoCS is a pointer to a neutrino ChainService. Must be non-nil if - // using neutrino. - NeutrinoCS *neutrino.ChainService - - // ActiveNetParams details the current chain we are on. - ActiveNetParams BitcoinNetParams - - // FeeURL defines the URL for fee estimation we will use. This field is - // optional. - FeeURL string -} - -const ( - // DefaultBitcoinMinHTLCInMSat is the default smallest value htlc this - // node will accept. This value is proposed in the channel open sequence - // and cannot be changed during the life of the channel. It is 1 msat by - // default to allow maximum flexibility in deciding what size payments - // to forward. 
- // - // All forwarded payments are subjected to the min htlc constraint of - // the routing policy of the outgoing channel. This implicitly controls - // the minimum htlc value on the incoming channel too. - DefaultBitcoinMinHTLCInMSat = lnwire.MilliSatoshi(1) - - // DefaultBitcoinMinHTLCOutMSat is the default minimum htlc value that - // we require for sending out htlcs. Our channel peer may have a lower - // min htlc channel parameter, but we - by default - don't forward - // anything under the value defined here. - DefaultBitcoinMinHTLCOutMSat = lnwire.MilliSatoshi(1000) - - // DefaultBitcoinBaseFeeMSat is the default forwarding base fee. - DefaultBitcoinBaseFeeMSat = lnwire.MilliSatoshi(1000) - - // DefaultBitcoinFeeRate is the default forwarding fee rate. - DefaultBitcoinFeeRate = lnwire.MilliSatoshi(1) - - // DefaultBitcoinTimeLockDelta is the default forwarding time lock - // delta. - DefaultBitcoinTimeLockDelta = 40 - - DefaultLitecoinMinHTLCInMSat = lnwire.MilliSatoshi(1) - DefaultLitecoinMinHTLCOutMSat = lnwire.MilliSatoshi(1000) - DefaultLitecoinBaseFeeMSat = lnwire.MilliSatoshi(1000) - DefaultLitecoinFeeRate = lnwire.MilliSatoshi(1) - DefaultLitecoinTimeLockDelta = 576 - DefaultLitecoinDustLimit = btcutil.Amount(54600) - - DefaultPktMinHTLCInMSat = lnwire.MilliSatoshi(1) - DefaultPktMinHTLCOutMSat = lnwire.MilliSatoshi(1000) - DefaultPktBaseFeeMSat = lnwire.MilliSatoshi(1000) - DefaultPktFeeRate = lnwire.MilliSatoshi(1) - DefaultPktTimeLockDelta = 576 - DefaultPktDustLimit = btcutil.Amount(54600) - - // DefaultBitcoinStaticFeePerKW is the fee rate of 50 sat/vbyte - // expressed in sat/kw. - DefaultBitcoinStaticFeePerKW = chainfee.SatPerKWeight(12500) - - // DefaultBitcoinStaticMinRelayFeeRate is the min relay fee used for - // static estimators. - DefaultBitcoinStaticMinRelayFeeRate = chainfee.FeePerKwFloor - - // DefaultLitecoinStaticFeePerKW is the fee rate of 200 sat/vbyte - // expressed in sat/kw. 
- DefaultLitecoinStaticFeePerKW = chainfee.SatPerKWeight(50000) - - DefaultPktStaticFeePerKW = chainfee.SatPerKWeight(1000) - - // BtcToLtcConversionRate is a fixed ratio used in order to scale up - // payments when running on the Litecoin chain. - BtcToLtcConversionRate = 60 -) - -// DefaultBtcChannelConstraints is the default set of channel constraints that are -// meant to be used when initially funding a Bitcoin channel. -// -// TODO(halseth): make configurable at startup? -var DefaultBtcChannelConstraints = channeldb.ChannelConstraints{ - DustLimit: lnwallet.DefaultDustLimit(), - MaxAcceptedHtlcs: input.MaxHTLCNumber / 2, -} - -// DefaultLtcChannelConstraints is the default set of channel constraints that are -// meant to be used when initially funding a Litecoin channel. -var DefaultLtcChannelConstraints = channeldb.ChannelConstraints{ - DustLimit: DefaultLitecoinDustLimit, - MaxAcceptedHtlcs: input.MaxHTLCNumber / 2, -} - -// ChainControl couples the three primary interfaces lnd utilizes for a -// particular chain together. A single ChainControl instance will exist for all -// the chains lnd is currently active on. -type ChainControl struct { - // ChainIO represents an abstraction over a source that can query the blockchain. - ChainIO lnwallet.BlockChainIO - - // HealthCheck is a function which can be used to send a low-cost, fast - // query to the chain backend to ensure we still have access to our - // node. - HealthCheck func() er.R - - // FeeEstimator is used to estimate an optimal fee for transactions important to us. - FeeEstimator chainfee.Estimator - - // Signer is used to provide signatures over things like transactions. - Signer input.Signer - - // KeyRing represents a set of keys that we have the private keys to. - KeyRing keychain.SecretKeyRing - - // Wc is an abstraction over some basic wallet commands. This base set of commands - // will be provided to the Wallet *LightningWallet raw pointer below. 
- Wc lnwallet.WalletController - - // MsgSigner is used to sign arbitrary messages. - MsgSigner lnwallet.MessageSigner - - // ChainNotifier is used to receive blockchain events that we are interested in. - ChainNotifier chainntnfs.ChainNotifier - - // ChainView is used in the router for maintaining an up-to-date graph. - ChainView chainview.FilteredChainView - - // Wallet is our LightningWallet that also contains the abstract Wc above. This wallet - // handles all of the lightning operations. - Wallet *lnwallet.LightningWallet - - // RoutingPolicy is the routing policy we have decided to use. - RoutingPolicy htlcswitch.ForwardingPolicy - - // MinHtlcIn is the minimum HTLC we will accept. - MinHtlcIn lnwire.MilliSatoshi -} - -// NewChainControl attempts to create a ChainControl instance according -// to the parameters in the passed configuration. Currently three -// branches of ChainControl instances exist: one backed by a running btcd -// full-node, another backed by a running bitcoind full-node, and the other -// backed by a running neutrino light client instance. When running with a -// neutrino light client instance, `neutrinoCS` must be non-nil. -func NewChainControl(cfg *Config) (*ChainControl, er.R) { - - // Set the RPC config from the "home" chain. Multi-chain isn't yet - // active, so we'll restrict usage to a particular chain for now. 
- homeChainConfig := cfg.Bitcoin - if cfg.PrimaryChain() == LitecoinChain { - homeChainConfig = cfg.Litecoin - } - if cfg.PrimaryChain() == PktChain { - homeChainConfig = cfg.Pkt - } - log.Infof("Primary chain is set to: %v", - cfg.PrimaryChain()) - - cc := &ChainControl{} - - switch cfg.PrimaryChain() { - case BitcoinChain: - cc.RoutingPolicy = htlcswitch.ForwardingPolicy{ - MinHTLCOut: cfg.Bitcoin.MinHTLCOut, - BaseFee: cfg.Bitcoin.BaseFee, - FeeRate: cfg.Bitcoin.FeeRate, - TimeLockDelta: cfg.Bitcoin.TimeLockDelta, - } - cc.MinHtlcIn = cfg.Bitcoin.MinHTLCIn - cc.FeeEstimator = chainfee.NewStaticEstimator( - DefaultBitcoinStaticFeePerKW, - DefaultBitcoinStaticMinRelayFeeRate, - ) - case LitecoinChain: - cc.RoutingPolicy = htlcswitch.ForwardingPolicy{ - MinHTLCOut: cfg.Litecoin.MinHTLCOut, - BaseFee: cfg.Litecoin.BaseFee, - FeeRate: cfg.Litecoin.FeeRate, - TimeLockDelta: cfg.Litecoin.TimeLockDelta, - } - cc.MinHtlcIn = cfg.Litecoin.MinHTLCIn - cc.FeeEstimator = chainfee.NewStaticEstimator( - DefaultLitecoinStaticFeePerKW, 0, - ) - case PktChain: - cc.RoutingPolicy = htlcswitch.ForwardingPolicy{ - MinHTLCOut: cfg.Pkt.MinHTLCOut, - BaseFee: cfg.Pkt.BaseFee, - FeeRate: cfg.Pkt.FeeRate, - TimeLockDelta: cfg.Pkt.TimeLockDelta, - } - cc.MinHtlcIn = cfg.Pkt.MinHTLCIn - cc.FeeEstimator = chainfee.NewStaticEstimator( - DefaultPktStaticFeePerKW, 0, - ) - default: - return nil, er.Errorf("default routing policy for chain %v is "+ - "unknown", cfg.PrimaryChain()) - } - - walletConfig := &btcwallet.Config{ - PrivatePass: cfg.PrivateWalletPw, - PublicPass: nil, - Birthday: cfg.Birthday, - RecoveryWindow: cfg.RecoveryWindow, - DataDir: homeChainConfig.ChainDir, - NetParams: cfg.ActiveNetParams.Params, - CoinType: cfg.ActiveNetParams.CoinType, - Wallet: cfg.Wallet, - } - - var err er.R - - heightHintCacheConfig := chainntnfs.CacheConfig{ - QueryDisable: cfg.HeightHintCacheQueryDisable, - } - if cfg.HeightHintCacheQueryDisable { - log.Infof("Height Hint Cache Queries disabled") - } 
- - // Initialize the height hint cache within the chain directory. - hintCache, err := chainntnfs.NewHeightHintCache( - heightHintCacheConfig, cfg.LocalChanDB, - ) - if err != nil { - return nil, er.Errorf("unable to initialize height hint "+ - "cache: %v", err) - } - - // If spv mode is active, then we'll be using a distinct set of - // chainControl interfaces that interface directly with the p2p network - // of the selected chain. - switch homeChainConfig.Node { - case "neutrino": - // We'll create ChainNotifier and FilteredChainView instances, - // along with the wallet's ChainSource, which are all backed by - // the neutrino light client. - cc.ChainNotifier = neutrinonotify.New( - cfg.NeutrinoCS, hintCache, hintCache, - ) - cc.ChainView, err = chainview.NewCfFilteredChainView(cfg.NeutrinoCS) - if err != nil { - return nil, err - } - - // Map the deprecated neutrino feeurl flag to the general fee - // url. - if cfg.NeutrinoMode.FeeURL != "" { - if cfg.FeeURL != "" { - return nil, er.New("feeurl and " + - "neutrino.feeurl are mutually exclusive") - } - - cfg.FeeURL = cfg.NeutrinoMode.FeeURL - } - - walletConfig.ChainSource = chain.NewNeutrinoClient( - cfg.ActiveNetParams.Params, cfg.NeutrinoCS, - ) - - // Get our best block as a health check. - cc.HealthCheck = func() er.R { - _, _, err := walletConfig.ChainSource.GetBestBlock() - return err - } - - case "btcd", "ltcd": - // Otherwise, we'll be speaking directly via RPC to a node. - // - // So first we'll load btcd/ltcd's TLS cert for the RPC - // connection. If a raw cert was specified in the config, then - // we'll set that directly. Otherwise, we attempt to read the - // cert from the path specified in the config. 
- var btcdMode *lncfg.Btcd - switch { - case cfg.Bitcoin.Active: - btcdMode = cfg.BtcdMode - case cfg.Litecoin.Active: - btcdMode = cfg.LtcdMode - } - var rpcCert []byte - if btcdMode.RawRPCCert != "" { - rpcCert, err = util.DecodeHex(btcdMode.RawRPCCert) - if err != nil { - return nil, err - } - } else { - certFile, err := os.Open(btcdMode.RPCCert) - if err != nil { - return nil, er.E(err) - } - rpcCert, err = ioutil.ReadAll(certFile) - if err != nil { - return nil, er.E(err) - } - if err := certFile.Close(); err != nil { - return nil, er.E(err) - } - } - - // If the specified host for the btcd/ltcd RPC server already - // has a port specified, then we use that directly. Otherwise, - // we assume the default port according to the selected chain - // parameters. - var btcdHost string - if strings.Contains(btcdMode.RPCHost, ":") { - btcdHost = btcdMode.RPCHost - } else { - btcdHost = fmt.Sprintf("%v:%v", btcdMode.RPCHost, - cfg.ActiveNetParams.RPCPort) - } - - btcdUser := btcdMode.RPCUser - btcdPass := btcdMode.RPCPass - rpcConfig := &rpcclient.ConnConfig{ - Host: btcdHost, - Endpoint: "ws", - User: btcdUser, - Pass: btcdPass, - Certificates: rpcCert, - DisableTLS: false, - DisableConnectOnNew: true, - DisableAutoReconnect: false, - } - cc.ChainNotifier, err = btcdnotify.New( - rpcConfig, cfg.ActiveNetParams.Params, hintCache, hintCache, - ) - if err != nil { - return nil, err - } - - // Finally, we'll create an instance of the default chain view to be - // used within the routing layer. - cc.ChainView, err = chainview.NewBtcdFilteredChainView(*rpcConfig) - if err != nil { - log.Errorf("unable to create chain view: %v", err) - return nil, err - } - - // Create a special websockets rpc client for btcd which will be used - // by the wallet for notifications, calls, etc. 
- chainRPC, err := chain.NewRPCClient(cfg.ActiveNetParams.Params, btcdHost, - btcdUser, btcdPass, rpcCert, false, 20) - if err != nil { - return nil, err - } - - walletConfig.ChainSource = chainRPC - - // Use a query for our best block as a health check. - cc.HealthCheck = func() er.R { - _, _, err := walletConfig.ChainSource.GetBestBlock() - return err - } - - // If we're not in simnet or regtest mode, then we'll attempt - // to use a proper fee estimator for testnet. - if !cfg.Bitcoin.SimNet && !cfg.Litecoin.SimNet && - !cfg.Bitcoin.RegTest && !cfg.Litecoin.RegTest { - - log.Info("Initializing btcd backed fee estimator") - - // Finally, we'll re-initialize the fee estimator, as - // if we're using btcd as a backend, then we can use - // live fee estimates, rather than a statically coded - // value. - fallBackFeeRate := chainfee.SatPerKVByte(25 * 1000) - cc.FeeEstimator, err = chainfee.NewBtcdEstimator( - *rpcConfig, fallBackFeeRate.FeePerKWeight(), - ) - if err != nil { - return nil, err - } - } - default: - return nil, er.Errorf("unknown node type: %s", - homeChainConfig.Node) - } - - // Override default fee estimator if an external service is specified. - if cfg.FeeURL != "" { - // Do not cache fees on regtest to make it easier to execute - // manual or automated test cases. - cacheFees := !cfg.Bitcoin.RegTest - - log.Infof("Using external fee estimator %v: cached=%v", - cfg.FeeURL, cacheFees) - - cc.FeeEstimator = chainfee.NewWebAPIEstimator( - chainfee.SparseConfFeeSource{ - URL: cfg.FeeURL, - }, - !cacheFees, - ) - } - - // Start fee estimator. - if err := cc.FeeEstimator.Start(); err != nil { - return nil, err - } - - wc, err := btcwallet.New(*walletConfig) - if err != nil { - fmt.Printf("unable to create wallet controller: %v\n", err) - return nil, err - } - - cc.MsgSigner = wc - cc.Signer = wc - cc.ChainIO = wc - cc.Wc = wc - - // Select the default channel constraints for the primary chain. 
- channelConstraints := DefaultBtcChannelConstraints - if cfg.PrimaryChain() == LitecoinChain { - channelConstraints = DefaultLtcChannelConstraints - } - - keyRing := keychain.NewBtcWalletKeyRing( - wc.InternalWallet(), cfg.ActiveNetParams.CoinType, - ) - cc.KeyRing = keyRing - - // Create, and start the lnwallet, which handles the core payment - // channel logic, and exposes control via proxy state machines. - walletCfg := lnwallet.Config{ - Database: cfg.RemoteChanDB, - Notifier: cc.ChainNotifier, - WalletController: wc, - Signer: cc.Signer, - FeeEstimator: cc.FeeEstimator, - SecretKeyRing: keyRing, - ChainIO: cc.ChainIO, - DefaultConstraints: channelConstraints, - NetParams: *cfg.ActiveNetParams.Params, - } - lnWallet, err := lnwallet.NewLightningWallet(walletCfg) - if err != nil { - fmt.Printf("unable to create wallet: %v\n", err) - return nil, err - } - if err := lnWallet.Startup(); err != nil { - fmt.Printf("unable to start wallet: %v\n", err) - return nil, err - } - - log.Info("LightningWallet opened") - - cc.Wallet = lnWallet - - return cc, nil -} - -// getBitcoindHealthCheckCmd queries bitcoind for its version to decide which -// api we should use for our health check. We prefer to use the uptime -// command, because it has no locking and is an inexpensive call, which was -// added in version 0.15. If we are on an earlier version, we fallback to using -// getblockchaininfo. -func getBitcoindHealthCheckCmd(client *rpcclient.Client) (string, er.R) { - // Query bitcoind to get our current version. - resp, err := client.RawRequest("getnetworkinfo", nil) - if err != nil { - return "", err - } - - // Parse the response to retrieve bitcoind's version. 
- info := struct { - Version int64 `json:"version"` - }{} - if err := json.Unmarshal(resp, &info); err != nil { - return "", er.E(err) - } - - // Bitcoind returns a single value representing the semantic version: - // 1000000 * CLIENT_VERSION_MAJOR + 10000 * CLIENT_VERSION_MINOR - // + 100 * CLIENT_VERSION_REVISION + 1 * CLIENT_VERSION_BUILD - // - // The uptime call was added in version 0.15.0, so we return it for - // any version value >= 150000, as per the above calculation. - if info.Version >= 150000 { - return "uptime", nil - } - - return "getblockchaininfo", nil -} - -var ( - // BitcoinTestnetGenesis is the genesis hash of Bitcoin's testnet - // chain. - BitcoinTestnetGenesis = chainhash.Hash([chainhash.HashSize]byte{ - 0x43, 0x49, 0x7f, 0xd7, 0xf8, 0x26, 0x95, 0x71, - 0x08, 0xf4, 0xa3, 0x0f, 0xd9, 0xce, 0xc3, 0xae, - 0xba, 0x79, 0x97, 0x20, 0x84, 0xe9, 0x0e, 0xad, - 0x01, 0xea, 0x33, 0x09, 0x00, 0x00, 0x00, 0x00, - }) - - // BitcoinMainnetGenesis is the genesis hash of Bitcoin's main chain. - BitcoinMainnetGenesis = chainhash.Hash([chainhash.HashSize]byte{ - 0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, - 0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f, - 0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c, - 0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, - }) - - // LitecoinTestnetGenesis is the genesis hash of Litecoin's testnet4 - // chain. - LitecoinTestnetGenesis = chainhash.Hash([chainhash.HashSize]byte{ - 0xa0, 0x29, 0x3e, 0x4e, 0xeb, 0x3d, 0xa6, 0xe6, - 0xf5, 0x6f, 0x81, 0xed, 0x59, 0x5f, 0x57, 0x88, - 0x0d, 0x1a, 0x21, 0x56, 0x9e, 0x13, 0xee, 0xfd, - 0xd9, 0x51, 0x28, 0x4b, 0x5a, 0x62, 0x66, 0x49, - }) - - // LitecoinMainnetGenesis is the genesis hash of Litecoin's main chain. 
- LitecoinMainnetGenesis = chainhash.Hash([chainhash.HashSize]byte{ - 0xe2, 0xbf, 0x04, 0x7e, 0x7e, 0x5a, 0x19, 0x1a, - 0xa4, 0xef, 0x34, 0xd3, 0x14, 0x97, 0x9d, 0xc9, - 0x98, 0x6e, 0x0f, 0x19, 0x25, 0x1e, 0xda, 0xba, - 0x59, 0x40, 0xfd, 0x1f, 0xe3, 0x65, 0xa7, 0x12, - }) - - // chainMap is a simple index that maps a chain's genesis hash to the - // ChainCode enum for that chain. - chainMap = map[chainhash.Hash]ChainCode{ - BitcoinTestnetGenesis: BitcoinChain, - LitecoinTestnetGenesis: LitecoinChain, - - BitcoinMainnetGenesis: BitcoinChain, - LitecoinMainnetGenesis: LitecoinChain, - } - - // ChainDNSSeeds is a map of a chain's hash to the set of DNS seeds - // that will be use to bootstrap peers upon first startup. - // - // The first item in the array is the primary host we'll use to attempt - // the SRV lookup we require. If we're unable to receive a response - // over UDP, then we'll fall back to manual TCP resolution. The second - // item in the array is a special A record that we'll query in order to - // receive the IP address of the current authoritative DNS server for - // the network seed. - // - // TODO(roasbeef): extend and collapse these and chainparams.go into - // struct like chaincfg.Params - ChainDNSSeeds = map[chainhash.Hash][][2]string{ - BitcoinMainnetGenesis: { - { - "nodes.lightning.directory", - "soa.nodes.lightning.directory", - }, - { - "lseed.bitcoinstats.com", - }, - }, - - BitcoinTestnetGenesis: { - { - "test.nodes.lightning.directory", - "soa.nodes.lightning.directory", - }, - }, - - LitecoinMainnetGenesis: { - { - "ltc.nodes.lightning.directory", - "soa.nodes.lightning.directory", - }, - }, - } -) - -// ChainRegistry keeps track of the current chains -type ChainRegistry struct { - sync.RWMutex - - activeChains map[ChainCode]*ChainControl - netParams map[ChainCode]*BitcoinNetParams - - primaryChain ChainCode -} - -// NewChainRegistry creates a new ChainRegistry. 
-func NewChainRegistry() *ChainRegistry { - return &ChainRegistry{ - activeChains: make(map[ChainCode]*ChainControl), - netParams: make(map[ChainCode]*BitcoinNetParams), - } -} - -// RegisterChain assigns an active ChainControl instance to a target chain -// identified by its ChainCode. -func (c *ChainRegistry) RegisterChain(newChain ChainCode, - cc *ChainControl) { - - c.Lock() - c.activeChains[newChain] = cc - c.Unlock() -} - -// LookupChain attempts to lookup an active ChainControl instance for the -// target chain. -func (c *ChainRegistry) LookupChain(targetChain ChainCode) ( - *ChainControl, bool) { - - c.RLock() - cc, ok := c.activeChains[targetChain] - c.RUnlock() - return cc, ok -} - -// LookupChainByHash attempts to look up an active ChainControl which -// corresponds to the passed genesis hash. -func (c *ChainRegistry) LookupChainByHash(chainHash chainhash.Hash) (*ChainControl, bool) { - c.RLock() - defer c.RUnlock() - - targetChain, ok := chainMap[chainHash] - if !ok { - return nil, ok - } - - cc, ok := c.activeChains[targetChain] - return cc, ok -} - -// RegisterPrimaryChain sets a target chain as the "home chain" for lnd. -func (c *ChainRegistry) RegisterPrimaryChain(cc ChainCode) { - c.Lock() - defer c.Unlock() - - c.primaryChain = cc -} - -// PrimaryChain returns the primary chain for this running lnd instance. The -// primary chain is considered the "home base" while the other registered -// chains are treated as secondary chains. -func (c *ChainRegistry) PrimaryChain() ChainCode { - c.RLock() - defer c.RUnlock() - - return c.primaryChain -} - -// ActiveChains returns a slice containing the active chains. -func (c *ChainRegistry) ActiveChains() []ChainCode { - c.RLock() - defer c.RUnlock() - - chains := make([]ChainCode, 0, len(c.activeChains)) - for activeChain := range c.activeChains { - chains = append(chains, activeChain) - } - - return chains -} - -// NumActiveChains returns the total number of active chains. 
-func (c *ChainRegistry) NumActiveChains() uint32 { - c.RLock() - defer c.RUnlock() - - return uint32(len(c.activeChains)) -} diff --git a/lnd/chanacceptor/acceptor_test.go b/lnd/chanacceptor/acceptor_test.go deleted file mode 100644 index 9a36e38b..00000000 --- a/lnd/chanacceptor/acceptor_test.go +++ /dev/null @@ -1,321 +0,0 @@ -package chanacceptor - -import ( - "math/big" - "testing" - "time" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/chaincfg" - "github.com/pkt-cash/pktd/lnd/lnrpc" - "github.com/pkt-cash/pktd/lnd/lnwallet/chancloser" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/stretchr/testify/assert" -) - -const testTimeout = time.Second - -type channelAcceptorCtx struct { - t *testing.T - - // extRequests is the channel that we send our channel accept requests - // into, this channel mocks sending of a request to the rpc acceptor. - // This channel should be buffered with the number of requests we want - // to send so that it does not block (like a rpc stream). - extRequests chan []byte - - // responses is a map of pending channel IDs to the response which we - // wish to mock the remote channel acceptor sending. - responses map[[32]byte]*lnrpc.ChannelAcceptResponse - - // acceptor is the channel acceptor we create for the test. - acceptor *RPCAcceptor - - // errChan is a channel that the error the channel acceptor exits with - // is sent into. - errChan chan er.R - - // quit is a channel that can be used to shutdown the channel acceptor - // and return errShuttingDown. 
- quit chan struct{} -} - -func newChanAcceptorCtx(t *testing.T, acceptCallCount int, - responses map[[32]byte]*lnrpc.ChannelAcceptResponse) *channelAcceptorCtx { - - testCtx := &channelAcceptorCtx{ - t: t, - extRequests: make(chan []byte, acceptCallCount), - responses: responses, - errChan: make(chan er.R), - quit: make(chan struct{}), - } - - testCtx.acceptor = NewRPCAcceptor( - testCtx.receiveResponse, testCtx.sendRequest, testTimeout*5, - &chaincfg.TestNet3Params, testCtx.quit, - ) - - return testCtx -} - -// sendRequest mocks sending a request to the channel acceptor. -func (c *channelAcceptorCtx) sendRequest(request *lnrpc.ChannelAcceptRequest) error { - select { - case c.extRequests <- request.PendingChanId: - - case <-time.After(testTimeout): - c.t.Fatalf("timeout sending request: %v", request.PendingChanId) - } - - return nil -} - -// receiveResponse mocks sending of a response from the channel acceptor. -func (c *channelAcceptorCtx) receiveResponse() (*lnrpc.ChannelAcceptResponse, - error) { - - select { - case id := <-c.extRequests: - scratch := [32]byte{} - copy(scratch[:], id) - - resp, ok := c.responses[scratch] - assert.True(c.t, ok) - - return resp, nil - - case <-time.After(testTimeout): - c.t.Fatalf("timeout receiving request") - return nil, er.Native(er.New("receiveResponse timeout")) - - // Exit if our test acceptor closes the done channel, which indicates - // that the acceptor is shutting down. - case <-c.acceptor.done: - return nil, er.Native(er.New("acceptor shutting down")) - } -} - -// start runs our channel acceptor in a goroutine which sends its exit error -// into our test error channel. -func (c *channelAcceptorCtx) start() { - go func() { - c.errChan <- c.acceptor.Run() - }() -} - -// stop shuts down the test's channel acceptor and asserts that it exits with -// our expected error. 
-func (c *channelAcceptorCtx) stop() { - close(c.quit) - - select { - case actual := <-c.errChan: - assert.True(c.t, errShuttingDown.Is(actual)) - - case <-time.After(testTimeout): - c.t.Fatal("timeout waiting for acceptor to exit") - } -} - -// queryAndAssert takes a map of open channel requests which we want to call -// Accept for to the outcome we expect from the acceptor, dispatches each -// request in a goroutine and then asserts that we get the outcome we expect. -func (c *channelAcceptorCtx) queryAndAssert(queries map[*lnwire.OpenChannel]*ChannelAcceptResponse) { - var ( - node = &btcec.PublicKey{ - X: big.NewInt(1), - Y: big.NewInt(1), - } - - responses = make(chan struct{}) - ) - - for request, expected := range queries { - request := request - expected := expected - - go func() { - resp := c.acceptor.Accept(&ChannelAcceptRequest{ - Node: node, - OpenChanMsg: request, - }) - e1 := expected.ChanAcceptError - e2 := resp.ChanAcceptError - assert.True(c.t, er.FuzzyEquals(e1, e2)) - expected.ChanAcceptError = nil - resp.ChanAcceptError = nil - assert.Equal(c.t, expected, resp) - responses <- struct{}{} - }() - } - - // Wait for each of our requests to return a response before we exit. - for i := 0; i < len(queries); i++ { - select { - case <-responses: - case <-time.After(testTimeout): - c.t.Fatalf("did not receive response") - } - } -} - -// TestMultipleAcceptClients tests that the RPC acceptor is capable of handling -// multiple requests to its Accept function and responding to them correctly. 
-func TestMultipleAcceptClients(t *testing.T) { - testAddr := "bcrt1qwrmq9uca0t3dy9t9wtuq5tm4405r7tfzyqn9pp" - testUpfront, err := chancloser.ParseUpfrontShutdownAddress( - testAddr, &chaincfg.TestNet3Params, - ) - util.RequireNoErr(t, err) - - var ( - chan1 = &lnwire.OpenChannel{ - PendingChannelID: [32]byte{1}, - } - chan2 = &lnwire.OpenChannel{ - PendingChannelID: [32]byte{2}, - } - chan3 = &lnwire.OpenChannel{ - PendingChannelID: [32]byte{3}, - } - - customError = er.New("go away") - - // Queries is a map of the channel IDs we will query Accept - // with, and the set of outcomes we expect. - queries = map[*lnwire.OpenChannel]*ChannelAcceptResponse{ - chan1: NewChannelAcceptResponse( - true, nil, testUpfront, 1, 2, 3, 4, 5, 6, - ), - chan2: NewChannelAcceptResponse( - false, errChannelRejected.Default(), nil, 0, 0, 0, - 0, 0, 0, - ), - chan3: NewChannelAcceptResponse( - false, customError, nil, 0, 0, 0, 0, 0, 0, - ), - } - - // Responses is a mocked set of responses from the remote - // channel acceptor. - responses = map[[32]byte]*lnrpc.ChannelAcceptResponse{ - chan1.PendingChannelID: { - PendingChanId: chan1.PendingChannelID[:], - Accept: true, - UpfrontShutdown: testAddr, - CsvDelay: 1, - MaxHtlcCount: 2, - MinAcceptDepth: 3, - ReserveSat: 4, - InFlightMaxMsat: 5, - MinHtlcIn: 6, - }, - chan2.PendingChannelID: { - PendingChanId: chan2.PendingChannelID[:], - Accept: false, - }, - chan3.PendingChannelID: { - PendingChanId: chan3.PendingChannelID[:], - Accept: false, - Error: customError.String(), - }, - } - ) - - // Create and start our channel acceptor. - testCtx := newChanAcceptorCtx(t, len(queries), responses) - testCtx.start() - - // Dispatch three queries and assert that we get our expected response. - // for each. - testCtx.queryAndAssert(queries) - - // Shutdown our acceptor. - testCtx.stop() -} - -// TestInvalidResponse tests the case where our remote channel acceptor sends us -// an invalid response, so the channel acceptor stream terminates. 
-func TestInvalidResponse(t *testing.T) { - var ( - chan1 = [32]byte{1} - - // We make a single query, and expect it to fail with our - // generic error because our response is invalid. - queries = map[*lnwire.OpenChannel]*ChannelAcceptResponse{ - { - PendingChannelID: chan1, - }: NewChannelAcceptResponse( - false, errChannelRejected.Default(), nil, 0, 0, - 0, 0, 0, 0, - ), - } - - // Create a single response which is invalid because it accepts - // the channel but also contains an error message. - responses = map[[32]byte]*lnrpc.ChannelAcceptResponse{ - chan1: { - PendingChanId: chan1[:], - Accept: true, - Error: "has an error as well", - }, - } - ) - - // Create and start our channel acceptor. - testCtx := newChanAcceptorCtx(t, len(queries), responses) - testCtx.start() - - testCtx.queryAndAssert(queries) - - // We do not expect our channel acceptor to exit because of one invalid - // response, so we shutdown and assert here. - testCtx.stop() -} - -// TestInvalidReserve tests validation of the channel reserve proposed by the -// acceptor against the dust limit that was proposed by the remote peer. -func TestInvalidReserve(t *testing.T) { - var ( - chan1 = [32]byte{1} - - dustLimit = btcutil.Amount(1000) - reserve = dustLimit / 2 - - // We make a single query, and expect it to fail with our - // generic error because channel reserve is too low. - queries = map[*lnwire.OpenChannel]*ChannelAcceptResponse{ - { - PendingChannelID: chan1, - DustLimit: dustLimit, - }: NewChannelAcceptResponse( - false, errChannelRejected.Default(), nil, 0, 0, - 0, reserve, 0, 0, - ), - } - - // Create a single response which is invalid because the - // proposed reserve is below our dust limit. - responses = map[[32]byte]*lnrpc.ChannelAcceptResponse{ - chan1: { - PendingChanId: chan1[:], - Accept: true, - ReserveSat: uint64(reserve), - }, - } - ) - - // Create and start our channel acceptor. 
- testCtx := newChanAcceptorCtx(t, len(queries), responses) - testCtx.start() - - testCtx.queryAndAssert(queries) - - // We do not expect our channel acceptor to exit because of one invalid - // response, so we shutdown and assert here. - testCtx.stop() -} diff --git a/lnd/chanacceptor/chainedacceptor.go b/lnd/chanacceptor/chainedacceptor.go deleted file mode 100644 index 30466afc..00000000 --- a/lnd/chanacceptor/chainedacceptor.go +++ /dev/null @@ -1,98 +0,0 @@ -package chanacceptor - -import ( - "sync" - "sync/atomic" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/pktlog/log" -) - -// ChainedAcceptor represents a conjunction of ChannelAcceptor results. -type ChainedAcceptor struct { - // acceptors is a map of ChannelAcceptors that will be evaluated when - // the ChainedAcceptor's Accept method is called. - acceptors map[uint64]ChannelAcceptor - acceptorsMtx sync.RWMutex - - acceptorID uint64 // To be used atomically. -} - -// NewChainedAcceptor initializes a ChainedAcceptor. -func NewChainedAcceptor() *ChainedAcceptor { - return &ChainedAcceptor{ - acceptors: make(map[uint64]ChannelAcceptor), - } -} - -// AddAcceptor adds a ChannelAcceptor to this ChainedAcceptor. -func (c *ChainedAcceptor) AddAcceptor(acceptor ChannelAcceptor) uint64 { - id := atomic.AddUint64(&c.acceptorID, 1) - - c.acceptorsMtx.Lock() - c.acceptors[id] = acceptor - c.acceptorsMtx.Unlock() - - // Return the id so that a caller can call RemoveAcceptor. - return id -} - -// RemoveAcceptor removes a ChannelAcceptor from this ChainedAcceptor given -// an ID. -func (c *ChainedAcceptor) RemoveAcceptor(id uint64) { - c.acceptorsMtx.Lock() - delete(c.acceptors, id) - c.acceptorsMtx.Unlock() -} - -// Accept evaluates the results of all ChannelAcceptors in the acceptors map -// and returns the conjunction of all these predicates. -// -// NOTE: Part of the ChannelAcceptor interface. 
-func (c *ChainedAcceptor) Accept(req *ChannelAcceptRequest) *ChannelAcceptResponse { - c.acceptorsMtx.RLock() - defer c.acceptorsMtx.RUnlock() - - var finalResp ChannelAcceptResponse - - for _, acceptor := range c.acceptors { - // Call our acceptor to determine whether we want to accept this - // channel. - acceptorResponse := acceptor.Accept(req) - - // If we should reject the channel, we can just exit early. This - // has the effect of returning the error belonging to our first - // failed acceptor. - if acceptorResponse.RejectChannel() { - return acceptorResponse - } - - // If we have accepted the channel, we need to set the other - // fields that were set in the response. However, since we are - // dealing with multiple responses, we need to make sure that we - // have not received inconsistent values (eg a csv delay of 1 - // from one acceptor, and a delay of 120 from another). We - // set each value on our final response if it has not been set - // yet, and allow duplicate sets if the value is the same. If - // we cannot set a field, we return an error response. - var err er.R - finalResp, err = mergeResponse(finalResp, *acceptorResponse) - if err != nil { - log.Errorf("response for: %x has inconsistent values: %v", - req.OpenChanMsg.PendingChannelID, err) - - return NewChannelAcceptResponse( - false, errChannelRejected.Default(), nil, 0, 0, - 0, 0, 0, 0, - ) - } - } - - // If we have gone through all of our acceptors with no objections, we - // can return an acceptor with a nil error. - return &finalResp -} - -// A compile-time constraint to ensure ChainedAcceptor implements the -// ChannelAcceptor interface. 
-var _ ChannelAcceptor = (*ChainedAcceptor)(nil) diff --git a/lnd/chanacceptor/interface.go b/lnd/chanacceptor/interface.go deleted file mode 100644 index c5638609..00000000 --- a/lnd/chanacceptor/interface.go +++ /dev/null @@ -1,112 +0,0 @@ -package chanacceptor - -import ( - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -var ( - // errChannelRejected is returned when the rpc channel acceptor rejects - // a channel due to acceptor timeout, shutdown, or because no custom - // error value is available when the channel was rejected. - errChannelRejected = er.GenericErrorType.CodeWithDetail("errChannelRejected", "channel rejected") -) - -// ChannelAcceptRequest is a struct containing the requesting node's public key -// along with the lnwire.OpenChannel message that they sent when requesting an -// inbound channel. This information is provided to each acceptor so that they -// can each leverage their own decision-making with this information. -type ChannelAcceptRequest struct { - // Node is the public key of the node requesting to open a channel. - Node *btcec.PublicKey - - // OpenChanMsg is the actual OpenChannel protocol message that the peer - // sent to us. - OpenChanMsg *lnwire.OpenChannel -} - -// ChannelAcceptResponse is a struct containing the response to a request to -// open an inbound channel. Note that fields added to this struct must be added -// to the mergeResponse function to allow combining of responses from different -// acceptors. -type ChannelAcceptResponse struct { - // ChanAcceptError the error returned by the channel acceptor. If the - // channel was accepted, this value will be nil. - ChanAcceptError er.R - - // UpfrontShutdown is the address that we will set as our upfront - // shutdown address. - UpfrontShutdown lnwire.DeliveryAddress - - // CSVDelay is the csv delay we require for the remote peer. 
- CSVDelay uint16 - - // Reserve is the amount that require the remote peer hold in reserve - // on the channel. - Reserve btcutil.Amount - - // InFlightTotal is the maximum amount that we allow the remote peer to - // hold in outstanding htlcs. - InFlightTotal lnwire.MilliSatoshi - - // HtlcLimit is the maximum number of htlcs that we allow the remote - // peer to offer us. - HtlcLimit uint16 - - // MinHtlcIn is the minimum incoming htlc value allowed on the channel. - MinHtlcIn lnwire.MilliSatoshi - - // MinAcceptDepth is the minimum depth that the initiator of the - // channel should wait before considering the channel open. - MinAcceptDepth uint16 -} - -// NewChannelAcceptResponse is a constructor for a channel accept response, -// which creates a response with an appropriately wrapped error (in the case of -// a rejection) so that the error will be whitelisted and delivered to the -// initiating peer. Accepted channels simply return a response containing a nil -// error. -func NewChannelAcceptResponse(accept bool, acceptErr er.R, - upfrontShutdown lnwire.DeliveryAddress, csvDelay, htlcLimit, - minDepth uint16, reserve btcutil.Amount, inFlight, - minHtlcIn lnwire.MilliSatoshi) *ChannelAcceptResponse { - - resp := &ChannelAcceptResponse{ - UpfrontShutdown: upfrontShutdown, - CSVDelay: csvDelay, - Reserve: reserve, - InFlightTotal: inFlight, - HtlcLimit: htlcLimit, - MinHtlcIn: minHtlcIn, - MinAcceptDepth: minDepth, - } - - // If we want to accept the channel, we return a response with a nil - // error. - if accept { - return resp - } - - // Use a generic error when no custom error is provided. - if acceptErr == nil { - acceptErr = errChannelRejected.Default() - } - - resp.ChanAcceptError = acceptErr - - return resp -} - -// RejectChannel returns a boolean that indicates whether we should reject the -// channel. 
-func (c *ChannelAcceptResponse) RejectChannel() bool { - return c.ChanAcceptError != nil -} - -// ChannelAcceptor is an interface that represents a predicate on the data -// contained in ChannelAcceptRequest. -type ChannelAcceptor interface { - Accept(req *ChannelAcceptRequest) *ChannelAcceptResponse -} diff --git a/lnd/chanacceptor/merge.go b/lnd/chanacceptor/merge.go deleted file mode 100644 index f2258df2..00000000 --- a/lnd/chanacceptor/merge.go +++ /dev/null @@ -1,152 +0,0 @@ -package chanacceptor - -import ( - "bytes" - - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -const ( - // We use field names in our errors for more readable errors. Create - // consts for them here so that we can exactly match in our unit tests. - fieldCSV = "csv delay" - fieldHtlcLimit = "htlc limit" - fieldMinDep = "min depth" - fieldReserve = "reserve" - fieldMinIn = "min htlc in" - fieldInFlightTotal = "in flight total" - fieldUpfrontShutdown = "upfront shutdown" -) - -// fieldMismatchError returns a merge error for a named field when we get two -// channel acceptor responses which have different values set. -func fieldMismatchError(name string, current, new interface{}) er.R { - return er.Errorf("multiple values set for: %v, %v and %v", - name, current, new) -} - -// mergeInt64 merges two int64 values, failing if they have different non-zero -// values. -func mergeInt64(name string, current, new int64) (int64, er.R) { - switch { - case current == 0: - return new, nil - - case new == 0: - return current, nil - - case current != new: - return 0, fieldMismatchError(name, current, new) - - default: - return new, nil - } -} - -// mergeMillisatoshi merges two msat values, failing if they have different -// non-zero values. 
-func mergeMillisatoshi(name string, current, - new lnwire.MilliSatoshi) (lnwire.MilliSatoshi, er.R) { - - switch { - case current == 0: - return new, nil - - case new == 0: - return current, nil - - case current != new: - return 0, fieldMismatchError(name, current, new) - - default: - return new, nil - } -} - -// mergeDeliveryAddress merges two delivery address values, failing if they have -// different non-zero values. -func mergeDeliveryAddress(name string, current, - new lnwire.DeliveryAddress) (lnwire.DeliveryAddress, er.R) { - - switch { - case current == nil: - return new, nil - - case new == nil: - return current, nil - - case !bytes.Equal(current, new): - return nil, fieldMismatchError(name, current, new) - - default: - return new, nil - } -} - -// mergeResponse takes two channel accept responses, and attempts to merge their -// fields, failing if any fields conflict (are non-zero and not equal). It -// returns a new response that has all the merged fields in it. -func mergeResponse(current, new ChannelAcceptResponse) (ChannelAcceptResponse, - er.R) { - - csv, err := mergeInt64( - fieldCSV, int64(current.CSVDelay), int64(new.CSVDelay), - ) - if err != nil { - return current, err - } - current.CSVDelay = uint16(csv) - - htlcLimit, err := mergeInt64( - fieldHtlcLimit, int64(current.HtlcLimit), - int64(new.HtlcLimit), - ) - if err != nil { - return current, err - } - current.HtlcLimit = uint16(htlcLimit) - - minDepth, err := mergeInt64( - fieldMinDep, int64(current.MinAcceptDepth), - int64(new.MinAcceptDepth), - ) - if err != nil { - return current, err - } - current.MinAcceptDepth = uint16(minDepth) - - reserve, err := mergeInt64( - fieldReserve, int64(current.Reserve), int64(new.Reserve), - ) - if err != nil { - return current, err - } - current.Reserve = btcutil.Amount(reserve) - - current.MinHtlcIn, err = mergeMillisatoshi( - fieldMinIn, current.MinHtlcIn, new.MinHtlcIn, - ) - if err != nil { - return current, err - } - - current.InFlightTotal, err = 
mergeMillisatoshi( - fieldInFlightTotal, current.InFlightTotal, - new.InFlightTotal, - ) - if err != nil { - return current, err - } - - current.UpfrontShutdown, err = mergeDeliveryAddress( - fieldUpfrontShutdown, current.UpfrontShutdown, - new.UpfrontShutdown, - ) - if err != nil { - return current, err - } - - return current, nil -} diff --git a/lnd/chanacceptor/merge_test.go b/lnd/chanacceptor/merge_test.go deleted file mode 100644 index 2da363f4..00000000 --- a/lnd/chanacceptor/merge_test.go +++ /dev/null @@ -1,189 +0,0 @@ -package chanacceptor - -import ( - "testing" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/stretchr/testify/require" -) - -// TestMergeResponse tests merging of channel acceptor responses. -func TestMergeResponse(t *testing.T) { - var ( - addr1 = lnwire.DeliveryAddress{1} - addr2 = lnwire.DeliveryAddress{2} - - populatedResp = ChannelAcceptResponse{ - UpfrontShutdown: addr1, - CSVDelay: 2, - Reserve: 3, - InFlightTotal: 4, - HtlcLimit: 5, - MinHtlcIn: 6, - MinAcceptDepth: 7, - } - ) - - tests := []struct { - name string - current ChannelAcceptResponse - new ChannelAcceptResponse - merged ChannelAcceptResponse - err er.R - }{ - { - name: "same response", - current: populatedResp, - new: populatedResp, - merged: populatedResp, - err: nil, - }, - { - name: "different upfront", - current: ChannelAcceptResponse{ - UpfrontShutdown: addr1, - }, - new: ChannelAcceptResponse{ - UpfrontShutdown: addr2, - }, - err: fieldMismatchError(fieldUpfrontShutdown, addr1, addr2), - }, - { - name: "different csv", - current: ChannelAcceptResponse{ - CSVDelay: 1, - }, - new: ChannelAcceptResponse{ - CSVDelay: 2, - }, - err: fieldMismatchError(fieldCSV, 1, 2), - }, - { - name: "different reserve", - current: ChannelAcceptResponse{ - Reserve: 1, - }, - new: ChannelAcceptResponse{ - Reserve: 2, - }, - err: fieldMismatchError(fieldReserve, 1, 2), - }, - { - name: "different in flight", - current: ChannelAcceptResponse{ 
- InFlightTotal: 1, - }, - new: ChannelAcceptResponse{ - InFlightTotal: 2, - }, - err: fieldMismatchError( - fieldInFlightTotal, lnwire.MilliSatoshi(1), - lnwire.MilliSatoshi(2), - ), - }, - { - name: "different htlc limit", - current: ChannelAcceptResponse{ - HtlcLimit: 1, - }, - new: ChannelAcceptResponse{ - HtlcLimit: 2, - }, - err: fieldMismatchError(fieldHtlcLimit, 1, 2), - }, - { - name: "different min in", - current: ChannelAcceptResponse{ - MinHtlcIn: 1, - }, - new: ChannelAcceptResponse{ - MinHtlcIn: 2, - }, - err: fieldMismatchError( - fieldMinIn, lnwire.MilliSatoshi(1), - lnwire.MilliSatoshi(2), - ), - }, - { - name: "different depth", - current: ChannelAcceptResponse{ - MinAcceptDepth: 1, - }, - new: ChannelAcceptResponse{ - MinAcceptDepth: 2, - }, - err: fieldMismatchError(fieldMinDep, 1, 2), - }, - { - name: "merge all values", - current: ChannelAcceptResponse{ - UpfrontShutdown: lnwire.DeliveryAddress{1}, - CSVDelay: 1, - Reserve: 0, - InFlightTotal: 3, - HtlcLimit: 0, - MinHtlcIn: 5, - MinAcceptDepth: 0, - }, - new: ChannelAcceptResponse{ - UpfrontShutdown: nil, - CSVDelay: 0, - Reserve: 2, - InFlightTotal: 0, - HtlcLimit: 4, - MinHtlcIn: 0, - MinAcceptDepth: 6, - }, - merged: ChannelAcceptResponse{ - UpfrontShutdown: lnwire.DeliveryAddress{1}, - CSVDelay: 1, - Reserve: 2, - InFlightTotal: 3, - HtlcLimit: 4, - MinHtlcIn: 5, - MinAcceptDepth: 6, - }, - err: nil, - }, - { - // Test the case where fields have the same non-zero - // value, and the case where only response value is - // non-zero. 
- name: "empty and identical", - current: ChannelAcceptResponse{ - CSVDelay: 1, - Reserve: 2, - InFlightTotal: 0, - }, - new: ChannelAcceptResponse{ - CSVDelay: 0, - Reserve: 2, - InFlightTotal: 3, - }, - merged: ChannelAcceptResponse{ - CSVDelay: 1, - Reserve: 2, - InFlightTotal: 3, - }, - err: nil, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - resp, err := mergeResponse(test.current, test.new) - require.True(t, er.FuzzyEquals(test.err, err)) - - // If we expect an error, exit early rather than compare - // our result. - if test.err != nil { - return - } - - require.Equal(t, test.merged, resp) - }) - } -} diff --git a/lnd/chanacceptor/rpcacceptor.go b/lnd/chanacceptor/rpcacceptor.go deleted file mode 100644 index 7376282d..00000000 --- a/lnd/chanacceptor/rpcacceptor.go +++ /dev/null @@ -1,411 +0,0 @@ -package chanacceptor - -import ( - "encoding/hex" - "fmt" - "sync" - "time" - - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/chaincfg" - "github.com/pkt-cash/pktd/lnd/input" - "github.com/pkt-cash/pktd/lnd/lnrpc" - "github.com/pkt-cash/pktd/lnd/lnwallet/chancloser" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/pktlog/log" -) - -var ( - errShuttingDown = er.GenericErrorType.CodeWithDetail("errShuttingDown", "server shutting down") - - // errCustomLength is returned when our custom error's length exceeds - // our maximum. - errCustomLength = er.GenericErrorType.CodeWithDetail("errCustomLength", - fmt.Sprintf("custom error message exceeds length "+ - "limit: %v", maxErrorLength)) - - // errInvalidUpfrontShutdown is returned when we cannot parse the - // upfront shutdown address returned. 
- errInvalidUpfrontShutdown = er.GenericErrorType.CodeWithDetail("errInvalidUpfrontShutdown", - "could not parse upfront "+ - "shutdown address") - - // errInsufficientReserve is returned when the reserve proposed by for - // a channel is less than the dust limit originally supplied. - errInsufficientReserve = er.GenericErrorType.CodeWithDetail("errInsufficientReserve", - "reserve lower than proposed dust "+ - "limit") - - // errAcceptWithError is returned when we get a response which accepts - // a channel but ambiguously also sets a custom error message. - errAcceptWithError = er.GenericErrorType.CodeWithDetail("errAcceptWithError", - "channel acceptor response accepts "+ - "channel, but also includes custom error") - - // errMaxHtlcTooHigh is returned if our htlc count exceeds the number - // hard-set by BOLT 2. - errMaxHtlcTooHigh = er.GenericErrorType.CodeWithDetail("errMaxHtlcTooHigh", - fmt.Sprintf("htlc limit exceeds spec limit of: %v", - input.MaxHTLCNumber/2)) - - // maxErrorLength is the maximum error length we allow the error we - // send to our peer to be. - maxErrorLength = 500 -) - -// chanAcceptInfo contains a request for a channel acceptor decision, and a -// channel that the response should be sent on. -type chanAcceptInfo struct { - request *ChannelAcceptRequest - response chan *ChannelAcceptResponse -} - -// RPCAcceptor represents the RPC-controlled variant of the ChannelAcceptor. -// One RPCAcceptor allows one RPC client. -type RPCAcceptor struct { - // receive is a function from which we receive channel acceptance - // decisions. Note that this function is expected to block. - receive func() (*lnrpc.ChannelAcceptResponse, error) - - // send is a function which sends requests for channel acceptance - // decisions into our rpc stream. - send func(request *lnrpc.ChannelAcceptRequest) error - - // requests is a channel that we send requests for a acceptor response - // into. 
- requests chan *chanAcceptInfo - - // timeout is the amount of time we allow the channel acceptance - // decision to take. This time includes the time to send a query to the - // acceptor, and the time it takes to receive a response. - timeout time.Duration - - // params are our current chain params. - params *chaincfg.Params - - // done is closed when the rpc client terminates. - done chan struct{} - - // quit is closed when lnd is shutting down. - quit chan struct{} - - wg sync.WaitGroup -} - -// Accept is a predicate on the ChannelAcceptRequest which is sent to the RPC -// client who will respond with the ultimate decision. This function passes the -// request into the acceptor's requests channel, and returns the response it -// receives, failing the request if the timeout elapses. -// -// NOTE: Part of the ChannelAcceptor interface. -func (r *RPCAcceptor) Accept(req *ChannelAcceptRequest) *ChannelAcceptResponse { - respChan := make(chan *ChannelAcceptResponse, 1) - - newRequest := &chanAcceptInfo{ - request: req, - response: respChan, - } - - // timeout is the time after which ChannelAcceptRequests expire. - timeout := time.After(r.timeout) - - // Create a rejection response which we can use for the cases where we - // reject the channel. - rejectChannel := NewChannelAcceptResponse( - false, errChannelRejected.Default(), nil, 0, 0, 0, 0, 0, 0, - ) - - // Send the request to the newRequests channel. - select { - case r.requests <- newRequest: - - case <-timeout: - log.Errorf("RPCAcceptor returned false - reached timeout of %v", - r.timeout) - return rejectChannel - - case <-r.done: - return rejectChannel - - case <-r.quit: - return rejectChannel - } - - // Receive the response and return it. If no response has been received - // in AcceptorTimeout, then return false. 
- select { - case resp := <-respChan: - return resp - - case <-timeout: - log.Errorf("RPCAcceptor returned false - reached timeout of %v", - r.timeout) - return rejectChannel - - case <-r.done: - return rejectChannel - - case <-r.quit: - return rejectChannel - } -} - -// NewRPCAcceptor creates and returns an instance of the RPCAcceptor. -func NewRPCAcceptor(receive func() (*lnrpc.ChannelAcceptResponse, error), - send func(*lnrpc.ChannelAcceptRequest) error, timeout time.Duration, - params *chaincfg.Params, quit chan struct{}) *RPCAcceptor { - - return &RPCAcceptor{ - receive: receive, - send: send, - requests: make(chan *chanAcceptInfo), - timeout: timeout, - params: params, - done: make(chan struct{}), - quit: quit, - } -} - -// Run is the main loop for the RPC Acceptor. This function will block until -// it receives the signal that lnd is shutting down, or the rpc stream is -// cancelled by the client. -func (r *RPCAcceptor) Run() er.R { - // Wait for our goroutines to exit before we return. - defer r.wg.Wait() - - // Create a channel that responses from acceptors are sent into. - responses := make(chan lnrpc.ChannelAcceptResponse) - - // errChan is used by the receive loop to signal any errors that occur - // during reading from the stream. This is primarily used to shutdown - // the send loop in the case of an RPC client disconnecting. - errChan := make(chan er.R, 1) - - // Start a goroutine to receive responses from the channel acceptor. - // We expect the receive function to block, so it must be run in a - // goroutine (otherwise we could not send more than one channel accept - // request to the client). - r.wg.Add(1) - go func() { - r.receiveResponses(errChan, responses) - r.wg.Done() - }() - - return r.sendAcceptRequests(errChan, responses) -} - -// receiveResponses receives responses for our channel accept requests and -// dispatches them into the responses channel provided, sending any errors that -// occur into the error channel provided. 
-func (r *RPCAcceptor) receiveResponses(errChan chan er.R, - responses chan lnrpc.ChannelAcceptResponse) { - - for { - resp, err := r.receive() - if err != nil { - errChan <- er.E(err) - return - } - - var pendingID [32]byte - copy(pendingID[:], resp.PendingChanId) - - openChanResp := lnrpc.ChannelAcceptResponse{ - Accept: resp.Accept, - PendingChanId: pendingID[:], - Error: resp.Error, - UpfrontShutdown: resp.UpfrontShutdown, - CsvDelay: resp.CsvDelay, - ReserveSat: resp.ReserveSat, - InFlightMaxMsat: resp.InFlightMaxMsat, - MaxHtlcCount: resp.MaxHtlcCount, - MinHtlcIn: resp.MinHtlcIn, - MinAcceptDepth: resp.MinAcceptDepth, - } - - // We have received a decision for one of our channel - // acceptor requests. - select { - case responses <- openChanResp: - - case <-r.done: - return - - case <-r.quit: - return - } - } -} - -// sendAcceptRequests handles channel acceptor requests sent to us by our -// Accept() function, dispatching them to our acceptor stream and coordinating -// return of responses to their callers. -func (r *RPCAcceptor) sendAcceptRequests(errChan chan er.R, - responses chan lnrpc.ChannelAcceptResponse) er.R { - - // Close the done channel to indicate that the acceptor is no longer - // listening and any in-progress requests should be terminated. - defer close(r.done) - - // Create a map of pending channel IDs to our original open channel - // request and a response channel. We keep the original chanel open - // message so that we can validate our response against it. - acceptRequests := make(map[[32]byte]*chanAcceptInfo) - - for { - select { - // Consume requests passed to us from our Accept() function and - // send them into our stream. - case newRequest := <-r.requests: - - req := newRequest.request - pendingChanID := req.OpenChanMsg.PendingChannelID - - acceptRequests[pendingChanID] = newRequest - - // A ChannelAcceptRequest has been received, send it to the client. 
- chanAcceptReq := &lnrpc.ChannelAcceptRequest{ - NodePubkey: req.Node.SerializeCompressed(), - ChainHash: req.OpenChanMsg.ChainHash[:], - PendingChanId: req.OpenChanMsg.PendingChannelID[:], - FundingAmt: uint64(req.OpenChanMsg.FundingAmount), - PushAmt: uint64(req.OpenChanMsg.PushAmount), - DustLimit: uint64(req.OpenChanMsg.DustLimit), - MaxValueInFlight: uint64(req.OpenChanMsg.MaxValueInFlight), - ChannelReserve: uint64(req.OpenChanMsg.ChannelReserve), - MinHtlc: uint64(req.OpenChanMsg.HtlcMinimum), - FeePerKw: uint64(req.OpenChanMsg.FeePerKiloWeight), - CsvDelay: uint32(req.OpenChanMsg.CsvDelay), - MaxAcceptedHtlcs: uint32(req.OpenChanMsg.MaxAcceptedHTLCs), - ChannelFlags: uint32(req.OpenChanMsg.ChannelFlags), - } - - if err := r.send(chanAcceptReq); err != nil { - return er.E(err) - } - - // Process newly received responses from our channel acceptor, - // looking the original request up in our map of requests and - // dispatching the response. - case resp := <-responses: - // Look up the appropriate channel to send on given the - // pending ID. If a channel is found, send the response - // over it. - var pendingID [32]byte - copy(pendingID[:], resp.PendingChanId) - requestInfo, ok := acceptRequests[pendingID] - if !ok { - continue - } - - // Validate the response we have received. If it is not - // valid, we log our error and proceed to deliver the - // rejection. - accept, acceptErr, shutdown, err := r.validateAcceptorResponse( - requestInfo.request.OpenChanMsg.DustLimit, resp, - ) - if err != nil { - log.Errorf("Invalid acceptor response: %v", err) - } - - requestInfo.response <- NewChannelAcceptResponse( - accept, acceptErr, shutdown, - uint16(resp.CsvDelay), - uint16(resp.MaxHtlcCount), - uint16(resp.MinAcceptDepth), - btcutil.Amount(resp.ReserveSat), - lnwire.MilliSatoshi(resp.InFlightMaxMsat), - lnwire.MilliSatoshi(resp.MinHtlcIn), - ) - - // Delete the channel from the acceptRequests map. 
- delete(acceptRequests, pendingID) - - // If we failed to receive from our acceptor, we exit. - case err := <-errChan: - log.Errorf("Received an error: %v, shutting down", err) - return err - - // Exit if we are shutting down. - case <-r.quit: - return errShuttingDown.Default() - } - } -} - -// validateAcceptorResponse validates the response we get from the channel -// acceptor, returning a boolean indicating whether to accept the channel, an -// error to send to the peer, and any validation errors that occurred. -func (r *RPCAcceptor) validateAcceptorResponse(dustLimit btcutil.Amount, - req lnrpc.ChannelAcceptResponse) (bool, er.R, lnwire.DeliveryAddress, - er.R) { - - channelStr := hex.EncodeToString(req.PendingChanId) - - // Check that the max htlc count is within the BOLT 2 hard-limit of 483. - // The initiating side should fail values above this anyway, but we - // catch the invalid user input here. - if req.MaxHtlcCount > input.MaxHTLCNumber/2 { - log.Errorf("Max htlc count: %v for channel: %v is greater "+ - "than limit of: %v", req.MaxHtlcCount, channelStr, - input.MaxHTLCNumber/2) - - return false, errChannelRejected.Default(), nil, errMaxHtlcTooHigh.Default() - } - - // Ensure that the reserve that has been proposed, if it is set, is at - // least the dust limit that was proposed by the remote peer. This is - // required by BOLT 2. - reserveSat := btcutil.Amount(req.ReserveSat) - if reserveSat != 0 && reserveSat < dustLimit { - log.Errorf("Remote reserve: %v sat for channel: %v must be "+ - "at least equal to proposed dust limit: %v", - req.ReserveSat, channelStr, dustLimit) - - return false, errChannelRejected.Default(), nil, errInsufficientReserve.Default() - } - - // Attempt to parse the upfront shutdown address provided. 
- upfront, err := chancloser.ParseUpfrontShutdownAddress( - req.UpfrontShutdown, r.params, - ) - if err != nil { - log.Errorf("Could not parse upfront shutdown for "+ - "%v: %v", channelStr, err) - - return false, errChannelRejected.Default(), nil, errInvalidUpfrontShutdown.Default() - } - - // Check that the custom error provided is valid. - if len(req.Error) > maxErrorLength { - return false, errChannelRejected.Default(), nil, errCustomLength.Default() - } - - var haveCustomError = len(req.Error) != 0 - - switch { - // If accept is true, but we also have an error specified, we fail - // because this result is ambiguous. - case req.Accept && haveCustomError: - return false, errChannelRejected.Default(), nil, errAcceptWithError.Default() - - // If we accept without an error message, we can just return a nil - // error. - case req.Accept: - return true, nil, upfront, nil - - // If we reject the channel, and have a custom error, then we use it. - case haveCustomError: - return false, er.Errorf(req.Error), nil, nil - - // Otherwise, we have rejected the channel with no custom error, so we - // just use a generic error to fail the channel. - default: - return false, errChannelRejected.Default(), nil, nil - } -} - -// A compile-time constraint to ensure RPCAcceptor implements the ChannelAcceptor -// interface. 
-var _ ChannelAcceptor = (*RPCAcceptor)(nil) diff --git a/lnd/chanacceptor/rpcacceptor_test.go b/lnd/chanacceptor/rpcacceptor_test.go deleted file mode 100644 index 0114f552..00000000 --- a/lnd/chanacceptor/rpcacceptor_test.go +++ /dev/null @@ -1,139 +0,0 @@ -package chanacceptor - -import ( - "strings" - "testing" - - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/chaincfg" - "github.com/pkt-cash/pktd/lnd/input" - "github.com/pkt-cash/pktd/lnd/lnrpc" - "github.com/pkt-cash/pktd/lnd/lnwallet/chancloser" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/stretchr/testify/require" -) - -// TestValidateAcceptorResponse test validation of acceptor responses. -func TestValidateAcceptorResponse(t *testing.T) { - var ( - customError = er.New("custom error") - validAddr = "bcrt1qwrmq9uca0t3dy9t9wtuq5tm4405r7tfzyqn9pp" - addr, _ = chancloser.ParseUpfrontShutdownAddress( - validAddr, &chaincfg.TestNet3Params, - ) - ) - - tests := []struct { - name string - dustLimit btcutil.Amount - response lnrpc.ChannelAcceptResponse - accept bool - acceptorErr er.R - error er.R - shutdown lnwire.DeliveryAddress - }{ - { - name: "accepted with error", - response: lnrpc.ChannelAcceptResponse{ - Accept: true, - Error: customError.String(), - }, - accept: false, - acceptorErr: errChannelRejected.Default(), - error: errAcceptWithError.Default(), - }, - { - name: "custom error too long", - response: lnrpc.ChannelAcceptResponse{ - Accept: false, - Error: strings.Repeat(" ", maxErrorLength+1), - }, - accept: false, - acceptorErr: errChannelRejected.Default(), - error: errCustomLength.Default(), - }, - { - name: "accepted", - response: lnrpc.ChannelAcceptResponse{ - Accept: true, - UpfrontShutdown: validAddr, - }, - accept: true, - acceptorErr: nil, - error: nil, - shutdown: addr, - }, - { - name: "rejected with error", - response: lnrpc.ChannelAcceptResponse{ - Accept: false, - Error: customError.String(), - }, - accept: false, - 
acceptorErr: customError, - error: nil, - }, - { - name: "rejected with no error", - response: lnrpc.ChannelAcceptResponse{ - Accept: false, - }, - accept: false, - acceptorErr: errChannelRejected.Default(), - error: nil, - }, - { - name: "invalid upfront shutdown", - response: lnrpc.ChannelAcceptResponse{ - Accept: true, - UpfrontShutdown: "invalid addr", - }, - accept: false, - acceptorErr: errChannelRejected.Default(), - error: errInvalidUpfrontShutdown.Default(), - }, - { - name: "reserve too low", - dustLimit: 100, - response: lnrpc.ChannelAcceptResponse{ - Accept: true, - ReserveSat: 10, - }, - accept: false, - acceptorErr: errChannelRejected.Default(), - error: errInsufficientReserve.Default(), - }, - { - name: "max htlcs too high", - dustLimit: 100, - response: lnrpc.ChannelAcceptResponse{ - Accept: true, - MaxHtlcCount: 1 + input.MaxHTLCNumber/2, - }, - accept: false, - acceptorErr: errChannelRejected.Default(), - error: errMaxHtlcTooHigh.Default(), - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - // Create an acceptor, everything can be nil because - // we just need the params. 
- acceptor := NewRPCAcceptor( - nil, nil, 0, &chaincfg.TestNet3Params, nil, - ) - - accept, acceptErr, shutdown, err := acceptor.validateAcceptorResponse( - test.dustLimit, test.response, - ) - require.Equal(t, test.accept, accept) - require.True(t, er.FuzzyEquals(test.acceptorErr, acceptErr)) - require.True(t, er.FuzzyEquals(test.error, err)) - require.Equal(t, test.shutdown, shutdown) - }) - } -} diff --git a/lnd/chanbackup/backup.go b/lnd/chanbackup/backup.go deleted file mode 100644 index 67561369..00000000 --- a/lnd/chanbackup/backup.go +++ /dev/null @@ -1,100 +0,0 @@ -package chanbackup - -import ( - "net" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/pktlog/log" - "github.com/pkt-cash/pktd/wire" -) - -// LiveChannelSource is an interface that allows us to query for the set of -// live channels. A live channel is one that is open, and has not had a -// commitment transaction broadcast. -type LiveChannelSource interface { - // FetchAllChannels returns all known live channels. - FetchAllChannels() ([]*channeldb.OpenChannel, er.R) - - // FetchChannel attempts to locate a live channel identified by the - // passed chanPoint. - FetchChannel(chanPoint wire.OutPoint) (*channeldb.OpenChannel, er.R) - - // AddrsForNode returns all known addresses for the target node public - // key. - AddrsForNode(nodePub *btcec.PublicKey) ([]net.Addr, er.R) -} - -// assembleChanBackup attempts to assemble a static channel backup for the -// passed open channel. The backup includes all information required to restore -// the channel, as well as addressing information so we can find the peer and -// reconnect to them to initiate the protocol. 
-func assembleChanBackup(chanSource LiveChannelSource, - openChan *channeldb.OpenChannel) (*Single, er.R) { - - log.Debugf("Crafting backup for ChannelPoint(%v)", - openChan.FundingOutpoint) - - // First, we'll query the channel source to obtain all the addresses - // that are are associated with the peer for this channel. - nodeAddrs, err := chanSource.AddrsForNode(openChan.IdentityPub) - if err != nil { - return nil, err - } - - single := NewSingle(openChan, nodeAddrs) - - return &single, nil -} - -// FetchBackupForChan attempts to create a plaintext static channel backup for -// the target channel identified by its channel point. If we're unable to find -// the target channel, then an error will be returned. -func FetchBackupForChan(chanPoint wire.OutPoint, - chanSource LiveChannelSource) (*Single, er.R) { - - // First, we'll query the channel source to see if the channel is known - // and open within the database. - targetChan, err := chanSource.FetchChannel(chanPoint) - if err != nil { - // If we can't find the channel, then we return with an error, - // as we have nothing to backup. - return nil, er.Errorf("unable to find target channel") - } - - // Once we have the target channel, we can assemble the backup using - // the source to obtain any extra information that we may need. - staticChanBackup, err := assembleChanBackup(chanSource, targetChan) - if err != nil { - return nil, er.Errorf("unable to create chan backup: %v", err) - } - - return staticChanBackup, nil -} - -// FetchStaticChanBackups will return a plaintext static channel back up for -// all known active/open channels within the passed channel source. -func FetchStaticChanBackups(chanSource LiveChannelSource) ([]Single, er.R) { - // First, we'll query the backup source for information concerning all - // currently open and available channels. 
- openChans, err := chanSource.FetchAllChannels() - if err != nil { - return nil, err - } - - // Now that we have all the channels, we'll use the chanSource to - // obtain any auxiliary information we need to craft a backup for each - // channel. - staticChanBackups := make([]Single, 0, len(openChans)) - for _, openChan := range openChans { - chanBackup, err := assembleChanBackup(chanSource, openChan) - if err != nil { - return nil, err - } - - staticChanBackups = append(staticChanBackups, *chanBackup) - } - - return staticChanBackups, nil -} diff --git a/lnd/chanbackup/backup_test.go b/lnd/chanbackup/backup_test.go deleted file mode 100644 index c9d73a4d..00000000 --- a/lnd/chanbackup/backup_test.go +++ /dev/null @@ -1,197 +0,0 @@ -package chanbackup - -import ( - "net" - "testing" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/wire" -) - -type mockChannelSource struct { - chans map[wire.OutPoint]*channeldb.OpenChannel - - failQuery bool - - addrs map[[33]byte][]net.Addr -} - -func newMockChannelSource() *mockChannelSource { - return &mockChannelSource{ - chans: make(map[wire.OutPoint]*channeldb.OpenChannel), - addrs: make(map[[33]byte][]net.Addr), - } -} - -func (m *mockChannelSource) FetchAllChannels() ([]*channeldb.OpenChannel, er.R) { - if m.failQuery { - return nil, er.Errorf("fail") - } - - chans := make([]*channeldb.OpenChannel, 0, len(m.chans)) - for _, channel := range m.chans { - chans = append(chans, channel) - } - - return chans, nil -} - -func (m *mockChannelSource) FetchChannel(chanPoint wire.OutPoint) (*channeldb.OpenChannel, er.R) { - if m.failQuery { - return nil, er.Errorf("fail") - } - - channel, ok := m.chans[chanPoint] - if !ok { - return nil, er.Errorf("can't find chan") - } - - return channel, nil -} - -func (m *mockChannelSource) addAddrsForNode(nodePub *btcec.PublicKey, addrs []net.Addr) { - var nodeKey [33]byte - copy(nodeKey[:], 
nodePub.SerializeCompressed()) - - m.addrs[nodeKey] = addrs -} - -func (m *mockChannelSource) AddrsForNode(nodePub *btcec.PublicKey) ([]net.Addr, er.R) { - if m.failQuery { - return nil, er.Errorf("fail") - } - - var nodeKey [33]byte - copy(nodeKey[:], nodePub.SerializeCompressed()) - - addrs, ok := m.addrs[nodeKey] - if !ok { - return nil, er.Errorf("can't find addr") - } - - return addrs, nil -} - -// TestFetchBackupForChan tests that we're able to construct a single channel -// backup for channels that are known, unknown, and also channels in which we -// can find addresses for and otherwise. -func TestFetchBackupForChan(t *testing.T) { - t.Parallel() - - // First, we'll make two channels, only one of them will have all the - // information we need to construct set of backups for them. - randomChan1, err := genRandomOpenChannelShell() - if err != nil { - t.Fatalf("unable to generate chan: %v", err) - } - randomChan2, err := genRandomOpenChannelShell() - if err != nil { - t.Fatalf("unable to generate chan: %v", err) - } - - chanSource := newMockChannelSource() - chanSource.chans[randomChan1.FundingOutpoint] = randomChan1 - chanSource.chans[randomChan2.FundingOutpoint] = randomChan2 - - chanSource.addAddrsForNode(randomChan1.IdentityPub, []net.Addr{addr1}) - - testCases := []struct { - chanPoint wire.OutPoint - - pass bool - }{ - // Able to find channel, and addresses, should pass. - { - chanPoint: randomChan1.FundingOutpoint, - pass: true, - }, - - // Able to find channel, not able to find addrs, should fail. - { - chanPoint: randomChan2.FundingOutpoint, - pass: false, - }, - - // Not able to find channel, should fail. - { - chanPoint: op, - pass: false, - }, - } - for i, testCase := range testCases { - _, err := FetchBackupForChan(testCase.chanPoint, chanSource) - switch { - // If this is a valid test case, and we failed, then we'll - // return an error. 
- case err != nil && testCase.pass: - t.Fatalf("#%v, unable to make chan backup: %v", i, err) - - // If this is an invalid test case, and we passed it, then - // we'll return an error. - case err == nil && !testCase.pass: - t.Fatalf("#%v got nil error for invalid req: %v", - i, err) - } - } -} - -// TestFetchStaticChanBackups tests that we're able to properly query the -// channel source for all channels and construct a Single for each channel. -func TestFetchStaticChanBackups(t *testing.T) { - t.Parallel() - - // First, we'll make the set of channels that we want to seed the - // channel source with. Both channels will be fully populated in the - // channel source. - const numChans = 2 - randomChan1, err := genRandomOpenChannelShell() - if err != nil { - t.Fatalf("unable to generate chan: %v", err) - } - randomChan2, err := genRandomOpenChannelShell() - if err != nil { - t.Fatalf("unable to generate chan: %v", err) - } - - chanSource := newMockChannelSource() - chanSource.chans[randomChan1.FundingOutpoint] = randomChan1 - chanSource.chans[randomChan2.FundingOutpoint] = randomChan2 - chanSource.addAddrsForNode(randomChan1.IdentityPub, []net.Addr{addr1}) - chanSource.addAddrsForNode(randomChan2.IdentityPub, []net.Addr{addr2}) - - // With the channel source populated, we'll now attempt to create a set - // of backups for all the channels. This should succeed, as all items - // are populated within the channel source. - backups, err := FetchStaticChanBackups(chanSource) - if err != nil { - t.Fatalf("unable to create chan back ups: %v", err) - } - - if len(backups) != numChans { - t.Fatalf("expected %v chans, instead got %v", numChans, - len(backups)) - } - - // We'll attempt to create a set up backups again, but this time the - // second channel will have missing information, which should cause the - // query to fail. 
- var n [33]byte - copy(n[:], randomChan2.IdentityPub.SerializeCompressed()) - delete(chanSource.addrs, n) - - _, err = FetchStaticChanBackups(chanSource) - if err == nil { - t.Fatalf("query with incomplete information should fail") - } - - // To wrap up, we'll ensure that if we're unable to query the channel - // source at all, then we'll fail as well. - chanSource = newMockChannelSource() - chanSource.failQuery = true - _, err = FetchStaticChanBackups(chanSource) - if err == nil { - t.Fatalf("query should fail") - } -} diff --git a/lnd/chanbackup/backupfile.go b/lnd/chanbackup/backupfile.go deleted file mode 100644 index a41affca..00000000 --- a/lnd/chanbackup/backupfile.go +++ /dev/null @@ -1,152 +0,0 @@ -package chanbackup - -import ( - "io/ioutil" - "os" - "path/filepath" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/keychain" - "github.com/pkt-cash/pktd/pktlog/log" -) - -const ( - // DefaultBackupFileName is the default name of the auto updated static - // channel backup fie. - DefaultBackupFileName = "channel.backup" - - // DefaultTempBackupFileName is the default name of the temporary SCB - // file that we'll use to atomically update the primary back up file - // when new channel are detected. - DefaultTempBackupFileName = "temp-dont-use.backup" -) - -var ( - // ErrNoBackupFileExists is returned if caller attempts to call - // UpdateAndSwap with the file name not set. - ErrNoBackupFileExists = er.GenericErrorType.CodeWithDetail("ErrNoBackupFileExists", - "back up file name not set") - - // ErrNoTempBackupFile is returned if caller attempts to call - // UpdateAndSwap with the temp back up file name not set. 
- ErrNoTempBackupFile = er.GenericErrorType.CodeWithDetail("ErrNoTempBackupFile", - "temp backup file not set") -) - -// MultiFile represents a file on disk that a caller can use to read the packed -// multi backup into an unpacked one, and also atomically update the contents -// on disk once new channels have been opened, and old ones closed. This struct -// relies on an atomic file rename property which most widely use file systems -// have. -type MultiFile struct { - // fileName is the file name of the main back up file. - fileName string - - // tempFileName is the name of the file that we'll use to stage a new - // packed multi-chan backup, and the rename to the main back up file. - tempFileName string - - // tempFile is an open handle to the temp back up file. - tempFile *os.File -} - -// NewMultiFile create a new multi-file instance at the target location on the -// file system. -func NewMultiFile(fileName string) *MultiFile { - - // We'll our temporary backup file in the very same directory as the - // main backup file. - backupFileDir := filepath.Dir(fileName) - tempFileName := filepath.Join( - backupFileDir, DefaultTempBackupFileName, - ) - - return &MultiFile{ - fileName: fileName, - tempFileName: tempFileName, - } -} - -// UpdateAndSwap will attempt write a new temporary backup file to disk with -// the newBackup encoded, then atomically swap (via rename) the old file for -// the new file by updating the name of the new file to the old. -func (b *MultiFile) UpdateAndSwap(newBackup PackedMulti) er.R { - // If the main backup file isn't set, then we can't proceed. - if b.fileName == "" { - return ErrNoBackupFileExists.Default() - } - - log.Infof("Updating backup file at %v", b.fileName) - - // If the old back up file still exists, then we'll delete it before - // proceeding. 
- if _, err := os.Stat(b.tempFileName); err == nil { - log.Infof("Found old temp backup @ %v, removing before swap", - b.tempFileName) - - err = os.Remove(b.tempFileName) - if err != nil { - return er.Errorf("unable to remove temp "+ - "backup file: %v", err) - } - } - - // Now that we know the staging area is clear, we'll create the new - // temporary back up file. - var err error - b.tempFile, err = os.Create(b.tempFileName) - if err != nil { - return er.Errorf("unable to create temp file: %v", err) - } - - // With the file created, we'll write the new packed multi backup and - // remove the temporary file all together once this method exits. - _, err = b.tempFile.Write([]byte(newBackup)) - if err != nil { - return er.Errorf("unable to write backup to temp file: %v", err) - } - if err := b.tempFile.Sync(); err != nil { - return er.Errorf("unable to sync temp file: %v", err) - } - defer os.Remove(b.tempFileName) - - log.Infof("Swapping old multi backup file from %v to %v", - b.tempFileName, b.fileName) - - // Before we rename the swap (atomic name swap), we'll make - // sure to close the current file as some OSes don't support - // renaming a file that's already open (Windows). - if err := b.tempFile.Close(); err != nil { - return er.Errorf("unable to close file: %v", err) - } - - // Finally, we'll attempt to atomically rename the temporary file to - // the main back up file. If this succeeds, then we'll only have a - // single file on disk once this method exits. - return er.E(os.Rename(b.tempFileName, b.fileName)) -} - -// ExtractMulti attempts to extract the packed multi backup we currently point -// to into an unpacked version. This method will fail if no backup file -// currently exists as the specified location. -func (b *MultiFile) ExtractMulti(keyChain keychain.KeyRing) (*Multi, er.R) { - var err error - - // We'll return an error if the main file isn't currently set. 
- if b.fileName == "" { - return nil, ErrNoBackupFileExists.Default() - } - - // Now that we've confirmed the target file is populated, we'll read - // all the contents of the file. This function ensures that file is - // always closed, even if we can't read the contents. - multiBytes, err := ioutil.ReadFile(b.fileName) - if err != nil { - return nil, er.E(err) - } - - // Finally, we'll attempt to unpack the file and return the unpack - // version to the caller. - packedMulti := PackedMulti(multiBytes) - return packedMulti.Unpack(keyChain) -} diff --git a/lnd/chanbackup/backupfile_test.go b/lnd/chanbackup/backupfile_test.go deleted file mode 100644 index e30ec572..00000000 --- a/lnd/chanbackup/backupfile_test.go +++ /dev/null @@ -1,290 +0,0 @@ -package chanbackup - -import ( - "bytes" - "io/ioutil" - "math/rand" - "os" - "path/filepath" - "testing" - - "github.com/pkt-cash/pktd/btcutil/er" -) - -func makeFakePackedMulti() (PackedMulti, er.R) { - newPackedMulti := make([]byte, 50) - if _, err := rand.Read(newPackedMulti[:]); err != nil { - return nil, er.Errorf("unable to make test backup: %v", err) - } - - return PackedMulti(newPackedMulti), nil -} - -func assertBackupMatches(t *testing.T, filePath string, - currentBackup PackedMulti) { - - t.Helper() - - packedBackup, err := ioutil.ReadFile(filePath) - if err != nil { - t.Fatalf("unable to test file: %v", err) - } - - if !bytes.Equal(packedBackup, currentBackup) { - t.Fatalf("backups don't match after first swap: "+ - "expected %x got %x", packedBackup[:], - currentBackup) - } -} - -func assertFileDeleted(t *testing.T, filePath string) { - t.Helper() - - _, err := os.Stat(filePath) - if err == nil { - t.Fatalf("file %v still exists: ", filePath) - } -} - -// TestUpdateAndSwap test that we're able to properly swap out old backups on -// disk with new ones. Additionally, after a swap operation succeeds, then each -// time we should only have the main backup file on disk, as the temporary file -// has been removed. 
-func TestUpdateAndSwap(t *testing.T) { - t.Parallel() - - tempTestDir, err := ioutil.TempDir("", "") - if err != nil { - t.Fatalf("unable to make temp dir: %v", err) - } - defer os.Remove(tempTestDir) - - testCases := []struct { - fileName string - tempFileName string - - oldTempExists bool - - valid bool - }{ - // Main file name is blank, should fail. - { - fileName: "", - valid: false, - }, - - // Old temporary file still exists, should be removed. Only one - // file should remain. - { - fileName: filepath.Join( - tempTestDir, DefaultBackupFileName, - ), - tempFileName: filepath.Join( - tempTestDir, DefaultTempBackupFileName, - ), - oldTempExists: true, - valid: true, - }, - - // Old temp doesn't exist, should swap out file, only a single - // file remains. - { - fileName: filepath.Join( - tempTestDir, DefaultBackupFileName, - ), - tempFileName: filepath.Join( - tempTestDir, DefaultTempBackupFileName, - ), - valid: true, - }, - } - for i, testCase := range testCases { - // Ensure that all created files are removed at the end of the - // test case. - defer os.Remove(testCase.fileName) - defer os.Remove(testCase.tempFileName) - - backupFile := NewMultiFile(testCase.fileName) - - // To start with, we'll make a random byte slice that'll pose - // as our packed multi backup. - newPackedMulti, err := makeFakePackedMulti() - if err != nil { - t.Fatalf("unable to make test backup: %v", err) - } - - // If the old temporary file is meant to exist, then we'll - // create it now as an empty file. - if testCase.oldTempExists { - _, err := os.Create(testCase.tempFileName) - if err != nil { - t.Fatalf("unable to create temp file: %v", err) - } - - // TODO(roasbeef): mock out fs calls? - } - - // With our backup created, we'll now attempt to swap out this - // backup, for the old one. - err = backupFile.UpdateAndSwap(PackedMulti(newPackedMulti)) - switch { - // If this is a valid test case, and we failed, then we'll - // return an error. 
- case err != nil && testCase.valid: - t.Fatalf("#%v, unable to swap file: %v", i, err) - - // If this is an invalid test case, and we passed it, then - // we'll return an error. - case err == nil && !testCase.valid: - t.Fatalf("#%v file swap should have failed: %v", i, err) - } - - if !testCase.valid { - continue - } - - // If we read out the file on disk, then it should match - // exactly what we wrote. The temp backup file should also be - // gone. - assertBackupMatches(t, testCase.fileName, newPackedMulti) - assertFileDeleted(t, testCase.tempFileName) - - // Now that we know this is a valid test case, we'll make a new - // packed multi to swap out this current one. - newPackedMulti2, err := makeFakePackedMulti() - if err != nil { - t.Fatalf("unable to make test backup: %v", err) - } - - // We'll then attempt to swap the old version for this new one. - err = backupFile.UpdateAndSwap(PackedMulti(newPackedMulti2)) - if err != nil { - t.Fatalf("unable to swap file: %v", err) - } - - // Once again, the file written on disk should have been - // properly swapped out with the new instance. - assertBackupMatches(t, testCase.fileName, newPackedMulti2) - - // Additionally, we shouldn't be able to find the temp backup - // file on disk, as it should be deleted each time. - assertFileDeleted(t, testCase.tempFileName) - } -} - -func assertMultiEqual(t *testing.T, a, b *Multi) { - - if len(a.StaticBackups) != len(b.StaticBackups) { - t.Fatalf("expected %v backups, got %v", len(a.StaticBackups), - len(b.StaticBackups)) - } - - for i := 0; i < len(a.StaticBackups); i++ { - assertSingleEqual(t, a.StaticBackups[i], b.StaticBackups[i]) - } -} - -// TestExtractMulti tests that given a valid packed multi file on disk, we're -// able to read it multiple times repeatedly. -func TestExtractMulti(t *testing.T) { - t.Parallel() - - keyRing := &mockKeyRing{} - - // First, as prep, we'll create a single chan backup, then pack that - // fully into a multi backup. 
- channel, err := genRandomOpenChannelShell() - if err != nil { - t.Fatalf("unable to gen chan: %v", err) - } - - singleBackup := NewSingle(channel, nil) - - var b bytes.Buffer - unpackedMulti := Multi{ - StaticBackups: []Single{singleBackup}, - } - err = unpackedMulti.PackToWriter(&b, keyRing) - if err != nil { - t.Fatalf("unable to pack to writer: %v", err) - } - - packedMulti := PackedMulti(b.Bytes()) - - // Finally, we'll make a new temporary file, then write out the packed - // multi directly to to it. - tempFile, errr := ioutil.TempFile("", "") - if errr != nil { - t.Fatalf("unable to create temp file: %v", errr) - } - defer os.Remove(tempFile.Name()) - - _, errr = tempFile.Write(packedMulti) - if errr != nil { - t.Fatalf("unable to write temp file: %v", errr) - } - if err := tempFile.Sync(); err != nil { - t.Fatalf("unable to sync temp file: %v", err) - } - - testCases := []struct { - fileName string - pass bool - }{ - // Main file not read, file name not present. - { - fileName: "", - pass: false, - }, - - // Main file not read, file name is there, but file doesn't - // exist. - { - fileName: "kek", - pass: false, - }, - - // Main file not read, should be able to read multiple times. - { - fileName: tempFile.Name(), - pass: true, - }, - } - for i, testCase := range testCases { - // First, we'll make our backup file with the specified name. - backupFile := NewMultiFile(testCase.fileName) - - // With our file made, we'll now attempt to read out the - // multi-file. - freshUnpackedMulti, err := backupFile.ExtractMulti(keyRing) - switch { - // If this is a valid test case, and we failed, then we'll - // return an error. - case err != nil && testCase.pass: - t.Fatalf("#%v, unable to extract file: %v", i, err) - - // If this is an invalid test case, and we passed it, then - // we'll return an error. 
- case err == nil && !testCase.pass: - t.Fatalf("#%v file extraction should have "+ - "failed: %v", i, err) - } - - if !testCase.pass { - continue - } - - // We'll now ensure that the unpacked multi we read is - // identical to the one we wrote out above. - assertMultiEqual(t, &unpackedMulti, freshUnpackedMulti) - - // We should also be able to read the file again, as we have an - // existing handle to it. - freshUnpackedMulti, err = backupFile.ExtractMulti(keyRing) - if err != nil { - t.Fatalf("unable to unpack multi: %v", err) - } - - assertMultiEqual(t, &unpackedMulti, freshUnpackedMulti) - } -} diff --git a/lnd/chanbackup/crypto.go b/lnd/chanbackup/crypto.go deleted file mode 100644 index e2e559a7..00000000 --- a/lnd/chanbackup/crypto.go +++ /dev/null @@ -1,141 +0,0 @@ -package chanbackup - -import ( - "bytes" - "crypto/rand" - "crypto/sha256" - "io" - "io/ioutil" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/lnd/keychain" - "golang.org/x/crypto/chacha20poly1305" -) - -// TODO(roasbeef): interface in front of? - -// baseEncryptionKeyLoc is the KeyLocator that we'll use to derive the base -// encryption key used for encrypting all static channel backups. We use this -// to then derive the actual key that we'll use for encryption. We do this -// rather than using the raw key, as we assume that we can't obtain the raw -// keys, and we don't want to require that the HSM know our target cipher for -// encryption. -// -// TODO(roasbeef): possibly unique encrypt? -var baseEncryptionKeyLoc = keychain.KeyLocator{ - Family: keychain.KeyFamilyStaticBackup, - Index: 0, -} - -// genEncryptionKey derives the key that we'll use to encrypt all of our static -// channel backups. The key itself, is the sha2 of a base key that we get from -// the keyring. 
We derive the key this way as we don't force the HSM (or any -// future abstractions) to be able to derive and know of the cipher that we'll -// use within our protocol. -func genEncryptionKey(keyRing keychain.KeyRing) ([]byte, er.R) { - // key = SHA256(baseKey) - baseKey, err := keyRing.DeriveKey( - baseEncryptionKeyLoc, - ) - if err != nil { - return nil, err - } - - encryptionKey := sha256.Sum256( - baseKey.PubKey.SerializeCompressed(), - ) - - // TODO(roasbeef): throw back in ECDH? - - return encryptionKey[:], nil -} - -// encryptPayloadToWriter attempts to write the set of bytes contained within -// the passed byes.Buffer into the passed io.Writer in an encrypted form. We -// use a 24-byte chachapoly AEAD instance with a randomized nonce that's -// pre-pended to the final payload and used as associated data in the AEAD. We -// use the passed keyRing to generate the encryption key, see genEncryptionKey -// for further details. -func encryptPayloadToWriter(payload bytes.Buffer, w io.Writer, - keyRing keychain.KeyRing) er.R { - - // First, we'll derive the key that we'll use to encrypt the payload - // for safe storage without giving away the details of any of our - // channels. The final operation is: - // - // key = SHA256(baseKey) - encryptionKey, err := genEncryptionKey(keyRing) - if err != nil { - return err - } - - // Before encryption, we'll initialize our cipher with the target - // encryption key, and also read out our random 24-byte nonce we use - // for encryption. Note that we use NewX, not New, as the latter - // version requires a 12-byte nonce, not a 24-byte nonce. - cipher, errr := chacha20poly1305.NewX(encryptionKey) - if errr != nil { - return er.E(errr) - } - var nonce [chacha20poly1305.NonceSizeX]byte - if _, errr := rand.Read(nonce[:]); errr != nil { - return er.E(errr) - } - - // Finally, we encrypted the final payload, and write out our - // ciphertext with nonce pre-pended. 
- ciphertext := cipher.Seal(nil, nonce[:], payload.Bytes(), nonce[:]) - - if _, err := util.Write(w, nonce[:]); err != nil { - return err - } - if _, err := util.Write(w, ciphertext); err != nil { - return err - } - - return nil -} - -// decryptPayloadFromReader attempts to decrypt the encrypted bytes within the -// passed io.Reader instance using the key derived from the passed keyRing. For -// further details regarding the key derivation protocol, see the -// genEncryptionKey method. -func decryptPayloadFromReader(payload io.Reader, - keyRing keychain.KeyRing) ([]byte, er.R) { - - // First, we'll re-generate the encryption key that we use for all the - // SCBs. - encryptionKey, err := genEncryptionKey(keyRing) - if err != nil { - return nil, err - } - - // Next, we'll read out the entire blob as we need to isolate the nonce - // from the rest of the ciphertext. - packedBackup, errr := ioutil.ReadAll(payload) - if errr != nil { - return nil, er.E(errr) - } - if len(packedBackup) < chacha20poly1305.NonceSizeX { - return nil, er.Errorf("payload size too small, must be at "+ - "least %v bytes", chacha20poly1305.NonceSizeX) - } - - nonce := packedBackup[:chacha20poly1305.NonceSizeX] - ciphertext := packedBackup[chacha20poly1305.NonceSizeX:] - - // Now that we have the cipher text and the nonce separated, we can go - // ahead and decrypt the final blob so we can properly serialized the - // SCB. 
- cipher, errr := chacha20poly1305.NewX(encryptionKey) - if errr != nil { - return nil, er.E(errr) - } - plaintext, errr := cipher.Open(nil, nonce, ciphertext, nonce) - if errr != nil { - return nil, er.E(errr) - } - - return plaintext, nil -} diff --git a/lnd/chanbackup/crypto_test.go b/lnd/chanbackup/crypto_test.go deleted file mode 100644 index fd2039f4..00000000 --- a/lnd/chanbackup/crypto_test.go +++ /dev/null @@ -1,156 +0,0 @@ -package chanbackup - -import ( - "bytes" - "testing" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/keychain" -) - -var ( - testWalletPrivKey = []byte{ - 0x2b, 0xd8, 0x06, 0xc9, 0x7f, 0x0e, 0x00, 0xaf, - 0x1a, 0x1f, 0xc3, 0x32, 0x8f, 0xa7, 0x63, 0xa9, - 0x26, 0x97, 0x23, 0xc8, 0xdb, 0x8f, 0xac, 0x4f, - 0x93, 0xaf, 0x71, 0xdb, 0x18, 0x6d, 0x6e, 0x90, - } -) - -type mockKeyRing struct { - fail bool -} - -func (m *mockKeyRing) DeriveNextKey(keyFam keychain.KeyFamily) (keychain.KeyDescriptor, er.R) { - return keychain.KeyDescriptor{}, nil -} -func (m *mockKeyRing) DeriveKey(keyLoc keychain.KeyLocator) (keychain.KeyDescriptor, er.R) { - if m.fail { - return keychain.KeyDescriptor{}, er.Errorf("fail") - } - - _, pub := btcec.PrivKeyFromBytes(btcec.S256(), testWalletPrivKey) - return keychain.KeyDescriptor{ - PubKey: pub, - }, nil -} - -// TestEncryptDecryptPayload tests that given a static key, we're able to -// properly decrypt and encrypted payload. We also test that we'll reject a -// ciphertext that has been modified. -func TestEncryptDecryptPayload(t *testing.T) { - t.Parallel() - - payloadCases := []struct { - // plaintext is the string that we'll be encrypting. - plaintext []byte - - // mutator allows a test case to modify the ciphertext before - // we attempt to decrypt it. - mutator func(*[]byte) - - // valid indicates if this test should pass or fail. - valid bool - }{ - // Proper payload, should decrypt. 
- { - plaintext: []byte("payload test plain text"), - mutator: nil, - valid: true, - }, - - // Mutator modifies cipher text, shouldn't decrypt. - { - plaintext: []byte("payload test plain text"), - mutator: func(p *[]byte) { - // Flip a byte in the payload to render it invalid. - (*p)[0] ^= 1 - }, - valid: false, - }, - - // Cipher text is too small, shouldn't decrypt. - { - plaintext: []byte("payload test plain text"), - mutator: func(p *[]byte) { - // Modify the cipher text to be zero length. - *p = []byte{} - }, - valid: false, - }, - } - - keyRing := &mockKeyRing{} - - for i, payloadCase := range payloadCases { - var cipherBuffer bytes.Buffer - - // First, we'll encrypt the passed payload with our scheme. - payloadReader := bytes.NewBuffer(payloadCase.plaintext) - err := encryptPayloadToWriter( - *payloadReader, &cipherBuffer, keyRing, - ) - if err != nil { - t.Fatalf("unable encrypt paylaod: %v", err) - } - - // If we have a mutator, then we'll wrong the mutator over the - // cipher text, then reset the main buffer and re-write the new - // cipher text. - if payloadCase.mutator != nil { - cipherText := cipherBuffer.Bytes() - - payloadCase.mutator(&cipherText) - - cipherBuffer.Reset() - cipherBuffer.Write(cipherText) - } - - plaintext, err := decryptPayloadFromReader(&cipherBuffer, keyRing) - - switch { - // If this was meant to be a valid decryption, but we failed, - // then we'll return an error. - case err != nil && payloadCase.valid: - t.Fatalf("unable to decrypt valid payload case %v", i) - - // If this was meant to be an invalid decryption, and we didn't - // fail, then we'll return an error. - case err == nil && !payloadCase.valid: - t.Fatalf("payload was invalid yet was able to decrypt") - } - - // Only if this case was mean to be valid will we ensure the - // resulting decrypted plaintext matches the original input. 
- if payloadCase.valid && - !bytes.Equal(plaintext, payloadCase.plaintext) { - t.Fatalf("#%v: expected %v, got %v: ", i, - payloadCase.plaintext, plaintext) - } - } -} - -// TestInvalidKeyEncryption tests that encryption fails if we're unable to -// obtain a valid key. -func TestInvalidKeyEncryption(t *testing.T) { - t.Parallel() - - var b bytes.Buffer - err := encryptPayloadToWriter(b, &b, &mockKeyRing{true}) - if err == nil { - t.Fatalf("expected error due to fail key gen") - } -} - -// TestInvalidKeyDecrytion tests that decryption fails if we're unable to -// obtain a valid key. -func TestInvalidKeyDecrytion(t *testing.T) { - t.Parallel() - - var b bytes.Buffer - _, err := decryptPayloadFromReader(&b, &mockKeyRing{true}) - if err == nil { - t.Fatalf("expected error due to fail key gen") - } -} diff --git a/lnd/chanbackup/multi.go b/lnd/chanbackup/multi.go deleted file mode 100644 index cad73e41..00000000 --- a/lnd/chanbackup/multi.go +++ /dev/null @@ -1,181 +0,0 @@ -package chanbackup - -import ( - "bytes" - "io" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/keychain" - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -// MultiBackupVersion denotes the version of the multi channel static channel -// backup. Based on this version, we know how to encode/decode packed/unpacked -// versions of multi backups. -type MultiBackupVersion byte - -const ( - // DefaultMultiVersion is the default version of the multi channel - // backup. The serialized format for this version is simply: version || - // numBackups || SCBs... - DefaultMultiVersion = 0 - - // NilMultiSizePacked is the size of a "nil" packed Multi (45 bytes). - // This consists of the 24 byte chacha nonce, the 16 byte MAC, one byte - // for the version, and 4 bytes to signal zero entries. - NilMultiSizePacked = 24 + 16 + 1 + 4 -) - -// Multi is a form of static channel backup that is amenable to being -// serialized in a single file. 
Rather than a series of ciphertexts, a -// multi-chan backup is a single ciphertext of all static channel backups -// concatenated. This form factor gives users a single blob that they can use -// to safely copy/obtain at anytime to backup their channels. -type Multi struct { - // Version is the version that should be observed when attempting to - // pack the multi backup. - Version MultiBackupVersion - - // StaticBackups is the set of single channel backups that this multi - // backup is comprised of. - StaticBackups []Single -} - -// PackToWriter packs (encrypts+serializes) the target set of static channel -// backups into a single AEAD ciphertext into the passed io.Writer. This is the -// opposite of UnpackFromReader. The plaintext form of a multi-chan backup is -// the following: a 4 byte integer denoting the number of serialized static -// channel backups serialized, a series of serialized static channel backups -// concatenated. To pack this payload, we then apply our chacha20 AEAD to the -// entire payload, using the 24-byte nonce as associated data. -func (m Multi) PackToWriter(w io.Writer, keyRing keychain.KeyRing) er.R { - // The only version that we know how to pack atm is version 0. Attempts - // to pack any other version will result in an error. - switch m.Version { - case DefaultMultiVersion: - break - - default: - return er.Errorf("unable to pack unknown multi-version "+ - "of %v", m.Version) - } - - var multiBackupBuffer bytes.Buffer - - // First, we'll write out the version of this multi channel baackup. - err := lnwire.WriteElements(&multiBackupBuffer, byte(m.Version)) - if err != nil { - return err - } - - // Now that we've written out the version of this multi-pack format, - // we'll now write the total number of backups to expect after this - // point. 
- numBackups := uint32(len(m.StaticBackups)) - err = lnwire.WriteElements(&multiBackupBuffer, numBackups) - if err != nil { - return err - } - - // Next, we'll serialize the raw plaintext version of each of the - // backup into the intermediate buffer. - for _, chanBackup := range m.StaticBackups { - err := chanBackup.Serialize(&multiBackupBuffer) - if err != nil { - return er.Errorf("unable to serialize backup "+ - "for %v: %v", chanBackup.FundingOutpoint, err) - } - } - - // With the plaintext multi backup assembled, we'll now encrypt it - // directly to the passed writer. - return encryptPayloadToWriter(multiBackupBuffer, w, keyRing) -} - -// UnpackFromReader attempts to unpack (decrypt+deserialize) a packed -// multi-chan backup form the passed io.Reader. If we're unable to decrypt the -// any portion of the multi-chan backup, an error will be returned. -func (m *Multi) UnpackFromReader(r io.Reader, keyRing keychain.KeyRing) er.R { - // We'll attempt to read the entire packed backup, and also decrypt it - // using the passed key ring which is expected to be able to derive the - // encryption keys. - plaintextBackup, err := decryptPayloadFromReader(r, keyRing) - if err != nil { - return err - } - backupReader := bytes.NewReader(plaintextBackup) - - // Now that we've decrypted the payload successfully, we can parse out - // each of the individual static channel backups. - - // First, we'll need to read the version of this multi-back up so we - // can know how to unpack each of the individual SCB's. - var multiVersion byte - err = lnwire.ReadElements(backupReader, &multiVersion) - if err != nil { - return err - } - - m.Version = MultiBackupVersion(multiVersion) - switch m.Version { - - // The default version is simply a set of serialized SCB's with the - // number of total SCB's prepended to the front of the byte slice. 
- case DefaultMultiVersion: - // First, we'll need to read out the total number of backups - // that've been serialized into this multi-chan backup. Each - // backup is the same size, so we can continue until we've - // parsed out everything. - var numBackups uint32 - err = lnwire.ReadElements(backupReader, &numBackups) - if err != nil { - return err - } - - // We'll continue to parse out each backup until we've read all - // that was indicated from the length prefix. - for ; numBackups != 0; numBackups-- { - // Attempt to parse out the net static channel backup, - // if it's been malformed, then we'll return with an - // error - var chanBackup Single - err := chanBackup.Deserialize(backupReader) - if err != nil { - return err - } - - // Collect the next valid chan backup into the main - // multi backup slice. - m.StaticBackups = append(m.StaticBackups, chanBackup) - } - - default: - return er.Errorf("unable to unpack unknown multi-version "+ - "of %v", multiVersion) - } - - return nil -} - -// TODO(roasbeef): new key ring interface? -// * just returns key given params? - -// PackedMulti represents a raw fully packed (serialized+encrypted) -// multi-channel static channel backup. -type PackedMulti []byte - -// Unpack attempts to unpack (decrypt+desrialize) the target packed -// multi-channel back up. If we're unable to fully unpack this back, then an -// error will be returned. -func (p *PackedMulti) Unpack(keyRing keychain.KeyRing) (*Multi, er.R) { - var m Multi - - packedReader := bytes.NewReader(*p) - if err := m.UnpackFromReader(packedReader, keyRing); err != nil { - return nil, err - } - - return &m, nil -} - -// TODO(roasbsef): fuzz parsing diff --git a/lnd/chanbackup/multi_test.go b/lnd/chanbackup/multi_test.go deleted file mode 100644 index a6317e09..00000000 --- a/lnd/chanbackup/multi_test.go +++ /dev/null @@ -1,159 +0,0 @@ -package chanbackup - -import ( - "bytes" - "net" - "testing" -) - -// TestMultiPackUnpack... 
-func TestMultiPackUnpack(t *testing.T) { - t.Parallel() - - var multi Multi - numSingles := 10 - originalSingles := make([]Single, 0, numSingles) - for i := 0; i < numSingles; i++ { - channel, err := genRandomOpenChannelShell() - if err != nil { - t.Fatalf("unable to gen channel: %v", err) - } - - single := NewSingle(channel, []net.Addr{addr1, addr2}) - - originalSingles = append(originalSingles, single) - multi.StaticBackups = append(multi.StaticBackups, single) - } - - keyRing := &mockKeyRing{} - - versionTestCases := []struct { - // version is the pack/unpack version that we should use to - // decode/encode the final SCB. - version MultiBackupVersion - - // valid tests us if this test case should pass or not. - valid bool - }{ - // The default version, should pack/unpack with no problem. - { - version: DefaultSingleVersion, - valid: true, - }, - - // A non-default version, atm this should result in a failure. - { - version: 99, - valid: false, - }, - } - for i, versionCase := range versionTestCases { - multi.Version = versionCase.version - - var b bytes.Buffer - err := multi.PackToWriter(&b, keyRing) - switch { - // If this is a valid test case, and we failed, then we'll - // return an error. - case err != nil && versionCase.valid: - t.Fatalf("#%v, unable to pack multi: %v", i, err) - - // If this is an invalid test case, and we passed it, then - // we'll return an error. - case err == nil && !versionCase.valid: - t.Fatalf("#%v got nil error for invalid pack: %v", - i, err) - } - - // If this is a valid test case, then we'll continue to ensure - // we can unpack it, and also that if we mutate the packed - // version, then we trigger an error. - if versionCase.valid { - var unpackedMulti Multi - err = unpackedMulti.UnpackFromReader(&b, keyRing) - if err != nil { - t.Fatalf("#%v unable to unpack multi: %v", - i, err) - } - - // First, we'll ensure that the unpacked version of the - // packed multi is the same as the original set. 
- if len(originalSingles) != - len(unpackedMulti.StaticBackups) { - t.Fatalf("expected %v singles, got %v", - len(originalSingles), - len(unpackedMulti.StaticBackups)) - } - for i := 0; i < numSingles; i++ { - assertSingleEqual( - t, originalSingles[i], - unpackedMulti.StaticBackups[i], - ) - } - - // Next, we'll make a fake packed multi, it'll have an - // unknown version relative to what's implemented atm. - var fakePackedMulti bytes.Buffer - fakeRawMulti := bytes.NewBuffer( - bytes.Repeat([]byte{99}, 20), - ) - err := encryptPayloadToWriter( - *fakeRawMulti, &fakePackedMulti, keyRing, - ) - if err != nil { - t.Fatalf("unable to pack fake multi; %v", err) - } - - // We should reject this fake multi as it contains an - // unknown version. - err = unpackedMulti.UnpackFromReader( - &fakePackedMulti, keyRing, - ) - if err == nil { - t.Fatalf("#%v unpack with unknown version "+ - "should have failed", i) - } - } - } -} - -// TestPackedMultiUnpack tests that we're able to properly unpack a typed -// packed multi. -func TestPackedMultiUnpack(t *testing.T) { - t.Parallel() - - keyRing := &mockKeyRing{} - - // First, we'll make a new unpacked multi with a random channel. - testChannel, err := genRandomOpenChannelShell() - if err != nil { - t.Fatalf("unable to gen random channel: %v", err) - } - var multi Multi - multi.StaticBackups = append( - multi.StaticBackups, NewSingle(testChannel, nil), - ) - - // Now that we have our multi, we'll pack it into a new buffer. - var b bytes.Buffer - if err := multi.PackToWriter(&b, keyRing); err != nil { - t.Fatalf("unable to pack multi: %v", err) - } - - // We should be able to properly unpack this typed packed multi. - packedMulti := PackedMulti(b.Bytes()) - unpackedMulti, err := packedMulti.Unpack(keyRing) - if err != nil { - t.Fatalf("unable to unpack multi: %v", err) - } - - // Finally, the versions should match, and the unpacked singles also - // identical. 
- if multi.Version != unpackedMulti.Version { - t.Fatalf("version mismatch: expected %v got %v", - multi.Version, unpackedMulti.Version) - } - assertSingleEqual( - t, multi.StaticBackups[0], unpackedMulti.StaticBackups[0], - ) -} diff --git a/lnd/chanbackup/pubsub.go b/lnd/chanbackup/pubsub.go deleted file mode 100644 index 747578bb..00000000 --- a/lnd/chanbackup/pubsub.go +++ /dev/null @@ -1,311 +0,0 @@ -package chanbackup - -import ( - "bytes" - "net" - "os" - "sync" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/keychain" - "github.com/pkt-cash/pktd/pktlog/log" - "github.com/pkt-cash/pktd/wire" -) - -// Swapper is an interface that allows the chanbackup.SubSwapper to update the -// main multi backup location once it learns of new channels or that prior -// channels have been closed. -type Swapper interface { - // UpdateAndSwap attempts to atomically update the main multi back up - // file location with the new fully packed multi-channel backup. - UpdateAndSwap(newBackup PackedMulti) er.R - - // ExtractMulti attempts to obtain and decode the current SCB instance - // stored by the Swapper instance. - ExtractMulti(keychain keychain.KeyRing) (*Multi, er.R) -} - -// ChannelWithAddrs bundles an open channel along with all the addresses for -// the channel peer. -type ChannelWithAddrs struct { - *channeldb.OpenChannel - - // Addrs is the set of addresses that we can use to reach the target - // peer. - Addrs []net.Addr -} - -// ChannelEvent packages a new update of new channels since subscription, and -// channels that have been opened since prior channel event. -type ChannelEvent struct { - // ClosedChans are the set of channels that have been closed since the - // last event. - ClosedChans []wire.OutPoint - - // NewChans is the set of channels that have been opened since the last - // event. 
- NewChans []ChannelWithAddrs -} - -// ChannelSubscription represents an intent to be notified of any updates to -// the primary channel state. -type ChannelSubscription struct { - // ChanUpdates is a channel that will be sent upon once the primary - // channel state is updated. - ChanUpdates chan ChannelEvent - - // Cancel is a closure that allows the caller to cancel their - // subscription and free up any resources allocated. - Cancel func() -} - -// ChannelNotifier represents a system that allows the chanbackup.SubSwapper to -// be notified of any changes to the primary channel state. -type ChannelNotifier interface { - // SubscribeChans requests a new channel subscription relative to the - // initial set of known channels. We use the knownChans as a - // synchronization point to ensure that the chanbackup.SubSwapper does - // not miss any channel open or close events in the period between when - // it's created, and when it requests the channel subscription. - SubscribeChans(map[wire.OutPoint]struct{}) (*ChannelSubscription, er.R) -} - -// SubSwapper subscribes to new updates to the open channel state, and then -// swaps out the on-disk channel backup state in response. This sub-system -// that will ensure that the multi chan backup file on disk will always be -// updated with the latest channel back up state. We'll receive new -// opened/closed channels from the ChannelNotifier, then use the Swapper to -// update the file state on disk with the new set of open channels. This can -// be used to implement a system that always keeps the multi-chan backup file -// on disk in a consistent state for safety purposes. -type SubSwapper struct { - started sync.Once - stopped sync.Once - - // backupState are the set of SCBs for all open channels we know of. - backupState map[wire.OutPoint]Single - - // chanEvents is an active subscription to receive new channel state - // over. 
- chanEvents *ChannelSubscription - - // keyRing is the main key ring that will allow us to pack the new - // multi backup. - keyRing keychain.KeyRing - - Swapper - - quit chan struct{} - wg sync.WaitGroup -} - -// NewSubSwapper creates a new instance of the SubSwapper given the starting -// set of channels, and the required interfaces to be notified of new channel -// updates, pack a multi backup, and swap the current best backup from its -// storage location. -func NewSubSwapper(startingChans []Single, chanNotifier ChannelNotifier, - keyRing keychain.KeyRing, backupSwapper Swapper) (*SubSwapper, er.R) { - - // First, we'll subscribe to the latest set of channel updates given - // the set of channels we already know of. - knownChans := make(map[wire.OutPoint]struct{}) - for _, chanBackup := range startingChans { - knownChans[chanBackup.FundingOutpoint] = struct{}{} - } - chanEvents, err := chanNotifier.SubscribeChans(knownChans) - if err != nil { - return nil, err - } - - // Next, we'll construct our own backup state so we can add/remove - // channels that have been opened and closed. - backupState := make(map[wire.OutPoint]Single) - for _, chanBackup := range startingChans { - backupState[chanBackup.FundingOutpoint] = chanBackup - } - - return &SubSwapper{ - backupState: backupState, - chanEvents: chanEvents, - keyRing: keyRing, - Swapper: backupSwapper, - quit: make(chan struct{}), - }, nil -} - -// Start starts the chanbackup.SubSwapper. -func (s *SubSwapper) Start() er.R { - var startErr er.R - s.started.Do(func() { - log.Infof("Starting chanbackup.SubSwapper") - - // Before we enter our main loop, we'll update the on-disk - // state with the latest Single state, as nodes may have new - // advertised addresses. 
- if err := s.updateBackupFile(); err != nil { - startErr = er.Errorf("unable to refresh backup "+ - "file: %v", err) - return - } - - s.wg.Add(1) - go s.backupUpdater() - }) - - return startErr -} - -// Stop signals the SubSwapper to being a graceful shutdown. -func (s *SubSwapper) Stop() er.R { - s.stopped.Do(func() { - log.Infof("Stopping chanbackup.SubSwapper") - - close(s.quit) - s.wg.Wait() - }) - return nil -} - -// updateBackupFile updates the backup file in place given the current state of -// the SubSwapper. We accept the set of channels that were closed between this -// update and the last to make sure we leave them out of our backup set union. -func (s *SubSwapper) updateBackupFile(closedChans ...wire.OutPoint) er.R { - // Before we pack the new set of SCBs, we'll first decode what we - // already have on-disk, to make sure we can decode it (proper seed) - // and that we're able to combine it with our new data. - diskMulti, err := s.Swapper.ExtractMulti(s.keyRing) - - // If the file doesn't exist on disk, then that's OK as it was never - // created. In this case we'll continue onwards as it isn't a critical - // error. - if err != nil && !os.IsNotExist(er.Wrapped(err)) { - return er.Errorf("unable to extract on disk encrypted "+ - "SCB: %v", err) - } - - // Now that we have channels stored on-disk, we'll create a new set of - // the combined old and new channels to make sure we retain what's - // already on-disk. - // - // NOTE: The ordering of this operations means that our in-memory - // structure will replace what we read from disk. 
- combinedBackup := make(map[wire.OutPoint]Single) - if diskMulti != nil { - for _, diskChannel := range diskMulti.StaticBackups { - chanPoint := diskChannel.FundingOutpoint - combinedBackup[chanPoint] = diskChannel - } - } - for _, memChannel := range s.backupState { - chanPoint := memChannel.FundingOutpoint - if _, ok := combinedBackup[chanPoint]; ok { - log.Warnf("Replacing disk backup for ChannelPoint(%v) "+ - "w/ newer version", chanPoint) - } - - combinedBackup[chanPoint] = memChannel - } - - // Remove the set of closed channels from the final set of backups. - for _, closedChan := range closedChans { - delete(combinedBackup, closedChan) - } - - // With our updated channel state obtained, we'll create a new multi - // from our series of singles. - var newMulti Multi - for _, backup := range combinedBackup { - newMulti.StaticBackups = append( - newMulti.StaticBackups, backup, - ) - } - - // Now that our multi has been assembled, we'll attempt to pack - // (encrypt+encode) the new channel state to our target reader. - var b bytes.Buffer - err = newMulti.PackToWriter(&b, s.keyRing) - if err != nil { - return er.Errorf("unable to pack multi backup: %v", err) - } - - // Finally, we'll swap out the old backup for this new one in a single - // atomic step, combining the file already on-disk with this set of new - // channels. - err = s.Swapper.UpdateAndSwap(PackedMulti(b.Bytes())) - if err != nil { - return er.Errorf("unable to update multi backup: %v", err) - } - - return nil -} - -// backupFileUpdater is the primary goroutine of the SubSwapper which is -// responsible for listening for changes to the channel, and updating the -// persistent multi backup state with a new packed multi of the latest channel -// state. -func (s *SubSwapper) backupUpdater() { - // Ensure that once we exit, we'll cancel our active channel - // subscription. 
- defer s.chanEvents.Cancel() - defer s.wg.Done() - - log.Debugf("SubSwapper's backupUpdater is active!") - - for { - select { - // The channel state has been modified! We'll evaluate all - // changes, and swap out the old packed multi with a new one - // with the latest channel state. - case chanUpdate := <-s.chanEvents.ChanUpdates: - oldStateSize := len(s.backupState) - - // For all new open channels, we'll create a new SCB - // given the required information. - for _, newChan := range chanUpdate.NewChans { - log.Debugf("Adding channel %v to backup state", - newChan.FundingOutpoint) - - s.backupState[newChan.FundingOutpoint] = NewSingle( - newChan.OpenChannel, newChan.Addrs, - ) - } - - // For all closed channels, we'll remove the prior - // backup state. - closedChans := make( - []wire.OutPoint, 0, len(chanUpdate.ClosedChans), - ) - for i, closedChan := range chanUpdate.ClosedChans { - log.Debugf("Removing channel %v from backup "+ - "state", log.C(func() string { - return chanUpdate.ClosedChans[i].String() - })) - - delete(s.backupState, closedChan) - - closedChans = append(closedChans, closedChan) - } - - newStateSize := len(s.backupState) - - log.Infof("Updating on-disk multi SCB backup: "+ - "num_old_chans=%v, num_new_chans=%v", - oldStateSize, newStateSize) - - // With out new state constructed, we'll, atomically - // update the on-disk backup state. - if err := s.updateBackupFile(closedChans...); err != nil { - log.Errorf("unable to update backup file: %v", - err) - } - - // TODO(roasbeef): refresh periodically on a time basis due to - // possible addr changes from node - - // Exit at once if a quit signal is detected. 
- case <-s.quit: - return - } - } -} diff --git a/lnd/chanbackup/pubsub_test.go b/lnd/chanbackup/pubsub_test.go deleted file mode 100644 index 76bc0373..00000000 --- a/lnd/chanbackup/pubsub_test.go +++ /dev/null @@ -1,284 +0,0 @@ -package chanbackup - -import ( - "testing" - "time" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/keychain" - "github.com/pkt-cash/pktd/wire" -) - -type mockSwapper struct { - fail bool - - swaps chan PackedMulti - - swapState *Multi - - keyChain keychain.KeyRing -} - -func newMockSwapper(keychain keychain.KeyRing) *mockSwapper { - return &mockSwapper{ - swaps: make(chan PackedMulti, 1), - keyChain: keychain, - swapState: &Multi{}, - } -} - -func (m *mockSwapper) UpdateAndSwap(newBackup PackedMulti) er.R { - if m.fail { - return er.Errorf("fail") - } - - swapState, err := newBackup.Unpack(m.keyChain) - if err != nil { - return er.Errorf("unable to decode on disk swaps: %v", err) - } - - m.swapState = swapState - - m.swaps <- newBackup - - return nil -} - -func (m *mockSwapper) ExtractMulti(keychain keychain.KeyRing) (*Multi, er.R) { - return m.swapState, nil -} - -type mockChannelNotifier struct { - fail bool - - chanEvents chan ChannelEvent -} - -func newMockChannelNotifier() *mockChannelNotifier { - return &mockChannelNotifier{ - chanEvents: make(chan ChannelEvent), - } -} - -func (m *mockChannelNotifier) SubscribeChans(chans map[wire.OutPoint]struct{}) ( - *ChannelSubscription, er.R) { - - if m.fail { - return nil, er.Errorf("fail") - } - - return &ChannelSubscription{ - ChanUpdates: m.chanEvents, - Cancel: func() { - }, - }, nil -} - -// TestNewSubSwapperSubscribeFail tests that if we're unable to obtain a -// channel subscription, then the entire sub-swapper will fail to start. 
-func TestNewSubSwapperSubscribeFail(t *testing.T) { - t.Parallel() - - keyRing := &mockKeyRing{} - - var swapper mockSwapper - chanNotifier := mockChannelNotifier{ - fail: true, - } - - _, err := NewSubSwapper(nil, &chanNotifier, keyRing, &swapper) - if err == nil { - t.Fatalf("expected fail due to lack of subscription") - } -} - -func assertExpectedBackupSwap(t *testing.T, swapper *mockSwapper, - subSwapper *SubSwapper, keyRing keychain.KeyRing, - expectedChanSet map[wire.OutPoint]Single) { - - t.Helper() - - select { - case newPackedMulti := <-swapper.swaps: - // If we unpack the new multi, then we should find all the old - // channels, and also the new channel included and any deleted - // channel omitted. - newMulti, err := newPackedMulti.Unpack(keyRing) - if err != nil { - t.Fatalf("unable to unpack multi: %v", err) - } - - // Ensure that once unpacked, the current backup has the - // expected number of Singles. - if len(newMulti.StaticBackups) != len(expectedChanSet) { - t.Fatalf("new backup wasn't included: expected %v "+ - "backups have %v", len(expectedChanSet), - len(newMulti.StaticBackups)) - } - - // We should also find all the old and new channels in this new - // backup. - for _, backup := range newMulti.StaticBackups { - _, ok := expectedChanSet[backup.FundingOutpoint] - if !ok { - t.Fatalf("didn't find backup in original set: %v", - backup.FundingOutpoint) - } - } - - // The same applies for our in-memory state, but it's also - // possible for there to be items in the on-disk state that we - // don't know of explicit. 
- newChans := make(map[wire.OutPoint]Single) - for _, newChan := range newMulti.StaticBackups { - newChans[newChan.FundingOutpoint] = newChan - } - for _, backup := range subSwapper.backupState { - _, ok := newChans[backup.FundingOutpoint] - if !ok { - t.Fatalf("didn't find backup in original set: %v", - backup.FundingOutpoint) - } - } - - case <-time.After(time.Second * 5): - t.Fatalf("update swapper didn't swap out multi") - } -} - -// TestSubSwapperIdempotentStartStop tests that calling the Start/Stop methods -// multiple time is permitted. -func TestSubSwapperIdempotentStartStop(t *testing.T) { - t.Parallel() - - keyRing := &mockKeyRing{} - - var chanNotifier mockChannelNotifier - - swapper := newMockSwapper(keyRing) - subSwapper, err := NewSubSwapper(nil, &chanNotifier, keyRing, swapper) - if err != nil { - t.Fatalf("unable to init subSwapper: %v", err) - } - - if err := subSwapper.Start(); err != nil { - t.Fatalf("unable to start swapper: %v", err) - } - - // The swapper should write the initial channel state as soon as it's - // active. - backupSet := make(map[wire.OutPoint]Single) - assertExpectedBackupSwap(t, swapper, subSwapper, keyRing, backupSet) - - subSwapper.Start() - - subSwapper.Stop() - subSwapper.Stop() -} - -// TestSubSwapperUpdater tests that the SubSwapper will properly swap out -// new/old channels within the channel set, and notify the swapper to update -// the master multi file backup. -func TestSubSwapperUpdater(t *testing.T) { - t.Parallel() - - keyRing := &mockKeyRing{} - chanNotifier := newMockChannelNotifier() - swapper := newMockSwapper(keyRing) - - // First, we'll start out by creating a channels set for the initial - // set of channels known to the sub-swapper. 
- const numStartingChans = 3 - initialChanSet := make([]Single, 0, numStartingChans) - backupSet := make(map[wire.OutPoint]Single) - for i := 0; i < numStartingChans; i++ { - channel, err := genRandomOpenChannelShell() - if err != nil { - t.Fatalf("unable to make test chan: %v", err) - } - - single := NewSingle(channel, nil) - - backupSet[channel.FundingOutpoint] = single - initialChanSet = append(initialChanSet, single) - } - - // We'll also generate two additional channels which will already be - // present on disk. However, these will at first only be known by the - // on disk backup (the backup set). - const numDiskChans = 2 - for i := 0; i < numDiskChans; i++ { - channel, err := genRandomOpenChannelShell() - if err != nil { - t.Fatalf("unable to make test chan: %v", err) - } - - single := NewSingle(channel, nil) - - backupSet[channel.FundingOutpoint] = single - swapper.swapState.StaticBackups = append( - swapper.swapState.StaticBackups, single, - ) - } - - // With our channel set created, we'll make a fresh sub swapper - // instance to begin our test. - subSwapper, err := NewSubSwapper( - initialChanSet, chanNotifier, keyRing, swapper, - ) - if err != nil { - t.Fatalf("unable to make swapper: %v", err) - } - if err := subSwapper.Start(); err != nil { - t.Fatalf("unable to start sub swapper: %v", err) - } - defer subSwapper.Stop() - - // The swapper should write the initial channel state as soon as it's - // active. - assertExpectedBackupSwap(t, swapper, subSwapper, keyRing, backupSet) - - // Now that the sub-swapper is active, we'll notify to add a brand new - // channel to the channel state. - newChannel, err := genRandomOpenChannelShell() - if err != nil { - t.Fatalf("unable to create new chan: %v", err) - } - - // With the new channel created, we'll send a new update to the main - // goroutine telling it about this new channel. 
- select { - case chanNotifier.chanEvents <- ChannelEvent{ - NewChans: []ChannelWithAddrs{ - { - OpenChannel: newChannel, - }, - }, - }: - case <-time.After(time.Second * 5): - t.Fatalf("update swapper didn't read new channel: %v", err) - } - - backupSet[newChannel.FundingOutpoint] = NewSingle(newChannel, nil) - - // At this point, the sub-swapper should now have packed a new multi, - // and then sent it to the swapper so the back up can be updated. - assertExpectedBackupSwap(t, swapper, subSwapper, keyRing, backupSet) - - // We'll now trigger an update to remove an existing channel. - chanToDelete := initialChanSet[0].FundingOutpoint - select { - case chanNotifier.chanEvents <- ChannelEvent{ - ClosedChans: []wire.OutPoint{chanToDelete}, - }: - - case <-time.After(time.Second * 5): - t.Fatalf("update swapper didn't read new channel: %v", err) - } - - delete(backupSet, chanToDelete) - - // Verify that the new set of backups, now has one less after the - // sub-swapper switches the new set with the old. - assertExpectedBackupSwap(t, swapper, subSwapper, keyRing, backupSet) -} diff --git a/lnd/chanbackup/recover.go b/lnd/chanbackup/recover.go deleted file mode 100644 index f92eab52..00000000 --- a/lnd/chanbackup/recover.go +++ /dev/null @@ -1,125 +0,0 @@ -package chanbackup - -import ( - "net" - - "github.com/davecgh/go-spew/spew" - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/keychain" - "github.com/pkt-cash/pktd/pktlog/log" -) - -// ChannelRestorer is an interface that allows the Recover method to map the -// set of single channel backups into a set of "channel shells" and store these -// persistently on disk. The channel shell should contain all the information -// needed to execute the data loss recovery protocol once the channel peer is -// connected to. 
-type ChannelRestorer interface { - // RestoreChansFromSingles attempts to map the set of single channel - // backups to channel shells that will be stored persistently. Once - // these shells have been stored on disk, we'll be able to connect to - // the channel peer an execute the data loss recovery protocol. - RestoreChansFromSingles(...Single) er.R -} - -// PeerConnector is an interface that allows the Recover method to connect to -// the target node given the set of possible addresses. -type PeerConnector interface { - // ConnectPeer attempts to connect to the target node at the set of - // available addresses. Once this method returns with a non-nil error, - // the connector should attempt to persistently connect to the target - // peer in the background as a persistent attempt. - ConnectPeer(node *btcec.PublicKey, addrs []net.Addr) er.R -} - -// Recover attempts to recover the static channel state from a set of static -// channel backups. If successfully, the database will be populated with a -// series of "shell" channels. These "shell" channels cannot be used to operate -// the channel as normal, but instead are meant to be used to enter the data -// loss recovery phase, and recover the settled funds within -// the channel. In addition a LinkNode will be created for each new peer as -// well, in order to expose the addressing information required to locate to -// and connect to each peer in order to initiate the recovery protocol. -func Recover(backups []Single, restorer ChannelRestorer, - peerConnector PeerConnector) er.R { - - for i, backup := range backups { - log.Infof("Restoring ChannelPoint(%v) to disk: ", - backup.FundingOutpoint) - - err := restorer.RestoreChansFromSingles(backup) - - // If a channel is already present in the channel DB, we can - // just continue. No reason to fail a whole set of multi backups - // for example. This allows resume of a restore in case another - // error happens. 
- if channeldb.ErrChanAlreadyExists.Is(err) { - continue - } - if err != nil { - return err - } - - log.Infof("Attempting to connect to node=%x (addrs=%v) to "+ - "restore ChannelPoint(%v)", - backup.RemoteNodePub.SerializeCompressed(), - log.C(func() string { - return spew.Sdump(backups[i].Addresses) - }), backup.FundingOutpoint) - - err = peerConnector.ConnectPeer( - backup.RemoteNodePub, backup.Addresses, - ) - if err != nil { - return err - } - - // TODO(roasbeef): to handle case where node has changed addrs, - // need to subscribe to new updates for target node pub to - // attempt to connect to other addrs - // - // * just to to fresh w/ call to node addrs and de-dup? - } - - return nil -} - -// TODO(roasbeef): more specific keychain interface? - -// UnpackAndRecoverSingles is a one-shot method, that given a set of packed -// single channel backups, will restore the channel state to a channel shell, -// and also reach out to connect to any of the known node addresses for that -// channel. It is assumes that after this method exists, if a connection we -// able to be established, then then PeerConnector will continue to attempt to -// re-establish a persistent connection in the background. -func UnpackAndRecoverSingles(singles PackedSingles, - keyChain keychain.KeyRing, restorer ChannelRestorer, - peerConnector PeerConnector) er.R { - - chanBackups, err := singles.Unpack(keyChain) - if err != nil { - return err - } - - return Recover(chanBackups, restorer, peerConnector) -} - -// UnpackAndRecoverMulti is a one-shot method, that given a set of packed -// multi-channel backups, will restore the channel states to channel shells, -// and also reach out to connect to any of the known node addresses for that -// channel. It is assumes that after this method exists, if a connection we -// able to be established, then then PeerConnector will continue to attempt to -// re-establish a persistent connection in the background. 
-func UnpackAndRecoverMulti(packedMulti PackedMulti, - keyChain keychain.KeyRing, restorer ChannelRestorer, - peerConnector PeerConnector) er.R { - - chanBackups, err := packedMulti.Unpack(keyChain) - if err != nil { - return err - } - - return Recover(chanBackups.StaticBackups, restorer, peerConnector) -} diff --git a/lnd/chanbackup/recover_test.go b/lnd/chanbackup/recover_test.go deleted file mode 100644 index c90a8d06..00000000 --- a/lnd/chanbackup/recover_test.go +++ /dev/null @@ -1,232 +0,0 @@ -package chanbackup - -import ( - "bytes" - "net" - "testing" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil/er" -) - -type mockChannelRestorer struct { - fail bool - - callCount int -} - -func (m *mockChannelRestorer) RestoreChansFromSingles(...Single) er.R { - if m.fail { - return er.Errorf("fail") - } - - m.callCount++ - - return nil -} - -type mockPeerConnector struct { - fail bool - - callCount int -} - -func (m *mockPeerConnector) ConnectPeer(node *btcec.PublicKey, - addrs []net.Addr) er.R { - - if m.fail { - return er.Errorf("fail") - } - - m.callCount++ - - return nil -} - -// TestUnpackAndRecoverSingles tests that we're able to properly unpack and -// recover a set of packed singles. -func TestUnpackAndRecoverSingles(t *testing.T) { - t.Parallel() - - keyRing := &mockKeyRing{} - - // First, we'll create a number of single chan backups that we'll - // shortly back to so we can begin our recovery attempt. 
- numSingles := 10 - backups := make([]Single, 0, numSingles) - var packedBackups PackedSingles - for i := 0; i < numSingles; i++ { - channel, err := genRandomOpenChannelShell() - if err != nil { - t.Fatalf("unable make channel: %v", err) - } - - single := NewSingle(channel, nil) - - var b bytes.Buffer - if err := single.PackToWriter(&b, keyRing); err != nil { - t.Fatalf("unable to pack single: %v", err) - } - - backups = append(backups, single) - packedBackups = append(packedBackups, b.Bytes()) - } - - chanRestorer := mockChannelRestorer{} - peerConnector := mockPeerConnector{} - - // Now that we have our backups (packed and unpacked), we'll attempt to - // restore them all in a single batch. - - // If we make the channel restore fail, then the entire method should - // as well - chanRestorer.fail = true - err := UnpackAndRecoverSingles( - packedBackups, keyRing, &chanRestorer, &peerConnector, - ) - if err == nil { - t.Fatalf("restoration should have failed") - } - - chanRestorer.fail = false - - // If we make the peer connector fail, then the entire method should as - // well - peerConnector.fail = true - err = UnpackAndRecoverSingles( - packedBackups, keyRing, &chanRestorer, &peerConnector, - ) - if err == nil { - t.Fatalf("restoration should have failed") - } - - chanRestorer.callCount-- - peerConnector.fail = false - - // Next, we'll ensure that if all the interfaces function as expected, - // then the channels will properly be unpacked and restored. - err = UnpackAndRecoverSingles( - packedBackups, keyRing, &chanRestorer, &peerConnector, - ) - if err != nil { - t.Fatalf("unable to recover chans: %v", err) - } - - // Both the restorer, and connector should have been called 10 times, - // once for each backup. 
- if chanRestorer.callCount != numSingles { - t.Fatalf("expected %v calls, instead got %v", - numSingles, chanRestorer.callCount) - } - if peerConnector.callCount != numSingles { - t.Fatalf("expected %v calls, instead got %v", - numSingles, peerConnector.callCount) - } - - // If we modify the keyRing, then unpacking should fail. - keyRing.fail = true - err = UnpackAndRecoverSingles( - packedBackups, keyRing, &chanRestorer, &peerConnector, - ) - if err == nil { - t.Fatalf("unpacking should have failed") - } - - // TODO(roasbeef): verify proper call args -} - -// TestUnpackAndRecoverMulti tests that we're able to properly unpack and -// recover a packed multi. -func TestUnpackAndRecoverMulti(t *testing.T) { - t.Parallel() - - keyRing := &mockKeyRing{} - - // First, we'll create a number of single chan backups that we'll - // shortly back to so we can begin our recovery attempt. - numSingles := 10 - backups := make([]Single, 0, numSingles) - for i := 0; i < numSingles; i++ { - channel, err := genRandomOpenChannelShell() - if err != nil { - t.Fatalf("unable make channel: %v", err) - } - - single := NewSingle(channel, nil) - - backups = append(backups, single) - } - - multi := Multi{ - StaticBackups: backups, - } - - var b bytes.Buffer - if err := multi.PackToWriter(&b, keyRing); err != nil { - t.Fatalf("unable to pack multi: %v", err) - } - - // Next, we'll pack the set of singles into a packed multi, and also - // create the set of interfaces we need to carry out the remainder of - // the test. 
- packedMulti := PackedMulti(b.Bytes()) - - chanRestorer := mockChannelRestorer{} - peerConnector := mockPeerConnector{} - - // If we make the channel restore fail, then the entire method should - // as well - chanRestorer.fail = true - err := UnpackAndRecoverMulti( - packedMulti, keyRing, &chanRestorer, &peerConnector, - ) - if err == nil { - t.Fatalf("restoration should have failed") - } - - chanRestorer.fail = false - - // If we make the peer connector fail, then the entire method should as - // well - peerConnector.fail = true - err = UnpackAndRecoverMulti( - packedMulti, keyRing, &chanRestorer, &peerConnector, - ) - if err == nil { - t.Fatalf("restoration should have failed") - } - - chanRestorer.callCount-- - peerConnector.fail = false - - // Next, we'll ensure that if all the interfaces function as expected, - // then the channels will properly be unpacked and restored. - err = UnpackAndRecoverMulti( - packedMulti, keyRing, &chanRestorer, &peerConnector, - ) - if err != nil { - t.Fatalf("unable to recover chans: %v", err) - } - - // Both the restorer, and connector should have been called 10 times, - // once for each backup. - if chanRestorer.callCount != numSingles { - t.Fatalf("expected %v calls, instead got %v", - numSingles, chanRestorer.callCount) - } - if peerConnector.callCount != numSingles { - t.Fatalf("expected %v calls, instead got %v", - numSingles, peerConnector.callCount) - } - - // If we modify the keyRing, then unpacking should fail. 
- keyRing.fail = true - err = UnpackAndRecoverMulti( - packedMulti, keyRing, &chanRestorer, &peerConnector, - ) - if err == nil { - t.Fatalf("unpacking should have failed") - } - - // TODO(roasbeef): verify proper call args -} diff --git a/lnd/chanbackup/single.go b/lnd/chanbackup/single.go deleted file mode 100644 index 8db98b44..00000000 --- a/lnd/chanbackup/single.go +++ /dev/null @@ -1,513 +0,0 @@ -package chanbackup - -import ( - "bytes" - "io" - "net" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/keychain" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/wire" -) - -// SingleBackupVersion denotes the version of the single static channel backup. -// Based on this version, we know how to pack/unpack serialized versions of the -// backup. -type SingleBackupVersion byte - -const ( - // DefaultSingleVersion is the default version of the single channel - // backup. The serialized version of this static channel backup is - // simply: version || SCB. Where SCB is the known format of the - // version. - DefaultSingleVersion = 0 - - // TweaklessCommitVersion is the second SCB version. This version - // implicitly denotes that this channel uses the new tweakless commit - // format. - TweaklessCommitVersion = 1 - - // AnchorsCommitVersion is the third SCB version. This version - // implicitly denotes that this channel uses the new anchor commitment - // format. - AnchorsCommitVersion = 2 -) - -// Single is a static description of an existing channel that can be used for -// the purposes of backing up. The fields in this struct allow a node to -// recover the settled funds within a channel in the case of partial or -// complete data loss. 
We provide the network address that we last used to -// connect to the peer as well, in case the node stops advertising the IP on -// the network for whatever reason. -// -// TODO(roasbeef): suffix version into struct? -type Single struct { - // Version is the version that should be observed when attempting to - // pack the single backup. - Version SingleBackupVersion - - // IsInitiator is true if we were the initiator of the channel, and - // false otherwise. We'll need to know this information in order to - // properly re-derive the state hint information. - IsInitiator bool - - // ChainHash is a hash which represents the blockchain that this - // channel will be opened within. This value is typically the genesis - // hash. In the case that the original chain went through a contentious - // hard-fork, then this value will be tweaked using the unique fork - // point on each branch. - ChainHash chainhash.Hash - - // FundingOutpoint is the outpoint of the final funding transaction. - // This value uniquely and globally identities the channel within the - // target blockchain as specified by the chain hash parameter. - FundingOutpoint wire.OutPoint - - // ShortChannelID encodes the exact location in the chain in which the - // channel was initially confirmed. This includes: the block height, - // transaction index, and the output within the target transaction. - // Channels that were not confirmed at the time of backup creation will - // have the funding TX broadcast height set as their block height in - // the ShortChannelID. - ShortChannelID lnwire.ShortChannelID - - // RemoteNodePub is the identity public key of the remote node this - // channel has been established with. - RemoteNodePub *btcec.PublicKey - - // Addresses is a list of IP address in which either we were able to - // reach the node over in the past, OR we received an incoming - // authenticated connection for the stored identity public key. 
- Addresses []net.Addr - - // Capacity is the size of the original channel. - Capacity btcutil.Amount - - // LocalChanCfg is our local channel configuration. It contains all the - // information we need to re-derive the keys we used within the - // channel. Most importantly, it allows to derive the base public - // that's used to deriving the key used within the non-delayed - // pay-to-self output on the commitment transaction for a node. With - // this information, we can re-derive the private key needed to sweep - // the funds on-chain. - // - // NOTE: Of the items in the ChannelConstraints, we only write the CSV - // delay. - LocalChanCfg channeldb.ChannelConfig - - // RemoteChanCfg is the remote channel confirmation. We store this as - // well since we'll need some of their keys to re-derive things like - // the state hint obfuscator which will allow us to recognize the state - // their broadcast on chain. - // - // NOTE: Of the items in the ChannelConstraints, we only write the CSV - // delay. - RemoteChanCfg channeldb.ChannelConfig - - // ShaChainRootDesc describes how to derive the private key that was - // used as the shachain root for this channel. - ShaChainRootDesc keychain.KeyDescriptor -} - -// NewSingle creates a new static channel backup based on an existing open -// channel. We also pass in the set of addresses that we used in the past to -// connect to the channel peer. -func NewSingle(channel *channeldb.OpenChannel, - nodeAddrs []net.Addr) Single { - - // TODO(roasbeef): update after we start to store the KeyLoc for - // shachain root - - // We'll need to obtain the shachain root which is derived directly - // from a private key in our keychain. - var b bytes.Buffer - channel.RevocationProducer.Encode(&b) // Can't return an error. - - // Once we have the root, we'll make a public key from it, such that - // the backups plaintext don't carry any private information. 
When we - // go to recover, we'll present this in order to derive the private - // key. - _, shaChainPoint := btcec.PrivKeyFromBytes(btcec.S256(), b.Bytes()) - - // If a channel is unconfirmed, the block height of the ShortChannelID - // is zero. This will lead to problems when trying to restore that - // channel as the spend notifier would get a height hint of zero. - // To work around that problem, we add the channel broadcast height - // to the channel ID so we can use that as height hint on restore. - chanID := channel.ShortChanID() - if chanID.BlockHeight == 0 { - chanID.BlockHeight = channel.FundingBroadcastHeight - } - - single := Single{ - IsInitiator: channel.IsInitiator, - ChainHash: channel.ChainHash, - FundingOutpoint: channel.FundingOutpoint, - ShortChannelID: chanID, - RemoteNodePub: channel.IdentityPub, - Addresses: nodeAddrs, - Capacity: channel.Capacity, - LocalChanCfg: channel.LocalChanCfg, - RemoteChanCfg: channel.RemoteChanCfg, - ShaChainRootDesc: keychain.KeyDescriptor{ - PubKey: shaChainPoint, - KeyLocator: keychain.KeyLocator{ - Family: keychain.KeyFamilyRevocationRoot, - }, - }, - } - - switch { - case channel.ChanType.HasAnchors(): - single.Version = AnchorsCommitVersion - - case channel.ChanType.IsTweakless(): - single.Version = TweaklessCommitVersion - - default: - single.Version = DefaultSingleVersion - } - - return single -} - -// Serialize attempts to write out the serialized version of the target -// StaticChannelBackup into the passed io.Writer. -func (s *Single) Serialize(w io.Writer) er.R { - // Check to ensure that we'll only attempt to serialize a version that - // we're aware of. - switch s.Version { - case DefaultSingleVersion: - case TweaklessCommitVersion: - case AnchorsCommitVersion: - default: - return er.Errorf("unable to serialize w/ unknown "+ - "version: %v", s.Version) - } - - // If the sha chain root has specified a public key (which is - // optional), then we'll encode it now. 
- var shaChainPub [33]byte - if s.ShaChainRootDesc.PubKey != nil { - copy( - shaChainPub[:], - s.ShaChainRootDesc.PubKey.SerializeCompressed(), - ) - } - - // First we gather the SCB as is into a temporary buffer so we can - // determine the total length. Before we write out the serialized SCB, - // we write the length which allows us to skip any Singles that we - // don't know of when decoding a multi. - var singleBytes bytes.Buffer - if err := lnwire.WriteElements( - &singleBytes, - s.IsInitiator, - s.ChainHash[:], - s.FundingOutpoint, - s.ShortChannelID, - s.RemoteNodePub, - s.Addresses, - s.Capacity, - - s.LocalChanCfg.CsvDelay, - - // We only need to write out the KeyLocator portion of the - // local channel config. - uint32(s.LocalChanCfg.MultiSigKey.Family), - s.LocalChanCfg.MultiSigKey.Index, - uint32(s.LocalChanCfg.RevocationBasePoint.Family), - s.LocalChanCfg.RevocationBasePoint.Index, - uint32(s.LocalChanCfg.PaymentBasePoint.Family), - s.LocalChanCfg.PaymentBasePoint.Index, - uint32(s.LocalChanCfg.DelayBasePoint.Family), - s.LocalChanCfg.DelayBasePoint.Index, - uint32(s.LocalChanCfg.HtlcBasePoint.Family), - s.LocalChanCfg.HtlcBasePoint.Index, - - s.RemoteChanCfg.CsvDelay, - - // We only need to write out the raw pubkey for the remote - // channel config. - s.RemoteChanCfg.MultiSigKey.PubKey, - s.RemoteChanCfg.RevocationBasePoint.PubKey, - s.RemoteChanCfg.PaymentBasePoint.PubKey, - s.RemoteChanCfg.DelayBasePoint.PubKey, - s.RemoteChanCfg.HtlcBasePoint.PubKey, - - shaChainPub[:], - uint32(s.ShaChainRootDesc.KeyLocator.Family), - s.ShaChainRootDesc.KeyLocator.Index, - ); err != nil { - return err - } - - return lnwire.WriteElements( - w, - byte(s.Version), - uint16(len(singleBytes.Bytes())), - singleBytes.Bytes(), - ) -} - -// PackToWriter is similar to the Serialize method, but takes the operation a -// step further by encryption the raw bytes of the static channel back up. 
For -// encryption we use the chacah20poly1305 AEAD cipher with a 24 byte nonce and -// 32-byte key size. We use a 24-byte nonce, as we can't ensure that we have a -// global counter to use as a sequence number for nonces, and want to ensure -// that we're able to decrypt these blobs without any additional context. We -// derive the key that we use for encryption via a SHA2 operation of the with -// the golden keychain.KeyFamilyStaticBackup base encryption key. We then take -// the serialized resulting shared secret point, and hash it using sha256 to -// obtain the key that we'll use for encryption. When using the AEAD, we pass -// the nonce as associated data such that we'll be able to package the two -// together for storage. Before writing out the encrypted payload, we prepend -// the nonce to the final blob. -func (s *Single) PackToWriter(w io.Writer, keyRing keychain.KeyRing) er.R { - // First, we'll serialize the SCB (StaticChannelBackup) into a - // temporary buffer so we can store it in a temporary place before we - // go to encrypt the entire thing. - var rawBytes bytes.Buffer - if err := s.Serialize(&rawBytes); err != nil { - return err - } - - // Finally, we'll encrypt the raw serialized SCB (using the nonce as - // associated data), and write out the ciphertext prepend with the - // nonce that we used to the passed io.Reader. - return encryptPayloadToWriter(rawBytes, w, keyRing) -} - -// readLocalKeyDesc reads a KeyDescriptor encoded within an unpacked Single. -// For local KeyDescs, we only write out the KeyLocator information as we can -// re-derive the pubkey from it. 
-func readLocalKeyDesc(r io.Reader) (keychain.KeyDescriptor, er.R) { - var keyDesc keychain.KeyDescriptor - - var keyFam uint32 - if err := lnwire.ReadElements(r, &keyFam); err != nil { - return keyDesc, err - } - keyDesc.Family = keychain.KeyFamily(keyFam) - - if err := lnwire.ReadElements(r, &keyDesc.Index); err != nil { - return keyDesc, err - } - - return keyDesc, nil -} - -// readRemoteKeyDesc reads a remote KeyDescriptor encoded within an unpacked -// Single. For remote KeyDescs, we write out only the PubKey since we don't -// actually have the KeyLocator data. -func readRemoteKeyDesc(r io.Reader) (keychain.KeyDescriptor, er.R) { - var ( - keyDesc keychain.KeyDescriptor - pub [33]byte - ) - - _, err := util.ReadFull(r, pub[:]) - if err != nil { - return keychain.KeyDescriptor{}, err - } - - keyDesc.PubKey, err = btcec.ParsePubKey(pub[:], btcec.S256()) - if err != nil { - return keychain.KeyDescriptor{}, err - } - - keyDesc.PubKey.Curve = nil - - return keyDesc, nil -} - -// Deserialize attempts to read the raw plaintext serialized SCB from the -// passed io.Reader. If the method is successful, then the target -// StaticChannelBackup will be fully populated. -func (s *Single) Deserialize(r io.Reader) er.R { - // First, we'll need to read the version of this single-back up so we - // can know how to unpack each of the SCB. 
- var version byte - err := lnwire.ReadElements(r, &version) - if err != nil { - return err - } - - s.Version = SingleBackupVersion(version) - - switch s.Version { - case DefaultSingleVersion: - case TweaklessCommitVersion: - case AnchorsCommitVersion: - default: - return er.Errorf("unable to de-serialize w/ unknown "+ - "version: %v", s.Version) - } - - var length uint16 - if err := lnwire.ReadElements(r, &length); err != nil { - return err - } - - err = lnwire.ReadElements( - r, &s.IsInitiator, s.ChainHash[:], &s.FundingOutpoint, - &s.ShortChannelID, &s.RemoteNodePub, &s.Addresses, &s.Capacity, - ) - if err != nil { - return err - } - - err = lnwire.ReadElements(r, &s.LocalChanCfg.CsvDelay) - if err != nil { - return err - } - s.LocalChanCfg.MultiSigKey, err = readLocalKeyDesc(r) - if err != nil { - return err - } - s.LocalChanCfg.RevocationBasePoint, err = readLocalKeyDesc(r) - if err != nil { - return err - } - s.LocalChanCfg.PaymentBasePoint, err = readLocalKeyDesc(r) - if err != nil { - return err - } - s.LocalChanCfg.DelayBasePoint, err = readLocalKeyDesc(r) - if err != nil { - return err - } - s.LocalChanCfg.HtlcBasePoint, err = readLocalKeyDesc(r) - if err != nil { - return err - } - - err = lnwire.ReadElements(r, &s.RemoteChanCfg.CsvDelay) - if err != nil { - return err - } - s.RemoteChanCfg.MultiSigKey, err = readRemoteKeyDesc(r) - if err != nil { - return err - } - s.RemoteChanCfg.RevocationBasePoint, err = readRemoteKeyDesc(r) - if err != nil { - return err - } - s.RemoteChanCfg.PaymentBasePoint, err = readRemoteKeyDesc(r) - if err != nil { - return err - } - s.RemoteChanCfg.DelayBasePoint, err = readRemoteKeyDesc(r) - if err != nil { - return err - } - s.RemoteChanCfg.HtlcBasePoint, err = readRemoteKeyDesc(r) - if err != nil { - return err - } - - // Finally, we'll parse out the ShaChainRootDesc. 
- var ( - shaChainPub [33]byte - zeroPub [33]byte - ) - if err := lnwire.ReadElements(r, shaChainPub[:]); err != nil { - return err - } - - // Since this field is optional, we'll check to see if the pubkey has - // been specified or not. - if !bytes.Equal(shaChainPub[:], zeroPub[:]) { - s.ShaChainRootDesc.PubKey, err = btcec.ParsePubKey( - shaChainPub[:], btcec.S256(), - ) - if err != nil { - return err - } - } - - var shaKeyFam uint32 - if err := lnwire.ReadElements(r, &shaKeyFam); err != nil { - return err - } - s.ShaChainRootDesc.KeyLocator.Family = keychain.KeyFamily(shaKeyFam) - - return lnwire.ReadElements(r, &s.ShaChainRootDesc.KeyLocator.Index) -} - -// UnpackFromReader is similar to Deserialize method, but it expects the passed -// io.Reader to contain an encrypt SCB. Refer to the SerializeAndEncrypt method -// for details w.r.t the encryption scheme used. If we're unable to decrypt the -// payload for whatever reason (wrong key, wrong nonce, etc), then this method -// will return an error. -func (s *Single) UnpackFromReader(r io.Reader, keyRing keychain.KeyRing) er.R { - plaintext, err := decryptPayloadFromReader(r, keyRing) - if err != nil { - return err - } - - // Finally, we'll pack the bytes into a reader to we can deserialize - // the plaintext bytes of the SCB. - backupReader := bytes.NewReader(plaintext) - return s.Deserialize(backupReader) -} - -// PackStaticChanBackups accepts a set of existing open channels, and a -// keychain.KeyRing, and returns a map of outpoints to the serialized+encrypted -// static channel backups. The passed keyRing should be backed by the users -// root HD seed in order to ensure full determinism. 
-func PackStaticChanBackups(backups []Single, - keyRing keychain.KeyRing) (map[wire.OutPoint][]byte, er.R) { - - packedBackups := make(map[wire.OutPoint][]byte) - for _, chanBackup := range backups { - chanPoint := chanBackup.FundingOutpoint - - var b bytes.Buffer - err := chanBackup.PackToWriter(&b, keyRing) - if err != nil { - return nil, er.Errorf("unable to pack chan backup "+ - "for %v: %v", chanPoint, err) - } - - packedBackups[chanPoint] = b.Bytes() - } - - return packedBackups, nil -} - -// PackedSingles represents a series of fully packed SCBs. This may be the -// combination of a series of individual SCBs in order to batch their -// unpacking. -type PackedSingles [][]byte - -// Unpack attempts to decrypt the passed set of encrypted SCBs and deserialize -// each one into a new SCB struct. The passed keyRing should be backed by the -// same HD seed as was used to encrypt the set of backups in the first place. -// If we're unable to decrypt any of the back ups, then we'll return an error. -func (p PackedSingles) Unpack(keyRing keychain.KeyRing) ([]Single, er.R) { - - backups := make([]Single, len(p)) - for i, encryptedBackup := range p { - var backup Single - - backupReader := bytes.NewReader(encryptedBackup) - err := backup.UnpackFromReader(backupReader, keyRing) - if err != nil { - return nil, err - } - - backups[i] = backup - } - - return backups, nil -} - -// TODO(roasbeef): make codec package? 
diff --git a/lnd/chanbackup/single_test.go b/lnd/chanbackup/single_test.go deleted file mode 100644 index def29f84..00000000 --- a/lnd/chanbackup/single_test.go +++ /dev/null @@ -1,462 +0,0 @@ -package chanbackup - -import ( - "bytes" - "math" - "math/rand" - "net" - "reflect" - "testing" - - "github.com/davecgh/go-spew/spew" - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/keychain" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/shachain" - "github.com/pkt-cash/pktd/wire" -) - -var ( - chainHash = chainhash.Hash{ - 0xb7, 0x94, 0x38, 0x5f, 0x2d, 0x1e, 0xf7, 0xab, - 0x4d, 0x92, 0x73, 0xd1, 0x90, 0x63, 0x81, 0xb4, - 0x4f, 0x2f, 0x6f, 0x25, 0x18, 0xa3, 0xef, 0xb9, - 0x64, 0x49, 0x18, 0x83, 0x31, 0x98, 0x47, 0x53, - } - - op = wire.OutPoint{ - Hash: chainHash, - Index: 4, - } - - addr1, _ = net.ResolveTCPAddr("tcp", "10.0.0.2:9000") - addr2, _ = net.ResolveTCPAddr("tcp", "10.0.0.3:9000") -) - -func assertSingleEqual(t *testing.T, a, b Single) { - t.Helper() - - if a.Version != b.Version { - t.Fatalf("versions don't match: %v vs %v", a.Version, - b.Version) - } - if a.IsInitiator != b.IsInitiator { - t.Fatalf("initiators don't match: %v vs %v", a.IsInitiator, - b.IsInitiator) - } - if a.ChainHash != b.ChainHash { - t.Fatalf("chainhash doesn't match: %v vs %v", a.ChainHash, - b.ChainHash) - } - if a.FundingOutpoint != b.FundingOutpoint { - t.Fatalf("chan point doesn't match: %v vs %v", - a.FundingOutpoint, b.FundingOutpoint) - } - if a.ShortChannelID != b.ShortChannelID { - t.Fatalf("chan id doesn't match: %v vs %v", - a.ShortChannelID, b.ShortChannelID) - } - if a.Capacity != b.Capacity { - t.Fatalf("capacity doesn't match: %v vs %v", - a.Capacity, b.Capacity) - } - if !a.RemoteNodePub.IsEqual(b.RemoteNodePub) { - t.Fatalf("node pubs don't match %x vs %x", - a.RemoteNodePub.SerializeCompressed(), - 
b.RemoteNodePub.SerializeCompressed()) - } - if !reflect.DeepEqual(a.LocalChanCfg, b.LocalChanCfg) { - t.Fatalf("local chan config doesn't match: %v vs %v", - spew.Sdump(a.LocalChanCfg), - spew.Sdump(b.LocalChanCfg)) - } - if !reflect.DeepEqual(a.RemoteChanCfg, b.RemoteChanCfg) { - t.Fatalf("remote chan config doesn't match: %v vs %v", - spew.Sdump(a.RemoteChanCfg), - spew.Sdump(b.RemoteChanCfg)) - } - if !reflect.DeepEqual(a.ShaChainRootDesc, b.ShaChainRootDesc) { - t.Fatalf("sha chain point doesn't match: %v vs %v", - spew.Sdump(a.ShaChainRootDesc), - spew.Sdump(b.ShaChainRootDesc)) - } - - if len(a.Addresses) != len(b.Addresses) { - t.Fatalf("expected %v addrs got %v", len(a.Addresses), - len(b.Addresses)) - } - for i := 0; i < len(a.Addresses); i++ { - if a.Addresses[i].String() != b.Addresses[i].String() { - t.Fatalf("addr mismatch: %v vs %v", - a.Addresses[i], b.Addresses[i]) - } - } -} - -func genRandomOpenChannelShell() (*channeldb.OpenChannel, er.R) { - var testPriv [32]byte - if _, err := rand.Read(testPriv[:]); err != nil { - return nil, er.E(err) - } - - _, pub := btcec.PrivKeyFromBytes(btcec.S256(), testPriv[:]) - - var chanPoint wire.OutPoint - if _, err := rand.Read(chanPoint.Hash[:]); err != nil { - return nil, er.E(err) - } - - pub.Curve = nil - - chanPoint.Index = uint32(rand.Intn(math.MaxUint16)) - - var shaChainRoot [32]byte - if _, err := rand.Read(shaChainRoot[:]); err != nil { - return nil, er.E(err) - } - - shaChainProducer := shachain.NewRevocationProducer(shaChainRoot) - - var isInitiator bool - if rand.Int63()%2 == 0 { - isInitiator = true - } - - chanType := channeldb.SingleFunderBit - if rand.Int63()%2 == 0 { - chanType = channeldb.SingleFunderTweaklessBit - } - - return &channeldb.OpenChannel{ - ChainHash: chainHash, - ChanType: chanType, - IsInitiator: isInitiator, - FundingOutpoint: chanPoint, - ShortChannelID: lnwire.NewShortChanIDFromInt( - uint64(rand.Int63()), - ), - IdentityPub: pub, - LocalChanCfg: channeldb.ChannelConfig{ - 
ChannelConstraints: channeldb.ChannelConstraints{ - CsvDelay: uint16(rand.Int63()), - }, - MultiSigKey: keychain.KeyDescriptor{ - KeyLocator: keychain.KeyLocator{ - Family: keychain.KeyFamily(rand.Int63()), - Index: uint32(rand.Int63()), - }, - }, - RevocationBasePoint: keychain.KeyDescriptor{ - KeyLocator: keychain.KeyLocator{ - Family: keychain.KeyFamily(rand.Int63()), - Index: uint32(rand.Int63()), - }, - }, - PaymentBasePoint: keychain.KeyDescriptor{ - KeyLocator: keychain.KeyLocator{ - Family: keychain.KeyFamily(rand.Int63()), - Index: uint32(rand.Int63()), - }, - }, - DelayBasePoint: keychain.KeyDescriptor{ - KeyLocator: keychain.KeyLocator{ - Family: keychain.KeyFamily(rand.Int63()), - Index: uint32(rand.Int63()), - }, - }, - HtlcBasePoint: keychain.KeyDescriptor{ - KeyLocator: keychain.KeyLocator{ - Family: keychain.KeyFamily(rand.Int63()), - Index: uint32(rand.Int63()), - }, - }, - }, - RemoteChanCfg: channeldb.ChannelConfig{ - ChannelConstraints: channeldb.ChannelConstraints{ - CsvDelay: uint16(rand.Int63()), - }, - MultiSigKey: keychain.KeyDescriptor{ - PubKey: pub, - }, - RevocationBasePoint: keychain.KeyDescriptor{ - PubKey: pub, - }, - PaymentBasePoint: keychain.KeyDescriptor{ - PubKey: pub, - }, - DelayBasePoint: keychain.KeyDescriptor{ - PubKey: pub, - }, - HtlcBasePoint: keychain.KeyDescriptor{ - PubKey: pub, - }, - }, - RevocationProducer: shaChainProducer, - }, nil -} - -// TestSinglePackUnpack tests that we're able to unpack a previously packed -// channel backup. -func TestSinglePackUnpack(t *testing.T) { - t.Parallel() - - // Given our test pub key, we'll create an open channel shell that - // contains all the information we need to create a static channel - // backup. 
- channel, err := genRandomOpenChannelShell() - if err != nil { - t.Fatalf("unable to gen open channel: %v", err) - } - - singleChanBackup := NewSingle(channel, []net.Addr{addr1, addr2}) - singleChanBackup.RemoteNodePub.Curve = nil - - keyRing := &mockKeyRing{} - - versionTestCases := []struct { - // version is the pack/unpack version that we should use to - // decode/encode the final SCB. - version SingleBackupVersion - - // valid tests us if this test case should pass or not. - valid bool - }{ - // The default version, should pack/unpack with no problem. - { - version: DefaultSingleVersion, - valid: true, - }, - - // The new tweakless version, should pack/unpack with no - // problem. - { - version: TweaklessCommitVersion, - valid: true, - }, - - // The new anchor version, should pack/unpack with no - // problem. - { - version: AnchorsCommitVersion, - valid: true, - }, - - // A non-default version, atm this should result in a failure. - { - version: 99, - valid: false, - }, - } - for i, versionCase := range versionTestCases { - // First, we'll re-assign SCB version to what was indicated in - // the test case. - singleChanBackup.Version = versionCase.version - - var b bytes.Buffer - - err := singleChanBackup.PackToWriter(&b, keyRing) - switch { - // If this is a valid test case, and we failed, then we'll - // return an error. - case err != nil && versionCase.valid: - t.Fatalf("#%v, unable to pack single: %v", i, err) - - // If this is an invalid test case, and we passed it, then - // we'll return an error. - case err == nil && !versionCase.valid: - t.Fatalf("#%v got nil error for invalid pack: %v", - i, err) - } - - // If this is a valid test case, then we'll continue to ensure - // we can unpack it, and also that if we mutate the packed - // version, then we trigger an error. 
- if versionCase.valid { - var unpackedSingle Single - err = unpackedSingle.UnpackFromReader(&b, keyRing) - if err != nil { - t.Fatalf("#%v unable to unpack single: %v", - i, err) - } - unpackedSingle.RemoteNodePub.Curve = nil - - assertSingleEqual(t, singleChanBackup, unpackedSingle) - - // If this was a valid packing attempt, then we'll test - // to ensure that if we mutate the version prepended to - // the serialization, then unpacking will fail as well. - var rawSingle bytes.Buffer - err := unpackedSingle.Serialize(&rawSingle) - if err != nil { - t.Fatalf("unable to serialize single: %v", err) - } - - rawBytes := rawSingle.Bytes() - rawBytes[0] ^= 5 - - newReader := bytes.NewReader(rawBytes) - err = unpackedSingle.Deserialize(newReader) - if err == nil { - t.Fatalf("#%v unpack with unknown version "+ - "should have failed", i) - } - } - } -} - -// TestPackedSinglesUnpack tests that we're able to properly unpack a series of -// packed singles. -func TestPackedSinglesUnpack(t *testing.T) { - t.Parallel() - - keyRing := &mockKeyRing{} - - // To start, we'll create 10 new singles, and them assemble their - // packed forms into a slice. - numSingles := 10 - packedSingles := make([][]byte, 0, numSingles) - unpackedSingles := make([]Single, 0, numSingles) - for i := 0; i < numSingles; i++ { - channel, err := genRandomOpenChannelShell() - if err != nil { - t.Fatalf("unable to gen channel: %v", err) - } - - single := NewSingle(channel, nil) - - var b bytes.Buffer - if err := single.PackToWriter(&b, keyRing); err != nil { - t.Fatalf("unable to pack single: %v", err) - } - - packedSingles = append(packedSingles, b.Bytes()) - unpackedSingles = append(unpackedSingles, single) - } - - // With all singles packed, we'll create the grouped type and attempt - // to Unpack all of them in a single go. 
- freshSingles, err := PackedSingles(packedSingles).Unpack(keyRing) - if err != nil { - t.Fatalf("unable to unpack singles: %v", err) - } - - // The set of freshly unpacked singles should exactly match the initial - // set of singles that we packed before. - for i := 0; i < len(unpackedSingles); i++ { - assertSingleEqual(t, unpackedSingles[i], freshSingles[i]) - } - - // If we mutate one of the packed singles, then the entire method - // should fail. - packedSingles[0][0] ^= 1 - _, err = PackedSingles(packedSingles).Unpack(keyRing) - if err == nil { - t.Fatalf("unpack attempt should fail") - } -} - -// TestSinglePackStaticChanBackups tests that we're able to batch pack a set of -// Singles, and then unpack them obtaining the same set of unpacked singles. -func TestSinglePackStaticChanBackups(t *testing.T) { - t.Parallel() - - keyRing := &mockKeyRing{} - - // First, we'll create a set of random single, and along the way, - // create a map that will let us look up each single by its chan point. - numSingles := 10 - singleMap := make(map[wire.OutPoint]Single, numSingles) - unpackedSingles := make([]Single, 0, numSingles) - for i := 0; i < numSingles; i++ { - channel, err := genRandomOpenChannelShell() - if err != nil { - t.Fatalf("unable to gen channel: %v", err) - } - - single := NewSingle(channel, nil) - - singleMap[channel.FundingOutpoint] = single - unpackedSingles = append(unpackedSingles, single) - } - - // Now that we have all of our singles are created, we'll attempt to - // pack them all in a single batch. - packedSingleMap, err := PackStaticChanBackups(unpackedSingles, keyRing) - if err != nil { - t.Fatalf("unable to pack backups: %v", err) - } - - // With our packed singles obtained, we'll ensure that each of them - // match their unpacked counterparts after they themselves have been - // unpacked. 
- for chanPoint, single := range singleMap { - packedSingles, ok := packedSingleMap[chanPoint] - if !ok { - t.Fatalf("unable to find single %v", chanPoint) - } - - var freshSingle Single - err := freshSingle.UnpackFromReader( - bytes.NewReader(packedSingles), keyRing, - ) - if err != nil { - t.Fatalf("unable to unpack single: %v", err) - } - - assertSingleEqual(t, single, freshSingle) - } - - // If we attempt to pack again, but force the key ring to fail, then - // the entire method should fail. - _, err = PackStaticChanBackups( - unpackedSingles, &mockKeyRing{true}, - ) - if err == nil { - t.Fatalf("pack attempt should fail") - } -} - -// TestSingleUnconfirmedChannel tests that unconfirmed channels get serialized -// correctly by encoding the funding broadcast height as block height of the -// short channel ID. -func TestSingleUnconfirmedChannel(t *testing.T) { - t.Parallel() - - var fundingBroadcastHeight = uint32(1234) - - // Let's create an open channel shell that contains all the information - // we need to create a static channel backup but simulate an - // unconfirmed channel by setting the block height to 0. - channel, err := genRandomOpenChannelShell() - if err != nil { - t.Fatalf("unable to gen open channel: %v", err) - } - channel.ShortChannelID.BlockHeight = 0 - channel.FundingBroadcastHeight = fundingBroadcastHeight - - singleChanBackup := NewSingle(channel, []net.Addr{addr1, addr2}) - keyRing := &mockKeyRing{} - - // Pack it and then unpack it again to make sure everything is written - // correctly, then check that the block height of the unpacked - // is the funding broadcast height we set before. 
- var b bytes.Buffer - if err := singleChanBackup.PackToWriter(&b, keyRing); err != nil { - t.Fatalf("unable to pack single: %v", err) - } - var unpackedSingle Single - err = unpackedSingle.UnpackFromReader(&b, keyRing) - if err != nil { - t.Fatalf("unable to unpack single: %v", err) - } - if unpackedSingle.ShortChannelID.BlockHeight != fundingBroadcastHeight { - t.Fatalf("invalid block height. got %d expected %d.", - unpackedSingle.ShortChannelID.BlockHeight, - fundingBroadcastHeight) - } -} - -// TODO(roasbsef): fuzz parsing diff --git a/lnd/chanfitness/chanevent.go b/lnd/chanfitness/chanevent.go deleted file mode 100644 index bd78c3d4..00000000 --- a/lnd/chanfitness/chanevent.go +++ /dev/null @@ -1,418 +0,0 @@ -package chanfitness - -import ( - "time" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/clock" - "github.com/pkt-cash/pktd/wire" -) - -type eventType int - -const ( - peerOnlineEvent eventType = iota - peerOfflineEvent -) - -// String provides string representations of channel events. -func (e eventType) String() string { - switch e { - case peerOnlineEvent: - return "peer_online" - - case peerOfflineEvent: - return "peer_offline" - } - - return "unknown" -} - -type event struct { - timestamp time.Time - eventType eventType -} - -// peerLog tracks events for a peer and its channels. If we currently have no -// channels with the peer, it will simply track its current online state. If we -// do have channels open with the peer, it will track the peer's online and -// offline events so that we can calculate uptime for our channels. A single -// event log is used for these online and offline events, and uptime for a -// channel is calculated by examining a subsection of this log. -type peerLog struct { - // online stores whether the peer is currently online. - online bool - - // onlineEvents is a log of timestamped events observed for the peer - // that we have committed to allocating memory to. 
- onlineEvents []*event - - // stagedEvent represents an event that is pending addition to the - // events list. It has not yet been added because we rate limit the - // frequency that we store events at. We need to store this value - // in the log (rather than just ignore events) so that we can flush the - // aggregate outcome to our event log once the rate limiting period has - // ended. - // - // Take the following example: - // - Peer online event recorded - // - Peer offline event, not recorded due to rate limit - // - No more events, we incorrectly believe our peer to be online - // Instead of skipping events, we stage the most recent event during the - // rate limited period so that we know what happened (on aggregate) - // while we were rate limiting events. - // - // Note that we currently only store offline/online events so we can - // use this field to track our online state. With the addition of other - // event types, we need to only stage online/offline events, or split - // them out. - stagedEvent *event - - // flapCount is the number of times this peer has been observed as - // going offline. - flapCount int - - // lastFlap is the timestamp of the last flap we recorded for the peer. - // This value will be nil if we have never recorded a flap for the peer. - lastFlap *time.Time - - // clock allows creation of deterministic unit tests. - clock clock.Clock - - // channels contains a set of currently open channels. Channels will be - // added and removed from this map as they are opened and closed. - channels map[wire.OutPoint]*channelInfo -} - -// newPeerLog creates a log for a peer, taking its historical flap count and -// last flap time as parameters. These values may be zero/nil if we have no -// record of historical flap count for the peer. 
-func newPeerLog(clock clock.Clock, flapCount int, - lastFlap *time.Time) *peerLog { - - return &peerLog{ - clock: clock, - flapCount: flapCount, - lastFlap: lastFlap, - channels: make(map[wire.OutPoint]*channelInfo), - } -} - -// channelInfo contains information about a channel. -type channelInfo struct { - // openedAt tracks the first time this channel was seen. This is not - // necessarily the time that it confirmed on chain because channel - // events are not persisted at present. - openedAt time.Time -} - -func newChannelInfo(openedAt time.Time) *channelInfo { - return &channelInfo{ - openedAt: openedAt, - } -} - -// onlineEvent records a peer online or offline event in the log and increments -// the peer's flap count. -func (p *peerLog) onlineEvent(online bool) { - eventTime := p.clock.Now() - - // If we have a non-nil last flap time, potentially apply a cooldown - // factor to the peer's flap count before we rate limit it. This allows - // us to decrease the penalty for historical flaps over time, provided - // the peer has not flapped for a while. - if p.lastFlap != nil { - p.flapCount = cooldownFlapCount( - p.clock.Now(), p.flapCount, *p.lastFlap, - ) - } - - // Record flap count information and online state regardless of whether - // we have any channels open with this peer. - p.flapCount++ - p.lastFlap = &eventTime - p.online = online - - // If we have no channels currently open with the peer, we do not want - // to commit resources to tracking their online state beyond a simple - // online boolean, so we exit early. - if p.channelCount() == 0 { - return - } - - p.addEvent(online, eventTime) -} - -// addEvent records an online or offline event in our event log. and increments -// the peer's flap count. 
-func (p *peerLog) addEvent(online bool, time time.Time) { - eventType := peerOnlineEvent - if !online { - eventType = peerOfflineEvent - } - - event := &event{ - timestamp: time, - eventType: eventType, - } - - // If we have no staged events, we can just stage this event and return. - if p.stagedEvent == nil { - p.stagedEvent = event - return - } - - // We get the amount of time we require between events according to - // peer flap count. - aggregation := getRateLimit(p.flapCount) - nextRecordTime := p.stagedEvent.timestamp.Add(aggregation) - flushEvent := nextRecordTime.Before(event.timestamp) - - // If enough time has passed since our last staged event, we add our - // event to our in-memory list. - if flushEvent { - p.onlineEvents = append(p.onlineEvents, p.stagedEvent) - } - - // Finally, we replace our staged event with the new event we received. - p.stagedEvent = event -} - -// addChannel adds a channel to our log. If we have not tracked any online -// events for our peer yet, we create one with our peer's current online state -// so that we know the state that the peer had at channel start, which is -// required to calculate uptime over the channel's lifetime. -func (p *peerLog) addChannel(channelPoint wire.OutPoint) er.R { - _, ok := p.channels[channelPoint] - if ok { - return er.Errorf("channel: %v already present", channelPoint) - } - - openTime := p.clock.Now() - p.channels[channelPoint] = newChannelInfo(openTime) - - // If we do not have any online events tracked for our peer (which is - // the case when we have no other channels open with the peer), we add - // an event with the peer's current online state so that we know that - // starting state for this peer when a channel was connected (which - // allows us to calculate uptime over the lifetime of the channel). - if len(p.onlineEvents) == 0 { - p.addEvent(p.online, openTime) - } - - return nil -} - -// removeChannel removes a channel from our log. 
If we have no more channels -// with the peer after removing this one, we clear our list of events. -func (p *peerLog) removeChannel(channelPoint wire.OutPoint) er.R { - _, ok := p.channels[channelPoint] - if !ok { - return er.Errorf("channel: %v not present", channelPoint) - } - - delete(p.channels, channelPoint) - - // If we have no more channels in our event log, we can discard all of - // our online events in memory, since we don't need them anymore. - // TODO(carla): this could be done on a per channel basis. - if p.channelCount() == 0 { - p.onlineEvents = nil - p.stagedEvent = nil - } - - return nil -} - -// channelCount returns the number of channels that we currently have -// with the peer. -func (p *peerLog) channelCount() int { - return len(p.channels) -} - -// channelUptime looks up a channel and returns the amount of time that the -// channel has been monitored for and its uptime over this period. -func (p *peerLog) channelUptime(channelPoint wire.OutPoint) (time.Duration, - time.Duration, er.R) { - - channel, ok := p.channels[channelPoint] - if !ok { - return 0, 0, ErrChannelNotFound.Default() - } - - now := p.clock.Now() - - uptime, err := p.uptime(channel.openedAt, now) - if err != nil { - return 0, 0, err - } - - return now.Sub(channel.openedAt), uptime, nil -} - -// getFlapCount returns the peer's flap count and the timestamp that we last -// recorded a flap. -func (p *peerLog) getFlapCount() (int, *time.Time) { - return p.flapCount, p.lastFlap -} - -// listEvents returns all of the events that our event log has tracked, -// including events that are staged for addition to our set of events but have -// not yet been committed to (because we rate limit and store only the aggregate -// outcome over a period). -func (p *peerLog) listEvents() []*event { - if p.stagedEvent == nil { - return p.onlineEvents - } - - return append(p.onlineEvents, p.stagedEvent) -} - -// onlinePeriod represents a period of time over which a peer was online. 
-type onlinePeriod struct { - start, end time.Time -} - -// getOnlinePeriods returns a list of all the periods that the event log has -// recorded the remote peer as being online. In the unexpected case where there -// are no events, the function returns early. Online periods are defined as a -// peer online event which is terminated by a peer offline event. If the event -// log ends on a peer online event, it appends a final period which is -// calculated until the present. This function expects the event log provided -// to be ordered by ascending timestamp, and can tolerate multiple consecutive -// online or offline events. -func (p *peerLog) getOnlinePeriods() []*onlinePeriod { - events := p.listEvents() - - // Return early if there are no events, there are no online periods. - if len(events) == 0 { - return nil - } - - var ( - // lastEvent tracks the last event that we had that was of - // a different type to our own. It is used to determine the - // start time of our online periods when we experience an - // offline event, and to track our last recorded state. - lastEvent *event - onlinePeriods []*onlinePeriod - ) - - // Loop through all events to build a list of periods that the peer was - // online. Online periods are added when they are terminated with a peer - // offline event. If the log ends on an online event, the period between - // the online event and the present is not tracked. The type of the most - // recent event is tracked using the offline bool so that we can add a - // final online period if necessary. - for _, event := range events { - switch event.eventType { - case peerOnlineEvent: - // If our previous event is nil, we just set it and - // break out of the switch. - if lastEvent == nil { - lastEvent = event - break - } - - // If our previous event was an offline event, we update - // it to this event. 
We do not do this if it was an - // online event because duplicate online events would - // progress our online timestamp forward (rather than - // keep it at our earliest online event timestamp). - if lastEvent.eventType == peerOfflineEvent { - lastEvent = event - } - - case peerOfflineEvent: - // If our previous event is nil, we just set it and - // break out of the switch since we cannot record an - // online period from this single event. - if lastEvent == nil { - lastEvent = event - break - } - - // If the last event we saw was an online event, we - // add an online period to our set and progress our - // previous event to this offline event. We do not - // do this if we have had duplicate offline events - // because we would be tracking the most recent offline - // event (rather than keep it at our earliest offline - // event timestamp). - if lastEvent.eventType == peerOnlineEvent { - onlinePeriods = append( - onlinePeriods, &onlinePeriod{ - start: lastEvent.timestamp, - end: event.timestamp, - }, - ) - - lastEvent = event - } - } - } - - // If the last event was an peer offline event, we do not need to - // calculate a final online period and can return online periods as is. - if lastEvent.eventType == peerOfflineEvent { - return onlinePeriods - } - - // The log ended on an online event, so we need to add a final online - // period which terminates at the present. - finalEvent := &onlinePeriod{ - start: lastEvent.timestamp, - end: p.clock.Now(), - } - - // Add the final online period to the set and return. - return append(onlinePeriods, finalEvent) -} - -// uptime calculates the total uptime we have recorded for a peer over the -// inclusive range specified. An error is returned if the end of the range is -// before the start or a zero end time is returned. -func (p *peerLog) uptime(start, end time.Time) (time.Duration, er.R) { - // Error if we are provided with an invalid range to calculate uptime - // for. 
- if end.Before(start) { - return 0, er.Errorf("end time: %v before start time: %v", - end, start) - } - if end.IsZero() { - return 0, er.Errorf("zero end time") - } - - var uptime time.Duration - - for _, p := range p.getOnlinePeriods() { - // The online period ends before the range we're looking at, so - // we can skip over it. - if p.end.Before(start) { - continue - } - // The online period starts after the range we're looking at, so - // can stop calculating uptime. - if p.start.After(end) { - break - } - - // If the online period starts before our range, shift the start - // time up so that we only calculate uptime from the start of - // our range. - if p.start.Before(start) { - p.start = start - } - - // If the online period ends before our range, shift the end - // time forward so that we only calculate uptime until the end - // of the range. - if p.end.After(end) { - p.end = end - } - - uptime += p.end.Sub(p.start) - } - - return uptime, nil -} diff --git a/lnd/chanfitness/chanevent_test.go b/lnd/chanfitness/chanevent_test.go deleted file mode 100644 index a0a6a131..00000000 --- a/lnd/chanfitness/chanevent_test.go +++ /dev/null @@ -1,565 +0,0 @@ -package chanfitness - -import ( - "testing" - "time" - - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/lnd/clock" - "github.com/pkt-cash/pktd/wire" - "github.com/stretchr/testify/require" -) - -// TestPeerLog tests the functionality of the peer log struct. -func TestPeerLog(t *testing.T) { - clock := clock.NewTestClock(testNow) - peerLog := newPeerLog(clock, 0, nil) - - // assertFlapCount is a helper that asserts that our peer's flap count - // and timestamp is set to expected values. 
- assertFlapCount := func(expectedCount int, expectedTs *time.Time) { - flapCount, flapTs := peerLog.getFlapCount() - require.Equal(t, expectedCount, flapCount) - require.Equal(t, expectedTs, flapTs) - } - - require.Zero(t, peerLog.channelCount()) - require.False(t, peerLog.online) - assertFlapCount(0, nil) - - // Test that looking up an unknown channel fails. - _, _, err := peerLog.channelUptime(wire.OutPoint{Index: 1}) - util.RequireErr(t, err) - - lastFlap := clock.Now() - - // Add an offline event, since we have no channels, we do not expect - // to have any online periods recorded for our peer. However, we should - // increment our flap count for the peer. - peerLog.onlineEvent(false) - require.Len(t, peerLog.getOnlinePeriods(), 0) - assertFlapCount(1, &lastFlap) - - // Bump our test clock's time by an hour so that we can create an online - // event with a distinct time. - lastFlap = testNow.Add(time.Hour) - clock.SetTime(lastFlap) - - // Likewise, if we have an online event, nothing beyond the online state - // of our peer log should change, but our flap count should change. - peerLog.onlineEvent(true) - require.Len(t, peerLog.getOnlinePeriods(), 0) - assertFlapCount(2, &lastFlap) - - // Add a channel and assert that we have one channel listed. Since this - // is the first channel we track for the peer, we expect an online - // event to be added, however, our flap count should not change because - // this is not a new online event, we are just copying one into our log - // for our purposes. - chan1 := wire.OutPoint{ - Index: 1, - } - util.RequireNoErr(t, peerLog.addChannel(chan1)) - require.Equal(t, 1, peerLog.channelCount()) - assertFlapCount(2, &lastFlap) - - // Assert that we can now successfully get our added channel. - _, _, err = peerLog.channelUptime(chan1) - util.RequireNoErr(t, err) - - // Bump our test clock's time so that our current time is different to - // channel open time. 
- lastFlap = clock.Now().Add(time.Hour) - clock.SetTime(lastFlap) - - // Now that we have added a channel and an hour has passed, we expect - // our uptime and lifetime to both equal an hour. - lifetime, uptime, err := peerLog.channelUptime(chan1) - util.RequireNoErr(t, err) - require.Equal(t, time.Hour, lifetime) - require.Equal(t, time.Hour, uptime) - - // Add an offline event for our peer and assert that our flap count is - // incremented. - peerLog.onlineEvent(false) - assertFlapCount(3, &lastFlap) - - // Now we add another channel to our store and assert that we now report - // two channels for this peer. - chan2 := wire.OutPoint{ - Index: 2, - } - util.RequireNoErr(t, peerLog.addChannel(chan2)) - require.Equal(t, 2, peerLog.channelCount()) - - // Progress our time again, so that our peer has now been offline for - // two hours. - now := lastFlap.Add(time.Hour * 2) - clock.SetTime(now) - - // Our first channel should report as having been monitored for three - // hours, but only online for one of those hours. - lifetime, uptime, err = peerLog.channelUptime(chan1) - util.RequireNoErr(t, err) - require.Equal(t, time.Hour*3, lifetime) - require.Equal(t, time.Hour, uptime) - - // Remove our first channel and check that we can still correctly query - // uptime for the second channel. - util.RequireNoErr(t, peerLog.removeChannel(chan1)) - require.Equal(t, 1, peerLog.channelCount()) - - // Our second channel, which was created when our peer was offline, - // should report as having been monitored for two hours, but have zero - // uptime. - lifetime, uptime, err = peerLog.channelUptime(chan2) - util.RequireNoErr(t, err) - require.Equal(t, time.Hour*2, lifetime) - require.Equal(t, time.Duration(0), uptime) - - // Finally, remove our second channel and assert that our peer cleans - // up its in memory set of events but keeps its flap count record. 
- util.RequireNoErr(t, peerLog.removeChannel(chan2)) - require.Equal(t, 0, peerLog.channelCount()) - require.Len(t, peerLog.onlineEvents, 0) - assertFlapCount(3, &lastFlap) - - require.Len(t, peerLog.listEvents(), 0) - require.Nil(t, peerLog.stagedEvent) -} - -// TestRateLimitAdd tests the addition of events to the event log with rate -// limiting in place. -func TestRateLimitAdd(t *testing.T) { - // Create a mock clock specifically for this test so that we can - // progress time without affecting the other tests. - mockedClock := clock.NewTestClock(testNow) - - // Create a new peer log. - peerLog := newPeerLog(mockedClock, 0, nil) - require.Nil(t, peerLog.stagedEvent) - - // Create a channel for our peer log, otherwise it will not track online - // events. - util.RequireNoErr(t, peerLog.addChannel(wire.OutPoint{})) - - // First, we add an event to the event log. Since we have no previous - // events, we expect this event to staged immediately. - peerEvent := &event{ - timestamp: testNow, - eventType: peerOfflineEvent, - } - - peerLog.onlineEvent(false) - require.Equal(t, peerEvent, peerLog.stagedEvent) - - // We immediately add another event to our event log. We expect our - // staged event to be replaced with this new event, because insufficient - // time has passed since our last event. - peerEvent = &event{ - timestamp: testNow, - eventType: peerOnlineEvent, - } - - peerLog.onlineEvent(true) - require.Equal(t, peerEvent, peerLog.stagedEvent) - - // We get the amount of time that we need to pass before we record an - // event from our rate limiting tiers. We then progress our test clock - // to just after this point. - delta := getRateLimit(peerLog.flapCount) - newNow := testNow.Add(delta + 1) - mockedClock.SetTime(newNow) - - // Now, when we add an event, we expect our staged event to be added - // to our events list and for our new event to be staged. 
- newEvent := &event{ - timestamp: newNow, - eventType: peerOfflineEvent, - } - peerLog.onlineEvent(false) - - require.Equal(t, []*event{peerEvent}, peerLog.onlineEvents) - require.Equal(t, newEvent, peerLog.stagedEvent) - - // Now, we test the case where we add many events to our log. We expect - // our set of events to be untouched, but for our staged event to be - // updated. - nextEvent := &event{ - timestamp: newNow, - eventType: peerOnlineEvent, - } - - for i := 0; i < 5; i++ { - // We flip the kind of event for each type so that we can check - // that our staged event is definitely changing each time. - if i%2 == 0 { - nextEvent.eventType = peerOfflineEvent - } else { - nextEvent.eventType = peerOnlineEvent - } - - online := nextEvent.eventType == peerOnlineEvent - - peerLog.onlineEvent(online) - require.Equal(t, []*event{peerEvent}, peerLog.onlineEvents) - require.Equal(t, nextEvent, peerLog.stagedEvent) - } - - // Now, we test the case where a peer's flap count is cooled down - // because it has not flapped for a while. Set our peer's flap count so - // that we fall within our second rate limiting tier and assert that we - // are at this level. - peerLog.flapCount = rateLimitScale + 1 - rateLimit := getRateLimit(peerLog.flapCount) - require.Equal(t, rateLimits[1], rateLimit) - - // Progress our clock to the point where we will have our flap count - // cooled. - newNow = mockedClock.Now().Add(flapCountCooldownPeriod) - mockedClock.SetTime(newNow) - - // Add an online event, and expect it to be staged. - onlineEvent := &event{ - timestamp: newNow, - eventType: peerOnlineEvent, - } - peerLog.onlineEvent(true) - require.Equal(t, onlineEvent, peerLog.stagedEvent) - - // Progress our clock by the rate limit level that we will be on if - // our flap rate is cooled down to a lower level. - newNow = mockedClock.Now().Add(rateLimits[0] + 1) - mockedClock.SetTime(newNow) - - // Add another event. 
We expect this event to be staged and our previous - // event to be flushed to the event log (because our cooldown has been - // applied). - offlineEvent := &event{ - timestamp: newNow, - eventType: peerOfflineEvent, - } - peerLog.onlineEvent(false) - require.Equal(t, offlineEvent, peerLog.stagedEvent) - - flushedEventIdx := len(peerLog.onlineEvents) - 1 - require.Equal( - t, onlineEvent, peerLog.onlineEvents[flushedEventIdx], - ) -} - -// TestGetOnlinePeriod tests the getOnlinePeriod function. It tests the case -// where no events present, and the case where an additional online period -// must be added because the event log ends on an online event. -func TestGetOnlinePeriod(t *testing.T) { - fourHoursAgo := testNow.Add(time.Hour * -4) - threeHoursAgo := testNow.Add(time.Hour * -3) - twoHoursAgo := testNow.Add(time.Hour * -2) - - tests := []struct { - name string - events []*event - expectedOnline []*onlinePeriod - }{ - { - name: "no events", - }, - { - name: "start on online period", - events: []*event{ - { - timestamp: threeHoursAgo, - eventType: peerOnlineEvent, - }, - { - timestamp: twoHoursAgo, - eventType: peerOfflineEvent, - }, - }, - expectedOnline: []*onlinePeriod{ - { - start: threeHoursAgo, - end: twoHoursAgo, - }, - }, - }, - { - name: "start on offline period", - events: []*event{ - { - timestamp: fourHoursAgo, - eventType: peerOfflineEvent, - }, - }, - }, - { - name: "end on an online period", - events: []*event{ - { - timestamp: fourHoursAgo, - eventType: peerOnlineEvent, - }, - }, - expectedOnline: []*onlinePeriod{ - { - start: fourHoursAgo, - end: testNow, - }, - }, - }, - { - name: "duplicate online events", - events: []*event{ - { - timestamp: fourHoursAgo, - eventType: peerOnlineEvent, - }, - { - timestamp: threeHoursAgo, - eventType: peerOnlineEvent, - }, - }, - expectedOnline: []*onlinePeriod{ - { - start: fourHoursAgo, - end: testNow, - }, - }, - }, - { - name: "duplicate offline events", - events: []*event{ - { - timestamp: fourHoursAgo, - 
eventType: peerOfflineEvent, - }, - { - timestamp: threeHoursAgo, - eventType: peerOfflineEvent, - }, - }, - expectedOnline: nil, - }, - { - name: "duplicate online then offline", - events: []*event{ - { - timestamp: fourHoursAgo, - eventType: peerOnlineEvent, - }, - { - timestamp: threeHoursAgo, - eventType: peerOnlineEvent, - }, - { - timestamp: twoHoursAgo, - eventType: peerOfflineEvent, - }, - }, - expectedOnline: []*onlinePeriod{ - { - start: fourHoursAgo, - end: twoHoursAgo, - }, - }, - }, - { - name: "duplicate offline then online", - events: []*event{ - { - timestamp: fourHoursAgo, - eventType: peerOfflineEvent, - }, - { - timestamp: threeHoursAgo, - eventType: peerOfflineEvent, - }, - { - timestamp: twoHoursAgo, - eventType: peerOnlineEvent, - }, - }, - expectedOnline: []*onlinePeriod{ - { - start: twoHoursAgo, - end: testNow, - }, - }, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - score := &peerLog{ - onlineEvents: test.events, - clock: clock.NewTestClock(testNow), - } - - online := score.getOnlinePeriods() - - require.Equal(t, test.expectedOnline, online) - }) - - } -} - -// TestUptime tests channel uptime calculation based on its event log. -func TestUptime(t *testing.T) { - fourHoursAgo := testNow.Add(time.Hour * -4) - threeHoursAgo := testNow.Add(time.Hour * -3) - twoHoursAgo := testNow.Add(time.Hour * -2) - oneHourAgo := testNow.Add(time.Hour * -1) - - tests := []struct { - name string - - // events is the set of event log that we are calculating uptime - // for. - events []*event - - // startTime is the beginning of the period that we are - // calculating uptime for, it cannot have a zero value. - startTime time.Time - - // endTime is the end of the period that we are calculating - // uptime for, it cannot have a zero value. - endTime time.Time - - // expectedUptime is the amount of uptime we expect to be - // calculated over the period specified by startTime and - // endTime. 
- expectedUptime time.Duration - - // expectErr is set to true if we expect an error to be returned - // when calling the uptime function. - expectErr bool - }{ - { - name: "End before start", - endTime: threeHoursAgo, - startTime: testNow, - expectErr: true, - }, - { - name: "Zero end time", - expectErr: true, - }, - { - name: "online event and no offline", - events: []*event{ - { - timestamp: fourHoursAgo, - eventType: peerOnlineEvent, - }, - }, - startTime: fourHoursAgo, - endTime: testNow, - expectedUptime: time.Hour * 4, - }, - { - name: "online then offline event", - events: []*event{ - { - timestamp: threeHoursAgo, - eventType: peerOnlineEvent, - }, - { - timestamp: twoHoursAgo, - eventType: peerOfflineEvent, - }, - }, - startTime: fourHoursAgo, - endTime: testNow, - expectedUptime: time.Hour, - }, - { - name: "online event before uptime period", - events: []*event{ - { - timestamp: threeHoursAgo, - eventType: peerOnlineEvent, - }, - }, - startTime: twoHoursAgo, - endTime: testNow, - expectedUptime: time.Hour * 2, - }, - { - name: "offline event after uptime period", - events: []*event{ - { - timestamp: fourHoursAgo, - eventType: peerOnlineEvent, - }, - { - timestamp: testNow.Add(time.Hour), - eventType: peerOfflineEvent, - }, - }, - startTime: twoHoursAgo, - endTime: testNow, - expectedUptime: time.Hour * 2, - }, - { - name: "all events within period", - events: []*event{ - { - timestamp: twoHoursAgo, - eventType: peerOnlineEvent, - }, - }, - startTime: threeHoursAgo, - endTime: oneHourAgo, - expectedUptime: time.Hour, - }, - { - name: "multiple online and offline", - events: []*event{ - { - timestamp: testNow.Add(time.Hour * -7), - eventType: peerOnlineEvent, - }, - { - timestamp: testNow.Add(time.Hour * -6), - eventType: peerOfflineEvent, - }, - { - timestamp: testNow.Add(time.Hour * -5), - eventType: peerOnlineEvent, - }, - { - timestamp: testNow.Add(time.Hour * -4), - eventType: peerOfflineEvent, - }, - { - timestamp: testNow.Add(time.Hour * -3), - 
eventType: peerOnlineEvent, - }, - }, - startTime: testNow.Add(time.Hour * -8), - endTime: oneHourAgo, - expectedUptime: time.Hour * 4, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - score := &peerLog{ - onlineEvents: test.events, - clock: clock.NewTestClock(testNow), - } - - uptime, err := score.uptime( - test.startTime, test.endTime, - ) - require.Equal(t, test.expectErr, err != nil) - require.Equal(t, test.expectedUptime, uptime) - }) - } -} diff --git a/lnd/chanfitness/chaneventstore.go b/lnd/chanfitness/chaneventstore.go deleted file mode 100644 index 6aa460f4..00000000 --- a/lnd/chanfitness/chaneventstore.go +++ /dev/null @@ -1,563 +0,0 @@ -// Package chanfitness monitors the behaviour of channels to provide insight -// into the health and performance of a channel. This is achieved by maintaining -// an event store which tracks events for each channel. -// -// Lifespan: the period that the channel has been known to the scoring system. -// Note that lifespan may not equal the channel's full lifetime because data is -// not currently persisted. -// -// Uptime: the total time within a given period that the channel's remote peer -// has been online. -package chanfitness - -import ( - "sync" - "time" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/channelnotifier" - "github.com/pkt-cash/pktd/lnd/clock" - "github.com/pkt-cash/pktd/lnd/peernotifier" - "github.com/pkt-cash/pktd/lnd/routing/route" - "github.com/pkt-cash/pktd/lnd/subscribe" - "github.com/pkt-cash/pktd/lnd/ticker" - "github.com/pkt-cash/pktd/pktlog/log" - "github.com/pkt-cash/pktd/wire" -) - -const ( - // FlapCountFlushRate determines how often we write peer total flap - // count to disk. 
- FlapCountFlushRate = time.Hour -) - -var ( - Err = er.NewErrorType("lnd.chanfitness") - // errShuttingDown is returned when the store cannot respond to a query - // because it has received the shutdown signal. - errShuttingDown = Err.CodeWithDetail("errShuttingDown", "channel event store shutting down") - - // ErrChannelNotFound is returned when a query is made for a channel - // that the event store does not have knowledge of. - ErrChannelNotFound = Err.CodeWithDetail("ErrChannelNotFound", "channel not found in event store") - - // ErrPeerNotFound is returned when a query is made for a channel - // that has a peer that the event store is not currently tracking. - ErrPeerNotFound = Err.CodeWithDetail("ErrPeerNotFound", "peer not found in event store") -) - -// ChannelEventStore maintains a set of event logs for the node's channels to -// provide insight into the performance and health of channels. -type ChannelEventStore struct { - cfg *Config - - // peers tracks all of our currently monitored peers and their channels. - peers map[route.Vertex]peerMonitor - - // chanInfoRequests serves requests for information about our channel. - chanInfoRequests chan channelInfoRequest - - // peerRequests serves requests for information about a peer. - peerRequests chan peerRequest - - quit chan struct{} - - wg sync.WaitGroup -} - -// Config provides the event store with functions required to monitor channel -// activity. All elements of the config must be non-nil for the event store to -// operate. -type Config struct { - // SubscribeChannelEvents provides a subscription client which provides - // a stream of channel events. - SubscribeChannelEvents func() (subscribe.Subscription, er.R) - - // SubscribePeerEvents provides a subscription client which provides a - // stream of peer online/offline events. 
- SubscribePeerEvents func() (subscribe.Subscription, er.R) - - // GetOpenChannels provides a list of existing open channels which is - // used to populate the ChannelEventStore with a set of channels on - // startup. - GetOpenChannels func() ([]*channeldb.OpenChannel, er.R) - - // Clock is the time source that the subsystem uses, provided here - // for ease of testing. - Clock clock.Clock - - // WriteFlapCounts records the flap count for a set of peers on disk. - WriteFlapCount func(map[route.Vertex]*channeldb.FlapCount) er.R - - // ReadFlapCount gets the flap count for a peer on disk. - ReadFlapCount func(route.Vertex) (*channeldb.FlapCount, er.R) - - // FlapCountTicker is a ticker which controls how often we flush our - // peer's flap count to disk. - FlapCountTicker ticker.Ticker -} - -// peerFlapCountMap is the map used to map peers to flap counts, declared here -// to allow shorter function signatures. -type peerFlapCountMap map[route.Vertex]*channeldb.FlapCount - -type channelInfoRequest struct { - peer route.Vertex - channelPoint wire.OutPoint - responseChan chan channelInfoResponse -} - -type channelInfoResponse struct { - info *ChannelInfo - err er.R -} - -type peerRequest struct { - peer route.Vertex - responseChan chan peerResponse -} - -type peerResponse struct { - flapCount int - ts *time.Time - err er.R -} - -// NewChannelEventStore initializes an event store with the config provided. -// Note that this function does not start the main event loop, Start() must be -// called. 
-func NewChannelEventStore(config *Config) *ChannelEventStore { - store := &ChannelEventStore{ - cfg: config, - peers: make(map[route.Vertex]peerMonitor), - chanInfoRequests: make(chan channelInfoRequest), - peerRequests: make(chan peerRequest), - quit: make(chan struct{}), - } - - return store -} - -// Start adds all existing open channels to the event store and starts the main -// loop which records channel and peer events, and serves requests for -// information from the store. If this function fails, it cancels its existing -// subscriptions and returns an error. -func (c *ChannelEventStore) Start() er.R { - // Create a subscription to channel events. - channelClient, err := c.cfg.SubscribeChannelEvents() - if err != nil { - return err - } - - // Create a subscription to peer events. If an error occurs, cancel the - // existing subscription to channel events and return. - peerClient, err := c.cfg.SubscribePeerEvents() - if err != nil { - channelClient.Cancel() - return err - } - - // cancel should be called to cancel all subscriptions if an error - // occurs. - cancel := func() { - channelClient.Cancel() - peerClient.Cancel() - } - - // Add the existing set of channels to the event store. This is required - // because channel events will not be triggered for channels that exist - // at startup time. - channels, err := c.cfg.GetOpenChannels() - if err != nil { - cancel() - return err - } - - log.Infof("Adding %v channels to event store", len(channels)) - - for _, ch := range channels { - peerKey, err := route.NewVertexFromBytes( - ch.IdentityPub.SerializeCompressed(), - ) - if err != nil { - cancel() - return err - } - - // Add existing channels to the channel store with an initial - // peer online or offline event. - c.addChannel(ch.FundingOutpoint, peerKey) - } - - // Start a goroutine that consumes events from all subscriptions. 
- c.wg.Add(1) - go c.consume(&subscriptions{ - channelUpdates: channelClient.Updates(), - peerUpdates: peerClient.Updates(), - cancel: cancel, - }) - - return nil -} - -// Stop terminates all goroutines started by the event store. -func (c *ChannelEventStore) Stop() { - log.Info("Stopping event store") - - // Stop the consume goroutine. - close(c.quit) - c.wg.Wait() - - // Stop the ticker after the goroutine reading from it has exited, to - // avoid a race. - c.cfg.FlapCountTicker.Stop() -} - -// addChannel checks whether we are already tracking a channel's peer, creates a -// new peer log to track it if we are not yet monitoring it, and adds the -// channel. -func (c *ChannelEventStore) addChannel(channelPoint wire.OutPoint, - peer route.Vertex) { - - peerMonitor, err := c.getPeerMonitor(peer) - if err != nil { - log.Errorf("could not create monitor: %v", err) - return - } - - if err := peerMonitor.addChannel(channelPoint); err != nil { - log.Errorf("could not add channel: %v", err) - } -} - -// getPeerMonitor tries to get an existing peer monitor from our in memory list, -// and falls back to creating a new monitor if it is not currently known. -func (c *ChannelEventStore) getPeerMonitor(peer route.Vertex) (peerMonitor, - er.R) { - - peerMonitor, ok := c.peers[peer] - if ok { - return peerMonitor, nil - } - - var ( - flapCount int - lastFlap *time.Time - ) - - historicalFlap, err := c.cfg.ReadFlapCount(peer) - switch { - // If we do not have any records for this peer we set a 0 flap count - // and timestamp. - case channeldb.ErrNoPeerBucket.Is(err): - - case err == nil: - flapCount = int(historicalFlap.Count) - lastFlap = &historicalFlap.LastFlap - - // Return if we get an unexpected error. 
- default: - return nil, err - } - - peerMonitor = newPeerLog(c.cfg.Clock, flapCount, lastFlap) - c.peers[peer] = peerMonitor - - return peerMonitor, nil -} - -// closeChannel records a closed time for a channel, and returns early is the -// channel is not known to the event store. We log warnings (rather than errors) -// when we cannot find a peer/channel because channels that we restore from a -// static channel backup do not have their open notified, so the event store -// never learns about them, but they are closed using the regular flow so we -// will try to remove them on close. At present, we cannot easily distinguish -// between these closes and others. -func (c *ChannelEventStore) closeChannel(channelPoint wire.OutPoint, - peer route.Vertex) { - - peerMonitor, ok := c.peers[peer] - if !ok { - log.Warnf("peer not known to store: %v", peer) - return - } - - if err := peerMonitor.removeChannel(channelPoint); err != nil { - log.Warnf("could not remove channel: %v", err) - } -} - -// peerEvent creates a peer monitor for a peer if we do not currently have -// one, and adds an online event to it. -func (c *ChannelEventStore) peerEvent(peer route.Vertex, online bool) { - peerMonitor, err := c.getPeerMonitor(peer) - if err != nil { - log.Errorf("could not create monitor: %v", err) - return - } - - peerMonitor.onlineEvent(online) -} - -// subscriptions abstracts away from subscription clients to allow for mocking. -type subscriptions struct { - channelUpdates <-chan interface{} - peerUpdates <-chan interface{} - cancel func() -} - -// consume is the event store's main loop. It consumes subscriptions to update -// the event store with channel and peer events, and serves requests for channel -// uptime and lifespan. -func (c *ChannelEventStore) consume(subscriptions *subscriptions) { - // Start our flap count ticker. - c.cfg.FlapCountTicker.Resume() - - // On exit, we will cancel our subscriptions and write our most recent - // flap counts to disk. 
This ensures that we have consistent data in - // the case of a graceful shutdown. If we do not shutdown gracefully, - // our worst case is data from our last flap count tick (1H). - defer func() { - subscriptions.cancel() - - if err := c.recordFlapCount(); err != nil { - log.Errorf("error recording flap on shutdown: %v", err) - } - - c.wg.Done() - }() - - // Consume events until the channel is closed. - for { - select { - // Process channel opened and closed events. - case e := <-subscriptions.channelUpdates: - switch event := e.(type) { - // A new channel has been opened, we must add the - // channel to the store and record a channel open event. - case channelnotifier.OpenChannelEvent: - compressed := event.Channel.IdentityPub.SerializeCompressed() - peerKey, err := route.NewVertexFromBytes( - compressed, - ) - if err != nil { - log.Errorf("Could not get vertex "+ - "from: %v", compressed) - } - - c.addChannel( - event.Channel.FundingOutpoint, peerKey, - ) - - // A channel has been closed, we must remove the channel - // from the store and record a channel closed event. - case channelnotifier.ClosedChannelEvent: - compressed := event.CloseSummary.RemotePub.SerializeCompressed() - peerKey, err := route.NewVertexFromBytes( - compressed, - ) - if err != nil { - log.Errorf("Could not get vertex "+ - "from: %v", compressed) - continue - } - - c.closeChannel( - event.CloseSummary.ChanPoint, peerKey, - ) - } - - // Process peer online and offline events. - case e := <-subscriptions.peerUpdates: - switch event := e.(type) { - // We have reestablished a connection with our peer, - // and should record an online event for any channels - // with that peer. - case peernotifier.PeerOnlineEvent: - c.peerEvent(event.PubKey, true) - - // We have lost a connection with our peer, and should - // record an offline event for any channels with that - // peer. - case peernotifier.PeerOfflineEvent: - c.peerEvent(event.PubKey, false) - } - - // Serve all requests for channel lifetime. 
- case req := <-c.chanInfoRequests: - var resp channelInfoResponse - - resp.info, resp.err = c.getChanInfo(req) - req.responseChan <- resp - - // Serve all requests for information about our peer. - case req := <-c.peerRequests: - var resp peerResponse - - resp.flapCount, resp.ts, resp.err = c.flapCount( - req.peer, - ) - req.responseChan <- resp - - case <-c.cfg.FlapCountTicker.Ticks(): - if err := c.recordFlapCount(); err != nil { - log.Errorf("could not record flap "+ - "count: %v", err) - } - - // Exit if the store receives the signal to shutdown. - case <-c.quit: - return - } - } -} - -// ChannelInfo provides the set of information that the event store has recorded -// for a channel. -type ChannelInfo struct { - // Lifetime is the total amount of time we have monitored the channel - // for. - Lifetime time.Duration - - // Uptime is the total amount of time that the channel peer has been - // observed as online during the monitored lifespan. - Uptime time.Duration -} - -// GetChanInfo gets all the information we have on a channel in the event store. -func (c *ChannelEventStore) GetChanInfo(channelPoint wire.OutPoint, - peer route.Vertex) (*ChannelInfo, er.R) { - - request := channelInfoRequest{ - peer: peer, - channelPoint: channelPoint, - responseChan: make(chan channelInfoResponse), - } - - // Send a request for the channel's information to the main event loop, - // or return early with an error if the store has already received a - // shutdown signal. - select { - case c.chanInfoRequests <- request: - case <-c.quit: - return nil, errShuttingDown.Default() - } - - // Return the response we receive on the response channel or exit early - // if the store is instructed to exit. - select { - case resp := <-request.responseChan: - return resp.info, resp.err - - case <-c.quit: - return nil, errShuttingDown.Default() - } -} - -// getChanInfo collects channel information for a channel. It gets uptime over -// the full lifetime of the channel. 
-func (c *ChannelEventStore) getChanInfo(req channelInfoRequest) (*ChannelInfo, - er.R) { - - peerMonitor, ok := c.peers[req.peer] - if !ok { - return nil, ErrPeerNotFound.Default() - } - - lifetime, uptime, err := peerMonitor.channelUptime(req.channelPoint) - if err != nil { - return nil, err - } - - return &ChannelInfo{ - Lifetime: lifetime, - Uptime: uptime, - }, nil -} - -// FlapCount returns the flap count we have for a peer and the timestamp of its -// last flap. If we do not have any flaps recorded for the peer, the last flap -// timestamp will be nil. -func (c *ChannelEventStore) FlapCount(peer route.Vertex) (int, *time.Time, - er.R) { - - request := peerRequest{ - peer: peer, - responseChan: make(chan peerResponse), - } - - // Send a request for the peer's information to the main event loop, - // or return early with an error if the store has already received a - // shutdown signal. - select { - case c.peerRequests <- request: - case <-c.quit: - return 0, nil, errShuttingDown.Default() - } - - // Return the response we receive on the response channel or exit early - // if the store is instructed to exit. - select { - case resp := <-request.responseChan: - return resp.flapCount, resp.ts, resp.err - - case <-c.quit: - return 0, nil, errShuttingDown.Default() - } -} - -// flapCount gets our peer flap count and last flap timestamp from our in memory -// record of a peer, falling back to on disk if we are not currently tracking -// the peer. If we have no flap count recorded for the peer, a nil last flap -// time will be returned. -func (c *ChannelEventStore) flapCount(peer route.Vertex) (int, *time.Time, - er.R) { - - // First check whether we are tracking this peer in memory, because this - // record will have the most accurate flap count. We do not fail if we - // can't find the peer in memory, because we may have previously - // recorded its flap count on disk. 
- peerMonitor, ok := c.peers[peer] - if ok { - count, ts := peerMonitor.getFlapCount() - return count, ts, nil - } - - // Try to get our flap count from the database. If this value is not - // recorded, we return a nil last flap time to indicate that we have no - // record of the peer's flap count. - flapCount, err := c.cfg.ReadFlapCount(peer) - switch { - case channeldb.ErrNoPeerBucket.Is(err): - return 0, nil, nil - - case nil == err: - return int(flapCount.Count), &flapCount.LastFlap, nil - - default: - return 0, nil, err - } -} - -// recordFlapCount will record our flap count for each peer that we are -// currently tracking, skipping peers that have a 0 flap count. -func (c *ChannelEventStore) recordFlapCount() er.R { - updates := make(peerFlapCountMap) - - for peer, monitor := range c.peers { - flapCount, lastFlap := monitor.getFlapCount() - if lastFlap == nil { - continue - } - - updates[peer] = &channeldb.FlapCount{ - Count: uint32(flapCount), - LastFlap: *lastFlap, - } - } - - log.Debugf("recording flap count for: %v peers", len(updates)) - - return c.cfg.WriteFlapCount(updates) -} diff --git a/lnd/chanfitness/chaneventstore_test.go b/lnd/chanfitness/chaneventstore_test.go deleted file mode 100644 index 320cdd01..00000000 --- a/lnd/chanfitness/chaneventstore_test.go +++ /dev/null @@ -1,344 +0,0 @@ -package chanfitness - -import ( - "math/big" - "testing" - "time" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/clock" - "github.com/pkt-cash/pktd/lnd/routing/route" - "github.com/pkt-cash/pktd/lnd/subscribe" - "github.com/pkt-cash/pktd/wire" - "github.com/stretchr/testify/require" -) - -// testNow is the current time tests will use. -var testNow = time.Unix(1592465134, 0) - -// TestStartStoreError tests the starting of the store in cases where the setup -// functions fail. 
It does not test the mechanics of consuming events because -// these are covered in a separate set of tests. -func TestStartStoreError(t *testing.T) { - // Ok and erroring subscribe functions are defined here to de-clutter - // tests. - okSubscribeFunc := func() (subscribe.Subscription, er.R) { - return newMockSubscription(t), nil - } - - errSubscribeFunc := func() (subscribe.Subscription, er.R) { - return nil, er.New("intentional test err") - } - - tests := []struct { - name string - ChannelEvents func() (subscribe.Subscription, er.R) - PeerEvents func() (subscribe.Subscription, er.R) - GetChannels func() ([]*channeldb.OpenChannel, er.R) - }{ - { - name: "Channel events fail", - ChannelEvents: errSubscribeFunc, - }, - { - name: "Peer events fail", - ChannelEvents: okSubscribeFunc, - PeerEvents: errSubscribeFunc, - }, - { - name: "Get open channels fails", - ChannelEvents: okSubscribeFunc, - PeerEvents: okSubscribeFunc, - GetChannels: func() ([]*channeldb.OpenChannel, er.R) { - return nil, er.New("intentional test err") - }, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - clock := clock.NewTestClock(testNow) - - store := NewChannelEventStore(&Config{ - SubscribeChannelEvents: test.ChannelEvents, - SubscribePeerEvents: test.PeerEvents, - GetOpenChannels: test.GetChannels, - Clock: clock, - }) - - err := store.Start() - // Check that we receive an error, because the test only - // checks for error cases. - if err == nil { - t.Fatalf("Expected error on startup, got: nil") - } - }) - } -} - -// TestMonitorChannelEvents tests the store's handling of channel and peer -// events. It tests for the unexpected cases where we receive a channel open for -// an already known channel and but does not test for closing an unknown channel -// because it would require custom logic in the test to prevent iterating -// through an eventLog which does not exist. 
This test does not test handling -// of uptime and lifespan requests, as they are tested in their own tests. -func TestMonitorChannelEvents(t *testing.T) { - var ( - pubKey = &btcec.PublicKey{ - X: big.NewInt(0), - Y: big.NewInt(1), - Curve: btcec.S256(), - } - - chan1 = wire.OutPoint{Index: 1} - chan2 = wire.OutPoint{Index: 2} - ) - - peer1, err := route.NewVertexFromBytes(pubKey.SerializeCompressed()) - util.RequireNoErr(t, err) - - t.Run("peer comes online after channel open", func(t *testing.T) { - gen := func(ctx *chanEventStoreTestCtx) { - ctx.sendChannelOpenedUpdate(pubKey, chan1) - ctx.peerEvent(peer1, true) - } - - testEventStore(t, gen, peer1, 1) - }) - - t.Run("duplicate channel open events", func(t *testing.T) { - gen := func(ctx *chanEventStoreTestCtx) { - ctx.sendChannelOpenedUpdate(pubKey, chan1) - ctx.sendChannelOpenedUpdate(pubKey, chan1) - ctx.peerEvent(peer1, true) - } - - testEventStore(t, gen, peer1, 1) - }) - - t.Run("peer online before channel created", func(t *testing.T) { - gen := func(ctx *chanEventStoreTestCtx) { - ctx.peerEvent(peer1, true) - ctx.sendChannelOpenedUpdate(pubKey, chan1) - } - - testEventStore(t, gen, peer1, 1) - }) - - t.Run("multiple channels for peer", func(t *testing.T) { - gen := func(ctx *chanEventStoreTestCtx) { - ctx.peerEvent(peer1, true) - ctx.sendChannelOpenedUpdate(pubKey, chan1) - - ctx.peerEvent(peer1, false) - ctx.sendChannelOpenedUpdate(pubKey, chan2) - } - - testEventStore(t, gen, peer1, 2) - }) - - t.Run("multiple channels for peer, one closed", func(t *testing.T) { - gen := func(ctx *chanEventStoreTestCtx) { - ctx.peerEvent(peer1, true) - ctx.sendChannelOpenedUpdate(pubKey, chan1) - - ctx.peerEvent(peer1, false) - ctx.sendChannelOpenedUpdate(pubKey, chan2) - - ctx.closeChannel(chan1, pubKey) - ctx.peerEvent(peer1, true) - } - - testEventStore(t, gen, peer1, 1) - }) - -} - -// testEventStore creates a new test contexts, generates a set of events for it -// and tests that it has the number of channels we 
expect. -func testEventStore(t *testing.T, generateEvents func(*chanEventStoreTestCtx), - peer route.Vertex, expectedChannels int) { - - testCtx := newChanEventStoreTestCtx(t) - testCtx.start() - - generateEvents(testCtx) - - // Shutdown the store so that we can safely access the maps in our event - // store. - testCtx.stop() - - // Get our peer and check that it has the channels we expect. - monitor, ok := testCtx.store.peers[peer] - require.True(t, ok) - - require.Equal(t, expectedChannels, monitor.channelCount()) -} - -// TestStoreFlapCount tests flushing of flap counts to disk on timer ticks and -// on store shutdown. -func TestStoreFlapCount(t *testing.T) { - testCtx := newChanEventStoreTestCtx(t) - testCtx.start() - - pubkey, _, _ := testCtx.createChannel() - testCtx.peerEvent(pubkey, false) - - // Now, we tick our flap count ticker. We expect our main goroutine to - // flush our tick count to disk. - testCtx.tickFlapCount() - - // Since we just tracked a offline event, we expect a single flap for - // our peer. - expectedUpdate := peerFlapCountMap{ - pubkey: { - Count: 1, - LastFlap: testCtx.clock.Now(), - }, - } - - testCtx.assertFlapCountUpdated() - testCtx.assertFlapCountUpdates(expectedUpdate) - - // Create three events for out peer, online/offline/online. - testCtx.peerEvent(pubkey, true) - testCtx.peerEvent(pubkey, false) - testCtx.peerEvent(pubkey, true) - - // Trigger another write. - testCtx.tickFlapCount() - - // Since we have processed 3 more events for our peer, we update our - // expected online map to have a flap count of 4 for this peer. - expectedUpdate[pubkey] = &channeldb.FlapCount{ - Count: 4, - LastFlap: testCtx.clock.Now(), - } - testCtx.assertFlapCountUpdated() - testCtx.assertFlapCountUpdates(expectedUpdate) - - testCtx.stop() -} - -// TestGetChanInfo tests the GetChanInfo function for the cases where a channel -// is known and unknown to the store. 
-func TestGetChanInfo(t *testing.T) { - ctx := newChanEventStoreTestCtx(t) - ctx.start() - - // Make a note of the time that our mocked clock starts on. - now := ctx.clock.Now() - - // Create mock vars for a channel but do not add them to our store yet. - peer, pk, channel := ctx.newChannel() - - // Send an online event for our peer, although we do not yet have an - // open channel. - ctx.peerEvent(peer, true) - - // Try to get info for a channel that has not been opened yet, we - // expect to get an error. - _, err := ctx.store.GetChanInfo(channel, peer) - require.True(t, ErrChannelNotFound.Is(err)) - - // Now we send our store a notification that a channel has been opened. - ctx.sendChannelOpenedUpdate(pk, channel) - - // Wait for our channel to be recognized by our store. We need to wait - // for the channel to be created so that we do not update our time - // before the channel open is processed. - require.Eventually(t, func() bool { - _, err = ctx.store.GetChanInfo(channel, peer) - return err == nil - }, timeout, time.Millisecond*20) - - // Increment our test clock by an hour. - now = now.Add(time.Hour) - ctx.clock.SetTime(now) - - // At this stage our channel has been open and online for an hour. - info, err := ctx.store.GetChanInfo(channel, peer) - util.RequireNoErr(t, err) - require.Equal(t, time.Hour, info.Lifetime) - require.Equal(t, time.Hour, info.Uptime) - - // Now we send a peer offline event for our channel. - ctx.peerEvent(peer, false) - - // Since we have not bumped our mocked time, our uptime calculations - // should be the same, even though we've just processed an offline - // event. - info, err = ctx.store.GetChanInfo(channel, peer) - util.RequireNoErr(t, err) - require.Equal(t, time.Hour, info.Lifetime) - require.Equal(t, time.Hour, info.Uptime) - - // Progress our time again. This time, our peer is currently tracked as - // being offline, so we expect our channel info to reflect that the peer - // has been offline for this period. 
- now = now.Add(time.Hour) - ctx.clock.SetTime(now) - - info, err = ctx.store.GetChanInfo(channel, peer) - util.RequireNoErr(t, err) - require.Equal(t, time.Hour*2, info.Lifetime) - require.Equal(t, time.Hour, info.Uptime) - - ctx.stop() -} - -// TestFlapCount tests querying the store for peer flap counts, covering the -// case where the peer is tracked in memory, and the case where we need to -// lookup the peer on disk. -func TestFlapCount(t *testing.T) { - clock := clock.NewTestClock(testNow) - - var ( - peer = route.Vertex{9, 9, 9} - peerFlapCount = 3 - lastFlap = clock.Now() - ) - - // Create a test context with one peer's flap count already recorded, - // which mocks it already having its flap count stored on disk. - ctx := newChanEventStoreTestCtx(t) - ctx.flapUpdates[peer] = &channeldb.FlapCount{ - Count: uint32(peerFlapCount), - LastFlap: lastFlap, - } - - ctx.start() - - // Create test variables for a peer and channel, but do not add it to - // our store yet. - peer1 := route.Vertex{1, 2, 3} - - // First, query for a peer that we have no record of in memory or on - // disk and confirm that we indicate that the peer was not found. - _, ts, err := ctx.store.FlapCount(peer1) - util.RequireNoErr(t, err) - require.Nil(t, ts) - - // Send an online event for our peer. - ctx.peerEvent(peer1, true) - - // Assert that we now find a record of the peer with flap count = 1. - count, ts, err := ctx.store.FlapCount(peer1) - util.RequireNoErr(t, err) - require.Equal(t, lastFlap, *ts) - require.Equal(t, 1, count) - - // Make a request for our peer that not tracked in memory, but does - // have its flap count stored on disk. 
- count, ts, err = ctx.store.FlapCount(peer) - util.RequireNoErr(t, err) - require.Equal(t, lastFlap, *ts) - require.Equal(t, peerFlapCount, count) - - ctx.stop() -} diff --git a/lnd/chanfitness/chaneventstore_testctx_test.go b/lnd/chanfitness/chaneventstore_testctx_test.go deleted file mode 100644 index b58d0b78..00000000 --- a/lnd/chanfitness/chaneventstore_testctx_test.go +++ /dev/null @@ -1,308 +0,0 @@ -package chanfitness - -import ( - "math/big" - "testing" - "time" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/channelnotifier" - "github.com/pkt-cash/pktd/lnd/clock" - "github.com/pkt-cash/pktd/lnd/peernotifier" - "github.com/pkt-cash/pktd/lnd/routing/route" - "github.com/pkt-cash/pktd/lnd/subscribe" - "github.com/pkt-cash/pktd/lnd/ticker" - "github.com/pkt-cash/pktd/wire" - "github.com/stretchr/testify/require" -) - -// timeout is the amount of time we allow our blocking test calls. -var timeout = time.Second - -// chanEventStoreTestCtx is a helper struct which can be used to test the -// channel event store. -type chanEventStoreTestCtx struct { - t *testing.T - - store *ChannelEventStore - - channelSubscription *mockSubscription - peerSubscription *mockSubscription - - // testVarIdx is an index which will be used to deterministically add - // channels and public keys to our test context. We use a single value - // for a single pubkey + channel combination because its actual value - // does not matter. - testVarIdx int - - // clock is the clock that our test store will use. - clock *clock.TestClock - - // flapUpdates stores our most recent set of updates flap counts. - flapUpdates peerFlapCountMap - - // flapCountUpdates is a channel which receives new flap counts. 
- flapCountUpdates chan peerFlapCountMap - - // stopped is closed when our test context is fully shutdown. It is - // used to prevent calling of functions which can only be called after - // shutdown. - stopped chan struct{} -} - -// newChanEventStoreTestCtx creates a test context which can be used to test -// the event store. -func newChanEventStoreTestCtx(t *testing.T) *chanEventStoreTestCtx { - testCtx := &chanEventStoreTestCtx{ - t: t, - channelSubscription: newMockSubscription(t), - peerSubscription: newMockSubscription(t), - clock: clock.NewTestClock(testNow), - flapUpdates: make(peerFlapCountMap), - flapCountUpdates: make(chan peerFlapCountMap), - stopped: make(chan struct{}), - } - - cfg := &Config{ - Clock: testCtx.clock, - SubscribeChannelEvents: func() (subscribe.Subscription, er.R) { - return testCtx.channelSubscription, nil - }, - SubscribePeerEvents: func() (subscribe.Subscription, er.R) { - return testCtx.peerSubscription, nil - }, - GetOpenChannels: func() ([]*channeldb.OpenChannel, er.R) { - return nil, nil - }, - WriteFlapCount: func(updates map[route.Vertex]*channeldb.FlapCount) er.R { - // Send our whole update map into the test context's - // updates channel. The test will need to assert flap - // count updated or this send will timeout. - select { - case testCtx.flapCountUpdates <- updates: - - case <-time.After(timeout): - t.Fatalf("WriteFlapCount timeout") - } - - return nil - }, - ReadFlapCount: func(peer route.Vertex) (*channeldb.FlapCount, er.R) { - count, ok := testCtx.flapUpdates[peer] - if !ok { - return nil, channeldb.ErrNoPeerBucket.Default() - } - - return count, nil - }, - FlapCountTicker: ticker.NewForce(FlapCountFlushRate), - } - - testCtx.store = NewChannelEventStore(cfg) - - return testCtx -} - -// start starts the test context's event store. -func (c *chanEventStoreTestCtx) start() { - util.RequireNoErr(c.t, c.store.Start()) -} - -// stop stops the channel event store's subscribe servers and the store itself. 
-func (c *chanEventStoreTestCtx) stop() { - // On shutdown of our event store, we write flap counts to disk. In our - // test context, this write function is blocked on asserting that the - // update has occurred. We stop our store in a goroutine so that we - // can shut it down and assert that it performs these on-shutdown - // updates. The stopped channel is used to ensure that we do not finish - // our test before this shutdown has completed. - go func() { - c.store.Stop() - close(c.stopped) - }() - - // We write our flap count to disk on shutdown, assert that the most - // recent record that the server has is written on shutdown. Calling - // this assert unblocks the stop function above. We don't check values - // here, so that our tests don't all require providing an expected swap - // count, but at least assert that the write occurred. - c.assertFlapCountUpdated() - - <-c.stopped - - // Make sure that the cancel function was called for both of our - // subscription mocks. - c.channelSubscription.assertCancelled() - c.peerSubscription.assertCancelled() -} - -// newChannel creates a new, unique test channel. Note that this function -// does not add it to the test event store, it just creates mocked values. -func (c *chanEventStoreTestCtx) newChannel() (route.Vertex, *btcec.PublicKey, - wire.OutPoint) { - - // Create a pubkey for our channel peer. - pubKey := &btcec.PublicKey{ - X: big.NewInt(int64(c.testVarIdx)), - Y: big.NewInt(int64(c.testVarIdx)), - Curve: btcec.S256(), - } - - // Create vertex from our pubkey. - vertex, err := route.NewVertexFromBytes(pubKey.SerializeCompressed()) - util.RequireNoErr(c.t, err) - - // Create a channel point using our channel index, then increment it. - chanPoint := wire.OutPoint{ - Hash: [chainhash.HashSize]byte{1, 2, 3}, - Index: uint32(c.testVarIdx), - } - - // Increment the index we use so that the next channel and pubkey we - // create will be unique. 
- c.testVarIdx++ - - return vertex, pubKey, chanPoint -} - -// createChannel creates a new channel, notifies the event store that it has -// been created and returns the peer vertex, pubkey and channel point. -func (c *chanEventStoreTestCtx) createChannel() (route.Vertex, *btcec.PublicKey, - wire.OutPoint) { - - vertex, pubKey, chanPoint := c.newChannel() - c.sendChannelOpenedUpdate(pubKey, chanPoint) - - return vertex, pubKey, chanPoint -} - -// closeChannel sends a close channel event to our subscribe server. -func (c *chanEventStoreTestCtx) closeChannel(channel wire.OutPoint, - peer *btcec.PublicKey) { - - update := channelnotifier.ClosedChannelEvent{ - CloseSummary: &channeldb.ChannelCloseSummary{ - ChanPoint: channel, - RemotePub: peer, - }, - } - - c.channelSubscription.sendUpdate(update) -} - -// tickFlapCount forces a tick for our flap count ticker with the current time. -func (c *chanEventStoreTestCtx) tickFlapCount() { - testTicker := c.store.cfg.FlapCountTicker.(*ticker.Force) - - select { - case testTicker.Force <- c.store.cfg.Clock.Now(): - - case <-time.After(timeout): - c.t.Fatalf("could not tick flap count ticker") - } -} - -// peerEvent sends a peer online or offline event to the store for the peer -// provided. -func (c *chanEventStoreTestCtx) peerEvent(peer route.Vertex, online bool) { - var update interface{} - if online { - update = peernotifier.PeerOnlineEvent{PubKey: peer} - } else { - update = peernotifier.PeerOfflineEvent{PubKey: peer} - } - - c.peerSubscription.sendUpdate(update) -} - -// sendChannelOpenedUpdate notifies the test event store that a channel has -// been opened. 
-func (c *chanEventStoreTestCtx) sendChannelOpenedUpdate(pubkey *btcec.PublicKey, - channel wire.OutPoint) { - - update := channelnotifier.OpenChannelEvent{ - Channel: &channeldb.OpenChannel{ - FundingOutpoint: channel, - IdentityPub: pubkey, - }, - } - - c.channelSubscription.sendUpdate(update) -} - -// assertFlapCountUpdated asserts that our store has made an attempt to write -// our current set of flap counts to disk and sets this value in our test ctx. -// Note that it does not check the values of the update. -func (c *chanEventStoreTestCtx) assertFlapCountUpdated() { - select { - case c.flapUpdates = <-c.flapCountUpdates: - - case <-time.After(timeout): - c.t.Fatalf("assertFlapCountUpdated timeout") - } -} - -// assertFlapCountUpdates asserts that out current record of flap counts is -// as expected. -func (c *chanEventStoreTestCtx) assertFlapCountUpdates(expected peerFlapCountMap) { - require.Equal(c.t, expected, c.flapUpdates) -} - -// mockSubscription is a mock subscription client that blocks on sends into the -// updates channel. We use this mock rather than an actual subscribe client -// because they do not block, which makes tests race (because we have no way -// to guarantee that the test client consumes the update before shutdown). -type mockSubscription struct { - t *testing.T - updates chan interface{} - - // Embed the subscription interface in this mock so that we satisfy it. - subscribe.Subscription -} - -// newMockSubscription creates a mock subscription. -func newMockSubscription(t *testing.T) *mockSubscription { - return &mockSubscription{ - t: t, - updates: make(chan interface{}), - } -} - -// sendUpdate sends an update into our updates channel, mocking the dispatch of -// an update from a subscription server. This call will fail the test if the -// update is not consumed within our timeout. 
-func (m *mockSubscription) sendUpdate(update interface{}) { - select { - case m.updates <- update: - - case <-time.After(timeout): - m.t.Fatalf("update: %v timeout", update) - } -} - -// Updates returns the updates channel for the mock. -func (m *mockSubscription) Updates() <-chan interface{} { - return m.updates -} - -// Cancel should be called in case the client no longer wants to subscribe for -// updates from the server. -func (m *mockSubscription) Cancel() { - close(m.updates) -} - -// assertCancelled asserts that the cancel function has been called for this -// mock. -func (m *mockSubscription) assertCancelled() { - select { - case _, open := <-m.updates: - require.False(m.t, open, "subscription not cancelled") - - case <-time.After(timeout): - m.t.Fatalf("assert cancelled timeout") - } -} diff --git a/lnd/chanfitness/interface.go b/lnd/chanfitness/interface.go deleted file mode 100644 index 307fe6ee..00000000 --- a/lnd/chanfitness/interface.go +++ /dev/null @@ -1,35 +0,0 @@ -package chanfitness - -import ( - "time" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/wire" -) - -// peerMonitor is an interface implemented by entities that monitor our peers -// online events and the channels we currently have open with them. -type peerMonitor interface { - // event adds an online or offline event. - onlineEvent(online bool) - - // addChannel adds a new channel. - addChannel(channelPoint wire.OutPoint) er.R - - // removeChannel removes a channel. - removeChannel(channelPoint wire.OutPoint) er.R - - // channelCount returns the number of channels that we currently have - // with the peer. - channelCount() int - - // channelUptime looks up a channel and returns the amount of time that - // the channel has been monitored for and its uptime over this period. 
- channelUptime(channelPoint wire.OutPoint) (time.Duration, - time.Duration, er.R) - - // getFlapCount returns the peer's flap count and the timestamp that we - // last recorded a flap, which may be nil if we have never recorded a - // flap for this peer. - getFlapCount() (int, *time.Time) -} diff --git a/lnd/chanfitness/rate_limit.go b/lnd/chanfitness/rate_limit.go deleted file mode 100644 index b070a445..00000000 --- a/lnd/chanfitness/rate_limit.go +++ /dev/null @@ -1,82 +0,0 @@ -package chanfitness - -import ( - "math" - "time" -) - -const ( - // rateLimitScale is the number of events we allow per rate limited - // tier. Increasing this value makes our rate limiting more lenient, - // decreasing it makes us less lenient. - rateLimitScale = 200 - - // flapCountCooldownFactor is the factor by which we decrease a peer's - // flap count if they have not flapped for the cooldown period. - flapCountCooldownFactor = 0.95 - - // flapCountCooldownPeriod is the amount of time that we require a peer - // has not flapped for before we reduce their all time flap count using - // our cooldown factor. - flapCountCooldownPeriod = time.Hour * 8 -) - -// rateLimits is the set of rate limit tiers we apply to our peers based on -// their flap count. A peer can be placed in their tier by dividing their flap -// count by the rateLimitScale and returning the value at that index. -var rateLimits = []time.Duration{ - time.Second, - time.Second * 5, - time.Second * 30, - time.Minute, - time.Minute * 30, - time.Hour, -} - -// getRateLimit returns the value of the rate limited tier that we are on based -// on current flap count. If a peer's flap count exceeds the top tier, we just -// return our highest tier. -func getRateLimit(flapCount int) time.Duration { - // Figure out the tier we fall into based on our current flap count. 
- tier := flapCount / rateLimitScale - - // If we have more events than our number of tiers, we just use the - // last tier - tierLen := len(rateLimits) - if tier >= tierLen { - tier = tierLen - 1 - } - - return rateLimits[tier] -} - -// cooldownFlapCount takes a timestamped flap count, and returns its value -// scaled down by our cooldown factor if at least our cooldown period has -// elapsed since the peer last flapped. We do this because we store all-time -// flap count for peers, and want to allow downgrading of peers that have not -// flapped for a long time. -func cooldownFlapCount(now time.Time, flapCount int, - lastFlap time.Time) int { - - // Calculate time since our last flap, and the number of times we need - // to apply our cooldown factor. - timeSinceFlap := now.Sub(lastFlap) - - // If our cooldown period has not elapsed yet, we just return our flap - // count. We allow fractional cooldown periods once this period has - // elapsed, so we do not want to apply a fractional cooldown before the - // full cooldown period has elapsed. - if timeSinceFlap < flapCountCooldownPeriod { - return flapCount - } - - // Get the factor by which we need to cooldown our flap count. If - // insufficient time has passed to cooldown our flap count. Use use a - // float so that we allow fractional cooldown periods. - cooldownPeriods := float64(timeSinceFlap) / - float64(flapCountCooldownPeriod) - - effectiveFactor := math.Pow(flapCountCooldownFactor, cooldownPeriods) - - return int(float64(flapCount) * effectiveFactor) -} diff --git a/lnd/chanfitness/rate_limit_test.go b/lnd/chanfitness/rate_limit_test.go deleted file mode 100644 index b9bca808..00000000 --- a/lnd/chanfitness/rate_limit_test.go +++ /dev/null @@ -1,105 +0,0 @@ -package chanfitness - -import ( - "testing" - "time" - - "github.com/stretchr/testify/require" -) - -// TestGetRateLimit tests getting of our rate limit using the current constants. 
-// It creates test cases that are relative to our constants so that they -// can be adjusted without breaking the unit test. -func TestGetRateLimit(t *testing.T) { - tests := []struct { - name string - flapCount int - rateLimit time.Duration - }{ - { - name: "zero flaps", - flapCount: 0, - rateLimit: rateLimits[0], - }, - { - name: "middle tier", - flapCount: rateLimitScale * (len(rateLimits) / 2), - rateLimit: rateLimits[len(rateLimits)/2], - }, - { - name: "last tier", - flapCount: rateLimitScale * (len(rateLimits) - 1), - rateLimit: rateLimits[len(rateLimits)-1], - }, - { - name: "beyond last tier", - flapCount: rateLimitScale * (len(rateLimits) * 2), - rateLimit: rateLimits[len(rateLimits)-1], - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - limit := getRateLimit(test.flapCount) - require.Equal(t, test.rateLimit, limit) - }) - } -} - -// TestCooldownFlapCount tests cooldown of all time flap counts. -func TestCooldownFlapCount(t *testing.T) { - tests := []struct { - name string - flapCount int - lastFlap time.Time - expected int - }{ - { - name: "just flapped, do not cooldown", - flapCount: 1, - lastFlap: testNow, - expected: 1, - }, - { - name: "period not elapsed, do not cooldown", - flapCount: 1, - lastFlap: testNow.Add(flapCountCooldownPeriod / 2 * -1), - expected: 1, - }, - { - name: "rounded to 0", - flapCount: 1, - lastFlap: testNow.Add(flapCountCooldownPeriod * -1), - expected: 0, - }, - { - name: "decreased to integer value", - flapCount: 10, - lastFlap: testNow.Add(flapCountCooldownPeriod * -1), - expected: 9, - }, - { - name: "multiple cooldown periods", - flapCount: 10, - lastFlap: testNow.Add(flapCountCooldownPeriod * -3), - expected: 8, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - flapCount := cooldownFlapCount( - testNow, test.flapCount, test.lastFlap, - ) - require.Equal(t, test.expected, flapCount) - }) - 
} -} diff --git a/lnd/channel_notifier.go b/lnd/channel_notifier.go deleted file mode 100644 index 11d1ff02..00000000 --- a/lnd/channel_notifier.go +++ /dev/null @@ -1,158 +0,0 @@ -package lnd - -import ( - "fmt" - "net" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/chanbackup" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/channelnotifier" - "github.com/pkt-cash/pktd/pktlog/log" - "github.com/pkt-cash/pktd/wire" -) - -// addrSource is an interface that allow us to get the addresses for a target -// node. We'll need this in order to be able to properly proxy the -// notifications to create SCBs. -type addrSource interface { - // AddrsForNode returns all known addresses for the target node public - // key. - AddrsForNode(nodePub *btcec.PublicKey) ([]net.Addr, er.R) -} - -// channelNotifier is an implementation of the chanbackup.ChannelNotifier -// interface using the existing channelnotifier.ChannelNotifier struct. This -// implementation allows us to satisfy all the dependencies of the -// chanbackup.SubSwapper struct. -type channelNotifier struct { - // chanNotifier is the based channel notifier that we'll proxy requests - // from. - chanNotifier *channelnotifier.ChannelNotifier - - // addrs is an implementation of the addrSource interface that allows - // us to get the latest set of addresses for a given node. We'll need - // this to be able to create an SCB for new channels. - addrs addrSource -} - -// SubscribeChans requests a new channel subscription relative to the initial -// set of known channels. We use the knownChans as a synchronization point to -// ensure that the chanbackup.SubSwapper does not miss any channel open or -// close events in the period between when it's created, and when it requests -// the channel subscription. -// -// NOTE: This is part of the chanbackup.ChannelNotifier interface. 
-func (c *channelNotifier) SubscribeChans(startingChans map[wire.OutPoint]struct{}) ( - *chanbackup.ChannelSubscription, er.R) { - - log.Infof("Channel backup proxy channel notifier starting") - - // TODO(roasbeef): read existing set of chans and diff - - quit := make(chan struct{}) - chanUpdates := make(chan chanbackup.ChannelEvent, 1) - - // sendChanOpenUpdate is a closure that sends a ChannelEvent to the - // chanUpdates channel to inform subscribers about new pending or - // confirmed channels. - sendChanOpenUpdate := func(newOrPendingChan *channeldb.OpenChannel) { - nodeAddrs, err := c.addrs.AddrsForNode( - newOrPendingChan.IdentityPub, - ) - if err != nil { - pub := newOrPendingChan.IdentityPub - log.Errorf("unable to fetch addrs for %x: %v", - pub.SerializeCompressed(), err) - } - - chanEvent := chanbackup.ChannelEvent{ - NewChans: []chanbackup.ChannelWithAddrs{ - { - OpenChannel: newOrPendingChan, - Addrs: nodeAddrs, - }, - }, - } - - select { - case chanUpdates <- chanEvent: - case <-quit: - return - } - } - - // In order to adhere to the interface, we'll proxy the events from the - // channel notifier to the sub-swapper in a format it understands. - go func() { - // First, we'll subscribe to the primary channel notifier so we can - // obtain events for new opened/closed channels. - chanSubscription, err := c.chanNotifier.SubscribeChannelEvents() - if err != nil { - panic(fmt.Sprintf("unable to subscribe to chans: %v", - err)) - } - - defer chanSubscription.Cancel() - - for { - select { - - // A new event has been sent by the chanNotifier, we'll - // filter out the events we actually care about and - // send them to the sub-swapper. - case e := <-chanSubscription.Updates(): - // TODO(roasbeef): batch dispatch ntnfs - - switch event := e.(type) { - // A new channel has been opened and is still - // pending. We can still create a backup, even - // if the final channel ID is not yet available. 
- case channelnotifier.PendingOpenChannelEvent: - pendingChan := event.PendingChannel - sendChanOpenUpdate(pendingChan) - - // A new channel has been confirmed, we'll - // obtain the node address, then send to the - // sub-swapper. - case channelnotifier.OpenChannelEvent: - sendChanOpenUpdate(event.Channel) - - // An existing channel has been closed, we'll - // send only the chanPoint of the closed - // channel to the sub-swapper. - case channelnotifier.ClosedChannelEvent: - chanPoint := event.CloseSummary.ChanPoint - chanEvent := chanbackup.ChannelEvent{ - ClosedChans: []wire.OutPoint{ - chanPoint, - }, - } - - select { - case chanUpdates <- chanEvent: - case <-quit: - return - } - } - - // The cancel method has been called, signalling us to - // exit - case <-quit: - return - } - } - }() - - return &chanbackup.ChannelSubscription{ - ChanUpdates: chanUpdates, - Cancel: func() { - close(quit) - }, - }, nil -} - -// A compile-time constraint to ensure channelNotifier implements -// chanbackup.ChannelNotifier. -var _ chanbackup.ChannelNotifier = (*channelNotifier)(nil) diff --git a/lnd/channeldb/README.md b/lnd/channeldb/README.md deleted file mode 100644 index 7e3a81ef..00000000 --- a/lnd/channeldb/README.md +++ /dev/null @@ -1,24 +0,0 @@ -channeldb -========== - -[![Build Status](http://img.shields.io/travis/lightningnetwork/lnd.svg)](https://travis-ci.org/lightningnetwork/lnd) -[![MIT licensed](https://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/lightningnetwork/lnd/blob/master/LICENSE) -[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/lightningnetwork/lnd/channeldb) - -The channeldb implements the persistent storage engine for `lnd` and -generically a data storage layer for the required state within the Lightning -Network. The backing storage engine is -[boltdb](https://github.com/coreos/bbolt), an embedded pure-go key-value store -based off of LMDB. 
- -The package implements an object-oriented storage model with queries and -mutations flowing through a particular object instance rather than the database -itself. The storage implemented by the objects includes: open channels, past -commitment revocation states, the channel graph which includes authenticated -node and channel announcements, outgoing payments, and invoices - -## Installation and Updating - -```bash -$ go get -u github.com/lightningnetwork/lnd/channeldb -``` diff --git a/lnd/channeldb/addr.go b/lnd/channeldb/addr.go deleted file mode 100644 index 843ee963..00000000 --- a/lnd/channeldb/addr.go +++ /dev/null @@ -1,221 +0,0 @@ -package channeldb - -import ( - "encoding/binary" - "io" - "net" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/lnd/tor" -) - -// addressType specifies the network protocol and version that should be used -// when connecting to a node at a particular address. -type addressType uint8 - -const ( - // tcp4Addr denotes an IPv4 TCP address. - tcp4Addr addressType = 0 - - // tcp6Addr denotes an IPv6 TCP address. - tcp6Addr addressType = 1 - - // v2OnionAddr denotes a version 2 Tor onion service address. - v2OnionAddr addressType = 2 - - // v3OnionAddr denotes a version 3 Tor (prop224) onion service address. - v3OnionAddr addressType = 3 -) - -// encodeTCPAddr serializes a TCP address into its compact raw bytes -// representation. 
-func encodeTCPAddr(w io.Writer, addr *net.TCPAddr) er.R { - var ( - addrType byte - ip []byte - ) - - if addr.IP.To4() != nil { - addrType = byte(tcp4Addr) - ip = addr.IP.To4() - } else { - addrType = byte(tcp6Addr) - ip = addr.IP.To16() - } - - if ip == nil { - return er.Errorf("unable to encode IP %v", addr.IP) - } - - if _, err := util.Write(w, []byte{addrType}); err != nil { - return err - } - - if _, err := util.Write(w, ip); err != nil { - return err - } - - var port [2]byte - byteOrder.PutUint16(port[:], uint16(addr.Port)) - if _, err := util.Write(w, port[:]); err != nil { - return err - } - - return nil -} - -// encodeOnionAddr serializes an onion address into its compact raw bytes -// representation. -func encodeOnionAddr(w io.Writer, addr *tor.OnionAddr) er.R { - var suffixIndex int - hostLen := len(addr.OnionService) - switch hostLen { - case tor.V2Len: - if _, err := util.Write(w, []byte{byte(v2OnionAddr)}); err != nil { - return err - } - suffixIndex = tor.V2Len - tor.OnionSuffixLen - case tor.V3Len: - if _, err := util.Write(w, []byte{byte(v3OnionAddr)}); err != nil { - return err - } - suffixIndex = tor.V3Len - tor.OnionSuffixLen - default: - return er.New("unknown onion service length") - } - - suffix := addr.OnionService[suffixIndex:] - if suffix != tor.OnionSuffix { - return er.Errorf("invalid suffix \"%v\"", suffix) - } - - host, errr := tor.Base32Encoding.DecodeString( - addr.OnionService[:suffixIndex], - ) - if errr != nil { - return er.E(errr) - } - - // Sanity check the decoded length. 
- switch { - case hostLen == tor.V2Len && len(host) != tor.V2DecodedLen: - return er.Errorf("onion service %v decoded to invalid host %x", - addr.OnionService, host) - - case hostLen == tor.V3Len && len(host) != tor.V3DecodedLen: - return er.Errorf("onion service %v decoded to invalid host %x", - addr.OnionService, host) - } - - if _, err := util.Write(w, host); err != nil { - return err - } - - var port [2]byte - byteOrder.PutUint16(port[:], uint16(addr.Port)) - if _, err := util.Write(w, port[:]); err != nil { - return err - } - - return nil -} - -// deserializeAddr reads the serialized raw representation of an address and -// deserializes it into the actual address. This allows us to avoid address -// resolution within the channeldb package. -func deserializeAddr(r io.Reader) (net.Addr, er.R) { - var addrType [1]byte - if _, err := r.Read(addrType[:]); err != nil { - return nil, er.E(err) - } - - var address net.Addr - switch addressType(addrType[0]) { - case tcp4Addr: - var ip [4]byte - if _, err := r.Read(ip[:]); err != nil { - return nil, er.E(err) - } - - var port [2]byte - if _, err := r.Read(port[:]); err != nil { - return nil, er.E(err) - } - - address = &net.TCPAddr{ - IP: net.IP(ip[:]), - Port: int(binary.BigEndian.Uint16(port[:])), - } - case tcp6Addr: - var ip [16]byte - if _, err := r.Read(ip[:]); err != nil { - return nil, er.E(err) - } - - var port [2]byte - if _, err := r.Read(port[:]); err != nil { - return nil, er.E(err) - } - - address = &net.TCPAddr{ - IP: net.IP(ip[:]), - Port: int(binary.BigEndian.Uint16(port[:])), - } - case v2OnionAddr: - var h [tor.V2DecodedLen]byte - if _, err := r.Read(h[:]); err != nil { - return nil, er.E(err) - } - - var p [2]byte - if _, err := r.Read(p[:]); err != nil { - return nil, er.E(err) - } - - onionService := tor.Base32Encoding.EncodeToString(h[:]) - onionService += tor.OnionSuffix - port := int(binary.BigEndian.Uint16(p[:])) - - address = &tor.OnionAddr{ - OnionService: onionService, - Port: port, - } - 
case v3OnionAddr: - var h [tor.V3DecodedLen]byte - if _, err := r.Read(h[:]); err != nil { - return nil, er.E(err) - } - - var p [2]byte - if _, err := r.Read(p[:]); err != nil { - return nil, er.E(err) - } - - onionService := tor.Base32Encoding.EncodeToString(h[:]) - onionService += tor.OnionSuffix - port := int(binary.BigEndian.Uint16(p[:])) - - address = &tor.OnionAddr{ - OnionService: onionService, - Port: port, - } - default: - return nil, ErrUnknownAddressType.Default() - } - - return address, nil -} - -// serializeAddr serializes an address into its raw bytes representation so that -// it can be deserialized without requiring address resolution. -func serializeAddr(w io.Writer, address net.Addr) er.R { - switch addr := address.(type) { - case *net.TCPAddr: - return encodeTCPAddr(w, addr) - case *tor.OnionAddr: - return encodeOnionAddr(w, addr) - default: - return ErrUnknownAddressType.Default() - } -} diff --git a/lnd/channeldb/addr_test.go b/lnd/channeldb/addr_test.go deleted file mode 100644 index 460e2837..00000000 --- a/lnd/channeldb/addr_test.go +++ /dev/null @@ -1,149 +0,0 @@ -package channeldb - -import ( - "bytes" - "net" - "strings" - "testing" - - "github.com/pkt-cash/pktd/lnd/tor" -) - -type unknownAddrType struct{} - -func (t unknownAddrType) Network() string { return "unknown" } -func (t unknownAddrType) String() string { return "unknown" } - -var testIP4 = net.ParseIP("192.168.1.1") -var testIP6 = net.ParseIP("2001:0db8:0000:0000:0000:ff00:0042:8329") - -var addrTests = []struct { - expAddr net.Addr - serErr string -}{ - // Valid addresses. - { - expAddr: &net.TCPAddr{ - IP: testIP4, - Port: 12345, - }, - }, - { - expAddr: &net.TCPAddr{ - IP: testIP6, - Port: 65535, - }, - }, - { - expAddr: &tor.OnionAddr{ - OnionService: "3g2upl4pq6kufc4m.onion", - Port: 9735, - }, - }, - { - expAddr: &tor.OnionAddr{ - OnionService: "vww6ybal4bd7szmgncyruucpgfkqahzddi37ktceo3ah7ngmcopnpyyd.onion", - Port: 80, - }, - }, - - // Invalid addresses. 
- { - expAddr: unknownAddrType{}, - serErr: "ErrUnknownAddressType", - }, - { - expAddr: &net.TCPAddr{ - // Remove last byte of IPv4 address. - IP: testIP4[:len(testIP4)-1], - Port: 12345, - }, - serErr: "unable to encode", - }, - { - expAddr: &net.TCPAddr{ - // Add an extra byte of IPv4 address. - IP: append(testIP4, 0xff), - Port: 12345, - }, - serErr: "unable to encode", - }, - { - expAddr: &net.TCPAddr{ - // Remove last byte of IPv6 address. - IP: testIP6[:len(testIP6)-1], - Port: 65535, - }, - serErr: "unable to encode", - }, - { - expAddr: &net.TCPAddr{ - // Add an extra byte to the IPv6 address. - IP: append(testIP6, 0xff), - Port: 65535, - }, - serErr: "unable to encode", - }, - { - expAddr: &tor.OnionAddr{ - // Invalid suffix. - OnionService: "vww6ybal4bd7szmgncyruucpgfkqahzddi37ktceo3ah7ngmcopnpyyd.inion", - Port: 80, - }, - serErr: "invalid suffix", - }, - { - expAddr: &tor.OnionAddr{ - // Invalid length. - OnionService: "vww6ybal4bd7szmgncyruucpgfkqahzddi37ktceo3ah7ngmcopnpyy.onion", - Port: 80, - }, - serErr: "unknown onion service length", - }, - { - expAddr: &tor.OnionAddr{ - // Invalid encoding. - OnionService: "vww6ybal4bd7szmgncyruucpgfkqahzddi37ktceo3ah7ngmcopnpyyA.onion", - Port: 80, - }, - serErr: "illegal base32", - }, -} - -// TestAddrSerialization tests that the serialization method used by channeldb -// for net.Addr's works as intended. 
-func TestAddrSerialization(t *testing.T) { - t.Parallel() - - var b bytes.Buffer - for _, test := range addrTests { - err := serializeAddr(&b, test.expAddr) - switch { - case err == nil && test.serErr != "": - t.Fatalf("expected serialization err for addr %v", - test.expAddr) - - case err != nil && test.serErr == "": - t.Fatalf("unexpected serialization err for addr %v: %v", - test.expAddr, err) - - case err != nil && !strings.Contains(err.String(), test.serErr): - t.Fatalf("unexpected serialization err for addr %v, "+ - "want: %v, got %v", test.expAddr, test.serErr, - err) - - case err != nil: - continue - } - - addr, err := deserializeAddr(&b) - if err != nil { - t.Fatalf("unable to deserialize address: %v", err) - } - - if addr.String() != test.expAddr.String() { - t.Fatalf("expected address %v after serialization, "+ - "got %v", addr, test.expAddr) - } - } -} diff --git a/lnd/channeldb/channel.go b/lnd/channeldb/channel.go deleted file mode 100644 index aa7f4f3a..00000000 --- a/lnd/channeldb/channel.go +++ /dev/null @@ -1,3506 +0,0 @@ -package channeldb - -import ( - "bytes" - "crypto/sha256" - "encoding/binary" - "fmt" - "io" - "net" - "strconv" - "strings" - "sync" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/lnd/input" - "github.com/pkt-cash/pktd/lnd/keychain" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/shachain" - "github.com/pkt-cash/pktd/wire" - "github.com/pkt-cash/pktd/wire/protocol" -) - -const ( - // AbsoluteThawHeightThreshold is the threshold at which a thaw height - // begins to be interpreted as an absolute block height, rather than a - // relative one. 
- AbsoluteThawHeightThreshold uint32 = 500000 -) - -var ( - // closedChannelBucket stores summarization information concerning - // previously open, but now closed channels. - closedChannelBucket = []byte("closed-chan-bucket") - - // openChanBucket stores all the currently open channels. This bucket - // has a second, nested bucket which is keyed by a node's ID. Within - // that node ID bucket, all attributes required to track, update, and - // close a channel are stored. - // - // openChan -> nodeID -> chanPoint - // - // TODO(roasbeef): flesh out comment - openChannelBucket = []byte("open-chan-bucket") - - // historicalChannelBucket stores all channels that have seen their - // commitment tx confirm. All information from their previous open state - // is retained. - historicalChannelBucket = []byte("historical-chan-bucket") - - // chanInfoKey can be accessed within the bucket for a channel - // (identified by its chanPoint). This key stores all the static - // information for a channel which is decided at the end of the - // funding flow. - chanInfoKey = []byte("chan-info-key") - - // localUpfrontShutdownKey can be accessed within the bucket for a channel - // (identified by its chanPoint). This key stores an optional upfront - // shutdown script for the local peer. - localUpfrontShutdownKey = []byte("local-upfront-shutdown-key") - - // remoteUpfrontShutdownKey can be accessed within the bucket for a channel - // (identified by its chanPoint). This key stores an optional upfront - // shutdown script for the remote peer. - remoteUpfrontShutdownKey = []byte("remote-upfront-shutdown-key") - - // chanCommitmentKey can be accessed within the sub-bucket for a - // particular channel. This key stores the up to date commitment state - // for a particular channel party. Appending a 0 to the end of this key - // indicates it's the commitment for the local party, and appending a 1 - // to the end of this key indicates it's the commitment for the remote - // party. 
- chanCommitmentKey = []byte("chan-commitment-key") - - // unsignedAckedUpdatesKey is an entry in the channel bucket that - // contains the remote updates that we have acked, but not yet signed - // for in one of our remote commits. - unsignedAckedUpdatesKey = []byte("unsigned-acked-updates-key") - - // remoteUnsignedLocalUpdatesKey is an entry in the channel bucket that - // contains the local updates that the remote party has acked, but - // has not yet signed for in one of their local commits. - remoteUnsignedLocalUpdatesKey = []byte("remote-unsigned-local-updates-key") - - // revocationStateKey stores their current revocation hash, our - // preimage producer and their preimage store. - revocationStateKey = []byte("revocation-state-key") - - // dataLossCommitPointKey stores the commitment point received from the - // remote peer during a channel sync in case we have lost channel state. - dataLossCommitPointKey = []byte("data-loss-commit-point-key") - - // forceCloseTxKey points to a the unilateral closing tx that we - // broadcasted when moving the channel to state CommitBroadcasted. - forceCloseTxKey = []byte("closing-tx-key") - - // coopCloseTxKey points to a the cooperative closing tx that we - // broadcasted when moving the channel to state CoopBroadcasted. - coopCloseTxKey = []byte("coop-closing-tx-key") - - // commitDiffKey stores the current pending commitment state we've - // extended to the remote party (if any). Each time we propose a new - // state, we store the information necessary to reconstruct this state - // from the prior commitment. This allows us to resync the remote party - // to their expected state in the case of message loss. - // - // TODO(roasbeef): rename to commit chain? 
- commitDiffKey = []byte("commit-diff-key") - - // revocationLogBucket is dedicated for storing the necessary delta - // state between channel updates required to re-construct a past state - // in order to punish a counterparty attempting a non-cooperative - // channel closure. This key should be accessed from within the - // sub-bucket of a target channel, identified by its channel point. - revocationLogBucket = []byte("revocation-log-key") - - // frozenChanKey is the key where we store the information for any - // active "frozen" channels. This key is present only in the leaf - // bucket for a given channel. - frozenChanKey = []byte("frozen-chans") -) - -var ( - // ErrNoCommitmentsFound is returned when a channel has not set - // commitment states. - ErrNoCommitmentsFound = Err.CodeWithDetail("ErrNoCommitmentsFound", - "no commitments found") - - // ErrNoChanInfoFound is returned when a particular channel does not - // have any channels state. - ErrNoChanInfoFound = Err.CodeWithDetail("ErrNoChanInfoFound", - "no chan info found") - - // ErrNoRevocationsFound is returned when revocation state for a - // particular channel cannot be found. - ErrNoRevocationsFound = Err.CodeWithDetail("ErrNoRevocationsFound", - "no revocations found") - - // ErrNoPendingCommit is returned when there is not a pending - // commitment for a remote party. A new commitment is written to disk - // each time we write a new state in order to be properly fault - // tolerant. - ErrNoPendingCommit = Err.CodeWithDetail("ErrNoPendingCommit", - "no pending commits found") - - // ErrInvalidCircuitKeyLen signals that a circuit key could not be - // decoded because the byte slice is of an invalid length. - ErrInvalidCircuitKeyLen = Err.CodeWithDetail("ErrInvalidCircuitKeyLen", - "length of serialized circuit key must be 16 bytes") - - // ErrNoCommitPoint is returned when no data loss commit point is found - // in the database. 
- ErrNoCommitPoint = Err.CodeWithDetail("ErrNoCommitPoint", - "no commit point found") - - // ErrNoCloseTx is returned when no closing tx is found for a channel - // in the state CommitBroadcasted. - ErrNoCloseTx = Err.CodeWithDetail("ErrNoCloseTx", - "no closing tx found") - - // ErrNoRestoredChannelMutation is returned when a caller attempts to - // mutate a channel that's been recovered. - ErrNoRestoredChannelMutation = Err.CodeWithDetail("ErrNoRestoredChannelMutation", - "cannot mutate restored channel state") - - // ErrChanBorked is returned when a caller attempts to mutate a borked - // channel. - ErrChanBorked = Err.CodeWithDetail("ErrChanBorked", - "cannot mutate borked channel") - - // errLogEntryNotFound is returned when we cannot find a log entry at - // the height requested in the revocation log. - errLogEntryNotFound = Err.CodeWithDetail("errLogEntryNotFound", - "log entry not found") - - // errHeightNotFound is returned when a query for channel balances at - // a height that we have not reached yet is made. - errHeightNotReached = Err.CodeWithDetail("errHeightNotReached", - "height requested greater than current commit height") -) - -// ChannelType is an enum-like type that describes one of several possible -// channel types. Each open channel is associated with a particular type as the -// channel type may determine how higher level operations are conducted such as -// fee negotiation, channel closing, the format of HTLCs, etc. Structure-wise, -// a ChannelType is a bit field, with each bit denoting a modification from the -// base channel type of single funder. -type ChannelType uint8 - -const ( - // NOTE: iota isn't used here for this enum needs to be stable - // long-term as it will be persisted to the database. - - // SingleFunderBit represents a channel wherein one party solely funds - // the entire capacity of the channel. 
- SingleFunderBit ChannelType = 0 - - // DualFunderBit represents a channel wherein both parties contribute - // funds towards the total capacity of the channel. The channel may be - // funded symmetrically or asymmetrically. - DualFunderBit ChannelType = 1 << 0 - - // SingleFunderTweakless is similar to the basic SingleFunder channel - // type, but it omits the tweak for one's key in the commitment - // transaction of the remote party. - SingleFunderTweaklessBit ChannelType = 1 << 1 - - // NoFundingTxBit denotes if we have the funding transaction locally on - // disk. This bit may be on if the funding transaction was crafted by a - // wallet external to the primary daemon. - NoFundingTxBit ChannelType = 1 << 2 - - // AnchorOutputsBit indicates that the channel makes use of anchor - // outputs to bump the commitment transaction's effective feerate. This - // channel type also uses a delayed to_remote output script. - AnchorOutputsBit ChannelType = 1 << 3 - - // FrozenBit indicates that the channel is a frozen channel, meaning - // that only the responder can decide to cooperatively close the - // channel. - FrozenBit ChannelType = 1 << 4 -) - -// IsSingleFunder returns true if the channel type if one of the known single -// funder variants. -func (c ChannelType) IsSingleFunder() bool { - return c&DualFunderBit == 0 -} - -// IsDualFunder returns true if the ChannelType has the DualFunderBit set. -func (c ChannelType) IsDualFunder() bool { - return c&DualFunderBit == DualFunderBit -} - -// IsTweakless returns true if the target channel uses a commitment that -// doesn't tweak the key for the remote party. -func (c ChannelType) IsTweakless() bool { - return c&SingleFunderTweaklessBit == SingleFunderTweaklessBit -} - -// HasFundingTx returns true if this channel type is one that has a funding -// transaction stored locally. 
-func (c ChannelType) HasFundingTx() bool { - return c&NoFundingTxBit == 0 -} - -// HasAnchors returns true if this channel type has anchor ouputs on its -// commitment. -func (c ChannelType) HasAnchors() bool { - return c&AnchorOutputsBit == AnchorOutputsBit -} - -// IsFrozen returns true if the channel is considered to be "frozen". A frozen -// channel means that only the responder can initiate a cooperative channel -// closure. -func (c ChannelType) IsFrozen() bool { - return c&FrozenBit == FrozenBit -} - -// ChannelConstraints represents a set of constraints meant to allow a node to -// limit their exposure, enact flow control and ensure that all HTLCs are -// economically relevant. This struct will be mirrored for both sides of the -// channel, as each side will enforce various constraints that MUST be adhered -// to for the life time of the channel. The parameters for each of these -// constraints are static for the duration of the channel, meaning the channel -// must be torn down for them to change. -type ChannelConstraints struct { - // DustLimit is the threshold (in satoshis) below which any outputs - // should be trimmed. When an output is trimmed, it isn't materialized - // as an actual output, but is instead burned to miner's fees. - DustLimit btcutil.Amount - - // ChanReserve is an absolute reservation on the channel for the - // owner of this set of constraints. This means that the current - // settled balance for this node CANNOT dip below the reservation - // amount. This acts as a defense against costless attacks when - // either side no longer has any skin in the game. - ChanReserve btcutil.Amount - - // MaxPendingAmount is the maximum pending HTLC value that the - // owner of these constraints can offer the remote node at a - // particular time. - MaxPendingAmount lnwire.MilliSatoshi - - // MinHTLC is the minimum HTLC value that the owner of these - // constraints can offer the remote node. 
If any HTLCs below this - // amount are offered, then the HTLC will be rejected. This, in - // tandem with the dust limit allows a node to regulate the - // smallest HTLC that it deems economically relevant. - MinHTLC lnwire.MilliSatoshi - - // MaxAcceptedHtlcs is the maximum number of HTLCs that the owner of - // this set of constraints can offer the remote node. This allows each - // node to limit their over all exposure to HTLCs that may need to be - // acted upon in the case of a unilateral channel closure or a contract - // breach. - MaxAcceptedHtlcs uint16 - - // CsvDelay is the relative time lock delay expressed in blocks. Any - // settled outputs that pay to the owner of this channel configuration - // MUST ensure that the delay branch uses this value as the relative - // time lock. Similarly, any HTLC's offered by this node should use - // this value as well. - CsvDelay uint16 -} - -// ChannelConfig is a struct that houses the various configuration opens for -// channels. Each side maintains an instance of this configuration file as it -// governs: how the funding and commitment transaction to be created, the -// nature of HTLC's allotted, the keys to be used for delivery, and relative -// time lock parameters. -type ChannelConfig struct { - // ChannelConstraints is the set of constraints that must be upheld for - // the duration of the channel for the owner of this channel - // configuration. Constraints govern a number of flow control related - // parameters, also including the smallest HTLC that will be accepted - // by a participant. - ChannelConstraints - - // MultiSigKey is the key to be used within the 2-of-2 output script - // for the owner of this channel config. - MultiSigKey keychain.KeyDescriptor - - // RevocationBasePoint is the base public key to be used when deriving - // revocation keys for the remote node's commitment transaction. 
This - // will be combined along with a per commitment secret to derive a - // unique revocation key for each state. - RevocationBasePoint keychain.KeyDescriptor - - // PaymentBasePoint is the base public key to be used when deriving - // the key used within the non-delayed pay-to-self output on the - // commitment transaction for a node. This will be combined with a - // tweak derived from the per-commitment point to ensure unique keys - // for each commitment transaction. - PaymentBasePoint keychain.KeyDescriptor - - // DelayBasePoint is the base public key to be used when deriving the - // key used within the delayed pay-to-self output on the commitment - // transaction for a node. This will be combined with a tweak derived - // from the per-commitment point to ensure unique keys for each - // commitment transaction. - DelayBasePoint keychain.KeyDescriptor - - // HtlcBasePoint is the base public key to be used when deriving the - // local HTLC key. The derived key (combined with the tweak derived - // from the per-commitment point) is used within the "to self" clause - // within any HTLC output scripts. - HtlcBasePoint keychain.KeyDescriptor -} - -// ChannelCommitment is a snapshot of the commitment state at a particular -// point in the commitment chain. With each state transition, a snapshot of the -// current state along with all non-settled HTLCs are recorded. These snapshots -// detail the state of the _remote_ party's commitment at a particular state -// number. For ourselves (the local node) we ONLY store our most recent -// (unrevoked) state for safety purposes. -type ChannelCommitment struct { - // CommitHeight is the update number that this ChannelDelta represents - // the total number of commitment updates to this point. This can be - // viewed as sort of a "commitment height" as this number is - // monotonically increasing. 
- CommitHeight uint64 - - // LocalLogIndex is the cumulative log index index of the local node at - // this point in the commitment chain. This value will be incremented - // for each _update_ added to the local update log. - LocalLogIndex uint64 - - // LocalHtlcIndex is the current local running HTLC index. This value - // will be incremented for each outgoing HTLC the local node offers. - LocalHtlcIndex uint64 - - // RemoteLogIndex is the cumulative log index index of the remote node - // at this point in the commitment chain. This value will be - // incremented for each _update_ added to the remote update log. - RemoteLogIndex uint64 - - // RemoteHtlcIndex is the current remote running HTLC index. This value - // will be incremented for each outgoing HTLC the remote node offers. - RemoteHtlcIndex uint64 - - // LocalBalance is the current available settled balance within the - // channel directly spendable by us. - // - // NOTE: This is the balance *after* subtracting any commitment fee, - // AND anchor output values. - LocalBalance lnwire.MilliSatoshi - - // RemoteBalance is the current available settled balance within the - // channel directly spendable by the remote node. - // - // NOTE: This is the balance *after* subtracting any commitment fee, - // AND anchor output values. - RemoteBalance lnwire.MilliSatoshi - - // CommitFee is the amount calculated to be paid in fees for the - // current set of commitment transactions. The fee amount is persisted - // with the channel in order to allow the fee amount to be removed and - // recalculated with each channel state update, including updates that - // happen after a system restart. - CommitFee btcutil.Amount - - // FeePerKw is the min satoshis/kilo-weight that should be paid within - // the commitment transaction for the entire duration of the channel's - // lifetime. This field may be updated during normal operation of the - // channel as on-chain conditions change. 
- // - // TODO(halseth): make this SatPerKWeight. Cannot be done atm because - // this will cause the import cycle lnwallet<->channeldb. Fee - // estimation stuff should be in its own package. - FeePerKw btcutil.Amount - - // CommitTx is the latest version of the commitment state, broadcast - // able by us. - CommitTx *wire.MsgTx - - // CommitSig is one half of the signature required to fully complete - // the script for the commitment transaction above. This is the - // signature signed by the remote party for our version of the - // commitment transactions. - CommitSig []byte - - // Htlcs is the set of HTLC's that are pending at this particular - // commitment height. - Htlcs []HTLC - - // TODO(roasbeef): pending commit pointer? - // * lets just walk through -} - -// ChannelStatus is a bit vector used to indicate whether an OpenChannel is in -// the default usable state, or a state where it shouldn't be used. -type ChannelStatus uint8 - -var ( - // ChanStatusDefault is the normal state of an open channel. - ChanStatusDefault ChannelStatus - - // ChanStatusBorked indicates that the channel has entered an - // irreconcilable state, triggered by a state desynchronization or - // channel breach. Channels in this state should never be added to the - // htlc switch. - ChanStatusBorked ChannelStatus = 1 - - // ChanStatusCommitBroadcasted indicates that a commitment for this - // channel has been broadcasted. - ChanStatusCommitBroadcasted ChannelStatus = 1 << 1 - - // ChanStatusLocalDataLoss indicates that we have lost channel state - // for this channel, and broadcasting our latest commitment might be - // considered a breach. - // - // TODO(halseh): actually enforce that we are not force closing such a - // channel. - ChanStatusLocalDataLoss ChannelStatus = 1 << 2 - - // ChanStatusRestored is a status flag that signals that the channel - // has been restored, and doesn't have all the fields a typical channel - // will have. 
- ChanStatusRestored ChannelStatus = 1 << 3 - - // ChanStatusCoopBroadcasted indicates that a cooperative close for - // this channel has been broadcasted. Older cooperatively closed - // channels will only have this status set. Newer ones will also have - // close initiator information stored using the local/remote initiator - // status. This status is set in conjunction with the initiator status - // so that we do not need to check multiple channel statues for - // cooperative closes. - ChanStatusCoopBroadcasted ChannelStatus = 1 << 4 - - // ChanStatusLocalCloseInitiator indicates that we initiated closing - // the channel. - ChanStatusLocalCloseInitiator ChannelStatus = 1 << 5 - - // ChanStatusRemoteCloseInitiator indicates that the remote node - // initiated closing the channel. - ChanStatusRemoteCloseInitiator ChannelStatus = 1 << 6 -) - -// chanStatusStrings maps a ChannelStatus to a human friendly string that -// describes that status. -var chanStatusStrings = map[ChannelStatus]string{ - ChanStatusDefault: "ChanStatusDefault", - ChanStatusBorked: "ChanStatusBorked", - ChanStatusCommitBroadcasted: "ChanStatusCommitBroadcasted", - ChanStatusLocalDataLoss: "ChanStatusLocalDataLoss", - ChanStatusRestored: "ChanStatusRestored", - ChanStatusCoopBroadcasted: "ChanStatusCoopBroadcasted", - ChanStatusLocalCloseInitiator: "ChanStatusLocalCloseInitiator", - ChanStatusRemoteCloseInitiator: "ChanStatusRemoteCloseInitiator", -} - -// orderedChanStatusFlags is an in-order list of all that channel status flags. -var orderedChanStatusFlags = []ChannelStatus{ - ChanStatusBorked, - ChanStatusCommitBroadcasted, - ChanStatusLocalDataLoss, - ChanStatusRestored, - ChanStatusCoopBroadcasted, - ChanStatusLocalCloseInitiator, - ChanStatusRemoteCloseInitiator, -} - -// String returns a human-readable representation of the ChannelStatus. -func (c ChannelStatus) String() string { - // If no flags are set, then this is the default case. 
- if c == ChanStatusDefault { - return chanStatusStrings[ChanStatusDefault] - } - - // Add individual bit flags. - statusStr := "" - for _, flag := range orderedChanStatusFlags { - if c&flag == flag { - statusStr += chanStatusStrings[flag] + "|" - c -= flag - } - } - - // Remove anything to the right of the final bar, including it as well. - statusStr = strings.TrimRight(statusStr, "|") - - // Add any remaining flags which aren't accounted for as hex. - if c != 0 { - statusStr += "|0x" + strconv.FormatUint(uint64(c), 16) - } - - // If this was purely an unknown flag, then remove the extra bar at the - // start of the string. - statusStr = strings.TrimLeft(statusStr, "|") - - return statusStr -} - -// OpenChannel encapsulates the persistent and dynamic state of an open channel -// with a remote node. An open channel supports several options for on-disk -// serialization depending on the exact context. Full (upon channel creation) -// state commitments, and partial (due to a commitment update) writes are -// supported. Each partial write due to a state update appends the new update -// to an on-disk log, which can then subsequently be queried in order to -// "time-travel" to a prior state. -type OpenChannel struct { - // ChanType denotes which type of channel this is. - ChanType ChannelType - - // ChainHash is a hash which represents the blockchain that this - // channel will be opened within. This value is typically the genesis - // hash. In the case that the original chain went through a contentious - // hard-fork, then this value will be tweaked using the unique fork - // point on each branch. - ChainHash chainhash.Hash - - // FundingOutpoint is the outpoint of the final funding transaction. - // This value uniquely and globally identifies the channel within the - // target blockchain as specified by the chain hash parameter. 
- FundingOutpoint wire.OutPoint - - // ShortChannelID encodes the exact location in the chain in which the - // channel was initially confirmed. This includes: the block height, - // transaction index, and the output within the target transaction. - ShortChannelID lnwire.ShortChannelID - - // IsPending indicates whether a channel's funding transaction has been - // confirmed. - IsPending bool - - // IsInitiator is a bool which indicates if we were the original - // initiator for the channel. This value may affect how higher levels - // negotiate fees, or close the channel. - IsInitiator bool - - // chanStatus is the current status of this channel. If it is not in - // the state Default, it should not be used for forwarding payments. - chanStatus ChannelStatus - - // FundingBroadcastHeight is the height in which the funding - // transaction was broadcast. This value can be used by higher level - // sub-systems to determine if a channel is stale and/or should have - // been confirmed before a certain height. - FundingBroadcastHeight uint32 - - // NumConfsRequired is the number of confirmations a channel's funding - // transaction must have received in order to be considered available - // for normal transactional use. - NumConfsRequired uint16 - - // ChannelFlags holds the flags that were sent as part of the - // open_channel message. - ChannelFlags lnwire.FundingFlag - - // IdentityPub is the identity public key of the remote node this - // channel has been established with. - IdentityPub *btcec.PublicKey - - // Capacity is the total capacity of this channel. - Capacity btcutil.Amount - - // TotalMSatSent is the total number of milli-satoshis we've sent - // within this channel. - TotalMSatSent lnwire.MilliSatoshi - - // TotalMSatReceived is the total number of milli-satoshis we've - // received within this channel. - TotalMSatReceived lnwire.MilliSatoshi - - // LocalChanCfg is the channel configuration for the local node. 
- LocalChanCfg ChannelConfig - - // RemoteChanCfg is the channel configuration for the remote node. - RemoteChanCfg ChannelConfig - - // LocalCommitment is the current local commitment state for the local - // party. This is stored distinct from the state of the remote party - // as there are certain asymmetric parameters which affect the - // structure of each commitment. - LocalCommitment ChannelCommitment - - // RemoteCommitment is the current remote commitment state for the - // remote party. This is stored distinct from the state of the local - // party as there are certain asymmetric parameters which affect the - // structure of each commitment. - RemoteCommitment ChannelCommitment - - // RemoteCurrentRevocation is the current revocation for their - // commitment transaction. However, since this the derived public key, - // we don't yet have the private key so we aren't yet able to verify - // that it's actually in the hash chain. - RemoteCurrentRevocation *btcec.PublicKey - - // RemoteNextRevocation is the revocation key to be used for the *next* - // commitment transaction we create for the local node. Within the - // specification, this value is referred to as the - // per-commitment-point. - RemoteNextRevocation *btcec.PublicKey - - // RevocationProducer is used to generate the revocation in such a way - // that remote side might store it efficiently and have the ability to - // restore the revocation by index if needed. Current implementation of - // secret producer is shachain producer. - RevocationProducer shachain.Producer - - // RevocationStore is used to efficiently store the revocations for - // previous channels states sent to us by remote side. Current - // implementation of secret store is shachain store. - RevocationStore shachain.Store - - // Packager is used to create and update forwarding packages for this - // channel, which encodes all necessary information to recover from - // failures and reforward HTLCs that were not fully processed. 
- Packager FwdPackager - - // FundingTxn is the transaction containing this channel's funding - // outpoint. Upon restarts, this txn will be rebroadcast if the channel - // is found to be pending. - // - // NOTE: This value will only be populated for single-funder channels - // for which we are the initiator, and that we also have the funding - // transaction for. One can check this by using the HasFundingTx() - // method on the ChanType field. - FundingTxn *wire.MsgTx - - // LocalShutdownScript is set to a pre-set script if the channel was opened - // by the local node with option_upfront_shutdown_script set. If the option - // was not set, the field is empty. - LocalShutdownScript lnwire.DeliveryAddress - - // RemoteShutdownScript is set to a pre-set script if the channel was opened - // by the remote node with option_upfront_shutdown_script set. If the option - // was not set, the field is empty. - RemoteShutdownScript lnwire.DeliveryAddress - - // ThawHeight is the height when a frozen channel once again becomes a - // normal channel. If this is zero, then there're no restrictions on - // this channel. If the value is lower than 500,000, then it's - // interpreted as a relative height, or an absolute height otherwise. - ThawHeight uint32 - - // TODO(roasbeef): eww - Db *DB - - // TODO(roasbeef): just need to store local and remote HTLC's? - - sync.RWMutex -} - -// ShortChanID returns the current ShortChannelID of this channel. -func (c *OpenChannel) ShortChanID() lnwire.ShortChannelID { - c.RLock() - defer c.RUnlock() - - return c.ShortChannelID -} - -// ChanStatus returns the current ChannelStatus of this channel. -func (c *OpenChannel) ChanStatus() ChannelStatus { - c.RLock() - defer c.RUnlock() - - return c.chanStatus -} - -// ApplyChanStatus allows the caller to modify the internal channel state in a -// thead-safe manner. 
-func (c *OpenChannel) ApplyChanStatus(status ChannelStatus) er.R { - c.Lock() - defer c.Unlock() - - return c.putChanStatus(status) -} - -// ClearChanStatus allows the caller to clear a particular channel status from -// the primary channel status bit field. After this method returns, a call to -// HasChanStatus(status) should return false. -func (c *OpenChannel) ClearChanStatus(status ChannelStatus) er.R { - c.Lock() - defer c.Unlock() - - return c.clearChanStatus(status) -} - -// HasChanStatus returns true if the internal bitfield channel status of the -// target channel has the specified status bit set. -func (c *OpenChannel) HasChanStatus(status ChannelStatus) bool { - c.RLock() - defer c.RUnlock() - - return c.hasChanStatus(status) -} - -func (c *OpenChannel) hasChanStatus(status ChannelStatus) bool { - // Special case ChanStatusDefualt since it isn't actually flag, but a - // particular combination (or lack-there-of) of flags. - if status == ChanStatusDefault { - return c.chanStatus == ChanStatusDefault - } - - return c.chanStatus&status == status -} - -// RefreshShortChanID updates the in-memory channel state using the latest -// value observed on disk. -// -// TODO: the name of this function should be changed to reflect the fact that -// it is not only refreshing the short channel id but all the channel state. -// maybe Refresh/Reload? -func (c *OpenChannel) RefreshShortChanID() er.R { - c.Lock() - defer c.Unlock() - - err := kvdb.View(c.Db, func(tx kvdb.RTx) er.R { - chanBucket, err := fetchChanBucket( - tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash, - ) - if err != nil { - return err - } - - // We'll re-populating the in-memory channel with the info - // fetched from disk. 
- if err := fetchChanInfo(chanBucket, c); err != nil { - return er.Errorf("unable to fetch chan info: %v", err) - } - - return nil - }, func() {}) - if err != nil { - return err - } - - return nil -} - -// fetchChanBucket is a helper function that returns the bucket where a -// channel's data resides in given: the public key for the node, the outpoint, -// and the chainhash that the channel resides on. -func fetchChanBucket(tx kvdb.RTx, nodeKey *btcec.PublicKey, - outPoint *wire.OutPoint, chainHash chainhash.Hash) (kvdb.RBucket, er.R) { - - // First fetch the top level bucket which stores all data related to - // current, active channels. - openChanBucket := tx.ReadBucket(openChannelBucket) - if openChanBucket == nil { - return nil, ErrNoChanDBExists.Default() - } - - // TODO(roasbeef): CreateTopLevelBucket on the interface isn't like - // CreateIfNotExists, will return error - - // Within this top level bucket, fetch the bucket dedicated to storing - // open channel data specific to the remote node. - nodePub := nodeKey.SerializeCompressed() - nodeChanBucket := openChanBucket.NestedReadBucket(nodePub) - if nodeChanBucket == nil { - return nil, ErrNoActiveChannels.Default() - } - - // We'll then recurse down an additional layer in order to fetch the - // bucket for this particular chain. - chainBucket := nodeChanBucket.NestedReadBucket(chainHash[:]) - if chainBucket == nil { - return nil, ErrNoActiveChannels.Default() - } - - // With the bucket for the node and chain fetched, we can now go down - // another level, for this channel itself. 
- var chanPointBuf bytes.Buffer - if err := writeOutpoint(&chanPointBuf, outPoint); err != nil { - return nil, err - } - chanBucket := chainBucket.NestedReadBucket(chanPointBuf.Bytes()) - if chanBucket == nil { - return nil, ErrChannelNotFound.Default() - } - - return chanBucket, nil -} - -// fetchChanBucketRw is a helper function that returns the bucket where a -// channel's data resides in given: the public key for the node, the outpoint, -// and the chainhash that the channel resides on. This differs from -// fetchChanBucket in that it returns a writeable bucket. -func fetchChanBucketRw(tx kvdb.RwTx, nodeKey *btcec.PublicKey, // nolint:interfacer - outPoint *wire.OutPoint, chainHash chainhash.Hash) (kvdb.RwBucket, er.R) { - - readBucket, err := fetchChanBucket(tx, nodeKey, outPoint, chainHash) - if err != nil { - return nil, err - } - - return readBucket.(kvdb.RwBucket), nil -} - -// fullSync syncs the contents of an OpenChannel while re-using an existing -// database transaction. -func (c *OpenChannel) fullSync(tx kvdb.RwTx) er.R { - // First fetch the top level bucket which stores all data related to - // current, active channels. - openChanBucket, err := tx.CreateTopLevelBucket(openChannelBucket) - if err != nil { - return err - } - - // Within this top level bucket, fetch the bucket dedicated to storing - // open channel data specific to the remote node. - nodePub := c.IdentityPub.SerializeCompressed() - nodeChanBucket, err := openChanBucket.CreateBucketIfNotExists(nodePub) - if err != nil { - return err - } - - // We'll then recurse down an additional layer in order to fetch the - // bucket for this particular chain. - chainBucket, err := nodeChanBucket.CreateBucketIfNotExists(c.ChainHash[:]) - if err != nil { - return err - } - - // With the bucket for the node fetched, we can now go down another - // level, creating the bucket for this channel itself. 
- var chanPointBuf bytes.Buffer - if err := writeOutpoint(&chanPointBuf, &c.FundingOutpoint); err != nil { - return err - } - chanBucket, err := chainBucket.CreateBucket( - chanPointBuf.Bytes(), - ) - switch { - case kvdb.ErrBucketExists.Is(err): - // If this channel already exists, then in order to avoid - // overriding it, we'll return an error back up to the caller. - return ErrChanAlreadyExists.Default() - case err != nil: - return err - } - - return putOpenChannel(chanBucket, c) -} - -// MarkAsOpen marks a channel as fully open given a locator that uniquely -// describes its location within the chain. -func (c *OpenChannel) MarkAsOpen(openLoc lnwire.ShortChannelID) er.R { - c.Lock() - defer c.Unlock() - - if err := kvdb.Update(c.Db, func(tx kvdb.RwTx) er.R { - chanBucket, err := fetchChanBucket( - tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash, - ) - if err != nil { - return err - } - - channel, err := fetchOpenChannel(chanBucket, &c.FundingOutpoint) - if err != nil { - return err - } - - channel.IsPending = false - channel.ShortChannelID = openLoc - - return putOpenChannel(chanBucket.(kvdb.RwBucket), channel) - }, func() {}); err != nil { - return err - } - - c.IsPending = false - c.ShortChannelID = openLoc - c.Packager = NewChannelPackager(openLoc) - - return nil -} - -// MarkDataLoss marks sets the channel status to LocalDataLoss and stores the -// passed commitPoint for use to retrieve funds in case the remote force closes -// the channel. 
-func (c *OpenChannel) MarkDataLoss(commitPoint *btcec.PublicKey) er.R { - c.Lock() - defer c.Unlock() - - var b bytes.Buffer - if err := WriteElement(&b, commitPoint); err != nil { - return err - } - - putCommitPoint := func(chanBucket kvdb.RwBucket) er.R { - return chanBucket.Put(dataLossCommitPointKey, b.Bytes()) - } - - return c.putChanStatus(ChanStatusLocalDataLoss, putCommitPoint) -} - -func mapErr(err er.R, code *er.ErrorCode) (er.R, bool) { - switch { - case err == nil: - return nil, false - case ErrNoChanDBExists.Is(err), ErrNoActiveChannels.Is(err), ErrChannelNotFound.Is(err): - if code != nil { - return code.New("", err), true - } - return nil, true - default: - return err, true - } -} - -// DataLossCommitPoint retrieves the stored commit point set during -// MarkDataLoss. If not found ErrNoCommitPoint is returned. -func (c *OpenChannel) DataLossCommitPoint() (*btcec.PublicKey, er.R) { - var commitPoint *btcec.PublicKey - - err := kvdb.View(c.Db, func(tx kvdb.RTx) er.R { - chanBucket, err := fetchChanBucket( - tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash, - ) - if err, stop := mapErr(err, ErrNoCommitPoint); stop { - return err - } - - bs := chanBucket.Get(dataLossCommitPointKey) - if bs == nil { - return ErrNoCommitPoint.Default() - } - r := bytes.NewReader(bs) - if err := ReadElements(r, &commitPoint); err != nil { - return err - } - - return nil - }, func() { - commitPoint = nil - }) - if err != nil { - return nil, err - } - - return commitPoint, nil -} - -// MarkBorked marks the event when the channel as reached an irreconcilable -// state, such as a channel breach or state desynchronization. Borked channels -// should never be added to the switch. -func (c *OpenChannel) MarkBorked() er.R { - c.Lock() - defer c.Unlock() - - return c.putChanStatus(ChanStatusBorked) -} - -// ChanSyncMsg returns the ChannelReestablish message that should be sent upon -// reconnection with the remote peer that we're maintaining this channel with. 
-// The information contained within this message is necessary to re-sync our -// commitment chains in the case of a last or only partially processed message. -// When the remote party receiver this message one of three things may happen: -// -// 1. We're fully synced and no messages need to be sent. -// 2. We didn't get the last CommitSig message they sent, to they'll re-send -// it. -// 3. We didn't get the last RevokeAndAck message they sent, so they'll -// re-send it. -// -// If this is a restored channel, having status ChanStatusRestored, then we'll -// modify our typical chan sync message to ensure they force close even if -// we're on the very first state. -func (c *OpenChannel) ChanSyncMsg() (*lnwire.ChannelReestablish, er.R) { - c.Lock() - defer c.Unlock() - - // The remote commitment height that we'll send in the - // ChannelReestablish message is our current commitment height plus - // one. If the receiver thinks that our commitment height is actually - // *equal* to this value, then they'll re-send the last commitment that - // they sent but we never fully processed. - localHeight := c.LocalCommitment.CommitHeight - nextLocalCommitHeight := localHeight + 1 - - // The second value we'll send is the height of the remote commitment - // from our PoV. If the receiver thinks that their height is actually - // *one plus* this value, then they'll re-send their last revocation. - remoteChainTipHeight := c.RemoteCommitment.CommitHeight - - // If this channel has undergone a commitment update, then in order to - // prove to the remote party our knowledge of their prior commitment - // state, we'll also send over the last commitment secret that the - // remote party sent. 
- var lastCommitSecret [32]byte - if remoteChainTipHeight != 0 { - remoteSecret, err := c.RevocationStore.LookUp( - remoteChainTipHeight - 1, - ) - if err != nil { - return nil, err - } - lastCommitSecret = [32]byte(*remoteSecret) - } - - // Additionally, we'll send over the current unrevoked commitment on - // our local commitment transaction. - currentCommitSecret, err := c.RevocationProducer.AtIndex( - localHeight, - ) - if err != nil { - return nil, err - } - - // If we've restored this channel, then we'll purposefully give them an - // invalid LocalUnrevokedCommitPoint so they'll force close the channel - // allowing us to sweep our funds. - if c.hasChanStatus(ChanStatusRestored) { - currentCommitSecret[0] ^= 1 - - // If this is a tweakless channel, then we'll purposefully send - // a next local height taht's invalid to trigger a force close - // on their end. We do this as tweakless channels don't require - // that the commitment point is valid, only that it's present. - if c.ChanType.IsTweakless() { - nextLocalCommitHeight = 0 - } - } - - return &lnwire.ChannelReestablish{ - ChanID: lnwire.NewChanIDFromOutPoint( - &c.FundingOutpoint, - ), - NextLocalCommitHeight: nextLocalCommitHeight, - RemoteCommitTailHeight: remoteChainTipHeight, - LastRemoteCommitSecret: lastCommitSecret, - LocalUnrevokedCommitPoint: input.ComputeCommitmentPoint( - currentCommitSecret[:], - ), - }, nil -} - -// isBorked returns true if the channel has been marked as borked in the -// database. This requires an existing database transaction to already be -// active. -// -// NOTE: The primary mutex should already be held before this method is called. 
-func (c *OpenChannel) isBorked(chanBucket kvdb.RBucket) (bool, er.R) { - channel, err := fetchOpenChannel(chanBucket, &c.FundingOutpoint) - if err != nil { - return false, err - } - - return channel.chanStatus != ChanStatusDefault, nil -} - -// MarkCommitmentBroadcasted marks the channel as a commitment transaction has -// been broadcast, either our own or the remote, and we should watch the chain -// for it to confirm before taking any further action. It takes as argument the -// closing tx _we believe_ will appear in the chain. This is only used to -// republish this tx at startup to ensure propagation, and we should still -// handle the case where a different tx actually hits the chain. -func (c *OpenChannel) MarkCommitmentBroadcasted(closeTx *wire.MsgTx, - locallyInitiated bool) er.R { - - return c.markBroadcasted( - ChanStatusCommitBroadcasted, forceCloseTxKey, closeTx, - locallyInitiated, - ) -} - -// MarkCoopBroadcasted marks the channel to indicate that a cooperative close -// transaction has been broadcast, either our own or the remote, and that we -// should watch the chain for it to confirm before taking further action. It -// takes as argument a cooperative close tx that could appear on chain, and -// should be rebroadcast upon startup. This is only used to republish and -// ensure propagation, and we should still handle the case where a different tx -// actually hits the chain. -func (c *OpenChannel) MarkCoopBroadcasted(closeTx *wire.MsgTx, - locallyInitiated bool) er.R { - - return c.markBroadcasted( - ChanStatusCoopBroadcasted, coopCloseTxKey, closeTx, - locallyInitiated, - ) -} - -// markBroadcasted is a helper function which modifies the channel status of the -// receiving channel and inserts a close transaction under the requested key, -// which should specify either a coop or force close. It adds a status which -// indicates the party that initiated the channel close. 
-func (c *OpenChannel) markBroadcasted(status ChannelStatus, key []byte, - closeTx *wire.MsgTx, locallyInitiated bool) er.R { - - c.Lock() - defer c.Unlock() - - // If a closing tx is provided, we'll generate a closure to write the - // transaction in the appropriate bucket under the given key. - var putClosingTx func(kvdb.RwBucket) er.R - if closeTx != nil { - var b bytes.Buffer - if err := WriteElement(&b, closeTx); err != nil { - return err - } - - putClosingTx = func(chanBucket kvdb.RwBucket) er.R { - return chanBucket.Put(key, b.Bytes()) - } - } - - // Add the initiator status to the status provided. These statuses are - // set in addition to the broadcast status so that we do not need to - // migrate the original logic which does not store initiator. - if locallyInitiated { - status |= ChanStatusLocalCloseInitiator - } else { - status |= ChanStatusRemoteCloseInitiator - } - - return c.putChanStatus(status, putClosingTx) -} - -// BroadcastedCommitment retrieves the stored unilateral closing tx set during -// MarkCommitmentBroadcasted. If not found ErrNoCloseTx is returned. -func (c *OpenChannel) BroadcastedCommitment() (*wire.MsgTx, er.R) { - return c.getClosingTx(forceCloseTxKey) -} - -// BroadcastedCooperative retrieves the stored cooperative closing tx set during -// MarkCoopBroadcasted. If not found ErrNoCloseTx is returned. -func (c *OpenChannel) BroadcastedCooperative() (*wire.MsgTx, er.R) { - return c.getClosingTx(coopCloseTxKey) -} - -// getClosingTx is a helper method which returns the stored closing transaction -// for key. The caller should use either the force or coop closing keys. 
-func (c *OpenChannel) getClosingTx(key []byte) (*wire.MsgTx, er.R) { - var closeTx *wire.MsgTx - - err := kvdb.View(c.Db, func(tx kvdb.RTx) er.R { - chanBucket, err := fetchChanBucket( - tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash, - ) - if err, stop := mapErr(err, ErrNoCloseTx); stop { - return err - } - - bs := chanBucket.Get(key) - if bs == nil { - return ErrNoCloseTx.Default() - } - r := bytes.NewReader(bs) - return ReadElement(r, &closeTx) - }, func() { - closeTx = nil - }) - if err != nil { - return nil, err - } - - return closeTx, nil -} - -// putChanStatus appends the given status to the channel. fs is an optional -// list of closures that are given the chanBucket in order to atomically add -// extra information together with the new status. -func (c *OpenChannel) putChanStatus(status ChannelStatus, - fs ...func(kvdb.RwBucket) er.R) er.R { - - if err := kvdb.Update(c.Db, func(tx kvdb.RwTx) er.R { - chanBucket, err := fetchChanBucketRw( - tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash, - ) - if err != nil { - return err - } - - channel, err := fetchOpenChannel(chanBucket, &c.FundingOutpoint) - if err != nil { - return err - } - - // Add this status to the existing bitvector found in the DB. - status = channel.chanStatus | status - channel.chanStatus = status - - if err := putOpenChannel(chanBucket, channel); err != nil { - return err - } - - for _, f := range fs { - // Skip execution of nil closures. - if f == nil { - continue - } - - if err := f(chanBucket); err != nil { - return err - } - } - - return nil - }, func() {}); err != nil { - return err - } - - // Update the in-memory representation to keep it in sync with the DB. 
- c.chanStatus = status - - return nil -} - -func (c *OpenChannel) clearChanStatus(status ChannelStatus) er.R { - if err := kvdb.Update(c.Db, func(tx kvdb.RwTx) er.R { - chanBucket, err := fetchChanBucketRw( - tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash, - ) - if err != nil { - return err - } - - channel, err := fetchOpenChannel(chanBucket, &c.FundingOutpoint) - if err != nil { - return err - } - - // Unset this bit in the bitvector on disk. - status = channel.chanStatus & ^status - channel.chanStatus = status - - return putOpenChannel(chanBucket, channel) - }, func() {}); err != nil { - return err - } - - // Update the in-memory representation to keep it in sync with the DB. - c.chanStatus = status - - return nil -} - -// putChannel serializes, and stores the current state of the channel in its -// entirety. -func putOpenChannel(chanBucket kvdb.RwBucket, channel *OpenChannel) er.R { - // First, we'll write out all the relatively static fields, that are - // decided upon initial channel creation. - if err := putChanInfo(chanBucket, channel); err != nil { - return er.Errorf("unable to store chan info: %v", err) - } - - // With the static channel info written out, we'll now write out the - // current commitment state for both parties. - if err := putChanCommitments(chanBucket, channel); err != nil { - return er.Errorf("unable to store chan commitments: %v", err) - } - - // Next, if this is a frozen channel, we'll add in the axillary - // information we need to store. - if channel.ChanType.IsFrozen() { - err := storeThawHeight( - chanBucket, channel.ThawHeight, - ) - if err != nil { - return er.Errorf("unable to store thaw height: %v", err) - } - } - - // Finally, we'll write out the revocation state for both parties - // within a distinct key space. 
- if err := putChanRevocationState(chanBucket, channel); err != nil { - return er.Errorf("unable to store chan revocations: %v", err) - } - - return nil -} - -// fetchOpenChannel retrieves, and deserializes (including decrypting -// sensitive) the complete channel currently active with the passed nodeID. -func fetchOpenChannel(chanBucket kvdb.RBucket, - chanPoint *wire.OutPoint) (*OpenChannel, er.R) { - - channel := &OpenChannel{ - FundingOutpoint: *chanPoint, - } - - // First, we'll read all the static information that changes less - // frequently from disk. - if err := fetchChanInfo(chanBucket, channel); err != nil { - return nil, er.Errorf("unable to fetch chan info: %v", err) - } - - // With the static information read, we'll now read the current - // commitment state for both sides of the channel. - if err := fetchChanCommitments(chanBucket, channel); err != nil { - return nil, er.Errorf("unable to fetch chan commitments: %v", err) - } - - // Next, if this is a frozen channel, we'll add in the axillary - // information we need to store. - if channel.ChanType.IsFrozen() { - thawHeight, err := fetchThawHeight(chanBucket) - if err != nil { - return nil, er.Errorf("unable to store thaw "+ - "height: %v", err) - } - - channel.ThawHeight = thawHeight - } - - // Finally, we'll retrieve the current revocation state so we can - // properly - if err := fetchChanRevocationState(chanBucket, channel); err != nil { - return nil, er.Errorf("unable to fetch chan revocations: %v", err) - } - - channel.Packager = NewChannelPackager(channel.ShortChannelID) - - return channel, nil -} - -// SyncPending writes the contents of the channel to the database while it's in -// the pending (waiting for funding confirmation) state. The IsPending flag -// will be set to true. When the channel's funding transaction is confirmed, -// the channel should be marked as "open" and the IsPending flag set to false. 
-// Note that this function also creates a LinkNode relationship between this -// newly created channel and a new LinkNode instance. This allows listing all -// channels in the database globally, or according to the LinkNode they were -// created with. -// -// TODO(roasbeef): addr param should eventually be an lnwire.NetAddress type -// that includes service bits. -func (c *OpenChannel) SyncPending(addr net.Addr, pendingHeight uint32) er.R { - c.Lock() - defer c.Unlock() - - c.FundingBroadcastHeight = pendingHeight - - return kvdb.Update(c.Db, func(tx kvdb.RwTx) er.R { - return syncNewChannel(tx, c, []net.Addr{addr}) - }, func() {}) -} - -// syncNewChannel will write the passed channel to disk, and also create a -// LinkNode (if needed) for the channel peer. -func syncNewChannel(tx kvdb.RwTx, c *OpenChannel, addrs []net.Addr) er.R { - // First, sync all the persistent channel state to disk. - if err := c.fullSync(tx); err != nil { - return err - } - - nodeInfoBucket, err := tx.CreateTopLevelBucket(nodeInfoBucket) - if err != nil { - return err - } - - // If a LinkNode for this identity public key already exists, - // then we can exit early. - nodePub := c.IdentityPub.SerializeCompressed() - if nodeInfoBucket.Get(nodePub) != nil { - return nil - } - - // Next, we need to establish a (possibly) new LinkNode relationship - // for this channel. The LinkNode metadata contains reachability, - // up-time, and service bits related information. - linkNode := c.Db.NewLinkNode(protocol.MainNet, c.IdentityPub, addrs...) - - // TODO(roasbeef): do away with link node all together? - - return putLinkNode(nodeInfoBucket, linkNode) -} - -// UpdateCommitment updates the local commitment state. It locks in the pending -// local updates that were received by us from the remote party. The commitment -// state completely describes the balance state at this point in the commitment -// chain. 
In addition to that, it persists all the remote log updates that we -// have acked, but not signed a remote commitment for yet. These need to be -// persisted to be able to produce a valid commit signature if a restart would -// occur. This method its to be called when we revoke our prior commitment -// state. -func (c *OpenChannel) UpdateCommitment(newCommitment *ChannelCommitment, - unsignedAckedUpdates []LogUpdate) er.R { - - c.Lock() - defer c.Unlock() - - // If this is a restored channel, then we want to avoid mutating the - // state as all, as it's impossible to do so in a protocol compliant - // manner. - if c.hasChanStatus(ChanStatusRestored) { - return ErrNoRestoredChannelMutation.Default() - } - - err := kvdb.Update(c.Db, func(tx kvdb.RwTx) er.R { - chanBucket, err := fetchChanBucketRw( - tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash, - ) - if err != nil { - return err - } - - // If the channel is marked as borked, then for safety reasons, - // we shouldn't attempt any further updates. - isBorked, err := c.isBorked(chanBucket) - if err != nil { - return err - } - if isBorked { - return ErrChanBorked.Default() - } - - if err = putChanInfo(chanBucket, c); err != nil { - return er.Errorf("unable to store chan info: %v", err) - } - - // With the proper bucket fetched, we'll now write the latest - // commitment state to disk for the target party. - err = putChanCommitment( - chanBucket, newCommitment, true, - ) - if err != nil { - return er.Errorf("unable to store chan "+ - "revocations: %v", err) - } - - // Persist unsigned but acked remote updates that need to be - // restored after a restart. 
- var b bytes.Buffer - err = serializeLogUpdates(&b, unsignedAckedUpdates) - if err != nil { - return err - } - - err = chanBucket.Put(unsignedAckedUpdatesKey, b.Bytes()) - if err != nil { - return er.Errorf("unable to store dangline remote "+ - "updates: %v", err) - } - - // Persist the remote unsigned local updates that are not included - // in our new commitment. - updateBytes := chanBucket.Get(remoteUnsignedLocalUpdatesKey) - if updateBytes == nil { - return nil - } - - r := bytes.NewReader(updateBytes) - updates, err := deserializeLogUpdates(r) - if err != nil { - return err - } - - var validUpdates []LogUpdate - for _, upd := range updates { - // Filter for updates that are not on our local - // commitment. - if upd.LogIndex >= newCommitment.LocalLogIndex { - validUpdates = append(validUpdates, upd) - } - } - - var b2 bytes.Buffer - err = serializeLogUpdates(&b2, validUpdates) - if err != nil { - return er.Errorf("unable to serialize log updates: %v", err) - } - - err = chanBucket.Put(remoteUnsignedLocalUpdatesKey, b2.Bytes()) - if err != nil { - return er.Errorf("unable to restore chanbucket: %v", err) - } - - return nil - }, func() {}) - if err != nil { - return err - } - - c.LocalCommitment = *newCommitment - - return nil -} - -// BalancesAtHeight returns the local and remote balances on our commitment -// transactions as of a given height. -// -// NOTE: these are our balances *after* subtracting the commitment fee and -// anchor outputs. -func (c *OpenChannel) BalancesAtHeight(height uint64) (lnwire.MilliSatoshi, - lnwire.MilliSatoshi, er.R) { - - if height > c.LocalCommitment.CommitHeight && - height > c.RemoteCommitment.CommitHeight { - - return 0, 0, errHeightNotReached.Default() - } - - // If our current commit is as the desired height, we can return our - // current balances. 
- if c.LocalCommitment.CommitHeight == height { - return c.LocalCommitment.LocalBalance, - c.LocalCommitment.RemoteBalance, nil - } - - // If our current remote commit is at the desired height, we can return - // the current balances. - if c.RemoteCommitment.CommitHeight == height { - return c.RemoteCommitment.LocalBalance, - c.RemoteCommitment.RemoteBalance, nil - } - - // If we are not currently on the height requested, we need to look up - // the previous height to obtain our balances at the given height. - commit, err := c.FindPreviousState(height) - if err != nil { - return 0, 0, err - } - - return commit.LocalBalance, commit.RemoteBalance, nil -} - -// ActiveHtlcs returns a slice of HTLC's which are currently active on *both* -// commitment transactions. -func (c *OpenChannel) ActiveHtlcs() []HTLC { - c.RLock() - defer c.RUnlock() - - // We'll only return HTLC's that are locked into *both* commitment - // transactions. So we'll iterate through their set of HTLC's to note - // which ones are present on their commitment. - remoteHtlcs := make(map[[32]byte]struct{}) - for _, htlc := range c.RemoteCommitment.Htlcs { - onionHash := sha256.Sum256(htlc.OnionBlob) - remoteHtlcs[onionHash] = struct{}{} - } - - // Now that we know which HTLC's they have, we'll only mark the HTLC's - // as active if *we* know them as well. - activeHtlcs := make([]HTLC, 0, len(remoteHtlcs)) - for _, htlc := range c.LocalCommitment.Htlcs { - onionHash := sha256.Sum256(htlc.OnionBlob) - if _, ok := remoteHtlcs[onionHash]; !ok { - continue - } - - activeHtlcs = append(activeHtlcs, htlc) - } - - return activeHtlcs -} - -// HTLC is the on-disk representation of a hash time-locked contract. HTLCs are -// contained within ChannelDeltas which encode the current state of the -// commitment between state updates. -// -// TODO(roasbeef): save space by using smaller ints at tail end? -type HTLC struct { - // Signature is the signature for the second level covenant transaction - // for this HTLC. 
The second level transaction is a timeout tx in the - // case that this is an outgoing HTLC, and a success tx in the case - // that this is an incoming HTLC. - // - // TODO(roasbeef): make [64]byte instead? - Signature []byte - - // RHash is the payment hash of the HTLC. - RHash [32]byte - - // Amt is the amount of milli-satoshis this HTLC escrows. - Amt lnwire.MilliSatoshi - - // RefundTimeout is the absolute timeout on the HTLC that the sender - // must wait before reclaiming the funds in limbo. - RefundTimeout uint32 - - // OutputIndex is the output index for this particular HTLC output - // within the commitment transaction. - OutputIndex int32 - - // Incoming denotes whether we're the receiver or the sender of this - // HTLC. - Incoming bool - - // OnionBlob is an opaque blob which is used to complete multi-hop - // routing. - OnionBlob []byte - - // HtlcIndex is the HTLC counter index of this active, outstanding - // HTLC. This differs from the LogIndex, as the HtlcIndex is only - // incremented for each offered HTLC, while they LogIndex is - // incremented for each update (includes settle+fail). - HtlcIndex uint64 - - // LogIndex is the cumulative log index of this HTLC. This differs - // from the HtlcIndex as this will be incremented for each new log - // update added. - LogIndex uint64 -} - -// SerializeHtlcs writes out the passed set of HTLC's into the passed writer -// using the current default on-disk serialization format. -// -// NOTE: This API is NOT stable, the on-disk format will likely change in the -// future. 
-func SerializeHtlcs(b io.Writer, htlcs ...HTLC) er.R { - numHtlcs := uint16(len(htlcs)) - if err := WriteElement(b, numHtlcs); err != nil { - return err - } - - for _, htlc := range htlcs { - if err := WriteElements(b, - htlc.Signature, htlc.RHash, htlc.Amt, htlc.RefundTimeout, - htlc.OutputIndex, htlc.Incoming, htlc.OnionBlob[:], - htlc.HtlcIndex, htlc.LogIndex, - ); err != nil { - return err - } - } - - return nil -} - -// DeserializeHtlcs attempts to read out a slice of HTLC's from the passed -// io.Reader. The bytes within the passed reader MUST have been previously -// written to using the SerializeHtlcs function. -// -// NOTE: This API is NOT stable, the on-disk format will likely change in the -// future. -func DeserializeHtlcs(r io.Reader) ([]HTLC, er.R) { - var numHtlcs uint16 - if err := ReadElement(r, &numHtlcs); err != nil { - return nil, err - } - - var htlcs []HTLC - if numHtlcs == 0 { - return htlcs, nil - } - - htlcs = make([]HTLC, numHtlcs) - for i := uint16(0); i < numHtlcs; i++ { - if err := ReadElements(r, - &htlcs[i].Signature, &htlcs[i].RHash, &htlcs[i].Amt, - &htlcs[i].RefundTimeout, &htlcs[i].OutputIndex, - &htlcs[i].Incoming, &htlcs[i].OnionBlob, - &htlcs[i].HtlcIndex, &htlcs[i].LogIndex, - ); err != nil { - return htlcs, err - } - } - - return htlcs, nil -} - -// Copy returns a full copy of the target HTLC. -func (h *HTLC) Copy() HTLC { - clone := HTLC{ - Incoming: h.Incoming, - Amt: h.Amt, - RefundTimeout: h.RefundTimeout, - OutputIndex: h.OutputIndex, - } - copy(clone.Signature[:], h.Signature) - copy(clone.RHash[:], h.RHash[:]) - - return clone -} - -// LogUpdate represents a pending update to the remote commitment chain. The -// log update may be an add, fail, or settle entry. We maintain this data in -// order to be able to properly retransmit our proposed -// state if necessary. -type LogUpdate struct { - // LogIndex is the log index of this proposed commitment update entry. 
- LogIndex uint64 - - // UpdateMsg is the update message that was included within the our - // local update log. The LogIndex value denotes the log index of this - // update which will be used when restoring our local update log if - // we're left with a dangling update on restart. - UpdateMsg lnwire.Message -} - -// Encode writes a log update to the provided io.Writer. -func (l *LogUpdate) Encode(w io.Writer) er.R { - return WriteElements(w, l.LogIndex, l.UpdateMsg) -} - -// Decode reads a log update from the provided io.Reader. -func (l *LogUpdate) Decode(r io.Reader) er.R { - return ReadElements(r, &l.LogIndex, &l.UpdateMsg) -} - -// CircuitKey is used by a channel to uniquely identify the HTLCs it receives -// from the switch, and is used to purge our in-memory state of HTLCs that have -// already been processed by a link. Two list of CircuitKeys are included in -// each CommitDiff to allow a link to determine which in-memory htlcs directed -// the opening and closing of circuits in the switch's circuit map. -type CircuitKey struct { - // ChanID is the short chanid indicating the HTLC's origin. - // - // NOTE: It is fine for this value to be blank, as this indicates a - // locally-sourced payment. - ChanID lnwire.ShortChannelID - - // HtlcID is the unique htlc index predominately assigned by links, - // though can also be assigned by switch in the case of locally-sourced - // payments. - HtlcID uint64 -} - -// SetBytes deserializes the given bytes into this CircuitKey. -func (k *CircuitKey) SetBytes(bs []byte) er.R { - if len(bs) != 16 { - return ErrInvalidCircuitKeyLen.Default() - } - - k.ChanID = lnwire.NewShortChanIDFromInt( - binary.BigEndian.Uint64(bs[:8])) - k.HtlcID = binary.BigEndian.Uint64(bs[8:]) - - return nil -} - -// Bytes returns the serialized bytes for this circuit key. 
-func (k CircuitKey) Bytes() []byte { - var bs = make([]byte, 16) - binary.BigEndian.PutUint64(bs[:8], k.ChanID.ToUint64()) - binary.BigEndian.PutUint64(bs[8:], k.HtlcID) - return bs -} - -// Encode writes a CircuitKey to the provided io.Writer. -func (k *CircuitKey) Encode(w io.Writer) er.R { - var scratch [16]byte - binary.BigEndian.PutUint64(scratch[:8], k.ChanID.ToUint64()) - binary.BigEndian.PutUint64(scratch[8:], k.HtlcID) - - _, err := util.Write(w, scratch[:]) - return err -} - -// Decode reads a CircuitKey from the provided io.Reader. -func (k *CircuitKey) Decode(r io.Reader) er.R { - var scratch [16]byte - - if _, err := util.ReadFull(r, scratch[:]); err != nil { - return err - } - k.ChanID = lnwire.NewShortChanIDFromInt( - binary.BigEndian.Uint64(scratch[:8])) - k.HtlcID = binary.BigEndian.Uint64(scratch[8:]) - - return nil -} - -// String returns a string representation of the CircuitKey. -func (k CircuitKey) String() string { - return fmt.Sprintf("(Chan ID=%s, HTLC ID=%d)", k.ChanID, k.HtlcID) -} - -// CommitDiff represents the delta needed to apply the state transition between -// two subsequent commitment states. Given state N and state N+1, one is able -// to apply the set of messages contained within the CommitDiff to N to arrive -// at state N+1. Each time a new commitment is extended, we'll write a new -// commitment (along with the full commitment state) to disk so we can -// re-transmit the state in the case of a connection loss or message drop. -type CommitDiff struct { - // ChannelCommitment is the full commitment state that one would arrive - // at by applying the set of messages contained in the UpdateDiff to - // the prior accepted commitment. - Commitment ChannelCommitment - - // LogUpdates is the set of messages sent prior to the commitment state - // transition in question. Upon reconnection, if we detect that they - // don't have the commitment, then we re-send this along with the - // proper signature. 
- LogUpdates []LogUpdate - - // CommitSig is the exact CommitSig message that should be sent after - // the set of LogUpdates above has been retransmitted. The signatures - // within this message should properly cover the new commitment state - // and also the HTLC's within the new commitment state. - CommitSig *lnwire.CommitSig - - // OpenedCircuitKeys is a set of unique identifiers for any downstream - // Add packets included in this commitment txn. After a restart, this - // set of htlcs is acked from the link's incoming mailbox to ensure - // there isn't an attempt to re-add them to this commitment txn. - OpenedCircuitKeys []CircuitKey - - // ClosedCircuitKeys records the unique identifiers for any settle/fail - // packets that were resolved by this commitment txn. After a restart, - // this is used to ensure those circuits are removed from the circuit - // map, and the downstream packets in the link's mailbox are removed. - ClosedCircuitKeys []CircuitKey - - // AddAcks specifies the locations (commit height, pkg index) of any - // Adds that were failed/settled in this commit diff. This will ack - // entries in *this* channel's forwarding packages. - // - // NOTE: This value is not serialized, it is used to atomically mark the - // resolution of adds, such that they will not be reprocessed after a - // restart. - AddAcks []AddRef - - // SettleFailAcks specifies the locations (chan id, commit height, pkg - // index) of any Settles or Fails that were locked into this commit - // diff, and originate from *another* channel, i.e. the outgoing link. - // - // NOTE: This value is not serialized, it is used to atomically acks - // settles and fails from the forwarding packages of other channels, - // such that they will not be reforwarded internally after a restart. - SettleFailAcks []SettleFailRef -} - -// serializeLogUpdates serializes provided list of updates to a stream. 
-func serializeLogUpdates(w io.Writer, logUpdates []LogUpdate) er.R { - numUpdates := uint16(len(logUpdates)) - if err := util.WriteBin(w, byteOrder, numUpdates); err != nil { - return err - } - - for _, diff := range logUpdates { - err := WriteElements(w, diff.LogIndex, diff.UpdateMsg) - if err != nil { - return err - } - } - - return nil -} - -// deserializeLogUpdates deserializes a list of updates from a stream. -func deserializeLogUpdates(r io.Reader) ([]LogUpdate, er.R) { - var numUpdates uint16 - if err := util.ReadBin(r, byteOrder, &numUpdates); err != nil { - return nil, err - } - - logUpdates := make([]LogUpdate, numUpdates) - for i := 0; i < int(numUpdates); i++ { - err := ReadElements(r, - &logUpdates[i].LogIndex, &logUpdates[i].UpdateMsg, - ) - if err != nil { - return nil, err - } - } - return logUpdates, nil -} - -func serializeCommitDiff(w io.Writer, diff *CommitDiff) er.R { - if err := serializeChanCommit(w, &diff.Commitment); err != nil { - return err - } - - if err := diff.CommitSig.Encode(w, 0); err != nil { - return err - } - - if err := serializeLogUpdates(w, diff.LogUpdates); err != nil { - return err - } - - numOpenRefs := uint16(len(diff.OpenedCircuitKeys)) - if err := util.WriteBin(w, byteOrder, numOpenRefs); err != nil { - return err - } - - for _, openRef := range diff.OpenedCircuitKeys { - err := WriteElements(w, openRef.ChanID, openRef.HtlcID) - if err != nil { - return err - } - } - - numClosedRefs := uint16(len(diff.ClosedCircuitKeys)) - if err := util.WriteBin(w, byteOrder, numClosedRefs); err != nil { - return err - } - - for _, closedRef := range diff.ClosedCircuitKeys { - err := WriteElements(w, closedRef.ChanID, closedRef.HtlcID) - if err != nil { - return err - } - } - - return nil -} - -func deserializeCommitDiff(r io.Reader) (*CommitDiff, er.R) { - var ( - d CommitDiff - err er.R - ) - - d.Commitment, err = deserializeChanCommit(r) - if err != nil { - return nil, err - } - - d.CommitSig = &lnwire.CommitSig{} - if err := 
d.CommitSig.Decode(r, 0); err != nil { - return nil, err - } - - d.LogUpdates, err = deserializeLogUpdates(r) - if err != nil { - return nil, err - } - - var numOpenRefs uint16 - if err := util.ReadBin(r, byteOrder, &numOpenRefs); err != nil { - return nil, err - } - - d.OpenedCircuitKeys = make([]CircuitKey, numOpenRefs) - for i := 0; i < int(numOpenRefs); i++ { - err := ReadElements(r, - &d.OpenedCircuitKeys[i].ChanID, - &d.OpenedCircuitKeys[i].HtlcID) - if err != nil { - return nil, err - } - } - - var numClosedRefs uint16 - if err := util.ReadBin(r, byteOrder, &numClosedRefs); err != nil { - return nil, err - } - - d.ClosedCircuitKeys = make([]CircuitKey, numClosedRefs) - for i := 0; i < int(numClosedRefs); i++ { - err := ReadElements(r, - &d.ClosedCircuitKeys[i].ChanID, - &d.ClosedCircuitKeys[i].HtlcID) - if err != nil { - return nil, err - } - } - - return &d, nil -} - -// AppendRemoteCommitChain appends a new CommitDiff to the end of the -// commitment chain for the remote party. This method is to be used once we -// have prepared a new commitment state for the remote party, but before we -// transmit it to the remote party. The contents of the argument should be -// sufficient to retransmit the updates and signature needed to reconstruct the -// state in full, in the case that we need to retransmit. -func (c *OpenChannel) AppendRemoteCommitChain(diff *CommitDiff) er.R { - c.Lock() - defer c.Unlock() - - // If this is a restored channel, then we want to avoid mutating the - // state at all, as it's impossible to do so in a protocol compliant - // manner. - if c.hasChanStatus(ChanStatusRestored) { - return ErrNoRestoredChannelMutation.Default() - } - - return kvdb.Update(c.Db, func(tx kvdb.RwTx) er.R { - // First, we'll grab the writable bucket where this channel's - // data resides. 
- chanBucket, err := fetchChanBucketRw( - tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash, - ) - if err != nil { - return err - } - - // If the channel is marked as borked, then for safety reasons, - // we shouldn't attempt any further updates. - isBorked, err := c.isBorked(chanBucket) - if err != nil { - return err - } - if isBorked { - return ErrChanBorked.Default() - } - - // Any outgoing settles and fails necessarily have a - // corresponding adds in this channel's forwarding packages. - // Mark all of these as being fully processed in our forwarding - // package, which prevents us from reprocessing them after - // startup. - err = c.Packager.AckAddHtlcs(tx, diff.AddAcks...) - if err != nil { - return err - } - - // Additionally, we ack from any fails or settles that are - // persisted in another channel's forwarding package. This - // prevents the same fails and settles from being retransmitted - // after restarts. The actual fail or settle we need to - // propagate to the remote party is now in the commit diff. - err = c.Packager.AckSettleFails(tx, diff.SettleFailAcks...) - if err != nil { - return err - } - - // TODO(roasbeef): use seqno to derive key for later LCP - - // With the bucket retrieved, we'll now serialize the commit - // diff itself, and write it to disk. - var b bytes.Buffer - if err := serializeCommitDiff(&b, diff); err != nil { - return err - } - return chanBucket.Put(commitDiffKey, b.Bytes()) - }, func() {}) -} - -// RemoteCommitChainTip returns the "tip" of the current remote commitment -// chain. This value will be non-nil iff, we've created a new commitment for -// the remote party that they haven't yet ACK'd. In this case, their commitment -// chain will have a length of two: their current unrevoked commitment, and -// this new pending commitment. Once they revoked their prior state, we'll swap -// these pointers, causing the tip and the tail to point to the same entry. 
-func (c *OpenChannel) RemoteCommitChainTip() (*CommitDiff, er.R) { - var cd *CommitDiff - err := kvdb.View(c.Db, func(tx kvdb.RTx) er.R { - chanBucket, err := fetchChanBucket( - tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash, - ) - if err, stop := mapErr(err, ErrNoPendingCommit); stop { - return err - } - - tipBytes := chanBucket.Get(commitDiffKey) - if tipBytes == nil { - return ErrNoPendingCommit.Default() - } - - tipReader := bytes.NewReader(tipBytes) - dcd, err := deserializeCommitDiff(tipReader) - if err != nil { - return err - } - - cd = dcd - return nil - }, func() { - cd = nil - }) - if err != nil { - return nil, err - } - - return cd, err -} - -// UnsignedAckedUpdates retrieves the persisted unsigned acked remote log -// updates that still need to be signed for. -func (c *OpenChannel) UnsignedAckedUpdates() ([]LogUpdate, er.R) { - var updates []LogUpdate - err := kvdb.View(c.Db, func(tx kvdb.RTx) er.R { - chanBucket, err := fetchChanBucket( - tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash, - ) - if err, stop := mapErr(err, nil); stop { - return err - } - - updateBytes := chanBucket.Get(unsignedAckedUpdatesKey) - if updateBytes == nil { - return nil - } - - r := bytes.NewReader(updateBytes) - updates, err = deserializeLogUpdates(r) - return err - }, func() { - updates = nil - }) - if err != nil { - return nil, err - } - - return updates, nil -} - -// RemoteUnsignedLocalUpdates retrieves the persisted, unsigned local log -// updates that the remote still needs to sign for. 
-func (c *OpenChannel) RemoteUnsignedLocalUpdates() ([]LogUpdate, er.R) { - var updates []LogUpdate - err := kvdb.View(c.Db, func(tx kvdb.RTx) er.R { - chanBucket, err := fetchChanBucket( - tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash, - ) - if err, stop := mapErr(err, nil); stop { - return err - } - - updateBytes := chanBucket.Get(remoteUnsignedLocalUpdatesKey) - if updateBytes == nil { - return nil - } - - r := bytes.NewReader(updateBytes) - updates, err = deserializeLogUpdates(r) - return err - }, func() { - updates = nil - }) - if err != nil { - return nil, err - } - - return updates, nil -} - -// InsertNextRevocation inserts the _next_ commitment point (revocation) into -// the database, and also modifies the internal RemoteNextRevocation attribute -// to point to the passed key. This method is to be using during final channel -// set up, _after_ the channel has been fully confirmed. -// -// NOTE: If this method isn't called, then the target channel won't be able to -// propose new states for the commitment state of the remote party. -func (c *OpenChannel) InsertNextRevocation(revKey *btcec.PublicKey) er.R { - c.Lock() - defer c.Unlock() - - c.RemoteNextRevocation = revKey - - err := kvdb.Update(c.Db, func(tx kvdb.RwTx) er.R { - chanBucket, err := fetchChanBucketRw( - tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash, - ) - if err != nil { - return err - } - - return putChanRevocationState(chanBucket, c) - }, func() {}) - if err != nil { - return err - } - - return nil -} - -// AdvanceCommitChainTail records the new state transition within an on-disk -// append-only log which records all state transitions by the remote peer. In -// the case of an uncooperative broadcast of a prior state by the remote peer, -// this log can be consulted in order to reconstruct the state needed to -// rectify the situation. 
This method will add the current commitment for the -// remote party to the revocation log, and promote the current pending -// commitment to the current remote commitment. The updates parameter is the -// set of local updates that the peer still needs to send us a signature for. -// We store this set of updates in case we go down. -func (c *OpenChannel) AdvanceCommitChainTail(fwdPkg *FwdPkg, - updates []LogUpdate) er.R { - - c.Lock() - defer c.Unlock() - - // If this is a restored channel, then we want to avoid mutating the - // state at all, as it's impossible to do so in a protocol compliant - // manner. - if c.hasChanStatus(ChanStatusRestored) { - return ErrNoRestoredChannelMutation.Default() - } - - var newRemoteCommit *ChannelCommitment - - err := kvdb.Update(c.Db, func(tx kvdb.RwTx) er.R { - chanBucket, err := fetchChanBucketRw( - tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash, - ) - if err != nil { - return err - } - - // If the channel is marked as borked, then for safety reasons, - // we shouldn't attempt any further updates. - isBorked, err := c.isBorked(chanBucket) - if err != nil { - return err - } - if isBorked { - return ErrChanBorked.Default() - } - - // Persist the latest preimage state to disk as the remote peer - // has just added to our local preimage store, and given us a - // new pending revocation key. - if err := putChanRevocationState(chanBucket, c); err != nil { - return err - } - - // With the current preimage producer/store state updated, - // append a new log entry recording this the delta of this - // state transition. - // - // TODO(roasbeef): could make the deltas relative, would save - // space, but then tradeoff for more disk-seeks to recover the - // full state. 
- logKey := revocationLogBucket - logBucket, err := chanBucket.CreateBucketIfNotExists(logKey) - if err != nil { - return err - } - - // Before we append this revoked state to the revocation log, - // we'll swap out what's currently the tail of the commit tip, - // with the current locked-in commitment for the remote party. - tipBytes := chanBucket.Get(commitDiffKey) - tipReader := bytes.NewReader(tipBytes) - newCommit, err := deserializeCommitDiff(tipReader) - if err != nil { - return err - } - err = putChanCommitment( - chanBucket, &newCommit.Commitment, false, - ) - if err != nil { - return err - } - if err := chanBucket.Delete(commitDiffKey); err != nil { - return err - } - - // With the commitment pointer swapped, we can now add the - // revoked (prior) state to the revocation log. - // - // TODO(roasbeef): store less - err = appendChannelLogEntry(logBucket, &c.RemoteCommitment) - if err != nil { - return err - } - - // Lastly, we write the forwarding package to disk so that we - // can properly recover from failures and reforward HTLCs that - // have not received a corresponding settle/fail. - if err := c.Packager.AddFwdPkg(tx, fwdPkg); err != nil { - return err - } - - // Persist the unsigned acked updates that are not included - // in their new commitment. - updateBytes := chanBucket.Get(unsignedAckedUpdatesKey) - if updateBytes == nil { - // If there are no updates to sign, we don't need to - // filter out any updates. - newRemoteCommit = &newCommit.Commitment - return nil - } - - r := bytes.NewReader(updateBytes) - unsignedUpdates, err := deserializeLogUpdates(r) - if err != nil { - return err - } - - var validUpdates []LogUpdate - for _, upd := range unsignedUpdates { - lIdx := upd.LogIndex - - // Filter for updates that are not on the remote - // commitment. 
- if lIdx >= newCommit.Commitment.RemoteLogIndex { - validUpdates = append(validUpdates, upd) - } - } - - var b bytes.Buffer - err = serializeLogUpdates(&b, validUpdates) - if err != nil { - return er.Errorf("unable to serialize log updates: %v", err) - } - - err = chanBucket.Put(unsignedAckedUpdatesKey, b.Bytes()) - if err != nil { - return er.Errorf("unable to store under unsignedAckedUpdatesKey: %v", err) - } - - // Persist the local updates the peer hasn't yet signed so they - // can be restored after restart. - var b2 bytes.Buffer - err = serializeLogUpdates(&b2, updates) - if err != nil { - return err - } - - err = chanBucket.Put(remoteUnsignedLocalUpdatesKey, b2.Bytes()) - if err != nil { - return er.Errorf("unable to restore remote unsigned "+ - "local updates: %v", err) - } - - newRemoteCommit = &newCommit.Commitment - - return nil - }, func() { - newRemoteCommit = nil - }) - if err != nil { - return err - } - - // With the db transaction complete, we'll swap over the in-memory - // pointer of the new remote commitment, which was previously the tip - // of the commit chain. - c.RemoteCommitment = *newRemoteCommit - - return nil -} - -// NextLocalHtlcIndex returns the next unallocated local htlc index. To ensure -// this always returns the next index that has been not been allocated, this -// will first try to examine any pending commitments, before falling back to the -// last locked-in remote commitment. -func (c *OpenChannel) NextLocalHtlcIndex() (uint64, er.R) { - // First, load the most recent commit diff that we initiated for the - // remote party. If no pending commit is found, this is not treated as - // a critical error, since we can always fall back. - pendingRemoteCommit, err := c.RemoteCommitChainTip() - if err != nil && !ErrNoPendingCommit.Is(err) { - return 0, err - } - - // If a pending commit was found, its local htlc index will be at least - // as large as the one on our local commitment. 
- if pendingRemoteCommit != nil { - return pendingRemoteCommit.Commitment.LocalHtlcIndex, nil - } - - // Otherwise, fallback to using the local htlc index of their commitment. - return c.RemoteCommitment.LocalHtlcIndex, nil -} - -// LoadFwdPkgs scans the forwarding log for any packages that haven't been -// processed, and returns their deserialized log updates in map indexed by the -// remote commitment height at which the updates were locked in. -func (c *OpenChannel) LoadFwdPkgs() ([]*FwdPkg, er.R) { - c.RLock() - defer c.RUnlock() - - var fwdPkgs []*FwdPkg - if err := kvdb.View(c.Db, func(tx kvdb.RTx) er.R { - var err er.R - fwdPkgs, err = c.Packager.LoadFwdPkgs(tx) - return err - }, func() { - fwdPkgs = nil - }); err != nil { - return nil, err - } - - return fwdPkgs, nil -} - -// AckAddHtlcs updates the AckAddFilter containing any of the provided AddRefs -// indicating that a response to this Add has been committed to the remote party. -// Doing so will prevent these Add HTLCs from being reforwarded internally. -func (c *OpenChannel) AckAddHtlcs(addRefs ...AddRef) er.R { - c.Lock() - defer c.Unlock() - - return kvdb.Update(c.Db, func(tx kvdb.RwTx) er.R { - return c.Packager.AckAddHtlcs(tx, addRefs...) - }, func() {}) -} - -// AckSettleFails updates the SettleFailFilter containing any of the provided -// SettleFailRefs, indicating that the response has been delivered to the -// incoming link, corresponding to a particular AddRef. Doing so will prevent -// the responses from being retransmitted internally. -func (c *OpenChannel) AckSettleFails(settleFailRefs ...SettleFailRef) er.R { - c.Lock() - defer c.Unlock() - - return kvdb.Update(c.Db, func(tx kvdb.RwTx) er.R { - return c.Packager.AckSettleFails(tx, settleFailRefs...) - }, func() {}) -} - -// SetFwdFilter atomically sets the forwarding filter for the forwarding package -// identified by `height`. 
-func (c *OpenChannel) SetFwdFilter(height uint64, fwdFilter *PkgFilter) er.R { - c.Lock() - defer c.Unlock() - - return kvdb.Update(c.Db, func(tx kvdb.RwTx) er.R { - return c.Packager.SetFwdFilter(tx, height, fwdFilter) - }, func() {}) -} - -// RemoveFwdPkgs atomically removes forwarding packages specified by the remote -// commitment heights. If one of the intermediate RemovePkg calls fails, then the -// later packages won't be removed. -// -// NOTE: This method should only be called on packages marked FwdStateCompleted. -func (c *OpenChannel) RemoveFwdPkgs(heights ...uint64) er.R { - c.Lock() - defer c.Unlock() - - return kvdb.Update(c.Db, func(tx kvdb.RwTx) er.R { - for _, height := range heights { - err := c.Packager.RemovePkg(tx, height) - if err != nil { - return err - } - } - - return nil - }, func() {}) -} - -// RevocationLogTail returns the "tail", or the end of the current revocation -// log. This entry represents the last previous state for the remote node's -// commitment chain. The ChannelDelta returned by this method will always lag -// one state behind the most current (unrevoked) state of the remote node's -// commitment chain. -func (c *OpenChannel) RevocationLogTail() (*ChannelCommitment, er.R) { - c.RLock() - defer c.RUnlock() - - // If we haven't created any state updates yet, then we'll exit early as - // there's nothing to be found on disk in the revocation bucket. - if c.RemoteCommitment.CommitHeight == 0 { - return nil, nil - } - - var commit ChannelCommitment - if err := kvdb.View(c.Db, func(tx kvdb.RTx) er.R { - chanBucket, err := fetchChanBucket( - tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash, - ) - if err != nil { - return err - } - - logBucket := chanBucket.NestedReadBucket(revocationLogBucket) - if logBucket == nil { - return ErrNoPastDeltas.Default() - } - - // Once we have the bucket that stores the revocation log from - // this channel, we'll jump to the _last_ key in bucket. 
As we - // store the update number on disk in a big-endian format, - // this will retrieve the latest entry. - cursor := logBucket.ReadCursor() - _, tailLogEntry := cursor.Last() - logEntryReader := bytes.NewReader(tailLogEntry) - - // Once we have the entry, we'll decode it into the channel - // delta pointer we created above. - var dbErr er.R - commit, dbErr = deserializeChanCommit(logEntryReader) - if dbErr != nil { - return dbErr - } - - return nil - }, func() {}); err != nil { - return nil, err - } - - return &commit, nil -} - -// CommitmentHeight returns the current commitment height. The commitment -// height represents the number of updates to the commitment state to date. -// This value is always monotonically increasing. This method is provided in -// order to allow multiple instances of a particular open channel to obtain a -// consistent view of the number of channel updates to date. -func (c *OpenChannel) CommitmentHeight() (uint64, er.R) { - c.RLock() - defer c.RUnlock() - - var height uint64 - err := kvdb.View(c.Db, func(tx kvdb.RTx) er.R { - // Get the bucket dedicated to storing the metadata for open - // channels. - chanBucket, err := fetchChanBucket( - tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash, - ) - if err != nil { - return err - } - - commit, err := fetchChanCommitment(chanBucket, true) - if err != nil { - return err - } - - height = commit.CommitHeight - return nil - }, func() { - height = 0 - }) - if err != nil { - return 0, err - } - - return height, nil -} - -// FindPreviousState scans through the append-only log in an attempt to recover -// the previous channel state indicated by the update number. This method is -// intended to be used for obtaining the relevant data needed to claim all -// funds rightfully spendable in the case of an on-chain broadcast of the -// commitment transaction. 
-func (c *OpenChannel) FindPreviousState(updateNum uint64) (*ChannelCommitment, er.R) { - c.RLock() - defer c.RUnlock() - - var commit ChannelCommitment - err := kvdb.View(c.Db, func(tx kvdb.RTx) er.R { - chanBucket, err := fetchChanBucket( - tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash, - ) - if err != nil { - return err - } - - logBucket := chanBucket.NestedReadBucket(revocationLogBucket) - if logBucket == nil { - return ErrNoPastDeltas.Default() - } - - c, err := fetchChannelLogEntry(logBucket, updateNum) - if err != nil { - return err - } - - commit = c - return nil - }, func() {}) - if err != nil { - return nil, err - } - - return &commit, nil -} - -// ClosureType is an enum like structure that details exactly _how_ a channel -// was closed. Three closure types are currently possible: none, cooperative, -// local force close, remote force close, and (remote) breach. -type ClosureType uint8 - -const ( - // CooperativeClose indicates that a channel has been closed - // cooperatively. This means that both channel peers were online and - // signed a new transaction paying out the settled balance of the - // contract. - CooperativeClose ClosureType = 0 - - // LocalForceClose indicates that we have unilaterally broadcast our - // current commitment state on-chain. - LocalForceClose ClosureType = 1 - - // RemoteForceClose indicates that the remote peer has unilaterally - // broadcast their current commitment state on-chain. - RemoteForceClose ClosureType = 4 - - // BreachClose indicates that the remote peer attempted to broadcast a - // prior _revoked_ channel state. - BreachClose ClosureType = 2 - - // FundingCanceled indicates that the channel never was fully opened - // before it was marked as closed in the database. This can happen if - // we or the remote fail at some point during the opening workflow, or - // we timeout waiting for the funding transaction to be confirmed. 
- FundingCanceled ClosureType = 3 - - // Abandoned indicates that the channel state was removed without - // any further actions. This is intended to clean up unusable - // channels during development. - Abandoned ClosureType = 5 -) - -// ChannelCloseSummary contains the final state of a channel at the point it -// was closed. Once a channel is closed, all the information pertaining to that -// channel within the openChannelBucket is deleted, and a compact summary is -// put in place instead. -type ChannelCloseSummary struct { - // ChanPoint is the outpoint for this channel's funding transaction, - // and is used as a unique identifier for the channel. - ChanPoint wire.OutPoint - - // ShortChanID encodes the exact location in the chain in which the - // channel was initially confirmed. This includes: the block height, - // transaction index, and the output within the target transaction. - ShortChanID lnwire.ShortChannelID - - // ChainHash is the hash of the genesis block that this channel resides - // within. - ChainHash chainhash.Hash - - // ClosingTXID is the txid of the transaction which ultimately closed - // this channel. - ClosingTXID chainhash.Hash - - // RemotePub is the public key of the remote peer that we formerly had - // a channel with. - RemotePub *btcec.PublicKey - - // Capacity was the total capacity of the channel. - Capacity btcutil.Amount - - // CloseHeight is the height at which the funding transaction was - // spent. - CloseHeight uint32 - - // SettledBalance is our total balance settled balance at the time of - // channel closure. This _does not_ include the sum of any outputs that - // have been time-locked as a result of the unilateral channel closure. - SettledBalance btcutil.Amount - - // TimeLockedBalance is the sum of all the time-locked outputs at the - // time of channel closure. If we triggered the force closure of this - // channel, then this value will be non-zero if our settled output is - // above the dust limit. 
If we were on the receiving side of a channel - // force closure, then this value will be non-zero if we had any - // outstanding outgoing HTLC's at the time of channel closure. - TimeLockedBalance btcutil.Amount - - // CloseType details exactly _how_ the channel was closed. Five closure - // types are possible: cooperative, local force, remote force, breach - // and funding canceled. - CloseType ClosureType - - // IsPending indicates whether this channel is in the 'pending close' - // state, which means the channel closing transaction has been - // confirmed, but not yet been fully resolved. In the case of a channel - // that has been cooperatively closed, it will go straight into the - // fully resolved state as soon as the closing transaction has been - // confirmed. However, for channels that have been force closed, they'll - // stay marked as "pending" until _all_ the pending funds have been - // swept. - IsPending bool - - // RemoteCurrentRevocation is the current revocation for their - // commitment transaction. However, since this is the derived public key, - // we don't yet have the private key so we aren't yet able to verify - // that it's actually in the hash chain. - RemoteCurrentRevocation *btcec.PublicKey - - // RemoteNextRevocation is the revocation key to be used for the *next* - // commitment transaction we create for the local node. Within the - // specification, this value is referred to as the - // per-commitment-point. - RemoteNextRevocation *btcec.PublicKey - - // LocalChanCfg is the channel configuration for the local node. - LocalChanConfig ChannelConfig - - // LastChanSyncMsg is the ChannelReestablish message for this channel - // for the state at the point where it was closed. - LastChanSyncMsg *lnwire.ChannelReestablish -} - -// CloseChannel closes a previously active Lightning channel. Closing a channel -// entails deleting all saved state within the database concerning this -// channel. 
This method also takes a struct that summarizes the state of the -// channel at closing, this compact representation will be the only component -// of a channel left over after a full closing. It takes an optional set of -// channel statuses which will be written to the historical channel bucket. -// These statuses are used to record close initiators. -func (c *OpenChannel) CloseChannel(summary *ChannelCloseSummary, - statuses ...ChannelStatus) er.R { - - c.Lock() - defer c.Unlock() - - return kvdb.Update(c.Db, func(tx kvdb.RwTx) er.R { - openChanBucket := tx.ReadWriteBucket(openChannelBucket) - if openChanBucket == nil { - return ErrNoChanDBExists.Default() - } - - nodePub := c.IdentityPub.SerializeCompressed() - nodeChanBucket := openChanBucket.NestedReadWriteBucket(nodePub) - if nodeChanBucket == nil { - return ErrNoActiveChannels.Default() - } - - chainBucket := nodeChanBucket.NestedReadWriteBucket(c.ChainHash[:]) - if chainBucket == nil { - return ErrNoActiveChannels.Default() - } - - var chanPointBuf bytes.Buffer - err := writeOutpoint(&chanPointBuf, &c.FundingOutpoint) - if err != nil { - return err - } - chanKey := chanPointBuf.Bytes() - chanBucket := chainBucket.NestedReadWriteBucket( - chanKey, - ) - if chanBucket == nil { - return ErrNoActiveChannels.Default() - } - - // Before we delete the channel state, we'll read out the full - // details, as we'll also store portions of this information - // for record keeping. - chanState, err := fetchOpenChannel( - chanBucket, &c.FundingOutpoint, - ) - if err != nil { - return err - } - - // Now that the index to this channel has been deleted, purge - // the remaining channel metadata from the database. - err = deleteOpenChannel(chanBucket) - if err != nil { - return err - } - - // We'll also remove the channel from the frozen channel bucket - // if we need to. 
- if c.ChanType.IsFrozen() { - err := deleteThawHeight(chanBucket) - if err != nil { - return err - } - } - - // With the base channel data deleted, attempt to delete the - // information stored within the revocation log. - logBucket := chanBucket.NestedReadWriteBucket(revocationLogBucket) - if logBucket != nil { - err = chanBucket.DeleteNestedBucket(revocationLogBucket) - if err != nil { - return err - } - } - - err = chainBucket.DeleteNestedBucket(chanPointBuf.Bytes()) - if err != nil { - return err - } - - // Add channel state to the historical channel bucket. - historicalBucket, err := tx.CreateTopLevelBucket( - historicalChannelBucket, - ) - if err != nil { - return err - } - - historicalChanBucket, err := - historicalBucket.CreateBucketIfNotExists(chanKey) - if err != nil { - return err - } - - // Apply any additional statuses to the channel state. - for _, status := range statuses { - chanState.chanStatus |= status - } - - err = putOpenChannel(historicalChanBucket, chanState) - if err != nil { - return err - } - - // Finally, create a summary of this channel in the closed - // channel bucket for this node. - return putChannelCloseSummary( - tx, chanPointBuf.Bytes(), summary, chanState, - ) - }, func() {}) -} - -// ChannelSnapshot is a frozen snapshot of the current channel state. A -// snapshot is detached from the original channel that generated it, providing -// read-only access to the current or prior state of an active channel. -// -// TODO(roasbeef): remove all together? pretty much just commitment -type ChannelSnapshot struct { - // RemoteIdentity is the identity public key of the remote node that we - // are maintaining the open channel with. - RemoteIdentity btcec.PublicKey - - // ChanPoint is the outpoint that created the channel. This output is - // found within the funding transaction and uniquely identified the - // channel on the resident chain. 
- ChannelPoint wire.OutPoint - - // ChainHash is the genesis hash of the chain that the channel resides - // within. - ChainHash chainhash.Hash - - // Capacity is the total capacity of the channel. - Capacity btcutil.Amount - - // TotalMSatSent is the total number of milli-satoshis we've sent - // within this channel. - TotalMSatSent lnwire.MilliSatoshi - - // TotalMSatReceived is the total number of milli-satoshis we've - // received within this channel. - TotalMSatReceived lnwire.MilliSatoshi - - // ChannelCommitment is the current up-to-date commitment for the - // target channel. - ChannelCommitment -} - -// Snapshot returns a read-only snapshot of the current channel state. This -// snapshot includes information concerning the current settled balance within -// the channel, metadata detailing total flows, and any outstanding HTLCs. -func (c *OpenChannel) Snapshot() *ChannelSnapshot { - c.RLock() - defer c.RUnlock() - - localCommit := c.LocalCommitment - snapshot := &ChannelSnapshot{ - RemoteIdentity: *c.IdentityPub, - ChannelPoint: c.FundingOutpoint, - Capacity: c.Capacity, - TotalMSatSent: c.TotalMSatSent, - TotalMSatReceived: c.TotalMSatReceived, - ChainHash: c.ChainHash, - ChannelCommitment: ChannelCommitment{ - LocalBalance: localCommit.LocalBalance, - RemoteBalance: localCommit.RemoteBalance, - CommitHeight: localCommit.CommitHeight, - CommitFee: localCommit.CommitFee, - }, - } - - // Copy over the current set of HTLCs to ensure the caller can't mutate - // our internal state. - snapshot.Htlcs = make([]HTLC, len(localCommit.Htlcs)) - for i, h := range localCommit.Htlcs { - snapshot.Htlcs[i] = h.Copy() - } - - return snapshot -} - -// LatestCommitments returns the two latest commitments for both the local and -// remote party. These commitments are read from disk to ensure that only the -// latest fully committed state is returned. The first commitment returned is -// the local commitment, and the second returned is the remote commitment. 
-func (c *OpenChannel) LatestCommitments() (*ChannelCommitment, *ChannelCommitment, er.R) { - err := kvdb.View(c.Db, func(tx kvdb.RTx) er.R { - chanBucket, err := fetchChanBucket( - tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash, - ) - if err != nil { - return err - } - - return fetchChanCommitments(chanBucket, c) - }, func() {}) - if err != nil { - return nil, nil, err - } - - return &c.LocalCommitment, &c.RemoteCommitment, nil -} - -// RemoteRevocationStore returns the most up to date commitment version of the -// revocation storage tree for the remote party. This method can be used when -// acting on a possible contract breach to ensure, that the caller has the most -// up to date information required to deliver justice. -func (c *OpenChannel) RemoteRevocationStore() (shachain.Store, er.R) { - err := kvdb.View(c.Db, func(tx kvdb.RTx) er.R { - chanBucket, err := fetchChanBucket( - tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash, - ) - if err != nil { - return err - } - - return fetchChanRevocationState(chanBucket, c) - }, func() {}) - if err != nil { - return nil, err - } - - return c.RevocationStore, nil -} - -// AbsoluteThawHeight determines a frozen channel's absolute thaw height. If the -// channel is not frozen, then 0 is returned. -func (c *OpenChannel) AbsoluteThawHeight() (uint32, er.R) { - // Only frozen channels have a thaw height. - if !c.ChanType.IsFrozen() { - return 0, nil - } - - // If the channel's thaw height is below the absolute threshold, then - // it's interpreted as a relative height to the chain's current height. - if c.ThawHeight < AbsoluteThawHeightThreshold { - // We'll only known of the channel's short ID once it's - // confirmed. 
- if c.IsPending { - return 0, er.New("cannot use relative thaw " + - "height for unconfirmed channel") - } - return c.ShortChannelID.BlockHeight + c.ThawHeight, nil - } - return c.ThawHeight, nil -} - -func putChannelCloseSummary(tx kvdb.RwTx, chanID []byte, - summary *ChannelCloseSummary, lastChanState *OpenChannel) er.R { - - closedChanBucket, err := tx.CreateTopLevelBucket(closedChannelBucket) - if err != nil { - return err - } - - summary.RemoteCurrentRevocation = lastChanState.RemoteCurrentRevocation - summary.RemoteNextRevocation = lastChanState.RemoteNextRevocation - summary.LocalChanConfig = lastChanState.LocalChanCfg - - var b bytes.Buffer - if err := serializeChannelCloseSummary(&b, summary); err != nil { - return err - } - - return closedChanBucket.Put(chanID, b.Bytes()) -} - -func serializeChannelCloseSummary(w io.Writer, cs *ChannelCloseSummary) er.R { - err := WriteElements(w, - cs.ChanPoint, cs.ShortChanID, cs.ChainHash, cs.ClosingTXID, - cs.CloseHeight, cs.RemotePub, cs.Capacity, cs.SettledBalance, - cs.TimeLockedBalance, cs.CloseType, cs.IsPending, - ) - if err != nil { - return err - } - - // If this is a close channel summary created before the addition of - // the new fields, then we can exit here. - if cs.RemoteCurrentRevocation == nil { - return WriteElements(w, false) - } - - // If fields are present, write boolean to indicate this, and continue. - if err := WriteElements(w, true); err != nil { - return err - } - - if err := WriteElements(w, cs.RemoteCurrentRevocation); err != nil { - return err - } - - if err := writeChanConfig(w, &cs.LocalChanConfig); err != nil { - return err - } - - // The RemoteNextRevocation field is optional, as it's possible for a - // channel to be closed before we learn of the next unrevoked - // revocation point for the remote party. Write a boolen indicating - // whether this field is present or not. 
- if err := WriteElements(w, cs.RemoteNextRevocation != nil); err != nil { - return err - } - - // Write the field, if present. - if cs.RemoteNextRevocation != nil { - if err = WriteElements(w, cs.RemoteNextRevocation); err != nil { - return err - } - } - - // Write whether the channel sync message is present. - if err := WriteElements(w, cs.LastChanSyncMsg != nil); err != nil { - return err - } - - // Write the channel sync message, if present. - if cs.LastChanSyncMsg != nil { - if err := WriteElements(w, cs.LastChanSyncMsg); err != nil { - return err - } - } - - return nil -} - -func deserializeCloseChannelSummary(r io.Reader) (*ChannelCloseSummary, er.R) { - c := &ChannelCloseSummary{} - - err := ReadElements(r, - &c.ChanPoint, &c.ShortChanID, &c.ChainHash, &c.ClosingTXID, - &c.CloseHeight, &c.RemotePub, &c.Capacity, &c.SettledBalance, - &c.TimeLockedBalance, &c.CloseType, &c.IsPending, - ) - if err != nil { - return nil, err - } - - // We'll now check to see if the channel close summary was encoded with - // any of the additional optional fields. - var hasNewFields bool - err = ReadElements(r, &hasNewFields) - if err != nil { - return nil, err - } - - // If fields are not present, we can return. - if !hasNewFields { - return c, nil - } - - // Otherwise read the new fields. - if err := ReadElements(r, &c.RemoteCurrentRevocation); err != nil { - return nil, err - } - - if err := readChanConfig(r, &c.LocalChanConfig); err != nil { - return nil, err - } - - // Finally, we'll attempt to read the next unrevoked commitment point - // for the remote party. If we closed the channel before receiving a - // funding locked message then this might not be present. A boolean - // indicating whether the field is present will come first. - var hasRemoteNextRevocation bool - err = ReadElements(r, &hasRemoteNextRevocation) - if err != nil { - return nil, err - } - - // If this field was written, read it. 
- if hasRemoteNextRevocation { - err = ReadElements(r, &c.RemoteNextRevocation) - if err != nil { - return nil, err - } - } - - // Check if we have a channel sync message to read. - var hasChanSyncMsg bool - err = ReadElements(r, &hasChanSyncMsg) - if er.Wrapped(err) == io.EOF { - return c, nil - } else if err != nil { - return nil, err - } - - // If a chan sync message is present, read it. - if hasChanSyncMsg { - // We must pass in reference to a lnwire.Message for the codec - // to support it. - var msg lnwire.Message - if err := ReadElements(r, &msg); err != nil { - return nil, err - } - - chanSync, ok := msg.(*lnwire.ChannelReestablish) - if !ok { - return nil, er.New("unable cast db Message to " + - "ChannelReestablish") - } - c.LastChanSyncMsg = chanSync - } - - return c, nil -} - -func writeChanConfig(b io.Writer, c *ChannelConfig) er.R { - return WriteElements(b, - c.DustLimit, c.MaxPendingAmount, c.ChanReserve, c.MinHTLC, - c.MaxAcceptedHtlcs, c.CsvDelay, c.MultiSigKey, - c.RevocationBasePoint, c.PaymentBasePoint, c.DelayBasePoint, - c.HtlcBasePoint, - ) -} - -// fundingTxPresent returns true if expect the funding transcation to be found -// on disk or already populated within the passed oen chanel struct. 
-func fundingTxPresent(channel *OpenChannel) bool { - chanType := channel.ChanType - - return chanType.IsSingleFunder() && chanType.HasFundingTx() && - channel.IsInitiator && - !channel.hasChanStatus(ChanStatusRestored) -} - -func putChanInfo(chanBucket kvdb.RwBucket, channel *OpenChannel) er.R { - var w bytes.Buffer - if err := WriteElements(&w, - channel.ChanType, channel.ChainHash, channel.FundingOutpoint, - channel.ShortChannelID, channel.IsPending, channel.IsInitiator, - channel.chanStatus, channel.FundingBroadcastHeight, - channel.NumConfsRequired, channel.ChannelFlags, - channel.IdentityPub, channel.Capacity, channel.TotalMSatSent, - channel.TotalMSatReceived, - ); err != nil { - return err - } - - // For single funder channels that we initiated, and we have the - // funding transaction, then write the funding txn. - if fundingTxPresent(channel) { - if err := WriteElement(&w, channel.FundingTxn); err != nil { - return err - } - } - - if err := writeChanConfig(&w, &channel.LocalChanCfg); err != nil { - return err - } - if err := writeChanConfig(&w, &channel.RemoteChanCfg); err != nil { - return err - } - - if err := chanBucket.Put(chanInfoKey, w.Bytes()); err != nil { - return err - } - - // Finally, add optional shutdown scripts for the local and remote peer if - // they are present. - if err := putOptionalUpfrontShutdownScript( - chanBucket, localUpfrontShutdownKey, channel.LocalShutdownScript, - ); err != nil { - return err - } - - return putOptionalUpfrontShutdownScript( - chanBucket, remoteUpfrontShutdownKey, channel.RemoteShutdownScript, - ) -} - -// putOptionalUpfrontShutdownScript adds a shutdown script under the key -// provided if it has a non-zero length. -func putOptionalUpfrontShutdownScript(chanBucket kvdb.RwBucket, key []byte, - script []byte) er.R { - // If the script is empty, we do not need to add anything. 
- if len(script) == 0 { - return nil - } - - var w bytes.Buffer - if err := WriteElement(&w, script); err != nil { - return err - } - - return chanBucket.Put(key, w.Bytes()) -} - -// getOptionalUpfrontShutdownScript reads the shutdown script stored under the -// key provided if it is present. Upfront shutdown scripts are optional, so the -// function returns with no error if the key is not present. -func getOptionalUpfrontShutdownScript(chanBucket kvdb.RBucket, key []byte, - script *lnwire.DeliveryAddress) er.R { - - // Return early if the bucket does not exit, a shutdown script was not set. - bs := chanBucket.Get(key) - if bs == nil { - return nil - } - - var tempScript []byte - r := bytes.NewReader(bs) - if err := ReadElement(r, &tempScript); err != nil { - return err - } - *script = tempScript - - return nil -} - -func serializeChanCommit(w io.Writer, c *ChannelCommitment) er.R { - if err := WriteElements(w, - c.CommitHeight, c.LocalLogIndex, c.LocalHtlcIndex, - c.RemoteLogIndex, c.RemoteHtlcIndex, c.LocalBalance, - c.RemoteBalance, c.CommitFee, c.FeePerKw, c.CommitTx, - c.CommitSig, - ); err != nil { - return err - } - - return SerializeHtlcs(w, c.Htlcs...) -} - -func putChanCommitment(chanBucket kvdb.RwBucket, c *ChannelCommitment, - local bool) er.R { - - var commitKey []byte - if local { - commitKey = append(chanCommitmentKey, byte(0x00)) - } else { - commitKey = append(chanCommitmentKey, byte(0x01)) - } - - var b bytes.Buffer - if err := serializeChanCommit(&b, c); err != nil { - return err - } - - return chanBucket.Put(commitKey, b.Bytes()) -} - -func putChanCommitments(chanBucket kvdb.RwBucket, channel *OpenChannel) er.R { - // If this is a restored channel, then we don't have any commitments to - // write. 
- if channel.hasChanStatus(ChanStatusRestored) { - return nil - } - - err := putChanCommitment( - chanBucket, &channel.LocalCommitment, true, - ) - if err != nil { - return err - } - - return putChanCommitment( - chanBucket, &channel.RemoteCommitment, false, - ) -} - -func putChanRevocationState(chanBucket kvdb.RwBucket, channel *OpenChannel) er.R { - - var b bytes.Buffer - err := WriteElements( - &b, channel.RemoteCurrentRevocation, channel.RevocationProducer, - channel.RevocationStore, - ) - if err != nil { - return err - } - - // TODO(roasbeef): don't keep producer on disk - - // If the next revocation is present, which is only the case after the - // FundingLocked message has been sent, then we'll write it to disk. - if channel.RemoteNextRevocation != nil { - err = WriteElements(&b, channel.RemoteNextRevocation) - if err != nil { - return err - } - } - - return chanBucket.Put(revocationStateKey, b.Bytes()) -} - -func readChanConfig(b io.Reader, c *ChannelConfig) er.R { - return ReadElements(b, - &c.DustLimit, &c.MaxPendingAmount, &c.ChanReserve, - &c.MinHTLC, &c.MaxAcceptedHtlcs, &c.CsvDelay, - &c.MultiSigKey, &c.RevocationBasePoint, - &c.PaymentBasePoint, &c.DelayBasePoint, - &c.HtlcBasePoint, - ) -} - -func fetchChanInfo(chanBucket kvdb.RBucket, channel *OpenChannel) er.R { - infoBytes := chanBucket.Get(chanInfoKey) - if infoBytes == nil { - return ErrNoChanInfoFound.Default() - } - r := bytes.NewReader(infoBytes) - - if err := ReadElements(r, - &channel.ChanType, &channel.ChainHash, &channel.FundingOutpoint, - &channel.ShortChannelID, &channel.IsPending, &channel.IsInitiator, - &channel.chanStatus, &channel.FundingBroadcastHeight, - &channel.NumConfsRequired, &channel.ChannelFlags, - &channel.IdentityPub, &channel.Capacity, &channel.TotalMSatSent, - &channel.TotalMSatReceived, - ); err != nil { - return err - } - - // For single funder channels that we initiated and have the funding - // transaction to, read the funding txn. 
- if fundingTxPresent(channel) { - if err := ReadElement(r, &channel.FundingTxn); err != nil { - return err - } - } - - if err := readChanConfig(r, &channel.LocalChanCfg); err != nil { - return err - } - if err := readChanConfig(r, &channel.RemoteChanCfg); err != nil { - return err - } - - channel.Packager = NewChannelPackager(channel.ShortChannelID) - - // Finally, read the optional shutdown scripts. - if err := getOptionalUpfrontShutdownScript( - chanBucket, localUpfrontShutdownKey, &channel.LocalShutdownScript, - ); err != nil { - return err - } - - return getOptionalUpfrontShutdownScript( - chanBucket, remoteUpfrontShutdownKey, &channel.RemoteShutdownScript, - ) -} - -func deserializeChanCommit(r io.Reader) (ChannelCommitment, er.R) { - var c ChannelCommitment - - err := ReadElements(r, - &c.CommitHeight, &c.LocalLogIndex, &c.LocalHtlcIndex, &c.RemoteLogIndex, - &c.RemoteHtlcIndex, &c.LocalBalance, &c.RemoteBalance, - &c.CommitFee, &c.FeePerKw, &c.CommitTx, &c.CommitSig, - ) - if err != nil { - return c, err - } - - c.Htlcs, err = DeserializeHtlcs(r) - if err != nil { - return c, err - } - - return c, nil -} - -func fetchChanCommitment(chanBucket kvdb.RBucket, local bool) (ChannelCommitment, er.R) { - var commitKey []byte - if local { - commitKey = append(chanCommitmentKey, byte(0x00)) - } else { - commitKey = append(chanCommitmentKey, byte(0x01)) - } - - commitBytes := chanBucket.Get(commitKey) - if commitBytes == nil { - return ChannelCommitment{}, ErrNoCommitmentsFound.Default() - } - - r := bytes.NewReader(commitBytes) - return deserializeChanCommit(r) -} - -func fetchChanCommitments(chanBucket kvdb.RBucket, channel *OpenChannel) er.R { - var err er.R - - // If this is a restored channel, then we don't have any commitments to - // read. 
- if channel.hasChanStatus(ChanStatusRestored) { - return nil - } - - channel.LocalCommitment, err = fetchChanCommitment(chanBucket, true) - if err != nil { - return err - } - channel.RemoteCommitment, err = fetchChanCommitment(chanBucket, false) - if err != nil { - return err - } - - return nil -} - -func fetchChanRevocationState(chanBucket kvdb.RBucket, channel *OpenChannel) er.R { - revBytes := chanBucket.Get(revocationStateKey) - if revBytes == nil { - return ErrNoRevocationsFound.Default() - } - r := bytes.NewReader(revBytes) - - err := ReadElements( - r, &channel.RemoteCurrentRevocation, &channel.RevocationProducer, - &channel.RevocationStore, - ) - if err != nil { - return err - } - - // If there aren't any bytes left in the buffer, then we don't yet have - // the next remote revocation, so we can exit early here. - if r.Len() == 0 { - return nil - } - - // Otherwise we'll read the next revocation for the remote party which - // is always the last item within the buffer. - return ReadElements(r, &channel.RemoteNextRevocation) -} - -func deleteOpenChannel(chanBucket kvdb.RwBucket) er.R { - - if err := chanBucket.Delete(chanInfoKey); err != nil { - return err - } - - err := chanBucket.Delete(append(chanCommitmentKey, byte(0x00))) - if err != nil { - return err - } - err = chanBucket.Delete(append(chanCommitmentKey, byte(0x01))) - if err != nil { - return err - } - - if err := chanBucket.Delete(revocationStateKey); err != nil { - return err - } - - if diff := chanBucket.Get(commitDiffKey); diff != nil { - return chanBucket.Delete(commitDiffKey) - } - - return nil - -} - -// makeLogKey converts a uint64 into an 8 byte array. 
-func makeLogKey(updateNum uint64) [8]byte { - var key [8]byte - byteOrder.PutUint64(key[:], updateNum) - return key -} - -func appendChannelLogEntry(log kvdb.RwBucket, - commit *ChannelCommitment) er.R { - - var b bytes.Buffer - if err := serializeChanCommit(&b, commit); err != nil { - return err - } - - logEntrykey := makeLogKey(commit.CommitHeight) - return log.Put(logEntrykey[:], b.Bytes()) -} - -func fetchChannelLogEntry(log kvdb.RBucket, - updateNum uint64) (ChannelCommitment, er.R) { - - logEntrykey := makeLogKey(updateNum) - commitBytes := log.Get(logEntrykey[:]) - if commitBytes == nil { - return ChannelCommitment{}, errLogEntryNotFound.Default() - } - - commitReader := bytes.NewReader(commitBytes) - return deserializeChanCommit(commitReader) -} - -func fetchThawHeight(chanBucket kvdb.RBucket) (uint32, er.R) { - var height uint32 - - heightBytes := chanBucket.Get(frozenChanKey) - heightReader := bytes.NewReader(heightBytes) - - if err := ReadElements(heightReader, &height); err != nil { - return 0, err - } - - return height, nil -} - -func storeThawHeight(chanBucket kvdb.RwBucket, height uint32) er.R { - var heightBuf bytes.Buffer - if err := WriteElements(&heightBuf, height); err != nil { - return err - } - - return chanBucket.Put(frozenChanKey, heightBuf.Bytes()) -} - -func deleteThawHeight(chanBucket kvdb.RwBucket) er.R { - return chanBucket.Delete(frozenChanKey) -} diff --git a/lnd/channeldb/channel_cache.go b/lnd/channeldb/channel_cache.go deleted file mode 100644 index 2f26c185..00000000 --- a/lnd/channeldb/channel_cache.go +++ /dev/null @@ -1,50 +0,0 @@ -package channeldb - -// channelCache is an in-memory cache used to improve the performance of -// ChanUpdatesInHorizon. It caches the chan info and edge policies for a -// particular channel. -type channelCache struct { - n int - channels map[uint64]ChannelEdge -} - -// newChannelCache creates a new channelCache with maximum capacity of n -// channels. 
-func newChannelCache(n int) *channelCache { - return &channelCache{ - n: n, - channels: make(map[uint64]ChannelEdge), - } -} - -// get returns the channel from the cache, if it exists. -func (c *channelCache) get(chanid uint64) (ChannelEdge, bool) { - channel, ok := c.channels[chanid] - return channel, ok -} - -// insert adds the entry to the channel cache. If an entry for chanid already -// exists, it will be replaced with the new entry. If the entry doesn't exist, -// it will be inserted to the cache, performing a random eviction if the cache -// is at capacity. -func (c *channelCache) insert(chanid uint64, channel ChannelEdge) { - // If entry exists, replace it. - if _, ok := c.channels[chanid]; ok { - c.channels[chanid] = channel - return - } - - // Otherwise, evict an entry at random and insert. - if len(c.channels) == c.n { - for id := range c.channels { - delete(c.channels, id) - break - } - } - c.channels[chanid] = channel -} - -// remove deletes an edge for chanid from the cache, if it exists. -func (c *channelCache) remove(chanid uint64) { - delete(c.channels, chanid) -} diff --git a/lnd/channeldb/channel_cache_test.go b/lnd/channeldb/channel_cache_test.go deleted file mode 100644 index d776c131..00000000 --- a/lnd/channeldb/channel_cache_test.go +++ /dev/null @@ -1,105 +0,0 @@ -package channeldb - -import ( - "reflect" - "testing" -) - -// TestChannelCache checks the behavior of the channelCache with respect to -// insertion, eviction, and removal of cache entries. -func TestChannelCache(t *testing.T) { - const cacheSize = 100 - - // Create a new channel cache with the configured max size. - c := newChannelCache(cacheSize) - - // As a sanity check, assert that querying the empty cache does not - // return an entry. - _, ok := c.get(0) - if ok { - t.Fatalf("channel cache should be empty") - } - - // Now, fill up the cache entirely. 
- for i := uint64(0); i < cacheSize; i++ { - c.insert(i, channelForInt(i)) - } - - // Assert that the cache has all of the entries just inserted, since no - // eviction should occur until we try to surpass the max size. - assertHasChanEntries(t, c, 0, cacheSize) - - // Now, insert a new element that causes the cache to evict an element. - c.insert(cacheSize, channelForInt(cacheSize)) - - // Assert that the cache has this last entry, as the cache should evict - // some prior element and not the newly inserted one. - assertHasChanEntries(t, c, cacheSize, cacheSize) - - // Iterate over all inserted elements and construct a set of the evicted - // elements. - evicted := make(map[uint64]struct{}) - for i := uint64(0); i < cacheSize+1; i++ { - _, ok := c.get(i) - if !ok { - evicted[i] = struct{}{} - } - } - - // Assert that exactly one element has been evicted. - numEvicted := len(evicted) - if numEvicted != 1 { - t.Fatalf("expected one evicted entry, got: %d", numEvicted) - } - - // Remove the highest item which initially caused the eviction and - // reinsert the element that was evicted prior. - c.remove(cacheSize) - for i := range evicted { - c.insert(i, channelForInt(i)) - } - - // Since the removal created an extra slot, the last insertion should - // not have caused an eviction and the entries for all channels in the - // original set that filled the cache should be present. - assertHasChanEntries(t, c, 0, cacheSize) - - // Finally, reinsert the existing set back into the cache and test that - // the cache still has all the entries. If the randomized eviction were - // happening on inserts for existing cache items, we expect this to fail - // with high probability. 
- for i := uint64(0); i < cacheSize; i++ { - c.insert(i, channelForInt(i)) - } - assertHasChanEntries(t, c, 0, cacheSize) - -} - -// assertHasEntries queries the edge cache for all channels in the range [start, -// end), asserting that they exist and their value matches the entry produced by -// entryForInt. -func assertHasChanEntries(t *testing.T, c *channelCache, start, end uint64) { - t.Helper() - - for i := start; i < end; i++ { - entry, ok := c.get(i) - if !ok { - t.Fatalf("channel cache should contain chan %d", i) - } - - expEntry := channelForInt(i) - if !reflect.DeepEqual(entry, expEntry) { - t.Fatalf("entry mismatch, want: %v, got: %v", - expEntry, entry) - } - } -} - -// channelForInt generates a unique ChannelEdge given an integer. -func channelForInt(i uint64) ChannelEdge { - return ChannelEdge{ - Info: &ChannelEdgeInfo{ - ChannelID: i, - }, - } -} diff --git a/lnd/channeldb/channel_test.go b/lnd/channeldb/channel_test.go deleted file mode 100644 index 00df46b9..00000000 --- a/lnd/channeldb/channel_test.go +++ /dev/null @@ -1,1617 +0,0 @@ -package channeldb - -import ( - "bytes" - "math/rand" - "net" - "reflect" - "runtime" - "testing" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - - "github.com/davecgh/go-spew/spew" - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/lnd/clock" - "github.com/pkt-cash/pktd/lnd/keychain" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/shachain" - _ "github.com/pkt-cash/pktd/pktwallet/walletdb/bdb" - "github.com/pkt-cash/pktd/wire" -) - -var ( - key = [chainhash.HashSize]byte{ - 0x81, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda, - 0x68, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17, - 0xd, 0xe7, 0x93, 0xe4, 0xb7, 0x25, 0xb8, 0x4d, - 0x1e, 0xb, 0x4c, 0xf9, 0x9e, 0xc5, 0x8c, 0xe9, - } - rev = [chainhash.HashSize]byte{ - 0x51, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda, - 
0x48, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17, - 0x2d, 0xe7, 0x93, 0xe4, - } - testTx = &wire.MsgTx{ - Version: 1, - TxIn: []*wire.TxIn{ - { - PreviousOutPoint: wire.OutPoint{ - Hash: chainhash.Hash{}, - Index: 0xffffffff, - }, - SignatureScript: []byte{0x04, 0x31, 0xdc, 0x00, 0x1b, 0x01, 0x62}, - Sequence: 0xffffffff, - }, - }, - TxOut: []*wire.TxOut{ - { - Value: 5000000000, - PkScript: []byte{ - 0x41, // OP_DATA_65 - 0x04, 0xd6, 0x4b, 0xdf, 0xd0, 0x9e, 0xb1, 0xc5, - 0xfe, 0x29, 0x5a, 0xbd, 0xeb, 0x1d, 0xca, 0x42, - 0x81, 0xbe, 0x98, 0x8e, 0x2d, 0xa0, 0xb6, 0xc1, - 0xc6, 0xa5, 0x9d, 0xc2, 0x26, 0xc2, 0x86, 0x24, - 0xe1, 0x81, 0x75, 0xe8, 0x51, 0xc9, 0x6b, 0x97, - 0x3d, 0x81, 0xb0, 0x1c, 0xc3, 0x1f, 0x04, 0x78, - 0x34, 0xbc, 0x06, 0xd6, 0xd6, 0xed, 0xf6, 0x20, - 0xd1, 0x84, 0x24, 0x1a, 0x6a, 0xed, 0x8b, 0x63, - 0xa6, // 65-byte signature - 0xac, // OP_CHECKSIG - }, - }, - }, - LockTime: 5, - } - privKey, pubKey = btcec.PrivKeyFromBytes(btcec.S256(), key[:]) - - wireSig, _ = lnwire.NewSigFromSignature(testSig) - - testClock = clock.NewTestClock(testNow) - - // defaultPendingHeight is the default height at which we set - // channels to pending. - defaultPendingHeight = 100 - - // defaultAddr is the default address that we mark test channels pending - // with. - defaultAddr = &net.TCPAddr{ - IP: net.ParseIP("127.0.0.1"), - Port: 18555, - } -) - -// testChannelParams is a struct which details the specifics of how a channel -// should be created. -type testChannelParams struct { - // channel is the channel that will be written to disk. - channel *OpenChannel - - // addr is the address that the channel will be synced pending with. - addr *net.TCPAddr - - // pendingHeight is the height that the channel should be recorded as - // pending. - pendingHeight uint32 - - // openChannel is set to true if the channel should be fully marked as - // open if this is false, the channel will be left in pending state. 
- openChannel bool -} - -// testChannelOption is a functional option which can be used to alter the -// default channel that is creates for testing. -type testChannelOption func(params *testChannelParams) - -// channelCommitmentOption is an option which allows overwriting of the default -// commitment height and balances. The local boolean can be used to set these -// balances on the local or remote commit. -func channelCommitmentOption(height uint64, localBalance, - remoteBalance lnwire.MilliSatoshi, local bool) testChannelOption { - - return func(params *testChannelParams) { - if local { - params.channel.LocalCommitment.CommitHeight = height - params.channel.LocalCommitment.LocalBalance = localBalance - params.channel.LocalCommitment.RemoteBalance = remoteBalance - } else { - params.channel.RemoteCommitment.CommitHeight = height - params.channel.RemoteCommitment.LocalBalance = localBalance - params.channel.RemoteCommitment.RemoteBalance = remoteBalance - } - } -} - -// pendingHeightOption is an option which can be used to set the height the -// channel is marked as pending at. -func pendingHeightOption(height uint32) testChannelOption { - return func(params *testChannelParams) { - params.pendingHeight = height - } -} - -// openChannelOption is an option which can be used to create a test channel -// that is open. -func openChannelOption() testChannelOption { - return func(params *testChannelParams) { - params.openChannel = true - } -} - -// localHtlcsOption is an option which allows setting of htlcs on the local -// commitment. -func localHtlcsOption(htlcs []HTLC) testChannelOption { - return func(params *testChannelParams) { - params.channel.LocalCommitment.Htlcs = htlcs - } -} - -// remoteHtlcsOption is an option which allows setting of htlcs on the remote -// commitment. 
-func remoteHtlcsOption(htlcs []HTLC) testChannelOption { - return func(params *testChannelParams) { - params.channel.RemoteCommitment.Htlcs = htlcs - } -} - -// localShutdownOption is an option which sets the local upfront shutdown -// script for the channel. -func localShutdownOption(addr lnwire.DeliveryAddress) testChannelOption { - return func(params *testChannelParams) { - params.channel.LocalShutdownScript = addr - } -} - -// remoteShutdownOption is an option which sets the remote upfront shutdown -// script for the channel. -func remoteShutdownOption(addr lnwire.DeliveryAddress) testChannelOption { - return func(params *testChannelParams) { - params.channel.RemoteShutdownScript = addr - } -} - -// fundingPointOption is an option which sets the funding outpoint of the -// channel. -func fundingPointOption(chanPoint wire.OutPoint) testChannelOption { - return func(params *testChannelParams) { - params.channel.FundingOutpoint = chanPoint - } -} - -// channelIDOption is an option which sets the short channel ID of the channel. -var channelIDOption = func(chanID lnwire.ShortChannelID) testChannelOption { - return func(params *testChannelParams) { - params.channel.ShortChannelID = chanID - } -} - -// createTestChannel writes a test channel to the database. It takes a set of -// functional options which can be used to overwrite the default of creating -// a pending channel that was broadcast at height 100. -func createTestChannel(t *testing.T, cdb *DB, - opts ...testChannelOption) *OpenChannel { - - // Create a default set of parameters. - params := &testChannelParams{ - channel: createTestChannelState(t, cdb), - addr: defaultAddr, - openChannel: false, - pendingHeight: uint32(defaultPendingHeight), - } - - // Apply all functional options to the test channel params. - for _, o := range opts { - o(params) - } - - // Mark the channel as pending. 
- err := params.channel.SyncPending(params.addr, params.pendingHeight) - if err != nil { - t.Fatalf("unable to save and serialize channel "+ - "state: %v", err) - } - - // If the parameters do not specify that we should open the channel - // fully, we return the pending channel. - if !params.openChannel { - return params.channel - } - - // Mark the channel as open with the short channel id provided. - err = params.channel.MarkAsOpen(params.channel.ShortChannelID) - if err != nil { - t.Fatalf("unable to mark channel open: %v", err) - } - - return params.channel -} - -func createTestChannelState(t *testing.T, cdb *DB) *OpenChannel { - // Simulate 1000 channel updates. - producer, err := shachain.NewRevocationProducerFromBytes(key[:]) - if err != nil { - t.Fatalf("could not get producer: %v", err) - } - store := shachain.NewRevocationStore() - for i := 0; i < 1; i++ { - preImage, err := producer.AtIndex(uint64(i)) - if err != nil { - t.Fatalf("could not get "+ - "preimage: %v", err) - } - - if err := store.AddNextEntry(preImage); err != nil { - t.Fatalf("could not add entry: %v", err) - } - } - - localCfg := ChannelConfig{ - ChannelConstraints: ChannelConstraints{ - DustLimit: btcutil.Amount(rand.Int63()), - MaxPendingAmount: lnwire.MilliSatoshi(rand.Int63()), - ChanReserve: btcutil.Amount(rand.Int63()), - MinHTLC: lnwire.MilliSatoshi(rand.Int63()), - MaxAcceptedHtlcs: uint16(rand.Int31()), - CsvDelay: uint16(rand.Int31()), - }, - MultiSigKey: keychain.KeyDescriptor{ - PubKey: privKey.PubKey(), - }, - RevocationBasePoint: keychain.KeyDescriptor{ - PubKey: privKey.PubKey(), - }, - PaymentBasePoint: keychain.KeyDescriptor{ - PubKey: privKey.PubKey(), - }, - DelayBasePoint: keychain.KeyDescriptor{ - PubKey: privKey.PubKey(), - }, - HtlcBasePoint: keychain.KeyDescriptor{ - PubKey: privKey.PubKey(), - }, - } - remoteCfg := ChannelConfig{ - ChannelConstraints: ChannelConstraints{ - DustLimit: btcutil.Amount(rand.Int63()), - MaxPendingAmount: 
lnwire.MilliSatoshi(rand.Int63()), - ChanReserve: btcutil.Amount(rand.Int63()), - MinHTLC: lnwire.MilliSatoshi(rand.Int63()), - MaxAcceptedHtlcs: uint16(rand.Int31()), - CsvDelay: uint16(rand.Int31()), - }, - MultiSigKey: keychain.KeyDescriptor{ - PubKey: privKey.PubKey(), - KeyLocator: keychain.KeyLocator{ - Family: keychain.KeyFamilyMultiSig, - Index: 9, - }, - }, - RevocationBasePoint: keychain.KeyDescriptor{ - PubKey: privKey.PubKey(), - KeyLocator: keychain.KeyLocator{ - Family: keychain.KeyFamilyRevocationBase, - Index: 8, - }, - }, - PaymentBasePoint: keychain.KeyDescriptor{ - PubKey: privKey.PubKey(), - KeyLocator: keychain.KeyLocator{ - Family: keychain.KeyFamilyPaymentBase, - Index: 7, - }, - }, - DelayBasePoint: keychain.KeyDescriptor{ - PubKey: privKey.PubKey(), - KeyLocator: keychain.KeyLocator{ - Family: keychain.KeyFamilyDelayBase, - Index: 6, - }, - }, - HtlcBasePoint: keychain.KeyDescriptor{ - PubKey: privKey.PubKey(), - KeyLocator: keychain.KeyLocator{ - Family: keychain.KeyFamilyHtlcBase, - Index: 5, - }, - }, - } - - chanID := lnwire.NewShortChanIDFromInt(uint64(rand.Int63())) - - return &OpenChannel{ - ChanType: SingleFunderBit | FrozenBit, - ChainHash: key, - FundingOutpoint: wire.OutPoint{Hash: key, Index: rand.Uint32()}, - ShortChannelID: chanID, - IsInitiator: true, - IsPending: true, - IdentityPub: pubKey, - Capacity: btcutil.Amount(10000), - LocalChanCfg: localCfg, - RemoteChanCfg: remoteCfg, - TotalMSatSent: 8, - TotalMSatReceived: 2, - LocalCommitment: ChannelCommitment{ - CommitHeight: 0, - LocalBalance: lnwire.MilliSatoshi(9000), - RemoteBalance: lnwire.MilliSatoshi(3000), - CommitFee: btcutil.Amount(rand.Int63()), - FeePerKw: btcutil.Amount(5000), - CommitTx: testTx, - CommitSig: bytes.Repeat([]byte{1}, 71), - }, - RemoteCommitment: ChannelCommitment{ - CommitHeight: 0, - LocalBalance: lnwire.MilliSatoshi(3000), - RemoteBalance: lnwire.MilliSatoshi(9000), - CommitFee: btcutil.Amount(rand.Int63()), - FeePerKw: btcutil.Amount(5000), - 
CommitTx: testTx, - CommitSig: bytes.Repeat([]byte{1}, 71), - }, - NumConfsRequired: 4, - RemoteCurrentRevocation: privKey.PubKey(), - RemoteNextRevocation: privKey.PubKey(), - RevocationProducer: producer, - RevocationStore: store, - Db: cdb, - Packager: NewChannelPackager(chanID), - FundingTxn: testTx, - ThawHeight: uint32(defaultPendingHeight), - } -} - -func TestOpenChannelPutGetDelete(t *testing.T) { - t.Parallel() - - cdb, cleanUp, err := MakeTestDB() - if err != nil { - t.Fatalf("unable to make test database: %v", err) - } - defer cleanUp() - - // Create the test channel state, with additional htlcs on the local - // and remote commitment. - localHtlcs := []HTLC{ - {Signature: testSig.Serialize(), - Incoming: true, - Amt: 10, - RHash: key, - RefundTimeout: 1, - OnionBlob: []byte("onionblob"), - }, - } - - remoteHtlcs := []HTLC{ - { - Signature: testSig.Serialize(), - Incoming: false, - Amt: 10, - RHash: key, - RefundTimeout: 1, - OnionBlob: []byte("onionblob"), - }, - } - - state := createTestChannel( - t, cdb, - remoteHtlcsOption(remoteHtlcs), - localHtlcsOption(localHtlcs), - ) - - openChannels, err := cdb.FetchOpenChannels(state.IdentityPub) - if err != nil { - t.Fatalf("unable to fetch open channel: %v", err) - } - - newState := openChannels[0] - - // The decoded channel state should be identical to what we stored - // above. - if !reflect.DeepEqual(state, newState) { - t.Fatalf("channel state doesn't match:: %v vs %v", - spew.Sdump(state), spew.Sdump(newState)) - } - - // We'll also test that the channel is properly able to hot swap the - // next revocation for the state machine. This tests the initial - // post-funding revocation exchange. 
- nextRevKey, err := btcec.NewPrivateKey(btcec.S256()) - if err != nil { - t.Fatalf("unable to create new private key: %v", err) - } - if err := state.InsertNextRevocation(nextRevKey.PubKey()); err != nil { - t.Fatalf("unable to update revocation: %v", err) - } - - openChannels, err = cdb.FetchOpenChannels(state.IdentityPub) - if err != nil { - t.Fatalf("unable to fetch open channel: %v", err) - } - updatedChan := openChannels[0] - - // Ensure that the revocation was set properly. - if !nextRevKey.PubKey().IsEqual(updatedChan.RemoteNextRevocation) { - t.Fatalf("next revocation wasn't updated") - } - - // Finally to wrap up the test, delete the state of the channel within - // the database. This involves "closing" the channel which removes all - // written state, and creates a small "summary" elsewhere within the - // database. - closeSummary := &ChannelCloseSummary{ - ChanPoint: state.FundingOutpoint, - RemotePub: state.IdentityPub, - SettledBalance: btcutil.Amount(500), - TimeLockedBalance: btcutil.Amount(10000), - IsPending: false, - CloseType: CooperativeClose, - } - if err := state.CloseChannel(closeSummary); err != nil { - t.Fatalf("unable to close channel: %v", err) - } - - // As the channel is now closed, attempting to fetch all open channels - // for our fake node ID should return an empty slice. - openChans, err := cdb.FetchOpenChannels(state.IdentityPub) - if err != nil { - t.Fatalf("unable to fetch open channels: %v", err) - } - if len(openChans) != 0 { - t.Fatalf("all channels not deleted, found %v", len(openChans)) - } - - // Additionally, attempting to fetch all the open channels globally - // should yield no results. - openChans, err = cdb.FetchAllChannels() - if err != nil { - t.Fatal("unable to fetch all open chans") - } - if len(openChans) != 0 { - t.Fatalf("all channels not deleted, found %v", len(openChans)) - } -} - -// TestOptionalShutdown tests the reading and writing of channels with and -// without optional shutdown script fields. 
-func TestOptionalShutdown(t *testing.T) { - local := lnwire.DeliveryAddress([]byte("local shutdown script")) - remote := lnwire.DeliveryAddress([]byte("remote shutdown script")) - - if _, err := rand.Read(remote); err != nil { - t.Fatalf("Could not create random script: %v", err) - } - - tests := []struct { - name string - localShutdown lnwire.DeliveryAddress - remoteShutdown lnwire.DeliveryAddress - }{ - { - name: "no shutdown scripts", - localShutdown: nil, - remoteShutdown: nil, - }, - { - name: "local shutdown script", - localShutdown: local, - remoteShutdown: nil, - }, - { - name: "remote shutdown script", - localShutdown: nil, - remoteShutdown: remote, - }, - { - name: "both scripts set", - localShutdown: local, - remoteShutdown: remote, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - cdb, cleanUp, err := MakeTestDB() - if err != nil { - t.Fatalf("unable to make test database: %v", err) - } - defer cleanUp() - - // Create a channel with upfront scripts set as - // specified in the test. 
- state := createTestChannel( - t, cdb, - localShutdownOption(test.localShutdown), - remoteShutdownOption(test.remoteShutdown), - ) - - openChannels, err := cdb.FetchOpenChannels( - state.IdentityPub, - ) - if err != nil { - t.Fatalf("unable to fetch open"+ - " channel: %v", err) - } - - if len(openChannels) != 1 { - t.Fatalf("Expected one channel open,"+ - " got: %v", len(openChannels)) - } - - if !bytes.Equal(openChannels[0].LocalShutdownScript, - test.localShutdown) { - - t.Fatalf("Expected local: %x, got: %x", - test.localShutdown, - openChannels[0].LocalShutdownScript) - } - - if !bytes.Equal(openChannels[0].RemoteShutdownScript, - test.remoteShutdown) { - - t.Fatalf("Expected remote: %x, got: %x", - test.remoteShutdown, - openChannels[0].RemoteShutdownScript) - } - }) - } -} - -func assertCommitmentEqual(t *testing.T, a, b *ChannelCommitment) { - if !reflect.DeepEqual(a, b) { - _, _, line, _ := runtime.Caller(1) - t.Fatalf("line %v: commitments don't match: %v vs %v", - line, spew.Sdump(a), spew.Sdump(b)) - } -} - -func TestChannelStateTransition(t *testing.T) { - t.Parallel() - - cdb, cleanUp, err := MakeTestDB() - if err != nil { - t.Fatalf("unable to make test database: %v", err) - } - defer cleanUp() - - // First create a minimal channel, then perform a full sync in order to - // persist the data. - channel := createTestChannel(t, cdb) - - // Add some HTLCs which were added during this new state transition. - // Half of the HTLCs are incoming, while the other half are outgoing. 
- var ( - htlcs []HTLC - htlcAmt lnwire.MilliSatoshi - ) - for i := uint32(0); i < 10; i++ { - var incoming bool - if i > 5 { - incoming = true - } - htlc := HTLC{ - Signature: testSig.Serialize(), - Incoming: incoming, - Amt: 10, - RHash: key, - RefundTimeout: i, - OutputIndex: int32(i * 3), - LogIndex: uint64(i * 2), - HtlcIndex: uint64(i), - } - htlc.OnionBlob = make([]byte, 10) - copy(htlc.OnionBlob[:], bytes.Repeat([]byte{2}, 10)) - htlcs = append(htlcs, htlc) - htlcAmt += htlc.Amt - } - - // Create a new channel delta which includes the above HTLCs, some - // balance updates, and an increment of the current commitment height. - // Additionally, modify the signature and commitment transaction. - newSequence := uint32(129498) - newSig := bytes.Repeat([]byte{3}, 71) - newTx := channel.LocalCommitment.CommitTx.Copy() - newTx.TxIn[0].Sequence = newSequence - commitment := ChannelCommitment{ - CommitHeight: 1, - LocalLogIndex: 2, - LocalHtlcIndex: 1, - RemoteLogIndex: 2, - RemoteHtlcIndex: 1, - LocalBalance: lnwire.MilliSatoshi(1e8), - RemoteBalance: lnwire.MilliSatoshi(1e8), - CommitFee: 55, - FeePerKw: 99, - CommitTx: newTx, - CommitSig: newSig, - Htlcs: htlcs, - } - - // First update the local node's broadcastable state and also add a - // CommitDiff remote node's as well in order to simulate a proper state - // transition. - unsignedAckedUpdates := []LogUpdate{ - { - LogIndex: 2, - UpdateMsg: &lnwire.UpdateAddHTLC{ - ChanID: lnwire.ChannelID{1, 2, 3}, - }, - }, - } - - err = channel.UpdateCommitment(&commitment, unsignedAckedUpdates) - if err != nil { - t.Fatalf("unable to update commitment: %v", err) - } - - // Assert that update is correctly written to the database. 
- dbUnsignedAckedUpdates, err := channel.UnsignedAckedUpdates() - if err != nil { - t.Fatalf("unable to fetch dangling remote updates: %v", err) - } - if len(dbUnsignedAckedUpdates) != 1 { - t.Fatalf("unexpected number of dangling remote updates") - } - if !reflect.DeepEqual( - dbUnsignedAckedUpdates[0], unsignedAckedUpdates[0], - ) { - t.Fatalf("unexpected update") - } - - // The balances, new update, the HTLCs and the changes to the fake - // commitment transaction along with the modified signature should all - // have been updated. - updatedChannel, err := cdb.FetchOpenChannels(channel.IdentityPub) - if err != nil { - t.Fatalf("unable to fetch updated channel: %v", err) - } - assertCommitmentEqual(t, &commitment, &updatedChannel[0].LocalCommitment) - numDiskUpdates, err := updatedChannel[0].CommitmentHeight() - if err != nil { - t.Fatalf("unable to read commitment height from disk: %v", err) - } - if numDiskUpdates != uint64(commitment.CommitHeight) { - t.Fatalf("num disk updates doesn't match: %v vs %v", - numDiskUpdates, commitment.CommitHeight) - } - - // Attempting to query for a commitment diff should return - // ErrNoPendingCommit as we haven't yet created a new state for them. - _, err = channel.RemoteCommitChainTip() - if !ErrNoPendingCommit.Is(err) { - t.Fatalf("expected ErrNoPendingCommit, instead got %v", err) - } - - // To simulate us extending a new state to the remote party, we'll also - // create a new commit diff for them. 
- remoteCommit := commitment - remoteCommit.LocalBalance = lnwire.MilliSatoshi(2e8) - remoteCommit.RemoteBalance = lnwire.MilliSatoshi(3e8) - remoteCommit.CommitHeight = 1 - commitDiff := &CommitDiff{ - Commitment: remoteCommit, - CommitSig: &lnwire.CommitSig{ - ChanID: lnwire.ChannelID(key), - CommitSig: wireSig, - HtlcSigs: []lnwire.Sig{ - wireSig, - wireSig, - }, - }, - LogUpdates: []LogUpdate{ - { - LogIndex: 1, - UpdateMsg: &lnwire.UpdateAddHTLC{ - ID: 1, - Amount: lnwire.NewMSatFromSatoshis(100), - Expiry: 25, - }, - }, - { - LogIndex: 2, - UpdateMsg: &lnwire.UpdateAddHTLC{ - ID: 2, - Amount: lnwire.NewMSatFromSatoshis(200), - Expiry: 50, - }, - }, - }, - OpenedCircuitKeys: []CircuitKey{}, - ClosedCircuitKeys: []CircuitKey{}, - } - copy(commitDiff.LogUpdates[0].UpdateMsg.(*lnwire.UpdateAddHTLC).PaymentHash[:], - bytes.Repeat([]byte{1}, 32)) - copy(commitDiff.LogUpdates[1].UpdateMsg.(*lnwire.UpdateAddHTLC).PaymentHash[:], - bytes.Repeat([]byte{2}, 32)) - if err := channel.AppendRemoteCommitChain(commitDiff); err != nil { - t.Fatalf("unable to add to commit chain: %v", err) - } - - // The commitment tip should now match the commitment that we just - // inserted. - diskCommitDiff, err := channel.RemoteCommitChainTip() - if err != nil { - t.Fatalf("unable to fetch commit diff: %v", err) - } - if !reflect.DeepEqual(commitDiff, diskCommitDiff) { - t.Fatalf("commit diffs don't match: %v vs %v", spew.Sdump(remoteCommit), - spew.Sdump(diskCommitDiff)) - } - - // We'll save the old remote commitment as this will be added to the - // revocation log shortly. - oldRemoteCommit := channel.RemoteCommitment - - // Next, write to the log which tracks the necessary revocation state - // needed to rectify any fishy behavior by the remote party. Modify the - // current uncollapsed revocation state to simulate a state transition - // by the remote party. 
- channel.RemoteCurrentRevocation = channel.RemoteNextRevocation - newPriv, err := btcec.NewPrivateKey(btcec.S256()) - if err != nil { - t.Fatalf("unable to generate key: %v", err) - } - channel.RemoteNextRevocation = newPriv.PubKey() - - fwdPkg := NewFwdPkg(channel.ShortChanID(), oldRemoteCommit.CommitHeight, - diskCommitDiff.LogUpdates, nil) - - err = channel.AdvanceCommitChainTail(fwdPkg, nil) - if err != nil { - t.Fatalf("unable to append to revocation log: %v", err) - } - - // At this point, the remote commit chain should be nil, and the posted - // remote commitment should match the one we added as a diff above. - if _, err := channel.RemoteCommitChainTip(); !ErrNoPendingCommit.Is(err) { - t.Fatalf("expected ErrNoPendingCommit, instead got %v", err) - } - - // We should be able to fetch the channel delta created above by its - // update number with all the state properly reconstructed. - diskPrevCommit, err := channel.FindPreviousState( - oldRemoteCommit.CommitHeight, - ) - if err != nil { - t.Fatalf("unable to fetch past delta: %v", err) - } - - // The two deltas (the original vs the on-disk version) should - // identical, and all HTLC data should properly be retained. - assertCommitmentEqual(t, &oldRemoteCommit, diskPrevCommit) - - // The state number recovered from the tail of the revocation log - // should be identical to this current state. - logTail, err := channel.RevocationLogTail() - if err != nil { - t.Fatalf("unable to retrieve log: %v", err) - } - if logTail.CommitHeight != oldRemoteCommit.CommitHeight { - t.Fatal("update number doesn't match") - } - - oldRemoteCommit = channel.RemoteCommitment - - // Next modify the posted diff commitment slightly, then create a new - // commitment diff and advance the tail. 
- commitDiff.Commitment.CommitHeight = 2 - commitDiff.Commitment.LocalBalance -= htlcAmt - commitDiff.Commitment.RemoteBalance += htlcAmt - commitDiff.LogUpdates = []LogUpdate{} - if err := channel.AppendRemoteCommitChain(commitDiff); err != nil { - t.Fatalf("unable to add to commit chain: %v", err) - } - - fwdPkg = NewFwdPkg(channel.ShortChanID(), oldRemoteCommit.CommitHeight, nil, nil) - - err = channel.AdvanceCommitChainTail(fwdPkg, nil) - if err != nil { - t.Fatalf("unable to append to revocation log: %v", err) - } - - // Once again, fetch the state and ensure it has been properly updated. - prevCommit, err := channel.FindPreviousState(oldRemoteCommit.CommitHeight) - if err != nil { - t.Fatalf("unable to fetch past delta: %v", err) - } - assertCommitmentEqual(t, &oldRemoteCommit, prevCommit) - - // Once again, state number recovered from the tail of the revocation - // log should be identical to this current state. - logTail, err = channel.RevocationLogTail() - if err != nil { - t.Fatalf("unable to retrieve log: %v", err) - } - if logTail.CommitHeight != oldRemoteCommit.CommitHeight { - t.Fatal("update number doesn't match") - } - - // The revocation state stored on-disk should now also be identical. - updatedChannel, err = cdb.FetchOpenChannels(channel.IdentityPub) - if err != nil { - t.Fatalf("unable to fetch updated channel: %v", err) - } - if !channel.RemoteCurrentRevocation.IsEqual(updatedChannel[0].RemoteCurrentRevocation) { - t.Fatalf("revocation state was not synced") - } - if !channel.RemoteNextRevocation.IsEqual(updatedChannel[0].RemoteNextRevocation) { - t.Fatalf("revocation state was not synced") - } - - // Now attempt to delete the channel from the database. 
- closeSummary := &ChannelCloseSummary{ - ChanPoint: channel.FundingOutpoint, - RemotePub: channel.IdentityPub, - SettledBalance: btcutil.Amount(500), - TimeLockedBalance: btcutil.Amount(10000), - IsPending: false, - CloseType: RemoteForceClose, - } - if err := updatedChannel[0].CloseChannel(closeSummary); err != nil { - t.Fatalf("unable to delete updated channel: %v", err) - } - - // If we attempt to fetch the target channel again, it shouldn't be - // found. - channels, err := cdb.FetchOpenChannels(channel.IdentityPub) - if err != nil { - t.Fatalf("unable to fetch updated channels: %v", err) - } - if len(channels) != 0 { - t.Fatalf("%v channels, found, but none should be", - len(channels)) - } - - // Attempting to find previous states on the channel should fail as the - // revocation log has been deleted. - _, err = updatedChannel[0].FindPreviousState(oldRemoteCommit.CommitHeight) - if err == nil { - t.Fatal("revocation log search should have failed") - } -} - -func TestFetchPendingChannels(t *testing.T) { - t.Parallel() - - cdb, cleanUp, err := MakeTestDB() - if err != nil { - t.Fatalf("unable to make test database: %v", err) - } - defer cleanUp() - - // Create a pending channel that was broadcast at height 99. - const broadcastHeight = 99 - createTestChannel(t, cdb, pendingHeightOption(broadcastHeight)) - - pendingChannels, err := cdb.FetchPendingChannels() - if err != nil { - t.Fatalf("unable to list pending channels: %v", err) - } - - if len(pendingChannels) != 1 { - t.Fatalf("incorrect number of pending channels: expecting %v,"+ - "got %v", 1, len(pendingChannels)) - } - - // The broadcast height of the pending channel should have been set - // properly. 
- if pendingChannels[0].FundingBroadcastHeight != broadcastHeight { - t.Fatalf("broadcast height mismatch: expected %v, got %v", - pendingChannels[0].FundingBroadcastHeight, - broadcastHeight) - } - - chanOpenLoc := lnwire.ShortChannelID{ - BlockHeight: 5, - TxIndex: 10, - TxPosition: 15, - } - err = pendingChannels[0].MarkAsOpen(chanOpenLoc) - if err != nil { - t.Fatalf("unable to mark channel as open: %v", err) - } - - if pendingChannels[0].IsPending { - t.Fatalf("channel marked open should no longer be pending") - } - - if pendingChannels[0].ShortChanID() != chanOpenLoc { - t.Fatalf("channel opening height not updated: expected %v, "+ - "got %v", spew.Sdump(pendingChannels[0].ShortChanID()), - chanOpenLoc) - } - - // Next, we'll re-fetch the channel to ensure that the open height was - // properly set. - openChans, err := cdb.FetchAllChannels() - if err != nil { - t.Fatalf("unable to fetch channels: %v", err) - } - if openChans[0].ShortChanID() != chanOpenLoc { - t.Fatalf("channel opening heights don't match: expected %v, "+ - "got %v", spew.Sdump(openChans[0].ShortChanID()), - chanOpenLoc) - } - if openChans[0].FundingBroadcastHeight != broadcastHeight { - t.Fatalf("broadcast height mismatch: expected %v, got %v", - openChans[0].FundingBroadcastHeight, - broadcastHeight) - } - - pendingChannels, err = cdb.FetchPendingChannels() - if err != nil { - t.Fatalf("unable to list pending channels: %v", err) - } - - if len(pendingChannels) != 0 { - t.Fatalf("incorrect number of pending channels: expecting %v,"+ - "got %v", 0, len(pendingChannels)) - } -} - -func TestFetchClosedChannels(t *testing.T) { - t.Parallel() - - cdb, cleanUp, err := MakeTestDB() - if err != nil { - t.Fatalf("unable to make test database: %v", err) - } - defer cleanUp() - - // Create an open channel in the database. - state := createTestChannel(t, cdb, openChannelOption()) - - // Next, close the channel by including a close channel summary in the - // database. 
- summary := &ChannelCloseSummary{ - ChanPoint: state.FundingOutpoint, - ClosingTXID: rev, - RemotePub: state.IdentityPub, - Capacity: state.Capacity, - SettledBalance: state.LocalCommitment.LocalBalance.ToSatoshis(), - TimeLockedBalance: state.RemoteCommitment.LocalBalance.ToSatoshis() + 10000, - CloseType: RemoteForceClose, - IsPending: true, - LocalChanConfig: state.LocalChanCfg, - } - if err := state.CloseChannel(summary); err != nil { - t.Fatalf("unable to close channel: %v", err) - } - - // Query the database to ensure that the channel has now been properly - // closed. We should get the same result whether querying for pending - // channels only, or not. - pendingClosed, err := cdb.FetchClosedChannels(true) - if err != nil { - t.Fatalf("failed fetching closed channels: %v", err) - } - if len(pendingClosed) != 1 { - t.Fatalf("incorrect number of pending closed channels: expecting %v,"+ - "got %v", 1, len(pendingClosed)) - } - if !reflect.DeepEqual(summary, pendingClosed[0]) { - t.Fatalf("database summaries don't match: expected %v got %v", - spew.Sdump(summary), spew.Sdump(pendingClosed[0])) - } - closed, err := cdb.FetchClosedChannels(false) - if err != nil { - t.Fatalf("failed fetching all closed channels: %v", err) - } - if len(closed) != 1 { - t.Fatalf("incorrect number of closed channels: expecting %v, "+ - "got %v", 1, len(closed)) - } - if !reflect.DeepEqual(summary, closed[0]) { - t.Fatalf("database summaries don't match: expected %v got %v", - spew.Sdump(summary), spew.Sdump(closed[0])) - } - - // Mark the channel as fully closed. - err = cdb.MarkChanFullyClosed(&state.FundingOutpoint) - if err != nil { - t.Fatalf("failed fully closing channel: %v", err) - } - - // The channel should no longer be considered pending, but should still - // be retrieved when fetching all the closed channels. 
- closed, err = cdb.FetchClosedChannels(false) - if err != nil { - t.Fatalf("failed fetching closed channels: %v", err) - } - if len(closed) != 1 { - t.Fatalf("incorrect number of closed channels: expecting %v, "+ - "got %v", 1, len(closed)) - } - pendingClose, err := cdb.FetchClosedChannels(true) - if err != nil { - t.Fatalf("failed fetching channels pending close: %v", err) - } - if len(pendingClose) != 0 { - t.Fatalf("incorrect number of closed channels: expecting %v, "+ - "got %v", 0, len(closed)) - } -} - -// TestFetchWaitingCloseChannels ensures that the correct channels that are -// waiting to be closed are returned. -func TestFetchWaitingCloseChannels(t *testing.T) { - t.Parallel() - - const numChannels = 2 - const broadcastHeight = 99 - - // We'll start by creating two channels within our test database. One of - // them will have their funding transaction confirmed on-chain, while - // the other one will remain unconfirmed. - db, cleanUp, err := MakeTestDB() - if err != nil { - t.Fatalf("unable to make test database: %v", err) - } - defer cleanUp() - - channels := make([]*OpenChannel, numChannels) - for i := 0; i < numChannels; i++ { - // Create a pending channel in the database at the broadcast - // height. - channels[i] = createTestChannel( - t, db, pendingHeightOption(broadcastHeight), - ) - } - - // We'll only confirm the first one. - channelConf := lnwire.ShortChannelID{ - BlockHeight: broadcastHeight + 1, - TxIndex: 10, - TxPosition: 15, - } - if err := channels[0].MarkAsOpen(channelConf); err != nil { - t.Fatalf("unable to mark channel as open: %v", err) - } - - // Then, we'll mark the channels as if their commitments were broadcast. - // This would happen in the event of a force close and should make the - // channels enter a state of waiting close. 
- for _, channel := range channels { - closeTx := wire.NewMsgTx(2) - closeTx.AddTxIn( - &wire.TxIn{ - PreviousOutPoint: channel.FundingOutpoint, - }, - ) - - if err := channel.MarkCommitmentBroadcasted(closeTx, true); err != nil { - t.Fatalf("unable to mark commitment broadcast: %v", err) - } - - // Now try to marking a coop close with a nil tx. This should - // succeed, but it shouldn't exit when queried. - if err = channel.MarkCoopBroadcasted(nil, true); err != nil { - t.Fatalf("unable to mark nil coop broadcast: %v", err) - } - _, err := channel.BroadcastedCooperative() - if !ErrNoCloseTx.Is(err) { - t.Fatalf("expected no closing tx error, got: %v", err) - } - - // Finally, modify the close tx deterministically and also mark - // it as coop closed. Later we will test that distinct - // transactions are returned for both coop and force closes. - closeTx.TxIn[0].PreviousOutPoint.Index ^= 1 - if err := channel.MarkCoopBroadcasted(closeTx, true); err != nil { - t.Fatalf("unable to mark coop broadcast: %v", err) - } - } - - // Now, we'll fetch all the channels waiting to be closed from the - // database. We should expect to see both channels above, even if any of - // them haven't had their funding transaction confirm on-chain. - waitingCloseChannels, err := db.FetchWaitingCloseChannels() - if err != nil { - t.Fatalf("unable to fetch all waiting close channels: %v", err) - } - if len(waitingCloseChannels) != numChannels { - t.Fatalf("expected %d channels waiting to be closed, got %d", 2, - len(waitingCloseChannels)) - } - expectedChannels := make(map[wire.OutPoint]struct{}) - for _, channel := range channels { - expectedChannels[channel.FundingOutpoint] = struct{}{} - } - for _, channel := range waitingCloseChannels { - if _, ok := expectedChannels[channel.FundingOutpoint]; !ok { - t.Fatalf("expected channel %v to be waiting close", - channel.FundingOutpoint) - } - - chanPoint := channel.FundingOutpoint - - // Assert that the force close transaction is retrievable. 
- forceCloseTx, err := channel.BroadcastedCommitment() - if err != nil { - t.Fatalf("Unable to retrieve commitment: %v", err) - } - - if forceCloseTx.TxIn[0].PreviousOutPoint != chanPoint { - t.Fatalf("expected outpoint %v, got %v", - chanPoint, - forceCloseTx.TxIn[0].PreviousOutPoint) - } - - // Assert that the coop close transaction is retrievable. - coopCloseTx, err := channel.BroadcastedCooperative() - if err != nil { - t.Fatalf("unable to retrieve coop close: %v", err) - } - - chanPoint.Index ^= 1 - if coopCloseTx.TxIn[0].PreviousOutPoint != chanPoint { - t.Fatalf("expected outpoint %v, got %v", - chanPoint, - coopCloseTx.TxIn[0].PreviousOutPoint) - } - } -} - -// TestRefreshShortChanID asserts that RefreshShortChanID updates the in-memory -// state of another OpenChannel to reflect a preceding call to MarkOpen on a -// different OpenChannel. -func TestRefreshShortChanID(t *testing.T) { - t.Parallel() - - cdb, cleanUp, err := MakeTestDB() - if err != nil { - t.Fatalf("unable to make test database: %v", err) - } - defer cleanUp() - - // First create a test channel. - state := createTestChannel(t, cdb) - - // Next, locate the pending channel with the database. - pendingChannels, err := cdb.FetchPendingChannels() - if err != nil { - t.Fatalf("unable to load pending channels; %v", err) - } - - var pendingChannel *OpenChannel - for _, channel := range pendingChannels { - if channel.FundingOutpoint == state.FundingOutpoint { - pendingChannel = channel - break - } - } - if pendingChannel == nil { - t.Fatalf("unable to find pending channel with funding "+ - "outpoint=%v: %v", state.FundingOutpoint, err) - } - - // Next, simulate the confirmation of the channel by marking it as - // pending within the database. 
- chanOpenLoc := lnwire.ShortChannelID{ - BlockHeight: 105, - TxIndex: 10, - TxPosition: 15, - } - - err = state.MarkAsOpen(chanOpenLoc) - if err != nil { - t.Fatalf("unable to mark channel open: %v", err) - } - - // The short_chan_id of the receiver to MarkAsOpen should reflect the - // open location, but the other pending channel should remain unchanged. - if state.ShortChanID() == pendingChannel.ShortChanID() { - t.Fatalf("pending channel short_chan_ID should not have been " + - "updated before refreshing short_chan_id") - } - - // Now that the receiver's short channel id has been updated, check to - // ensure that the channel packager's source has been updated as well. - // This ensures that the packager will read and write to buckets - // corresponding to the new short chan id, instead of the prior. - if state.Packager.(*ChannelPackager).source != chanOpenLoc { - t.Fatalf("channel packager source was not updated: want %v, "+ - "got %v", chanOpenLoc, - state.Packager.(*ChannelPackager).source) - } - - // Now, refresh the short channel ID of the pending channel. - err = pendingChannel.RefreshShortChanID() - if err != nil { - t.Fatalf("unable to refresh short_chan_id: %v", err) - } - - // This should result in both OpenChannel's now having the same - // ShortChanID. - if state.ShortChanID() != pendingChannel.ShortChanID() { - t.Fatalf("expected pending channel short_chan_id to be "+ - "refreshed: want %v, got %v", state.ShortChanID(), - pendingChannel.ShortChanID()) - } - - // Check to ensure that the _other_ OpenChannel channel packager's - // source has also been updated after the refresh. This ensures that the - // other packagers will read and write to buckets corresponding to the - // updated short chan id. 
- if pendingChannel.Packager.(*ChannelPackager).source != chanOpenLoc { - t.Fatalf("channel packager source was not updated: want %v, "+ - "got %v", chanOpenLoc, - pendingChannel.Packager.(*ChannelPackager).source) - } - - // Check to ensure that this channel is no longer pending and this field - // is up to date. - if pendingChannel.IsPending { - t.Fatalf("channel pending state wasn't updated: want false got true") - } -} - -// TestCloseInitiator tests the setting of close initiator statuses for -// cooperative closes and local force closes. -func TestCloseInitiator(t *testing.T) { - tests := []struct { - name string - // updateChannel is called to update the channel as broadcast, - // cooperatively or not, based on the test's requirements. - updateChannel func(c *OpenChannel) er.R - expectedStatuses []ChannelStatus - }{ - { - name: "local coop close", - // Mark the channel as cooperatively closed, initiated - // by the local party. - updateChannel: func(c *OpenChannel) er.R { - return c.MarkCoopBroadcasted( - &wire.MsgTx{}, true, - ) - }, - expectedStatuses: []ChannelStatus{ - ChanStatusLocalCloseInitiator, - ChanStatusCoopBroadcasted, - }, - }, - { - name: "remote coop close", - // Mark the channel as cooperatively closed, initiated - // by the remote party. - updateChannel: func(c *OpenChannel) er.R { - return c.MarkCoopBroadcasted( - &wire.MsgTx{}, false, - ) - }, - expectedStatuses: []ChannelStatus{ - ChanStatusRemoteCloseInitiator, - ChanStatusCoopBroadcasted, - }, - }, - { - name: "local force close", - // Mark the channel's commitment as broadcast with - // local initiator. 
- updateChannel: func(c *OpenChannel) er.R { - return c.MarkCommitmentBroadcasted( - &wire.MsgTx{}, true, - ) - }, - expectedStatuses: []ChannelStatus{ - ChanStatusLocalCloseInitiator, - ChanStatusCommitBroadcasted, - }, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - cdb, cleanUp, err := MakeTestDB() - if err != nil { - t.Fatalf("unable to make test database: %v", - err) - } - defer cleanUp() - - // Create an open channel. - channel := createTestChannel( - t, cdb, openChannelOption(), - ) - - err = test.updateChannel(channel) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - // Lookup open channels in the database. - dbChans, err := fetchChannels( - cdb, pendingChannelFilter(false), - ) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if len(dbChans) != 1 { - t.Fatalf("expected 1 channel, got: %v", - len(dbChans)) - } - - // Check that the statuses that we expect were written - // to disk. - for _, status := range test.expectedStatuses { - if !dbChans[0].HasChanStatus(status) { - t.Fatalf("expected channel to have "+ - "status: %v, has status: %v", - status, dbChans[0].chanStatus) - } - } - }) - } -} - -// TestCloseChannelStatus tests setting of a channel status on the historical -// channel on channel close. -func TestCloseChannelStatus(t *testing.T) { - cdb, cleanUp, err := MakeTestDB() - if err != nil { - t.Fatalf("unable to make test database: %v", - err) - } - defer cleanUp() - - // Create an open channel. 
- channel := createTestChannel( - t, cdb, openChannelOption(), - ) - - if err := channel.CloseChannel( - &ChannelCloseSummary{ - ChanPoint: channel.FundingOutpoint, - RemotePub: channel.IdentityPub, - }, ChanStatusRemoteCloseInitiator, - ); err != nil { - t.Fatalf("unexpected error: %v", err) - } - - histChan, err := channel.Db.FetchHistoricalChannel( - &channel.FundingOutpoint, - ) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if !histChan.HasChanStatus(ChanStatusRemoteCloseInitiator) { - t.Fatalf("channel should have status") - } -} - -// TestBalanceAtHeight tests lookup of our local and remote balance at a given -// height. -func TestBalanceAtHeight(t *testing.T) { - const ( - // Values that will be set on our current local commit in - // memory. - localHeight = 2 - localLocalBalance = 1000 - localRemoteBalance = 1500 - - // Values that will be set on our current remote commit in - // memory. - remoteHeight = 3 - remoteLocalBalance = 2000 - remoteRemoteBalance = 2500 - - // Values that will be written to disk in the revocation log. - oldHeight = 0 - oldLocalBalance = 200 - oldRemoteBalance = 300 - - // Heights to test error cases. - unknownHeight = 1 - unreachedHeight = 4 - ) - - // putRevokedState is a helper function used to put commitments is - // the revocation log bucket to test lookup of balances at heights that - // are not our current height. - putRevokedState := func(c *OpenChannel, height uint64, local, - remote lnwire.MilliSatoshi) er.R { - - err := kvdb.Update(c.Db, func(tx kvdb.RwTx) er.R { - chanBucket, err := fetchChanBucketRw( - tx, c.IdentityPub, &c.FundingOutpoint, - c.ChainHash, - ) - if err != nil { - return err - } - - logKey := revocationLogBucket - logBucket, err := chanBucket.CreateBucketIfNotExists( - logKey, - ) - if err != nil { - return err - } - - // Make a copy of our current commitment so we do not - // need to re-fill all the required fields and copy in - // our new desired values. 
- commit := c.LocalCommitment - commit.CommitHeight = height - commit.LocalBalance = local - commit.RemoteBalance = remote - - return appendChannelLogEntry(logBucket, &commit) - }, func() {}) - - return err - } - - tests := []struct { - name string - targetHeight uint64 - expectedLocalBalance lnwire.MilliSatoshi - expectedRemoteBalance lnwire.MilliSatoshi - expectedError *er.ErrorCode - }{ - { - name: "target is current local height", - targetHeight: localHeight, - expectedLocalBalance: localLocalBalance, - expectedRemoteBalance: localRemoteBalance, - expectedError: nil, - }, - { - name: "target is current remote height", - targetHeight: remoteHeight, - expectedLocalBalance: remoteLocalBalance, - expectedRemoteBalance: remoteRemoteBalance, - expectedError: nil, - }, - { - name: "need to lookup commit", - targetHeight: oldHeight, - expectedLocalBalance: oldLocalBalance, - expectedRemoteBalance: oldRemoteBalance, - expectedError: nil, - }, - { - name: "height not found", - targetHeight: unknownHeight, - expectedLocalBalance: 0, - expectedRemoteBalance: 0, - expectedError: errLogEntryNotFound, - }, - { - name: "height not reached", - targetHeight: unreachedHeight, - expectedLocalBalance: 0, - expectedRemoteBalance: 0, - expectedError: errHeightNotReached, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - cdb, cleanUp, err := MakeTestDB() - if err != nil { - t.Fatalf("unable to make test database: %v", - err) - } - defer cleanUp() - - // Create options to set the heights and balances of - // our local and remote commitments. - localCommitOpt := channelCommitmentOption( - localHeight, localLocalBalance, - localRemoteBalance, true, - ) - - remoteCommitOpt := channelCommitmentOption( - remoteHeight, remoteLocalBalance, - remoteRemoteBalance, false, - ) - - // Create an open channel. 
- channel := createTestChannel( - t, cdb, openChannelOption(), - localCommitOpt, remoteCommitOpt, - ) - - // Write an older commit to disk. - err = putRevokedState(channel, oldHeight, - oldLocalBalance, oldRemoteBalance) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - local, remote, err := channel.BalancesAtHeight( - test.targetHeight, - ) - if test.expectedError == nil && err == nil { - } else if test.expectedError == nil || !test.expectedError.Is(err) { - t.Fatalf("expected: %v, got: %v", - test.expectedError, err) - } - - if local != test.expectedLocalBalance { - t.Fatalf("expected local: %v, got: %v", - test.expectedLocalBalance, local) - } - - if remote != test.expectedRemoteBalance { - t.Fatalf("expected remote: %v, got: %v", - test.expectedRemoteBalance, remote) - } - }) - } -} - -// TestHasChanStatus asserts the behavior of HasChanStatus by checking the -// behavior of various status flags in addition to the special case of -// ChanStatusDefault which is treated like a flag in the code base even though -// it isn't. 
-func TestHasChanStatus(t *testing.T) { - tests := []struct { - name string - status ChannelStatus - expHas map[ChannelStatus]bool - }{ - { - name: "default", - status: ChanStatusDefault, - expHas: map[ChannelStatus]bool{ - ChanStatusDefault: true, - ChanStatusBorked: false, - }, - }, - { - name: "single flag", - status: ChanStatusBorked, - expHas: map[ChannelStatus]bool{ - ChanStatusDefault: false, - ChanStatusBorked: true, - }, - }, - { - name: "multiple flags", - status: ChanStatusBorked | ChanStatusLocalDataLoss, - expHas: map[ChannelStatus]bool{ - ChanStatusDefault: false, - ChanStatusBorked: true, - ChanStatusLocalDataLoss: true, - }, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - c := &OpenChannel{ - chanStatus: test.status, - } - - for status, expHas := range test.expHas { - has := c.HasChanStatus(status) - if has == expHas { - continue - } - - t.Fatalf("expected chan status to "+ - "have %s? %t, got: %t", - status, expHas, has) - } - }) - } -} diff --git a/lnd/channeldb/codec.go b/lnd/channeldb/codec.go deleted file mode 100644 index d5fda0d0..00000000 --- a/lnd/channeldb/codec.go +++ /dev/null @@ -1,465 +0,0 @@ -package channeldb - -import ( - "fmt" - "io" - "net" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/lnd/keychain" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/shachain" - "github.com/pkt-cash/pktd/wire" -) - -// writeOutpoint writes an outpoint to the passed writer using the minimal -// amount of bytes possible. 
-func writeOutpoint(w io.Writer, o *wire.OutPoint) er.R { - if _, err := util.Write(w, o.Hash[:]); err != nil { - return err - } - if err := util.WriteBin(w, byteOrder, o.Index); err != nil { - return err - } - - return nil -} - -// readOutpoint reads an outpoint from the passed reader that was previously -// written using the writeOutpoint struct. -func readOutpoint(r io.Reader, o *wire.OutPoint) er.R { - if _, err := util.ReadFull(r, o.Hash[:]); err != nil { - return err - } - if err := util.ReadBin(r, byteOrder, &o.Index); err != nil { - return err - } - - return nil -} - -// UnknownElementType is an error returned when the codec is unable to encode or -// decode a particular type. -type UnknownElementType struct { - method string - element interface{} -} - -// NewUnknownElementType creates a new UnknownElementType error from the passed -// method name and element. -func NewUnknownElementType(method string, el interface{}) UnknownElementType { - return UnknownElementType{method: method, element: el} -} - -// Error returns the name of the method that encountered the error, as well as -// the type that was unsupported. -func (e UnknownElementType) Error() string { - return fmt.Sprintf("Unknown type in %s: %T", e.method, e.element) -} - -// WriteElement is a one-stop shop to write the big endian representation of -// any element which is to be serialized for storage on disk. The passed -// io.Writer should be backed by an appropriately sized byte slice, or be able -// to dynamically expand to accommodate additional data. 
-func WriteElement(w io.Writer, element interface{}) er.R { - switch e := element.(type) { - case keychain.KeyDescriptor: - if err := util.WriteBin(w, byteOrder, e.Family); err != nil { - return err - } - if err := util.WriteBin(w, byteOrder, e.Index); err != nil { - return err - } - - if e.PubKey != nil { - if err := util.WriteBin(w, byteOrder, true); err != nil { - return er.Errorf("error writing serialized element: %s", err) - } - - return WriteElement(w, e.PubKey) - } - - return util.WriteBin(w, byteOrder, false) - case ChannelType: - if err := util.WriteBin(w, byteOrder, e); err != nil { - return err - } - - case chainhash.Hash: - if _, err := util.Write(w, e[:]); err != nil { - return err - } - - case wire.OutPoint: - return writeOutpoint(w, &e) - - case lnwire.ShortChannelID: - if err := util.WriteBin(w, byteOrder, e.ToUint64()); err != nil { - return err - } - - case lnwire.ChannelID: - if _, err := util.Write(w, e[:]); err != nil { - return err - } - - case int64, uint64: - if err := util.WriteBin(w, byteOrder, e); err != nil { - return err - } - - case uint32: - if err := util.WriteBin(w, byteOrder, e); err != nil { - return err - } - - case int32: - if err := util.WriteBin(w, byteOrder, e); err != nil { - return err - } - - case uint16: - if err := util.WriteBin(w, byteOrder, e); err != nil { - return err - } - - case uint8: - if err := util.WriteBin(w, byteOrder, e); err != nil { - return err - } - - case bool: - if err := util.WriteBin(w, byteOrder, e); err != nil { - return err - } - - case btcutil.Amount: - if err := util.WriteBin(w, byteOrder, uint64(e)); err != nil { - return err - } - - case lnwire.MilliSatoshi: - if err := util.WriteBin(w, byteOrder, uint64(e)); err != nil { - return err - } - - case *btcec.PrivateKey: - b := e.Serialize() - if _, err := util.Write(w, b); err != nil { - return err - } - - case *btcec.PublicKey: - b := e.SerializeCompressed() - if _, err := util.Write(w, b); err != nil { - return err - } - - case 
shachain.Producer: - return e.Encode(w) - - case shachain.Store: - return e.Encode(w) - - case *wire.MsgTx: - return e.Serialize(w) - - case [32]byte: - if _, err := util.Write(w, e[:]); err != nil { - return err - } - - case []byte: - if err := wire.WriteVarBytes(w, 0, e); err != nil { - return err - } - - case lnwire.Message: - if _, err := lnwire.WriteMessage(w, e, 0); err != nil { - return err - } - - case ChannelStatus: - if err := util.WriteBin(w, byteOrder, e); err != nil { - return err - } - - case ClosureType: - if err := util.WriteBin(w, byteOrder, e); err != nil { - return err - } - - case paymentIndexType: - if err := util.WriteBin(w, byteOrder, e); err != nil { - return err - } - - case lnwire.FundingFlag: - if err := util.WriteBin(w, byteOrder, e); err != nil { - return err - } - - case net.Addr: - if err := serializeAddr(w, e); err != nil { - return err - } - - case []net.Addr: - if err := WriteElement(w, uint32(len(e))); err != nil { - return err - } - - for _, addr := range e { - if err := serializeAddr(w, addr); err != nil { - return err - } - } - - default: - return er.E(UnknownElementType{"WriteElement", e}) - } - - return nil -} - -// WriteElements is writes each element in the elements slice to the passed -// io.Writer using WriteElement. -func WriteElements(w io.Writer, elements ...interface{}) er.R { - for _, element := range elements { - err := WriteElement(w, element) - if err != nil { - return err - } - } - return nil -} - -// ReadElement is a one-stop utility function to deserialize any datastructure -// encoded using the serialization format of the database. 
-func ReadElement(r io.Reader, element interface{}) er.R { - switch e := element.(type) { - case *keychain.KeyDescriptor: - if err := util.ReadBin(r, byteOrder, &e.Family); err != nil { - return err - } - if err := util.ReadBin(r, byteOrder, &e.Index); err != nil { - return err - } - - var hasPubKey bool - if err := util.ReadBin(r, byteOrder, &hasPubKey); err != nil { - return err - } - - if hasPubKey { - return ReadElement(r, &e.PubKey) - } - - case *ChannelType: - if err := util.ReadBin(r, byteOrder, e); err != nil { - return err - } - - case *chainhash.Hash: - if _, err := util.ReadFull(r, e[:]); err != nil { - return err - } - - case *wire.OutPoint: - return readOutpoint(r, e) - - case *lnwire.ShortChannelID: - var a uint64 - if err := util.ReadBin(r, byteOrder, &a); err != nil { - return err - } - *e = lnwire.NewShortChanIDFromInt(a) - - case *lnwire.ChannelID: - if _, err := util.ReadFull(r, e[:]); err != nil { - return err - } - - case *int64, *uint64: - if err := util.ReadBin(r, byteOrder, e); err != nil { - return err - } - - case *uint32: - if err := util.ReadBin(r, byteOrder, e); err != nil { - return err - } - - case *int32: - if err := util.ReadBin(r, byteOrder, e); err != nil { - return err - } - - case *uint16: - if err := util.ReadBin(r, byteOrder, e); err != nil { - return err - } - - case *uint8: - if err := util.ReadBin(r, byteOrder, e); err != nil { - return err - } - - case *bool: - if err := util.ReadBin(r, byteOrder, e); err != nil { - return err - } - - case *btcutil.Amount: - var a uint64 - if err := util.ReadBin(r, byteOrder, &a); err != nil { - return err - } - - *e = btcutil.Amount(a) - - case *lnwire.MilliSatoshi: - var a uint64 - if err := util.ReadBin(r, byteOrder, &a); err != nil { - return err - } - - *e = lnwire.MilliSatoshi(a) - - case **btcec.PrivateKey: - var b [btcec.PrivKeyBytesLen]byte - if _, err := util.ReadFull(r, b[:]); err != nil { - return err - } - - priv, _ := btcec.PrivKeyFromBytes(btcec.S256(), b[:]) - *e = priv - - 
case **btcec.PublicKey: - var b [btcec.PubKeyBytesLenCompressed]byte - if _, err := util.ReadFull(r, b[:]); err != nil { - return err - } - - pubKey, err := btcec.ParsePubKey(b[:], btcec.S256()) - if err != nil { - return err - } - *e = pubKey - - case *shachain.Producer: - var root [32]byte - if _, err := util.ReadFull(r, root[:]); err != nil { - return err - } - - // TODO(roasbeef): remove - producer, err := shachain.NewRevocationProducerFromBytes(root[:]) - if err != nil { - return err - } - - *e = producer - - case *shachain.Store: - store, err := shachain.NewRevocationStoreFromBytes(r) - if err != nil { - return err - } - - *e = store - - case **wire.MsgTx: - tx := wire.NewMsgTx(2) - if err := tx.Deserialize(r); err != nil { - return err - } - - *e = tx - - case *[32]byte: - if _, err := util.ReadFull(r, e[:]); err != nil { - return err - } - - case *[]byte: - bytes, err := wire.ReadVarBytes(r, 0, 66000, "[]byte") - if err != nil { - return err - } - - *e = bytes - - case *lnwire.Message: - msg, err := lnwire.ReadMessage(r, 0) - if err != nil { - return err - } - - *e = msg - - case *ChannelStatus: - if err := util.ReadBin(r, byteOrder, e); err != nil { - return err - } - - case *ClosureType: - if err := util.ReadBin(r, byteOrder, e); err != nil { - return err - } - - case *paymentIndexType: - if err := util.ReadBin(r, byteOrder, e); err != nil { - return err - } - - case *lnwire.FundingFlag: - if err := util.ReadBin(r, byteOrder, e); err != nil { - return err - } - - case *net.Addr: - addr, err := deserializeAddr(r) - if err != nil { - return err - } - *e = addr - - case *[]net.Addr: - var numAddrs uint32 - if err := ReadElement(r, &numAddrs); err != nil { - return err - } - - *e = make([]net.Addr, numAddrs) - for i := uint32(0); i < numAddrs; i++ { - addr, err := deserializeAddr(r) - if err != nil { - return err - } - (*e)[i] = addr - } - - default: - return er.E(UnknownElementType{"ReadElement", e}) - } - - return nil -} - -// ReadElements deserializes a 
variable number of elements into the passed -// io.Reader, with each element being deserialized according to the ReadElement -// function. -func ReadElements(r io.Reader, elements ...interface{}) er.R { - for _, element := range elements { - err := ReadElement(r, element) - if err != nil { - return err - } - } - return nil -} diff --git a/lnd/channeldb/db.go b/lnd/channeldb/db.go deleted file mode 100644 index 15021156..00000000 --- a/lnd/channeldb/db.go +++ /dev/null @@ -1,1333 +0,0 @@ -package channeldb - -import ( - "bytes" - "encoding/binary" - "io/ioutil" - "net" - "os" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - mig "github.com/pkt-cash/pktd/lnd/channeldb/migration" - "github.com/pkt-cash/pktd/lnd/channeldb/migration12" - "github.com/pkt-cash/pktd/lnd/channeldb/migration13" - "github.com/pkt-cash/pktd/lnd/channeldb/migration16" - "github.com/pkt-cash/pktd/lnd/channeldb/migration_01_to_11" - "github.com/pkt-cash/pktd/lnd/clock" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/pktlog/log" - "github.com/pkt-cash/pktd/pktwallet/walletdb" - "github.com/pkt-cash/pktd/wire" -) - -const ( - dbName = "channel.db" - dbFilePermission = 0600 -) - -var ( - // ErrDryRunMigrationOK signals that a migration executed successful, - // but we intentionally did not commit the result. - ErrDryRunMigrationOK = Err.CodeWithDetail("ErrDryRunMigrationOK", "dry run migration successful") -) - -// migration is a function which takes a prior outdated version of the database -// instances and mutates the key/bucket structure to arrive at a more -// up-to-date version of the database. -type migration func(tx kvdb.RwTx) er.R - -type version struct { - number uint32 - migration migration -} - -var ( - // dbVersions is storing all versions of database. 
If current version - // of database don't match with latest version this list will be used - // for retrieving all migration function that are need to apply to the - // current db. - dbVersions = []version{ - { - // The base DB version requires no migration. - number: 0, - migration: nil, - }, - { - // The version of the database where two new indexes - // for the update time of node and channel updates were - // added. - number: 1, - migration: migration_01_to_11.MigrateNodeAndEdgeUpdateIndex, - }, - { - // The DB version that added the invoice event time - // series. - number: 2, - migration: migration_01_to_11.MigrateInvoiceTimeSeries, - }, - { - // The DB version that updated the embedded invoice in - // outgoing payments to match the new format. - number: 3, - migration: migration_01_to_11.MigrateInvoiceTimeSeriesOutgoingPayments, - }, - { - // The version of the database where every channel - // always has two entries in the edges bucket. If - // a policy is unknown, this will be represented - // by a special byte sequence. - number: 4, - migration: migration_01_to_11.MigrateEdgePolicies, - }, - { - // The DB version where we persist each attempt to send - // an HTLC to a payment hash, and track whether the - // payment is in-flight, succeeded, or failed. - number: 5, - migration: migration_01_to_11.PaymentStatusesMigration, - }, - { - // The DB version that properly prunes stale entries - // from the edge update index. - number: 6, - migration: migration_01_to_11.MigratePruneEdgeUpdateIndex, - }, - { - // The DB version that migrates the ChannelCloseSummary - // to a format where optional fields are indicated with - // boolean flags. - number: 7, - migration: migration_01_to_11.MigrateOptionalChannelCloseSummaryFields, - }, - { - // The DB version that changes the gossiper's message - // store keys to account for the message's type and - // ShortChannelID. 
- number: 8, - migration: migration_01_to_11.MigrateGossipMessageStoreKeys, - }, - { - // The DB version where the payments and payment - // statuses are moved to being stored in a combined - // bucket. - number: 9, - migration: migration_01_to_11.MigrateOutgoingPayments, - }, - { - // The DB version where we started to store legacy - // payload information for all routes, as well as the - // optional TLV records. - number: 10, - migration: migration_01_to_11.MigrateRouteSerialization, - }, - { - // Add invoice htlc and cltv delta fields. - number: 11, - migration: migration_01_to_11.MigrateInvoices, - }, - { - // Migrate to TLV invoice bodies, add payment address - // and features, remove receipt. - number: 12, - migration: migration12.MigrateInvoiceTLV, - }, - { - // Migrate to multi-path payments. - number: 13, - migration: migration13.MigrateMPP, - }, - { - // Initialize payment address index and begin using it - // as the default index, falling back to payment hash - // index. - number: 14, - migration: mig.CreateTLB(payAddrIndexBucket), - }, - { - // Initialize payment index bucket which will be used - // to index payments by sequence number. This index will - // be used to allow more efficient ListPayments queries. - number: 15, - migration: mig.CreateTLB(paymentsIndexBucket), - }, - { - // Add our existing payments to the index bucket created - // in migration 15. - number: 16, - migration: migration16.MigrateSequenceIndex, - }, - { - // Create a top level bucket which will store extra - // information about channel closes. - number: 17, - migration: mig.CreateTLB(closeSummaryBucket), - }, - { - // Create a top level bucket which holds information - // about our peers. - number: 18, - migration: mig.CreateTLB(peersBucket), - }, - } - - // Big endian is the preferred byte order, due to cursor scans over - // integer keys iterating in order. - byteOrder = binary.BigEndian -) - -// DB is the primary datastore for the lnd daemon. 
The database stores -// information related to nodes, routing data, open/closed channels, fee -// schedules, and reputation data. -type DB struct { - kvdb.Backend - - dbPath string - graph *ChannelGraph - clock clock.Clock - dryRun bool -} - -// Update is a wrapper around walletdb.Update which calls into the extended -// backend when available. This call is needed to be able to cast DB to -// ExtendedBackend. The passed reset function is called before the start of the -// transaction and can be used to reset intermediate state. As callers may -// expect retries of the f closure (depending on the database backend used), the -// reset function will be called before each retry respectively. -func (db *DB) Update(f func(tx walletdb.ReadWriteTx) er.R, reset func()) er.R { - if v, ok := db.Backend.(kvdb.ExtendedBackend); ok { - return v.Update(f, reset) - } - - reset() - return walletdb.Update(db, f) -} - -// View is a wrapper around walletdb.View which calls into the extended -// backend when available. This call is needed to be able to cast DB to -// ExtendedBackend. The passed reset function is called before the start of the -// transaction and can be used to reset intermediate state. As callers may -// expect retries of the f closure (depending on the database backend used), the -// reset function will be called before each retry respectively. -func (db *DB) View(f func(tx walletdb.ReadTx) er.R, reset func()) er.R { - if v, ok := db.Backend.(kvdb.ExtendedBackend); ok { - return v.View(f, reset) - } - - reset() - return walletdb.View(db, f) -} - -// PrintStats calls into the extended backend if available. This call is needed -// to be able to cast DB to ExtendedBackend. -func (db *DB) PrintStats() string { - if v, ok := db.Backend.(kvdb.ExtendedBackend); ok { - return v.PrintStats() - } - - return "unimplemented" -} - -// Open opens or creates channeldb. Any necessary schemas migrations due -// to updates will take place as necessary. 
-// TODO(bhandras): deprecate this function. -func Open(dbPath string, modifiers ...OptionModifier) (*DB, er.R) { - opts := DefaultOptions() - for _, modifier := range modifiers { - modifier(&opts) - } - - backend, err := kvdb.GetBoltBackend(&kvdb.BoltBackendConfig{ - DBPath: dbPath, - DBFileName: dbName, - NoFreelistSync: opts.NoFreelistSync, - AutoCompact: opts.AutoCompact, - AutoCompactMinAge: opts.AutoCompactMinAge, - }) - if err != nil { - return nil, err - } - - db, err := CreateWithBackend(backend, modifiers...) - if err == nil { - db.dbPath = dbPath - } - return db, err -} - -// CreateWithBackend creates channeldb instance using the passed kvdb.Backend. -// Any necessary schemas migrations due to updates will take place as necessary. -func CreateWithBackend(backend kvdb.Backend, modifiers ...OptionModifier) (*DB, er.R) { - if err := initChannelDB(backend); err != nil { - return nil, err - } - - opts := DefaultOptions() - for _, modifier := range modifiers { - modifier(&opts) - } - - chanDB := &DB{ - Backend: backend, - clock: opts.clock, - dryRun: opts.dryRun, - } - chanDB.graph = newChannelGraph( - chanDB, opts.RejectCacheSize, opts.ChannelCacheSize, - ) - - // Synchronize the version of database and apply migrations if needed. - if err := chanDB.syncVersions(dbVersions); err != nil { - backend.Close() - return nil, err - } - - return chanDB, nil -} - -// Path returns the file path to the channel database. -func (d *DB) Path() string { - return d.dbPath -} - -var topLevelBuckets = [][]byte{ - openChannelBucket, - closedChannelBucket, - forwardingLogBucket, - fwdPackagesKey, - invoiceBucket, - payAddrIndexBucket, - paymentsIndexBucket, - peersBucket, - nodeInfoBucket, - nodeBucket, - edgeBucket, - edgeIndexBucket, - graphMetaBucket, - metaBucket, - closeSummaryBucket, -} - -// Wipe completely deletes all saved state within all used buckets within the -// database. The deletion is done in a single transaction, therefore this -// operation is fully atomic. 
-func (d *DB) Wipe() er.R { - return kvdb.Update(d, func(tx kvdb.RwTx) er.R { - for _, tlb := range topLevelBuckets { - err := tx.DeleteTopLevelBucket(tlb) - if err != nil && !kvdb.ErrBucketNotFound.Is(err) { - return err - } - } - return nil - }, func() {}) -} - -// createChannelDB creates and initializes a fresh version of channeldb. In -// the case that the target path has not yet been created or doesn't yet exist, -// then the path is created. Additionally, all required top-level buckets used -// within the database are created. -func initChannelDB(db kvdb.Backend) er.R { - err := kvdb.Update(db, func(tx kvdb.RwTx) er.R { - meta := &Meta{} - // Check if DB is already initialized. - err := fetchMeta(meta, tx) - if err == nil { - return nil - } - - for _, tlb := range topLevelBuckets { - if _, err := tx.CreateTopLevelBucket(tlb); err != nil { - return err - } - } - - nodes := tx.ReadWriteBucket(nodeBucket) - _, err = nodes.CreateBucket(aliasIndexBucket) - if err != nil { - return err - } - _, err = nodes.CreateBucket(nodeUpdateIndexBucket) - if err != nil { - return err - } - - edges := tx.ReadWriteBucket(edgeBucket) - if _, err := edges.CreateBucket(edgeIndexBucket); err != nil { - return err - } - if _, err := edges.CreateBucket(edgeUpdateIndexBucket); err != nil { - return err - } - if _, err := edges.CreateBucket(channelPointBucket); err != nil { - return err - } - if _, err := edges.CreateBucket(zombieBucket); err != nil { - return err - } - - graphMeta := tx.ReadWriteBucket(graphMetaBucket) - _, err = graphMeta.CreateBucket(pruneLogBucket) - if err != nil { - return err - } - - meta.DbVersionNumber = getLatestDBVersion(dbVersions) - return putMeta(meta, tx) - }, func() {}) - if err != nil { - return er.Errorf("unable to create new channeldb: %v", err) - } - - return nil -} - -// fileExists returns true if the file exists, and false otherwise. 
-func fileExists(path string) bool { - if _, err := os.Stat(path); err != nil { - if os.IsNotExist(err) { - return false - } - } - - return true -} - -// FetchOpenChannels starts a new database transaction and returns all stored -// currently active/open channels associated with the target nodeID. In the case -// that no active channels are known to have been created with this node, then a -// zero-length slice is returned. -func (d *DB) FetchOpenChannels(nodeID *btcec.PublicKey) ([]*OpenChannel, er.R) { - var channels []*OpenChannel - err := kvdb.View(d, func(tx kvdb.RTx) er.R { - var err er.R - channels, err = d.fetchOpenChannels(tx, nodeID) - return err - }, func() { - channels = nil - }) - - return channels, err -} - -// fetchOpenChannels uses and existing database transaction and returns all -// stored currently active/open channels associated with the target nodeID. In -// the case that no active channels are known to have been created with this -// node, then a zero-length slice is returned. -func (db *DB) fetchOpenChannels(tx kvdb.RTx, - nodeID *btcec.PublicKey) ([]*OpenChannel, er.R) { - - // Get the bucket dedicated to storing the metadata for open channels. - openChanBucket := tx.ReadBucket(openChannelBucket) - if openChanBucket == nil { - return nil, nil - } - - // Within this top level bucket, fetch the bucket dedicated to storing - // open channel data specific to the remote node. - pub := nodeID.SerializeCompressed() - nodeChanBucket := openChanBucket.NestedReadBucket(pub) - if nodeChanBucket == nil { - return nil, nil - } - - // Next, we'll need to go down an additional layer in order to retrieve - // the channels for each chain the node knows of. - var channels []*OpenChannel - err := nodeChanBucket.ForEach(func(chainHash, v []byte) er.R { - // If there's a value, it's not a bucket so ignore it. - if v != nil { - return nil - } - - // If we've found a valid chainhash bucket, then we'll retrieve - // that so we can extract all the channels. 
- chainBucket := nodeChanBucket.NestedReadBucket(chainHash) - if chainBucket == nil { - return er.Errorf("unable to read bucket for chain=%x", - chainHash[:]) - } - - // Finally, we both of the necessary buckets retrieved, fetch - // all the active channels related to this node. - nodeChannels, err := db.fetchNodeChannels(chainBucket) - if err != nil { - return er.Errorf("unable to read channel for "+ - "chain_hash=%x, node_key=%x: %v", - chainHash[:], pub, err) - } - - channels = append(channels, nodeChannels...) - return nil - }) - - return channels, err -} - -// fetchNodeChannels retrieves all active channels from the target chainBucket -// which is under a node's dedicated channel bucket. This function is typically -// used to fetch all the active channels related to a particular node. -func (db *DB) fetchNodeChannels(chainBucket kvdb.RBucket) ([]*OpenChannel, er.R) { - - var channels []*OpenChannel - - // A node may have channels on several chains, so for each known chain, - // we'll extract all the channels. - err := chainBucket.ForEach(func(chanPoint, v []byte) er.R { - // If there's a value, it's not a bucket so ignore it. - if v != nil { - return nil - } - - // Once we've found a valid channel bucket, we'll extract it - // from the node's chain bucket. - chanBucket := chainBucket.NestedReadBucket(chanPoint) - - var outPoint wire.OutPoint - err := readOutpoint(bytes.NewReader(chanPoint), &outPoint) - if err != nil { - return err - } - oChannel, err := fetchOpenChannel(chanBucket, &outPoint) - if err != nil { - return er.Errorf("unable to read channel data for "+ - "chan_point=%v: %v", outPoint, err) - } - oChannel.Db = db - - channels = append(channels, oChannel) - - return nil - }) - if err != nil { - return nil, err - } - - return channels, nil -} - -// FetchChannel attempts to locate a channel specified by the passed channel -// point. If the channel cannot be found, then an error will be returned. 
-func (d *DB) FetchChannel(chanPoint wire.OutPoint) (*OpenChannel, er.R) { - var ( - targetChan *OpenChannel - targetChanPoint bytes.Buffer - ) - - if err := writeOutpoint(&targetChanPoint, &chanPoint); err != nil { - return nil, err - } - - // chanScan will traverse the following bucket structure: - // * nodePub => chainHash => chanPoint - // - // At each level we go one further, ensuring that we're traversing the - // proper key (that's actually a bucket). By only reading the bucket - // structure and skipping fully decoding each channel, we save a good - // bit of CPU as we don't need to do things like decompress public - // keys. - chanScan := func(tx kvdb.RTx) er.R { - // Get the bucket dedicated to storing the metadata for open - // channels. - openChanBucket := tx.ReadBucket(openChannelBucket) - if openChanBucket == nil { - return ErrNoActiveChannels.Default() - } - - // Within the node channel bucket, are the set of node pubkeys - // we have channels with, we don't know the entire set, so - // we'll check them all. - return openChanBucket.ForEach(func(nodePub, v []byte) er.R { - // Ensure that this is a key the same size as a pubkey, - // and also that it leads directly to a bucket. - if len(nodePub) != 33 || v != nil { - return nil - } - - nodeChanBucket := openChanBucket.NestedReadBucket(nodePub) - if nodeChanBucket == nil { - return nil - } - - // The next layer down is all the chains that this node - // has channels on with us. - return nodeChanBucket.ForEach(func(chainHash, v []byte) er.R { - // If there's a value, it's not a bucket so - // ignore it. - if v != nil { - return nil - } - - chainBucket := nodeChanBucket.NestedReadBucket( - chainHash, - ) - if chainBucket == nil { - return er.Errorf("unable to read "+ - "bucket for chain=%x", chainHash[:]) - } - - // Finally we reach the leaf bucket that stores - // all the chanPoints for this node. 
- chanBucket := chainBucket.NestedReadBucket( - targetChanPoint.Bytes(), - ) - if chanBucket == nil { - return nil - } - - channel, err := fetchOpenChannel( - chanBucket, &chanPoint, - ) - if err != nil { - return err - } - - targetChan = channel - targetChan.Db = d - - return nil - }) - }) - } - - err := kvdb.View(d, chanScan, func() {}) - if err != nil { - return nil, err - } - - if targetChan != nil { - return targetChan, nil - } - - // If we can't find the channel, then we return with an error, as we - // have nothing to backup. - return nil, ErrChannelNotFound.Default() -} - -// FetchAllChannels attempts to retrieve all open channels currently stored -// within the database, including pending open, fully open and channels waiting -// for a closing transaction to confirm. -func (d *DB) FetchAllChannels() ([]*OpenChannel, er.R) { - return fetchChannels(d) -} - -// FetchAllOpenChannels will return all channels that have the funding -// transaction confirmed, and is not waiting for a closing transaction to be -// confirmed. -func (d *DB) FetchAllOpenChannels() ([]*OpenChannel, er.R) { - return fetchChannels( - d, - pendingChannelFilter(false), - waitingCloseFilter(false), - ) -} - -// FetchPendingChannels will return channels that have completed the process of -// generating and broadcasting funding transactions, but whose funding -// transactions have yet to be confirmed on the blockchain. -func (d *DB) FetchPendingChannels() ([]*OpenChannel, er.R) { - return fetchChannels(d, - pendingChannelFilter(true), - waitingCloseFilter(false), - ) -} - -// FetchWaitingCloseChannels will return all channels that have been opened, -// but are now waiting for a closing transaction to be confirmed. -// -// NOTE: This includes channels that are also pending to be opened. 
-func (d *DB) FetchWaitingCloseChannels() ([]*OpenChannel, er.R) { - return fetchChannels( - d, waitingCloseFilter(true), - ) -} - -// fetchChannelsFilter applies a filter to channels retrieved in fetchchannels. -// A set of filters can be combined to filter across multiple dimensions. -type fetchChannelsFilter func(channel *OpenChannel) bool - -// pendingChannelFilter returns a filter based on whether channels are pending -// (ie, their funding transaction still needs to confirm). If pending is false, -// channels with confirmed funding transactions are returned. -func pendingChannelFilter(pending bool) fetchChannelsFilter { - return func(channel *OpenChannel) bool { - return channel.IsPending == pending - } -} - -// waitingCloseFilter returns a filter which filters channels based on whether -// they are awaiting the confirmation of their closing transaction. If waiting -// close is true, channels that have had their closing tx broadcast are -// included. If it is false, channels that are not awaiting confirmation of -// their close transaction are returned. -func waitingCloseFilter(waitingClose bool) fetchChannelsFilter { - return func(channel *OpenChannel) bool { - // If the channel is in any other state than Default, - // then it means it is waiting to be closed. - channelWaitingClose := - channel.ChanStatus() != ChanStatusDefault - - // Include the channel if it matches the value for - // waiting close that we are filtering on. - return channelWaitingClose == waitingClose - } -} - -// fetchChannels attempts to retrieve channels currently stored in the -// database. It takes a set of filters which are applied to each channel to -// obtain a set of channels with the desired set of properties. Only channels -// which have a true value returned for *all* of the filters will be returned. -// If no filters are provided, every channel in the open channels bucket will -// be returned. 
-func fetchChannels(d *DB, filters ...fetchChannelsFilter) ([]*OpenChannel, er.R) { - var channels []*OpenChannel - - err := kvdb.View(d, func(tx kvdb.RTx) er.R { - // Get the bucket dedicated to storing the metadata for open - // channels. - openChanBucket := tx.ReadBucket(openChannelBucket) - if openChanBucket == nil { - return ErrNoActiveChannels.Default() - } - - // Next, fetch the bucket dedicated to storing metadata related - // to all nodes. All keys within this bucket are the serialized - // public keys of all our direct counterparties. - nodeMetaBucket := tx.ReadBucket(nodeInfoBucket) - if nodeMetaBucket == nil { - return er.Errorf("node bucket not created") - } - - // Finally for each node public key in the bucket, fetch all - // the channels related to this particular node. - return nodeMetaBucket.ForEach(func(k, v []byte) er.R { - nodeChanBucket := openChanBucket.NestedReadBucket(k) - if nodeChanBucket == nil { - return nil - } - - return nodeChanBucket.ForEach(func(chainHash, v []byte) er.R { - // If there's a value, it's not a bucket so - // ignore it. - if v != nil { - return nil - } - - // If we've found a valid chainhash bucket, - // then we'll retrieve that so we can extract - // all the channels. - chainBucket := nodeChanBucket.NestedReadBucket( - chainHash, - ) - if chainBucket == nil { - return er.Errorf("unable to read "+ - "bucket for chain=%x", chainHash[:]) - } - - nodeChans, err := d.fetchNodeChannels(chainBucket) - if err != nil { - return er.Errorf("unable to read "+ - "channel for chain_hash=%x, "+ - "node_key=%x: %v", chainHash[:], k, err) - } - for _, channel := range nodeChans { - // includeChannel indicates whether the channel - // meets the criteria specified by our filters. - includeChannel := true - - // Run through each filter and check whether the - // channel should be included. 
- for _, f := range filters { - // If the channel fails the filter, set - // includeChannel to false and don't bother - // checking the remaining filters. - if !f(channel) { - includeChannel = false - break - } - } - - // If the channel passed every filter, include it in - // our set of channels. - if includeChannel { - channels = append(channels, channel) - } - } - return nil - }) - - }) - }, func() { - channels = nil - }) - if err != nil { - return nil, err - } - - return channels, nil -} - -// FetchClosedChannels attempts to fetch all closed channels from the database. -// The pendingOnly bool toggles if channels that aren't yet fully closed should -// be returned in the response or not. When a channel was cooperatively closed, -// it becomes fully closed after a single confirmation. When a channel was -// forcibly closed, it will become fully closed after _all_ the pending funds -// (if any) have been swept. -func (d *DB) FetchClosedChannels(pendingOnly bool) ([]*ChannelCloseSummary, er.R) { - var chanSummaries []*ChannelCloseSummary - - if err := kvdb.View(d, func(tx kvdb.RTx) er.R { - closeBucket := tx.ReadBucket(closedChannelBucket) - if closeBucket == nil { - return ErrNoClosedChannels.Default() - } - - return closeBucket.ForEach(func(chanID []byte, summaryBytes []byte) er.R { - summaryReader := bytes.NewReader(summaryBytes) - chanSummary, err := deserializeCloseChannelSummary(summaryReader) - if err != nil { - return err - } - - // If the query specified to only include pending - // channels, then we'll skip any channels which aren't - // currently pending. - if !chanSummary.IsPending && pendingOnly { - return nil - } - - chanSummaries = append(chanSummaries, chanSummary) - return nil - }) - }, func() { - chanSummaries = nil - }); err != nil { - return nil, err - } - - return chanSummaries, nil -} - -// ErrClosedChannelNotFound signals that a closed channel could not be found in -// the channeldb. 
-var ErrClosedChannelNotFound = Err.CodeWithDetail("ErrClosedChannelNotFound", "unable to find closed channel summary") - -// FetchClosedChannel queries for a channel close summary using the channel -// point of the channel in question. -func (d *DB) FetchClosedChannel(chanID *wire.OutPoint) (*ChannelCloseSummary, er.R) { - var chanSummary *ChannelCloseSummary - if err := kvdb.View(d, func(tx kvdb.RTx) er.R { - closeBucket := tx.ReadBucket(closedChannelBucket) - if closeBucket == nil { - return ErrClosedChannelNotFound.Default() - } - - var b bytes.Buffer - var err er.R - if err = writeOutpoint(&b, chanID); err != nil { - return err - } - - summaryBytes := closeBucket.Get(b.Bytes()) - if summaryBytes == nil { - return ErrClosedChannelNotFound.Default() - } - - summaryReader := bytes.NewReader(summaryBytes) - chanSummary, err = deserializeCloseChannelSummary(summaryReader) - - return err - }, func() { - chanSummary = nil - }); err != nil { - return nil, err - } - - return chanSummary, nil -} - -// FetchClosedChannelForID queries for a channel close summary using the -// channel ID of the channel in question. -func (d *DB) FetchClosedChannelForID(cid lnwire.ChannelID) ( - *ChannelCloseSummary, er.R) { - - var chanSummary *ChannelCloseSummary - if err := kvdb.View(d, func(tx kvdb.RTx) er.R { - closeBucket := tx.ReadBucket(closedChannelBucket) - if closeBucket == nil { - return ErrClosedChannelNotFound.Default() - } - - // The first 30 bytes of the channel ID and outpoint will be - // equal. - cursor := closeBucket.ReadCursor() - op, c := cursor.Seek(cid[:30]) - - // We scan over all possible candidates for this channel ID. - for ; op != nil && bytes.Compare(cid[:30], op[:30]) <= 0; op, c = cursor.Next() { - var outPoint wire.OutPoint - err := readOutpoint(bytes.NewReader(op), &outPoint) - if err != nil { - return err - } - - // If the found outpoint does not correspond to this - // channel ID, we continue. 
- if !cid.IsChanPoint(&outPoint) { - continue - } - - // Deserialize the close summary and return. - r := bytes.NewReader(c) - chanSummary, err = deserializeCloseChannelSummary(r) - if err != nil { - return err - } - - return nil - } - return ErrClosedChannelNotFound.Default() - }, func() { - chanSummary = nil - }); err != nil { - return nil, err - } - - return chanSummary, nil -} - -// MarkChanFullyClosed marks a channel as fully closed within the database. A -// channel should be marked as fully closed if the channel was initially -// cooperatively closed and it's reached a single confirmation, or after all -// the pending funds in a channel that has been forcibly closed have been -// swept. -func (d *DB) MarkChanFullyClosed(chanPoint *wire.OutPoint) er.R { - return kvdb.Update(d, func(tx kvdb.RwTx) er.R { - var b bytes.Buffer - if err := writeOutpoint(&b, chanPoint); err != nil { - return err - } - - chanID := b.Bytes() - - closedChanBucket, err := tx.CreateTopLevelBucket( - closedChannelBucket, - ) - if err != nil { - return err - } - - chanSummaryBytes := closedChanBucket.Get(chanID) - if chanSummaryBytes == nil { - return er.Errorf("no closed channel for "+ - "chan_point=%v found", chanPoint) - } - - chanSummaryReader := bytes.NewReader(chanSummaryBytes) - chanSummary, errr := deserializeCloseChannelSummary( - chanSummaryReader, - ) - if errr != nil { - return errr - } - - chanSummary.IsPending = false - - var newSummary bytes.Buffer - errr = serializeChannelCloseSummary(&newSummary, chanSummary) - if errr != nil { - return errr - } - - err = closedChanBucket.Put(chanID, newSummary.Bytes()) - if err != nil { - return err - } - - // Now that the channel is closed, we'll check if we have any - // other open channels with this peer. If we don't we'll - // garbage collect it to ensure we don't establish persistent - // connections to peers without open channels. 
- return d.pruneLinkNode(tx, chanSummary.RemotePub) - }, func() {}) -} - -// pruneLinkNode determines whether we should garbage collect a link node from -// the database due to no longer having any open channels with it. If there are -// any left, then this acts as a no-op. -func (db *DB) pruneLinkNode(tx kvdb.RwTx, remotePub *btcec.PublicKey) er.R { - openChannels, err := db.fetchOpenChannels(tx, remotePub) - if err != nil { - return er.Errorf("unable to fetch open channels for peer %x: "+ - "%v", remotePub.SerializeCompressed(), err) - } - - if len(openChannels) > 0 { - return nil - } - - log.Infof("Pruning link node %x with zero open channels from database", - remotePub.SerializeCompressed()) - - return db.deleteLinkNode(tx, remotePub) -} - -// PruneLinkNodes attempts to prune all link nodes found within the databse with -// whom we no longer have any open channels with. -func (d *DB) PruneLinkNodes() er.R { - return kvdb.Update(d, func(tx kvdb.RwTx) er.R { - linkNodes, err := d.fetchAllLinkNodes(tx) - if err != nil { - return err - } - - for _, linkNode := range linkNodes { - err := d.pruneLinkNode(tx, linkNode.IdentityPub) - if err != nil { - return err - } - } - - return nil - }, func() {}) -} - -// ChannelShell is a shell of a channel that is meant to be used for channel -// recovery purposes. It contains a minimal OpenChannel instance along with -// addresses for that target node. -type ChannelShell struct { - // NodeAddrs the set of addresses that this node has known to be - // reachable at in the past. - NodeAddrs []net.Addr - - // Chan is a shell of an OpenChannel, it contains only the items - // required to restore the channel on disk. - Chan *OpenChannel -} - -// RestoreChannelShells is a method that allows the caller to reconstruct the -// state of an OpenChannel from the ChannelShell. 
We'll attempt to write the -// new channel to disk, create a LinkNode instance with the passed node -// addresses, and finally create an edge within the graph for the channel as -// well. This method is idempotent, so repeated calls with the same set of -// channel shells won't modify the database after the initial call. -func (d *DB) RestoreChannelShells(channelShells ...*ChannelShell) er.R { - err := kvdb.Update(d, func(tx kvdb.RwTx) er.R { - for _, channelShell := range channelShells { - channel := channelShell.Chan - - // When we make a channel, we mark that the channel has - // been restored, this will signal to other sub-systems - // to not attempt to use the channel as if it was a - // regular one. - channel.chanStatus |= ChanStatusRestored - - // First, we'll attempt to create a new open channel - // and link node for this channel. If the channel - // already exists, then in order to ensure this method - // is idempotent, we'll continue to the next step. - channel.Db = d - err := syncNewChannel( - tx, channel, channelShell.NodeAddrs, - ) - if err != nil { - return err - } - } - - return nil - }, func() {}) - if err != nil { - return err - } - - return nil -} - -// AddrsForNode consults the graph and channel database for all addresses known -// to the passed node public key. -func (d *DB) AddrsForNode(nodePub *btcec.PublicKey) ([]net.Addr, er.R) { - var ( - linkNode *LinkNode - graphNode LightningNode - ) - - dbErr := kvdb.View(d, func(tx kvdb.RTx) er.R { - var err er.R - - linkNode, err = fetchLinkNode(tx, nodePub) - if err != nil { - return err - } - - // We'll also query the graph for this peer to see if they have - // any addresses that we don't currently have stored within the - // link node database. 
- nodes := tx.ReadBucket(nodeBucket) - if nodes == nil { - return ErrGraphNotFound.Default() - } - compressedPubKey := nodePub.SerializeCompressed() - graphNode, err = fetchLightningNode(nodes, compressedPubKey) - if err != nil && !ErrGraphNodeNotFound.Is(err) { - // If the node isn't found, then that's OK, as we still - // have the link node data. - return err - } - - return nil - }, func() { - linkNode = nil - }) - if dbErr != nil { - return nil, dbErr - } - - // Now that we have both sources of addrs for this node, we'll use a - // map to de-duplicate any addresses between the two sources, and - // produce a final list of the combined addrs. - addrs := make(map[string]net.Addr) - for _, addr := range linkNode.Addresses { - addrs[addr.String()] = addr - } - for _, addr := range graphNode.Addresses { - addrs[addr.String()] = addr - } - dedupedAddrs := make([]net.Addr, 0, len(addrs)) - for _, addr := range addrs { - dedupedAddrs = append(dedupedAddrs, addr) - } - - return dedupedAddrs, nil -} - -// AbandonChannel attempts to remove the target channel from the open channel -// database. If the channel was already removed (has a closed channel entry), -// then we'll return a nil error. Otherwise, we'll insert a new close summary -// into the database. -func (db *DB) AbandonChannel(chanPoint *wire.OutPoint, bestHeight uint32) er.R { - // With the chanPoint constructed, we'll attempt to find the target - // channel in the database. If we can't find the channel, then we'll - // return the error back to the caller. - dbChan, err := db.FetchChannel(*chanPoint) - switch { - // If the channel wasn't found, then it's possible that it was already - // abandoned from the database. - case ErrChannelNotFound.Is(err): - _, closedErr := db.FetchClosedChannel(chanPoint) - if closedErr != nil { - return closedErr - } - - // If the channel was already closed, then we don't return an - // error as we'd like fro this step to be repeatable. 
- return nil - case err != nil: - return err - } - - // Now that we've found the channel, we'll populate a close summary for - // the channel, so we can store as much information for this abounded - // channel as possible. We also ensure that we set Pending to false, to - // indicate that this channel has been "fully" closed. - summary := &ChannelCloseSummary{ - CloseType: Abandoned, - ChanPoint: *chanPoint, - ChainHash: dbChan.ChainHash, - CloseHeight: bestHeight, - RemotePub: dbChan.IdentityPub, - Capacity: dbChan.Capacity, - SettledBalance: dbChan.LocalCommitment.LocalBalance.ToSatoshis(), - ShortChanID: dbChan.ShortChanID(), - RemoteCurrentRevocation: dbChan.RemoteCurrentRevocation, - RemoteNextRevocation: dbChan.RemoteNextRevocation, - LocalChanConfig: dbChan.LocalChanCfg, - } - - // Finally, we'll close the channel in the DB, and return back to the - // caller. We set ourselves as the close initiator because we abandoned - // the channel. - return dbChan.CloseChannel(summary, ChanStatusLocalCloseInitiator) -} - -// syncVersions function is used for safe db version synchronization. It -// applies migration functions to the current database and recovers the -// previous state of db if at least one error/panic appeared during migration. -func (d *DB) syncVersions(versions []version) er.R { - meta, err := d.FetchMeta(nil) - if err != nil { - if ErrMetaNotFound.Is(err) { - meta = &Meta{} - } else { - return err - } - } - - latestVersion := getLatestDBVersion(versions) - log.Infof("Checking for schema update: latest_version=%v, "+ - "db_version=%v", latestVersion, meta.DbVersionNumber) - - switch { - - // If the database reports a higher version that we are aware of, the - // user is probably trying to revert to a prior version of lnd. We fail - // here to prevent reversions and unintended corruption. 
- case meta.DbVersionNumber > latestVersion: - log.Errorf("Refusing to revert from db_version=%d to "+ - "lower version=%d", meta.DbVersionNumber, - latestVersion) - return ErrDBReversion.Default() - - // If the current database version matches the latest version number, - // then we don't need to perform any migrations. - case meta.DbVersionNumber == latestVersion: - return nil - } - - log.Infof("Performing database schema migration") - - // Otherwise, we fetch the migrations which need to applied, and - // execute them serially within a single database transaction to ensure - // the migration is atomic. - migrations, migrationVersions := getMigrationsToApply( - versions, meta.DbVersionNumber, - ) - return kvdb.Update(d, func(tx kvdb.RwTx) er.R { - for i, migration := range migrations { - if migration == nil { - continue - } - - log.Infof("Applying migration #%v", migrationVersions[i]) - - if err := migration(tx); err != nil { - log.Infof("Unable to apply migration #%v", - migrationVersions[i]) - return err - } - } - - meta.DbVersionNumber = latestVersion - err := putMeta(meta, tx) - if err != nil { - return err - } - - // In dry-run mode, return an error to prevent the transaction - // from committing. - if d.dryRun { - return ErrDryRunMigrationOK.Default() - } - - return nil - }, func() {}) -} - -// ChannelGraph returns a new instance of the directed channel graph. -func (d *DB) ChannelGraph() *ChannelGraph { - return d.graph -} - -func getLatestDBVersion(versions []version) uint32 { - return versions[len(versions)-1].number -} - -// getMigrationsToApply retrieves the migration function that should be -// applied to the database. 
-func getMigrationsToApply(versions []version, version uint32) ([]migration, []uint32) { - migrations := make([]migration, 0, len(versions)) - migrationVersions := make([]uint32, 0, len(versions)) - - for _, v := range versions { - if v.number > version { - migrations = append(migrations, v.migration) - migrationVersions = append(migrationVersions, v.number) - } - } - - return migrations, migrationVersions -} - -// fetchHistoricalChanBucket returns a the channel bucket for a given outpoint -// from the historical channel bucket. If the bucket does not exist, -// ErrNoHistoricalBucket is returned. -func fetchHistoricalChanBucket(tx kvdb.RTx, - outPoint *wire.OutPoint) (kvdb.RBucket, er.R) { - - // First fetch the top level bucket which stores all data related to - // historically stored channels. - historicalChanBucket := tx.ReadBucket(historicalChannelBucket) - if historicalChanBucket == nil { - return nil, ErrNoHistoricalBucket.Default() - } - - // With the bucket for the node and chain fetched, we can now go down - // another level, for the channel itself. - var chanPointBuf bytes.Buffer - if err := writeOutpoint(&chanPointBuf, outPoint); err != nil { - return nil, err - } - chanBucket := historicalChanBucket.NestedReadBucket(chanPointBuf.Bytes()) - if chanBucket == nil { - return nil, ErrChannelNotFound.Default() - } - - return chanBucket, nil -} - -// FetchHistoricalChannel fetches open channel data from the historical channel -// bucket. -func (db *DB) FetchHistoricalChannel(outPoint *wire.OutPoint) (*OpenChannel, er.R) { - var channel *OpenChannel - err := kvdb.View(db, func(tx kvdb.RTx) er.R { - chanBucket, err := fetchHistoricalChanBucket(tx, outPoint) - if err != nil { - return err - } - - channel, err = fetchOpenChannel(chanBucket, outPoint) - return err - }, func() { - channel = nil - }) - if err != nil { - return nil, err - } - - return channel, nil -} - -// MakeTestDB creates a new instance of the ChannelDB for testing purposes. 
-// A callback which cleans up the created temporary directories is also -// returned and intended to be executed after the test completes. -func MakeTestDB(modifiers ...OptionModifier) (*DB, func(), er.R) { - // First, create a temporary directory to be used for the duration of - // this test. - tempDirName, errr := ioutil.TempDir("", "channeldb") - if errr != nil { - return nil, nil, er.E(errr) - } - - // Next, create channeldb for the first time. - backend, backendCleanup, err := kvdb.GetTestBackend(tempDirName, "cdb") - if err != nil { - backendCleanup() - return nil, nil, err - } - - cdb, err := CreateWithBackend(backend, modifiers...) - if err != nil { - backendCleanup() - os.RemoveAll(tempDirName) - return nil, nil, err - } - - cleanUp := func() { - cdb.Close() - backendCleanup() - os.RemoveAll(tempDirName) - } - - return cdb, cleanUp, nil -} diff --git a/lnd/channeldb/db_test.go b/lnd/channeldb/db_test.go deleted file mode 100644 index c1e3158a..00000000 --- a/lnd/channeldb/db_test.go +++ /dev/null @@ -1,742 +0,0 @@ -package channeldb - -import ( - "io/ioutil" - "math" - "math/rand" - "net" - "os" - "path/filepath" - "reflect" - "testing" - - "github.com/davecgh/go-spew/spew" - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/lnd/keychain" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/shachain" - "github.com/pkt-cash/pktd/wire" - "github.com/pkt-cash/pktd/wire/protocol" -) - -func TestOpenWithCreate(t *testing.T) { - t.Parallel() - - // First, create a temporary directory to be used for the duration of - // this test. - tempDirName, errr := ioutil.TempDir("", "channeldb") - if errr != nil { - t.Fatalf("unable to create temp dir: %v", errr) - } - defer os.RemoveAll(tempDirName) - - // Next, open thereby creating channeldb for the first time. 
- dbPath := filepath.Join(tempDirName, "cdb") - backend, cleanup, err := kvdb.GetTestBackend(dbPath, "cdb") - if err != nil { - t.Fatalf("unable to get test db backend: %v", err) - } - defer cleanup() - - cdb, err := CreateWithBackend(backend) - if err != nil { - t.Fatalf("unable to create channeldb: %v", err) - } - if err := cdb.Close(); err != nil { - t.Fatalf("unable to close channeldb: %v", err) - } - - // The path should have been successfully created. - if !fileExists(dbPath) { - t.Fatalf("channeldb failed to create data directory") - } - - // Now, reopen the same db in dry run migration mode. Since we have not - // applied any migrations, this should ignore the flag and not fail. - cdb, err = Open(dbPath, OptionDryRunMigration(true)) - if err != nil { - t.Fatalf("unable to create channeldb: %v", err) - } - if err := cdb.Close(); err != nil { - t.Fatalf("unable to close channeldb: %v", err) - } -} - -// TestWipe tests that the database wipe operation completes successfully -// and that the buckets are deleted. It also checks that attempts to fetch -// information while the buckets are not set return the correct errors. -func TestWipe(t *testing.T) { - t.Parallel() - - // First, create a temporary directory to be used for the duration of - // this test. - tempDirName, errr := ioutil.TempDir("", "channeldb") - if errr != nil { - t.Fatalf("unable to create temp dir: %v", errr) - } - defer os.RemoveAll(tempDirName) - - // Next, open thereby creating channeldb for the first time. 
- dbPath := filepath.Join(tempDirName, "cdb") - backend, cleanup, err := kvdb.GetTestBackend(dbPath, "cdb") - if err != nil { - t.Fatalf("unable to get test db backend: %v", err) - } - defer cleanup() - - cdb, err := CreateWithBackend(backend) - if err != nil { - t.Fatalf("unable to create channeldb: %v", err) - } - defer cdb.Close() - - if err := cdb.Wipe(); err != nil { - t.Fatalf("unable to wipe channeldb: %v", err) - } - // Check correct errors are returned - _, err = cdb.FetchAllOpenChannels() - if !ErrNoActiveChannels.Is(err) { - t.Fatalf("fetching open channels: expected '%v' instead got '%v'", - ErrNoActiveChannels, err) - } - _, err = cdb.FetchClosedChannels(false) - if !ErrNoClosedChannels.Is(err) { - t.Fatalf("fetching closed channels: expected '%v' instead got '%v'", - ErrNoClosedChannels, err) - } -} - -// TestFetchClosedChannelForID tests that we are able to properly retrieve a -// ChannelCloseSummary from the DB given a ChannelID. -func TestFetchClosedChannelForID(t *testing.T) { - t.Parallel() - - const numChans = 101 - - cdb, cleanUp, err := MakeTestDB() - if err != nil { - t.Fatalf("unable to make test database: %v", err) - } - defer cleanUp() - - // Create the test channel state, that we will mutate the index of the - // funding point. - state := createTestChannelState(t, cdb) - - // Now run through the number of channels, and modify the outpoint index - // to create new channel IDs. - for i := uint32(0); i < numChans; i++ { - // Save the open channel to disk. - state.FundingOutpoint.Index = i - - // Write the channel to disk in a pending state. - createTestChannel( - t, cdb, - fundingPointOption(state.FundingOutpoint), - openChannelOption(), - ) - - // Close the channel. To make sure we retrieve the correct - // summary later, we make them differ in the SettledBalance. 
- closeSummary := &ChannelCloseSummary{ - ChanPoint: state.FundingOutpoint, - RemotePub: state.IdentityPub, - SettledBalance: btcutil.Amount(500 + i), - } - if err := state.CloseChannel(closeSummary); err != nil { - t.Fatalf("unable to close channel: %v", err) - } - } - - // Now run though them all again and make sure we are able to retrieve - // summaries from the DB. - for i := uint32(0); i < numChans; i++ { - state.FundingOutpoint.Index = i - - // We calculate the ChannelID and use it to fetch the summary. - cid := lnwire.NewChanIDFromOutPoint(&state.FundingOutpoint) - fetchedSummary, err := cdb.FetchClosedChannelForID(cid) - if err != nil { - t.Fatalf("unable to fetch close summary: %v", err) - } - - // Make sure we retrieved the correct one by checking the - // SettledBalance. - if fetchedSummary.SettledBalance != btcutil.Amount(500+i) { - t.Fatalf("summaries don't match: expected %v got %v", - btcutil.Amount(500+i), - fetchedSummary.SettledBalance) - } - } - - // As a final test we make sure that we get ErrClosedChannelNotFound - // for a ChannelID we didn't add to the DB. - state.FundingOutpoint.Index++ - cid := lnwire.NewChanIDFromOutPoint(&state.FundingOutpoint) - _, err = cdb.FetchClosedChannelForID(cid) - if !ErrClosedChannelNotFound.Is(err) { - t.Fatalf("expected ErrClosedChannelNotFound, instead got: %v", err) - } -} - -// TestAddrsForNode tests the we're able to properly obtain all the addresses -// for a target node. -func TestAddrsForNode(t *testing.T) { - t.Parallel() - - cdb, cleanUp, err := MakeTestDB() - if err != nil { - t.Fatalf("unable to make test database: %v", err) - } - defer cleanUp() - - graph := cdb.ChannelGraph() - - // We'll make a test vertex to insert into the database, as the source - // node, but this node will only have half the number of addresses it - // usually does. 
- testNode, err := createTestVertex(cdb) - if err != nil { - t.Fatalf("unable to create test node: %v", err) - } - testNode.Addresses = []net.Addr{testAddr} - if err := graph.SetSourceNode(testNode); err != nil { - t.Fatalf("unable to set source node: %v", err) - } - - // Next, we'll make a link node with the same pubkey, but with an - // additional address. - nodePub, err := testNode.PubKey() - if err != nil { - t.Fatalf("unable to recv node pub: %v", err) - } - linkNode := cdb.NewLinkNode( - protocol.MainNet, nodePub, anotherAddr, - ) - if err := linkNode.Sync(); err != nil { - t.Fatalf("unable to sync link node: %v", err) - } - - // Now that we've created a link node, as well as a vertex for the - // node, we'll query for all its addresses. - nodeAddrs, err := cdb.AddrsForNode(nodePub) - if err != nil { - t.Fatalf("unable to obtain node addrs: %v", err) - } - - expectedAddrs := make(map[string]struct{}) - expectedAddrs[testAddr.String()] = struct{}{} - expectedAddrs[anotherAddr.String()] = struct{}{} - - // Finally, ensure that all the expected addresses are found. - if len(nodeAddrs) != len(expectedAddrs) { - t.Fatalf("expected %v addrs, got %v", - len(expectedAddrs), len(nodeAddrs)) - } - for _, addr := range nodeAddrs { - if _, ok := expectedAddrs[addr.String()]; !ok { - t.Fatalf("unexpected addr: %v", addr) - } - } -} - -// TestFetchChannel tests that we're able to fetch an arbitrary channel from -// disk. -func TestFetchChannel(t *testing.T) { - t.Parallel() - - cdb, cleanUp, err := MakeTestDB() - if err != nil { - t.Fatalf("unable to make test database: %v", err) - } - defer cleanUp() - - // Create an open channel. - channelState := createTestChannel(t, cdb, openChannelOption()) - - // Next, attempt to fetch the channel by its chan point. - dbChannel, err := cdb.FetchChannel(channelState.FundingOutpoint) - if err != nil { - t.Fatalf("unable to fetch channel: %v", err) - } - - // The decoded channel state should be identical to what we stored - // above. 
- if !reflect.DeepEqual(channelState, dbChannel) { - t.Fatalf("channel state doesn't match:: %v vs %v", - spew.Sdump(channelState), spew.Sdump(dbChannel)) - } - - // If we attempt to query for a non-exist ante channel, then we should - // get an error. - channelState2 := createTestChannelState(t, cdb) - if err != nil { - t.Fatalf("unable to create channel state: %v", err) - } - channelState2.FundingOutpoint.Index ^= 1 - - _, err = cdb.FetchChannel(channelState2.FundingOutpoint) - if err == nil { - t.Fatalf("expected query to fail") - } -} - -func genRandomChannelShell() (*ChannelShell, er.R) { - var testPriv [32]byte - if _, err := rand.Read(testPriv[:]); err != nil { - return nil, er.E(err) - } - - _, pub := btcec.PrivKeyFromBytes(btcec.S256(), testPriv[:]) - - var chanPoint wire.OutPoint - if _, err := rand.Read(chanPoint.Hash[:]); err != nil { - return nil, er.E(err) - } - - pub.Curve = nil - - chanPoint.Index = uint32(rand.Intn(math.MaxUint16)) - - chanStatus := ChanStatusDefault | ChanStatusRestored - - var shaChainPriv [32]byte - if _, err := rand.Read(testPriv[:]); err != nil { - return nil, er.E(err) - } - revRoot, err := chainhash.NewHash(shaChainPriv[:]) - if err != nil { - return nil, err - } - shaChainProducer := shachain.NewRevocationProducer(*revRoot) - - return &ChannelShell{ - NodeAddrs: []net.Addr{&net.TCPAddr{ - IP: net.ParseIP("127.0.0.1"), - Port: 18555, - }}, - Chan: &OpenChannel{ - chanStatus: chanStatus, - ChainHash: rev, - FundingOutpoint: chanPoint, - ShortChannelID: lnwire.NewShortChanIDFromInt( - uint64(rand.Int63()), - ), - IdentityPub: pub, - LocalChanCfg: ChannelConfig{ - ChannelConstraints: ChannelConstraints{ - CsvDelay: uint16(rand.Int63()), - }, - PaymentBasePoint: keychain.KeyDescriptor{ - KeyLocator: keychain.KeyLocator{ - Family: keychain.KeyFamily(rand.Int63()), - Index: uint32(rand.Int63()), - }, - }, - }, - RemoteCurrentRevocation: pub, - IsPending: false, - RevocationStore: shachain.NewRevocationStore(), - 
RevocationProducer: shaChainProducer, - }, - }, nil -} - -// TestRestoreChannelShells tests that we're able to insert a partially channel -// populated to disk. This is useful for channel recovery purposes. We should -// find the new channel shell on disk, and also the db should be populated with -// an edge for that channel. -func TestRestoreChannelShells(t *testing.T) { - t.Parallel() - - cdb, cleanUp, err := MakeTestDB() - if err != nil { - t.Fatalf("unable to make test database: %v", err) - } - defer cleanUp() - - // First, we'll make our channel shell, it will only have the minimal - // amount of information required for us to initiate the data loss - // protection feature. - channelShell, err := genRandomChannelShell() - if err != nil { - t.Fatalf("unable to gen channel shell: %v", err) - } - - // With the channel shell constructed, we'll now insert it into the - // database with the restoration method. - if err := cdb.RestoreChannelShells(channelShell); err != nil { - t.Fatalf("unable to restore channel shell: %v", err) - } - - // Now that the channel has been inserted, we'll attempt to query for - // it to ensure we can properly locate it via various means. - // - // First, we'll attempt to query for all channels that we have with the - // node public key that was restored. - nodeChans, err := cdb.FetchOpenChannels(channelShell.Chan.IdentityPub) - if err != nil { - t.Fatalf("unable find channel: %v", err) - } - - // We should now find a single channel from the database. - if len(nodeChans) != 1 { - t.Fatalf("unable to find restored channel by node "+ - "pubkey: %v", err) - } - - // Ensure that it isn't possible to modify the commitment state machine - // of this restored channel. 
- channel := nodeChans[0] - err = channel.UpdateCommitment(nil, nil) - if !ErrNoRestoredChannelMutation.Is(err) { - t.Fatalf("able to mutate restored channel") - } - err = channel.AppendRemoteCommitChain(nil) - if !ErrNoRestoredChannelMutation.Is(err) { - t.Fatalf("able to mutate restored channel") - } - err = channel.AdvanceCommitChainTail(nil, nil) - if !ErrNoRestoredChannelMutation.Is(err) { - t.Fatalf("able to mutate restored channel") - } - - // That single channel should have the proper channel point, and also - // the expected set of flags to indicate that it was a restored - // channel. - if nodeChans[0].FundingOutpoint != channelShell.Chan.FundingOutpoint { - t.Fatalf("wrong funding outpoint: expected %v, got %v", - nodeChans[0].FundingOutpoint, - channelShell.Chan.FundingOutpoint) - } - if !nodeChans[0].HasChanStatus(ChanStatusRestored) { - t.Fatalf("node has wrong status flags: %v", - nodeChans[0].chanStatus) - } - - // We should also be able to find the channel if we query for it - // directly. - _, err = cdb.FetchChannel(channelShell.Chan.FundingOutpoint) - if err != nil { - t.Fatalf("unable to fetch channel: %v", err) - } - - // We should also be able to find the link node that was inserted by - // its public key. - linkNode, err := cdb.FetchLinkNode(channelShell.Chan.IdentityPub) - if err != nil { - t.Fatalf("unable to fetch link node: %v", err) - } - - // The node should have the same address, as specified in the channel - // shell. - if reflect.DeepEqual(linkNode.Addresses, channelShell.NodeAddrs) { - t.Fatalf("addr mismach: expected %v, got %v", - linkNode.Addresses, channelShell.NodeAddrs) - } -} - -// TestAbandonChannel tests that the AbandonChannel method is able to properly -// remove a channel from the database and add a close channel summary. If -// called after a channel has already been removed, the method shouldn't return -// an error. 
-func TestAbandonChannel(t *testing.T) { - t.Parallel() - - cdb, cleanUp, err := MakeTestDB() - if err != nil { - t.Fatalf("unable to make test database: %v", err) - } - defer cleanUp() - - // If we attempt to abandon the state of a channel that doesn't exist - // in the open or closed channel bucket, then we should receive an - // error. - err = cdb.AbandonChannel(&wire.OutPoint{}, 0) - if err == nil { - t.Fatalf("removing non-existent channel should have failed") - } - - // We'll now create a new channel in a pending state to abandon - // shortly. - chanState := createTestChannel(t, cdb) - - // We should now be able to abandon the channel without any errors. - closeHeight := uint32(11) - err = cdb.AbandonChannel(&chanState.FundingOutpoint, closeHeight) - if err != nil { - t.Fatalf("unable to abandon channel: %v", err) - } - - // At this point, the channel should no longer be found in the set of - // open channels. - _, err = cdb.FetchChannel(chanState.FundingOutpoint) - if !ErrChannelNotFound.Is(err) { - t.Fatalf("channel should not have been found: %v", err) - } - - // However we should be able to retrieve a close channel summary for - // the channel. - _, err = cdb.FetchClosedChannel(&chanState.FundingOutpoint) - if err != nil { - t.Fatalf("unable to fetch closed channel: %v", err) - } - - // Finally, if we attempt to abandon the channel again, we should get a - // nil error as the channel has already been abandoned. - err = cdb.AbandonChannel(&chanState.FundingOutpoint, closeHeight) - if err != nil { - t.Fatalf("unable to abandon channel: %v", err) - } -} - -// TestFetchChannels tests the filtering of open channels in fetchChannels. -// It tests the case where no filters are provided (which is equivalent to -// FetchAllOpenChannels) and every combination of pending and waiting close. 
-func TestFetchChannels(t *testing.T) { - // Create static channel IDs for each kind of channel retrieved by - // fetchChannels so that the expected channel IDs can be set in tests. - var ( - // Pending is a channel that is pending open, and has not had - // a close initiated. - pendingChan = lnwire.NewShortChanIDFromInt(1) - - // pendingWaitingClose is a channel that is pending open and - // has has its closing transaction broadcast. - pendingWaitingChan = lnwire.NewShortChanIDFromInt(2) - - // openChan is a channel that has confirmed on chain. - openChan = lnwire.NewShortChanIDFromInt(3) - - // openWaitingChan is a channel that has confirmed on chain, - // and it waiting for its close transaction to confirm. - openWaitingChan = lnwire.NewShortChanIDFromInt(4) - ) - - tests := []struct { - name string - filters []fetchChannelsFilter - expectedChannels map[lnwire.ShortChannelID]bool - }{ - { - name: "get all channels", - filters: []fetchChannelsFilter{}, - expectedChannels: map[lnwire.ShortChannelID]bool{ - pendingChan: true, - pendingWaitingChan: true, - openChan: true, - openWaitingChan: true, - }, - }, - { - name: "pending channels", - filters: []fetchChannelsFilter{ - pendingChannelFilter(true), - }, - expectedChannels: map[lnwire.ShortChannelID]bool{ - pendingChan: true, - pendingWaitingChan: true, - }, - }, - { - name: "open channels", - filters: []fetchChannelsFilter{ - pendingChannelFilter(false), - }, - expectedChannels: map[lnwire.ShortChannelID]bool{ - openChan: true, - openWaitingChan: true, - }, - }, - { - name: "waiting close channels", - filters: []fetchChannelsFilter{ - waitingCloseFilter(true), - }, - expectedChannels: map[lnwire.ShortChannelID]bool{ - pendingWaitingChan: true, - openWaitingChan: true, - }, - }, - { - name: "not waiting close channels", - filters: []fetchChannelsFilter{ - waitingCloseFilter(false), - }, - expectedChannels: map[lnwire.ShortChannelID]bool{ - pendingChan: true, - openChan: true, - }, - }, - { - name: "pending 
waiting", - filters: []fetchChannelsFilter{ - pendingChannelFilter(true), - waitingCloseFilter(true), - }, - expectedChannels: map[lnwire.ShortChannelID]bool{ - pendingWaitingChan: true, - }, - }, - { - name: "pending, not waiting", - filters: []fetchChannelsFilter{ - pendingChannelFilter(true), - waitingCloseFilter(false), - }, - expectedChannels: map[lnwire.ShortChannelID]bool{ - pendingChan: true, - }, - }, - { - name: "open waiting", - filters: []fetchChannelsFilter{ - pendingChannelFilter(false), - waitingCloseFilter(true), - }, - expectedChannels: map[lnwire.ShortChannelID]bool{ - openWaitingChan: true, - }, - }, - { - name: "open, not waiting", - filters: []fetchChannelsFilter{ - pendingChannelFilter(false), - waitingCloseFilter(false), - }, - expectedChannels: map[lnwire.ShortChannelID]bool{ - openChan: true, - }, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - cdb, cleanUp, err := MakeTestDB() - if err != nil { - t.Fatalf("unable to make test "+ - "database: %v", err) - } - defer cleanUp() - - // Create a pending channel that is not awaiting close. - createTestChannel( - t, cdb, channelIDOption(pendingChan), - ) - - // Create a pending channel which has has been marked as - // broadcast, indicating that its closing transaction is - // waiting to confirm. - pendingClosing := createTestChannel( - t, cdb, - channelIDOption(pendingWaitingChan), - ) - - err = pendingClosing.MarkCoopBroadcasted(nil, true) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - // Create a open channel that is not awaiting close. - createTestChannel( - t, cdb, - channelIDOption(openChan), - openChannelOption(), - ) - - // Create a open channel which has has been marked as - // broadcast, indicating that its closing transaction is - // waiting to confirm. 
- openClosing := createTestChannel( - t, cdb, - channelIDOption(openWaitingChan), - openChannelOption(), - ) - err = openClosing.MarkCoopBroadcasted(nil, true) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - channels, err := fetchChannels(cdb, test.filters...) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if len(channels) != len(test.expectedChannels) { - t.Fatalf("expected: %v channels, "+ - "got: %v", len(test.expectedChannels), - len(channels)) - } - - for _, ch := range channels { - _, ok := test.expectedChannels[ch.ShortChannelID] - if !ok { - t.Fatalf("fetch channels unexpected "+ - "channel: %v", ch.ShortChannelID) - } - } - }) - } -} - -// TestFetchHistoricalChannel tests lookup of historical channels. -func TestFetchHistoricalChannel(t *testing.T) { - cdb, cleanUp, err := MakeTestDB() - if err != nil { - t.Fatalf("unable to make test database: %v", err) - } - defer cleanUp() - - // Create a an open channel in the database. - channel := createTestChannel(t, cdb, openChannelOption()) - - // First, try to lookup a channel when the bucket does not - // exist. - _, err = cdb.FetchHistoricalChannel(&channel.FundingOutpoint) - if !ErrNoHistoricalBucket.Is(err) { - t.Fatalf("expected no bucket, got: %v", err) - } - - // Close the channel so that it will be written to the historical - // bucket. The values provided in the channel close summary are the - // minimum required for this call to run without panicking. 
- if err := channel.CloseChannel(&ChannelCloseSummary{ - ChanPoint: channel.FundingOutpoint, - RemotePub: channel.IdentityPub, - SettledBalance: btcutil.Amount(500), - }); err != nil { - t.Fatalf("unexpected error closing channel: %v", err) - } - - histChannel, err := cdb.FetchHistoricalChannel(&channel.FundingOutpoint) - if err != nil { - t.Fatalf("unexepected error getting channel: %v", err) - } - - // Set the db on our channel to nil so that we can check that all other - // fields on the channel equal those on the historical channel. - channel.Db = nil - - if !reflect.DeepEqual(histChannel, channel) { - t.Fatalf("expected: %v, got: %v", channel, histChannel) - } - - // Create an outpoint that will not be in the db and look it up. - badOutpoint := &wire.OutPoint{ - Hash: channel.FundingOutpoint.Hash, - Index: channel.FundingOutpoint.Index + 1, - } - _, err = cdb.FetchHistoricalChannel(badOutpoint) - if !ErrChannelNotFound.Is(err) { - t.Fatalf("expected chan not found, got: %v", err) - } - -} diff --git a/lnd/channeldb/doc.go b/lnd/channeldb/doc.go deleted file mode 100644 index d03b3406..00000000 --- a/lnd/channeldb/doc.go +++ /dev/null @@ -1 +0,0 @@ -package channeldb diff --git a/lnd/channeldb/duplicate_payments.go b/lnd/channeldb/duplicate_payments.go deleted file mode 100644 index 87dea620..00000000 --- a/lnd/channeldb/duplicate_payments.go +++ /dev/null @@ -1,247 +0,0 @@ -package channeldb - -import ( - "bytes" - "encoding/binary" - "io" - "time" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/lnd/lntypes" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/routing/route" -) - -var ( - // duplicatePaymentsBucket is the name of a optional sub-bucket within - // the payment hash bucket, that is used to hold duplicate payments to a - // payment hash. 
This is needed to support information from earlier - // versions of lnd, where it was possible to pay to a payment hash more - // than once. - duplicatePaymentsBucket = []byte("payment-duplicate-bucket") - - // duplicatePaymentSettleInfoKey is a key used in the payment's - // sub-bucket to store the settle info of the payment. - duplicatePaymentSettleInfoKey = []byte("payment-settle-info") - - // duplicatePaymentAttemptInfoKey is a key used in the payment's - // sub-bucket to store the info about the latest attempt that was done - // for the payment in question. - duplicatePaymentAttemptInfoKey = []byte("payment-attempt-info") - - // duplicatePaymentCreationInfoKey is a key used in the payment's - // sub-bucket to store the creation info of the payment. - duplicatePaymentCreationInfoKey = []byte("payment-creation-info") - - // duplicatePaymentFailInfoKey is a key used in the payment's sub-bucket - // to store information about the reason a payment failed. - duplicatePaymentFailInfoKey = []byte("payment-fail-info") - - // duplicatePaymentSequenceKey is a key used in the payment's sub-bucket - // to store the sequence number of the payment. - duplicatePaymentSequenceKey = []byte("payment-sequence-key") -) - -// duplicateHTLCAttemptInfo contains static information about a specific HTLC -// attempt for a payment. This information is used by the router to handle any -// errors coming back after an attempt is made, and to query the switch about -// the status of the attempt. -type duplicateHTLCAttemptInfo struct { - // attemptID is the unique ID used for this attempt. - attemptID uint64 - - // sessionKey is the ephemeral key used for this attempt. - sessionKey *btcec.PrivateKey - - // route is the route attempted to send the HTLC. - route route.Route -} - -// fetchDuplicatePaymentStatus fetches the payment status of the payment. If the -// payment isn't found, it will default to "StatusUnknown". 
-func fetchDuplicatePaymentStatus(bucket kvdb.RBucket) PaymentStatus { - if bucket.Get(duplicatePaymentSettleInfoKey) != nil { - return StatusSucceeded - } - - if bucket.Get(duplicatePaymentFailInfoKey) != nil { - return StatusFailed - } - - if bucket.Get(duplicatePaymentCreationInfoKey) != nil { - return StatusInFlight - } - - return StatusUnknown -} - -func deserializeDuplicateHTLCAttemptInfo(r io.Reader) ( - *duplicateHTLCAttemptInfo, er.R) { - - a := &duplicateHTLCAttemptInfo{} - err := ReadElements(r, &a.attemptID, &a.sessionKey) - if err != nil { - return nil, err - } - a.route, err = DeserializeRoute(r) - if err != nil { - return nil, err - } - return a, nil -} - -func deserializeDuplicatePaymentCreationInfo(r io.Reader) ( - *PaymentCreationInfo, er.R) { - - var scratch [8]byte - - c := &PaymentCreationInfo{} - - if _, err := util.ReadFull(r, c.PaymentHash[:]); err != nil { - return nil, err - } - - if _, err := util.ReadFull(r, scratch[:]); err != nil { - return nil, err - } - c.Value = lnwire.MilliSatoshi(byteOrder.Uint64(scratch[:])) - - if _, err := util.ReadFull(r, scratch[:]); err != nil { - return nil, err - } - c.CreationTime = time.Unix(int64(byteOrder.Uint64(scratch[:])), 0) - - if _, err := util.ReadFull(r, scratch[:4]); err != nil { - return nil, err - } - - reqLen := byteOrder.Uint32(scratch[:4]) - payReq := make([]byte, reqLen) - if reqLen > 0 { - if _, err := util.ReadFull(r, payReq); err != nil { - return nil, err - } - } - c.PaymentRequest = payReq - - return c, nil -} - -func fetchDuplicatePayment(bucket kvdb.RBucket) (*MPPayment, er.R) { - seqBytes := bucket.Get(duplicatePaymentSequenceKey) - if seqBytes == nil { - return nil, er.Errorf("sequence number not found") - } - - sequenceNum := binary.BigEndian.Uint64(seqBytes) - - // Get the payment status. - paymentStatus := fetchDuplicatePaymentStatus(bucket) - - // Get the PaymentCreationInfo. 
- b := bucket.Get(duplicatePaymentCreationInfoKey) - if b == nil { - return nil, er.Errorf("creation info not found") - } - - r := bytes.NewReader(b) - creationInfo, err := deserializeDuplicatePaymentCreationInfo(r) - if err != nil { - return nil, err - - } - - // Get failure reason if available. - var failureReason *FailureReason - b = bucket.Get(duplicatePaymentFailInfoKey) - if b != nil { - reason := FailureReason(b[0]) - failureReason = &reason - } - - payment := &MPPayment{ - SequenceNum: sequenceNum, - Info: creationInfo, - FailureReason: failureReason, - Status: paymentStatus, - } - - // Get the HTLCAttemptInfo. It can be absent. - b = bucket.Get(duplicatePaymentAttemptInfoKey) - if b != nil { - r = bytes.NewReader(b) - attempt, err := deserializeDuplicateHTLCAttemptInfo(r) - if err != nil { - return nil, err - } - - htlc := HTLCAttempt{ - HTLCAttemptInfo: HTLCAttemptInfo{ - AttemptID: attempt.attemptID, - Route: attempt.route, - SessionKey: attempt.sessionKey, - }, - } - - // Get the payment preimage. This is only found for - // successful payments. - b = bucket.Get(duplicatePaymentSettleInfoKey) - if b != nil { - var preimg lntypes.Preimage - copy(preimg[:], b) - - htlc.Settle = &HTLCSettleInfo{ - Preimage: preimg, - SettleTime: time.Time{}, - } - } else { - // Otherwise the payment must have failed. - htlc.Failure = &HTLCFailInfo{ - FailTime: time.Time{}, - } - } - - payment.HTLCs = []HTLCAttempt{htlc} - } - - return payment, nil -} - -func fetchDuplicatePayments(paymentHashBucket kvdb.RBucket) ([]*MPPayment, - er.R) { - - var payments []*MPPayment - - // For older versions of lnd, duplicate payments to a payment has was - // possible. These will be found in a sub-bucket indexed by their - // sequence number if available. 
- dup := paymentHashBucket.NestedReadBucket(duplicatePaymentsBucket) - if dup == nil { - return nil, nil - } - - err := dup.ForEach(func(k, v []byte) er.R { - subBucket := dup.NestedReadBucket(k) - if subBucket == nil { - // We one bucket for each duplicate to be found. - return er.Errorf("non bucket element" + - "in duplicate bucket") - } - - p, err := fetchDuplicatePayment(subBucket) - if err != nil { - return err - } - - payments = append(payments, p) - return nil - }) - if err != nil { - return nil, err - } - - return payments, nil -} diff --git a/lnd/channeldb/error.go b/lnd/channeldb/error.go deleted file mode 100644 index 6b21edf0..00000000 --- a/lnd/channeldb/error.go +++ /dev/null @@ -1,162 +0,0 @@ -package channeldb - -import ( - "fmt" - - "github.com/pkt-cash/pktd/btcutil/er" -) - -var ( - Err = er.NewErrorType("lnd.channeldb") - // ErrNoChanDBExists is returned when a channel bucket hasn't been - // created. - ErrNoChanDBExists = Err.CodeWithDetail("ErrNoChanDBExists", - "channel db has not yet been created") - - // ErrNoHistoricalBucket is returned when the historical channel bucket - // not been created yet. - ErrNoHistoricalBucket = Err.CodeWithDetail("ErrNoHistoricalBucket", - "historical channel bucket has not yet been created") - - // ErrDBReversion is returned when detecting an attempt to revert to a - // prior database version. - ErrDBReversion = Err.CodeWithDetail("ErrDBReversion", - "channel db cannot revert to prior version") - - // ErrLinkNodesNotFound is returned when node info bucket hasn't been - // created. - ErrLinkNodesNotFound = Err.CodeWithDetail("ErrLinkNodesNotFound", - "no link nodes exist") - - // ErrNoActiveChannels is returned when there is no active (open) - // channels within the database. - ErrNoActiveChannels = Err.CodeWithDetail("ErrNoActiveChannels", - "no active channels exist") - - // ErrNoPastDeltas is returned when the channel delta bucket hasn't been - // created. 
- ErrNoPastDeltas = Err.CodeWithDetail("ErrNoPastDeltas", - "channel has no recorded deltas") - - // ErrInvoiceNotFound is returned when a targeted invoice can't be - // found. - ErrInvoiceNotFound = Err.CodeWithDetail("ErrInvoiceNotFound", - "unable to locate invoice") - - // ErrNoInvoicesCreated is returned when we don't have invoices in - // our database to return. - ErrNoInvoicesCreated = Err.CodeWithDetail("ErrNoInvoicesCreated", - "there are no existing invoices") - - // ErrDuplicateInvoice is returned when an invoice with the target - // payment hash already exists. - ErrDuplicateInvoice = Err.CodeWithDetail("ErrDuplicateInvoice", - "invoice with payment hash already exists") - - // ErrDuplicatePayAddr is returned when an invoice with the target - // payment addr already exists. - ErrDuplicatePayAddr = Err.CodeWithDetail("ErrDuplicatePayAddr", - "invoice with payemnt addr already exists") - - // ErrInvRefEquivocation is returned when an InvoiceRef targets - // multiple, distinct invoices. - ErrInvRefEquivocation = Err.CodeWithDetail("ErrInvRefEquivocation", "inv ref matches multiple invoices") - - // ErrNoPaymentsCreated is returned when bucket of payments hasn't been - // created. - ErrNoPaymentsCreated = Err.CodeWithDetail("ErrNoPaymentsCreated", - "there are no existing payments") - - // ErrNodeNotFound is returned when node bucket exists, but node with - // specific identity can't be found. - ErrNodeNotFound = Err.CodeWithDetail("ErrNodeNotFound", - "link node with target identity not found") - - // ErrChannelNotFound is returned when we attempt to locate a channel - // for a specific chain, but it is not found. - ErrChannelNotFound = Err.CodeWithDetail("ErrChannelNotFound", - "channel not found") - - // ErrMetaNotFound is returned when meta bucket hasn't been - // created. 
- ErrMetaNotFound = Err.CodeWithDetail("ErrMetaNotFound", - "unable to locate meta information") - - // ErrGraphNotFound is returned when at least one of the components of - // graph doesn't exist. - ErrGraphNotFound = Err.CodeWithDetail("ErrGraphNotFound", - "graph bucket not initialized") - - // ErrGraphNeverPruned is returned when graph was never pruned. - ErrGraphNeverPruned = Err.CodeWithDetail("ErrGraphNeverPruned", - "graph never pruned") - - // ErrSourceNodeNotSet is returned if the source node of the graph - // hasn't been added The source node is the center node within a - // star-graph. - ErrSourceNodeNotSet = Err.CodeWithDetail("ErrSourceNodeNotSet", - "source node does not exist") - - // ErrGraphNodesNotFound is returned in case none of the nodes has - // been added in graph node bucket. - ErrGraphNodesNotFound = Err.CodeWithDetail("ErrGraphNodesNotFound", - "no graph nodes exist") - - // ErrGraphNoEdgesFound is returned in case of none of the channel/edges - // has been added in graph edge bucket. - ErrGraphNoEdgesFound = Err.CodeWithDetail("ErrGraphNoEdgesFound", - "no graph edges exist") - - // ErrGraphNodeNotFound is returned when we're unable to find the target - // node. - ErrGraphNodeNotFound = Err.CodeWithDetail("ErrGraphNodeNotFound", - "unable to find node") - - // ErrEdgeNotFound is returned when an edge for the target chanID - // can't be found. - ErrEdgeNotFound = Err.CodeWithDetail("ErrEdgeNotFound", - "edge not found") - - // ErrZombieEdge is an error returned when we attempt to look up an edge - // but it is marked as a zombie within the zombie index. - ErrZombieEdge = Err.CodeWithDetail("ErrZombieEdge", "edge marked as zombie") - - // ErrEdgeAlreadyExist is returned when edge with specific - // channel id can't be added because it already exist. - ErrEdgeAlreadyExist = Err.CodeWithDetail("ErrEdgeAlreadyExist", - "edge already exist") - - // ErrNodeAliasNotFound is returned when alias for node can't be found. 
- ErrNodeAliasNotFound = Err.CodeWithDetail("ErrNodeAliasNotFound", - "alias for node not found") - - // ErrUnknownAddressType is returned when a node's addressType is not - // an expected value. - ErrUnknownAddressType = Err.CodeWithDetail("ErrUnknownAddressType", - "address type cannot be resolved") - - // ErrNoClosedChannels is returned when a node is queries for all the - // channels it has closed, but it hasn't yet closed any channels. - ErrNoClosedChannels = Err.CodeWithDetail("ErrNoClosedChannels", - "no channel have been closed yet") - - // ErrNoForwardingEvents is returned in the case that a query fails due - // to the log not having any recorded events. - ErrNoForwardingEvents = Err.CodeWithDetail("ErrNoForwardingEvents", - "no recorded forwarding events") - - // ErrEdgePolicyOptionalFieldNotFound is an error returned if a channel - // policy field is not found in the db even though its message flags - // indicate it should be. - ErrEdgePolicyOptionalFieldNotFound = Err.CodeWithDetail("ErrEdgePolicyOptionalFieldNotFound", - "optional field not present") - - // ErrChanAlreadyExists is return when the caller attempts to create a - // channel with a channel point that is already present in the - // database. 
- ErrChanAlreadyExists = Err.CodeWithDetail("ErrChanAlreadyExists", - "channel already exists") - - ErrTooManyExtraOpaqueBytes = Err.CodeWithDetail("ErrTooManyExtraOpaqueBytes", - fmt.Sprintf("max allowed number of opaque bytes is %v", MaxAllowedExtraOpaqueBytes)) -) diff --git a/lnd/channeldb/fees.go b/lnd/channeldb/fees.go deleted file mode 100644 index d03b3406..00000000 --- a/lnd/channeldb/fees.go +++ /dev/null @@ -1 +0,0 @@ -package channeldb diff --git a/lnd/channeldb/forwarding_log.go b/lnd/channeldb/forwarding_log.go deleted file mode 100644 index aee3e78d..00000000 --- a/lnd/channeldb/forwarding_log.go +++ /dev/null @@ -1,342 +0,0 @@ -package channeldb - -import ( - "bytes" - "io" - "sort" - "time" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/pktwallet/walletdb" -) - -var ( - // forwardingLogBucket is the bucket that we'll use to store the - // forwarding log. The forwarding log contains a time series database - // of the forwarding history of a lightning daemon. Each key within the - // bucket is a timestamp (in nano seconds since the unix epoch), and - // the value a slice of a forwarding event for that timestamp. - forwardingLogBucket = []byte("circuit-fwd-log") -) - -const ( - // forwardingEventSize is the size of a forwarding event. The breakdown - // is as follows: - // - // * 8 byte incoming chan ID || 8 byte outgoing chan ID || 8 byte value in - // || 8 byte value out - // - // From the value in and value out, callers can easily compute the - // total fee extract from a forwarding event. - forwardingEventSize = 32 - - // MaxResponseEvents is the max number of forwarding events that will - // be returned by a single query response. This size was selected to - // safely remain under gRPC's 4MiB message size response limit. 
As each - // full forwarding event (including the timestamp) is 40 bytes, we can - // safely return 50k entries in a single response. - MaxResponseEvents = 50000 -) - -// ForwardingLog returns an instance of the ForwardingLog object backed by the -// target database instance. -func (d *DB) ForwardingLog() *ForwardingLog { - return &ForwardingLog{ - db: d, - } -} - -// ForwardingLog is a time series database that logs the fulfilment of payment -// circuits by a lightning network daemon. The log contains a series of -// forwarding events which map a timestamp to a forwarding event. A forwarding -// event describes which channels were used to create+settle a circuit, and the -// amount involved. Subtracting the outgoing amount from the incoming amount -// reveals the fee charged for the forwarding service. -type ForwardingLog struct { - db *DB -} - -// ForwardingEvent is an event in the forwarding log's time series. Each -// forwarding event logs the creation and tear-down of a payment circuit. A -// circuit is created once an incoming HTLC has been fully forwarded, and -// destroyed once the payment has been settled. -type ForwardingEvent struct { - // Timestamp is the settlement time of this payment circuit. - Timestamp time.Time - - // IncomingChanID is the incoming channel ID of the payment circuit. - IncomingChanID lnwire.ShortChannelID - - // OutgoingChanID is the outgoing channel ID of the payment circuit. - OutgoingChanID lnwire.ShortChannelID - - // AmtIn is the amount of the incoming HTLC. Subtracting this from the - // outgoing amount gives the total fees of this payment circuit. - AmtIn lnwire.MilliSatoshi - - // AmtOut is the amount of the outgoing HTLC. Subtracting the incoming - // amount from this gives the total fees for this payment circuit. - AmtOut lnwire.MilliSatoshi -} - -// encodeForwardingEvent writes out the target forwarding event to the passed -// io.Writer, using the expected DB format. 
Note that the timestamp isn't -// serialized as this will be the key value within the bucket. -func encodeForwardingEvent(w io.Writer, f *ForwardingEvent) er.R { - return WriteElements( - w, f.IncomingChanID, f.OutgoingChanID, f.AmtIn, f.AmtOut, - ) -} - -// decodeForwardingEvent attempts to decode the raw bytes of a serialized -// forwarding event into the target ForwardingEvent. Note that the timestamp -// won't be decoded, as the caller is expected to set this due to the bucket -// structure of the forwarding log. -func decodeForwardingEvent(r io.Reader, f *ForwardingEvent) er.R { - return ReadElements( - r, &f.IncomingChanID, &f.OutgoingChanID, &f.AmtIn, &f.AmtOut, - ) -} - -// AddForwardingEvents adds a series of forwarding events to the database. -// Before inserting, the set of events will be sorted according to their -// timestamp. This ensures that all writes to disk are sequential. -func (f *ForwardingLog) AddForwardingEvents(events []ForwardingEvent) er.R { - // Before we create the database transaction, we'll ensure that the set - // of forwarding events are properly sorted according to their - // timestamp and that no duplicate timestamps exist to avoid collisions - // in the key we are going to store the events under. - makeUniqueTimestamps(events) - - var timestamp [8]byte - - return kvdb.Batch(f.db.Backend, func(tx kvdb.RwTx) er.R { - // First, we'll fetch the bucket that stores our time series - // log. - logBucket, err := tx.CreateTopLevelBucket( - forwardingLogBucket, - ) - if err != nil { - return err - } - - // With the bucket obtained, we can now begin to write out the - // series of events. - for _, event := range events { - err := storeEvent(logBucket, event, timestamp[:]) - if err != nil { - return err - } - } - - return nil - }) -} - -// storeEvent tries to store a forwarding event into the given bucket by trying -// to avoid collisions. 
If a key for the event timestamp already exists in the -// database, the timestamp is incremented in nanosecond intervals until a "free" -// slot is found. -func storeEvent(bucket walletdb.ReadWriteBucket, event ForwardingEvent, - timestampScratchSpace []byte) er.R { - - // First, we'll serialize this timestamp into our - // timestamp buffer. - byteOrder.PutUint64( - timestampScratchSpace, uint64(event.Timestamp.UnixNano()), - ) - - // Next we'll loop until we find a "free" slot in the bucket to store - // the event under. This should almost never happen unless we're running - // on a system that has a very bad system clock that doesn't properly - // resolve to nanosecond scale. We try up to 100 times (which would come - // to a maximum shift of 0.1 microsecond which is acceptable for most - // use cases). If we don't find a free slot, we just give up and let - // the collision happen. Something must be wrong with the data in that - // case, even on a very fast machine forwarding payments _will_ take a - // few microseconds at least so we should find a nanosecond slot - // somewhere. - const maxTries = 100 - tries := 0 - for tries < maxTries { - val := bucket.Get(timestampScratchSpace) - if val == nil { - break - } - - // Collision, try the next nanosecond timestamp. - nextNano := event.Timestamp.UnixNano() + 1 - event.Timestamp = time.Unix(0, nextNano) - byteOrder.PutUint64(timestampScratchSpace, uint64(nextNano)) - tries++ - } - - // With the key encoded, we'll then encode the event - // into our buffer, then write it out to disk. - var eventBytes [forwardingEventSize]byte - eventBuf := bytes.NewBuffer(eventBytes[0:0:forwardingEventSize]) - err := encodeForwardingEvent(eventBuf, &event) - if err != nil { - return err - } - return bucket.Put(timestampScratchSpace, eventBuf.Bytes()) -} - -// ForwardingEventQuery represents a query to the forwarding log payment -// circuit time series database. 
The query allows a caller to retrieve all -// records for a particular time slice, offset in that time slice, limiting the -// total number of responses returned. -type ForwardingEventQuery struct { - // StartTime is the start time of the time slice. - StartTime time.Time - - // EndTime is the end time of the time slice. - EndTime time.Time - - // IndexOffset is the offset within the time slice to start at. This - // can be used to start the response at a particular record. - IndexOffset uint32 - - // NumMaxEvents is the max number of events to return. - NumMaxEvents uint32 -} - -// ForwardingLogTimeSlice is the response to a forwarding query. It includes -// the original query, the set events that match the query, and an integer -// which represents the offset index of the last item in the set of retuned -// events. This integer allows callers to resume their query using this offset -// in the event that the query's response exceeds the max number of returnable -// events. -type ForwardingLogTimeSlice struct { - ForwardingEventQuery - - // ForwardingEvents is the set of events in our time series that answer - // the query embedded above. - ForwardingEvents []ForwardingEvent - - // LastIndexOffset is the index of the last element in the set of - // returned ForwardingEvents above. Callers can use this to resume - // their query in the event that the time slice has too many events to - // fit into a single response. - LastIndexOffset uint32 -} - -// Query allows a caller to query the forwarding event time series for a -// particular time slice. The caller can control the precise time as well as -// the number of events to be returned. -// -// TODO(roasbeef): rename? -func (f *ForwardingLog) Query(q ForwardingEventQuery) (ForwardingLogTimeSlice, er.R) { - var resp ForwardingLogTimeSlice - - // If the user provided an index offset, then we'll not know how many - // records we need to skip. 
We'll also keep track of the record offset - // as that's part of the final return value. - recordsToSkip := q.IndexOffset - recordOffset := q.IndexOffset - - err := kvdb.View(f.db, func(tx kvdb.RTx) er.R { - // If the bucket wasn't found, then there aren't any events to - // be returned. - logBucket := tx.ReadBucket(forwardingLogBucket) - if logBucket == nil { - return ErrNoForwardingEvents.Default() - } - - // We'll be using a cursor to seek into the database, so we'll - // populate byte slices that represent the start of the key - // space we're interested in, and the end. - var startTime, endTime [8]byte - byteOrder.PutUint64(startTime[:], uint64(q.StartTime.UnixNano())) - byteOrder.PutUint64(endTime[:], uint64(q.EndTime.UnixNano())) - - // If we know that a set of log events exists, then we'll begin - // our seek through the log in order to satisfy the query. - // We'll continue until either we reach the end of the range, - // or reach our max number of events. - logCursor := logBucket.ReadCursor() - timestamp, events := logCursor.Seek(startTime[:]) - for ; timestamp != nil && bytes.Compare(timestamp, endTime[:]) <= 0; timestamp, events = logCursor.Next() { - // If our current return payload exceeds the max number - // of events, then we'll exit now. - if uint32(len(resp.ForwardingEvents)) >= q.NumMaxEvents { - return nil - } - - // If we're not yet past the user defined offset, then - // we'll continue to seek forward. - if recordsToSkip > 0 { - recordsToSkip-- - continue - } - - currentTime := time.Unix( - 0, int64(byteOrder.Uint64(timestamp)), - ) - - // At this point, we've skipped enough records to start - // to collate our query. For each record, we'll - // increment the final record offset so the querier can - // utilize pagination to seek further. 
- readBuf := bytes.NewReader(events) - for readBuf.Len() != 0 { - var event ForwardingEvent - err := decodeForwardingEvent(readBuf, &event) - if err != nil { - return err - } - - event.Timestamp = currentTime - resp.ForwardingEvents = append(resp.ForwardingEvents, event) - - recordOffset++ - } - } - - return nil - }, func() { - resp = ForwardingLogTimeSlice{ - ForwardingEventQuery: q, - } - }) - if err != nil && !ErrNoForwardingEvents.Is(err) { - return ForwardingLogTimeSlice{}, err - } - - resp.LastIndexOffset = recordOffset - - return resp, nil -} - -// makeUniqueTimestamps takes a slice of forwarding events, sorts it by the -// event timestamps and then makes sure there are no duplicates in the -// timestamps. If duplicates are found, some of the timestamps are increased on -// the nanosecond scale until only unique values remain. This is a fix to -// address the problem that in some environments (looking at you, Windows) the -// system clock has such a bad resolution that two serial invocations of -// time.Now() might return the same timestamp, even if some time has elapsed -// between the calls. -func makeUniqueTimestamps(events []ForwardingEvent) { - sort.Slice(events, func(i, j int) bool { - return events[i].Timestamp.Before(events[j].Timestamp) - }) - - // Now that we know the events are sorted by timestamp, we can go - // through the list and fix all duplicates until only unique values - // remain. - for outer := 0; outer < len(events)-1; outer++ { - current := events[outer].Timestamp.UnixNano() - next := events[outer+1].Timestamp.UnixNano() - - // We initially sorted the slice. So if the current is now - // greater or equal to the next one, it's either because it's a - // duplicate or because we increased the current in the last - // iteration. 
- if current >= next { - next = current + 1 - events[outer+1].Timestamp = time.Unix(0, next) - } - } -} diff --git a/lnd/channeldb/forwarding_log_test.go b/lnd/channeldb/forwarding_log_test.go deleted file mode 100644 index e41e984c..00000000 --- a/lnd/channeldb/forwarding_log_test.go +++ /dev/null @@ -1,383 +0,0 @@ -package channeldb - -import ( - "math/rand" - "reflect" - "testing" - "time" - - "github.com/davecgh/go-spew/spew" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/stretchr/testify/assert" -) - -// TestForwardingLogBasicStorageAndQuery tests that we're able to store and -// then query for items that have previously been added to the event log. -func TestForwardingLogBasicStorageAndQuery(t *testing.T) { - t.Parallel() - - // First, we'll set up a test database, and use that to instantiate the - // forwarding event log that we'll be using for the duration of the - // test. - db, cleanUp, err := MakeTestDB() - if err != nil { - t.Fatalf("unable to make test db: %v", err) - } - defer cleanUp() - - log := ForwardingLog{ - db: db, - } - - initialTime := time.Unix(1234, 0) - timestamp := time.Unix(1234, 0) - - // We'll create 100 random events, which each event being spaced 10 - // minutes after the prior event. - numEvents := 100 - events := make([]ForwardingEvent, numEvents) - for i := 0; i < numEvents; i++ { - events[i] = ForwardingEvent{ - Timestamp: timestamp, - IncomingChanID: lnwire.NewShortChanIDFromInt(uint64(rand.Int63())), - OutgoingChanID: lnwire.NewShortChanIDFromInt(uint64(rand.Int63())), - AmtIn: lnwire.MilliSatoshi(rand.Int63()), - AmtOut: lnwire.MilliSatoshi(rand.Int63()), - } - - timestamp = timestamp.Add(time.Minute * 10) - } - - // Now that all of our set of events constructed, we'll add them to the - // database in a batch manner. - if err := log.AddForwardingEvents(events); err != nil { - t.Fatalf("unable to add events: %v", err) - } - - // With our events added we'll now construct a basic query to retrieve - // all of the events. 
- eventQuery := ForwardingEventQuery{ - StartTime: initialTime, - EndTime: timestamp, - IndexOffset: 0, - NumMaxEvents: 1000, - } - timeSlice, err := log.Query(eventQuery) - if err != nil { - t.Fatalf("unable to query for events: %v", err) - } - - // The set of returned events should match identically, as they should - // be returned in sorted order. - if !reflect.DeepEqual(events, timeSlice.ForwardingEvents) { - t.Fatalf("event mismatch: expected %v vs %v", - spew.Sdump(events), spew.Sdump(timeSlice.ForwardingEvents)) - } - - // The offset index of the final entry should be numEvents, so the - // number of total events we've written. - if timeSlice.LastIndexOffset != uint32(numEvents) { - t.Fatalf("wrong final offset: expected %v, got %v", - timeSlice.LastIndexOffset, numEvents) - } -} - -// TestForwardingLogQueryOptions tests that the query offset works properly. So -// if we add a series of events, then we should be able to seek within the -// timeslice accordingly. This exercises the index offset and num max event -// field in the query, and also the last index offset field int he response. -func TestForwardingLogQueryOptions(t *testing.T) { - t.Parallel() - - // First, we'll set up a test database, and use that to instantiate the - // forwarding event log that we'll be using for the duration of the - // test. - db, cleanUp, err := MakeTestDB() - if err != nil { - t.Fatalf("unable to make test db: %v", err) - } - defer cleanUp() - - log := ForwardingLog{ - db: db, - } - - initialTime := time.Unix(1234, 0) - endTime := time.Unix(1234, 0) - - // We'll create 20 random events, which each event being spaced 10 - // minutes after the prior event. 
- numEvents := 20 - events := make([]ForwardingEvent, numEvents) - for i := 0; i < numEvents; i++ { - events[i] = ForwardingEvent{ - Timestamp: endTime, - IncomingChanID: lnwire.NewShortChanIDFromInt(uint64(rand.Int63())), - OutgoingChanID: lnwire.NewShortChanIDFromInt(uint64(rand.Int63())), - AmtIn: lnwire.MilliSatoshi(rand.Int63()), - AmtOut: lnwire.MilliSatoshi(rand.Int63()), - } - - endTime = endTime.Add(time.Minute * 10) - } - - // Now that all of our set of events constructed, we'll add them to the - // database in a batch manner. - if err := log.AddForwardingEvents(events); err != nil { - t.Fatalf("unable to add events: %v", err) - } - - // With all of our events added, we should be able to query for the - // first 10 events using the max event query field. - eventQuery := ForwardingEventQuery{ - StartTime: initialTime, - EndTime: endTime, - IndexOffset: 0, - NumMaxEvents: 10, - } - timeSlice, err := log.Query(eventQuery) - if err != nil { - t.Fatalf("unable to query for events: %v", err) - } - - // We should get exactly 10 events back. - if len(timeSlice.ForwardingEvents) != 10 { - t.Fatalf("wrong number of events: expected %v, got %v", 10, - len(timeSlice.ForwardingEvents)) - } - - // The set of events returned should be the first 10 events that we - // added. - if !reflect.DeepEqual(events[:10], timeSlice.ForwardingEvents) { - t.Fatalf("wrong response: expected %v, got %v", - spew.Sdump(events[:10]), - spew.Sdump(timeSlice.ForwardingEvents)) - } - - // The final offset should be the exact number of events returned. - if timeSlice.LastIndexOffset != 10 { - t.Fatalf("wrong index offset: expected %v, got %v", 10, - timeSlice.LastIndexOffset) - } - - // If we use the final offset to query again, then we should get 10 - // more events, that are the last 10 events we wrote. 
- eventQuery.IndexOffset = 10 - timeSlice, err = log.Query(eventQuery) - if err != nil { - t.Fatalf("unable to query for events: %v", err) - } - - // We should get exactly 10 events back once again. - if len(timeSlice.ForwardingEvents) != 10 { - t.Fatalf("wrong number of events: expected %v, got %v", 10, - len(timeSlice.ForwardingEvents)) - } - - // The events that we got back should be the last 10 events that we - // wrote out. - if !reflect.DeepEqual(events[10:], timeSlice.ForwardingEvents) { - t.Fatalf("wrong response: expected %v, got %v", - spew.Sdump(events[10:]), - spew.Sdump(timeSlice.ForwardingEvents)) - } - - // Finally, the last index offset should be 20, or the number of - // records we've written out. - if timeSlice.LastIndexOffset != 20 { - t.Fatalf("wrong index offset: expected %v, got %v", 20, - timeSlice.LastIndexOffset) - } -} - -// TestForwardingLogQueryLimit tests that we're able to properly limit the -// number of events that are returned as part of a query. -func TestForwardingLogQueryLimit(t *testing.T) { - t.Parallel() - - // First, we'll set up a test database, and use that to instantiate the - // forwarding event log that we'll be using for the duration of the - // test. - db, cleanUp, err := MakeTestDB() - if err != nil { - t.Fatalf("unable to make test db: %v", err) - } - defer cleanUp() - - log := ForwardingLog{ - db: db, - } - - initialTime := time.Unix(1234, 0) - endTime := time.Unix(1234, 0) - - // We'll create 200 random events, which each event being spaced 10 - // minutes after the prior event. 
- numEvents := 200 - events := make([]ForwardingEvent, numEvents) - for i := 0; i < numEvents; i++ { - events[i] = ForwardingEvent{ - Timestamp: endTime, - IncomingChanID: lnwire.NewShortChanIDFromInt(uint64(rand.Int63())), - OutgoingChanID: lnwire.NewShortChanIDFromInt(uint64(rand.Int63())), - AmtIn: lnwire.MilliSatoshi(rand.Int63()), - AmtOut: lnwire.MilliSatoshi(rand.Int63()), - } - - endTime = endTime.Add(time.Minute * 10) - } - - // Now that all of our set of events constructed, we'll add them to the - // database in a batch manner. - if err := log.AddForwardingEvents(events); err != nil { - t.Fatalf("unable to add events: %v", err) - } - - // Once the events have been written out, we'll issue a query over the - // entire range, but restrict the number of events to the first 100. - eventQuery := ForwardingEventQuery{ - StartTime: initialTime, - EndTime: endTime, - IndexOffset: 0, - NumMaxEvents: 100, - } - timeSlice, err := log.Query(eventQuery) - if err != nil { - t.Fatalf("unable to query for events: %v", err) - } - - // We should get exactly 100 events back. - if len(timeSlice.ForwardingEvents) != 100 { - t.Fatalf("wrong number of events: expected %v, got %v", 10, - len(timeSlice.ForwardingEvents)) - } - - // The set of events returned should be the first 100 events that we - // added. - if !reflect.DeepEqual(events[:100], timeSlice.ForwardingEvents) { - t.Fatalf("wrong response: expected %v, got %v", - spew.Sdump(events[:100]), - spew.Sdump(timeSlice.ForwardingEvents)) - } - - // The final offset should be the exact number of events returned. - if timeSlice.LastIndexOffset != 100 { - t.Fatalf("wrong index offset: expected %v, got %v", 100, - timeSlice.LastIndexOffset) - } -} - -// TestForwardingLogMakeUniqueTimestamps makes sure the function that creates -// unique timestamps does it job correctly. -func TestForwardingLogMakeUniqueTimestamps(t *testing.T) { - t.Parallel() - - // Create a list of events where some of the timestamps collide. 
We - // expect no existing timestamp to be overwritten, instead the "gaps" - // between them should be filled. - inputSlice := []ForwardingEvent{ - {Timestamp: time.Unix(0, 1001)}, - {Timestamp: time.Unix(0, 2001)}, - {Timestamp: time.Unix(0, 1001)}, - {Timestamp: time.Unix(0, 1002)}, - {Timestamp: time.Unix(0, 1004)}, - {Timestamp: time.Unix(0, 1004)}, - {Timestamp: time.Unix(0, 1007)}, - {Timestamp: time.Unix(0, 1001)}, - } - expectedSlice := []ForwardingEvent{ - {Timestamp: time.Unix(0, 1001)}, - {Timestamp: time.Unix(0, 1002)}, - {Timestamp: time.Unix(0, 1003)}, - {Timestamp: time.Unix(0, 1004)}, - {Timestamp: time.Unix(0, 1005)}, - {Timestamp: time.Unix(0, 1006)}, - {Timestamp: time.Unix(0, 1007)}, - {Timestamp: time.Unix(0, 2001)}, - } - - makeUniqueTimestamps(inputSlice) - - for idx, in := range inputSlice { - expect := expectedSlice[idx] - assert.Equal( - t, expect.Timestamp.UnixNano(), in.Timestamp.UnixNano(), - ) - } -} - -// TestForwardingLogStoreEvent makes sure forwarding events are stored without -// colliding on duplicate timestamps. -func TestForwardingLogStoreEvent(t *testing.T) { - t.Parallel() - - // First, we'll set up a test database, and use that to instantiate the - // forwarding event log that we'll be using for the duration of the - // test. - db, cleanUp, err := MakeTestDB() - if err != nil { - t.Fatalf("unable to make test db: %v", err) - } - defer cleanUp() - - log := ForwardingLog{ - db: db, - } - - // We'll create 20 random events, with each event having a timestamp - // with just one nanosecond apart. 
- numEvents := 20 - events := make([]ForwardingEvent, numEvents) - ts := time.Now().UnixNano() - for i := 0; i < numEvents; i++ { - events[i] = ForwardingEvent{ - Timestamp: time.Unix(0, ts+int64(i)), - IncomingChanID: lnwire.NewShortChanIDFromInt(uint64(rand.Int63())), - OutgoingChanID: lnwire.NewShortChanIDFromInt(uint64(rand.Int63())), - AmtIn: lnwire.MilliSatoshi(rand.Int63()), - AmtOut: lnwire.MilliSatoshi(rand.Int63()), - } - } - - // Now that all of our events are constructed, we'll add them to the - // database in a batched manner. - if err := log.AddForwardingEvents(events); err != nil { - t.Fatalf("unable to add events: %v", err) - } - - // Because timestamps are de-duplicated when adding them in a single - // batch before they even hit the DB, we add the same events again but - // in a new batch. They now have to be de-duplicated on the DB level. - if err := log.AddForwardingEvents(events); err != nil { - t.Fatalf("unable to add second batch of events: %v", err) - } - - // With all of our events added, we should be able to query for all - // events with a range of just 40 nanoseconds (2 times 20 events, all - // spaced one nanosecond apart). - eventQuery := ForwardingEventQuery{ - StartTime: time.Unix(0, ts), - EndTime: time.Unix(0, ts+int64(numEvents*2)), - IndexOffset: 0, - NumMaxEvents: uint32(numEvents * 3), - } - timeSlice, err := log.Query(eventQuery) - if err != nil { - t.Fatalf("unable to query for events: %v", err) - } - - // We should get exactly 40 events back. - if len(timeSlice.ForwardingEvents) != numEvents*2 { - t.Fatalf("wrong number of events: expected %v, got %v", - numEvents*2, len(timeSlice.ForwardingEvents)) - } - - // The timestamps should be spaced out evenly and in order. 
- for i := 0; i < numEvents*2; i++ { - eventTs := timeSlice.ForwardingEvents[i].Timestamp.UnixNano() - if eventTs != ts+int64(i) { - t.Fatalf("unexpected timestamp of event %d: expected "+ - "%d, got %d", i, ts+int64(i), eventTs) - } - } -} diff --git a/lnd/channeldb/forwarding_package.go b/lnd/channeldb/forwarding_package.go deleted file mode 100644 index 6594db2d..00000000 --- a/lnd/channeldb/forwarding_package.go +++ /dev/null @@ -1,929 +0,0 @@ -package channeldb - -import ( - "bytes" - "encoding/binary" - "fmt" - "io" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -// ErrCorruptedFwdPkg signals that the on-disk structure of the forwarding -// package has potentially been mangled. -var ErrCorruptedFwdPkg = Err.CodeWithDetail("ErrCorruptedFwdPkg", "fwding package db has been corrupted") - -// FwdState is an enum used to describe the lifecycle of a FwdPkg. -type FwdState byte - -const ( - // FwdStateLockedIn is the starting state for all forwarding packages. - // Packages in this state have not yet committed to the exact set of - // Adds to forward to the switch. - FwdStateLockedIn FwdState = iota - - // FwdStateProcessed marks the state in which all Adds have been - // locally processed and the forwarding decision to the switch has been - // persisted. - FwdStateProcessed - - // FwdStateCompleted signals that all Adds have been acked, and that all - // settles and fails have been delivered to their sources. Packages in - // this state can be removed permanently. - FwdStateCompleted -) - -var ( - // fwdPackagesKey is the root-level bucket that all forwarding packages - // are written. This bucket is further subdivided based on the short - // channel ID of each channel. - fwdPackagesKey = []byte("fwd-packages") - - // addBucketKey is the bucket to which all Add log updates are written. 
- addBucketKey = []byte("add-updates") - - // failSettleBucketKey is the bucket to which all Settle/Fail log - // updates are written. - failSettleBucketKey = []byte("fail-settle-updates") - - // fwdFilterKey is a key used to write the set of Adds that passed - // validation and are to be forwarded to the switch. - // NOTE: The presence of this key within a forwarding package indicates - // that the package has reached FwdStateProcessed. - fwdFilterKey = []byte("fwd-filter-key") - - // ackFilterKey is a key used to access the PkgFilter indicating which - // Adds have received a Settle/Fail. This response may come from a - // number of sources, including: exitHop settle/fails, switch failures, - // chain arbiter interjections, as well as settle/fails from the - // next hop in the route. - ackFilterKey = []byte("ack-filter-key") - - // settleFailFilterKey is a key used to access the PkgFilter indicating - // which Settles/Fails in have been received and processed by the link - // that originally received the Add. - settleFailFilterKey = []byte("settle-fail-filter-key") -) - -// PkgFilter is used to compactly represent a particular subset of the Adds in a -// forwarding package. Each filter is represented as a simple, statically-sized -// bitvector, where the elements are intended to be the indices of the Adds as -// they are written in the FwdPkg. -type PkgFilter struct { - count uint16 - filter []byte -} - -// NewPkgFilter initializes an empty PkgFilter supporting `count` elements. -func NewPkgFilter(count uint16) *PkgFilter { - // We add 7 to ensure that the integer division yields properly rounded - // values. - filterLen := (count + 7) / 8 - - return &PkgFilter{ - count: count, - filter: make([]byte, filterLen), - } -} - -// Count returns the number of elements represented by this PkgFilter. -func (f *PkgFilter) Count() uint16 { - return f.count -} - -// Set marks the `i`-th element as included by this filter. 
-// NOTE: It is assumed that i is always less than count. -func (f *PkgFilter) Set(i uint16) { - byt := i / 8 - bit := i % 8 - - // Set the i-th bit in the filter. - // TODO(conner): ignore if > count to prevent panic? - f.filter[byt] |= byte(1 << (7 - bit)) -} - -// Contains queries the filter for membership of index `i`. -// NOTE: It is assumed that i is always less than count. -func (f *PkgFilter) Contains(i uint16) bool { - byt := i / 8 - bit := i % 8 - - // Read the i-th bit in the filter. - // TODO(conner): ignore if > count to prevent panic? - return f.filter[byt]&(1<<(7-bit)) != 0 -} - -// Equal checks two PkgFilters for equality. -func (f *PkgFilter) Equal(f2 *PkgFilter) bool { - if f == f2 { - return true - } - if f.count != f2.count { - return false - } - - return bytes.Equal(f.filter, f2.filter) -} - -// IsFull returns true if every element in the filter has been Set, and false -// otherwise. -func (f *PkgFilter) IsFull() bool { - // Batch validate bytes that are fully used. - for i := uint16(0); i < f.count/8; i++ { - if f.filter[i] != 0xFF { - return false - } - } - - // If the count is not a multiple of 8, check that the filter contains - // all remaining bits. - rem := f.count % 8 - for idx := f.count - rem; idx < f.count; idx++ { - if !f.Contains(idx) { - return false - } - } - - return true -} - -// Size returns number of bytes produced when the PkgFilter is serialized. -func (f *PkgFilter) Size() uint16 { - // 2 bytes for uint16 `count`, then round up number of bytes required to - // represent `count` bits. - return 2 + (f.count+7)/8 -} - -// Encode writes the filter to the provided io.Writer. -func (f *PkgFilter) Encode(w io.Writer) er.R { - if err := util.WriteBin(w, binary.BigEndian, f.count); err != nil { - return err - } - - _, err := util.Write(w, f.filter) - - return err -} - -// Decode reads the filter from the provided io.Reader. 
-func (f *PkgFilter) Decode(r io.Reader) er.R { - if err := util.ReadBin(r, binary.BigEndian, &f.count); err != nil { - return err - } - - f.filter = make([]byte, f.Size()-2) - _, err := util.ReadFull(r, f.filter) - - return err -} - -// FwdPkg records all adds, settles, and fails that were locked in as a result -// of the remote peer sending us a revocation. Each package is identified by -// the short chanid and remote commitment height corresponding to the revocation -// that locked in the HTLCs. For everything except a locally initiated payment, -// settles and fails in a forwarding package must have a corresponding Add in -// another package, and can be removed individually once the source link has -// received the fail/settle. -// -// Adds cannot be removed, as we need to present the same batch of Adds to -// properly handle replay protection. Instead, we use a PkgFilter to mark that -// we have finished processing a particular Add. A FwdPkg should only be deleted -// after the AckFilter is full and all settles and fails have been persistently -// removed. -type FwdPkg struct { - // Source identifies the channel that wrote this forwarding package. - Source lnwire.ShortChannelID - - // Height is the height of the remote commitment chain that locked in - // this forwarding package. - Height uint64 - - // State signals the persistent condition of the package and directs how - // to reprocess the package in the event of failures. - State FwdState - - // Adds contains all add messages which need to be processed and - // forwarded to the switch. Adds does not change over the life of a - // forwarding package. - Adds []LogUpdate - - // FwdFilter is a filter containing the indices of all Adds that were - // forwarded to the switch. - FwdFilter *PkgFilter - - // AckFilter is a filter containing the indices of all Adds for which - // the source has received a settle or fail and is reflected in the next - // commitment txn. 
A package should not be removed until IsFull() - // returns true. - AckFilter *PkgFilter - - // SettleFails contains all settle and fail messages that should be - // forwarded to the switch. - SettleFails []LogUpdate - - // SettleFailFilter is a filter containing the indices of all Settle or - // Fails originating in this package that have been received and locked - // into the incoming link's commitment state. - SettleFailFilter *PkgFilter -} - -// NewFwdPkg initializes a new forwarding package in FwdStateLockedIn. This -// should be used to create a package at the time we receive a revocation. -func NewFwdPkg(source lnwire.ShortChannelID, height uint64, - addUpdates, settleFailUpdates []LogUpdate) *FwdPkg { - - nAddUpdates := uint16(len(addUpdates)) - nSettleFailUpdates := uint16(len(settleFailUpdates)) - - return &FwdPkg{ - Source: source, - Height: height, - State: FwdStateLockedIn, - Adds: addUpdates, - FwdFilter: NewPkgFilter(nAddUpdates), - AckFilter: NewPkgFilter(nAddUpdates), - SettleFails: settleFailUpdates, - SettleFailFilter: NewPkgFilter(nSettleFailUpdates), - } -} - -// ID returns an unique identifier for this package, used to ensure that sphinx -// replay processing of this batch is idempotent. -func (f *FwdPkg) ID() []byte { - var id = make([]byte, 16) - byteOrder.PutUint64(id[:8], f.Source.ToUint64()) - byteOrder.PutUint64(id[8:], f.Height) - return id -} - -// String returns a human-readable description of the forwarding package. -func (f *FwdPkg) String() string { - return fmt.Sprintf("%T(src=%v, height=%v, nadds=%v, nfailsettles=%v)", - f, f.Source, f.Height, len(f.Adds), len(f.SettleFails)) -} - -// AddRef is used to identify a particular Add in a FwdPkg. The short channel ID -// is assumed to be that of the packager. -type AddRef struct { - // Height is the remote commitment height that locked in the Add. - Height uint64 - - // Index is the index of the Add within the fwd pkg's Adds. 
- // - // NOTE: This index is static over the lifetime of a forwarding package. - Index uint16 -} - -// Encode serializes the AddRef to the given io.Writer. -func (a *AddRef) Encode(w io.Writer) er.R { - if err := util.WriteBin(w, binary.BigEndian, a.Height); err != nil { - return err - } - - return util.WriteBin(w, binary.BigEndian, a.Index) -} - -// Decode deserializes the AddRef from the given io.Reader. -func (a *AddRef) Decode(r io.Reader) er.R { - if err := util.ReadBin(r, binary.BigEndian, &a.Height); err != nil { - return err - } - - return util.ReadBin(r, binary.BigEndian, &a.Index) -} - -// SettleFailRef is used to locate a Settle/Fail in another channel's FwdPkg. A -// channel does not remove its own Settle/Fail htlcs, so the source is provided -// to locate a db bucket belonging to another channel. -type SettleFailRef struct { - // Source identifies the outgoing link that locked in the settle or - // fail. This is then used by the *incoming* link to find the settle - // fail in another link's forwarding packages. - Source lnwire.ShortChannelID - - // Height is the remote commitment height that locked in this - // Settle/Fail. - Height uint64 - - // Index is the index of the Add with the fwd pkg's SettleFails. - // - // NOTE: This index is static over the lifetime of a forwarding package. - Index uint16 -} - -// SettleFailAcker is a generic interface providing the ability to acknowledge -// settle/fail HTLCs stored in forwarding packages. -type SettleFailAcker interface { - // AckSettleFails atomically updates the settle-fail filters in *other* - // channels' forwarding packages. - AckSettleFails(tx kvdb.RwTx, settleFailRefs ...SettleFailRef) er.R -} - -// GlobalFwdPkgReader is an interface used to retrieve the forwarding packages -// of any active channel. -type GlobalFwdPkgReader interface { - // LoadChannelFwdPkgs loads all known forwarding packages for the given - // channel. 
- LoadChannelFwdPkgs(tx kvdb.RTx, - source lnwire.ShortChannelID) ([]*FwdPkg, er.R) -} - -// FwdOperator defines the interfaces for managing forwarding packages that are -// external to a particular channel. This interface is used by the switch to -// read forwarding packages from arbitrary channels, and acknowledge settles and -// fails for locally-sourced payments. -type FwdOperator interface { - // GlobalFwdPkgReader provides read access to all known forwarding - // packages - GlobalFwdPkgReader - - // SettleFailAcker grants the ability to acknowledge settles or fails - // residing in arbitrary forwarding packages. - SettleFailAcker -} - -// SwitchPackager is a concrete implementation of the FwdOperator interface. -// A SwitchPackager offers the ability to read any forwarding package, and ack -// arbitrary settle and fail HTLCs. -type SwitchPackager struct{} - -// NewSwitchPackager instantiates a new SwitchPackager. -func NewSwitchPackager() *SwitchPackager { - return &SwitchPackager{} -} - -// AckSettleFails atomically updates the settle-fail filters in *other* -// channels' forwarding packages, to mark that the switch has received a settle -// or fail residing in the forwarding package of a link. -func (*SwitchPackager) AckSettleFails(tx kvdb.RwTx, - settleFailRefs ...SettleFailRef) er.R { - - return ackSettleFails(tx, settleFailRefs) -} - -// LoadChannelFwdPkgs loads all forwarding packages for a particular channel. -func (*SwitchPackager) LoadChannelFwdPkgs(tx kvdb.RTx, - source lnwire.ShortChannelID) ([]*FwdPkg, er.R) { - - return loadChannelFwdPkgs(tx, source) -} - -// FwdPackager supports all operations required to modify fwd packages, such as -// creation, updates, reading, and removal. The interfaces are broken down in -// this way to support future delegation of the subinterfaces. -type FwdPackager interface { - // AddFwdPkg serializes and writes a FwdPkg for this channel at the - // remote commitment height included in the forwarding package. 
- AddFwdPkg(tx kvdb.RwTx, fwdPkg *FwdPkg) er.R - - // SetFwdFilter looks up the forwarding package at the remote `height` - // and sets the `fwdFilter`, marking the Adds for which: - // 1) We are not the exit node - // 2) Passed all validation - // 3) Should be forwarded to the switch immediately after a failure - SetFwdFilter(tx kvdb.RwTx, height uint64, fwdFilter *PkgFilter) er.R - - // AckAddHtlcs atomically updates the add filters in this channel's - // forwarding packages to mark the resolution of an Add that was - // received from the remote party. - AckAddHtlcs(tx kvdb.RwTx, addRefs ...AddRef) er.R - - // SettleFailAcker allows a link to acknowledge settle/fail HTLCs - // belonging to other channels. - SettleFailAcker - - // LoadFwdPkgs loads all known forwarding packages owned by this - // channel. - LoadFwdPkgs(tx kvdb.RTx) ([]*FwdPkg, er.R) - - // RemovePkg deletes a forwarding package owned by this channel at - // the provided remote `height`. - RemovePkg(tx kvdb.RwTx, height uint64) er.R -} - -// ChannelPackager is used by a channel to manage the lifecycle of its forwarding -// packages. The packager is tied to a particular source channel ID, allowing it -// to create and edit its own packages. Each packager also has the ability to -// remove fail/settle htlcs that correspond to an add contained in one of -// source's packages. -type ChannelPackager struct { - source lnwire.ShortChannelID -} - -// NewChannelPackager creates a new packager for a single channel. -func NewChannelPackager(source lnwire.ShortChannelID) *ChannelPackager { - return &ChannelPackager{ - source: source, - } -} - -// AddFwdPkg writes a newly locked in forwarding package to disk. 
-func (*ChannelPackager) AddFwdPkg(tx kvdb.RwTx, fwdPkg *FwdPkg) er.R { - fwdPkgBkt, err := tx.CreateTopLevelBucket(fwdPackagesKey) - if err != nil { - return err - } - - source := makeLogKey(fwdPkg.Source.ToUint64()) - sourceBkt, err := fwdPkgBkt.CreateBucketIfNotExists(source[:]) - if err != nil { - return err - } - - heightKey := makeLogKey(fwdPkg.Height) - heightBkt, err := sourceBkt.CreateBucketIfNotExists(heightKey[:]) - if err != nil { - return err - } - - // Write ADD updates we received at this commit height. - addBkt, err := heightBkt.CreateBucketIfNotExists(addBucketKey) - if err != nil { - return err - } - - // Write SETTLE/FAIL updates we received at this commit height. - failSettleBkt, err := heightBkt.CreateBucketIfNotExists(failSettleBucketKey) - if err != nil { - return err - } - - for i := range fwdPkg.Adds { - errr := putLogUpdate(addBkt, uint16(i), &fwdPkg.Adds[i]) - if errr != nil { - return errr - } - } - - // Persist the initialized pkg filter, which will be used to determine - // when we can remove this forwarding package from disk. - var ackFilterBuf bytes.Buffer - if err := fwdPkg.AckFilter.Encode(&ackFilterBuf); err != nil { - return err - } - - if err := heightBkt.Put(ackFilterKey, ackFilterBuf.Bytes()); err != nil { - return err - } - - for i := range fwdPkg.SettleFails { - errr := putLogUpdate(failSettleBkt, uint16(i), &fwdPkg.SettleFails[i]) - if errr != nil { - return errr - } - } - - var settleFailFilterBuf bytes.Buffer - errr := fwdPkg.SettleFailFilter.Encode(&settleFailFilterBuf) - if errr != nil { - return errr - } - - return heightBkt.Put(settleFailFilterKey, settleFailFilterBuf.Bytes()) -} - -// putLogUpdate writes an htlc to the provided `bkt`, using `index` as the key. 
-func putLogUpdate(bkt kvdb.RwBucket, idx uint16, htlc *LogUpdate) er.R { - var b bytes.Buffer - if err := htlc.Encode(&b); err != nil { - return err - } - - return bkt.Put(uint16Key(idx), b.Bytes()) -} - -// LoadFwdPkgs scans the forwarding log for any packages that haven't been -// processed, and returns their deserialized log updates in a map indexed by the -// remote commitment height at which the updates were locked in. -func (p *ChannelPackager) LoadFwdPkgs(tx kvdb.RTx) ([]*FwdPkg, er.R) { - return loadChannelFwdPkgs(tx, p.source) -} - -// loadChannelFwdPkgs loads all forwarding packages owned by `source`. -func loadChannelFwdPkgs(tx kvdb.RTx, source lnwire.ShortChannelID) ([]*FwdPkg, er.R) { - fwdPkgBkt := tx.ReadBucket(fwdPackagesKey) - if fwdPkgBkt == nil { - return nil, nil - } - - sourceKey := makeLogKey(source.ToUint64()) - sourceBkt := fwdPkgBkt.NestedReadBucket(sourceKey[:]) - if sourceBkt == nil { - return nil, nil - } - - var heights []uint64 - if err := sourceBkt.ForEach(func(k, _ []byte) er.R { - if len(k) != 8 { - return ErrCorruptedFwdPkg.Default() - } - - heights = append(heights, byteOrder.Uint64(k)) - - return nil - }); err != nil { - return nil, err - } - - // Load the forwarding package for each retrieved height. - fwdPkgs := make([]*FwdPkg, 0, len(heights)) - for _, height := range heights { - fwdPkg, err := loadFwdPkg(fwdPkgBkt, source, height) - if err != nil { - return nil, err - } - - fwdPkgs = append(fwdPkgs, fwdPkg) - } - - return fwdPkgs, nil -} - -// loadFwPkg reads the packager's fwd pkg at a given height, and determines the -// appropriate FwdState. 
-func loadFwdPkg(fwdPkgBkt kvdb.RBucket, source lnwire.ShortChannelID, - height uint64) (*FwdPkg, er.R) { - - sourceKey := makeLogKey(source.ToUint64()) - sourceBkt := fwdPkgBkt.NestedReadBucket(sourceKey[:]) - if sourceBkt == nil { - return nil, ErrCorruptedFwdPkg.Default() - } - - heightKey := makeLogKey(height) - heightBkt := sourceBkt.NestedReadBucket(heightKey[:]) - if heightBkt == nil { - return nil, ErrCorruptedFwdPkg.Default() - } - - // Load ADDs from disk. - addBkt := heightBkt.NestedReadBucket(addBucketKey) - if addBkt == nil { - return nil, ErrCorruptedFwdPkg.Default() - } - - adds, err := loadHtlcs(addBkt) - if err != nil { - return nil, err - } - - // Load ack filter from disk. - ackFilterBytes := heightBkt.Get(ackFilterKey) - if ackFilterBytes == nil { - return nil, ErrCorruptedFwdPkg.Default() - } - ackFilterReader := bytes.NewReader(ackFilterBytes) - - ackFilter := &PkgFilter{} - if err := ackFilter.Decode(ackFilterReader); err != nil { - return nil, err - } - - // Load SETTLE/FAILs from disk. - failSettleBkt := heightBkt.NestedReadBucket(failSettleBucketKey) - if failSettleBkt == nil { - return nil, ErrCorruptedFwdPkg.Default() - } - - failSettles, err := loadHtlcs(failSettleBkt) - if err != nil { - return nil, err - } - - // Load settle fail filter from disk. - settleFailFilterBytes := heightBkt.Get(settleFailFilterKey) - if settleFailFilterBytes == nil { - return nil, ErrCorruptedFwdPkg.Default() - } - settleFailFilterReader := bytes.NewReader(settleFailFilterBytes) - - settleFailFilter := &PkgFilter{} - if err := settleFailFilter.Decode(settleFailFilterReader); err != nil { - return nil, err - } - - // Initialize the fwding package, which always starts in the - // FwdStateLockedIn. We can determine what state the package was left in - // by examining constraints on the information loaded from disk. 
- fwdPkg := &FwdPkg{ - Source: source, - State: FwdStateLockedIn, - Height: height, - Adds: adds, - AckFilter: ackFilter, - SettleFails: failSettles, - SettleFailFilter: settleFailFilter, - } - - // Check to see if we have written the set exported filter adds to - // disk. If we haven't, processing of this package was never started, or - // failed during the last attempt. - fwdFilterBytes := heightBkt.Get(fwdFilterKey) - if fwdFilterBytes == nil { - nAdds := uint16(len(adds)) - fwdPkg.FwdFilter = NewPkgFilter(nAdds) - return fwdPkg, nil - } - - fwdFilterReader := bytes.NewReader(fwdFilterBytes) - fwdPkg.FwdFilter = &PkgFilter{} - if err := fwdPkg.FwdFilter.Decode(fwdFilterReader); err != nil { - return nil, err - } - - // Otherwise, a complete round of processing was completed, and we - // advance the package to FwdStateProcessed. - fwdPkg.State = FwdStateProcessed - - // If every add, settle, and fail has been fully acknowledged, we can - // safely set the package's state to FwdStateCompleted, signalling that - // it can be garbage collected. - if fwdPkg.AckFilter.IsFull() && fwdPkg.SettleFailFilter.IsFull() { - fwdPkg.State = FwdStateCompleted - } - - return fwdPkg, nil -} - -// loadHtlcs retrieves all serialized htlcs in a bucket, returning -// them in order of the indexes they were written under. -func loadHtlcs(bkt kvdb.RBucket) ([]LogUpdate, er.R) { - var htlcs []LogUpdate - if err := bkt.ForEach(func(_, v []byte) er.R { - var htlc LogUpdate - if err := htlc.Decode(bytes.NewReader(v)); err != nil { - return err - } - - htlcs = append(htlcs, htlc) - - return nil - }); err != nil { - return nil, err - } - - return htlcs, nil -} - -// SetFwdFilter writes the set of indexes corresponding to Adds at the -// `height` that are to be forwarded to the switch. Calling this method causes -// the forwarding package at `height` to be in FwdStateProcessed. 
We write this -// forwarding decision so that we always arrive at the same behavior for HTLCs -// leaving this channel. After a restart, we skip validation of these Adds, -// since they are assumed to have already been validated, and make the switch or -// outgoing link responsible for handling replays. -func (p *ChannelPackager) SetFwdFilter(tx kvdb.RwTx, height uint64, - fwdFilter *PkgFilter) er.R { - - fwdPkgBkt := tx.ReadWriteBucket(fwdPackagesKey) - if fwdPkgBkt == nil { - return ErrCorruptedFwdPkg.Default() - } - - source := makeLogKey(p.source.ToUint64()) - sourceBkt := fwdPkgBkt.NestedReadWriteBucket(source[:]) - if sourceBkt == nil { - return ErrCorruptedFwdPkg.Default() - } - - heightKey := makeLogKey(height) - heightBkt := sourceBkt.NestedReadWriteBucket(heightKey[:]) - if heightBkt == nil { - return ErrCorruptedFwdPkg.Default() - } - - // If the fwd filter has already been written, we return early to avoid - // modifying the persistent state. - forwardedAddsBytes := heightBkt.Get(fwdFilterKey) - if forwardedAddsBytes != nil { - return nil - } - - // Otherwise we serialize and write the provided fwd filter. - var b bytes.Buffer - if err := fwdFilter.Encode(&b); err != nil { - return err - } - - return heightBkt.Put(fwdFilterKey, b.Bytes()) -} - -// AckAddHtlcs accepts a list of references to add htlcs, and updates the -// AckAddFilter of those forwarding packages to indicate that a settle or fail -// has been received in response to the add. 
-func (p *ChannelPackager) AckAddHtlcs(tx kvdb.RwTx, addRefs ...AddRef) er.R { - if len(addRefs) == 0 { - return nil - } - - fwdPkgBkt := tx.ReadWriteBucket(fwdPackagesKey) - if fwdPkgBkt == nil { - return ErrCorruptedFwdPkg.Default() - } - - sourceKey := makeLogKey(p.source.ToUint64()) - sourceBkt := fwdPkgBkt.NestedReadWriteBucket(sourceKey[:]) - if sourceBkt == nil { - return ErrCorruptedFwdPkg.Default() - } - - // Organize the forward references such that we just get a single slice - // of indexes for each unique height. - heightDiffs := make(map[uint64][]uint16) - for _, addRef := range addRefs { - heightDiffs[addRef.Height] = append( - heightDiffs[addRef.Height], - addRef.Index, - ) - } - - // Load each height bucket once and remove all acked htlcs at that - // height. - for height, indexes := range heightDiffs { - err := ackAddHtlcsAtHeight(sourceBkt, height, indexes) - if err != nil { - return err - } - } - - return nil -} - -// ackAddHtlcsAtHeight updates the AddAckFilter of a single forwarding package -// with a list of indexes, writing the resulting filter back in its place. -func ackAddHtlcsAtHeight(sourceBkt kvdb.RwBucket, height uint64, - indexes []uint16) er.R { - - heightKey := makeLogKey(height) - heightBkt := sourceBkt.NestedReadWriteBucket(heightKey[:]) - if heightBkt == nil { - // If the height bucket isn't found, this could be because the - // forwarding package was already removed. We'll return nil to - // signal that the operation is successful, as there is nothing - // to ack. - return nil - } - - // Load ack filter from disk. - ackFilterBytes := heightBkt.Get(ackFilterKey) - if ackFilterBytes == nil { - return ErrCorruptedFwdPkg.Default() - } - - ackFilter := &PkgFilter{} - ackFilterReader := bytes.NewReader(ackFilterBytes) - if err := ackFilter.Decode(ackFilterReader); err != nil { - return err - } - - // Update the ack filter for this height. 
- for _, index := range indexes { - ackFilter.Set(index) - } - - // Write the resulting filter to disk. - var ackFilterBuf bytes.Buffer - if err := ackFilter.Encode(&ackFilterBuf); err != nil { - return err - } - - return heightBkt.Put(ackFilterKey, ackFilterBuf.Bytes()) -} - -// AckSettleFails persistently acknowledges settles or fails from a remote forwarding -// package. This should only be called after the source of the Add has locked in -// the settle/fail, or it becomes otherwise safe to forgo retransmitting the -// settle/fail after a restart. -func (p *ChannelPackager) AckSettleFails(tx kvdb.RwTx, settleFailRefs ...SettleFailRef) er.R { - return ackSettleFails(tx, settleFailRefs) -} - -// ackSettleFails persistently acknowledges a batch of settle fail references. -func ackSettleFails(tx kvdb.RwTx, settleFailRefs []SettleFailRef) er.R { - if len(settleFailRefs) == 0 { - return nil - } - - fwdPkgBkt := tx.ReadWriteBucket(fwdPackagesKey) - if fwdPkgBkt == nil { - return ErrCorruptedFwdPkg.Default() - } - - // Organize the forward references such that we just get a single slice - // of indexes for each unique destination-height pair. - destHeightDiffs := make(map[lnwire.ShortChannelID]map[uint64][]uint16) - for _, settleFailRef := range settleFailRefs { - destHeights, ok := destHeightDiffs[settleFailRef.Source] - if !ok { - destHeights = make(map[uint64][]uint16) - destHeightDiffs[settleFailRef.Source] = destHeights - } - - destHeights[settleFailRef.Height] = append( - destHeights[settleFailRef.Height], - settleFailRef.Index, - ) - } - - // With the references organized by destination and height, we now load - // each remote bucket, and update the settle fail filter for any - // settle/fail htlcs. 
- for dest, destHeights := range destHeightDiffs { - destKey := makeLogKey(dest.ToUint64()) - destBkt := fwdPkgBkt.NestedReadWriteBucket(destKey[:]) - if destBkt == nil { - // If the destination bucket is not found, this is - // likely the result of the destination channel being - // closed and having it's forwarding packages wiped. We - // won't treat this as an error, because the response - // will no longer be retransmitted internally. - continue - } - - for height, indexes := range destHeights { - err := ackSettleFailsAtHeight(destBkt, height, indexes) - if err != nil { - return err - } - } - } - - return nil -} - -// ackSettleFailsAtHeight given a destination bucket, acks the provided indexes -// at particular a height by updating the settle fail filter. -func ackSettleFailsAtHeight(destBkt kvdb.RwBucket, height uint64, - indexes []uint16) er.R { - - heightKey := makeLogKey(height) - heightBkt := destBkt.NestedReadWriteBucket(heightKey[:]) - if heightBkt == nil { - // If the height bucket isn't found, this could be because the - // forwarding package was already removed. We'll return nil to - // signal that the operation is as there is nothing to ack. - return nil - } - - // Load ack filter from disk. - settleFailFilterBytes := heightBkt.Get(settleFailFilterKey) - if settleFailFilterBytes == nil { - return ErrCorruptedFwdPkg.Default() - } - - settleFailFilter := &PkgFilter{} - settleFailFilterReader := bytes.NewReader(settleFailFilterBytes) - if err := settleFailFilter.Decode(settleFailFilterReader); err != nil { - return err - } - - // Update the ack filter for this height. - for _, index := range indexes { - settleFailFilter.Set(index) - } - - // Write the resulting filter to disk. 
- var settleFailFilterBuf bytes.Buffer - if err := settleFailFilter.Encode(&settleFailFilterBuf); err != nil { - return err - } - - return heightBkt.Put(settleFailFilterKey, settleFailFilterBuf.Bytes()) -} - -// RemovePkg deletes the forwarding package at the given height from the -// packager's source bucket. -func (p *ChannelPackager) RemovePkg(tx kvdb.RwTx, height uint64) er.R { - fwdPkgBkt := tx.ReadWriteBucket(fwdPackagesKey) - if fwdPkgBkt == nil { - return nil - } - - sourceBytes := makeLogKey(p.source.ToUint64()) - sourceBkt := fwdPkgBkt.NestedReadWriteBucket(sourceBytes[:]) - if sourceBkt == nil { - return ErrCorruptedFwdPkg.Default() - } - - heightKey := makeLogKey(height) - - return sourceBkt.DeleteNestedBucket(heightKey[:]) -} - -// uint16Key writes the provided 16-bit unsigned integer to a 2-byte slice. -func uint16Key(i uint16) []byte { - key := make([]byte, 2) - byteOrder.PutUint16(key, i) - return key -} - -// Compile-time constraint to ensure that ChannelPackager implements the public -// FwdPackager interface. -var _ FwdPackager = (*ChannelPackager)(nil) - -// Compile-time constraint to ensure that SwitchPackager implements the public -// FwdOperator interface. -var _ FwdOperator = (*SwitchPackager)(nil) diff --git a/lnd/channeldb/forwarding_package_test.go b/lnd/channeldb/forwarding_package_test.go deleted file mode 100644 index 1500a201..00000000 --- a/lnd/channeldb/forwarding_package_test.go +++ /dev/null @@ -1,818 +0,0 @@ -package channeldb_test - -import ( - "bytes" - "io/ioutil" - "path/filepath" - "runtime" - "testing" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/wire" -) - -// TestPkgFilterBruteForce tests the behavior of a pkg filter up to size 1000, -// which is greater than the number of HTLCs we permit on a commitment txn. 
-// This should encapsulate every potential filter used in practice. -func TestPkgFilterBruteForce(t *testing.T) { - t.Parallel() - - checkPkgFilterRange(t, 1000) -} - -// checkPkgFilterRange verifies the behavior of a pkg filter when doing a linear -// insertion of `high` elements. This is primarily to test that IsFull functions -// properly for all relevant sizes of `high`. -func checkPkgFilterRange(t *testing.T, high int) { - for i := uint16(0); i < uint16(high); i++ { - f := channeldb.NewPkgFilter(i) - - if f.Count() != i { - t.Fatalf("pkg filter count=%d is actually %d", - i, f.Count()) - } - checkPkgFilterEncodeDecode(t, i, f) - - for j := uint16(0); j < i; j++ { - if f.Contains(j) { - t.Fatalf("pkg filter count=%d contains %d "+ - "before being added", i, j) - } - - f.Set(j) - checkPkgFilterEncodeDecode(t, i, f) - - if !f.Contains(j) { - t.Fatalf("pkg filter count=%d missing %d "+ - "after being added", i, j) - } - - if j < i-1 && f.IsFull() { - t.Fatalf("pkg filter count=%d already full", i) - } - } - - if !f.IsFull() { - t.Fatalf("pkg filter count=%d not full", i) - } - checkPkgFilterEncodeDecode(t, i, f) - } -} - -// TestPkgFilterRand uses a random permutation to verify the proper behavior of -// the pkg filter if the entries are not inserted in-order. -func TestPkgFilterRand(t *testing.T) { - t.Parallel() - - checkPkgFilterRand(t, 3, 17) -} - -// checkPkgFilterRand checks the behavior of a pkg filter by randomly inserting -// indices and asserting the invariants. The order in which indices are inserted -// is parameterized by a base `b` coprime to `p`, and using modular -// exponentiation to generate all elements in [1,p). 
-func checkPkgFilterRand(t *testing.T, b, p uint16) { - f := channeldb.NewPkgFilter(p) - var j = b - for i := uint16(1); i < p; i++ { - if f.Contains(j) { - t.Fatalf("pkg filter contains %d-%d "+ - "before being added", i, j) - } - - f.Set(j) - checkPkgFilterEncodeDecode(t, i, f) - - if !f.Contains(j) { - t.Fatalf("pkg filter missing %d-%d "+ - "after being added", i, j) - } - - if i < p-1 && f.IsFull() { - t.Fatalf("pkg filter %d already full", i) - } - checkPkgFilterEncodeDecode(t, i, f) - - j = (b * j) % p - } - - // Set 0 independently, since it will never be emitted by the generator. - f.Set(0) - checkPkgFilterEncodeDecode(t, p, f) - - if !f.IsFull() { - t.Fatalf("pkg filter count=%d not full", p) - } - checkPkgFilterEncodeDecode(t, p, f) -} - -// checkPkgFilterEncodeDecode tests the serialization of a pkg filter by: -// 1) writing it to a buffer -// 2) verifying the number of bytes written matches the filter's Size() -// 3) reconstructing the filter decoding the bytes -// 4) checking that the two filters are the same according to Equal -func checkPkgFilterEncodeDecode(t *testing.T, i uint16, f *channeldb.PkgFilter) { - var b bytes.Buffer - if err := f.Encode(&b); err != nil { - t.Fatalf("unable to serialize pkg filter: %v", err) - } - - // +2 for uint16 length - size := uint16(len(b.Bytes())) - if size != f.Size() { - t.Fatalf("pkg filter count=%d serialized size differs, "+ - "Size(): %d, len(bytes): %v", i, f.Size(), size) - } - - reader := bytes.NewReader(b.Bytes()) - - f2 := &channeldb.PkgFilter{} - if err := f2.Decode(reader); err != nil { - t.Fatalf("unable to deserialize pkg filter: %v", err) - } - - if !f.Equal(f2) { - t.Fatalf("pkg filter count=%v does is not equal "+ - "after deserialization, want: %v, got %v", - i, f, f2) - } -} - -var ( - chanID = lnwire.NewChanIDFromOutPoint(&wire.OutPoint{}) - - adds = []channeldb.LogUpdate{ - { - LogIndex: 0, - UpdateMsg: &lnwire.UpdateAddHTLC{ - ChanID: chanID, - ID: 1, - Amount: 100, - Expiry: 1000, - 
PaymentHash: [32]byte{0}, - }, - }, - { - LogIndex: 1, - UpdateMsg: &lnwire.UpdateAddHTLC{ - ChanID: chanID, - ID: 1, - Amount: 101, - Expiry: 1001, - PaymentHash: [32]byte{1}, - }, - }, - } - - settleFails = []channeldb.LogUpdate{ - { - LogIndex: 2, - UpdateMsg: &lnwire.UpdateFulfillHTLC{ - ChanID: chanID, - ID: 0, - PaymentPreimage: [32]byte{0}, - }, - }, - { - LogIndex: 3, - UpdateMsg: &lnwire.UpdateFailHTLC{ - ChanID: chanID, - ID: 1, - Reason: []byte{}, - }, - }, - } -) - -// TestPackagerEmptyFwdPkg checks that the state transitions exhibited by a -// forwarding package that contains no adds, fails or settles. We expect that -// the fwdpkg reaches FwdStateCompleted immediately after writing the forwarding -// decision via SetFwdFilter. -func TestPackagerEmptyFwdPkg(t *testing.T) { - t.Parallel() - - db := makeFwdPkgDB(t, "") - - shortChanID := lnwire.NewShortChanIDFromInt(1) - packager := channeldb.NewChannelPackager(shortChanID) - - // To begin, there should be no forwarding packages on disk. - fwdPkgs := loadFwdPkgs(t, db, packager) - if len(fwdPkgs) != 0 { - t.Fatalf("no forwarding packages should exist, found %d", len(fwdPkgs)) - } - - // Next, create and write a new forwarding package with no htlcs. - fwdPkg := channeldb.NewFwdPkg(shortChanID, 0, nil, nil) - - if err := kvdb.Update(db, func(tx kvdb.RwTx) er.R { - return packager.AddFwdPkg(tx, fwdPkg) - }, func() {}); err != nil { - t.Fatalf("unable to add fwd pkg: %v", err) - } - - // There should now be one fwdpkg on disk. Since no forwarding decision - // has been written, we expect it to be FwdStateLockedIn. With no HTLCs, - // the ack filter will have no elements, and should always return true. 
- fwdPkgs = loadFwdPkgs(t, db, packager) - if len(fwdPkgs) != 1 { - t.Fatalf("expected 1 fwdpkg, instead found %d", len(fwdPkgs)) - } - assertFwdPkgState(t, fwdPkgs[0], channeldb.FwdStateLockedIn) - assertFwdPkgNumAddsSettleFails(t, fwdPkgs[0], 0, 0) - assertAckFilterIsFull(t, fwdPkgs[0], true) - - // Now, write the forwarding decision. In this case, its just an empty - // fwd filter. - if err := kvdb.Update(db, func(tx kvdb.RwTx) er.R { - return packager.SetFwdFilter(tx, fwdPkg.Height, fwdPkg.FwdFilter) - }, func() {}); err != nil { - t.Fatalf("unable to set fwdfiter: %v", err) - } - - // We should still have one package on disk. Since the forwarding - // decision has been written, it will minimally be in FwdStateProcessed. - // However with no htlcs, it should leap frog to FwdStateCompleted. - fwdPkgs = loadFwdPkgs(t, db, packager) - if len(fwdPkgs) != 1 { - t.Fatalf("expected 1 fwdpkg, instead found %d", len(fwdPkgs)) - } - assertFwdPkgState(t, fwdPkgs[0], channeldb.FwdStateCompleted) - assertFwdPkgNumAddsSettleFails(t, fwdPkgs[0], 0, 0) - assertAckFilterIsFull(t, fwdPkgs[0], true) - - // Lastly, remove the completed forwarding package from disk. - if err := kvdb.Update(db, func(tx kvdb.RwTx) er.R { - return packager.RemovePkg(tx, fwdPkg.Height) - }, func() {}); err != nil { - t.Fatalf("unable to remove fwdpkg: %v", err) - } - - // Check that the fwd package was actually removed. - fwdPkgs = loadFwdPkgs(t, db, packager) - if len(fwdPkgs) != 0 { - t.Fatalf("no forwarding packages should exist, found %d", len(fwdPkgs)) - } -} - -// TestPackagerOnlyAdds checks that the fwdpkg does not reach FwdStateCompleted -// as soon as all the adds in the package have been acked using AckAddHtlcs. -func TestPackagerOnlyAdds(t *testing.T) { - t.Parallel() - - db := makeFwdPkgDB(t, "") - - shortChanID := lnwire.NewShortChanIDFromInt(1) - packager := channeldb.NewChannelPackager(shortChanID) - - // To begin, there should be no forwarding packages on disk. 
- fwdPkgs := loadFwdPkgs(t, db, packager) - if len(fwdPkgs) != 0 { - t.Fatalf("no forwarding packages should exist, found %d", len(fwdPkgs)) - } - - // Next, create and write a new forwarding package that only has add - // htlcs. - fwdPkg := channeldb.NewFwdPkg(shortChanID, 0, adds, nil) - - nAdds := len(adds) - - if err := kvdb.Update(db, func(tx kvdb.RwTx) er.R { - return packager.AddFwdPkg(tx, fwdPkg) - }, func() {}); err != nil { - t.Fatalf("unable to add fwd pkg: %v", err) - } - - // There should now be one fwdpkg on disk. Since no forwarding decision - // has been written, we expect it to be FwdStateLockedIn. The package - // has unacked add HTLCs, so the ack filter should not be full. - fwdPkgs = loadFwdPkgs(t, db, packager) - if len(fwdPkgs) != 1 { - t.Fatalf("expected 1 fwdpkg, instead found %d", len(fwdPkgs)) - } - assertFwdPkgState(t, fwdPkgs[0], channeldb.FwdStateLockedIn) - assertFwdPkgNumAddsSettleFails(t, fwdPkgs[0], nAdds, 0) - assertAckFilterIsFull(t, fwdPkgs[0], false) - - // Now, write the forwarding decision. Since we have not explicitly - // added any adds to the fwdfilter, this would indicate that all of the - // adds were 1) settled locally by this link (exit hop), or 2) the htlc - // was failed locally. - if err := kvdb.Update(db, func(tx kvdb.RwTx) er.R { - return packager.SetFwdFilter(tx, fwdPkg.Height, fwdPkg.FwdFilter) - }, func() {}); err != nil { - t.Fatalf("unable to set fwdfiter: %v", err) - } - - for i := range adds { - // We should still have one package on disk. Since the forwarding - // decision has been written, it will minimally be in FwdStateProcessed. - // However not allf of the HTLCs have been acked, so should not - // have advanced further. 
- fwdPkgs = loadFwdPkgs(t, db, packager) - if len(fwdPkgs) != 1 { - t.Fatalf("expected 1 fwdpkg, instead found %d", len(fwdPkgs)) - } - assertFwdPkgState(t, fwdPkgs[0], channeldb.FwdStateProcessed) - assertFwdPkgNumAddsSettleFails(t, fwdPkgs[0], nAdds, 0) - assertAckFilterIsFull(t, fwdPkgs[0], false) - - addRef := channeldb.AddRef{ - Height: fwdPkg.Height, - Index: uint16(i), - } - - if err := kvdb.Update(db, func(tx kvdb.RwTx) er.R { - return packager.AckAddHtlcs(tx, addRef) - }, func() {}); err != nil { - t.Fatalf("unable to ack add htlc: %v", err) - } - } - - // We should still have one package on disk. Now that all adds have been - // acked, the ack filter should return true and the package should be - // FwdStateCompleted since there are no other settle/fail packets. - fwdPkgs = loadFwdPkgs(t, db, packager) - if len(fwdPkgs) != 1 { - t.Fatalf("expected 1 fwdpkg, instead found %d", len(fwdPkgs)) - } - assertFwdPkgState(t, fwdPkgs[0], channeldb.FwdStateCompleted) - assertFwdPkgNumAddsSettleFails(t, fwdPkgs[0], nAdds, 0) - assertAckFilterIsFull(t, fwdPkgs[0], true) - - // Lastly, remove the completed forwarding package from disk. - if err := kvdb.Update(db, func(tx kvdb.RwTx) er.R { - return packager.RemovePkg(tx, fwdPkg.Height) - }, func() {}); err != nil { - t.Fatalf("unable to remove fwdpkg: %v", err) - } - - // Check that the fwd package was actually removed. - fwdPkgs = loadFwdPkgs(t, db, packager) - if len(fwdPkgs) != 0 { - t.Fatalf("no forwarding packages should exist, found %d", len(fwdPkgs)) - } -} - -// TestPackagerOnlySettleFails asserts that the fwdpkg remains in -// FwdStateProcessed after writing the forwarding decision when there are no -// adds in the fwdpkg. We expect this because an empty FwdFilter will always -// return true, but we are still waiting for the remaining fails and settles to -// be deleted. 
-func TestPackagerOnlySettleFails(t *testing.T) { - t.Parallel() - - db := makeFwdPkgDB(t, "") - - shortChanID := lnwire.NewShortChanIDFromInt(1) - packager := channeldb.NewChannelPackager(shortChanID) - - // To begin, there should be no forwarding packages on disk. - fwdPkgs := loadFwdPkgs(t, db, packager) - if len(fwdPkgs) != 0 { - t.Fatalf("no forwarding packages should exist, found %d", len(fwdPkgs)) - } - - // Next, create and write a new forwarding package that only has add - // htlcs. - fwdPkg := channeldb.NewFwdPkg(shortChanID, 0, nil, settleFails) - - nSettleFails := len(settleFails) - - if err := kvdb.Update(db, func(tx kvdb.RwTx) er.R { - return packager.AddFwdPkg(tx, fwdPkg) - }, func() {}); err != nil { - t.Fatalf("unable to add fwd pkg: %v", err) - } - - // There should now be one fwdpkg on disk. Since no forwarding decision - // has been written, we expect it to be FwdStateLockedIn. The package - // has unacked add HTLCs, so the ack filter should not be full. - fwdPkgs = loadFwdPkgs(t, db, packager) - if len(fwdPkgs) != 1 { - t.Fatalf("expected 1 fwdpkg, instead found %d", len(fwdPkgs)) - } - assertFwdPkgState(t, fwdPkgs[0], channeldb.FwdStateLockedIn) - assertFwdPkgNumAddsSettleFails(t, fwdPkgs[0], 0, nSettleFails) - assertAckFilterIsFull(t, fwdPkgs[0], true) - - // Now, write the forwarding decision. Since we have not explicitly - // added any adds to the fwdfilter, this would indicate that all of the - // adds were 1) settled locally by this link (exit hop), or 2) the htlc - // was failed locally. - if err := kvdb.Update(db, func(tx kvdb.RwTx) er.R { - return packager.SetFwdFilter(tx, fwdPkg.Height, fwdPkg.FwdFilter) - }, func() {}); err != nil { - t.Fatalf("unable to set fwdfiter: %v", err) - } - - for i := range settleFails { - // We should still have one package on disk. Since the - // forwarding decision has been written, it will minimally be in - // FwdStateProcessed. 
However, not all of the HTLCs have been - // acked, so should not have advanced further. - fwdPkgs = loadFwdPkgs(t, db, packager) - if len(fwdPkgs) != 1 { - t.Fatalf("expected 1 fwdpkg, instead found %d", len(fwdPkgs)) - } - assertFwdPkgState(t, fwdPkgs[0], channeldb.FwdStateProcessed) - assertFwdPkgNumAddsSettleFails(t, fwdPkgs[0], 0, nSettleFails) - assertSettleFailFilterIsFull(t, fwdPkgs[0], false) - assertAckFilterIsFull(t, fwdPkgs[0], true) - - failSettleRef := channeldb.SettleFailRef{ - Source: shortChanID, - Height: fwdPkg.Height, - Index: uint16(i), - } - - if err := kvdb.Update(db, func(tx kvdb.RwTx) er.R { - return packager.AckSettleFails(tx, failSettleRef) - }, func() {}); err != nil { - t.Fatalf("unable to ack add htlc: %v", err) - } - } - - // We should still have one package on disk. Now that all settles and - // fails have been removed, package should be FwdStateCompleted since - // there are no other add packets. - fwdPkgs = loadFwdPkgs(t, db, packager) - if len(fwdPkgs) != 1 { - t.Fatalf("expected 1 fwdpkg, instead found %d", len(fwdPkgs)) - } - assertFwdPkgState(t, fwdPkgs[0], channeldb.FwdStateCompleted) - assertFwdPkgNumAddsSettleFails(t, fwdPkgs[0], 0, nSettleFails) - assertSettleFailFilterIsFull(t, fwdPkgs[0], true) - assertAckFilterIsFull(t, fwdPkgs[0], true) - - // Lastly, remove the completed forwarding package from disk. - if err := kvdb.Update(db, func(tx kvdb.RwTx) er.R { - return packager.RemovePkg(tx, fwdPkg.Height) - }, func() {}); err != nil { - t.Fatalf("unable to remove fwdpkg: %v", err) - } - - // Check that the fwd package was actually removed. - fwdPkgs = loadFwdPkgs(t, db, packager) - if len(fwdPkgs) != 0 { - t.Fatalf("no forwarding packages should exist, found %d", len(fwdPkgs)) - } -} - -// TestPackagerAddsThenSettleFails writes a fwdpkg containing both adds and -// settle/fails, then checks the behavior when the adds are acked before any of -// the settle fails. 
Here we expect pkg to remain in FwdStateProcessed while the -// remainder of the fail/settles are being deleted. -func TestPackagerAddsThenSettleFails(t *testing.T) { - t.Parallel() - - db := makeFwdPkgDB(t, "") - - shortChanID := lnwire.NewShortChanIDFromInt(1) - packager := channeldb.NewChannelPackager(shortChanID) - - // To begin, there should be no forwarding packages on disk. - fwdPkgs := loadFwdPkgs(t, db, packager) - if len(fwdPkgs) != 0 { - t.Fatalf("no forwarding packages should exist, found %d", len(fwdPkgs)) - } - - // Next, create and write a new forwarding package that only has add - // htlcs. - fwdPkg := channeldb.NewFwdPkg(shortChanID, 0, adds, settleFails) - - nAdds := len(adds) - nSettleFails := len(settleFails) - - if err := kvdb.Update(db, func(tx kvdb.RwTx) er.R { - return packager.AddFwdPkg(tx, fwdPkg) - }, func() {}); err != nil { - t.Fatalf("unable to add fwd pkg: %v", err) - } - - // There should now be one fwdpkg on disk. Since no forwarding decision - // has been written, we expect it to be FwdStateLockedIn. The package - // has unacked add HTLCs, so the ack filter should not be full. - fwdPkgs = loadFwdPkgs(t, db, packager) - if len(fwdPkgs) != 1 { - t.Fatalf("expected 1 fwdpkg, instead found %d", len(fwdPkgs)) - } - assertFwdPkgState(t, fwdPkgs[0], channeldb.FwdStateLockedIn) - assertFwdPkgNumAddsSettleFails(t, fwdPkgs[0], nAdds, nSettleFails) - assertAckFilterIsFull(t, fwdPkgs[0], false) - - // Now, write the forwarding decision. Since we have not explicitly - // added any adds to the fwdfilter, this would indicate that all of the - // adds were 1) settled locally by this link (exit hop), or 2) the htlc - // was failed locally. - if err := kvdb.Update(db, func(tx kvdb.RwTx) er.R { - return packager.SetFwdFilter(tx, fwdPkg.Height, fwdPkg.FwdFilter) - }, func() {}); err != nil { - t.Fatalf("unable to set fwdfiter: %v", err) - } - - for i := range adds { - // We should still have one package on disk. 
Since the forwarding - // decision has been written, it will minimally be in FwdStateProcessed. - // However not allf of the HTLCs have been acked, so should not - // have advanced further. - fwdPkgs = loadFwdPkgs(t, db, packager) - if len(fwdPkgs) != 1 { - t.Fatalf("expected 1 fwdpkg, instead found %d", len(fwdPkgs)) - } - assertFwdPkgState(t, fwdPkgs[0], channeldb.FwdStateProcessed) - assertFwdPkgNumAddsSettleFails(t, fwdPkgs[0], nAdds, nSettleFails) - assertSettleFailFilterIsFull(t, fwdPkgs[0], false) - assertAckFilterIsFull(t, fwdPkgs[0], false) - - addRef := channeldb.AddRef{ - Height: fwdPkg.Height, - Index: uint16(i), - } - - if err := kvdb.Update(db, func(tx kvdb.RwTx) er.R { - return packager.AckAddHtlcs(tx, addRef) - }, func() {}); err != nil { - t.Fatalf("unable to ack add htlc: %v", err) - } - } - - for i := range settleFails { - // We should still have one package on disk. Since the - // forwarding decision has been written, it will minimally be in - // FwdStateProcessed. However not allf of the HTLCs have been - // acked, so should not have advanced further. - fwdPkgs = loadFwdPkgs(t, db, packager) - if len(fwdPkgs) != 1 { - t.Fatalf("expected 1 fwdpkg, instead found %d", len(fwdPkgs)) - } - assertFwdPkgState(t, fwdPkgs[0], channeldb.FwdStateProcessed) - assertFwdPkgNumAddsSettleFails(t, fwdPkgs[0], nAdds, nSettleFails) - assertSettleFailFilterIsFull(t, fwdPkgs[0], false) - assertAckFilterIsFull(t, fwdPkgs[0], true) - - failSettleRef := channeldb.SettleFailRef{ - Source: shortChanID, - Height: fwdPkg.Height, - Index: uint16(i), - } - - if err := kvdb.Update(db, func(tx kvdb.RwTx) er.R { - return packager.AckSettleFails(tx, failSettleRef) - }, func() {}); err != nil { - t.Fatalf("unable to remove settle/fail htlc: %v", err) - } - } - - // We should still have one package on disk. Now that all settles and - // fails have been removed, package should be FwdStateCompleted since - // there are no other add packets. 
- fwdPkgs = loadFwdPkgs(t, db, packager) - if len(fwdPkgs) != 1 { - t.Fatalf("expected 1 fwdpkg, instead found %d", len(fwdPkgs)) - } - assertFwdPkgState(t, fwdPkgs[0], channeldb.FwdStateCompleted) - assertFwdPkgNumAddsSettleFails(t, fwdPkgs[0], nAdds, nSettleFails) - assertSettleFailFilterIsFull(t, fwdPkgs[0], true) - assertAckFilterIsFull(t, fwdPkgs[0], true) - - // Lastly, remove the completed forwarding package from disk. - if err := kvdb.Update(db, func(tx kvdb.RwTx) er.R { - return packager.RemovePkg(tx, fwdPkg.Height) - }, func() {}); err != nil { - t.Fatalf("unable to remove fwdpkg: %v", err) - } - - // Check that the fwd package was actually removed. - fwdPkgs = loadFwdPkgs(t, db, packager) - if len(fwdPkgs) != 0 { - t.Fatalf("no forwarding packages should exist, found %d", len(fwdPkgs)) - } -} - -// TestPackagerSettleFailsThenAdds writes a fwdpkg with both adds and -// settle/fails, then checks the behavior when the settle/fails are removed -// before any of the adds have been acked. This should cause the fwdpkg to -// remain in FwdStateProcessed until the final ack is recorded, at which point -// it should be promoted directly to FwdStateCompleted.since all adds have been -// removed. -func TestPackagerSettleFailsThenAdds(t *testing.T) { - t.Parallel() - - db := makeFwdPkgDB(t, "") - - shortChanID := lnwire.NewShortChanIDFromInt(1) - packager := channeldb.NewChannelPackager(shortChanID) - - // To begin, there should be no forwarding packages on disk. - fwdPkgs := loadFwdPkgs(t, db, packager) - if len(fwdPkgs) != 0 { - t.Fatalf("no forwarding packages should exist, found %d", len(fwdPkgs)) - } - - // Next, create and write a new forwarding package that has both add - // and settle/fail htlcs. 
- fwdPkg := channeldb.NewFwdPkg(shortChanID, 0, adds, settleFails) - - nAdds := len(adds) - nSettleFails := len(settleFails) - - if err := kvdb.Update(db, func(tx kvdb.RwTx) er.R { - return packager.AddFwdPkg(tx, fwdPkg) - }, func() {}); err != nil { - t.Fatalf("unable to add fwd pkg: %v", err) - } - - // There should now be one fwdpkg on disk. Since no forwarding decision - // has been written, we expect it to be FwdStateLockedIn. The package - // has unacked add HTLCs, so the ack filter should not be full. - fwdPkgs = loadFwdPkgs(t, db, packager) - if len(fwdPkgs) != 1 { - t.Fatalf("expected 1 fwdpkg, instead found %d", len(fwdPkgs)) - } - assertFwdPkgState(t, fwdPkgs[0], channeldb.FwdStateLockedIn) - assertFwdPkgNumAddsSettleFails(t, fwdPkgs[0], nAdds, nSettleFails) - assertAckFilterIsFull(t, fwdPkgs[0], false) - - // Now, write the forwarding decision. Since we have not explicitly - // added any adds to the fwdfilter, this would indicate that all of the - // adds were 1) settled locally by this link (exit hop), or 2) the htlc - // was failed locally. - if err := kvdb.Update(db, func(tx kvdb.RwTx) er.R { - return packager.SetFwdFilter(tx, fwdPkg.Height, fwdPkg.FwdFilter) - }, func() {}); err != nil { - t.Fatalf("unable to set fwdfiter: %v", err) - } - - // Simulate another channel deleting the settle/fails it received from - // the original fwd pkg. - // TODO(conner): use different packager/s? - for i := range settleFails { - // We should still have one package on disk. Since the - // forwarding decision has been written, it will minimally be in - // FwdStateProcessed. However none all of the add HTLCs have - // been acked, so should not have advanced further. 
- fwdPkgs = loadFwdPkgs(t, db, packager) - if len(fwdPkgs) != 1 { - t.Fatalf("expected 1 fwdpkg, instead found %d", len(fwdPkgs)) - } - assertFwdPkgState(t, fwdPkgs[0], channeldb.FwdStateProcessed) - assertFwdPkgNumAddsSettleFails(t, fwdPkgs[0], nAdds, nSettleFails) - assertSettleFailFilterIsFull(t, fwdPkgs[0], false) - assertAckFilterIsFull(t, fwdPkgs[0], false) - - failSettleRef := channeldb.SettleFailRef{ - Source: shortChanID, - Height: fwdPkg.Height, - Index: uint16(i), - } - - if err := kvdb.Update(db, func(tx kvdb.RwTx) er.R { - return packager.AckSettleFails(tx, failSettleRef) - }, func() {}); err != nil { - t.Fatalf("unable to remove settle/fail htlc: %v", err) - } - } - - // Now simulate this channel receiving a fail/settle for the adds in the - // fwdpkg. - for i := range adds { - // Again, we should still have one package on disk and be in - // FwdStateProcessed. This should not change until all of the - // add htlcs have been acked. - fwdPkgs = loadFwdPkgs(t, db, packager) - if len(fwdPkgs) != 1 { - t.Fatalf("expected 1 fwdpkg, instead found %d", len(fwdPkgs)) - } - assertFwdPkgState(t, fwdPkgs[0], channeldb.FwdStateProcessed) - assertFwdPkgNumAddsSettleFails(t, fwdPkgs[0], nAdds, nSettleFails) - assertSettleFailFilterIsFull(t, fwdPkgs[0], true) - assertAckFilterIsFull(t, fwdPkgs[0], false) - - addRef := channeldb.AddRef{ - Height: fwdPkg.Height, - Index: uint16(i), - } - - if err := kvdb.Update(db, func(tx kvdb.RwTx) er.R { - return packager.AckAddHtlcs(tx, addRef) - }, func() {}); err != nil { - t.Fatalf("unable to ack add htlc: %v", err) - } - } - - // We should still have one package on disk. Now that all settles and - // fails have been removed, package should be FwdStateCompleted since - // there are no other add packets. 
- fwdPkgs = loadFwdPkgs(t, db, packager) - if len(fwdPkgs) != 1 { - t.Fatalf("expected 1 fwdpkg, instead found %d", len(fwdPkgs)) - } - assertFwdPkgState(t, fwdPkgs[0], channeldb.FwdStateCompleted) - assertFwdPkgNumAddsSettleFails(t, fwdPkgs[0], nAdds, nSettleFails) - assertSettleFailFilterIsFull(t, fwdPkgs[0], true) - assertAckFilterIsFull(t, fwdPkgs[0], true) - - // Lastly, remove the completed forwarding package from disk. - if err := kvdb.Update(db, func(tx kvdb.RwTx) er.R { - return packager.RemovePkg(tx, fwdPkg.Height) - }, func() {}); err != nil { - t.Fatalf("unable to remove fwdpkg: %v", err) - } - - // Check that the fwd package was actually removed. - fwdPkgs = loadFwdPkgs(t, db, packager) - if len(fwdPkgs) != 0 { - t.Fatalf("no forwarding packages should exist, found %d", len(fwdPkgs)) - } -} - -// assertFwdPkgState checks the current state of a fwdpkg meets our -// expectations. -func assertFwdPkgState(t *testing.T, fwdPkg *channeldb.FwdPkg, - state channeldb.FwdState) { - _, _, line, _ := runtime.Caller(1) - if fwdPkg.State != state { - t.Fatalf("line %d: expected fwdpkg in state %v, found %v", - line, state, fwdPkg.State) - } -} - -// assertFwdPkgNumAddsSettleFails checks that the number of adds and -// settle/fail log updates are correct. -func assertFwdPkgNumAddsSettleFails(t *testing.T, fwdPkg *channeldb.FwdPkg, - expectedNumAdds, expectedNumSettleFails int) { - _, _, line, _ := runtime.Caller(1) - if len(fwdPkg.Adds) != expectedNumAdds { - t.Fatalf("line %d: expected fwdpkg to have %d adds, found %d", - line, expectedNumAdds, len(fwdPkg.Adds)) - } - - if len(fwdPkg.SettleFails) != expectedNumSettleFails { - t.Fatalf("line %d: expected fwdpkg to have %d settle/fails, found %d", - line, expectedNumSettleFails, len(fwdPkg.SettleFails)) - } -} - -// assertAckFilterIsFull checks whether or not a fwdpkg's ack filter matches our -// expected full-ness. 
-func assertAckFilterIsFull(t *testing.T, fwdPkg *channeldb.FwdPkg, expected bool) { - _, _, line, _ := runtime.Caller(1) - if fwdPkg.AckFilter.IsFull() != expected { - t.Fatalf("line %d: expected fwdpkg ack filter IsFull to be %v, "+ - "found %v", line, expected, fwdPkg.AckFilter.IsFull()) - } -} - -// assertSettleFailFilterIsFull checks whether or not a fwdpkg's settle fail -// filter matches our expected full-ness. -func assertSettleFailFilterIsFull(t *testing.T, fwdPkg *channeldb.FwdPkg, expected bool) { - _, _, line, _ := runtime.Caller(1) - if fwdPkg.SettleFailFilter.IsFull() != expected { - t.Fatalf("line %d: expected fwdpkg settle/fail filter IsFull to be %v, "+ - "found %v", line, expected, fwdPkg.SettleFailFilter.IsFull()) - } -} - -// loadFwdPkgs is a helper method that reads all forwarding packages for a -// particular packager. -func loadFwdPkgs(t *testing.T, db kvdb.Backend, - packager channeldb.FwdPackager) []*channeldb.FwdPkg { - - var fwdPkgs []*channeldb.FwdPkg - if err := kvdb.View(db, func(tx kvdb.RTx) er.R { - var err er.R - fwdPkgs, err = packager.LoadFwdPkgs(tx) - return err - }, func() { - fwdPkgs = nil - }); err != nil { - t.Fatalf("unable to load fwd pkgs: %v", err) - } - - return fwdPkgs -} - -// makeFwdPkgDB initializes a test database for forwarding packages. If the -// provided path is an empty, it will create a temp dir/file to use. 
-func makeFwdPkgDB(t *testing.T, path string) kvdb.Backend { // nolint:unparam - if path == "" { - var err error - path, err = ioutil.TempDir("", "fwdpkgdb") - if err != nil { - t.Fatalf("unable to create temp path: %v", err) - } - - path = filepath.Join(path, "fwdpkg.db") - } - - bdb, err := kvdb.Create(kvdb.BoltBackendName, path, true) - if err != nil { - t.Fatalf("unable to open boltdb: %v", err) - } - - return bdb -} diff --git a/lnd/channeldb/graph.go b/lnd/channeldb/graph.go deleted file mode 100644 index 5f6b16d3..00000000 --- a/lnd/channeldb/graph.go +++ /dev/null @@ -1,4120 +0,0 @@ -package channeldb - -import ( - "bytes" - "crypto/sha256" - "encoding/binary" - "fmt" - "image/color" - "io" - "math" - "net" - "sync" - "time" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/routing/route" - "github.com/pkt-cash/pktd/pktlog/log" - "github.com/pkt-cash/pktd/txscript/opcode" - "github.com/pkt-cash/pktd/txscript/scriptbuilder" - "github.com/pkt-cash/pktd/wire" -) - -var ( - // nodeBucket is a bucket which houses all the vertices or nodes within - // the channel graph. This bucket has a single-sub bucket which adds an - // additional index from pubkey -> alias. Within the top-level of this - // bucket, the key space maps a node's compressed public key to the - // serialized information for that node. Additionally, there's a - // special key "source" which stores the pubkey of the source node. The - // source node is used as the starting point for all graph/queries and - // traversals. The graph is formed as a star-graph with the source node - // at the center. 
- // - // maps: pubKey -> nodeInfo - // maps: source -> selfPubKey - nodeBucket = []byte("graph-node") - - // nodeUpdateIndexBucket is a sub-bucket of the nodeBucket. This bucket - // will be used to quickly look up the "freshness" of a node's last - // update to the network. The bucket only contains keys, and no values, - // it's mapping: - // - // maps: updateTime || nodeID -> nil - nodeUpdateIndexBucket = []byte("graph-node-update-index") - - // sourceKey is a special key that resides within the nodeBucket. The - // sourceKey maps a key to the public key of the "self node". - sourceKey = []byte("source") - - // aliasIndexBucket is a sub-bucket that's nested within the main - // nodeBucket. This bucket maps the public key of a node to its - // current alias. This bucket is provided as it can be used within a - // future UI layer to add an additional degree of confirmation. - aliasIndexBucket = []byte("alias") - - // edgeBucket is a bucket which houses all of the edge or channel - // information within the channel graph. This bucket essentially acts - // as an adjacency list, which in conjunction with a range scan, can be - // used to iterate over all the incoming and outgoing edges for a - // particular node. Key in the bucket use a prefix scheme which leads - // with the node's public key and sends with the compact edge ID. - // For each chanID, there will be two entries within the bucket, as the - // graph is directed: nodes may have different policies w.r.t to fees - // for their respective directions. - // - // maps: pubKey || chanID -> channel edge policy for node - edgeBucket = []byte("graph-edge") - - // unknownPolicy is represented as an empty slice. It is - // used as the value in edgeBucket for unknown channel edge policies. - // Unknown policies are still stored in the database to enable efficient - // lookup of incoming channel edges. 
- unknownPolicy = []byte{} - - // chanStart is an array of all zero bytes which is used to perform - // range scans within the edgeBucket to obtain all of the outgoing - // edges for a particular node. - chanStart [8]byte - - // edgeIndexBucket is an index which can be used to iterate all edges - // in the bucket, grouping them according to their in/out nodes. - // Additionally, the items in this bucket also contain the complete - // edge information for a channel. The edge information includes the - // capacity of the channel, the nodes that made the channel, etc. This - // bucket resides within the edgeBucket above. Creation of an edge - // proceeds in two phases: first the edge is added to the edge index, - // afterwards the edgeBucket can be updated with the latest details of - // the edge as they are announced on the network. - // - // maps: chanID -> pubKey1 || pubKey2 || restofEdgeInfo - edgeIndexBucket = []byte("edge-index") - - // edgeUpdateIndexBucket is a sub-bucket of the main edgeBucket. This - // bucket contains an index which allows us to gauge the "freshness" of - // a channel's last updates. - // - // maps: updateTime || chanID -> nil - edgeUpdateIndexBucket = []byte("edge-update-index") - - // channelPointBucket maps a channel's full outpoint (txid:index) to - // its short 8-byte channel ID. This bucket resides within the - // edgeBucket above, and can be used to quickly remove an edge due to - // the outpoint being spent, or to query for existence of a channel. - // - // maps: outPoint -> chanID - channelPointBucket = []byte("chan-index") - - // zombieBucket is a sub-bucket of the main edgeBucket bucket - // responsible for maintaining an index of zombie channels. Each entry - // exists within the bucket as follows: - // - // maps: chanID -> pubKey1 || pubKey2 - // - // The chanID represents the channel ID of the edge that is marked as a - // zombie and is used as the key, which maps to the public keys of the - // edge's participants. 
- zombieBucket = []byte("zombie-index") - - // disabledEdgePolicyBucket is a sub-bucket of the main edgeBucket bucket - // responsible for maintaining an index of disabled edge policies. Each - // entry exists within the bucket as follows: - // - // maps: -> []byte{} - // - // The chanID represents the channel ID of the edge and the direction is - // one byte representing the direction of the edge. The main purpose of - // this index is to allow pruning disabled channels in a fast way without - // the need to iterate all over the graph. - disabledEdgePolicyBucket = []byte("disabled-edge-policy-index") - - // graphMetaBucket is a top-level bucket which stores various meta-deta - // related to the on-disk channel graph. Data stored in this bucket - // includes the block to which the graph has been synced to, the total - // number of channels, etc. - graphMetaBucket = []byte("graph-meta") - - // pruneLogBucket is a bucket within the graphMetaBucket that stores - // a mapping from the block height to the hash for the blocks used to - // prune the graph. - // Once a new block is discovered, any channels that have been closed - // (by spending the outpoint) can safely be removed from the graph, and - // the block is added to the prune log. We need to keep such a log for - // the case where a reorg happens, and we must "rewind" the state of the - // graph by removing channels that were previously confirmed. In such a - // case we'll remove all entries from the prune log with a block height - // that no longer exists. - pruneLogBucket = []byte("prune-log") -) - -const ( - // MaxAllowedExtraOpaqueBytes is the largest amount of opaque bytes that - // we'll permit to be written to disk. We limit this as otherwise, it - // would be possible for a node to create a ton of updates and slowly - // fill our disk, and also waste bandwidth due to relaying. - MaxAllowedExtraOpaqueBytes = 10000 - - // feeRateParts is the total number of parts used to express fee rates. 
- feeRateParts = 1e6 -) - -// ChannelGraph is a persistent, on-disk graph representation of the Lightning -// Network. This struct can be used to implement path finding algorithms on top -// of, and also to update a node's view based on information received from the -// p2p network. Internally, the graph is stored using a modified adjacency list -// representation with some added object interaction possible with each -// serialized edge/node. The graph is stored is directed, meaning that are two -// edges stored for each channel: an inbound/outbound edge for each node pair. -// Nodes, edges, and edge information can all be added to the graph -// independently. Edge removal results in the deletion of all edge information -// for that edge. -type ChannelGraph struct { - db *DB - - cacheMu sync.RWMutex - rejectCache *rejectCache - chanCache *channelCache -} - -// newChannelGraph allocates a new ChannelGraph backed by a DB instance. The -// returned instance has its own unique reject cache and channel cache. -func newChannelGraph(db *DB, rejectCacheSize, chanCacheSize int) *ChannelGraph { - return &ChannelGraph{ - db: db, - rejectCache: newRejectCache(rejectCacheSize), - chanCache: newChannelCache(chanCacheSize), - } -} - -// Database returns a pointer to the underlying database. -func (c *ChannelGraph) Database() *DB { - return c.db -} - -// ForEachChannel iterates through all the channel edges stored within the -// graph and invokes the passed callback for each edge. The callback takes two -// edges as since this is a directed graph, both the in/out edges are visited. -// If the callback returns an error, then the transaction is aborted and the -// iteration stops early. -// -// NOTE: If an edge can't be found, or wasn't advertised, then a nil pointer -// for that particular channel edge routing policy will be passed into the -// callback. 
-func (c *ChannelGraph) ForEachChannel(cb func(*ChannelEdgeInfo, *ChannelEdgePolicy, *ChannelEdgePolicy) er.R) er.R { - // TODO(roasbeef): ptr map to reduce # of allocs? no duplicates - - return kvdb.View(c.db, func(tx kvdb.RTx) er.R { - // First, grab the node bucket. This will be used to populate - // the Node pointers in each edge read from disk. - nodes := tx.ReadBucket(nodeBucket) - if nodes == nil { - return ErrGraphNotFound.Default() - } - - // Next, grab the edge bucket which stores the edges, and also - // the index itself so we can group the directed edges together - // logically. - edges := tx.ReadBucket(edgeBucket) - if edges == nil { - return ErrGraphNoEdgesFound.Default() - } - edgeIndex := edges.NestedReadBucket(edgeIndexBucket) - if edgeIndex == nil { - return ErrGraphNoEdgesFound.Default() - } - - // For each edge pair within the edge index, we fetch each edge - // itself and also the node information in order to fully - // populated the object. - return edgeIndex.ForEach(func(chanID, edgeInfoBytes []byte) er.R { - infoReader := bytes.NewReader(edgeInfoBytes) - edgeInfo, err := deserializeChanEdgeInfo(infoReader) - if err != nil { - return err - } - edgeInfo.db = c.db - - edge1, edge2, err := fetchChanEdgePolicies( - edgeIndex, edges, nodes, chanID, c.db, - ) - if err != nil { - return err - } - - // With both edges read, execute the call back. IF this - // function returns an error then the transaction will - // be aborted. - return cb(&edgeInfo, edge1, edge2) - }) - }, func() {}) -} - -// ForEachNodeChannel iterates through all channels of a given node, executing the -// passed callback with an edge info structure and the policies of each end -// of the channel. The first edge policy is the outgoing edge *to* the -// the connecting node, while the second is the incoming edge *from* the -// connecting node. If the callback returns an error, then the iteration is -// halted with the error propagated back up to the caller. 
-// -// Unknown policies are passed into the callback as nil values. -// -// If the caller wishes to re-use an existing boltdb transaction, then it -// should be passed as the first argument. Otherwise the first argument should -// be nil and a fresh transaction will be created to execute the graph -// traversal. -func (c *ChannelGraph) ForEachNodeChannel(tx kvdb.RTx, nodePub []byte, - cb func(kvdb.RTx, *ChannelEdgeInfo, *ChannelEdgePolicy, - *ChannelEdgePolicy) er.R) er.R { - - db := c.db - - return nodeTraversal(tx, nodePub, db, cb) -} - -// DisabledChannelIDs returns the channel ids of disabled channels. -// A channel is disabled when two of the associated ChanelEdgePolicies -// have their disabled bit on. -func (c *ChannelGraph) DisabledChannelIDs() ([]uint64, er.R) { - var disabledChanIDs []uint64 - var chanEdgeFound map[uint64]struct{} - - err := kvdb.View(c.db, func(tx kvdb.RTx) er.R { - edges := tx.ReadBucket(edgeBucket) - if edges == nil { - return ErrGraphNoEdgesFound.Default() - } - - disabledEdgePolicyIndex := edges.NestedReadBucket( - disabledEdgePolicyBucket, - ) - if disabledEdgePolicyIndex == nil { - return nil - } - - // We iterate over all disabled policies and we add each channel that - // has more than one disabled policy to disabledChanIDs array. - return disabledEdgePolicyIndex.ForEach(func(k, v []byte) er.R { - chanID := byteOrder.Uint64(k[:8]) - _, edgeFound := chanEdgeFound[chanID] - if edgeFound { - delete(chanEdgeFound, chanID) - disabledChanIDs = append(disabledChanIDs, chanID) - return nil - } - - chanEdgeFound[chanID] = struct{}{} - return nil - }) - }, func() { - disabledChanIDs = nil - chanEdgeFound = make(map[uint64]struct{}) - }) - if err != nil { - return nil, err - } - - return disabledChanIDs, nil -} - -// ForEachNode iterates through all the stored vertices/nodes in the graph, -// executing the passed callback with each node encountered. 
If the callback -// returns an error, then the transaction is aborted and the iteration stops -// early. -// -// TODO(roasbeef): add iterator interface to allow for memory efficient graph -// traversal when graph gets mega -func (c *ChannelGraph) ForEachNode(cb func(kvdb.RTx, *LightningNode) er.R) er.R { // nolint:interfacer - traversal := func(tx kvdb.RTx) er.R { - // First grab the nodes bucket which stores the mapping from - // pubKey to node information. - nodes := tx.ReadBucket(nodeBucket) - if nodes == nil { - return ErrGraphNotFound.Default() - } - - return nodes.ForEach(func(pubKey, nodeBytes []byte) er.R { - // If this is the source key, then we skip this - // iteration as the value for this key is a pubKey - // rather than raw node information. - if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 { - return nil - } - - nodeReader := bytes.NewReader(nodeBytes) - node, err := deserializeLightningNode(nodeReader) - if err != nil { - return err - } - node.db = c.db - - // Execute the callback, the transaction will abort if - // this returns an error. - return cb(tx, &node) - }) - } - - return kvdb.View(c.db, traversal, func() {}) -} - -// SourceNode returns the source node of the graph. The source node is treated -// as the center node within a star-graph. This method may be used to kick off -// a path finding algorithm in order to explore the reachability of another -// node based off the source node. -func (c *ChannelGraph) SourceNode() (*LightningNode, er.R) { - var source *LightningNode - err := kvdb.View(c.db, func(tx kvdb.RTx) er.R { - // First grab the nodes bucket which stores the mapping from - // pubKey to node information. 
- nodes := tx.ReadBucket(nodeBucket) - if nodes == nil { - return ErrGraphNotFound.Default() - } - - node, err := c.sourceNode(nodes) - if err != nil { - return err - } - source = node - - return nil - }, func() { - source = nil - }) - if err != nil { - return nil, err - } - - return source, nil -} - -// sourceNode uses an existing database transaction and returns the source node -// of the graph. The source node is treated as the center node within a -// star-graph. This method may be used to kick off a path finding algorithm in -// order to explore the reachability of another node based off the source node. -func (c *ChannelGraph) sourceNode(nodes kvdb.RBucket) (*LightningNode, er.R) { - selfPub := nodes.Get(sourceKey) - if selfPub == nil { - return nil, ErrSourceNodeNotSet.Default() - } - - // With the pubKey of the source node retrieved, we're able to - // fetch the full node information. - node, err := fetchLightningNode(nodes, selfPub) - if err != nil { - return nil, err - } - node.db = c.db - - return &node, nil -} - -// SetSourceNode sets the source node within the graph database. The source -// node is to be used as the center of a star-graph within path finding -// algorithms. -func (c *ChannelGraph) SetSourceNode(node *LightningNode) er.R { - nodePubBytes := node.PubKeyBytes[:] - - return kvdb.Update(c.db, func(tx kvdb.RwTx) er.R { - // First grab the nodes bucket which stores the mapping from - // pubKey to node information. - nodes, err := tx.CreateTopLevelBucket(nodeBucket) - if err != nil { - return err - } - - // Next we create the mapping from source to the targeted - // public key. - if err := nodes.Put(sourceKey, nodePubBytes); err != nil { - return err - } - - // Finally, we commit the information of the lightning node - // itself. - return addLightningNode(tx, node) - }, func() {}) -} - -// AddLightningNode adds a vertex/node to the graph database. 
If the node is not -// in the database from before, this will add a new, unconnected one to the -// graph. If it is present from before, this will update that node's -// information. Note that this method is expected to only be called to update -// an already present node from a node announcement, or to insert a node found -// in a channel update. -// -// TODO(roasbeef): also need sig of announcement -func (c *ChannelGraph) AddLightningNode(node *LightningNode) er.R { - return kvdb.Update(c.db, func(tx kvdb.RwTx) er.R { - return addLightningNode(tx, node) - }, func() {}) -} - -func addLightningNode(tx kvdb.RwTx, node *LightningNode) er.R { - nodes, err := tx.CreateTopLevelBucket(nodeBucket) - if err != nil { - return err - } - - aliases, err := nodes.CreateBucketIfNotExists(aliasIndexBucket) - if err != nil { - return err - } - - updateIndex, err := nodes.CreateBucketIfNotExists( - nodeUpdateIndexBucket, - ) - if err != nil { - return err - } - - return putLightningNode(nodes, aliases, updateIndex, node) -} - -// LookupAlias attempts to return the alias as advertised by the target node. -// TODO(roasbeef): currently assumes that aliases are unique... -func (c *ChannelGraph) LookupAlias(pub *btcec.PublicKey) (string, er.R) { - var alias string - - err := kvdb.View(c.db, func(tx kvdb.RTx) er.R { - nodes := tx.ReadBucket(nodeBucket) - if nodes == nil { - return ErrGraphNodesNotFound.Default() - } - - aliases := nodes.NestedReadBucket(aliasIndexBucket) - if aliases == nil { - return ErrGraphNodesNotFound.Default() - } - - nodePub := pub.SerializeCompressed() - a := aliases.Get(nodePub) - if a == nil { - return ErrNodeAliasNotFound.Default() - } - - // TODO(roasbeef): should actually be using the utf-8 - // package... 
- alias = string(a) - return nil - }, func() { - alias = "" - }) - if err != nil { - return "", err - } - - return alias, nil -} - -// DeleteLightningNode starts a new database transaction to remove a vertex/node -// from the database according to the node's public key. -func (c *ChannelGraph) DeleteLightningNode(nodePub route.Vertex) er.R { - // TODO(roasbeef): ensure dangling edges are removed... - return kvdb.Update(c.db, func(tx kvdb.RwTx) er.R { - nodes := tx.ReadWriteBucket(nodeBucket) - if nodes == nil { - return ErrGraphNodeNotFound.Default() - } - - return c.deleteLightningNode(nodes, nodePub[:]) - }, func() {}) -} - -// deleteLightningNode uses an existing database transaction to remove a -// vertex/node from the database according to the node's public key. -func (c *ChannelGraph) deleteLightningNode(nodes kvdb.RwBucket, - compressedPubKey []byte) er.R { - - aliases := nodes.NestedReadWriteBucket(aliasIndexBucket) - if aliases == nil { - return ErrGraphNodesNotFound.Default() - } - - if err := aliases.Delete(compressedPubKey); err != nil { - return err - } - - // Before we delete the node, we'll fetch its current state so we can - // determine when its last update was to clear out the node update - // index. - node, err := fetchLightningNode(nodes, compressedPubKey) - if err != nil { - return err - } - - if err := nodes.Delete(compressedPubKey); err != nil { - - return err - } - - // Finally, we'll delete the index entry for the node within the - // nodeUpdateIndexBucket as this node is no longer active, so we don't - // need to track its last update. - nodeUpdateIndex := nodes.NestedReadWriteBucket(nodeUpdateIndexBucket) - if nodeUpdateIndex == nil { - return ErrGraphNodesNotFound.Default() - } - - // In order to delete the entry, we'll need to reconstruct the key for - // its last update. 
- updateUnix := uint64(node.LastUpdate.Unix()) - var indexKey [8 + 33]byte - byteOrder.PutUint64(indexKey[:8], updateUnix) - copy(indexKey[8:], compressedPubKey) - - return nodeUpdateIndex.Delete(indexKey[:]) -} - -// AddChannelEdge adds a new (undirected, blank) edge to the graph database. An -// undirected edge from the two target nodes are created. The information -// stored denotes the static attributes of the channel, such as the channelID, -// the keys involved in creation of the channel, and the set of features that -// the channel supports. The chanPoint and chanID are used to uniquely identify -// the edge globally within the database. -func (c *ChannelGraph) AddChannelEdge(edge *ChannelEdgeInfo) er.R { - c.cacheMu.Lock() - defer c.cacheMu.Unlock() - - err := kvdb.Update(c.db, func(tx kvdb.RwTx) er.R { - return c.addChannelEdge(tx, edge) - }, func() {}) - if err != nil { - return err - } - - c.rejectCache.remove(edge.ChannelID) - c.chanCache.remove(edge.ChannelID) - - return nil -} - -// addChannelEdge is the private form of AddChannelEdge that allows callers to -// utilize an existing db transaction. -func (c *ChannelGraph) addChannelEdge(tx kvdb.RwTx, edge *ChannelEdgeInfo) er.R { - // Construct the channel's primary key which is the 8-byte channel ID. - var chanKey [8]byte - binary.BigEndian.PutUint64(chanKey[:], edge.ChannelID) - - nodes, err := tx.CreateTopLevelBucket(nodeBucket) - if err != nil { - return err - } - edges, err := tx.CreateTopLevelBucket(edgeBucket) - if err != nil { - return err - } - edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket) - if err != nil { - return err - } - chanIndex, err := edges.CreateBucketIfNotExists(channelPointBucket) - if err != nil { - return err - } - - // First, attempt to check if this edge has already been created. If - // so, then we can exit early as this method is meant to be idempotent. 
- if edgeInfo := edgeIndex.Get(chanKey[:]); edgeInfo != nil { - return ErrEdgeAlreadyExist.Default() - } - - // Before we insert the channel into the database, we'll ensure that - // both nodes already exist in the channel graph. If either node - // doesn't, then we'll insert a "shell" node that just includes its - // public key, so subsequent validation and queries can work properly. - _, node1Err := fetchLightningNode(nodes, edge.NodeKey1Bytes[:]) - switch { - case ErrGraphNodeNotFound.Is(node1Err): - node1Shell := LightningNode{ - PubKeyBytes: edge.NodeKey1Bytes, - HaveNodeAnnouncement: false, - } - err := addLightningNode(tx, &node1Shell) - if err != nil { - return er.Errorf("unable to create shell node "+ - "for: %x", edge.NodeKey1Bytes) - - } - case node1Err != nil: - return err - } - - _, node2Err := fetchLightningNode(nodes, edge.NodeKey2Bytes[:]) - switch { - case ErrGraphNodeNotFound.Is(node2Err): - node2Shell := LightningNode{ - PubKeyBytes: edge.NodeKey2Bytes, - HaveNodeAnnouncement: false, - } - err := addLightningNode(tx, &node2Shell) - if err != nil { - return er.Errorf("unable to create shell node "+ - "for: %x", edge.NodeKey2Bytes) - - } - case node2Err != nil: - return err - } - - // If the edge hasn't been created yet, then we'll first add it to the - // edge index in order to associate the edge between two nodes and also - // store the static components of the channel. - if err := putChanEdgeInfo(edgeIndex, edge, chanKey); err != nil { - return err - } - - // Mark edge policies for both sides as unknown. This is to enable - // efficient incoming channel lookup for a node. - for _, key := range []*[33]byte{&edge.NodeKey1Bytes, - &edge.NodeKey2Bytes} { - - err := putChanEdgePolicyUnknown(edges, edge.ChannelID, - key[:]) - if err != nil { - return err - } - } - - // Finally we add it to the channel index which maps channel points - // (outpoints) to the shorter channel ID's. 
- var b bytes.Buffer - if err := writeOutpoint(&b, &edge.ChannelPoint); err != nil { - return err - } - return chanIndex.Put(b.Bytes(), chanKey[:]) -} - -// HasChannelEdge returns true if the database knows of a channel edge with the -// passed channel ID, and false otherwise. If an edge with that ID is found -// within the graph, then two time stamps representing the last time the edge -// was updated for both directed edges are returned along with the boolean. If -// it is not found, then the zombie index is checked and its result is returned -// as the second boolean. -func (c *ChannelGraph) HasChannelEdge( - chanID uint64) (time.Time, time.Time, bool, bool, er.R) { - - var ( - upd1Time time.Time - upd2Time time.Time - exists bool - isZombie bool - ) - - // We'll query the cache with the shared lock held to allow multiple - // readers to access values in the cache concurrently if they exist. - c.cacheMu.RLock() - if entry, ok := c.rejectCache.get(chanID); ok { - c.cacheMu.RUnlock() - upd1Time = time.Unix(entry.upd1Time, 0) - upd2Time = time.Unix(entry.upd2Time, 0) - exists, isZombie = entry.flags.unpack() - return upd1Time, upd2Time, exists, isZombie, nil - } - c.cacheMu.RUnlock() - - c.cacheMu.Lock() - defer c.cacheMu.Unlock() - - // The item was not found with the shared lock, so we'll acquire the - // exclusive lock and check the cache again in case another method added - // the entry to the cache while no lock was held. 
- if entry, ok := c.rejectCache.get(chanID); ok { - upd1Time = time.Unix(entry.upd1Time, 0) - upd2Time = time.Unix(entry.upd2Time, 0) - exists, isZombie = entry.flags.unpack() - return upd1Time, upd2Time, exists, isZombie, nil - } - - if err := kvdb.View(c.db, func(tx kvdb.RTx) er.R { - edges := tx.ReadBucket(edgeBucket) - if edges == nil { - return ErrGraphNoEdgesFound.Default() - } - edgeIndex := edges.NestedReadBucket(edgeIndexBucket) - if edgeIndex == nil { - return ErrGraphNoEdgesFound.Default() - } - - var channelID [8]byte - byteOrder.PutUint64(channelID[:], chanID) - - // If the edge doesn't exist, then we'll also check our zombie - // index. - if edgeIndex.Get(channelID[:]) == nil { - exists = false - zombieIndex := edges.NestedReadBucket(zombieBucket) - if zombieIndex != nil { - isZombie, _, _ = isZombieEdge( - zombieIndex, chanID, - ) - } - - return nil - } - - exists = true - isZombie = false - - // If the channel has been found in the graph, then retrieve - // the edges itself so we can return the last updated - // timestamps. - nodes := tx.ReadBucket(nodeBucket) - if nodes == nil { - return ErrGraphNodeNotFound.Default() - } - - e1, e2, err := fetchChanEdgePolicies(edgeIndex, edges, nodes, - channelID[:], c.db) - if err != nil { - return err - } - - // As we may have only one of the edges populated, only set the - // update time if the edge was found in the database. - if e1 != nil { - upd1Time = e1.LastUpdate - } - if e2 != nil { - upd2Time = e2.LastUpdate - } - - return nil - }, func() {}); err != nil { - return time.Time{}, time.Time{}, exists, isZombie, err - } - - c.rejectCache.insert(chanID, rejectCacheEntry{ - upd1Time: upd1Time.Unix(), - upd2Time: upd2Time.Unix(), - flags: packRejectFlags(exists, isZombie), - }) - - return upd1Time, upd2Time, exists, isZombie, nil -} - -// UpdateChannelEdge retrieves and update edge of the graph database. Method -// only reserved for updating an edge info after its already been created. 
-// In order to maintain this constraints, we return an error in the scenario -// that an edge info hasn't yet been created yet, but someone attempts to update -// it. -func (c *ChannelGraph) UpdateChannelEdge(edge *ChannelEdgeInfo) er.R { - // Construct the channel's primary key which is the 8-byte channel ID. - var chanKey [8]byte - binary.BigEndian.PutUint64(chanKey[:], edge.ChannelID) - - return kvdb.Update(c.db, func(tx kvdb.RwTx) er.R { - edges := tx.ReadWriteBucket(edgeBucket) - if edge == nil { - return ErrEdgeNotFound.Default() - } - - edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket) - if edgeIndex == nil { - return ErrEdgeNotFound.Default() - } - - if edgeInfo := edgeIndex.Get(chanKey[:]); edgeInfo == nil { - return ErrEdgeNotFound.Default() - } - - return putChanEdgeInfo(edgeIndex, edge, chanKey) - }, func() {}) -} - -const ( - // pruneTipBytes is the total size of the value which stores a prune - // entry of the graph in the prune log. The "prune tip" is the last - // entry in the prune log, and indicates if the channel graph is in - // sync with the current UTXO state. The structure of the value - // is: blockHash, taking 32 bytes total. - pruneTipBytes = 32 -) - -// PruneGraph prunes newly closed channels from the channel graph in response -// to a new block being solved on the network. Any transactions which spend the -// funding output of any known channels within he graph will be deleted. -// Additionally, the "prune tip", or the last block which has been used to -// prune the graph is stored so callers can ensure the graph is fully in sync -// with the current UTXO state. A slice of channels that have been closed by -// the target block are returned if the function succeeds without error. 
-func (c *ChannelGraph) PruneGraph(spentOutputs []*wire.OutPoint, - blockHash *chainhash.Hash, blockHeight uint32) ([]*ChannelEdgeInfo, er.R) { - - c.cacheMu.Lock() - defer c.cacheMu.Unlock() - - var chansClosed []*ChannelEdgeInfo - - err := kvdb.Update(c.db, func(tx kvdb.RwTx) er.R { - // First grab the edges bucket which houses the information - // we'd like to delete - edges, err := tx.CreateTopLevelBucket(edgeBucket) - if err != nil { - return err - } - - // Next grab the two edge indexes which will also need to be updated. - edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket) - if err != nil { - return err - } - chanIndex, err := edges.CreateBucketIfNotExists(channelPointBucket) - if err != nil { - return err - } - nodes := tx.ReadWriteBucket(nodeBucket) - if nodes == nil { - return ErrSourceNodeNotSet.Default() - } - zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket) - if err != nil { - return err - } - - // For each of the outpoints that have been spent within the - // block, we attempt to delete them from the graph as if that - // outpoint was a channel, then it has now been closed. - for _, chanPoint := range spentOutputs { - // TODO(roasbeef): load channel bloom filter, continue - // if NOT if filter - - var opBytes bytes.Buffer - if err := writeOutpoint(&opBytes, chanPoint); err != nil { - return err - } - - // First attempt to see if the channel exists within - // the database, if not, then we can exit early. - chanID := chanIndex.Get(opBytes.Bytes()) - if chanID == nil { - continue - } - - // However, if it does, then we'll read out the full - // version so we can add it to the set of deleted - // channels. - edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID) - if err != nil { - return err - } - - // Attempt to delete the channel, an ErrEdgeNotFound - // will be returned if that outpoint isn't known to be - // a channel. If no error is returned, then a channel - // was successfully pruned. 
- err = delChannelEdge( - edges, edgeIndex, chanIndex, zombieIndex, nodes, - chanID, false, - ) - if err != nil && !ErrEdgeNotFound.Is(err) { - return err - } - - chansClosed = append(chansClosed, &edgeInfo) - } - - metaBucket, err := tx.CreateTopLevelBucket(graphMetaBucket) - if err != nil { - return err - } - - pruneBucket, err := metaBucket.CreateBucketIfNotExists(pruneLogBucket) - if err != nil { - return err - } - - // With the graph pruned, add a new entry to the prune log, - // which can be used to check if the graph is fully synced with - // the current UTXO state. - var blockHeightBytes [4]byte - byteOrder.PutUint32(blockHeightBytes[:], blockHeight) - - var newTip [pruneTipBytes]byte - copy(newTip[:], blockHash[:]) - - err = pruneBucket.Put(blockHeightBytes[:], newTip[:]) - if err != nil { - return err - } - - // Now that the graph has been pruned, we'll also attempt to - // prune any nodes that have had a channel closed within the - // latest block. - return c.pruneGraphNodes(nodes, edgeIndex) - }, func() { - chansClosed = nil - }) - if err != nil { - return nil, err - } - - for _, channel := range chansClosed { - c.rejectCache.remove(channel.ChannelID) - c.chanCache.remove(channel.ChannelID) - } - - return chansClosed, nil -} - -// PruneGraphNodes is a garbage collection method which attempts to prune out -// any nodes from the channel graph that are currently unconnected. This ensure -// that we only maintain a graph of reachable nodes. In the event that a pruned -// node gains more channels, it will be re-added back to the graph. 
-func (c *ChannelGraph) PruneGraphNodes() er.R { - return kvdb.Update(c.db, func(tx kvdb.RwTx) er.R { - nodes := tx.ReadWriteBucket(nodeBucket) - if nodes == nil { - return ErrGraphNodesNotFound.Default() - } - edges := tx.ReadWriteBucket(edgeBucket) - if edges == nil { - return ErrGraphNotFound.Default() - } - edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket) - if edgeIndex == nil { - return ErrGraphNoEdgesFound.Default() - } - - return c.pruneGraphNodes(nodes, edgeIndex) - }, func() {}) -} - -// pruneGraphNodes attempts to remove any nodes from the graph who have had a -// channel closed within the current block. If the node still has existing -// channels in the graph, this will act as a no-op. -func (c *ChannelGraph) pruneGraphNodes(nodes kvdb.RwBucket, - edgeIndex kvdb.RwBucket) er.R { - - log.Trace("Pruning nodes from graph with no open channels") - - // We'll retrieve the graph's source node to ensure we don't remove it - // even if it no longer has any open channels. - sourceNode, err := c.sourceNode(nodes) - if err != nil { - return err - } - - // We'll use this map to keep count the number of references to a node - // in the graph. A node should only be removed once it has no more - // references in the graph. - nodeRefCounts := make(map[[33]byte]int) - err = nodes.ForEach(func(pubKey, nodeBytes []byte) er.R { - // If this is the source key, then we skip this - // iteration as the value for this key is a pubKey - // rather than raw node information. - if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 { - return nil - } - - var nodePub [33]byte - copy(nodePub[:], pubKey) - nodeRefCounts[nodePub] = 0 - - return nil - }) - if err != nil { - return err - } - - // To ensure we never delete the source node, we'll start off by - // bumping its ref count to 1. - nodeRefCounts[sourceNode.PubKeyBytes] = 1 - - // Next, we'll run through the edgeIndex which maps a channel ID to the - // edge info. 
We'll use this scan to populate our reference count map - // above. - err = edgeIndex.ForEach(func(chanID, edgeInfoBytes []byte) er.R { - // The first 66 bytes of the edge info contain the pubkeys of - // the nodes that this edge attaches. We'll extract them, and - // add them to the ref count map. - var node1, node2 [33]byte - copy(node1[:], edgeInfoBytes[:33]) - copy(node2[:], edgeInfoBytes[33:]) - - // With the nodes extracted, we'll increase the ref count of - // each of the nodes. - nodeRefCounts[node1]++ - nodeRefCounts[node2]++ - - return nil - }) - if err != nil { - return err - } - - // Finally, we'll make a second pass over the set of nodes, and delete - // any nodes that have a ref count of zero. - var numNodesPruned int - for nodePubKey, refCount := range nodeRefCounts { - // If the ref count of the node isn't zero, then we can safely - // skip it as it still has edges to or from it within the - // graph. - if refCount != 0 { - continue - } - - // If we reach this point, then there are no longer any edges - // that connect this node, so we can delete it. - if err := c.deleteLightningNode(nodes, nodePubKey[:]); err != nil { - log.Warnf("Unable to prune node %x from the "+ - "graph: %v", nodePubKey, err) - continue - } - - log.Infof("Pruned unconnected node %x from channel graph", - nodePubKey[:]) - - numNodesPruned++ - } - - if numNodesPruned > 0 { - log.Infof("Pruned %v unconnected nodes from the channel graph", - numNodesPruned) - } - - return nil -} - -// DisconnectBlockAtHeight is used to indicate that the block specified -// by the passed height has been disconnected from the main chain. This -// will "rewind" the graph back to the height below, deleting channels -// that are no longer confirmed from the graph. The prune log will be -// set to the last prune height valid for the remaining chain. -// Channels that were removed from the graph resulting from the -// disconnected block are returned. 
-func (c *ChannelGraph) DisconnectBlockAtHeight(height uint32) ([]*ChannelEdgeInfo, - er.R) { - - // Every channel having a ShortChannelID starting at 'height' - // will no longer be confirmed. - startShortChanID := lnwire.ShortChannelID{ - BlockHeight: height, - } - - // Delete everything after this height from the db. - endShortChanID := lnwire.ShortChannelID{ - BlockHeight: math.MaxUint32 & 0x00ffffff, - TxIndex: math.MaxUint32 & 0x00ffffff, - TxPosition: math.MaxUint16, - } - // The block height will be the 3 first bytes of the channel IDs. - var chanIDStart [8]byte - byteOrder.PutUint64(chanIDStart[:], startShortChanID.ToUint64()) - var chanIDEnd [8]byte - byteOrder.PutUint64(chanIDEnd[:], endShortChanID.ToUint64()) - - c.cacheMu.Lock() - defer c.cacheMu.Unlock() - - // Keep track of the channels that are removed from the graph. - var removedChans []*ChannelEdgeInfo - - if err := kvdb.Update(c.db, func(tx kvdb.RwTx) er.R { - edges, err := tx.CreateTopLevelBucket(edgeBucket) - if err != nil { - return err - } - edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket) - if err != nil { - return err - } - chanIndex, err := edges.CreateBucketIfNotExists(channelPointBucket) - if err != nil { - return err - } - zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket) - if err != nil { - return err - } - nodes, err := tx.CreateTopLevelBucket(nodeBucket) - if err != nil { - return err - } - - // Scan from chanIDStart to chanIDEnd, deleting every - // found edge. - // NOTE: we must delete the edges after the cursor loop, since - // modifying the bucket while traversing is not safe. 
- var keys [][]byte - cursor := edgeIndex.ReadWriteCursor() - for k, v := cursor.Seek(chanIDStart[:]); k != nil && - bytes.Compare(k, chanIDEnd[:]) <= 0; k, v = cursor.Next() { - - edgeInfoReader := bytes.NewReader(v) - edgeInfo, err := deserializeChanEdgeInfo(edgeInfoReader) - if err != nil { - return err - } - - keys = append(keys, k) - removedChans = append(removedChans, &edgeInfo) - } - - for _, k := range keys { - err := delChannelEdge( - edges, edgeIndex, chanIndex, zombieIndex, nodes, - k, false, - ) - if err != nil && ErrEdgeNotFound.Is(err) { - return err - } - } - - // Delete all the entries in the prune log having a height - // greater or equal to the block disconnected. - metaBucket, err := tx.CreateTopLevelBucket(graphMetaBucket) - if err != nil { - return err - } - - pruneBucket, err := metaBucket.CreateBucketIfNotExists(pruneLogBucket) - if err != nil { - return err - } - - var pruneKeyStart [4]byte - byteOrder.PutUint32(pruneKeyStart[:], height) - - var pruneKeyEnd [4]byte - byteOrder.PutUint32(pruneKeyEnd[:], math.MaxUint32) - - // To avoid modifying the bucket while traversing, we delete - // the keys in a second loop. - var pruneKeys [][]byte - pruneCursor := pruneBucket.ReadWriteCursor() - for k, _ := pruneCursor.Seek(pruneKeyStart[:]); k != nil && - bytes.Compare(k, pruneKeyEnd[:]) <= 0; k, _ = pruneCursor.Next() { - - pruneKeys = append(pruneKeys, k) - } - - for _, k := range pruneKeys { - if err := pruneBucket.Delete(k); err != nil { - return err - } - } - - return nil - }, func() { - removedChans = nil - }); err != nil { - return nil, err - } - - for _, channel := range removedChans { - c.rejectCache.remove(channel.ChannelID) - c.chanCache.remove(channel.ChannelID) - } - - return removedChans, nil -} - -// PruneTip returns the block height and hash of the latest block that has been -// used to prune channels in the graph. 
Knowing the "prune tip" allows callers -// to tell if the graph is currently in sync with the current best known UTXO -// state. -func (c *ChannelGraph) PruneTip() (*chainhash.Hash, uint32, er.R) { - var ( - tipHash chainhash.Hash - tipHeight uint32 - ) - - err := kvdb.View(c.db, func(tx kvdb.RTx) er.R { - graphMeta := tx.ReadBucket(graphMetaBucket) - if graphMeta == nil { - return ErrGraphNotFound.Default() - } - pruneBucket := graphMeta.NestedReadBucket(pruneLogBucket) - if pruneBucket == nil { - return ErrGraphNeverPruned.Default() - } - - pruneCursor := pruneBucket.ReadCursor() - - // The prune key with the largest block height will be our - // prune tip. - k, v := pruneCursor.Last() - if k == nil { - return ErrGraphNeverPruned.Default() - } - - // Once we have the prune tip, the value will be the block hash, - // and the key the block height. - copy(tipHash[:], v[:]) - tipHeight = byteOrder.Uint32(k[:]) - - return nil - }, func() {}) - if err != nil { - return nil, 0, err - } - - return &tipHash, tipHeight, nil -} - -// DeleteChannelEdges removes edges with the given channel IDs from the database -// and marks them as zombies. This ensures that we're unable to re-add it to our -// database once again. If an edge does not exist within the database, then -// ErrEdgeNotFound will be returned. -func (c *ChannelGraph) DeleteChannelEdges(chanIDs ...uint64) er.R { - // TODO(roasbeef): possibly delete from node bucket if node has no more - // channels - // TODO(roasbeef): don't delete both edges? 
- - c.cacheMu.Lock() - defer c.cacheMu.Unlock() - - err := kvdb.Update(c.db, func(tx kvdb.RwTx) er.R { - edges := tx.ReadWriteBucket(edgeBucket) - if edges == nil { - return ErrEdgeNotFound.Default() - } - edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket) - if edgeIndex == nil { - return ErrEdgeNotFound.Default() - } - chanIndex := edges.NestedReadWriteBucket(channelPointBucket) - if chanIndex == nil { - return ErrEdgeNotFound.Default() - } - nodes := tx.ReadWriteBucket(nodeBucket) - if nodes == nil { - return ErrGraphNodeNotFound.Default() - } - zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket) - if err != nil { - return err - } - - var rawChanID [8]byte - for _, chanID := range chanIDs { - byteOrder.PutUint64(rawChanID[:], chanID) - err := delChannelEdge( - edges, edgeIndex, chanIndex, zombieIndex, nodes, - rawChanID[:], true, - ) - if err != nil { - return err - } - } - - return nil - }, func() {}) - if err != nil { - return err - } - - for _, chanID := range chanIDs { - c.rejectCache.remove(chanID) - c.chanCache.remove(chanID) - } - - return nil -} - -// ChannelID attempt to lookup the 8-byte compact channel ID which maps to the -// passed channel point (outpoint). If the passed channel doesn't exist within -// the database, then ErrEdgeNotFound is returned. -func (c *ChannelGraph) ChannelID(chanPoint *wire.OutPoint) (uint64, er.R) { - var chanID uint64 - if err := kvdb.View(c.db, func(tx kvdb.RTx) er.R { - var err er.R - chanID, err = getChanID(tx, chanPoint) - return err - }, func() { - chanID = 0 - }); err != nil { - return 0, err - } - - return chanID, nil -} - -// getChanID returns the assigned channel ID for a given channel point. 
-func getChanID(tx kvdb.RTx, chanPoint *wire.OutPoint) (uint64, er.R) { - var b bytes.Buffer - if err := writeOutpoint(&b, chanPoint); err != nil { - return 0, err - } - - edges := tx.ReadBucket(edgeBucket) - if edges == nil { - return 0, ErrGraphNoEdgesFound.Default() - } - chanIndex := edges.NestedReadBucket(channelPointBucket) - if chanIndex == nil { - return 0, ErrGraphNoEdgesFound.Default() - } - - chanIDBytes := chanIndex.Get(b.Bytes()) - if chanIDBytes == nil { - return 0, ErrEdgeNotFound.Default() - } - - chanID := byteOrder.Uint64(chanIDBytes) - - return chanID, nil -} - -// TODO(roasbeef): allow updates to use Batch? - -// HighestChanID returns the "highest" known channel ID in the channel graph. -// This represents the "newest" channel from the PoV of the chain. This method -// can be used by peers to quickly determine if they're graphs are in sync. -func (c *ChannelGraph) HighestChanID() (uint64, er.R) { - var cid uint64 - - err := kvdb.View(c.db, func(tx kvdb.RTx) er.R { - edges := tx.ReadBucket(edgeBucket) - if edges == nil { - return ErrGraphNoEdgesFound.Default() - } - edgeIndex := edges.NestedReadBucket(edgeIndexBucket) - if edgeIndex == nil { - return ErrGraphNoEdgesFound.Default() - } - - // In order to find the highest chan ID, we'll fetch a cursor - // and use that to seek to the "end" of our known rage. - cidCursor := edgeIndex.ReadCursor() - - lastChanID, _ := cidCursor.Last() - - // If there's no key, then this means that we don't actually - // know of any channels, so we'll return a predicable error. - if lastChanID == nil { - return ErrGraphNoEdgesFound.Default() - } - - // Otherwise, we'll de serialize the channel ID and return it - // to the caller. - cid = byteOrder.Uint64(lastChanID) - return nil - }, func() { - cid = 0 - }) - if err != nil && !ErrGraphNoEdgesFound.Is(err) { - return 0, err - } - - return cid, nil -} - -// ChannelEdge represents the complete set of information for a channel edge in -// the known channel graph. 
This struct couples the core information of the -// edge as well as each of the known advertised edge policies. -type ChannelEdge struct { - // Info contains all the static information describing the channel. - Info *ChannelEdgeInfo - - // Policy1 points to the "first" edge policy of the channel containing - // the dynamic information required to properly route through the edge. - Policy1 *ChannelEdgePolicy - - // Policy2 points to the "second" edge policy of the channel containing - // the dynamic information required to properly route through the edge. - Policy2 *ChannelEdgePolicy -} - -// ChanUpdatesInHorizon returns all the known channel edges which have at least -// one edge that has an update timestamp within the specified horizon. -func (c *ChannelGraph) ChanUpdatesInHorizon(startTime, endTime time.Time) ([]ChannelEdge, er.R) { - // To ensure we don't return duplicate ChannelEdges, we'll use an - // additional map to keep track of the edges already seen to prevent - // re-adding it. - var edgesSeen map[uint64]struct{} - var edgesToCache map[uint64]ChannelEdge - var edgesInHorizon []ChannelEdge - - c.cacheMu.Lock() - defer c.cacheMu.Unlock() - - var hits int - err := kvdb.View(c.db, func(tx kvdb.RTx) er.R { - edges := tx.ReadBucket(edgeBucket) - if edges == nil { - return ErrGraphNoEdgesFound.Default() - } - edgeIndex := edges.NestedReadBucket(edgeIndexBucket) - if edgeIndex == nil { - return ErrGraphNoEdgesFound.Default() - } - edgeUpdateIndex := edges.NestedReadBucket(edgeUpdateIndexBucket) - if edgeUpdateIndex == nil { - return ErrGraphNoEdgesFound.Default() - } - - nodes := tx.ReadBucket(nodeBucket) - if nodes == nil { - return ErrGraphNodesNotFound.Default() - } - - // We'll now obtain a cursor to perform a range query within - // the index to find all channels within the horizon. 
- updateCursor := edgeUpdateIndex.ReadCursor() - - var startTimeBytes, endTimeBytes [8 + 8]byte - byteOrder.PutUint64( - startTimeBytes[:8], uint64(startTime.Unix()), - ) - byteOrder.PutUint64( - endTimeBytes[:8], uint64(endTime.Unix()), - ) - - // With our start and end times constructed, we'll step through - // the index collecting the info and policy of each update of - // each channel that has a last update within the time range. - for indexKey, _ := updateCursor.Seek(startTimeBytes[:]); indexKey != nil && - bytes.Compare(indexKey, endTimeBytes[:]) <= 0; indexKey, _ = updateCursor.Next() { - - // We have a new eligible entry, so we'll slice of the - // chan ID so we can query it in the DB. - chanID := indexKey[8:] - - // If we've already retrieved the info and policies for - // this edge, then we can skip it as we don't need to do - // so again. - chanIDInt := byteOrder.Uint64(chanID) - if _, ok := edgesSeen[chanIDInt]; ok { - continue - } - - if channel, ok := c.chanCache.get(chanIDInt); ok { - hits++ - edgesSeen[chanIDInt] = struct{}{} - edgesInHorizon = append(edgesInHorizon, channel) - continue - } - - // First, we'll fetch the static edge information. - edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID) - if err != nil { - chanID := byteOrder.Uint64(chanID) - return er.Errorf("unable to fetch info for "+ - "edge with chan_id=%v: %v", chanID, err) - } - edgeInfo.db = c.db - - // With the static information obtained, we'll now - // fetch the dynamic policy info. - edge1, edge2, err := fetchChanEdgePolicies( - edgeIndex, edges, nodes, chanID, c.db, - ) - if err != nil { - chanID := byteOrder.Uint64(chanID) - return er.Errorf("unable to fetch policies "+ - "for edge with chan_id=%v: %v", chanID, - err) - } - - // Finally, we'll collate this edge with the rest of - // edges to be returned. 
- edgesSeen[chanIDInt] = struct{}{} - channel := ChannelEdge{ - Info: &edgeInfo, - Policy1: edge1, - Policy2: edge2, - } - edgesInHorizon = append(edgesInHorizon, channel) - edgesToCache[chanIDInt] = channel - } - - return nil - }, func() { - edgesSeen = make(map[uint64]struct{}) - edgesToCache = make(map[uint64]ChannelEdge) - edgesInHorizon = nil - }) - switch { - case ErrGraphNoEdgesFound.Is(err): - fallthrough - case ErrGraphNodesNotFound.Is(err): - break - - case err != nil: - return nil, err - } - - // Insert any edges loaded from disk into the cache. - for chanid, channel := range edgesToCache { - c.chanCache.insert(chanid, channel) - } - - log.Debugf("ChanUpdatesInHorizon hit percentage: %f (%d/%d)", - float64(hits)/float64(len(edgesInHorizon)), hits, - len(edgesInHorizon)) - - return edgesInHorizon, nil -} - -// NodeUpdatesInHorizon returns all the known lightning node which have an -// update timestamp within the passed range. This method can be used by two -// nodes to quickly determine if they have the same set of up to date node -// announcements. -func (c *ChannelGraph) NodeUpdatesInHorizon(startTime, endTime time.Time) ([]LightningNode, er.R) { - var nodesInHorizon []LightningNode - - err := kvdb.View(c.db, func(tx kvdb.RTx) er.R { - nodes := tx.ReadBucket(nodeBucket) - if nodes == nil { - return ErrGraphNodesNotFound.Default() - } - - nodeUpdateIndex := nodes.NestedReadBucket(nodeUpdateIndexBucket) - if nodeUpdateIndex == nil { - return ErrGraphNodesNotFound.Default() - } - - // We'll now obtain a cursor to perform a range query within - // the index to find all node announcements within the horizon. 
- updateCursor := nodeUpdateIndex.ReadCursor() - - var startTimeBytes, endTimeBytes [8 + 33]byte - byteOrder.PutUint64( - startTimeBytes[:8], uint64(startTime.Unix()), - ) - byteOrder.PutUint64( - endTimeBytes[:8], uint64(endTime.Unix()), - ) - - // With our start and end times constructed, we'll step through - // the index collecting info for each node within the time - // range. - for indexKey, _ := updateCursor.Seek(startTimeBytes[:]); indexKey != nil && - bytes.Compare(indexKey, endTimeBytes[:]) <= 0; indexKey, _ = updateCursor.Next() { - - nodePub := indexKey[8:] - node, err := fetchLightningNode(nodes, nodePub) - if err != nil { - return err - } - node.db = c.db - - nodesInHorizon = append(nodesInHorizon, node) - } - - return nil - }, func() { - nodesInHorizon = nil - }) - switch { - case ErrGraphNoEdgesFound.Is(err): - fallthrough - case ErrGraphNodesNotFound.Is(err): - break - - case err != nil: - return nil, err - } - - return nodesInHorizon, nil -} - -// FilterKnownChanIDs takes a set of channel IDs and return the subset of chan -// ID's that we don't know and are not known zombies of the passed set. In other -// words, we perform a set difference of our set of chan ID's and the ones -// passed in. This method can be used by callers to determine the set of -// channels another peer knows of that we don't. -func (c *ChannelGraph) FilterKnownChanIDs(chanIDs []uint64) ([]uint64, er.R) { - var newChanIDs []uint64 - - err := kvdb.View(c.db, func(tx kvdb.RTx) er.R { - edges := tx.ReadBucket(edgeBucket) - if edges == nil { - return ErrGraphNoEdgesFound.Default() - } - edgeIndex := edges.NestedReadBucket(edgeIndexBucket) - if edgeIndex == nil { - return ErrGraphNoEdgesFound.Default() - } - - // Fetch the zombie index, it may not exist if no edges have - // ever been marked as zombies. If the index has been - // initialized, we will use it later to skip known zombie edges. 
- zombieIndex := edges.NestedReadBucket(zombieBucket) - - // We'll run through the set of chanIDs and collate only the - // set of channel that are unable to be found within our db. - var cidBytes [8]byte - for _, cid := range chanIDs { - byteOrder.PutUint64(cidBytes[:], cid) - - // If the edge is already known, skip it. - if v := edgeIndex.Get(cidBytes[:]); v != nil { - continue - } - - // If the edge is a known zombie, skip it. - if zombieIndex != nil { - isZombie, _, _ := isZombieEdge(zombieIndex, cid) - if isZombie { - continue - } - } - - newChanIDs = append(newChanIDs, cid) - } - - return nil - }, func() { - newChanIDs = nil - }) - switch { - // If we don't know of any edges yet, then we'll return the entire set - // of chan IDs specified. - case ErrGraphNoEdgesFound.Is(err): - return chanIDs, nil - - case err != nil: - return nil, err - } - - return newChanIDs, nil -} - -// FilterChannelRange returns the channel ID's of all known channels which were -// mined in a block height within the passed range. This method can be used to -// quickly share with a peer the set of channels we know of within a particular -// range to catch them up after a period of time offline. -func (c *ChannelGraph) FilterChannelRange(startHeight, endHeight uint32) ([]uint64, er.R) { - var chanIDs []uint64 - - startChanID := &lnwire.ShortChannelID{ - BlockHeight: startHeight, - } - - endChanID := lnwire.ShortChannelID{ - BlockHeight: endHeight, - TxIndex: math.MaxUint32 & 0x00ffffff, - TxPosition: math.MaxUint16, - } - - // As we need to perform a range scan, we'll convert the starting and - // ending height to their corresponding values when encoded using short - // channel ID's. 
- var chanIDStart, chanIDEnd [8]byte - byteOrder.PutUint64(chanIDStart[:], startChanID.ToUint64()) - byteOrder.PutUint64(chanIDEnd[:], endChanID.ToUint64()) - - err := kvdb.View(c.db, func(tx kvdb.RTx) er.R { - edges := tx.ReadBucket(edgeBucket) - if edges == nil { - return ErrGraphNoEdgesFound.Default() - } - edgeIndex := edges.NestedReadBucket(edgeIndexBucket) - if edgeIndex == nil { - return ErrGraphNoEdgesFound.Default() - } - - cursor := edgeIndex.ReadCursor() - - // We'll now iterate through the database, and find each - // channel ID that resides within the specified range. - var cid uint64 - for k, _ := cursor.Seek(chanIDStart[:]); k != nil && - bytes.Compare(k, chanIDEnd[:]) <= 0; k, _ = cursor.Next() { - - // This channel ID rests within the target range, so - // we'll convert it into an integer and add it to our - // returned set. - cid = byteOrder.Uint64(k) - chanIDs = append(chanIDs, cid) - } - - return nil - }, func() { - chanIDs = nil - }) - - switch { - // If we don't know of any channels yet, then there's nothing to - // filter, so we'll return an empty slice. - case ErrGraphNoEdgesFound.Is(err): - return chanIDs, nil - - case err != nil: - return nil, err - } - - return chanIDs, nil -} - -// FetchChanInfos returns the set of channel edges that correspond to the passed -// channel ID's. If an edge is the query is unknown to the database, it will -// skipped and the result will contain only those edges that exist at the time -// of the query. This can be used to respond to peer queries that are seeking to -// fill in gaps in their view of the channel graph. -func (c *ChannelGraph) FetchChanInfos(chanIDs []uint64) ([]ChannelEdge, er.R) { - // TODO(roasbeef): sort cids? 
- - var ( - chanEdges []ChannelEdge - cidBytes [8]byte - ) - - err := kvdb.View(c.db, func(tx kvdb.RTx) er.R { - edges := tx.ReadBucket(edgeBucket) - if edges == nil { - return ErrGraphNoEdgesFound.Default() - } - edgeIndex := edges.NestedReadBucket(edgeIndexBucket) - if edgeIndex == nil { - return ErrGraphNoEdgesFound.Default() - } - nodes := tx.ReadBucket(nodeBucket) - if nodes == nil { - return ErrGraphNotFound.Default() - } - - for _, cid := range chanIDs { - byteOrder.PutUint64(cidBytes[:], cid) - - // First, we'll fetch the static edge information. If - // the edge is unknown, we will skip the edge and - // continue gathering all known edges. - edgeInfo, err := fetchChanEdgeInfo( - edgeIndex, cidBytes[:], - ) - switch { - case ErrEdgeNotFound.Is(err): - continue - case err != nil: - return err - } - edgeInfo.db = c.db - - // With the static information obtained, we'll now - // fetch the dynamic policy info. - edge1, edge2, err := fetchChanEdgePolicies( - edgeIndex, edges, nodes, cidBytes[:], c.db, - ) - if err != nil { - return err - } - - chanEdges = append(chanEdges, ChannelEdge{ - Info: &edgeInfo, - Policy1: edge1, - Policy2: edge2, - }) - } - return nil - }, func() { - chanEdges = nil - }) - if err != nil { - return nil, err - } - - return chanEdges, nil -} - -func delEdgeUpdateIndexEntry(edgesBucket kvdb.RwBucket, chanID uint64, - edge1, edge2 *ChannelEdgePolicy) er.R { - - // First, we'll fetch the edge update index bucket which currently - // stores an entry for the channel we're about to delete. - updateIndex := edgesBucket.NestedReadWriteBucket(edgeUpdateIndexBucket) - if updateIndex == nil { - // No edges in bucket, return early. - return nil - } - - // Now that we have the bucket, we'll attempt to construct a template - // for the index key: updateTime || chanid. 
- var indexKey [8 + 8]byte - byteOrder.PutUint64(indexKey[8:], chanID) - - // With the template constructed, we'll attempt to delete an entry that - // would have been created by both edges: we'll alternate the update - // times, as one may had overridden the other. - if edge1 != nil { - byteOrder.PutUint64(indexKey[:8], uint64(edge1.LastUpdate.Unix())) - if err := updateIndex.Delete(indexKey[:]); err != nil { - return err - } - } - - // We'll also attempt to delete the entry that may have been created by - // the second edge. - if edge2 != nil { - byteOrder.PutUint64(indexKey[:8], uint64(edge2.LastUpdate.Unix())) - if err := updateIndex.Delete(indexKey[:]); err != nil { - return err - } - } - - return nil -} - -func delChannelEdge(edges, edgeIndex, chanIndex, zombieIndex, - nodes kvdb.RwBucket, chanID []byte, isZombie bool) er.R { - - edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID) - if err != nil { - return err - } - - // We'll also remove the entry in the edge update index bucket before - // we delete the edges themselves so we can access their last update - // times. - cid := byteOrder.Uint64(chanID) - edge1, edge2, err := fetchChanEdgePolicies( - edgeIndex, edges, nodes, chanID, nil, - ) - if err != nil { - return err - } - err = delEdgeUpdateIndexEntry(edges, cid, edge1, edge2) - if err != nil { - return err - } - - // The edge key is of the format pubKey || chanID. First we construct - // the latter half, populating the channel ID. - var edgeKey [33 + 8]byte - copy(edgeKey[33:], chanID) - - // With the latter half constructed, copy over the first public key to - // delete the edge in this direction, then the second to delete the - // edge in the opposite direction. 
- copy(edgeKey[:33], edgeInfo.NodeKey1Bytes[:]) - if edges.Get(edgeKey[:]) != nil { - if err := edges.Delete(edgeKey[:]); err != nil { - return err - } - } - copy(edgeKey[:33], edgeInfo.NodeKey2Bytes[:]) - if edges.Get(edgeKey[:]) != nil { - if err := edges.Delete(edgeKey[:]); err != nil { - return err - } - } - - // As part of deleting the edge we also remove all disabled entries - // from the edgePolicyDisabledIndex bucket. We do that for both directions. - updateEdgePolicyDisabledIndex(edges, cid, false, false) - updateEdgePolicyDisabledIndex(edges, cid, true, false) - - // With the edge data deleted, we can purge the information from the two - // edge indexes. - if err := edgeIndex.Delete(chanID); err != nil { - return err - } - var b bytes.Buffer - if err := writeOutpoint(&b, &edgeInfo.ChannelPoint); err != nil { - return err - } - if err := chanIndex.Delete(b.Bytes()); err != nil { - return err - } - - // Finally, we'll mark the edge as a zombie within our index if it's - // being removed due to the channel becoming a zombie. We do this to - // ensure we don't store unnecessary data for spent channels. - if !isZombie { - return nil - } - - return markEdgeZombie( - zombieIndex, byteOrder.Uint64(chanID), edgeInfo.NodeKey1Bytes, - edgeInfo.NodeKey2Bytes, - ) -} - -// UpdateEdgePolicy updates the edge routing policy for a single directed edge -// within the database for the referenced channel. The `flags` attribute within -// the ChannelEdgePolicy determines which of the directed edges are being -// updated. If the flag is 1, then the first node's information is being -// updated, otherwise it's the second node's information. The node ordering is -// determined by the lexicographical ordering of the identity public keys of -// the nodes on either side of the channel. 
-func (c *ChannelGraph) UpdateEdgePolicy(edge *ChannelEdgePolicy) er.R { - c.cacheMu.Lock() - defer c.cacheMu.Unlock() - - var isUpdate1 bool - err := kvdb.Update(c.db, func(tx kvdb.RwTx) er.R { - var err er.R - isUpdate1, err = updateEdgePolicy(tx, edge) - return err - }, func() { - isUpdate1 = false - }) - if err != nil { - return err - } - - // If an entry for this channel is found in reject cache, we'll modify - // the entry with the updated timestamp for the direction that was just - // written. If the edge doesn't exist, we'll load the cache entry lazily - // during the next query for this edge. - if entry, ok := c.rejectCache.get(edge.ChannelID); ok { - if isUpdate1 { - entry.upd1Time = edge.LastUpdate.Unix() - } else { - entry.upd2Time = edge.LastUpdate.Unix() - } - c.rejectCache.insert(edge.ChannelID, entry) - } - - // If an entry for this channel is found in channel cache, we'll modify - // the entry with the updated policy for the direction that was just - // written. If the edge doesn't exist, we'll defer loading the info and - // policies and lazily read from disk during the next query. - if channel, ok := c.chanCache.get(edge.ChannelID); ok { - if isUpdate1 { - channel.Policy1 = edge - } else { - channel.Policy2 = edge - } - c.chanCache.insert(edge.ChannelID, channel) - } - - return nil -} - -// updateEdgePolicy attempts to update an edge's policy within the relevant -// buckets using an existing database transaction. The returned boolean will be -// true if the updated policy belongs to node1, and false if the policy belonged -// to node2. 
-func updateEdgePolicy(tx kvdb.RwTx, edge *ChannelEdgePolicy) (bool, er.R) { - edges := tx.ReadWriteBucket(edgeBucket) - if edges == nil { - return false, ErrEdgeNotFound.Default() - - } - edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket) - if edgeIndex == nil { - return false, ErrEdgeNotFound.Default() - } - nodes, err := tx.CreateTopLevelBucket(nodeBucket) - if err != nil { - return false, err - } - - // Create the channelID key be converting the channel ID - // integer into a byte slice. - var chanID [8]byte - byteOrder.PutUint64(chanID[:], edge.ChannelID) - - // With the channel ID, we then fetch the value storing the two - // nodes which connect this channel edge. - nodeInfo := edgeIndex.Get(chanID[:]) - if nodeInfo == nil { - return false, ErrEdgeNotFound.Default() - } - - // Depending on the flags value passed above, either the first - // or second edge policy is being updated. - var fromNode, toNode []byte - var isUpdate1 bool - if edge.ChannelFlags&lnwire.ChanUpdateDirection == 0 { - fromNode = nodeInfo[:33] - toNode = nodeInfo[33:66] - isUpdate1 = true - } else { - fromNode = nodeInfo[33:66] - toNode = nodeInfo[:33] - isUpdate1 = false - } - - // Finally, with the direction of the edge being updated - // identified, we update the on-disk edge representation. - errr := putChanEdgePolicy(edges, nodes, edge, fromNode, toNode) - if errr != nil { - return false, errr - } - - return isUpdate1, nil -} - -// LightningNode represents an individual vertex/node within the channel graph. -// A node is connected to other nodes by one or more channel edges emanating -// from it. As the graph is directed, a node will also have an incoming edge -// attached to it for each outgoing edge. -type LightningNode struct { - // PubKeyBytes is the raw bytes of the public key of the target node. - PubKeyBytes [33]byte - pubKey *btcec.PublicKey - - // HaveNodeAnnouncement indicates whether we received a node - // announcement for this particular node. 
If true, the remaining fields - // will be set, if false only the PubKey is known for this node. - HaveNodeAnnouncement bool - - // LastUpdate is the last time the vertex information for this node has - // been updated. - LastUpdate time.Time - - // Address is the TCP address this node is reachable over. - Addresses []net.Addr - - // Color is the selected color for the node. - Color color.RGBA - - // Alias is a nick-name for the node. The alias can be used to confirm - // a node's identity or to serve as a short ID for an address book. - Alias string - - // AuthSigBytes is the raw signature under the advertised public key - // which serves to authenticate the attributes announced by this node. - AuthSigBytes []byte - - // Features is the list of protocol features supported by this node. - Features *lnwire.FeatureVector - - // ExtraOpaqueData is the set of data that was appended to this - // message, some of which we may not actually know how to iterate or - // parse. By holding onto this data, we ensure that we're able to - // properly validate the set of signatures that cover these new fields, - // and ensure we're able to make upgrades to the network in a forwards - // compatible manner. - ExtraOpaqueData []byte - - db *DB - - // TODO(roasbeef): discovery will need storage to keep it's last IP - // address and re-announce if interface changes? - - // TODO(roasbeef): add update method and fetch? -} - -// PubKey is the node's long-term identity public key. This key will be used to -// authenticated any advertisements/updates sent by the node. -// -// NOTE: By having this method to access an attribute, we ensure we only need -// to fully deserialize the pubkey if absolutely necessary. 
-func (l *LightningNode) PubKey() (*btcec.PublicKey, er.R) { - if l.pubKey != nil { - return l.pubKey, nil - } - - key, err := btcec.ParsePubKey(l.PubKeyBytes[:], btcec.S256()) - if err != nil { - return nil, err - } - l.pubKey = key - - return key, nil -} - -// AuthSig is a signature under the advertised public key which serves to -// authenticate the attributes announced by this node. -// -// NOTE: By having this method to access an attribute, we ensure we only need -// to fully deserialize the signature if absolutely necessary. -func (l *LightningNode) AuthSig() (*btcec.Signature, er.R) { - return btcec.ParseSignature(l.AuthSigBytes, btcec.S256()) -} - -// AddPubKey is a setter-link method that can be used to swap out the public -// key for a node. -func (l *LightningNode) AddPubKey(key *btcec.PublicKey) { - l.pubKey = key - copy(l.PubKeyBytes[:], key.SerializeCompressed()) -} - -// NodeAnnouncement retrieves the latest node announcement of the node. -func (l *LightningNode) NodeAnnouncement(signed bool) (*lnwire.NodeAnnouncement, - er.R) { - - if !l.HaveNodeAnnouncement { - return nil, er.Errorf("node does not have node announcement") - } - - alias, err := lnwire.NewNodeAlias(l.Alias) - if err != nil { - return nil, err - } - - nodeAnn := &lnwire.NodeAnnouncement{ - Features: l.Features.RawFeatureVector, - NodeID: l.PubKeyBytes, - RGBColor: l.Color, - Alias: alias, - Addresses: l.Addresses, - Timestamp: uint32(l.LastUpdate.Unix()), - ExtraOpaqueData: l.ExtraOpaqueData, - } - - if !signed { - return nodeAnn, nil - } - - sig, err := lnwire.NewSigFromRawSignature(l.AuthSigBytes) - if err != nil { - return nil, err - } - - nodeAnn.Signature = sig - - return nodeAnn, nil -} - -// isPublic determines whether the node is seen as public within the graph from -// the source node's point of view. An existing database transaction can also be -// specified. 
-func (l *LightningNode) isPublic(tx kvdb.RTx, sourcePubKey []byte) (bool, er.R) { - // In order to determine whether this node is publicly advertised within - // the graph, we'll need to look at all of its edges and check whether - // they extend to any other node than the source node. errDone will be - // used to terminate the check early. - nodeIsPublic := false - err := l.ForEachChannel(tx, func(_ kvdb.RTx, info *ChannelEdgeInfo, - _, _ *ChannelEdgePolicy) er.R { - - // If this edge doesn't extend to the source node, we'll - // terminate our search as we can now conclude that the node is - // publicly advertised within the graph due to the local node - // knowing of the current edge. - if !bytes.Equal(info.NodeKey1Bytes[:], sourcePubKey) && - !bytes.Equal(info.NodeKey2Bytes[:], sourcePubKey) { - - nodeIsPublic = true - return er.LoopBreak - } - - // Since the edge _does_ extend to the source node, we'll also - // need to ensure that this is a public edge. - if info.AuthProof != nil { - nodeIsPublic = true - return er.LoopBreak - } - - // Otherwise, we'll continue our search. - return nil - }) - if err != nil && !er.IsLoopBreak(err) { - return false, err - } - - return nodeIsPublic, nil -} - -// FetchLightningNode attempts to look up a target node by its identity public -// key. If the node isn't found in the database, then ErrGraphNodeNotFound is -// returned. -// -// If the caller wishes to re-use an existing boltdb transaction, then it -// should be passed as the first argument. Otherwise the first argument should -// be nil and a fresh transaction will be created to execute the graph -// traversal. -func (c *ChannelGraph) FetchLightningNode(tx kvdb.RTx, nodePub route.Vertex) ( - *LightningNode, er.R) { - - var node *LightningNode - - fetchNode := func(tx kvdb.RTx) er.R { - // First grab the nodes bucket which stores the mapping from - // pubKey to node information. 
- nodes := tx.ReadBucket(nodeBucket) - if nodes == nil { - return ErrGraphNotFound.Default() - } - - // If a key for this serialized public key isn't found, then - // the target node doesn't exist within the database. - nodeBytes := nodes.Get(nodePub[:]) - if nodeBytes == nil { - return ErrGraphNodeNotFound.Default() - } - - // If the node is found, then we can de deserialize the node - // information to return to the user. - nodeReader := bytes.NewReader(nodeBytes) - n, err := deserializeLightningNode(nodeReader) - if err != nil { - return err - } - n.db = c.db - - node = &n - - return nil - } - - var err er.R - if tx == nil { - err = kvdb.View(c.db, fetchNode, func() {}) - } else { - err = fetchNode(tx) - } - if err != nil { - return nil, err - } - - return node, nil -} - -// HasLightningNode determines if the graph has a vertex identified by the -// target node identity public key. If the node exists in the database, a -// timestamp of when the data for the node was lasted updated is returned along -// with a true boolean. Otherwise, an empty time.Time is returned with a false -// boolean. -func (c *ChannelGraph) HasLightningNode(nodePub [33]byte) (time.Time, bool, er.R) { - var ( - updateTime time.Time - exists bool - ) - - err := kvdb.View(c.db, func(tx kvdb.RTx) er.R { - // First grab the nodes bucket which stores the mapping from - // pubKey to node information. - nodes := tx.ReadBucket(nodeBucket) - if nodes == nil { - return ErrGraphNotFound.Default() - } - - // If a key for this serialized public key isn't found, we can - // exit early. - nodeBytes := nodes.Get(nodePub[:]) - if nodeBytes == nil { - exists = false - return nil - } - - // Otherwise we continue on to obtain the time stamp - // representing the last time the data for this node was - // updated. 
- nodeReader := bytes.NewReader(nodeBytes) - node, err := deserializeLightningNode(nodeReader) - if err != nil { - return err - } - - exists = true - updateTime = node.LastUpdate - return nil - }, func() { - updateTime = time.Time{} - exists = false - }) - if err != nil { - return time.Time{}, exists, err - } - - return updateTime, exists, nil -} - -// nodeTraversal is used to traverse all channels of a node given by its -// public key and passes channel information into the specified callback. -func nodeTraversal(tx kvdb.RTx, nodePub []byte, db *DB, - cb func(kvdb.RTx, *ChannelEdgeInfo, *ChannelEdgePolicy, *ChannelEdgePolicy) er.R) er.R { - - traversal := func(tx kvdb.RTx) er.R { - nodes := tx.ReadBucket(nodeBucket) - if nodes == nil { - return ErrGraphNotFound.Default() - } - edges := tx.ReadBucket(edgeBucket) - if edges == nil { - return ErrGraphNotFound.Default() - } - edgeIndex := edges.NestedReadBucket(edgeIndexBucket) - if edgeIndex == nil { - return ErrGraphNoEdgesFound.Default() - } - - // In order to reach all the edges for this node, we take - // advantage of the construction of the key-space within the - // edge bucket. The keys are stored in the form: pubKey || - // chanID. Therefore, starting from a chanID of zero, we can - // scan forward in the bucket, grabbing all the edges for the - // node. Once the prefix no longer matches, then we know we're - // done. - var nodeStart [33 + 8]byte - copy(nodeStart[:], nodePub) - copy(nodeStart[33:], chanStart[:]) - - // Starting from the key pubKey || 0, we seek forward in the - // bucket until the retrieved key no longer has the public key - // as its prefix. This indicates that we've stepped over into - // another node's edges, so we can terminate our scan. - edgeCursor := edges.ReadCursor() - for nodeEdge, _ := edgeCursor.Seek(nodeStart[:]); bytes.HasPrefix(nodeEdge, nodePub); nodeEdge, _ = edgeCursor.Next() { - // If the prefix still matches, the channel id is - // returned in nodeEdge. 
Channel id is used to lookup - // the node at the other end of the channel and both - // edge policies. - chanID := nodeEdge[33:] - edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID) - if err != nil { - return err - } - edgeInfo.db = db - - outgoingPolicy, err := fetchChanEdgePolicy( - edges, chanID, nodePub, nodes, - ) - if err != nil { - return err - } - - otherNode, err := edgeInfo.OtherNodeKeyBytes(nodePub) - if err != nil { - return err - } - - incomingPolicy, err := fetchChanEdgePolicy( - edges, chanID, otherNode[:], nodes, - ) - if err != nil { - return err - } - - // Finally, we execute the callback. - err = cb(tx, &edgeInfo, outgoingPolicy, incomingPolicy) - if err != nil { - return err - } - } - - return nil - } - - // If no transaction was provided, then we'll create a new transaction - // to execute the transaction within. - if tx == nil { - return kvdb.View(db, traversal, func() {}) - } - - // Otherwise, we re-use the existing transaction to execute the graph - // traversal. - return traversal(tx) -} - -// ForEachChannel iterates through all channels of this node, executing the -// passed callback with an edge info structure and the policies of each end -// of the channel. The first edge policy is the outgoing edge *to* the -// the connecting node, while the second is the incoming edge *from* the -// connecting node. If the callback returns an error, then the iteration is -// halted with the error propagated back up to the caller. -// -// Unknown policies are passed into the callback as nil values. -// -// If the caller wishes to re-use an existing boltdb transaction, then it -// should be passed as the first argument. Otherwise the first argument should -// be nil and a fresh transaction will be created to execute the graph -// traversal. 
-func (l *LightningNode) ForEachChannel(tx kvdb.RTx, - cb func(kvdb.RTx, *ChannelEdgeInfo, *ChannelEdgePolicy, *ChannelEdgePolicy) er.R) er.R { - - nodePub := l.PubKeyBytes[:] - db := l.db - - return nodeTraversal(tx, nodePub, db, cb) -} - -// ChannelEdgeInfo represents a fully authenticated channel along with all its -// unique attributes. Once an authenticated channel announcement has been -// processed on the network, then an instance of ChannelEdgeInfo encapsulating -// the channels attributes is stored. The other portions relevant to routing -// policy of a channel are stored within a ChannelEdgePolicy for each direction -// of the channel. -type ChannelEdgeInfo struct { - // ChannelID is the unique channel ID for the channel. The first 3 - // bytes are the block height, the next 3 the index within the block, - // and the last 2 bytes are the output index for the channel. - ChannelID uint64 - - // ChainHash is the hash that uniquely identifies the chain that this - // channel was opened within. - // - // TODO(roasbeef): need to modify db keying for multi-chain - // * must add chain hash to prefix as well - ChainHash chainhash.Hash - - // NodeKey1Bytes is the raw public key of the first node. - NodeKey1Bytes [33]byte - nodeKey1 *btcec.PublicKey - - // NodeKey2Bytes is the raw public key of the first node. - NodeKey2Bytes [33]byte - nodeKey2 *btcec.PublicKey - - // BitcoinKey1Bytes is the raw public key of the first node. - BitcoinKey1Bytes [33]byte - bitcoinKey1 *btcec.PublicKey - - // BitcoinKey2Bytes is the raw public key of the first node. - BitcoinKey2Bytes [33]byte - bitcoinKey2 *btcec.PublicKey - - // Features is an opaque byte slice that encodes the set of channel - // specific features that this channel edge supports. - Features []byte - - // AuthProof is the authentication proof for this channel. This proof - // contains a set of signatures binding four identities, which attests - // to the legitimacy of the advertised channel. 
- AuthProof *ChannelAuthProof - - // ChannelPoint is the funding outpoint of the channel. This can be - // used to uniquely identify the channel within the channel graph. - ChannelPoint wire.OutPoint - - // Capacity is the total capacity of the channel, this is determined by - // the value output in the outpoint that created this channel. - Capacity btcutil.Amount - - // ExtraOpaqueData is the set of data that was appended to this - // message, some of which we may not actually know how to iterate or - // parse. By holding onto this data, we ensure that we're able to - // properly validate the set of signatures that cover these new fields, - // and ensure we're able to make upgrades to the network in a forwards - // compatible manner. - ExtraOpaqueData []byte - - db *DB -} - -// AddNodeKeys is a setter-like method that can be used to replace the set of -// keys for the target ChannelEdgeInfo. -func (c *ChannelEdgeInfo) AddNodeKeys(nodeKey1, nodeKey2, bitcoinKey1, - bitcoinKey2 *btcec.PublicKey) { - - c.nodeKey1 = nodeKey1 - copy(c.NodeKey1Bytes[:], c.nodeKey1.SerializeCompressed()) - - c.nodeKey2 = nodeKey2 - copy(c.NodeKey2Bytes[:], nodeKey2.SerializeCompressed()) - - c.bitcoinKey1 = bitcoinKey1 - copy(c.BitcoinKey1Bytes[:], c.bitcoinKey1.SerializeCompressed()) - - c.bitcoinKey2 = bitcoinKey2 - copy(c.BitcoinKey2Bytes[:], bitcoinKey2.SerializeCompressed()) -} - -// NodeKey1 is the identity public key of the "first" node that was involved in -// the creation of this channel. A node is considered "first" if the -// lexicographical ordering the its serialized public key is "smaller" than -// that of the other node involved in channel creation. -// -// NOTE: By having this method to access an attribute, we ensure we only need -// to fully deserialize the pubkey if absolutely necessary. 
-func (c *ChannelEdgeInfo) NodeKey1() (*btcec.PublicKey, er.R) { - if c.nodeKey1 != nil { - return c.nodeKey1, nil - } - - key, err := btcec.ParsePubKey(c.NodeKey1Bytes[:], btcec.S256()) - if err != nil { - return nil, err - } - c.nodeKey1 = key - - return key, nil -} - -// NodeKey2 is the identity public key of the "second" node that was -// involved in the creation of this channel. A node is considered -// "second" if the lexicographical ordering the its serialized public -// key is "larger" than that of the other node involved in channel -// creation. -// -// NOTE: By having this method to access an attribute, we ensure we only need -// to fully deserialize the pubkey if absolutely necessary. -func (c *ChannelEdgeInfo) NodeKey2() (*btcec.PublicKey, er.R) { - if c.nodeKey2 != nil { - return c.nodeKey2, nil - } - - key, err := btcec.ParsePubKey(c.NodeKey2Bytes[:], btcec.S256()) - if err != nil { - return nil, err - } - c.nodeKey2 = key - - return key, nil -} - -// BitcoinKey1 is the Bitcoin multi-sig key belonging to the first -// node, that was involved in the funding transaction that originally -// created the channel that this struct represents. -// -// NOTE: By having this method to access an attribute, we ensure we only need -// to fully deserialize the pubkey if absolutely necessary. -func (c *ChannelEdgeInfo) BitcoinKey1() (*btcec.PublicKey, er.R) { - if c.bitcoinKey1 != nil { - return c.bitcoinKey1, nil - } - - key, err := btcec.ParsePubKey(c.BitcoinKey1Bytes[:], btcec.S256()) - if err != nil { - return nil, err - } - c.bitcoinKey1 = key - - return key, nil -} - -// BitcoinKey2 is the Bitcoin multi-sig key belonging to the second -// node, that was involved in the funding transaction that originally -// created the channel that this struct represents. -// -// NOTE: By having this method to access an attribute, we ensure we only need -// to fully deserialize the pubkey if absolutely necessary. 
-func (c *ChannelEdgeInfo) BitcoinKey2() (*btcec.PublicKey, er.R) { - if c.bitcoinKey2 != nil { - return c.bitcoinKey2, nil - } - - key, err := btcec.ParsePubKey(c.BitcoinKey2Bytes[:], btcec.S256()) - if err != nil { - return nil, err - } - c.bitcoinKey2 = key - - return key, nil -} - -// OtherNodeKeyBytes returns the node key bytes of the other end of -// the channel. -func (c *ChannelEdgeInfo) OtherNodeKeyBytes(thisNodeKey []byte) ( - [33]byte, er.R) { - - switch { - case bytes.Equal(c.NodeKey1Bytes[:], thisNodeKey): - return c.NodeKey2Bytes, nil - case bytes.Equal(c.NodeKey2Bytes[:], thisNodeKey): - return c.NodeKey1Bytes, nil - default: - return [33]byte{}, er.Errorf("node not participating in this channel") - } -} - -// FetchOtherNode attempts to fetch the full LightningNode that's opposite of -// the target node in the channel. This is useful when one knows the pubkey of -// one of the nodes, and wishes to obtain the full LightningNode for the other -// end of the channel. -func (c *ChannelEdgeInfo) FetchOtherNode(tx kvdb.RTx, thisNodeKey []byte) (*LightningNode, er.R) { - - // Ensure that the node passed in is actually a member of the channel. - var targetNodeBytes [33]byte - switch { - case bytes.Equal(c.NodeKey1Bytes[:], thisNodeKey): - targetNodeBytes = c.NodeKey2Bytes - case bytes.Equal(c.NodeKey2Bytes[:], thisNodeKey): - targetNodeBytes = c.NodeKey1Bytes - default: - return nil, er.Errorf("node not participating in this channel") - } - - var targetNode *LightningNode - fetchNodeFunc := func(tx kvdb.RTx) er.R { - // First grab the nodes bucket which stores the mapping from - // pubKey to node information. 
- nodes := tx.ReadBucket(nodeBucket) - if nodes == nil { - return ErrGraphNotFound.Default() - } - - node, err := fetchLightningNode(nodes, targetNodeBytes[:]) - if err != nil { - return err - } - node.db = c.db - - targetNode = &node - - return nil - } - - // If the transaction is nil, then we'll need to create a new one, - // otherwise we can use the existing db transaction. - var err er.R - if tx == nil { - err = kvdb.View(c.db, fetchNodeFunc, func() { targetNode = nil }) - } else { - err = fetchNodeFunc(tx) - } - - return targetNode, err -} - -// ChannelAuthProof is the authentication proof (the signature portion) for a -// channel. Using the four signatures contained in the struct, and some -// auxiliary knowledge (the funding script, node identities, and outpoint) nodes -// on the network are able to validate the authenticity and existence of a -// channel. Each of these signatures signs the following digest: chanID || -// nodeID1 || nodeID2 || bitcoinKey1|| bitcoinKey2 || 2-byte-feature-len || -// features. -type ChannelAuthProof struct { - // nodeSig1 is a cached instance of the first node signature. - nodeSig1 *btcec.Signature - - // NodeSig1Bytes are the raw bytes of the first node signature encoded - // in DER format. - NodeSig1Bytes []byte - - // nodeSig2 is a cached instance of the second node signature. - nodeSig2 *btcec.Signature - - // NodeSig2Bytes are the raw bytes of the second node signature - // encoded in DER format. - NodeSig2Bytes []byte - - // bitcoinSig1 is a cached instance of the first bitcoin signature. - bitcoinSig1 *btcec.Signature - - // BitcoinSig1Bytes are the raw bytes of the first bitcoin signature - // encoded in DER format. - BitcoinSig1Bytes []byte - - // bitcoinSig2 is a cached instance of the second bitcoin signature. - bitcoinSig2 *btcec.Signature - - // BitcoinSig2Bytes are the raw bytes of the second bitcoin signature - // encoded in DER format. 
- BitcoinSig2Bytes []byte -} - -// Node1Sig is the signature using the identity key of the node that is first -// in a lexicographical ordering of the serialized public keys of the two nodes -// that created the channel. -// -// NOTE: By having this method to access an attribute, we ensure we only need -// to fully deserialize the signature if absolutely necessary. -func (c *ChannelAuthProof) Node1Sig() (*btcec.Signature, er.R) { - if c.nodeSig1 != nil { - return c.nodeSig1, nil - } - - sig, err := btcec.ParseSignature(c.NodeSig1Bytes, btcec.S256()) - if err != nil { - return nil, err - } - - c.nodeSig1 = sig - - return sig, nil -} - -// Node2Sig is the signature using the identity key of the node that is second -// in a lexicographical ordering of the serialized public keys of the two nodes -// that created the channel. -// -// NOTE: By having this method to access an attribute, we ensure we only need -// to fully deserialize the signature if absolutely necessary. -func (c *ChannelAuthProof) Node2Sig() (*btcec.Signature, er.R) { - if c.nodeSig2 != nil { - return c.nodeSig2, nil - } - - sig, err := btcec.ParseSignature(c.NodeSig2Bytes, btcec.S256()) - if err != nil { - return nil, err - } - - c.nodeSig2 = sig - - return sig, nil -} - -// BitcoinSig1 is the signature using the public key of the first node that was -// used in the channel's multi-sig output. -// -// NOTE: By having this method to access an attribute, we ensure we only need -// to fully deserialize the signature if absolutely necessary. -func (c *ChannelAuthProof) BitcoinSig1() (*btcec.Signature, er.R) { - if c.bitcoinSig1 != nil { - return c.bitcoinSig1, nil - } - - sig, err := btcec.ParseSignature(c.BitcoinSig1Bytes, btcec.S256()) - if err != nil { - return nil, err - } - - c.bitcoinSig1 = sig - - return sig, nil -} - -// BitcoinSig2 is the signature using the public key of the second node that -// was used in the channel's multi-sig output. 
-// -// NOTE: By having this method to access an attribute, we ensure we only need -// to fully deserialize the signature if absolutely necessary. -func (c *ChannelAuthProof) BitcoinSig2() (*btcec.Signature, er.R) { - if c.bitcoinSig2 != nil { - return c.bitcoinSig2, nil - } - - sig, err := btcec.ParseSignature(c.BitcoinSig2Bytes, btcec.S256()) - if err != nil { - return nil, err - } - - c.bitcoinSig2 = sig - - return sig, nil -} - -// IsEmpty check is the authentication proof is empty Proof is empty if at -// least one of the signatures are equal to nil. -func (c *ChannelAuthProof) IsEmpty() bool { - return len(c.NodeSig1Bytes) == 0 || - len(c.NodeSig2Bytes) == 0 || - len(c.BitcoinSig1Bytes) == 0 || - len(c.BitcoinSig2Bytes) == 0 -} - -// ChannelEdgePolicy represents a *directed* edge within the channel graph. For -// each channel in the database, there are two distinct edges: one for each -// possible direction of travel along the channel. The edges themselves hold -// information concerning fees, and minimum time-lock information which is -// utilized during path finding. -type ChannelEdgePolicy struct { - // SigBytes is the raw bytes of the signature of the channel edge - // policy. We'll only parse these if the caller needs to access the - // signature for validation purposes. Do not set SigBytes directly, but - // use SetSigBytes instead to make sure that the cache is invalidated. - SigBytes []byte - - // sig is a cached fully parsed signature. - sig *btcec.Signature - - // ChannelID is the unique channel ID for the channel. The first 3 - // bytes are the block height, the next 3 the index within the block, - // and the last 2 bytes are the output index for the channel. - ChannelID uint64 - - // LastUpdate is the last time an authenticated edge for this channel - // was received. - LastUpdate time.Time - - // MessageFlags is a bitfield which indicates the presence of optional - // fields (like max_htlc) in the policy. 
- MessageFlags lnwire.ChanUpdateMsgFlags - - // ChannelFlags is a bitfield which signals the capabilities of the - // channel as well as the directed edge this update applies to. - ChannelFlags lnwire.ChanUpdateChanFlags - - // TimeLockDelta is the number of blocks this node will subtract from - // the expiry of an incoming HTLC. This value expresses the time buffer - // the node would like to HTLC exchanges. - TimeLockDelta uint16 - - // MinHTLC is the smallest value HTLC this node will forward, expressed - // in millisatoshi. - MinHTLC lnwire.MilliSatoshi - - // MaxHTLC is the largest value HTLC this node will forward, expressed - // in millisatoshi. - MaxHTLC lnwire.MilliSatoshi - - // FeeBaseMSat is the base HTLC fee that will be charged for forwarding - // ANY HTLC, expressed in mSAT's. - FeeBaseMSat lnwire.MilliSatoshi - - // FeeProportionalMillionths is the rate that the node will charge for - // HTLCs for each millionth of a satoshi forwarded. - FeeProportionalMillionths lnwire.MilliSatoshi - - // Node is the LightningNode that this directed edge leads to. Using - // this pointer the channel graph can further be traversed. - Node *LightningNode - - // ExtraOpaqueData is the set of data that was appended to this - // message, some of which we may not actually know how to iterate or - // parse. By holding onto this data, we ensure that we're able to - // properly validate the set of signatures that cover these new fields, - // and ensure we're able to make upgrades to the network in a forwards - // compatible manner. - ExtraOpaqueData []byte - - db *DB -} - -// Signature is a channel announcement signature, which is needed for proper -// edge policy announcement. -// -// NOTE: By having this method to access an attribute, we ensure we only need -// to fully deserialize the signature if absolutely necessary. 
-func (c *ChannelEdgePolicy) Signature() (*btcec.Signature, er.R) { - if c.sig != nil { - return c.sig, nil - } - - sig, err := btcec.ParseSignature(c.SigBytes, btcec.S256()) - if err != nil { - return nil, err - } - - c.sig = sig - - return sig, nil -} - -// SetSigBytes updates the signature and invalidates the cached parsed -// signature. -func (c *ChannelEdgePolicy) SetSigBytes(sig []byte) { - c.SigBytes = sig - c.sig = nil -} - -// IsDisabled determines whether the edge has the disabled bit set. -func (c *ChannelEdgePolicy) IsDisabled() bool { - return c.ChannelFlags&lnwire.ChanUpdateDisabled == - lnwire.ChanUpdateDisabled -} - -// ComputeFee computes the fee to forward an HTLC of `amt` milli-satoshis over -// the passed active payment channel. This value is currently computed as -// specified in BOLT07, but will likely change in the near future. -func (c *ChannelEdgePolicy) ComputeFee( - amt lnwire.MilliSatoshi) lnwire.MilliSatoshi { - - return c.FeeBaseMSat + (amt*c.FeeProportionalMillionths)/feeRateParts -} - -// divideCeil divides dividend by factor and rounds the result up. -func divideCeil(dividend, factor lnwire.MilliSatoshi) lnwire.MilliSatoshi { - return (dividend + factor - 1) / factor -} - -// ComputeFeeFromIncoming computes the fee to forward an HTLC given the incoming -// amount. -func (c *ChannelEdgePolicy) ComputeFeeFromIncoming( - incomingAmt lnwire.MilliSatoshi) lnwire.MilliSatoshi { - - return incomingAmt - divideCeil( - feeRateParts*(incomingAmt-c.FeeBaseMSat), - feeRateParts+c.FeeProportionalMillionths, - ) -} - -// FetchChannelEdgesByOutpoint attempts to lookup the two directed edges for -// the channel identified by the funding outpoint. If the channel can't be -// found, then ErrEdgeNotFound is returned. A struct which houses the general -// information for the channel itself is returned as well as two structs that -// contain the routing policies for the channel in either direction. 
-func (c *ChannelGraph) FetchChannelEdgesByOutpoint(op *wire.OutPoint, -) (*ChannelEdgeInfo, *ChannelEdgePolicy, *ChannelEdgePolicy, er.R) { - - var ( - edgeInfo *ChannelEdgeInfo - policy1 *ChannelEdgePolicy - policy2 *ChannelEdgePolicy - ) - - err := kvdb.View(c.db, func(tx kvdb.RTx) er.R { - // First, grab the node bucket. This will be used to populate - // the Node pointers in each edge read from disk. - nodes := tx.ReadBucket(nodeBucket) - if nodes == nil { - return ErrGraphNotFound.Default() - } - - // Next, grab the edge bucket which stores the edges, and also - // the index itself so we can group the directed edges together - // logically. - edges := tx.ReadBucket(edgeBucket) - if edges == nil { - return ErrGraphNoEdgesFound.Default() - } - edgeIndex := edges.NestedReadBucket(edgeIndexBucket) - if edgeIndex == nil { - return ErrGraphNoEdgesFound.Default() - } - - // If the channel's outpoint doesn't exist within the outpoint - // index, then the edge does not exist. - chanIndex := edges.NestedReadBucket(channelPointBucket) - if chanIndex == nil { - return ErrGraphNoEdgesFound.Default() - } - var b bytes.Buffer - if err := writeOutpoint(&b, op); err != nil { - return err - } - chanID := chanIndex.Get(b.Bytes()) - if chanID == nil { - return ErrEdgeNotFound.Default() - } - - // If the channel is found to exists, then we'll first retrieve - // the general information for the channel. - edge, err := fetchChanEdgeInfo(edgeIndex, chanID) - if err != nil { - return err - } - edgeInfo = &edge - edgeInfo.db = c.db - - // Once we have the information about the channels' parameters, - // we'll fetch the routing policies for each for the directed - // edges. 
- e1, e2, err := fetchChanEdgePolicies( - edgeIndex, edges, nodes, chanID, c.db, - ) - if err != nil { - return err - } - - policy1 = e1 - policy2 = e2 - return nil - }, func() { - edgeInfo = nil - policy1 = nil - policy2 = nil - }) - if err != nil { - return nil, nil, nil, err - } - - return edgeInfo, policy1, policy2, nil -} - -// FetchChannelEdgesByID attempts to lookup the two directed edges for the -// channel identified by the channel ID. If the channel can't be found, then -// ErrEdgeNotFound is returned. A struct which houses the general information -// for the channel itself is returned as well as two structs that contain the -// routing policies for the channel in either direction. -// -// ErrZombieEdge an be returned if the edge is currently marked as a zombie -// within the database. In this case, the ChannelEdgePolicy's will be nil, and -// the ChannelEdgeInfo will only include the public keys of each node. -func (c *ChannelGraph) FetchChannelEdgesByID(chanID uint64, -) (*ChannelEdgeInfo, *ChannelEdgePolicy, *ChannelEdgePolicy, er.R) { - - var ( - edgeInfo *ChannelEdgeInfo - policy1 *ChannelEdgePolicy - policy2 *ChannelEdgePolicy - channelID [8]byte - ) - - err := kvdb.View(c.db, func(tx kvdb.RTx) er.R { - // First, grab the node bucket. This will be used to populate - // the Node pointers in each edge read from disk. - nodes := tx.ReadBucket(nodeBucket) - if nodes == nil { - return ErrGraphNotFound.Default() - } - - // Next, grab the edge bucket which stores the edges, and also - // the index itself so we can group the directed edges together - // logically. - edges := tx.ReadBucket(edgeBucket) - if edges == nil { - return ErrGraphNoEdgesFound.Default() - } - edgeIndex := edges.NestedReadBucket(edgeIndexBucket) - if edgeIndex == nil { - return ErrGraphNoEdgesFound.Default() - } - - byteOrder.PutUint64(channelID[:], chanID) - - // Now, attempt to fetch edge. 
- edge, err := fetchChanEdgeInfo(edgeIndex, channelID[:]) - - // If it doesn't exist, we'll quickly check our zombie index to - // see if we've previously marked it as so. - if ErrEdgeNotFound.Is(err) { - // If the zombie index doesn't exist, or the edge is not - // marked as a zombie within it, then we'll return the - // original ErrEdgeNotFound error. - zombieIndex := edges.NestedReadBucket(zombieBucket) - if zombieIndex == nil { - return ErrEdgeNotFound.Default() - } - - isZombie, pubKey1, pubKey2 := isZombieEdge( - zombieIndex, chanID, - ) - if !isZombie { - return ErrEdgeNotFound.Default() - } - - // Otherwise, the edge is marked as a zombie, so we'll - // populate the edge info with the public keys of each - // party as this is the only information we have about - // it and return an error signaling so. - edgeInfo = &ChannelEdgeInfo{ - NodeKey1Bytes: pubKey1, - NodeKey2Bytes: pubKey2, - } - return ErrZombieEdge.Default() - } - - // Otherwise, we'll just return the error if any. - if err != nil { - return err - } - - edgeInfo = &edge - edgeInfo.db = c.db - - // Then we'll attempt to fetch the accompanying policies of this - // edge. - e1, e2, err := fetchChanEdgePolicies( - edgeIndex, edges, nodes, channelID[:], c.db, - ) - if err != nil { - return err - } - - policy1 = e1 - policy2 = e2 - return nil - }, func() { - edgeInfo = nil - policy1 = nil - policy2 = nil - }) - if ErrZombieEdge.Is(err) { - return edgeInfo, nil, nil, err - } - if err != nil { - return nil, nil, nil, err - } - - return edgeInfo, policy1, policy2, nil -} - -// IsPublicNode is a helper method that determines whether the node with the -// given public key is seen as a public node in the graph from the graph's -// source node's point of view. 
-func (c *ChannelGraph) IsPublicNode(pubKey [33]byte) (bool, er.R) { - var nodeIsPublic bool - err := kvdb.View(c.db, func(tx kvdb.RTx) er.R { - nodes := tx.ReadBucket(nodeBucket) - if nodes == nil { - return ErrGraphNodesNotFound.Default() - } - ourPubKey := nodes.Get(sourceKey) - if ourPubKey == nil { - return ErrSourceNodeNotSet.Default() - } - node, err := fetchLightningNode(nodes, pubKey[:]) - if err != nil { - return err - } - - nodeIsPublic, err = node.isPublic(tx, ourPubKey) - return err - }, func() { - nodeIsPublic = false - }) - if err != nil { - return false, err - } - - return nodeIsPublic, nil -} - -// genMultiSigP2WSH generates the p2wsh'd multisig script for 2 of 2 pubkeys. -func genMultiSigP2WSH(aPub, bPub []byte) ([]byte, er.R) { - if len(aPub) != 33 || len(bPub) != 33 { - return nil, er.Errorf("pubkey size error. Compressed " + - "pubkeys only") - } - - // Swap to sort pubkeys if needed. Keys are sorted in lexicographical - // order. The signatures within the scriptSig must also adhere to the - // order, ensuring that the signatures for each public key appears in - // the proper order on the stack. - if bytes.Compare(aPub, bPub) == 1 { - aPub, bPub = bPub, aPub - } - - // First, we'll generate the witness script for the multi-sig. - bldr := scriptbuilder.NewScriptBuilder() - bldr.AddOp(opcode.OP_2) - bldr.AddData(aPub) // Add both pubkeys (sorted). - bldr.AddData(bPub) - bldr.AddOp(opcode.OP_2) - bldr.AddOp(opcode.OP_CHECKMULTISIG) - witnessScript, err := bldr.Script() - if err != nil { - return nil, err - } - - // With the witness script generated, we'll now turn it into a p2sh - // script: - // * OP_0 - bldr = scriptbuilder.NewScriptBuilder() - bldr.AddOp(opcode.OP_0) - scriptHash := sha256.Sum256(witnessScript) - bldr.AddData(scriptHash[:]) - - return bldr.Script() -} - -// EdgePoint couples the outpoint of a channel with the funding script that it -// creates. 
The FilteredChainView will use this to watch for spends of this -// edge point on chain. We require both of these values as depending on the -// concrete implementation, either the pkScript, or the out point will be used. -type EdgePoint struct { - // FundingPkScript is the p2wsh multi-sig script of the target channel. - FundingPkScript []byte - - // OutPoint is the outpoint of the target channel. - OutPoint wire.OutPoint -} - -// String returns a human readable version of the target EdgePoint. We return -// the outpoint directly as it is enough to uniquely identify the edge point. -func (e *EdgePoint) String() string { - return e.OutPoint.String() -} - -// ChannelView returns the verifiable edge information for each active channel -// within the known channel graph. The set of UTXO's (along with their scripts) -// returned are the ones that need to be watched on chain to detect channel -// closes on the resident blockchain. -func (c *ChannelGraph) ChannelView() ([]EdgePoint, er.R) { - var edgePoints []EdgePoint - if err := kvdb.View(c.db, func(tx kvdb.RTx) er.R { - // We're going to iterate over the entire channel index, so - // we'll need to fetch the edgeBucket to get to the index as - // it's a sub-bucket. - edges := tx.ReadBucket(edgeBucket) - if edges == nil { - return ErrGraphNoEdgesFound.Default() - } - chanIndex := edges.NestedReadBucket(channelPointBucket) - if chanIndex == nil { - return ErrGraphNoEdgesFound.Default() - } - edgeIndex := edges.NestedReadBucket(edgeIndexBucket) - if edgeIndex == nil { - return ErrGraphNoEdgesFound.Default() - } - - // Once we have the proper bucket, we'll range over each key - // (which is the channel point for the channel) and decode it, - // accumulating each entry. 
- return chanIndex.ForEach(func(chanPointBytes, chanID []byte) er.R { - chanPointReader := bytes.NewReader(chanPointBytes) - - var chanPoint wire.OutPoint - err := readOutpoint(chanPointReader, &chanPoint) - if err != nil { - return err - } - - edgeInfo, err := fetchChanEdgeInfo( - edgeIndex, chanID, - ) - if err != nil { - return err - } - - pkScript, err := genMultiSigP2WSH( - edgeInfo.BitcoinKey1Bytes[:], - edgeInfo.BitcoinKey2Bytes[:], - ) - if err != nil { - return err - } - - edgePoints = append(edgePoints, EdgePoint{ - FundingPkScript: pkScript, - OutPoint: chanPoint, - }) - - return nil - }) - }, func() { - edgePoints = nil - }); err != nil { - return nil, err - } - - return edgePoints, nil -} - -// NewChannelEdgePolicy returns a new blank ChannelEdgePolicy. -func (c *ChannelGraph) NewChannelEdgePolicy() *ChannelEdgePolicy { - return &ChannelEdgePolicy{db: c.db} -} - -// markEdgeZombie marks an edge as a zombie within our zombie index. The public -// keys should represent the node public keys of the two parties involved in the -// edge. -func markEdgeZombie(zombieIndex kvdb.RwBucket, chanID uint64, pubKey1, - pubKey2 [33]byte) er.R { - - var k [8]byte - byteOrder.PutUint64(k[:], chanID) - - var v [66]byte - copy(v[:33], pubKey1[:]) - copy(v[33:], pubKey2[:]) - - return zombieIndex.Put(k[:], v[:]) -} - -// MarkEdgeLive clears an edge from our zombie index, deeming it as live. 
-func (c *ChannelGraph) MarkEdgeLive(chanID uint64) er.R { - c.cacheMu.Lock() - defer c.cacheMu.Unlock() - - err := kvdb.Update(c.db, func(tx kvdb.RwTx) er.R { - edges := tx.ReadWriteBucket(edgeBucket) - if edges == nil { - return ErrGraphNoEdgesFound.Default() - } - zombieIndex := edges.NestedReadWriteBucket(zombieBucket) - if zombieIndex == nil { - return nil - } - - var k [8]byte - byteOrder.PutUint64(k[:], chanID) - return zombieIndex.Delete(k[:]) - }, func() {}) - if err != nil { - return err - } - - c.rejectCache.remove(chanID) - c.chanCache.remove(chanID) - - return nil -} - -// IsZombieEdge returns whether the edge is considered zombie. If it is a -// zombie, then the two node public keys corresponding to this edge are also -// returned. -func (c *ChannelGraph) IsZombieEdge(chanID uint64) (bool, [33]byte, [33]byte) { - var ( - isZombie bool - pubKey1, pubKey2 [33]byte - ) - - err := kvdb.View(c.db, func(tx kvdb.RTx) er.R { - edges := tx.ReadBucket(edgeBucket) - if edges == nil { - return ErrGraphNoEdgesFound.Default() - } - zombieIndex := edges.NestedReadBucket(zombieBucket) - if zombieIndex == nil { - return nil - } - - isZombie, pubKey1, pubKey2 = isZombieEdge(zombieIndex, chanID) - return nil - }, func() { - isZombie = false - pubKey1 = [33]byte{} - pubKey2 = [33]byte{} - }) - if err != nil { - return false, [33]byte{}, [33]byte{} - } - - return isZombie, pubKey1, pubKey2 -} - -// isZombieEdge returns whether an entry exists for the given channel in the -// zombie index. If an entry exists, then the two node public keys corresponding -// to this edge are also returned. 
-func isZombieEdge(zombieIndex kvdb.RBucket, - chanID uint64) (bool, [33]byte, [33]byte) { - - var k [8]byte - byteOrder.PutUint64(k[:], chanID) - - v := zombieIndex.Get(k[:]) - if v == nil { - return false, [33]byte{}, [33]byte{} - } - - var pubKey1, pubKey2 [33]byte - copy(pubKey1[:], v[:33]) - copy(pubKey2[:], v[33:]) - - return true, pubKey1, pubKey2 -} - -// NumZombies returns the current number of zombie channels in the graph. -func (c *ChannelGraph) NumZombies() (uint64, er.R) { - var numZombies uint64 - err := kvdb.View(c.db, func(tx kvdb.RTx) er.R { - edges := tx.ReadBucket(edgeBucket) - if edges == nil { - return nil - } - zombieIndex := edges.NestedReadBucket(zombieBucket) - if zombieIndex == nil { - return nil - } - - return zombieIndex.ForEach(func(_, _ []byte) er.R { - numZombies++ - return nil - }) - }, func() { - numZombies = 0 - }) - if err != nil { - return 0, err - } - - return numZombies, nil -} - -func putLightningNode(nodeBucket kvdb.RwBucket, aliasBucket kvdb.RwBucket, // nolint:dupl - updateIndex kvdb.RwBucket, node *LightningNode) er.R { - - var ( - scratch [16]byte - b bytes.Buffer - ) - - pub, err := node.PubKey() - if err != nil { - return err - } - nodePub := pub.SerializeCompressed() - - // If the node has the update time set, write it, else write 0. - updateUnix := uint64(0) - if node.LastUpdate.Unix() > 0 { - updateUnix = uint64(node.LastUpdate.Unix()) - } - - byteOrder.PutUint64(scratch[:8], updateUnix) - if _, err := b.Write(scratch[:8]); err != nil { - return er.E(err) - } - - if _, err := b.Write(nodePub); err != nil { - return er.E(err) - } - - // If we got a node announcement for this node, we will have the rest - // of the data available. If not we don't have more data to write. - if !node.HaveNodeAnnouncement { - // Write HaveNodeAnnouncement=0. 
- byteOrder.PutUint16(scratch[:2], 0) - if _, err := b.Write(scratch[:2]); err != nil { - return er.E(err) - } - - return nodeBucket.Put(nodePub, b.Bytes()) - } - - // Write HaveNodeAnnouncement=1. - byteOrder.PutUint16(scratch[:2], 1) - if _, err := b.Write(scratch[:2]); err != nil { - return er.E(err) - } - - if err := util.WriteBin(&b, byteOrder, node.Color.R); err != nil { - return err - } - if err := util.WriteBin(&b, byteOrder, node.Color.G); err != nil { - return err - } - if err := util.WriteBin(&b, byteOrder, node.Color.B); err != nil { - return err - } - - if err := wire.WriteVarString(&b, 0, node.Alias); err != nil { - return err - } - - if err := node.Features.Encode(&b); err != nil { - return err - } - - numAddresses := uint16(len(node.Addresses)) - byteOrder.PutUint16(scratch[:2], numAddresses) - if _, err := b.Write(scratch[:2]); err != nil { - return er.E(err) - } - - for _, address := range node.Addresses { - if err := serializeAddr(&b, address); err != nil { - return err - } - } - - sigLen := len(node.AuthSigBytes) - if sigLen > 80 { - return er.Errorf("max sig len allowed is 80, had %v", - sigLen) - } - - err = wire.WriteVarBytes(&b, 0, node.AuthSigBytes) - if err != nil { - return err - } - - if len(node.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes { - return ErrTooManyExtraOpaqueBytes.New( - fmt.Sprintf("%d", len(node.ExtraOpaqueData)), nil) - } - err = wire.WriteVarBytes(&b, 0, node.ExtraOpaqueData) - if err != nil { - return err - } - - if err := aliasBucket.Put(nodePub, []byte(node.Alias)); err != nil { - return err - } - - // With the alias bucket updated, we'll now update the index that - // tracks the time series of node updates. - var indexKey [8 + 33]byte - byteOrder.PutUint64(indexKey[:8], updateUnix) - copy(indexKey[8:], nodePub) - - // If there was already an old index entry for this node, then we'll - // delete the old one before we write the new entry. 
- if nodeBytes := nodeBucket.Get(nodePub); nodeBytes != nil { - // Extract out the old update time to we can reconstruct the - // prior index key to delete it from the index. - oldUpdateTime := nodeBytes[:8] - - var oldIndexKey [8 + 33]byte - copy(oldIndexKey[:8], oldUpdateTime) - copy(oldIndexKey[8:], nodePub) - - if err := updateIndex.Delete(oldIndexKey[:]); err != nil { - return err - } - } - - if err := updateIndex.Put(indexKey[:], nil); err != nil { - return err - } - - return nodeBucket.Put(nodePub, b.Bytes()) -} - -func fetchLightningNode(nodeBucket kvdb.RBucket, - nodePub []byte) (LightningNode, er.R) { - - nodeBytes := nodeBucket.Get(nodePub) - if nodeBytes == nil { - return LightningNode{}, ErrGraphNodeNotFound.Default() - } - - nodeReader := bytes.NewReader(nodeBytes) - return deserializeLightningNode(nodeReader) -} - -func deserializeLightningNode(r io.Reader) (LightningNode, er.R) { - var ( - node LightningNode - scratch [8]byte - err er.R - ) - - // Always populate a feature vector, even if we don't have a node - // announcement and short circuit below. - node.Features = lnwire.EmptyFeatureVector() - - if _, err := r.Read(scratch[:]); err != nil { - return LightningNode{}, er.E(err) - } - - unix := int64(byteOrder.Uint64(scratch[:])) - node.LastUpdate = time.Unix(unix, 0) - - if _, err := util.ReadFull(r, node.PubKeyBytes[:]); err != nil { - return LightningNode{}, err - } - - if _, err := r.Read(scratch[:2]); err != nil { - return LightningNode{}, er.E(err) - } - - hasNodeAnn := byteOrder.Uint16(scratch[:2]) - if hasNodeAnn == 1 { - node.HaveNodeAnnouncement = true - } else { - node.HaveNodeAnnouncement = false - } - - // The rest of the data is optional, and will only be there if we got a node - // announcement for this node. - if !node.HaveNodeAnnouncement { - return node, nil - } - - // We did get a node announcement for this node, so we'll have the rest - // of the data available. 
- if err := util.ReadBin(r, byteOrder, &node.Color.R); err != nil { - return LightningNode{}, err - } - if err := util.ReadBin(r, byteOrder, &node.Color.G); err != nil { - return LightningNode{}, err - } - if err := util.ReadBin(r, byteOrder, &node.Color.B); err != nil { - return LightningNode{}, err - } - - node.Alias, err = wire.ReadVarString(r, 0) - if err != nil { - return LightningNode{}, err - } - - err = node.Features.Decode(r) - if err != nil { - return LightningNode{}, err - } - - if _, err := r.Read(scratch[:2]); err != nil { - return LightningNode{}, er.E(err) - } - numAddresses := int(byteOrder.Uint16(scratch[:2])) - - var addresses []net.Addr - for i := 0; i < numAddresses; i++ { - address, err := deserializeAddr(r) - if err != nil { - return LightningNode{}, err - } - addresses = append(addresses, address) - } - node.Addresses = addresses - - node.AuthSigBytes, err = wire.ReadVarBytes(r, 0, 80, "sig") - if err != nil { - return LightningNode{}, err - } - - // We'll try and see if there are any opaque bytes left, if not, then - // we'll ignore the EOF error and return the node as is. 
- node.ExtraOpaqueData, err = wire.ReadVarBytes( - r, 0, MaxAllowedExtraOpaqueBytes, "blob", - ) - switch { - case er.Wrapped(err) == io.ErrUnexpectedEOF: - case er.Wrapped(err) == io.EOF: - case err != nil: - return LightningNode{}, err - } - - return node, nil -} - -func putChanEdgeInfo(edgeIndex kvdb.RwBucket, edgeInfo *ChannelEdgeInfo, chanID [8]byte) er.R { - var b bytes.Buffer - - if _, err := b.Write(edgeInfo.NodeKey1Bytes[:]); err != nil { - return er.E(err) - } - if _, err := b.Write(edgeInfo.NodeKey2Bytes[:]); err != nil { - return er.E(err) - } - if _, err := b.Write(edgeInfo.BitcoinKey1Bytes[:]); err != nil { - return er.E(err) - } - if _, err := b.Write(edgeInfo.BitcoinKey2Bytes[:]); err != nil { - return er.E(err) - } - - if err := wire.WriteVarBytes(&b, 0, edgeInfo.Features); err != nil { - return err - } - - authProof := edgeInfo.AuthProof - var nodeSig1, nodeSig2, bitcoinSig1, bitcoinSig2 []byte - if authProof != nil { - nodeSig1 = authProof.NodeSig1Bytes - nodeSig2 = authProof.NodeSig2Bytes - bitcoinSig1 = authProof.BitcoinSig1Bytes - bitcoinSig2 = authProof.BitcoinSig2Bytes - } - - if err := wire.WriteVarBytes(&b, 0, nodeSig1); err != nil { - return err - } - if err := wire.WriteVarBytes(&b, 0, nodeSig2); err != nil { - return err - } - if err := wire.WriteVarBytes(&b, 0, bitcoinSig1); err != nil { - return err - } - if err := wire.WriteVarBytes(&b, 0, bitcoinSig2); err != nil { - return err - } - - if err := writeOutpoint(&b, &edgeInfo.ChannelPoint); err != nil { - return err - } - if err := util.WriteBin(&b, byteOrder, uint64(edgeInfo.Capacity)); err != nil { - return err - } - if _, err := b.Write(chanID[:]); err != nil { - return er.E(err) - } - if _, err := b.Write(edgeInfo.ChainHash[:]); err != nil { - return er.E(err) - } - - if len(edgeInfo.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes { - return ErrTooManyExtraOpaqueBytes.New( - fmt.Sprintf("%d", len(edgeInfo.ExtraOpaqueData)), nil) - } - err := wire.WriteVarBytes(&b, 0, 
edgeInfo.ExtraOpaqueData) - if err != nil { - return err - } - - return edgeIndex.Put(chanID[:], b.Bytes()) -} - -func fetchChanEdgeInfo(edgeIndex kvdb.RBucket, - chanID []byte) (ChannelEdgeInfo, er.R) { - - edgeInfoBytes := edgeIndex.Get(chanID) - if edgeInfoBytes == nil { - return ChannelEdgeInfo{}, ErrEdgeNotFound.Default() - } - - edgeInfoReader := bytes.NewReader(edgeInfoBytes) - return deserializeChanEdgeInfo(edgeInfoReader) -} - -func deserializeChanEdgeInfo(r io.Reader) (ChannelEdgeInfo, er.R) { - var ( - err er.R - edgeInfo ChannelEdgeInfo - ) - - if _, err := util.ReadFull(r, edgeInfo.NodeKey1Bytes[:]); err != nil { - return ChannelEdgeInfo{}, err - } - if _, err := util.ReadFull(r, edgeInfo.NodeKey2Bytes[:]); err != nil { - return ChannelEdgeInfo{}, err - } - if _, err := util.ReadFull(r, edgeInfo.BitcoinKey1Bytes[:]); err != nil { - return ChannelEdgeInfo{}, err - } - if _, err := util.ReadFull(r, edgeInfo.BitcoinKey2Bytes[:]); err != nil { - return ChannelEdgeInfo{}, err - } - - edgeInfo.Features, err = wire.ReadVarBytes(r, 0, 900, "features") - if err != nil { - return ChannelEdgeInfo{}, err - } - - proof := &ChannelAuthProof{} - - proof.NodeSig1Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs") - if err != nil { - return ChannelEdgeInfo{}, err - } - proof.NodeSig2Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs") - if err != nil { - return ChannelEdgeInfo{}, err - } - proof.BitcoinSig1Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs") - if err != nil { - return ChannelEdgeInfo{}, err - } - proof.BitcoinSig2Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs") - if err != nil { - return ChannelEdgeInfo{}, err - } - - if !proof.IsEmpty() { - edgeInfo.AuthProof = proof - } - - edgeInfo.ChannelPoint = wire.OutPoint{} - if err := readOutpoint(r, &edgeInfo.ChannelPoint); err != nil { - return ChannelEdgeInfo{}, err - } - if err := util.ReadBin(r, byteOrder, &edgeInfo.Capacity); err != nil { - return ChannelEdgeInfo{}, err - } - if err := util.ReadBin(r, byteOrder, 
&edgeInfo.ChannelID); err != nil { - return ChannelEdgeInfo{}, err - } - - if _, err := util.ReadFull(r, edgeInfo.ChainHash[:]); err != nil { - return ChannelEdgeInfo{}, err - } - - // We'll try and see if there are any opaque bytes left, if not, then - // we'll ignore the EOF error and return the edge as is. - edgeInfo.ExtraOpaqueData, err = wire.ReadVarBytes( - r, 0, MaxAllowedExtraOpaqueBytes, "blob", - ) - switch { - case er.Wrapped(err) == io.ErrUnexpectedEOF: - case er.Wrapped(err) == io.EOF: - case err != nil: - return ChannelEdgeInfo{}, err - } - - return edgeInfo, nil -} - -func putChanEdgePolicy(edges, nodes kvdb.RwBucket, edge *ChannelEdgePolicy, - from, to []byte) er.R { - - var edgeKey [33 + 8]byte - copy(edgeKey[:], from) - byteOrder.PutUint64(edgeKey[33:], edge.ChannelID) - - var b bytes.Buffer - if err := serializeChanEdgePolicy(&b, edge, to); err != nil { - return err - } - - // Before we write out the new edge, we'll create a new entry in the - // update index in order to keep it fresh. - updateUnix := uint64(edge.LastUpdate.Unix()) - var indexKey [8 + 8]byte - byteOrder.PutUint64(indexKey[:8], updateUnix) - byteOrder.PutUint64(indexKey[8:], edge.ChannelID) - - updateIndex, err := edges.CreateBucketIfNotExists(edgeUpdateIndexBucket) - if err != nil { - return err - } - - // If there was already an entry for this edge, then we'll need to - // delete the old one to ensure we don't leave around any after-images. - // An unknown policy value does not have a update time recorded, so - // it also does not need to be removed. - if edgeBytes := edges.Get(edgeKey[:]); edgeBytes != nil && - !bytes.Equal(edgeBytes[:], unknownPolicy) { - - // In order to delete the old entry, we'll need to obtain the - // *prior* update time in order to delete it. To do this, we'll - // need to deserialize the existing policy within the database - // (now outdated by the new one), and delete its corresponding - // entry within the update index. 
We'll ignore any - // ErrEdgePolicyOptionalFieldNotFound error, as we only need - // the channel ID and update time to delete the entry. - // TODO(halseth): get rid of these invalid policies in a - // migration. - oldEdgePolicy, err := deserializeChanEdgePolicy( - bytes.NewReader(edgeBytes), nodes, - ) - if err != nil && !ErrEdgePolicyOptionalFieldNotFound.Is(err) { - return err - } - - oldUpdateTime := uint64(oldEdgePolicy.LastUpdate.Unix()) - - var oldIndexKey [8 + 8]byte - byteOrder.PutUint64(oldIndexKey[:8], oldUpdateTime) - byteOrder.PutUint64(oldIndexKey[8:], edge.ChannelID) - - if err := updateIndex.Delete(oldIndexKey[:]); err != nil { - return err - } - } - - if err := updateIndex.Put(indexKey[:], nil); err != nil { - return err - } - - updateEdgePolicyDisabledIndex( - edges, edge.ChannelID, - edge.ChannelFlags&lnwire.ChanUpdateDirection > 0, - edge.IsDisabled(), - ) - - return edges.Put(edgeKey[:], b.Bytes()[:]) -} - -// updateEdgePolicyDisabledIndex is used to update the disabledEdgePolicyIndex -// bucket by either add a new disabled ChannelEdgePolicy or remove an existing -// one. -// The direction represents the direction of the edge and disabled is used for -// deciding whether to remove or add an entry to the bucket. -// In general a channel is disabled if two entries for the same chanID exist -// in this bucket. -// Maintaining the bucket this way allows a fast retrieval of disabled -// channels, for example when prune is needed. 
-func updateEdgePolicyDisabledIndex(edges kvdb.RwBucket, chanID uint64, - direction bool, disabled bool) er.R { - - var disabledEdgeKey [8 + 1]byte - byteOrder.PutUint64(disabledEdgeKey[0:], chanID) - if direction { - disabledEdgeKey[8] = 1 - } - - disabledEdgePolicyIndex, err := edges.CreateBucketIfNotExists( - disabledEdgePolicyBucket, - ) - if err != nil { - return err - } - - if disabled { - return disabledEdgePolicyIndex.Put(disabledEdgeKey[:], []byte{}) - } - - return disabledEdgePolicyIndex.Delete(disabledEdgeKey[:]) -} - -// putChanEdgePolicyUnknown marks the edge policy as unknown -// in the edges bucket. -func putChanEdgePolicyUnknown(edges kvdb.RwBucket, channelID uint64, - from []byte) er.R { - - var edgeKey [33 + 8]byte - copy(edgeKey[:], from) - byteOrder.PutUint64(edgeKey[33:], channelID) - - if edges.Get(edgeKey[:]) != nil { - return er.Errorf("cannot write unknown policy for channel %v "+ - " when there is already a policy present", channelID) - } - - return edges.Put(edgeKey[:], unknownPolicy) -} - -func fetchChanEdgePolicy(edges kvdb.RBucket, chanID []byte, - nodePub []byte, nodes kvdb.RBucket) (*ChannelEdgePolicy, er.R) { - - var edgeKey [33 + 8]byte - copy(edgeKey[:], nodePub) - copy(edgeKey[33:], chanID[:]) - - edgeBytes := edges.Get(edgeKey[:]) - if edgeBytes == nil { - return nil, ErrEdgeNotFound.Default() - } - - // No need to deserialize unknown policy. - if bytes.Equal(edgeBytes[:], unknownPolicy) { - return nil, nil - } - - edgeReader := bytes.NewReader(edgeBytes) - - ep, err := deserializeChanEdgePolicy(edgeReader, nodes) - switch { - // If the db policy was missing an expected optional field, we return - // nil as if the policy was unknown. 
- case ErrEdgePolicyOptionalFieldNotFound.Is(err): - return nil, nil - - case err != nil: - return nil, err - } - - return ep, nil -} - -func fetchChanEdgePolicies(edgeIndex kvdb.RBucket, edges kvdb.RBucket, - nodes kvdb.RBucket, chanID []byte, - db *DB) (*ChannelEdgePolicy, *ChannelEdgePolicy, er.R) { - - edgeInfo := edgeIndex.Get(chanID) - if edgeInfo == nil { - return nil, nil, ErrEdgeNotFound.Default() - } - - // The first node is contained within the first half of the edge - // information. We only propagate the error here and below if it's - // something other than edge non-existence. - node1Pub := edgeInfo[:33] - edge1, err := fetchChanEdgePolicy(edges, chanID, node1Pub, nodes) - if err != nil { - return nil, nil, err - } - - // As we may have a single direction of the edge but not the other, - // only fill in the database pointers if the edge is found. - if edge1 != nil { - edge1.db = db - edge1.Node.db = db - } - - // Similarly, the second node is contained within the latter - // half of the edge information. 
- node2Pub := edgeInfo[33:66] - edge2, err := fetchChanEdgePolicy(edges, chanID, node2Pub, nodes) - if err != nil { - return nil, nil, err - } - - if edge2 != nil { - edge2.db = db - edge2.Node.db = db - } - - return edge1, edge2, nil -} - -func serializeChanEdgePolicy(w io.Writer, edge *ChannelEdgePolicy, - to []byte) er.R { - - err := wire.WriteVarBytes(w, 0, edge.SigBytes) - if err != nil { - return err - } - - if err := util.WriteBin(w, byteOrder, edge.ChannelID); err != nil { - return err - } - - var scratch [8]byte - updateUnix := uint64(edge.LastUpdate.Unix()) - byteOrder.PutUint64(scratch[:], updateUnix) - if _, err := util.Write(w, scratch[:]); err != nil { - return err - } - - if err := util.WriteBin(w, byteOrder, edge.MessageFlags); err != nil { - return err - } - if err := util.WriteBin(w, byteOrder, edge.ChannelFlags); err != nil { - return err - } - if err := util.WriteBin(w, byteOrder, edge.TimeLockDelta); err != nil { - return err - } - if err := util.WriteBin(w, byteOrder, uint64(edge.MinHTLC)); err != nil { - return err - } - if err := util.WriteBin(w, byteOrder, uint64(edge.FeeBaseMSat)); err != nil { - return err - } - if err := util.WriteBin(w, byteOrder, uint64(edge.FeeProportionalMillionths)); err != nil { - return err - } - - if _, err := util.Write(w, to); err != nil { - return err - } - - // If the max_htlc field is present, we write it. To be compatible with - // older versions that wasn't aware of this field, we write it as part - // of the opaque data. - // TODO(halseth): clean up when moving to TLV. 
- var opaqueBuf bytes.Buffer - if edge.MessageFlags.HasMaxHtlc() { - err := util.WriteBin(&opaqueBuf, byteOrder, uint64(edge.MaxHTLC)) - if err != nil { - return err - } - } - - if len(edge.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes { - return ErrTooManyExtraOpaqueBytes.New( - fmt.Sprintf("%d", len(edge.ExtraOpaqueData)), nil) - } - if _, err := opaqueBuf.Write(edge.ExtraOpaqueData); err != nil { - return er.E(err) - } - - if err := wire.WriteVarBytes(w, 0, opaqueBuf.Bytes()); err != nil { - return err - } - return nil -} - -func deserializeChanEdgePolicy(r io.Reader, - nodes kvdb.RBucket) (*ChannelEdgePolicy, er.R) { - - edge := &ChannelEdgePolicy{} - - var err er.R - edge.SigBytes, err = wire.ReadVarBytes(r, 0, 80, "sig") - if err != nil { - return nil, err - } - - if err := util.ReadBin(r, byteOrder, &edge.ChannelID); err != nil { - return nil, err - } - - var scratch [8]byte - if _, err := r.Read(scratch[:]); err != nil { - return nil, er.E(err) - } - unix := int64(byteOrder.Uint64(scratch[:])) - edge.LastUpdate = time.Unix(unix, 0) - - if err := util.ReadBin(r, byteOrder, &edge.MessageFlags); err != nil { - return nil, err - } - if err := util.ReadBin(r, byteOrder, &edge.ChannelFlags); err != nil { - return nil, err - } - if err := util.ReadBin(r, byteOrder, &edge.TimeLockDelta); err != nil { - return nil, err - } - - var n uint64 - if err := util.ReadBin(r, byteOrder, &n); err != nil { - return nil, err - } - edge.MinHTLC = lnwire.MilliSatoshi(n) - - if err := util.ReadBin(r, byteOrder, &n); err != nil { - return nil, err - } - edge.FeeBaseMSat = lnwire.MilliSatoshi(n) - - if err := util.ReadBin(r, byteOrder, &n); err != nil { - return nil, err - } - edge.FeeProportionalMillionths = lnwire.MilliSatoshi(n) - - var pub [33]byte - if _, err := r.Read(pub[:]); err != nil { - return nil, er.E(err) - } - - node, err := fetchLightningNode(nodes, pub[:]) - if err != nil { - return nil, er.Errorf("unable to fetch node: %x, %v", - pub[:], err) - } - edge.Node = 
&node - - // We'll try and see if there are any opaque bytes left, if not, then - // we'll ignore the EOF error and return the edge as is. - edge.ExtraOpaqueData, err = wire.ReadVarBytes( - r, 0, MaxAllowedExtraOpaqueBytes, "blob", - ) - switch { - case er.Wrapped(err) == io.ErrUnexpectedEOF: - case er.Wrapped(err) == io.EOF: - case err != nil: - return nil, err - } - - // See if optional fields are present. - if edge.MessageFlags.HasMaxHtlc() { - // The max_htlc field should be at the beginning of the opaque - // bytes. - opq := edge.ExtraOpaqueData - - // If the max_htlc field is not present, it might be old data - // stored before this field was validated. We'll return the - // edge along with an error. - if len(opq) < 8 { - return edge, ErrEdgePolicyOptionalFieldNotFound.Default() - } - - maxHtlc := byteOrder.Uint64(opq[:8]) - edge.MaxHTLC = lnwire.MilliSatoshi(maxHtlc) - - // Exclude the parsed field from the rest of the opaque data. - edge.ExtraOpaqueData = opq[8:] - } - - return edge, nil -} diff --git a/lnd/channeldb/graph_test.go b/lnd/channeldb/graph_test.go deleted file mode 100644 index 322b161d..00000000 --- a/lnd/channeldb/graph_test.go +++ /dev/null @@ -1,3197 +0,0 @@ -package channeldb - -import ( - "bytes" - "crypto/sha256" - "image/color" - "math" - "math/big" - prand "math/rand" - "net" - "reflect" - "runtime" - "testing" - "time" - - "github.com/davecgh/go-spew/spew" - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/routing/route" - "github.com/pkt-cash/pktd/wire" -) - -var ( - testAddr = &net.TCPAddr{IP: (net.IP)([]byte{0xA, 0x0, 0x0, 0x1}), - Port: 9000} - anotherAddr, _ = net.ResolveTCPAddr("tcp", - "[2001:db8:85a3:0:0:8a2e:370:7334]:80") - testAddrs = []net.Addr{testAddr, anotherAddr} - - testSig = &btcec.Signature{ - R: new(big.Int), - S: 
new(big.Int), - } - _, _ = testSig.R.SetString("63724406601629180062774974542967536251589935445068131219452686511677818569431", 10) - _, _ = testSig.S.SetString("18801056069249825825291287104931333862866033135609736119018462340006816851118", 10) - - testFeatures = lnwire.NewFeatureVector(nil, lnwire.Features) - - testPub = route.Vertex{2, 202, 4} -) - -func createLightningNode(db *DB, priv *btcec.PrivateKey) (*LightningNode, er.R) { - updateTime := prand.Int63() - - pub := priv.PubKey().SerializeCompressed() - n := &LightningNode{ - HaveNodeAnnouncement: true, - AuthSigBytes: testSig.Serialize(), - LastUpdate: time.Unix(updateTime, 0), - Color: color.RGBA{1, 2, 3, 0}, - Alias: "kek" + string(pub[:]), - Features: testFeatures, - Addresses: testAddrs, - db: db, - } - copy(n.PubKeyBytes[:], priv.PubKey().SerializeCompressed()) - - return n, nil -} - -func createTestVertex(db *DB) (*LightningNode, er.R) { - priv, err := btcec.NewPrivateKey(btcec.S256()) - if err != nil { - return nil, err - } - - return createLightningNode(db, priv) -} - -func TestNodeInsertionAndDeletion(t *testing.T) { - t.Parallel() - - db, cleanUp, err := MakeTestDB() - defer cleanUp() - if err != nil { - t.Fatalf("unable to make test database: %v", err) - } - - graph := db.ChannelGraph() - - // We'd like to test basic insertion/deletion for vertexes from the - // graph, so we'll create a test vertex to start with. - node := &LightningNode{ - HaveNodeAnnouncement: true, - AuthSigBytes: testSig.Serialize(), - LastUpdate: time.Unix(1232342, 0), - Color: color.RGBA{1, 2, 3, 0}, - Alias: "kek", - Features: testFeatures, - Addresses: testAddrs, - ExtraOpaqueData: []byte("extra new data"), - PubKeyBytes: testPub, - db: db, - } - - // First, insert the node into the graph DB. This should succeed - // without any errors. 
- if err := graph.AddLightningNode(node); err != nil { - t.Fatalf("unable to add node: %v", err) - } - - // Next, fetch the node from the database to ensure everything was - // serialized properly. - dbNode, err := graph.FetchLightningNode(nil, testPub) - if err != nil { - t.Fatalf("unable to locate node: %v", err) - } - - if _, exists, err := graph.HasLightningNode(dbNode.PubKeyBytes); err != nil { - t.Fatalf("unable to query for node: %v", err) - } else if !exists { - t.Fatalf("node should be found but wasn't") - } - - // The two nodes should match exactly! - if err := compareNodes(node, dbNode); err != nil { - t.Fatalf("nodes don't match: %v", err) - } - - // Next, delete the node from the graph, this should purge all data - // related to the node. - if err := graph.DeleteLightningNode(testPub); err != nil { - t.Fatalf("unable to delete node; %v", err) - } - - // Finally, attempt to fetch the node again. This should fail as the - // node should have been deleted from the database. - _, err = graph.FetchLightningNode(nil, testPub) - if !ErrGraphNodeNotFound.Is(err) { - t.Fatalf("fetch after delete should fail!") - } -} - -// TestPartialNode checks that we can add and retrieve a LightningNode where -// where only the pubkey is known to the database. -func TestPartialNode(t *testing.T) { - t.Parallel() - - db, cleanUp, err := MakeTestDB() - defer cleanUp() - if err != nil { - t.Fatalf("unable to make test database: %v", err) - } - - graph := db.ChannelGraph() - - // We want to be able to insert nodes into the graph that only has the - // PubKey set. - node := &LightningNode{ - HaveNodeAnnouncement: false, - PubKeyBytes: testPub, - } - - if err := graph.AddLightningNode(node); err != nil { - t.Fatalf("unable to add node: %v", err) - } - - // Next, fetch the node from the database to ensure everything was - // serialized properly. 
- dbNode, err := graph.FetchLightningNode(nil, testPub) - if err != nil { - t.Fatalf("unable to locate node: %v", err) - } - - if _, exists, err := graph.HasLightningNode(dbNode.PubKeyBytes); err != nil { - t.Fatalf("unable to query for node: %v", err) - } else if !exists { - t.Fatalf("node should be found but wasn't") - } - - // The two nodes should match exactly! (with default values for - // LastUpdate and db set to satisfy compareNodes()) - node = &LightningNode{ - HaveNodeAnnouncement: false, - LastUpdate: time.Unix(0, 0), - PubKeyBytes: testPub, - db: db, - } - - if err := compareNodes(node, dbNode); err != nil { - t.Fatalf("nodes don't match: %v", err) - } - - // Next, delete the node from the graph, this should purge all data - // related to the node. - if err := graph.DeleteLightningNode(testPub); err != nil { - t.Fatalf("unable to delete node: %v", err) - } - - // Finally, attempt to fetch the node again. This should fail as the - // node should have been deleted from the database. - _, err = graph.FetchLightningNode(nil, testPub) - if !ErrGraphNodeNotFound.Is(err) { - t.Fatalf("fetch after delete should fail!") - } -} - -func TestAliasLookup(t *testing.T) { - t.Parallel() - - db, cleanUp, err := MakeTestDB() - defer cleanUp() - if err != nil { - t.Fatalf("unable to make test database: %v", err) - } - - graph := db.ChannelGraph() - - // We'd like to test the alias index within the database, so first - // create a new test node. - testNode, err := createTestVertex(db) - if err != nil { - t.Fatalf("unable to create test node: %v", err) - } - - // Add the node to the graph's database, this should also insert an - // entry into the alias index for this node. - if err := graph.AddLightningNode(testNode); err != nil { - t.Fatalf("unable to add node: %v", err) - } - - // Next, attempt to lookup the alias. The alias should exactly match - // the one which the test node was assigned. 
- nodePub, err := testNode.PubKey() - if err != nil { - t.Fatalf("unable to generate pubkey: %v", err) - } - dbAlias, err := graph.LookupAlias(nodePub) - if err != nil { - t.Fatalf("unable to find alias: %v", err) - } - if dbAlias != testNode.Alias { - t.Fatalf("aliases don't match, expected %v got %v", - testNode.Alias, dbAlias) - } - - // Ensure that looking up a non-existent alias results in an error. - node, err := createTestVertex(db) - if err != nil { - t.Fatalf("unable to create test node: %v", err) - } - nodePub, err = node.PubKey() - if err != nil { - t.Fatalf("unable to generate pubkey: %v", err) - } - _, err = graph.LookupAlias(nodePub) - if !ErrNodeAliasNotFound.Is(err) { - t.Fatalf("alias lookup should fail for non-existent pubkey") - } -} - -func TestSourceNode(t *testing.T) { - t.Parallel() - - db, cleanUp, err := MakeTestDB() - defer cleanUp() - if err != nil { - t.Fatalf("unable to make test database: %v", err) - } - - graph := db.ChannelGraph() - - // We'd like to test the setting/getting of the source node, so we - // first create a fake node to use within the test. - testNode, err := createTestVertex(db) - if err != nil { - t.Fatalf("unable to create test node: %v", err) - } - - // Attempt to fetch the source node, this should return an error as the - // source node hasn't yet been set. - if _, err := graph.SourceNode(); !ErrSourceNodeNotSet.Is(err) { - t.Fatalf("source node shouldn't be set in new graph") - } - - // Set the source the source node, this should insert the node into the - // database in a special way indicating it's the source node. - if err := graph.SetSourceNode(testNode); err != nil { - t.Fatalf("unable to set source node: %v", err) - } - - // Retrieve the source node from the database, it should exactly match - // the one we set above. 
- sourceNode, err := graph.SourceNode() - if err != nil { - t.Fatalf("unable to fetch source node: %v", err) - } - if err := compareNodes(testNode, sourceNode); err != nil { - t.Fatalf("nodes don't match: %v", err) - } -} - -func TestEdgeInsertionDeletion(t *testing.T) { - t.Parallel() - - db, cleanUp, err := MakeTestDB() - defer cleanUp() - if err != nil { - t.Fatalf("unable to make test database: %v", err) - } - - graph := db.ChannelGraph() - - // We'd like to test the insertion/deletion of edges, so we create two - // vertexes to connect. - node1, err := createTestVertex(db) - if err != nil { - t.Fatalf("unable to create test node: %v", err) - } - node2, err := createTestVertex(db) - if err != nil { - t.Fatalf("unable to create test node: %v", err) - } - - // In addition to the fake vertexes we create some fake channel - // identifiers. - chanID := uint64(prand.Int63()) - outpoint := wire.OutPoint{ - Hash: rev, - Index: 9, - } - - // Add the new edge to the database, this should proceed without any - // errors. 
- node1Pub, err := node1.PubKey() - if err != nil { - t.Fatalf("unable to generate node key: %v", err) - } - node2Pub, err := node2.PubKey() - if err != nil { - t.Fatalf("unable to generate node key: %v", err) - } - edgeInfo := ChannelEdgeInfo{ - ChannelID: chanID, - ChainHash: key, - AuthProof: &ChannelAuthProof{ - NodeSig1Bytes: testSig.Serialize(), - NodeSig2Bytes: testSig.Serialize(), - BitcoinSig1Bytes: testSig.Serialize(), - BitcoinSig2Bytes: testSig.Serialize(), - }, - ChannelPoint: outpoint, - Capacity: 9000, - } - copy(edgeInfo.NodeKey1Bytes[:], node1Pub.SerializeCompressed()) - copy(edgeInfo.NodeKey2Bytes[:], node2Pub.SerializeCompressed()) - copy(edgeInfo.BitcoinKey1Bytes[:], node1Pub.SerializeCompressed()) - copy(edgeInfo.BitcoinKey2Bytes[:], node2Pub.SerializeCompressed()) - - if err := graph.AddChannelEdge(&edgeInfo); err != nil { - t.Fatalf("unable to create channel edge: %v", err) - } - - // Ensure that both policies are returned as unknown (nil). - _, e1, e2, err := graph.FetchChannelEdgesByID(chanID) - if err != nil { - t.Fatalf("unable to fetch channel edge") - } - if e1 != nil || e2 != nil { - t.Fatalf("channel edges not unknown") - } - - // Next, attempt to delete the edge from the database, again this - // should proceed without any issues. - if err := graph.DeleteChannelEdges(chanID); err != nil { - t.Fatalf("unable to delete edge: %v", err) - } - - // Ensure that any query attempts to lookup the delete channel edge are - // properly deleted. - if _, _, _, err := graph.FetchChannelEdgesByOutpoint(&outpoint); err == nil { - t.Fatalf("channel edge not deleted") - } - if _, _, _, err := graph.FetchChannelEdgesByID(chanID); err == nil { - t.Fatalf("channel edge not deleted") - } - isZombie, _, _ := graph.IsZombieEdge(chanID) - if !isZombie { - t.Fatal("channel edge not marked as zombie") - } - - // Finally, attempt to delete a (now) non-existent edge within the - // database, this should result in an error. 
- err = graph.DeleteChannelEdges(chanID) - if !ErrEdgeNotFound.Is(err) { - t.Fatalf("deleting a non-existent edge should fail!") - } -} - -func createEdge(height, txIndex uint32, txPosition uint16, outPointIndex uint32, - node1, node2 *LightningNode) (ChannelEdgeInfo, lnwire.ShortChannelID) { - - shortChanID := lnwire.ShortChannelID{ - BlockHeight: height, - TxIndex: txIndex, - TxPosition: txPosition, - } - outpoint := wire.OutPoint{ - Hash: rev, - Index: outPointIndex, - } - - node1Pub, _ := node1.PubKey() - node2Pub, _ := node2.PubKey() - edgeInfo := ChannelEdgeInfo{ - ChannelID: shortChanID.ToUint64(), - ChainHash: key, - AuthProof: &ChannelAuthProof{ - NodeSig1Bytes: testSig.Serialize(), - NodeSig2Bytes: testSig.Serialize(), - BitcoinSig1Bytes: testSig.Serialize(), - BitcoinSig2Bytes: testSig.Serialize(), - }, - ChannelPoint: outpoint, - Capacity: 9000, - } - - copy(edgeInfo.NodeKey1Bytes[:], node1Pub.SerializeCompressed()) - copy(edgeInfo.NodeKey2Bytes[:], node2Pub.SerializeCompressed()) - copy(edgeInfo.BitcoinKey1Bytes[:], node1Pub.SerializeCompressed()) - copy(edgeInfo.BitcoinKey2Bytes[:], node2Pub.SerializeCompressed()) - - return edgeInfo, shortChanID -} - -// TestDisconnectBlockAtHeight checks that the pruned state of the channel -// database is what we expect after calling DisconnectBlockAtHeight. -func TestDisconnectBlockAtHeight(t *testing.T) { - t.Parallel() - - db, cleanUp, err := MakeTestDB() - defer cleanUp() - if err != nil { - t.Fatalf("unable to make test database: %v", err) - } - - graph := db.ChannelGraph() - sourceNode, err := createTestVertex(db) - if err != nil { - t.Fatalf("unable to create source node: %v", err) - } - if err := graph.SetSourceNode(sourceNode); err != nil { - t.Fatalf("unable to set source node: %v", err) - } - - // We'd like to test the insertion/deletion of edges, so we create two - // vertexes to connect. 
- node1, err := createTestVertex(db) - if err != nil { - t.Fatalf("unable to create test node: %v", err) - } - node2, err := createTestVertex(db) - if err != nil { - t.Fatalf("unable to create test node: %v", err) - } - - // In addition to the fake vertexes we create some fake channel - // identifiers. - var spendOutputs []*wire.OutPoint - var blockHash chainhash.Hash - copy(blockHash[:], bytes.Repeat([]byte{1}, 32)) - - // Prune the graph a few times to make sure we have entries in the - // prune log. - _, err = graph.PruneGraph(spendOutputs, &blockHash, 155) - if err != nil { - t.Fatalf("unable to prune graph: %v", err) - } - var blockHash2 chainhash.Hash - copy(blockHash2[:], bytes.Repeat([]byte{2}, 32)) - - _, err = graph.PruneGraph(spendOutputs, &blockHash2, 156) - if err != nil { - t.Fatalf("unable to prune graph: %v", err) - } - - // We'll create 3 almost identical edges, so first create a helper - // method containing all logic for doing so. - - // Create an edge which has its block height at 156. - height := uint32(156) - edgeInfo, _ := createEdge(height, 0, 0, 0, node1, node2) - - // Create an edge with block height 157. We give it - // maximum values for tx index and position, to make - // sure our database range scan get edges from the - // entire range. - edgeInfo2, _ := createEdge( - height+1, math.MaxUint32&0x00ffffff, math.MaxUint16, 1, - node1, node2, - ) - - // Create a third edge, this with a block height of 155. - edgeInfo3, _ := createEdge(height-1, 0, 0, 2, node1, node2) - - // Now add all these new edges to the database. 
- if err := graph.AddChannelEdge(&edgeInfo); err != nil { - t.Fatalf("unable to create channel edge: %v", err) - } - - if err := graph.AddChannelEdge(&edgeInfo2); err != nil { - t.Fatalf("unable to create channel edge: %v", err) - } - - if err := graph.AddChannelEdge(&edgeInfo3); err != nil { - t.Fatalf("unable to create channel edge: %v", err) - } - - // Call DisconnectBlockAtHeight, which should prune every channel - // that has a funding height of 'height' or greater. - removed, err := graph.DisconnectBlockAtHeight(uint32(height)) - if err != nil { - t.Fatalf("unable to prune %v", err) - } - - // The two edges should have been removed. - if len(removed) != 2 { - t.Fatalf("expected two edges to be removed from graph, "+ - "only %d were", len(removed)) - } - if removed[0].ChannelID != edgeInfo.ChannelID { - t.Fatalf("expected edge to be removed from graph") - } - if removed[1].ChannelID != edgeInfo2.ChannelID { - t.Fatalf("expected edge to be removed from graph") - } - - // The two first edges should be removed from the db. - _, _, has, isZombie, err := graph.HasChannelEdge(edgeInfo.ChannelID) - if err != nil { - t.Fatalf("unable to query for edge: %v", err) - } - if has { - t.Fatalf("edge1 was not pruned from the graph") - } - if isZombie { - t.Fatal("reorged edge1 should not be marked as zombie") - } - _, _, has, isZombie, err = graph.HasChannelEdge(edgeInfo2.ChannelID) - if err != nil { - t.Fatalf("unable to query for edge: %v", err) - } - if has { - t.Fatalf("edge2 was not pruned from the graph") - } - if isZombie { - t.Fatal("reorged edge2 should not be marked as zombie") - } - - // Edge 3 should not be removed. 
- _, _, has, isZombie, err = graph.HasChannelEdge(edgeInfo3.ChannelID) - if err != nil { - t.Fatalf("unable to query for edge: %v", err) - } - if !has { - t.Fatalf("edge3 was pruned from the graph") - } - if isZombie { - t.Fatal("edge3 was marked as zombie") - } - - // PruneTip should be set to the blockHash we specified for the block - // at height 155. - hash, h, err := graph.PruneTip() - if err != nil { - t.Fatalf("unable to get prune tip: %v", err) - } - if !blockHash.IsEqual(hash) { - t.Fatalf("expected best block to be %x, was %x", blockHash, hash) - } - if h != height-1 { - t.Fatalf("expected best block height to be %d, was %d", height-1, h) - } -} - -func assertEdgeInfoEqual(t *testing.T, e1 *ChannelEdgeInfo, - e2 *ChannelEdgeInfo) { - - if e1.ChannelID != e2.ChannelID { - t.Fatalf("chan id's don't match: %v vs %v", e1.ChannelID, - e2.ChannelID) - } - - if e1.ChainHash != e2.ChainHash { - t.Fatalf("chain hashes don't match: %v vs %v", e1.ChainHash, - e2.ChainHash) - } - - if !bytes.Equal(e1.NodeKey1Bytes[:], e2.NodeKey1Bytes[:]) { - t.Fatalf("nodekey1 doesn't match") - } - if !bytes.Equal(e1.NodeKey2Bytes[:], e2.NodeKey2Bytes[:]) { - t.Fatalf("nodekey2 doesn't match") - } - if !bytes.Equal(e1.BitcoinKey1Bytes[:], e2.BitcoinKey1Bytes[:]) { - t.Fatalf("bitcoinkey1 doesn't match") - } - if !bytes.Equal(e1.BitcoinKey2Bytes[:], e2.BitcoinKey2Bytes[:]) { - t.Fatalf("bitcoinkey2 doesn't match") - } - - if !bytes.Equal(e1.Features, e2.Features) { - t.Fatalf("features doesn't match: %x vs %x", e1.Features, - e2.Features) - } - - if !bytes.Equal(e1.AuthProof.NodeSig1Bytes, e2.AuthProof.NodeSig1Bytes) { - t.Fatalf("nodesig1 doesn't match: %v vs %v", - spew.Sdump(e1.AuthProof.NodeSig1Bytes), - spew.Sdump(e2.AuthProof.NodeSig1Bytes)) - } - if !bytes.Equal(e1.AuthProof.NodeSig2Bytes, e2.AuthProof.NodeSig2Bytes) { - t.Fatalf("nodesig2 doesn't match") - } - if !bytes.Equal(e1.AuthProof.BitcoinSig1Bytes, e2.AuthProof.BitcoinSig1Bytes) { - t.Fatalf("bitcoinsig1 doesn't 
match") - } - if !bytes.Equal(e1.AuthProof.BitcoinSig2Bytes, e2.AuthProof.BitcoinSig2Bytes) { - t.Fatalf("bitcoinsig2 doesn't match") - } - - if e1.ChannelPoint != e2.ChannelPoint { - t.Fatalf("channel point match: %v vs %v", e1.ChannelPoint, - e2.ChannelPoint) - } - - if e1.Capacity != e2.Capacity { - t.Fatalf("capacity doesn't match: %v vs %v", e1.Capacity, - e2.Capacity) - } - - if !bytes.Equal(e1.ExtraOpaqueData, e2.ExtraOpaqueData) { - t.Fatalf("extra data doesn't match: %v vs %v", - e2.ExtraOpaqueData, e2.ExtraOpaqueData) - } -} - -func createChannelEdge(db *DB, node1, node2 *LightningNode) (*ChannelEdgeInfo, - *ChannelEdgePolicy, *ChannelEdgePolicy) { - - var ( - firstNode *LightningNode - secondNode *LightningNode - ) - if bytes.Compare(node1.PubKeyBytes[:], node2.PubKeyBytes[:]) == -1 { - firstNode = node1 - secondNode = node2 - } else { - firstNode = node2 - secondNode = node1 - } - - // In addition to the fake vertexes we create some fake channel - // identifiers. - chanID := uint64(prand.Int63()) - outpoint := wire.OutPoint{ - Hash: rev, - Index: 9, - } - - // Add the new edge to the database, this should proceed without any - // errors. 
- edgeInfo := &ChannelEdgeInfo{ - ChannelID: chanID, - ChainHash: key, - AuthProof: &ChannelAuthProof{ - NodeSig1Bytes: testSig.Serialize(), - NodeSig2Bytes: testSig.Serialize(), - BitcoinSig1Bytes: testSig.Serialize(), - BitcoinSig2Bytes: testSig.Serialize(), - }, - ChannelPoint: outpoint, - Capacity: 1000, - ExtraOpaqueData: []byte("new unknown feature"), - } - copy(edgeInfo.NodeKey1Bytes[:], firstNode.PubKeyBytes[:]) - copy(edgeInfo.NodeKey2Bytes[:], secondNode.PubKeyBytes[:]) - copy(edgeInfo.BitcoinKey1Bytes[:], firstNode.PubKeyBytes[:]) - copy(edgeInfo.BitcoinKey2Bytes[:], secondNode.PubKeyBytes[:]) - - edge1 := &ChannelEdgePolicy{ - SigBytes: testSig.Serialize(), - ChannelID: chanID, - LastUpdate: time.Unix(433453, 0), - MessageFlags: 1, - ChannelFlags: 0, - TimeLockDelta: 99, - MinHTLC: 2342135, - MaxHTLC: 13928598, - FeeBaseMSat: 4352345, - FeeProportionalMillionths: 3452352, - Node: secondNode, - ExtraOpaqueData: []byte("new unknown feature2"), - db: db, - } - edge2 := &ChannelEdgePolicy{ - SigBytes: testSig.Serialize(), - ChannelID: chanID, - LastUpdate: time.Unix(124234, 0), - MessageFlags: 1, - ChannelFlags: 1, - TimeLockDelta: 99, - MinHTLC: 2342135, - MaxHTLC: 13928598, - FeeBaseMSat: 4352345, - FeeProportionalMillionths: 90392423, - Node: firstNode, - ExtraOpaqueData: []byte("new unknown feature1"), - db: db, - } - - return edgeInfo, edge1, edge2 -} - -func TestEdgeInfoUpdates(t *testing.T) { - t.Parallel() - - db, cleanUp, err := MakeTestDB() - defer cleanUp() - if err != nil { - t.Fatalf("unable to make test database: %v", err) - } - - graph := db.ChannelGraph() - - // We'd like to test the update of edges inserted into the database, so - // we create two vertexes to connect. 
- node1, err := createTestVertex(db) - if err != nil { - t.Fatalf("unable to create test node: %v", err) - } - if err := graph.AddLightningNode(node1); err != nil { - t.Fatalf("unable to add node: %v", err) - } - node2, err := createTestVertex(db) - if err != nil { - t.Fatalf("unable to create test node: %v", err) - } - if err := graph.AddLightningNode(node2); err != nil { - t.Fatalf("unable to add node: %v", err) - } - - // Create an edge and add it to the db. - edgeInfo, edge1, edge2 := createChannelEdge(db, node1, node2) - - // Make sure inserting the policy at this point, before the edge info - // is added, will fail. - if err := graph.UpdateEdgePolicy(edge1); !ErrEdgeNotFound.Is(err) { - t.Fatalf("expected ErrEdgeNotFound, got: %v", err) - } - - // Add the edge info. - if err := graph.AddChannelEdge(edgeInfo); err != nil { - t.Fatalf("unable to create channel edge: %v", err) - } - - chanID := edgeInfo.ChannelID - outpoint := edgeInfo.ChannelPoint - - // Next, insert both edge policies into the database, they should both - // be inserted without any issues. - if err := graph.UpdateEdgePolicy(edge1); err != nil { - t.Fatalf("unable to update edge: %v", err) - } - if err := graph.UpdateEdgePolicy(edge2); err != nil { - t.Fatalf("unable to update edge: %v", err) - } - - // Check for existence of the edge within the database, it should be - // found. - _, _, found, isZombie, err := graph.HasChannelEdge(chanID) - if err != nil { - t.Fatalf("unable to query for edge: %v", err) - } - if !found { - t.Fatalf("graph should have of inserted edge") - } - if isZombie { - t.Fatal("live edge should not be marked as zombie") - } - - // We should also be able to retrieve the channelID only knowing the - // channel point of the channel. 
- dbChanID, err := graph.ChannelID(&outpoint) - if err != nil { - t.Fatalf("unable to retrieve channel ID: %v", err) - } - if dbChanID != chanID { - t.Fatalf("chan ID's mismatch, expected %v got %v", dbChanID, - chanID) - } - - // With the edges inserted, perform some queries to ensure that they've - // been inserted properly. - dbEdgeInfo, dbEdge1, dbEdge2, err := graph.FetchChannelEdgesByID(chanID) - if err != nil { - t.Fatalf("unable to fetch channel by ID: %v", err) - } - if err := compareEdgePolicies(dbEdge1, edge1); err != nil { - t.Fatalf("edge doesn't match: %v", err) - } - if err := compareEdgePolicies(dbEdge2, edge2); err != nil { - t.Fatalf("edge doesn't match: %v", err) - } - assertEdgeInfoEqual(t, dbEdgeInfo, edgeInfo) - - // Next, attempt to query the channel edges according to the outpoint - // of the channel. - dbEdgeInfo, dbEdge1, dbEdge2, err = graph.FetchChannelEdgesByOutpoint(&outpoint) - if err != nil { - t.Fatalf("unable to fetch channel by ID: %v", err) - } - if err := compareEdgePolicies(dbEdge1, edge1); err != nil { - t.Fatalf("edge doesn't match: %v", err) - } - if err := compareEdgePolicies(dbEdge2, edge2); err != nil { - t.Fatalf("edge doesn't match: %v", err) - } - assertEdgeInfoEqual(t, dbEdgeInfo, edgeInfo) -} - -func randEdgePolicy(chanID uint64, op wire.OutPoint, db *DB) *ChannelEdgePolicy { - update := prand.Int63() - - return newEdgePolicy(chanID, op, db, update) -} - -func newEdgePolicy(chanID uint64, op wire.OutPoint, db *DB, - updateTime int64) *ChannelEdgePolicy { - - return &ChannelEdgePolicy{ - ChannelID: chanID, - LastUpdate: time.Unix(updateTime, 0), - MessageFlags: 1, - ChannelFlags: 0, - TimeLockDelta: uint16(prand.Int63()), - MinHTLC: lnwire.MilliSatoshi(prand.Int63()), - MaxHTLC: lnwire.MilliSatoshi(prand.Int63()), - FeeBaseMSat: lnwire.MilliSatoshi(prand.Int63()), - FeeProportionalMillionths: lnwire.MilliSatoshi(prand.Int63()), - db: db, - } -} - -func TestGraphTraversal(t *testing.T) { - t.Parallel() - - db, cleanUp, 
err := MakeTestDB() - defer cleanUp() - if err != nil { - t.Fatalf("unable to make test database: %v", err) - } - - graph := db.ChannelGraph() - - // We'd like to test some of the graph traversal capabilities within - // the DB, so we'll create a series of fake nodes to insert into the - // graph. - const numNodes = 20 - nodes := make([]*LightningNode, numNodes) - nodeIndex := map[string]struct{}{} - for i := 0; i < numNodes; i++ { - node, err := createTestVertex(db) - if err != nil { - t.Fatalf("unable to create node: %v", err) - } - - nodes[i] = node - nodeIndex[node.Alias] = struct{}{} - } - - // Add each of the nodes into the graph, they should be inserted - // without error. - for _, node := range nodes { - if err := graph.AddLightningNode(node); err != nil { - t.Fatalf("unable to add node: %v", err) - } - } - - // Iterate over each node as returned by the graph, if all nodes are - // reached, then the map created above should be empty. - err = graph.ForEachNode(func(_ kvdb.RTx, node *LightningNode) er.R { - delete(nodeIndex, node.Alias) - return nil - }) - if err != nil { - t.Fatalf("for each failure: %v", err) - } - if len(nodeIndex) != 0 { - t.Fatalf("all nodes not reached within ForEach") - } - - // Determine which node is "smaller", we'll need this in order to - // properly create the edges for the graph. - var firstNode, secondNode *LightningNode - if bytes.Compare(nodes[0].PubKeyBytes[:], nodes[1].PubKeyBytes[:]) == -1 { - firstNode = nodes[0] - secondNode = nodes[1] - } else { - firstNode = nodes[0] - secondNode = nodes[1] - } - - // Create 5 channels between the first two nodes we generated above. 
- const numChannels = 5 - chanIndex := map[uint64]struct{}{} - for i := 0; i < numChannels; i++ { - txHash := sha256.Sum256([]byte{byte(i)}) - chanID := uint64(i + 1) - op := wire.OutPoint{ - Hash: txHash, - Index: 0, - } - - edgeInfo := ChannelEdgeInfo{ - ChannelID: chanID, - ChainHash: key, - AuthProof: &ChannelAuthProof{ - NodeSig1Bytes: testSig.Serialize(), - NodeSig2Bytes: testSig.Serialize(), - BitcoinSig1Bytes: testSig.Serialize(), - BitcoinSig2Bytes: testSig.Serialize(), - }, - ChannelPoint: op, - Capacity: 1000, - } - copy(edgeInfo.NodeKey1Bytes[:], nodes[0].PubKeyBytes[:]) - copy(edgeInfo.NodeKey2Bytes[:], nodes[1].PubKeyBytes[:]) - copy(edgeInfo.BitcoinKey1Bytes[:], nodes[0].PubKeyBytes[:]) - copy(edgeInfo.BitcoinKey2Bytes[:], nodes[1].PubKeyBytes[:]) - err := graph.AddChannelEdge(&edgeInfo) - if err != nil { - t.Fatalf("unable to add node: %v", err) - } - - // Create and add an edge with random data that points from - // node1 -> node2. - edge := randEdgePolicy(chanID, op, db) - edge.ChannelFlags = 0 - edge.Node = secondNode - edge.SigBytes = testSig.Serialize() - if err := graph.UpdateEdgePolicy(edge); err != nil { - t.Fatalf("unable to update edge: %v", err) - } - - // Create another random edge that points from node2 -> node1 - // this time. - edge = randEdgePolicy(chanID, op, db) - edge.ChannelFlags = 1 - edge.Node = firstNode - edge.SigBytes = testSig.Serialize() - if err := graph.UpdateEdgePolicy(edge); err != nil { - t.Fatalf("unable to update edge: %v", err) - } - - chanIndex[chanID] = struct{}{} - } - - // Iterate through all the known channels within the graph DB, once - // again if the map is empty that indicates that all edges have - // properly been reached. 
- err = graph.ForEachChannel(func(ei *ChannelEdgeInfo, _ *ChannelEdgePolicy, - _ *ChannelEdgePolicy) er.R { - - delete(chanIndex, ei.ChannelID) - return nil - }) - if err != nil { - t.Fatalf("for each failure: %v", err) - } - if len(chanIndex) != 0 { - t.Fatalf("all edges not reached within ForEach") - } - - // Finally, we want to test the ability to iterate over all the - // outgoing channels for a particular node. - numNodeChans := 0 - err = firstNode.ForEachChannel(nil, func(_ kvdb.RTx, _ *ChannelEdgeInfo, - outEdge, inEdge *ChannelEdgePolicy) er.R { - - // All channels between first and second node should have fully - // (both sides) specified policies. - if inEdge == nil || outEdge == nil { - return er.Errorf("channel policy not present") - } - - // Each should indicate that it's outgoing (pointed - // towards the second node). - if !bytes.Equal(outEdge.Node.PubKeyBytes[:], secondNode.PubKeyBytes[:]) { - return er.Errorf("wrong outgoing edge") - } - - // The incoming edge should also indicate that it's pointing to - // the origin node. 
- if !bytes.Equal(inEdge.Node.PubKeyBytes[:], firstNode.PubKeyBytes[:]) { - return er.Errorf("wrong outgoing edge") - } - - numNodeChans++ - return nil - }) - if err != nil { - t.Fatalf("for each failure: %v", err) - } - if numNodeChans != numChannels { - t.Fatalf("all edges for node not reached within ForEach: "+ - "expected %v, got %v", numChannels, numNodeChans) - } -} - -func assertPruneTip(t *testing.T, graph *ChannelGraph, blockHash *chainhash.Hash, - blockHeight uint32) { - - pruneHash, pruneHeight, err := graph.PruneTip() - if err != nil { - _, _, line, _ := runtime.Caller(1) - t.Fatalf("line %v: unable to fetch prune tip: %v", line, err) - } - if !bytes.Equal(blockHash[:], pruneHash[:]) { - _, _, line, _ := runtime.Caller(1) - t.Fatalf("line: %v, prune tips don't match, expected %x got %x", - line, blockHash, pruneHash) - } - if pruneHeight != blockHeight { - _, _, line, _ := runtime.Caller(1) - t.Fatalf("line %v: prune heights don't match, expected %v "+ - "got %v", line, blockHeight, pruneHeight) - } -} - -func assertNumChans(t *testing.T, graph *ChannelGraph, n int) { - numChans := 0 - if err := graph.ForEachChannel(func(*ChannelEdgeInfo, *ChannelEdgePolicy, - *ChannelEdgePolicy) er.R { - - numChans++ - return nil - }); err != nil { - _, _, line, _ := runtime.Caller(1) - t.Fatalf("line %v: unable to scan channels: %v", line, err) - } - if numChans != n { - _, _, line, _ := runtime.Caller(1) - t.Fatalf("line %v: expected %v chans instead have %v", line, - n, numChans) - } -} - -func assertNumNodes(t *testing.T, graph *ChannelGraph, n int) { - numNodes := 0 - err := graph.ForEachNode(func(_ kvdb.RTx, _ *LightningNode) er.R { - numNodes++ - return nil - }) - if err != nil { - _, _, line, _ := runtime.Caller(1) - t.Fatalf("line %v: unable to scan nodes: %v", line, err) - } - - if numNodes != n { - _, _, line, _ := runtime.Caller(1) - t.Fatalf("line %v: expected %v nodes, got %v", line, n, numNodes) - } -} - -func assertChanViewEqual(t *testing.T, a 
[]EdgePoint, b []EdgePoint) { - if len(a) != len(b) { - _, _, line, _ := runtime.Caller(1) - t.Fatalf("line %v: chan views don't match", line) - } - - chanViewSet := make(map[wire.OutPoint]struct{}) - for _, op := range a { - chanViewSet[op.OutPoint] = struct{}{} - } - - for _, op := range b { - if _, ok := chanViewSet[op.OutPoint]; !ok { - _, _, line, _ := runtime.Caller(1) - t.Fatalf("line %v: chanPoint(%v) not found in first "+ - "view", line, op) - } - } -} - -func assertChanViewEqualChanPoints(t *testing.T, a []EdgePoint, b []*wire.OutPoint) { - if len(a) != len(b) { - _, _, line, _ := runtime.Caller(1) - t.Fatalf("line %v: chan views don't match", line) - } - - chanViewSet := make(map[wire.OutPoint]struct{}) - for _, op := range a { - chanViewSet[op.OutPoint] = struct{}{} - } - - for _, op := range b { - if _, ok := chanViewSet[*op]; !ok { - _, _, line, _ := runtime.Caller(1) - t.Fatalf("line %v: chanPoint(%v) not found in first "+ - "view", line, op) - } - } -} - -func TestGraphPruning(t *testing.T) { - t.Parallel() - - db, cleanUp, err := MakeTestDB() - defer cleanUp() - if err != nil { - t.Fatalf("unable to make test database: %v", err) - } - - graph := db.ChannelGraph() - sourceNode, err := createTestVertex(db) - if err != nil { - t.Fatalf("unable to create source node: %v", err) - } - if err := graph.SetSourceNode(sourceNode); err != nil { - t.Fatalf("unable to set source node: %v", err) - } - - // As initial set up for the test, we'll create a graph with 5 vertexes - // and enough edges to create a fully connected graph. The graph will - // be rather simple, representing a straight line. 
- const numNodes = 5 - graphNodes := make([]*LightningNode, numNodes) - for i := 0; i < numNodes; i++ { - node, err := createTestVertex(db) - if err != nil { - t.Fatalf("unable to create node: %v", err) - } - - if err := graph.AddLightningNode(node); err != nil { - t.Fatalf("unable to add node: %v", err) - } - - graphNodes[i] = node - } - - // With the vertexes created, we'll next create a series of channels - // between them. - channelPoints := make([]*wire.OutPoint, 0, numNodes-1) - edgePoints := make([]EdgePoint, 0, numNodes-1) - for i := 0; i < numNodes-1; i++ { - txHash := sha256.Sum256([]byte{byte(i)}) - chanID := uint64(i + 1) - op := wire.OutPoint{ - Hash: txHash, - Index: 0, - } - - channelPoints = append(channelPoints, &op) - - edgeInfo := ChannelEdgeInfo{ - ChannelID: chanID, - ChainHash: key, - AuthProof: &ChannelAuthProof{ - NodeSig1Bytes: testSig.Serialize(), - NodeSig2Bytes: testSig.Serialize(), - BitcoinSig1Bytes: testSig.Serialize(), - BitcoinSig2Bytes: testSig.Serialize(), - }, - ChannelPoint: op, - Capacity: 1000, - } - copy(edgeInfo.NodeKey1Bytes[:], graphNodes[i].PubKeyBytes[:]) - copy(edgeInfo.NodeKey2Bytes[:], graphNodes[i+1].PubKeyBytes[:]) - copy(edgeInfo.BitcoinKey1Bytes[:], graphNodes[i].PubKeyBytes[:]) - copy(edgeInfo.BitcoinKey2Bytes[:], graphNodes[i+1].PubKeyBytes[:]) - if err := graph.AddChannelEdge(&edgeInfo); err != nil { - t.Fatalf("unable to add node: %v", err) - } - - pkScript, err := genMultiSigP2WSH( - edgeInfo.BitcoinKey1Bytes[:], edgeInfo.BitcoinKey2Bytes[:], - ) - if err != nil { - t.Fatalf("unable to gen multi-sig p2wsh: %v", err) - } - edgePoints = append(edgePoints, EdgePoint{ - FundingPkScript: pkScript, - OutPoint: op, - }) - - // Create and add an edge with random data that points from - // node_i -> node_i+1 - edge := randEdgePolicy(chanID, op, db) - edge.ChannelFlags = 0 - edge.Node = graphNodes[i] - edge.SigBytes = testSig.Serialize() - if err := graph.UpdateEdgePolicy(edge); err != nil { - t.Fatalf("unable to 
update edge: %v", err) - } - - // Create another random edge that points from node_i+1 -> - // node_i this time. - edge = randEdgePolicy(chanID, op, db) - edge.ChannelFlags = 1 - edge.Node = graphNodes[i] - edge.SigBytes = testSig.Serialize() - if err := graph.UpdateEdgePolicy(edge); err != nil { - t.Fatalf("unable to update edge: %v", err) - } - } - - // With all the channel points added, we'll consult the graph to ensure - // it has the same channel view as the one we just constructed. - channelView, err := graph.ChannelView() - if err != nil { - t.Fatalf("unable to get graph channel view: %v", err) - } - assertChanViewEqual(t, channelView, edgePoints) - - // Now with our test graph created, we can test the pruning - // capabilities of the channel graph. - - // First we create a mock block that ends up closing the first two - // channels. - var blockHash chainhash.Hash - copy(blockHash[:], bytes.Repeat([]byte{1}, 32)) - blockHeight := uint32(1) - block := channelPoints[:2] - prunedChans, err := graph.PruneGraph(block, &blockHash, blockHeight) - if err != nil { - t.Fatalf("unable to prune graph: %v", err) - } - if len(prunedChans) != 2 { - t.Fatalf("incorrect number of channels pruned: "+ - "expected %v, got %v", 2, prunedChans) - } - - // Now ensure that the prune tip has been updated. - assertPruneTip(t, graph, &blockHash, blockHeight) - - // Count up the number of channels known within the graph, only 2 - // should be remaining. - assertNumChans(t, graph, 2) - - // Those channels should also be missing from the channel view. - channelView, err = graph.ChannelView() - if err != nil { - t.Fatalf("unable to get graph channel view: %v", err) - } - assertChanViewEqualChanPoints(t, channelView, channelPoints[2:]) - - // Next we'll create a block that doesn't close any channels within the - // graph to test the negative error case. 
- fakeHash := sha256.Sum256([]byte("test prune")) - nonChannel := &wire.OutPoint{ - Hash: fakeHash, - Index: 9, - } - blockHash = sha256.Sum256(blockHash[:]) - blockHeight = 2 - prunedChans, err = graph.PruneGraph( - []*wire.OutPoint{nonChannel}, &blockHash, blockHeight, - ) - if err != nil { - t.Fatalf("unable to prune graph: %v", err) - } - - // No channels should have been detected as pruned. - if len(prunedChans) != 0 { - t.Fatalf("channels were pruned but shouldn't have been") - } - - // Once again, the prune tip should have been updated. We should still - // see both channels and their participants, along with the source node. - assertPruneTip(t, graph, &blockHash, blockHeight) - assertNumChans(t, graph, 2) - assertNumNodes(t, graph, 4) - - // Finally, create a block that prunes the remainder of the channels - // from the graph. - blockHash = sha256.Sum256(blockHash[:]) - blockHeight = 3 - prunedChans, err = graph.PruneGraph( - channelPoints[2:], &blockHash, blockHeight, - ) - if err != nil { - t.Fatalf("unable to prune graph: %v", err) - } - - // The remainder of the channels should have been pruned from the - // graph. - if len(prunedChans) != 2 { - t.Fatalf("incorrect number of channels pruned: "+ - "expected %v, got %v", 2, len(prunedChans)) - } - - // The prune tip should be updated, no channels should be found, and - // only the source node should remain within the current graph. - assertPruneTip(t, graph, &blockHash, blockHeight) - assertNumChans(t, graph, 0) - assertNumNodes(t, graph, 1) - - // Finally, the channel view at this point in the graph should now be - // completely empty. Those channels should also be missing from the - // channel view. 
- channelView, err = graph.ChannelView() - if err != nil { - t.Fatalf("unable to get graph channel view: %v", err) - } - if len(channelView) != 0 { - t.Fatalf("channel view should be empty, instead have: %v", - channelView) - } -} - -// TestHighestChanID tests that we're able to properly retrieve the highest -// known channel ID in the database. -func TestHighestChanID(t *testing.T) { - t.Parallel() - - db, cleanUp, err := MakeTestDB() - defer cleanUp() - if err != nil { - t.Fatalf("unable to make test database: %v", err) - } - - graph := db.ChannelGraph() - - // If we don't yet have any channels in the database, then we should - // get a channel ID of zero if we ask for the highest channel ID. - bestID, err := graph.HighestChanID() - if err != nil { - t.Fatalf("unable to get highest ID: %v", err) - } - if bestID != 0 { - t.Fatalf("best ID w/ no chan should be zero, is instead: %v", - bestID) - } - - // Next, we'll insert two channels into the database, with each channel - // connecting the same two nodes. - node1, err := createTestVertex(db) - if err != nil { - t.Fatalf("unable to create test node: %v", err) - } - node2, err := createTestVertex(db) - if err != nil { - t.Fatalf("unable to create test node: %v", err) - } - - // The first channel with be at height 10, while the other will be at - // height 100. - edge1, _ := createEdge(10, 0, 0, 0, node1, node2) - edge2, chanID2 := createEdge(100, 0, 0, 0, node1, node2) - - if err := graph.AddChannelEdge(&edge1); err != nil { - t.Fatalf("unable to create channel edge: %v", err) - } - if err := graph.AddChannelEdge(&edge2); err != nil { - t.Fatalf("unable to create channel edge: %v", err) - } - - // Now that the edges has been inserted, we'll query for the highest - // known channel ID in the database. 
- bestID, err = graph.HighestChanID() - if err != nil { - t.Fatalf("unable to get highest ID: %v", err) - } - - if bestID != chanID2.ToUint64() { - t.Fatalf("expected %v got %v for best chan ID: ", - chanID2.ToUint64(), bestID) - } - - // If we add another edge, then the current best chan ID should be - // updated as well. - edge3, chanID3 := createEdge(1000, 0, 0, 0, node1, node2) - if err := graph.AddChannelEdge(&edge3); err != nil { - t.Fatalf("unable to create channel edge: %v", err) - } - bestID, err = graph.HighestChanID() - if err != nil { - t.Fatalf("unable to get highest ID: %v", err) - } - - if bestID != chanID3.ToUint64() { - t.Fatalf("expected %v got %v for best chan ID: ", - chanID3.ToUint64(), bestID) - } -} - -// TestChanUpdatesInHorizon tests the we're able to properly retrieve all known -// channel updates within a specific time horizon. It also tests that upon -// insertion of a new edge, the edge update index is updated properly. -func TestChanUpdatesInHorizon(t *testing.T) { - t.Parallel() - - db, cleanUp, err := MakeTestDB() - defer cleanUp() - if err != nil { - t.Fatalf("unable to make test database: %v", err) - } - - graph := db.ChannelGraph() - - // If we issue an arbitrary query before any channel updates are - // inserted in the database, we should get zero results. - chanUpdates, err := graph.ChanUpdatesInHorizon( - time.Unix(999, 0), time.Unix(9999, 0), - ) - if err != nil { - t.Fatalf("unable to updates for updates: %v", err) - } - if len(chanUpdates) != 0 { - t.Fatalf("expected 0 chan updates, instead got %v", - len(chanUpdates)) - } - - // We'll start by creating two nodes which will seed our test graph. 
- node1, err := createTestVertex(db) - if err != nil { - t.Fatalf("unable to create test node: %v", err) - } - if err := graph.AddLightningNode(node1); err != nil { - t.Fatalf("unable to add node: %v", err) - } - node2, err := createTestVertex(db) - if err != nil { - t.Fatalf("unable to create test node: %v", err) - } - if err := graph.AddLightningNode(node2); err != nil { - t.Fatalf("unable to add node: %v", err) - } - - // We'll now create 10 channels between the two nodes, with update - // times 10 seconds after each other. - const numChans = 10 - startTime := time.Unix(1234, 0) - endTime := startTime - edges := make([]ChannelEdge, 0, numChans) - for i := 0; i < numChans; i++ { - txHash := sha256.Sum256([]byte{byte(i)}) - op := wire.OutPoint{ - Hash: txHash, - Index: 0, - } - - channel, chanID := createEdge( - uint32(i*10), 0, 0, 0, node1, node2, - ) - - if err := graph.AddChannelEdge(&channel); err != nil { - t.Fatalf("unable to create channel edge: %v", err) - } - - edge1UpdateTime := endTime - edge2UpdateTime := edge1UpdateTime.Add(time.Second) - endTime = endTime.Add(time.Second * 10) - - edge1 := newEdgePolicy( - chanID.ToUint64(), op, db, edge1UpdateTime.Unix(), - ) - edge1.ChannelFlags = 0 - edge1.Node = node2 - edge1.SigBytes = testSig.Serialize() - if err := graph.UpdateEdgePolicy(edge1); err != nil { - t.Fatalf("unable to update edge: %v", err) - } - - edge2 := newEdgePolicy( - chanID.ToUint64(), op, db, edge2UpdateTime.Unix(), - ) - edge2.ChannelFlags = 1 - edge2.Node = node1 - edge2.SigBytes = testSig.Serialize() - if err := graph.UpdateEdgePolicy(edge2); err != nil { - t.Fatalf("unable to update edge: %v", err) - } - - edges = append(edges, ChannelEdge{ - Info: &channel, - Policy1: edge1, - Policy2: edge2, - }) - } - - // With our channels loaded, we'll now start our series of queries. 
- queryCases := []struct { - start time.Time - end time.Time - - resp []ChannelEdge - }{ - // If we query for a time range that's strictly below our set - // of updates, then we'll get an empty result back. - { - start: time.Unix(100, 0), - end: time.Unix(200, 0), - }, - - // If we query for a time range that's well beyond our set of - // updates, we should get an empty set of results back. - { - start: time.Unix(99999, 0), - end: time.Unix(999999, 0), - }, - - // If we query for the start time, and 10 seconds directly - // after it, we should only get a single update, that first - // one. - { - start: time.Unix(1234, 0), - end: startTime.Add(time.Second * 10), - - resp: []ChannelEdge{edges[0]}, - }, - - // If we add 10 seconds past the first update, and then - // subtract 10 from the last update, then we should only get - // the 8 edges in the middle. - { - start: startTime.Add(time.Second * 10), - end: endTime.Add(-time.Second * 10), - - resp: edges[1:9], - }, - - // If we use the start and end time as is, we should get the - // entire range. - { - start: startTime, - end: endTime, - - resp: edges, - }, - } - for _, queryCase := range queryCases { - resp, err := graph.ChanUpdatesInHorizon( - queryCase.start, queryCase.end, - ) - if err != nil { - t.Fatalf("unable to query for updates: %v", err) - } - - if len(resp) != len(queryCase.resp) { - t.Fatalf("expected %v chans, got %v chans", - len(queryCase.resp), len(resp)) - - } - - for i := 0; i < len(resp); i++ { - chanExp := queryCase.resp[i] - chanRet := resp[i] - - assertEdgeInfoEqual(t, chanExp.Info, chanRet.Info) - - err := compareEdgePolicies(chanExp.Policy1, chanRet.Policy1) - if err != nil { - t.Fatal(err) - } - compareEdgePolicies(chanExp.Policy2, chanRet.Policy2) - if err != nil { - t.Fatal(err) - } - } - } -} - -// TestNodeUpdatesInHorizon tests that we're able to properly scan and retrieve -// the most recent node updates within a particular time horizon. 
-func TestNodeUpdatesInHorizon(t *testing.T) { - t.Parallel() - - db, cleanUp, err := MakeTestDB() - defer cleanUp() - if err != nil { - t.Fatalf("unable to make test database: %v", err) - } - - graph := db.ChannelGraph() - - startTime := time.Unix(1234, 0) - endTime := startTime - - // If we issue an arbitrary query before we insert any nodes into the - // database, then we shouldn't get any results back. - nodeUpdates, err := graph.NodeUpdatesInHorizon( - time.Unix(999, 0), time.Unix(9999, 0), - ) - if err != nil { - t.Fatalf("unable to query for node updates: %v", err) - } - if len(nodeUpdates) != 0 { - t.Fatalf("expected 0 node updates, instead got %v", - len(nodeUpdates)) - } - - // We'll create 10 node announcements, each with an update timestamp 10 - // seconds after the other. - const numNodes = 10 - nodeAnns := make([]LightningNode, 0, numNodes) - for i := 0; i < numNodes; i++ { - nodeAnn, err := createTestVertex(db) - if err != nil { - t.Fatalf("unable to create test vertex: %v", err) - } - - // The node ann will use the current end time as its last - // update them, then we'll add 10 seconds in order to create - // the proper update time for the next node announcement. - updateTime := endTime - endTime = updateTime.Add(time.Second * 10) - - nodeAnn.LastUpdate = updateTime - - nodeAnns = append(nodeAnns, *nodeAnn) - - if err := graph.AddLightningNode(nodeAnn); err != nil { - t.Fatalf("unable to add lightning node: %v", err) - } - } - - queryCases := []struct { - start time.Time - end time.Time - - resp []LightningNode - }{ - // If we query for a time range that's strictly below our set - // of updates, then we'll get an empty result back. - { - start: time.Unix(100, 0), - end: time.Unix(200, 0), - }, - - // If we query for a time range that's well beyond our set of - // updates, we should get an empty set of results back. 
- { - start: time.Unix(99999, 0), - end: time.Unix(999999, 0), - }, - - // If we skip he first time epoch with out start time, then we - // should get back every now but the first. - { - start: startTime.Add(time.Second * 10), - end: endTime, - - resp: nodeAnns[1:], - }, - - // If we query for the range as is, we should get all 10 - // announcements back. - { - start: startTime, - end: endTime, - - resp: nodeAnns, - }, - - // If we reduce the ending time by 10 seconds, then we should - // get all but the last node we inserted. - { - start: startTime, - end: endTime.Add(-time.Second * 10), - - resp: nodeAnns[:9], - }, - } - for _, queryCase := range queryCases { - resp, err := graph.NodeUpdatesInHorizon(queryCase.start, queryCase.end) - if err != nil { - t.Fatalf("unable to query for nodes: %v", err) - } - - if len(resp) != len(queryCase.resp) { - t.Fatalf("expected %v nodes, got %v nodes", - len(queryCase.resp), len(resp)) - - } - - for i := 0; i < len(resp); i++ { - err := compareNodes(&queryCase.resp[i], &resp[i]) - if err != nil { - t.Fatal(err) - } - } - } -} - -// TestFilterKnownChanIDs tests that we're able to properly perform the set -// differences of an incoming set of channel ID's, and those that we already -// know of on disk. -func TestFilterKnownChanIDs(t *testing.T) { - t.Parallel() - - db, cleanUp, err := MakeTestDB() - defer cleanUp() - if err != nil { - t.Fatalf("unable to make test database: %v", err) - } - - graph := db.ChannelGraph() - - // If we try to filter out a set of channel ID's before we even know of - // any channels, then we should get the entire set back. - preChanIDs := []uint64{1, 2, 3, 4} - filteredIDs, err := graph.FilterKnownChanIDs(preChanIDs) - if err != nil { - t.Fatalf("unable to filter chan IDs: %v", err) - } - if !reflect.DeepEqual(preChanIDs, filteredIDs) { - t.Fatalf("chan IDs shouldn't have been filtered!") - } - - // We'll start by creating two nodes which will seed our test graph. 
- node1, err := createTestVertex(db) - if err != nil { - t.Fatalf("unable to create test node: %v", err) - } - if err := graph.AddLightningNode(node1); err != nil { - t.Fatalf("unable to add node: %v", err) - } - node2, err := createTestVertex(db) - if err != nil { - t.Fatalf("unable to create test node: %v", err) - } - if err := graph.AddLightningNode(node2); err != nil { - t.Fatalf("unable to add node: %v", err) - } - - // Next, we'll add 5 channel ID's to the graph, each of them having a - // block height 10 blocks after the previous. - const numChans = 5 - chanIDs := make([]uint64, 0, numChans) - for i := 0; i < numChans; i++ { - channel, chanID := createEdge( - uint32(i*10), 0, 0, 0, node1, node2, - ) - - if err := graph.AddChannelEdge(&channel); err != nil { - t.Fatalf("unable to create channel edge: %v", err) - } - - chanIDs = append(chanIDs, chanID.ToUint64()) - } - - const numZombies = 5 - zombieIDs := make([]uint64, 0, numZombies) - for i := 0; i < numZombies; i++ { - channel, chanID := createEdge( - uint32(i*10+1), 0, 0, 0, node1, node2, - ) - if err := graph.AddChannelEdge(&channel); err != nil { - t.Fatalf("unable to create channel edge: %v", err) - } - err := graph.DeleteChannelEdges(channel.ChannelID) - if err != nil { - t.Fatalf("unable to mark edge zombie: %v", err) - } - - zombieIDs = append(zombieIDs, chanID.ToUint64()) - } - - queryCases := []struct { - queryIDs []uint64 - - resp []uint64 - }{ - // If we attempt to filter out all chanIDs we know of, the - // response should be the empty set. - { - queryIDs: chanIDs, - }, - // If we attempt to filter out all zombies that we know of, the - // response should be the empty set. - { - queryIDs: zombieIDs, - }, - - // If we query for a set of ID's that we didn't insert, we - // should get the same set back. - { - queryIDs: []uint64{99, 100}, - resp: []uint64{99, 100}, - }, - - // If we query for a super-set of our the chan ID's inserted, - // we should only get those new chanIDs back. 
- { - queryIDs: append(chanIDs, []uint64{99, 101}...), - resp: []uint64{99, 101}, - }, - } - - for _, queryCase := range queryCases { - resp, err := graph.FilterKnownChanIDs(queryCase.queryIDs) - if err != nil { - t.Fatalf("unable to filter chan IDs: %v", err) - } - - if !reflect.DeepEqual(resp, queryCase.resp) { - t.Fatalf("expected %v, got %v", spew.Sdump(queryCase.resp), - spew.Sdump(resp)) - } - } -} - -// TestFilterChannelRange tests that we're able to properly retrieve the full -// set of short channel ID's for a given block range. -func TestFilterChannelRange(t *testing.T) { - t.Parallel() - - db, cleanUp, err := MakeTestDB() - defer cleanUp() - if err != nil { - t.Fatalf("unable to make test database: %v", err) - } - - graph := db.ChannelGraph() - - // We'll first populate our graph with two nodes. All channels created - // below will be made between these two nodes. - node1, err := createTestVertex(db) - if err != nil { - t.Fatalf("unable to create test node: %v", err) - } - if err := graph.AddLightningNode(node1); err != nil { - t.Fatalf("unable to add node: %v", err) - } - node2, err := createTestVertex(db) - if err != nil { - t.Fatalf("unable to create test node: %v", err) - } - if err := graph.AddLightningNode(node2); err != nil { - t.Fatalf("unable to add node: %v", err) - } - - // If we try to filter a channel range before we have any channels - // inserted, we should get an empty slice of results. - resp, err := graph.FilterChannelRange(10, 100) - if err != nil { - t.Fatalf("unable to filter channels: %v", err) - } - if len(resp) != 0 { - t.Fatalf("expected zero chans, instead got %v", len(resp)) - } - - // To start, we'll create a set of channels, each mined in a block 10 - // blocks after the prior one. 
- startHeight := uint32(100) - endHeight := startHeight - const numChans = 10 - chanIDs := make([]uint64, 0, numChans) - for i := 0; i < numChans; i++ { - chanHeight := endHeight - channel, chanID := createEdge( - uint32(chanHeight), uint32(i+1), 0, 0, node1, node2, - ) - - if err := graph.AddChannelEdge(&channel); err != nil { - t.Fatalf("unable to create channel edge: %v", err) - } - - chanIDs = append(chanIDs, chanID.ToUint64()) - - endHeight += 10 - } - - // With our channels inserted, we'll construct a series of queries that - // we'll execute below in order to exercise the features of the - // FilterKnownChanIDs method. - queryCases := []struct { - startHeight uint32 - endHeight uint32 - - resp []uint64 - }{ - // If we query for the entire range, then we should get the same - // set of short channel IDs back. - { - startHeight: startHeight, - endHeight: endHeight, - - resp: chanIDs, - }, - - // If we query for a range of channels right before our range, we - // shouldn't get any results back. - { - startHeight: 0, - endHeight: 10, - }, - - // If we only query for the last height (range wise), we should - // only get that last channel. - { - startHeight: endHeight - 10, - endHeight: endHeight - 10, - - resp: chanIDs[9:], - }, - - // If we query for just the first height, we should only get a - // single channel back (the first one). - { - startHeight: startHeight, - endHeight: startHeight, - - resp: chanIDs[:1], - }, - } - for i, queryCase := range queryCases { - resp, err := graph.FilterChannelRange( - queryCase.startHeight, queryCase.endHeight, - ) - if err != nil { - t.Fatalf("unable to issue range query: %v", err) - } - - if !reflect.DeepEqual(resp, queryCase.resp) { - t.Fatalf("case #%v: expected %v, got %v", i, - queryCase.resp, resp) - } - } -} - -// TestFetchChanInfos tests that we're able to properly retrieve the full set -// of ChannelEdge structs for a given set of short channel ID's. 
-func TestFetchChanInfos(t *testing.T) { - t.Parallel() - - db, cleanUp, err := MakeTestDB() - defer cleanUp() - if err != nil { - t.Fatalf("unable to make test database: %v", err) - } - - graph := db.ChannelGraph() - - // We'll first populate our graph with two nodes. All channels created - // below will be made between these two nodes. - node1, err := createTestVertex(db) - if err != nil { - t.Fatalf("unable to create test node: %v", err) - } - if err := graph.AddLightningNode(node1); err != nil { - t.Fatalf("unable to add node: %v", err) - } - node2, err := createTestVertex(db) - if err != nil { - t.Fatalf("unable to create test node: %v", err) - } - if err := graph.AddLightningNode(node2); err != nil { - t.Fatalf("unable to add node: %v", err) - } - - // We'll make 5 test channels, ensuring we keep track of which channel - // ID corresponds to a particular ChannelEdge. - const numChans = 5 - startTime := time.Unix(1234, 0) - endTime := startTime - edges := make([]ChannelEdge, 0, numChans) - edgeQuery := make([]uint64, 0, numChans) - for i := 0; i < numChans; i++ { - txHash := sha256.Sum256([]byte{byte(i)}) - op := wire.OutPoint{ - Hash: txHash, - Index: 0, - } - - channel, chanID := createEdge( - uint32(i*10), 0, 0, 0, node1, node2, - ) - - if err := graph.AddChannelEdge(&channel); err != nil { - t.Fatalf("unable to create channel edge: %v", err) - } - - updateTime := endTime - endTime = updateTime.Add(time.Second * 10) - - edge1 := newEdgePolicy( - chanID.ToUint64(), op, db, updateTime.Unix(), - ) - edge1.ChannelFlags = 0 - edge1.Node = node2 - edge1.SigBytes = testSig.Serialize() - if err := graph.UpdateEdgePolicy(edge1); err != nil { - t.Fatalf("unable to update edge: %v", err) - } - - edge2 := newEdgePolicy( - chanID.ToUint64(), op, db, updateTime.Unix(), - ) - edge2.ChannelFlags = 1 - edge2.Node = node1 - edge2.SigBytes = testSig.Serialize() - if err := graph.UpdateEdgePolicy(edge2); err != nil { - t.Fatalf("unable to update edge: %v", err) - } - - edges = 
append(edges, ChannelEdge{ - Info: &channel, - Policy1: edge1, - Policy2: edge2, - }) - - edgeQuery = append(edgeQuery, chanID.ToUint64()) - } - - // Add an additional edge that does not exist. The query should skip - // this channel and return only infos for the edges that exist. - edgeQuery = append(edgeQuery, 500) - - // Add an another edge to the query that has been marked as a zombie - // edge. The query should also skip this channel. - zombieChan, zombieChanID := createEdge( - 666, 0, 0, 0, node1, node2, - ) - if err := graph.AddChannelEdge(&zombieChan); err != nil { - t.Fatalf("unable to create channel edge: %v", err) - } - err = graph.DeleteChannelEdges(zombieChan.ChannelID) - if err != nil { - t.Fatalf("unable to delete and mark edge zombie: %v", err) - } - edgeQuery = append(edgeQuery, zombieChanID.ToUint64()) - - // We'll now attempt to query for the range of channel ID's we just - // inserted into the database. We should get the exact same set of - // edges back. - resp, err := graph.FetchChanInfos(edgeQuery) - if err != nil { - t.Fatalf("unable to fetch chan edges: %v", err) - } - if len(resp) != len(edges) { - t.Fatalf("expected %v edges, instead got %v", len(edges), - len(resp)) - } - - for i := 0; i < len(resp); i++ { - err := compareEdgePolicies(resp[i].Policy1, edges[i].Policy1) - if err != nil { - t.Fatalf("edge doesn't match: %v", err) - } - err = compareEdgePolicies(resp[i].Policy2, edges[i].Policy2) - if err != nil { - t.Fatalf("edge doesn't match: %v", err) - } - assertEdgeInfoEqual(t, resp[i].Info, edges[i].Info) - } -} - -// TestIncompleteChannelPolicies tests that a channel that only has a policy -// specified on one end is properly returned in ForEachChannel calls from -// both sides. -func TestIncompleteChannelPolicies(t *testing.T) { - t.Parallel() - - db, cleanUp, err := MakeTestDB() - defer cleanUp() - if err != nil { - t.Fatalf("unable to make test database: %v", err) - } - - graph := db.ChannelGraph() - - // Create two nodes. 
- node1, err := createTestVertex(db) - if err != nil { - t.Fatalf("unable to create test node: %v", err) - } - if err := graph.AddLightningNode(node1); err != nil { - t.Fatalf("unable to add node: %v", err) - } - node2, err := createTestVertex(db) - if err != nil { - t.Fatalf("unable to create test node: %v", err) - } - if err := graph.AddLightningNode(node2); err != nil { - t.Fatalf("unable to add node: %v", err) - } - - // Create channel between nodes. - txHash := sha256.Sum256([]byte{0}) - op := wire.OutPoint{ - Hash: txHash, - Index: 0, - } - - channel, chanID := createEdge( - uint32(0), 0, 0, 0, node1, node2, - ) - - if err := graph.AddChannelEdge(&channel); err != nil { - t.Fatalf("unable to create channel edge: %v", err) - } - - // Ensure that channel is reported with unknown policies. - checkPolicies := func(node *LightningNode, expectedIn, expectedOut bool) { - calls := 0 - err := node.ForEachChannel(nil, func(_ kvdb.RTx, _ *ChannelEdgeInfo, - outEdge, inEdge *ChannelEdgePolicy) er.R { - - if !expectedOut && outEdge != nil { - t.Fatalf("Expected no outgoing policy") - } - - if expectedOut && outEdge == nil { - t.Fatalf("Expected an outgoing policy") - } - - if !expectedIn && inEdge != nil { - t.Fatalf("Expected no incoming policy") - } - - if expectedIn && inEdge == nil { - t.Fatalf("Expected an incoming policy") - } - - calls++ - - return nil - }) - if err != nil { - t.Fatalf("unable to scan channels: %v", err) - } - - if calls != 1 { - t.Fatalf("Expected only one callback call") - } - } - - checkPolicies(node2, false, false) - - // Only create an edge policy for node1 and leave the policy for node2 - // unknown. 
- updateTime := time.Unix(1234, 0) - - edgePolicy := newEdgePolicy( - chanID.ToUint64(), op, db, updateTime.Unix(), - ) - edgePolicy.ChannelFlags = 0 - edgePolicy.Node = node2 - edgePolicy.SigBytes = testSig.Serialize() - if err := graph.UpdateEdgePolicy(edgePolicy); err != nil { - t.Fatalf("unable to update edge: %v", err) - } - - checkPolicies(node1, false, true) - checkPolicies(node2, true, false) - - // Create second policy and assert that both policies are reported - // as present. - edgePolicy = newEdgePolicy( - chanID.ToUint64(), op, db, updateTime.Unix(), - ) - edgePolicy.ChannelFlags = 1 - edgePolicy.Node = node1 - edgePolicy.SigBytes = testSig.Serialize() - if err := graph.UpdateEdgePolicy(edgePolicy); err != nil { - t.Fatalf("unable to update edge: %v", err) - } - - checkPolicies(node1, true, true) - checkPolicies(node2, true, true) -} - -// TestChannelEdgePruningUpdateIndexDeletion tests that once edges are deleted -// from the graph, then their entries within the update index are also cleaned -// up. -func TestChannelEdgePruningUpdateIndexDeletion(t *testing.T) { - t.Parallel() - - db, cleanUp, err := MakeTestDB() - defer cleanUp() - if err != nil { - t.Fatalf("unable to make test database: %v", err) - } - - graph := db.ChannelGraph() - sourceNode, err := createTestVertex(db) - if err != nil { - t.Fatalf("unable to create source node: %v", err) - } - if err := graph.SetSourceNode(sourceNode); err != nil { - t.Fatalf("unable to set source node: %v", err) - } - - // We'll first populate our graph with two nodes. All channels created - // below will be made between these two nodes. 
- node1, err := createTestVertex(db) - if err != nil { - t.Fatalf("unable to create test node: %v", err) - } - if err := graph.AddLightningNode(node1); err != nil { - t.Fatalf("unable to add node: %v", err) - } - node2, err := createTestVertex(db) - if err != nil { - t.Fatalf("unable to create test node: %v", err) - } - if err := graph.AddLightningNode(node2); err != nil { - t.Fatalf("unable to add node: %v", err) - } - - // With the two nodes created, we'll now create a random channel, as - // well as two edges in the database with distinct update times. - edgeInfo, chanID := createEdge(100, 0, 0, 0, node1, node2) - if err := graph.AddChannelEdge(&edgeInfo); err != nil { - t.Fatalf("unable to add edge: %v", err) - } - - edge1 := randEdgePolicy(chanID.ToUint64(), edgeInfo.ChannelPoint, db) - edge1.ChannelFlags = 0 - edge1.Node = node1 - edge1.SigBytes = testSig.Serialize() - if err := graph.UpdateEdgePolicy(edge1); err != nil { - t.Fatalf("unable to update edge: %v", err) - } - - edge2 := randEdgePolicy(chanID.ToUint64(), edgeInfo.ChannelPoint, db) - edge2.ChannelFlags = 1 - edge2.Node = node2 - edge2.SigBytes = testSig.Serialize() - if err := graph.UpdateEdgePolicy(edge2); err != nil { - t.Fatalf("unable to update edge: %v", err) - } - - // checkIndexTimestamps is a helper function that checks the edge update - // index only includes the given timestamps. 
- checkIndexTimestamps := func(timestamps ...uint64) { - timestampSet := make(map[uint64]struct{}) - for _, t := range timestamps { - timestampSet[t] = struct{}{} - } - - err := kvdb.View(db, func(tx kvdb.RTx) er.R { - edges := tx.ReadBucket(edgeBucket) - if edges == nil { - return ErrGraphNoEdgesFound.Default() - } - edgeUpdateIndex := edges.NestedReadBucket( - edgeUpdateIndexBucket, - ) - if edgeUpdateIndex == nil { - return ErrGraphNoEdgesFound.Default() - } - - var numEntries int - err := edgeUpdateIndex.ForEach(func(k, v []byte) er.R { - numEntries++ - return nil - }) - if err != nil { - return err - } - - expectedEntries := len(timestampSet) - if numEntries != expectedEntries { - return er.Errorf("expected %v entries in the "+ - "update index, got %v", expectedEntries, - numEntries) - } - - return edgeUpdateIndex.ForEach(func(k, _ []byte) er.R { - t := byteOrder.Uint64(k[:8]) - if _, ok := timestampSet[t]; !ok { - return er.Errorf("found unexpected "+ - "timestamp "+"%d", t) - } - - return nil - }) - }, func() {}) - if err != nil { - t.Fatal(err) - } - } - - // With both edges policies added, we'll make sure to check they exist - // within the edge update index. - checkIndexTimestamps( - uint64(edge1.LastUpdate.Unix()), - uint64(edge2.LastUpdate.Unix()), - ) - - // Now, we'll update the edge policies to ensure the old timestamps are - // removed from the update index. - edge1.ChannelFlags = 2 - edge1.LastUpdate = time.Now() - if err := graph.UpdateEdgePolicy(edge1); err != nil { - t.Fatalf("unable to update edge: %v", err) - } - edge2.ChannelFlags = 3 - edge2.LastUpdate = edge1.LastUpdate.Add(time.Hour) - if err := graph.UpdateEdgePolicy(edge2); err != nil { - t.Fatalf("unable to update edge: %v", err) - } - - // With the policies updated, we should now be able to find their - // updated entries within the update index. 
- checkIndexTimestamps( - uint64(edge1.LastUpdate.Unix()), - uint64(edge2.LastUpdate.Unix()), - ) - - // Now we'll prune the graph, removing the edges, and also the update - // index entries from the database all together. - var blockHash chainhash.Hash - copy(blockHash[:], bytes.Repeat([]byte{2}, 32)) - _, err = graph.PruneGraph( - []*wire.OutPoint{&edgeInfo.ChannelPoint}, &blockHash, 101, - ) - if err != nil { - t.Fatalf("unable to prune graph: %v", err) - } - - // Finally, we'll check the database state one last time to conclude - // that we should no longer be able to locate _any_ entries within the - // edge update index. - checkIndexTimestamps() -} - -// TestPruneGraphNodes tests that unconnected vertexes are pruned via the -// PruneSyncState method. -func TestPruneGraphNodes(t *testing.T) { - t.Parallel() - - db, cleanUp, err := MakeTestDB() - defer cleanUp() - if err != nil { - t.Fatalf("unable to make test database: %v", err) - } - - // We'll start off by inserting our source node, to ensure that it's - // the only node left after we prune the graph. - graph := db.ChannelGraph() - sourceNode, err := createTestVertex(db) - if err != nil { - t.Fatalf("unable to create source node: %v", err) - } - if err := graph.SetSourceNode(sourceNode); err != nil { - t.Fatalf("unable to set source node: %v", err) - } - - // With the source node inserted, we'll now add three nodes to the - // channel graph, at the end of the scenario, only two of these nodes - // should still be in the graph. 
- node1, err := createTestVertex(db) - if err != nil { - t.Fatalf("unable to create test node: %v", err) - } - if err := graph.AddLightningNode(node1); err != nil { - t.Fatalf("unable to add node: %v", err) - } - node2, err := createTestVertex(db) - if err != nil { - t.Fatalf("unable to create test node: %v", err) - } - if err := graph.AddLightningNode(node2); err != nil { - t.Fatalf("unable to add node: %v", err) - } - node3, err := createTestVertex(db) - if err != nil { - t.Fatalf("unable to create test node: %v", err) - } - if err := graph.AddLightningNode(node3); err != nil { - t.Fatalf("unable to add node: %v", err) - } - - // We'll now add a new edge to the graph, but only actually advertise - // the edge of *one* of the nodes. - edgeInfo, chanID := createEdge(100, 0, 0, 0, node1, node2) - if err := graph.AddChannelEdge(&edgeInfo); err != nil { - t.Fatalf("unable to add edge: %v", err) - } - - // We'll now insert an advertised edge, but it'll only be the edge that - // points from the first to the second node. - edge1 := randEdgePolicy(chanID.ToUint64(), edgeInfo.ChannelPoint, db) - edge1.ChannelFlags = 0 - edge1.Node = node1 - edge1.SigBytes = testSig.Serialize() - if err := graph.UpdateEdgePolicy(edge1); err != nil { - t.Fatalf("unable to update edge: %v", err) - } - - // We'll now initiate a around of graph pruning. - if err := graph.PruneGraphNodes(); err != nil { - t.Fatalf("unable to prune graph nodes: %v", err) - } - - // At this point, there should be 3 nodes left in the graph still: the - // source node (which can't be pruned), and node 1+2. Nodes 1 and two - // should still be left in the graph as there's half of an advertised - // edge between them. - assertNumNodes(t, graph, 3) - - // Finally, we'll ensure that node3, the only fully unconnected node as - // properly deleted from the graph and not another node in its place. 
- _, err = graph.FetchLightningNode(nil, node3.PubKeyBytes) - if err == nil { - t.Fatalf("node 3 should have been deleted!") - } -} - -// TestAddChannelEdgeShellNodes tests that when we attempt to add a ChannelEdge -// to the graph, one or both of the nodes the edge involves aren't found in the -// database, then shell edges are created for each node if needed. -func TestAddChannelEdgeShellNodes(t *testing.T) { - t.Parallel() - - db, cleanUp, err := MakeTestDB() - defer cleanUp() - if err != nil { - t.Fatalf("unable to make test database: %v", err) - } - - graph := db.ChannelGraph() - - // To start, we'll create two nodes, and only add one of them to the - // channel graph. - node1, err := createTestVertex(db) - if err != nil { - t.Fatalf("unable to create test node: %v", err) - } - if err := graph.AddLightningNode(node1); err != nil { - t.Fatalf("unable to add node: %v", err) - } - node2, err := createTestVertex(db) - if err != nil { - t.Fatalf("unable to create test node: %v", err) - } - - // We'll now create an edge between the two nodes, as a result, node2 - // should be inserted into the database as a shell node. - edgeInfo, _ := createEdge(100, 0, 0, 0, node1, node2) - if err := graph.AddChannelEdge(&edgeInfo); err != nil { - t.Fatalf("unable to add edge: %v", err) - } - - // Ensure that node1 was inserted as a full node, while node2 only has - // a shell node present. 
- node1, err = graph.FetchLightningNode(nil, node1.PubKeyBytes) - if err != nil { - t.Fatalf("unable to fetch node1: %v", err) - } - if !node1.HaveNodeAnnouncement { - t.Fatalf("have shell announcement for node1, shouldn't") - } - - node2, err = graph.FetchLightningNode(nil, node2.PubKeyBytes) - if err != nil { - t.Fatalf("unable to fetch node2: %v", err) - } - if node2.HaveNodeAnnouncement { - t.Fatalf("should have shell announcement for node2, but is full") - } -} - -// TestNodePruningUpdateIndexDeletion tests that once a node has been removed -// from the channel graph, we also remove the entry from the update index as -// well. -func TestNodePruningUpdateIndexDeletion(t *testing.T) { - t.Parallel() - - db, cleanUp, err := MakeTestDB() - defer cleanUp() - if err != nil { - t.Fatalf("unable to make test database: %v", err) - } - - graph := db.ChannelGraph() - - // We'll first populate our graph with a single node that will be - // removed shortly. - node1, err := createTestVertex(db) - if err != nil { - t.Fatalf("unable to create test node: %v", err) - } - if err := graph.AddLightningNode(node1); err != nil { - t.Fatalf("unable to add node: %v", err) - } - - // We'll confirm that we can retrieve the node using - // NodeUpdatesInHorizon, using a time that's slightly beyond the last - // update time of our test node. - startTime := time.Unix(9, 0) - endTime := node1.LastUpdate.Add(time.Minute) - nodesInHorizon, err := graph.NodeUpdatesInHorizon(startTime, endTime) - if err != nil { - t.Fatalf("unable to fetch nodes in horizon: %v", err) - } - - // We should only have a single node, and that node should exactly - // match the node we just inserted. 
- if len(nodesInHorizon) != 1 { - t.Fatalf("should have 1 nodes instead have: %v", - len(nodesInHorizon)) - } - if err := compareNodes(node1, &nodesInHorizon[0]); err != nil { - t.Fatalf("nodes don't match: %v", err) - } - - // We'll now delete the node from the graph, this should result in it - // being removed from the update index as well. - if err := graph.DeleteLightningNode(node1.PubKeyBytes); err != nil { - t.Fatalf("unable to delete node: %v", err) - } - - // Now that the node has been deleted, we'll again query the nodes in - // the horizon. This time we should have no nodes at all. - nodesInHorizon, err = graph.NodeUpdatesInHorizon(startTime, endTime) - if err != nil { - t.Fatalf("unable to fetch nodes in horizon: %v", err) - } - - if len(nodesInHorizon) != 0 { - t.Fatalf("should have zero nodes instead have: %v", - len(nodesInHorizon)) - } -} - -// TestNodeIsPublic ensures that we properly detect nodes that are seen as -// public within the network graph. -func TestNodeIsPublic(t *testing.T) { - t.Parallel() - - // We'll start off the test by creating a small network of 3 - // participants with the following graph: - // - // Alice <-> Bob <-> Carol - // - // We'll need to create a separate database and channel graph for each - // participant to replicate real-world scenarios (private edges being in - // some graphs but not others, etc.). 
- aliceDB, cleanUp, err := MakeTestDB() - defer cleanUp() - if err != nil { - t.Fatalf("unable to make test database: %v", err) - } - aliceNode, err := createTestVertex(aliceDB) - if err != nil { - t.Fatalf("unable to create test node: %v", err) - } - aliceGraph := aliceDB.ChannelGraph() - if err := aliceGraph.SetSourceNode(aliceNode); err != nil { - t.Fatalf("unable to set source node: %v", err) - } - - bobDB, cleanUp, err := MakeTestDB() - defer cleanUp() - if err != nil { - t.Fatalf("unable to make test database: %v", err) - } - bobNode, err := createTestVertex(bobDB) - if err != nil { - t.Fatalf("unable to create test node: %v", err) - } - bobGraph := bobDB.ChannelGraph() - if err := bobGraph.SetSourceNode(bobNode); err != nil { - t.Fatalf("unable to set source node: %v", err) - } - - carolDB, cleanUp, err := MakeTestDB() - defer cleanUp() - if err != nil { - t.Fatalf("unable to make test database: %v", err) - } - carolNode, err := createTestVertex(carolDB) - if err != nil { - t.Fatalf("unable to create test node: %v", err) - } - carolGraph := carolDB.ChannelGraph() - if err := carolGraph.SetSourceNode(carolNode); err != nil { - t.Fatalf("unable to set source node: %v", err) - } - - aliceBobEdge, _ := createEdge(10, 0, 0, 0, aliceNode, bobNode) - bobCarolEdge, _ := createEdge(10, 1, 0, 1, bobNode, carolNode) - - // After creating all of our nodes and edges, we'll add them to each - // participant's graph. 
- nodes := []*LightningNode{aliceNode, bobNode, carolNode} - edges := []*ChannelEdgeInfo{&aliceBobEdge, &bobCarolEdge} - dbs := []*DB{aliceDB, bobDB, carolDB} - graphs := []*ChannelGraph{aliceGraph, bobGraph, carolGraph} - for i, graph := range graphs { - for _, node := range nodes { - node.db = dbs[i] - if err := graph.AddLightningNode(node); err != nil { - t.Fatalf("unable to add node: %v", err) - } - } - for _, edge := range edges { - edge.db = dbs[i] - if err := graph.AddChannelEdge(edge); err != nil { - t.Fatalf("unable to add edge: %v", err) - } - } - } - - // checkNodes is a helper closure that will be used to assert that the - // given nodes are seen as public/private within the given graphs. - checkNodes := func(nodes []*LightningNode, graphs []*ChannelGraph, - public bool) { - - t.Helper() - - for _, node := range nodes { - for _, graph := range graphs { - isPublic, err := graph.IsPublicNode(node.PubKeyBytes) - if err != nil { - t.Fatalf("unable to determine if pivot "+ - "is public: %v", err) - } - - switch { - case isPublic && !public: - t.Fatalf("expected %x to be private", - node.PubKeyBytes) - case !isPublic && public: - t.Fatalf("expected %x to be public", - node.PubKeyBytes) - } - } - } - } - - // Due to the way the edges were set up above, we'll make sure each node - // can correctly determine that every other node is public. - checkNodes(nodes, graphs, true) - - // Now, we'll remove the edge between Alice and Bob from everyone's - // graph. This will make Alice be seen as a private node as it no longer - // has any advertised edges. - for _, graph := range graphs { - err := graph.DeleteChannelEdges(aliceBobEdge.ChannelID) - if err != nil { - t.Fatalf("unable to remove edge: %v", err) - } - } - checkNodes( - []*LightningNode{aliceNode}, - []*ChannelGraph{bobGraph, carolGraph}, - false, - ) - - // We'll also make the edge between Bob and Carol private. 
Within Bob's - // and Carol's graph, the edge will exist, but it will not have a proof - // that allows it to be advertised. Within Alice's graph, we'll - // completely remove the edge as it is not possible for her to know of - // it without it being advertised. - for i, graph := range graphs { - err := graph.DeleteChannelEdges(bobCarolEdge.ChannelID) - if err != nil { - t.Fatalf("unable to remove edge: %v", err) - } - - if graph == aliceGraph { - continue - } - - bobCarolEdge.AuthProof = nil - bobCarolEdge.db = dbs[i] - if err := graph.AddChannelEdge(&bobCarolEdge); err != nil { - t.Fatalf("unable to add edge: %v", err) - } - } - - // With the modifications above, Bob should now be seen as a private - // node from both Alice's and Carol's perspective. - checkNodes( - []*LightningNode{bobNode}, - []*ChannelGraph{aliceGraph, carolGraph}, - false, - ) -} - -// TestDisabledChannelIDs ensures that the disabled channels within the -// disabledEdgePolicyBucket are managed properly and the list returned from -// DisabledChannelIDs is correct. -func TestDisabledChannelIDs(t *testing.T) { - t.Parallel() - - db, cleanUp, err := MakeTestDB() - if err != nil { - t.Fatalf("unable to make test database: %v", err) - } - defer cleanUp() - - graph := db.ChannelGraph() - - // Create first node and add it to the graph. - node1, err := createTestVertex(db) - if err != nil { - t.Fatalf("unable to create test node: %v", err) - } - if err := graph.AddLightningNode(node1); err != nil { - t.Fatalf("unable to add node: %v", err) - } - - // Create second node and add it to the graph. - node2, err := createTestVertex(db) - if err != nil { - t.Fatalf("unable to create test node: %v", err) - } - if err := graph.AddLightningNode(node2); err != nil { - t.Fatalf("unable to add node: %v", err) - } - - // Adding a new channel edge to the graph. 
- edgeInfo, edge1, edge2 := createChannelEdge(db, node1, node2) - if err := graph.AddLightningNode(node2); err != nil { - t.Fatalf("unable to add node: %v", err) - } - - if err := graph.AddChannelEdge(edgeInfo); err != nil { - t.Fatalf("unable to create channel edge: %v", err) - } - - // Ensure no disabled channels exist in the bucket on start. - disabledChanIds, err := graph.DisabledChannelIDs() - if err != nil { - t.Fatalf("unable to get disabled channel ids: %v", err) - } - if len(disabledChanIds) > 0 { - t.Fatalf("expected empty disabled channels, got %v disabled channels", - len(disabledChanIds)) - } - - // Add one disabled policy and ensure the channel is still not in the - // disabled list. - edge1.ChannelFlags |= lnwire.ChanUpdateDisabled - if err := graph.UpdateEdgePolicy(edge1); err != nil { - t.Fatalf("unable to update edge: %v", err) - } - disabledChanIds, err = graph.DisabledChannelIDs() - if err != nil { - t.Fatalf("unable to get disabled channel ids: %v", err) - } - if len(disabledChanIds) > 0 { - t.Fatalf("expected empty disabled channels, got %v disabled channels", - len(disabledChanIds)) - } - - // Add second disabled policy and ensure the channel is now in the - // disabled list. - edge2.ChannelFlags |= lnwire.ChanUpdateDisabled - if err := graph.UpdateEdgePolicy(edge2); err != nil { - t.Fatalf("unable to update edge: %v", err) - } - disabledChanIds, err = graph.DisabledChannelIDs() - if err != nil { - t.Fatalf("unable to get disabled channel ids: %v", err) - } - if len(disabledChanIds) != 1 || disabledChanIds[0] != edgeInfo.ChannelID { - t.Fatalf("expected disabled channel with id %v, "+ - "got %v", edgeInfo.ChannelID, disabledChanIds) - } - - // Delete the channel edge and ensure it is removed from the disabled list. 
- if err = graph.DeleteChannelEdges(edgeInfo.ChannelID); err != nil { - t.Fatalf("unable to delete channel edge: %v", err) - } - disabledChanIds, err = graph.DisabledChannelIDs() - if err != nil { - t.Fatalf("unable to get disabled channel ids: %v", err) - } - if len(disabledChanIds) > 0 { - t.Fatalf("expected empty disabled channels, got %v disabled channels", - len(disabledChanIds)) - } -} - -// TestEdgePolicyMissingMaxHtcl tests that if we find a ChannelEdgePolicy in -// the DB that indicates that it should support the htlc_maximum_value_msat -// field, but it is not part of the opaque data, then we'll handle it as it is -// unknown. It also checks that we are correctly able to overwrite it when we -// receive the proper update. -func TestEdgePolicyMissingMaxHtcl(t *testing.T) { - t.Parallel() - - db, cleanUp, err := MakeTestDB() - defer cleanUp() - if err != nil { - t.Fatalf("unable to make test database: %v", err) - } - - graph := db.ChannelGraph() - - // We'd like to test the update of edges inserted into the database, so - // we create two vertexes to connect. - node1, err := createTestVertex(db) - if err != nil { - t.Fatalf("unable to create test node: %v", err) - } - if err := graph.AddLightningNode(node1); err != nil { - t.Fatalf("unable to add node: %v", err) - } - node2, err := createTestVertex(db) - if err != nil { - t.Fatalf("unable to create test node: %v", err) - } - - edgeInfo, edge1, edge2 := createChannelEdge(db, node1, node2) - if err := graph.AddLightningNode(node2); err != nil { - t.Fatalf("unable to add node: %v", err) - } - if err := graph.AddChannelEdge(edgeInfo); err != nil { - t.Fatalf("unable to create channel edge: %v", err) - } - - chanID := edgeInfo.ChannelID - from := edge2.Node.PubKeyBytes[:] - to := edge1.Node.PubKeyBytes[:] - - // We'll remove the no max_htlc field from the first edge policy, and - // all other opaque data, and serialize it. 
- edge1.MessageFlags = 0 - edge1.ExtraOpaqueData = nil - - var b bytes.Buffer - err = serializeChanEdgePolicy(&b, edge1, to) - if err != nil { - t.Fatalf("unable to serialize policy") - } - - // Set the max_htlc field. The extra bytes added to the serialization - // will be the opaque data containing the serialized field. - edge1.MessageFlags = lnwire.ChanUpdateOptionMaxHtlc - edge1.MaxHTLC = 13928598 - var b2 bytes.Buffer - err = serializeChanEdgePolicy(&b2, edge1, to) - if err != nil { - t.Fatalf("unable to serialize policy") - } - - withMaxHtlc := b2.Bytes() - - // Remove the opaque data from the serialization. - stripped := withMaxHtlc[:len(b.Bytes())] - - // Attempting to deserialize these bytes should return an error. - r := bytes.NewReader(stripped) - err = kvdb.View(db, func(tx kvdb.RTx) er.R { - nodes := tx.ReadBucket(nodeBucket) - if nodes == nil { - return ErrGraphNotFound.Default() - } - - _, err = deserializeChanEdgePolicy(r, nodes) - if !ErrEdgePolicyOptionalFieldNotFound.Is(err) { - t.Fatalf("expected "+ - "ErrEdgePolicyOptionalFieldNotFound, got %v", - err) - } - - return nil - }, func() {}) - if err != nil { - t.Fatalf("error reading db: %v", err) - } - - // Put the stripped bytes in the DB. 
- err = kvdb.Update(db, func(tx kvdb.RwTx) er.R { - edges := tx.ReadWriteBucket(edgeBucket) - if edges == nil { - return ErrEdgeNotFound.Default() - } - - edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket) - if edgeIndex == nil { - return ErrEdgeNotFound.Default() - } - - var edgeKey [33 + 8]byte - copy(edgeKey[:], from) - byteOrder.PutUint64(edgeKey[33:], edge1.ChannelID) - - var scratch [8]byte - var indexKey [8 + 8]byte - copy(indexKey[:], scratch[:]) - byteOrder.PutUint64(indexKey[8:], edge1.ChannelID) - - updateIndex, err := edges.CreateBucketIfNotExists(edgeUpdateIndexBucket) - if err != nil { - return err - } - - if err := updateIndex.Put(indexKey[:], nil); err != nil { - return err - } - - return edges.Put(edgeKey[:], stripped) - }, func() {}) - if err != nil { - t.Fatalf("error writing db: %v", err) - } - - // And add the second, unmodified edge. - if err := graph.UpdateEdgePolicy(edge2); err != nil { - t.Fatalf("unable to update edge: %v", err) - } - - // Attempt to fetch the edge and policies from the DB. Since the policy - // we added is invalid according to the new format, it should be as we - // are not aware of the policy (indicated by the policy returned being - // nil) - dbEdgeInfo, dbEdge1, dbEdge2, err := graph.FetchChannelEdgesByID(chanID) - if err != nil { - t.Fatalf("unable to fetch channel by ID: %v", err) - } - - // The first edge should have a nil-policy returned - if dbEdge1 != nil { - t.Fatalf("expected db edge to be nil") - } - if err := compareEdgePolicies(dbEdge2, edge2); err != nil { - t.Fatalf("edge doesn't match: %v", err) - } - assertEdgeInfoEqual(t, dbEdgeInfo, edgeInfo) - - // Now add the original, unmodified edge policy, and make sure the edge - // policies then become fully populated. 
- if err := graph.UpdateEdgePolicy(edge1); err != nil { - t.Fatalf("unable to update edge: %v", err) - } - - dbEdgeInfo, dbEdge1, dbEdge2, err = graph.FetchChannelEdgesByID(chanID) - if err != nil { - t.Fatalf("unable to fetch channel by ID: %v", err) - } - if err := compareEdgePolicies(dbEdge1, edge1); err != nil { - t.Fatalf("edge doesn't match: %v", err) - } - if err := compareEdgePolicies(dbEdge2, edge2); err != nil { - t.Fatalf("edge doesn't match: %v", err) - } - assertEdgeInfoEqual(t, dbEdgeInfo, edgeInfo) -} - -// assertNumZombies queries the provided ChannelGraph for NumZombies, and -// asserts that the returned number is equal to expZombies. -func assertNumZombies(t *testing.T, graph *ChannelGraph, expZombies uint64) { - t.Helper() - - numZombies, err := graph.NumZombies() - if err != nil { - t.Fatalf("unable to query number of zombies: %v", err) - } - - if numZombies != expZombies { - t.Fatalf("expected %d zombies, found %d", - expZombies, numZombies) - } -} - -// TestGraphZombieIndex ensures that we can mark edges correctly as zombie/live. -func TestGraphZombieIndex(t *testing.T) { - t.Parallel() - - // We'll start by creating our test graph along with a test edge. - db, cleanUp, err := MakeTestDB() - defer cleanUp() - if err != nil { - t.Fatalf("unable to create test database: %v", err) - } - graph := db.ChannelGraph() - - node1, err := createTestVertex(db) - if err != nil { - t.Fatalf("unable to create test vertex: %v", err) - } - node2, err := createTestVertex(db) - if err != nil { - t.Fatalf("unable to create test vertex: %v", err) - } - - // Swap the nodes if the second's pubkey is smaller than the first. - // Without this, the comparisons at the end will fail probabilistically. 
- if bytes.Compare(node2.PubKeyBytes[:], node1.PubKeyBytes[:]) < 0 { - node1, node2 = node2, node1 - } - - edge, _, _ := createChannelEdge(db, node1, node2) - if err := graph.AddChannelEdge(edge); err != nil { - t.Fatalf("unable to create channel edge: %v", err) - } - - // Since the edge is known the graph and it isn't a zombie, IsZombieEdge - // should not report the channel as a zombie. - isZombie, _, _ := graph.IsZombieEdge(edge.ChannelID) - if isZombie { - t.Fatal("expected edge to not be marked as zombie") - } - assertNumZombies(t, graph, 0) - - // If we delete the edge and mark it as a zombie, then we should expect - // to see it within the index. - err = graph.DeleteChannelEdges(edge.ChannelID) - if err != nil { - t.Fatalf("unable to mark edge as zombie: %v", err) - } - isZombie, pubKey1, pubKey2 := graph.IsZombieEdge(edge.ChannelID) - if !isZombie { - t.Fatal("expected edge to be marked as zombie") - } - if pubKey1 != node1.PubKeyBytes { - t.Fatalf("expected pubKey1 %x, got %x", node1.PubKeyBytes, - pubKey1) - } - if pubKey2 != node2.PubKeyBytes { - t.Fatalf("expected pubKey2 %x, got %x", node2.PubKeyBytes, - pubKey2) - } - assertNumZombies(t, graph, 1) - - // Similarly, if we mark the same edge as live, we should no longer see - // it within the index. - if err := graph.MarkEdgeLive(edge.ChannelID); err != nil { - t.Fatalf("unable to mark edge as live: %v", err) - } - isZombie, _, _ = graph.IsZombieEdge(edge.ChannelID) - if isZombie { - t.Fatal("expected edge to not be marked as zombie") - } - assertNumZombies(t, graph, 0) -} - -// compareNodes is used to compare two LightningNodes while excluding the -// Features struct, which cannot be compared as the semantics for reserializing -// the featuresMap have not been defined. 
-func compareNodes(a, b *LightningNode) er.R { - if a.LastUpdate != b.LastUpdate { - return er.Errorf("node LastUpdate doesn't match: expected %v, \n"+ - "got %v", a.LastUpdate, b.LastUpdate) - } - if !reflect.DeepEqual(a.Addresses, b.Addresses) { - return er.Errorf("Addresses doesn't match: expected %#v, \n "+ - "got %#v", a.Addresses, b.Addresses) - } - if !reflect.DeepEqual(a.PubKeyBytes, b.PubKeyBytes) { - return er.Errorf("PubKey doesn't match: expected %#v, \n "+ - "got %#v", a.PubKeyBytes, b.PubKeyBytes) - } - if !reflect.DeepEqual(a.Color, b.Color) { - return er.Errorf("Color doesn't match: expected %#v, \n "+ - "got %#v", a.Color, b.Color) - } - if !reflect.DeepEqual(a.Alias, b.Alias) { - return er.Errorf("Alias doesn't match: expected %#v, \n "+ - "got %#v", a.Alias, b.Alias) - } - if !reflect.DeepEqual(a.db, b.db) { - return er.Errorf("db doesn't match: expected %#v, \n "+ - "got %#v", a.db, b.db) - } - if !reflect.DeepEqual(a.HaveNodeAnnouncement, b.HaveNodeAnnouncement) { - return er.Errorf("HaveNodeAnnouncement doesn't match: expected %#v, \n "+ - "got %#v", a.HaveNodeAnnouncement, b.HaveNodeAnnouncement) - } - if !bytes.Equal(a.ExtraOpaqueData, b.ExtraOpaqueData) { - return er.Errorf("extra data doesn't match: %v vs %v", - a.ExtraOpaqueData, b.ExtraOpaqueData) - } - - return nil -} - -// compareEdgePolicies is used to compare two ChannelEdgePolices using -// compareNodes, so as to exclude comparisons of the Nodes' Features struct. 
-func compareEdgePolicies(a, b *ChannelEdgePolicy) er.R { - if a.ChannelID != b.ChannelID { - return er.Errorf("ChannelID doesn't match: expected %v, "+ - "got %v", a.ChannelID, b.ChannelID) - } - if !reflect.DeepEqual(a.LastUpdate, b.LastUpdate) { - return er.Errorf("edge LastUpdate doesn't match: expected %#v, \n "+ - "got %#v", a.LastUpdate, b.LastUpdate) - } - if a.MessageFlags != b.MessageFlags { - return er.Errorf("MessageFlags doesn't match: expected %v, "+ - "got %v", a.MessageFlags, b.MessageFlags) - } - if a.ChannelFlags != b.ChannelFlags { - return er.Errorf("ChannelFlags doesn't match: expected %v, "+ - "got %v", a.ChannelFlags, b.ChannelFlags) - } - if a.TimeLockDelta != b.TimeLockDelta { - return er.Errorf("TimeLockDelta doesn't match: expected %v, "+ - "got %v", a.TimeLockDelta, b.TimeLockDelta) - } - if a.MinHTLC != b.MinHTLC { - return er.Errorf("MinHTLC doesn't match: expected %v, "+ - "got %v", a.MinHTLC, b.MinHTLC) - } - if a.MaxHTLC != b.MaxHTLC { - return er.Errorf("MaxHTLC doesn't match: expected %v, "+ - "got %v", a.MaxHTLC, b.MaxHTLC) - } - if a.FeeBaseMSat != b.FeeBaseMSat { - return er.Errorf("FeeBaseMSat doesn't match: expected %v, "+ - "got %v", a.FeeBaseMSat, b.FeeBaseMSat) - } - if a.FeeProportionalMillionths != b.FeeProportionalMillionths { - return er.Errorf("FeeProportionalMillionths doesn't match: "+ - "expected %v, got %v", a.FeeProportionalMillionths, - b.FeeProportionalMillionths) - } - if !bytes.Equal(a.ExtraOpaqueData, b.ExtraOpaqueData) { - return er.Errorf("extra data doesn't match: %v vs %v", - a.ExtraOpaqueData, b.ExtraOpaqueData) - } - if err := compareNodes(a.Node, b.Node); err != nil { - return err - } - if !reflect.DeepEqual(a.db, b.db) { - return er.Errorf("db doesn't match: expected %#v, \n "+ - "got %#v", a.db, b.db) - } - return nil -} - -// TestLightningNodeSigVerifcation checks that we can use the LightningNode's -// pubkey to verify signatures. 
-func TestLightningNodeSigVerification(t *testing.T) { - t.Parallel() - - // Create some dummy data to sign. - var data [32]byte - if _, err := prand.Read(data[:]); err != nil { - t.Fatalf("unable to read prand: %v", err) - } - - // Create private key and sign the data with it. - priv, err := btcec.NewPrivateKey(btcec.S256()) - if err != nil { - t.Fatalf("unable to crete priv key: %v", err) - } - - sign, err := priv.Sign(data[:]) - if err != nil { - t.Fatalf("unable to sign: %v", err) - } - - // Sanity check that the signature checks out. - if !sign.Verify(data[:], priv.PubKey()) { - t.Fatalf("signature doesn't check out") - } - - // Create a LightningNode from the same private key. - db, cleanUp, errr := MakeTestDB() - if errr != nil { - t.Fatalf("unable to make test database: %v", errr) - } - defer cleanUp() - - node, errr := createLightningNode(db, priv) - if errr != nil { - t.Fatalf("unable to create node: %v", errr) - } - - // And finally check that we can verify the same signature from the - // pubkey returned from the lightning node. - nodePub, errr := node.PubKey() - if errr != nil { - t.Fatalf("unable to get pubkey: %v", errr) - } - - if !sign.Verify(data[:], nodePub) { - t.Fatalf("unable to verify sig") - } -} - -// TestComputeFee tests fee calculation based on both in- and outgoing amt. 
-func TestComputeFee(t *testing.T) { - var ( - policy = ChannelEdgePolicy{ - FeeBaseMSat: 10000, - FeeProportionalMillionths: 30000, - } - outgoingAmt = lnwire.MilliSatoshi(1000000) - expectedFee = lnwire.MilliSatoshi(40000) - ) - - fee := policy.ComputeFee(outgoingAmt) - if fee != expectedFee { - t.Fatalf("expected fee %v, got %v", expectedFee, fee) - } - - fwdFee := policy.ComputeFeeFromIncoming(outgoingAmt + fee) - if fwdFee != expectedFee { - t.Fatalf("expected fee %v, but got %v", fee, fwdFee) - } -} diff --git a/lnd/channeldb/invoice_test.go b/lnd/channeldb/invoice_test.go deleted file mode 100644 index 517d117a..00000000 --- a/lnd/channeldb/invoice_test.go +++ /dev/null @@ -1,1248 +0,0 @@ -package channeldb - -import ( - "crypto/rand" - "fmt" - "math" - "testing" - "time" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/lnd/lntypes" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/record" - "github.com/stretchr/testify/require" -) - -var ( - emptyFeatures = lnwire.NewFeatureVector(nil, lnwire.Features) - testNow = time.Unix(1, 0) -) - -func randInvoice(value lnwire.MilliSatoshi) (*Invoice, er.R) { - var ( - pre lntypes.Preimage - payAddr [32]byte - ) - if _, err := rand.Read(pre[:]); err != nil { - return nil, er.E(err) - } - if _, err := rand.Read(payAddr[:]); err != nil { - return nil, er.E(err) - } - - i := &Invoice{ - CreationDate: testNow, - Terms: ContractTerm{ - Expiry: 4000, - PaymentPreimage: &pre, - PaymentAddr: payAddr, - Value: value, - Features: emptyFeatures, - }, - Htlcs: map[CircuitKey]*InvoiceHTLC{}, - } - i.Memo = []byte("memo") - - // Create a random byte slice of MaxPaymentRequestSize bytes to be used - // as a dummy paymentrequest, and determine if it should be set based - // on one of the random bytes. 
- var r [MaxPaymentRequestSize]byte - if _, err := rand.Read(r[:]); err != nil { - return nil, er.E(err) - } - if r[0]&1 == 0 { - i.PaymentRequest = r[:] - } else { - i.PaymentRequest = []byte("") - } - - return i, nil -} - -// settleTestInvoice settles a test invoice. -func settleTestInvoice(invoice *Invoice, settleIndex uint64) { - invoice.SettleDate = testNow - invoice.AmtPaid = invoice.Terms.Value - invoice.State = ContractSettled - invoice.Htlcs[CircuitKey{}] = &InvoiceHTLC{ - Amt: invoice.Terms.Value, - AcceptTime: testNow, - ResolveTime: testNow, - State: HtlcStateSettled, - CustomRecords: make(record.CustomSet), - } - invoice.SettleIndex = settleIndex -} - -// Tests that pending invoices are those which are either in ContractOpen or -// in ContractAccepted state. -func TestInvoiceIsPending(t *testing.T) { - contractStates := []ContractState{ - ContractOpen, ContractSettled, ContractCanceled, ContractAccepted, - } - - for _, state := range contractStates { - invoice := Invoice{ - State: state, - } - - // We expect that an invoice is pending if it's either in ContractOpen - // or ContractAccepted state. - pending := (state == ContractOpen || state == ContractAccepted) - - if invoice.IsPending() != pending { - t.Fatalf("expected pending: %v, got: %v, invoice: %v", - pending, invoice.IsPending(), invoice) - } - } -} - -type invWorkflowTest struct { - name string - queryPayHash bool - queryPayAddr bool -} - -var invWorkflowTests = []invWorkflowTest{ - { - name: "unknown", - queryPayHash: false, - queryPayAddr: false, - }, - { - name: "only payhash known", - queryPayHash: true, - queryPayAddr: false, - }, - { - name: "payaddr and payhash known", - queryPayHash: true, - queryPayAddr: true, - }, -} - -// TestInvoiceWorkflow asserts the basic process of inserting, fetching, and -// updating an invoice. We assert that the flow is successful using when -// querying with various combinations of payment hash and payment address. 
-func TestInvoiceWorkflow(t *testing.T) { - t.Parallel() - - for _, test := range invWorkflowTests { - test := test - t.Run(test.name, func(t *testing.T) { - testInvoiceWorkflow(t, test) - }) - } -} - -func testInvoiceWorkflow(t *testing.T, test invWorkflowTest) { - db, cleanUp, err := MakeTestDB() - defer cleanUp() - if err != nil { - t.Fatalf("unable to make test db: %v", err) - } - - // Create a fake invoice which we'll use several times in the tests - // below. - fakeInvoice, err := randInvoice(10000) - if err != nil { - t.Fatalf("unable to create invoice: %v", err) - } - invPayHash := fakeInvoice.Terms.PaymentPreimage.Hash() - - // Select the payment hash and payment address we will use to lookup or - // update the invoice for the remainder of the test. - var ( - payHash lntypes.Hash - payAddr *[32]byte - ref InvoiceRef - ) - switch { - case test.queryPayHash && test.queryPayAddr: - payHash = invPayHash - payAddr = &fakeInvoice.Terms.PaymentAddr - ref = InvoiceRefByHashAndAddr(payHash, *payAddr) - case test.queryPayHash: - payHash = invPayHash - ref = InvoiceRefByHash(payHash) - } - - // Add the invoice to the database, this should succeed as there aren't - // any existing invoices within the database with the same payment - // hash. - if _, err := db.AddInvoice(fakeInvoice, invPayHash); err != nil { - t.Fatalf("unable to find invoice: %v", err) - } - - // Attempt to retrieve the invoice which was just added to the - // database. It should be found, and the invoice returned should be - // identical to the one created above. - dbInvoice, err := db.LookupInvoice(ref) - if !test.queryPayAddr && !test.queryPayHash { - if !ErrInvoiceNotFound.Is(err) { - t.Fatalf("invoice should not exist: %v", err) - } - return - } - - require.Equal(t, - *fakeInvoice, dbInvoice, - "invoice fetched from db doesn't match original", - ) - - // The add index of the invoice retrieved from the database should now - // be fully populated. 
As this is the first index written to the DB, - // the addIndex should be 1. - if dbInvoice.AddIndex != 1 { - t.Fatalf("wrong add index: expected %v, got %v", 1, - dbInvoice.AddIndex) - } - - // Settle the invoice, the version retrieved from the database should - // now have the settled bit toggle to true and a non-default - // SettledDate - payAmt := fakeInvoice.Terms.Value * 2 - _, err = db.UpdateInvoice(ref, getUpdateInvoice(payAmt)) - if err != nil { - t.Fatalf("unable to settle invoice: %v", err) - } - dbInvoice2, err := db.LookupInvoice(ref) - if err != nil { - t.Fatalf("unable to fetch invoice: %v", err) - } - if dbInvoice2.State != ContractSettled { - t.Fatalf("invoice should now be settled but isn't") - } - if dbInvoice2.SettleDate.IsZero() { - t.Fatalf("invoice should have non-zero SettledDate but isn't") - } - - // Our 2x payment should be reflected, and also the settle index of 1 - // should also have been committed for this index. - if dbInvoice2.AmtPaid != payAmt { - t.Fatalf("wrong amt paid: expected %v, got %v", payAmt, - dbInvoice2.AmtPaid) - } - if dbInvoice2.SettleIndex != 1 { - t.Fatalf("wrong settle index: expected %v, got %v", 1, - dbInvoice2.SettleIndex) - } - - // Attempt to insert generated above again, this should fail as - // duplicates are rejected by the processing logic. - if _, err := db.AddInvoice(fakeInvoice, payHash); !ErrDuplicateInvoice.Is(err) { - t.Fatalf("invoice insertion should fail due to duplication, "+ - "instead %v", err) - } - - // Attempt to look up a non-existent invoice, this should also fail but - // with a "not found" error. - var fakeHash [32]byte - fakeRef := InvoiceRefByHash(fakeHash) - _, err = db.LookupInvoice(fakeRef) - if !ErrInvoiceNotFound.Is(err) { - t.Fatalf("lookup should have failed, instead %v", err) - } - - // Add 10 random invoices. 
- const numInvoices = 10 - amt := lnwire.NewMSatFromSatoshis(1000) - invoices := make([]*Invoice, numInvoices+1) - invoices[0] = &dbInvoice2 - for i := 1; i < len(invoices); i++ { - invoice, err := randInvoice(amt) - if err != nil { - t.Fatalf("unable to create invoice: %v", err) - } - - hash := invoice.Terms.PaymentPreimage.Hash() - if _, err := db.AddInvoice(invoice, hash); err != nil { - t.Fatalf("unable to add invoice %v", err) - } - - invoices[i] = invoice - } - - // Perform a scan to collect all the active invoices. - query := InvoiceQuery{ - IndexOffset: 0, - NumMaxInvoices: math.MaxUint64, - PendingOnly: false, - } - - response, err := db.QueryInvoices(query) - if err != nil { - t.Fatalf("invoice query failed: %v", err) - } - - // The retrieve list of invoices should be identical as since we're - // using big endian, the invoices should be retrieved in ascending - // order (and the primary key should be incremented with each - // insertion). - for i := 0; i < len(invoices); i++ { - require.Equal(t, - *invoices[i], response.Invoices[i], - "retrieved invoice doesn't match", - ) - } -} - -// TestAddDuplicatePayAddr asserts that the payment addresses of inserted -// invoices are unique. -func TestAddDuplicatePayAddr(t *testing.T) { - db, cleanUp, err := MakeTestDB() - defer cleanUp() - util.RequireNoErr(t, err) - - // Create two invoices with the same payment addr. - invoice1, err := randInvoice(1000) - util.RequireNoErr(t, err) - - invoice2, err := randInvoice(20000) - util.RequireNoErr(t, err) - invoice2.Terms.PaymentAddr = invoice1.Terms.PaymentAddr - - // First insert should succeed. - inv1Hash := invoice1.Terms.PaymentPreimage.Hash() - _, err = db.AddInvoice(invoice1, inv1Hash) - util.RequireNoErr(t, err) - - // Second insert should fail with duplicate payment addr. 
- inv2Hash := invoice2.Terms.PaymentPreimage.Hash() - _, err = db.AddInvoice(invoice2, inv2Hash) - util.RequireErr(t, err, ErrDuplicatePayAddr) -} - -// TestAddDuplicateKeysendPayAddr asserts that we permit duplicate payment -// addresses to be inserted if they are blank to support JIT legacy keysend -// invoices. -func TestAddDuplicateKeysendPayAddr(t *testing.T) { - db, cleanUp, err := MakeTestDB() - defer cleanUp() - util.RequireNoErr(t, err) - - // Create two invoices with the same _blank_ payment addr. - invoice1, err := randInvoice(1000) - util.RequireNoErr(t, err) - invoice1.Terms.PaymentAddr = BlankPayAddr - - invoice2, err := randInvoice(20000) - util.RequireNoErr(t, err) - invoice2.Terms.PaymentAddr = BlankPayAddr - - // Inserting both should succeed without a duplicate payment address - // failure. - inv1Hash := invoice1.Terms.PaymentPreimage.Hash() - _, err = db.AddInvoice(invoice1, inv1Hash) - util.RequireNoErr(t, err) - - inv2Hash := invoice2.Terms.PaymentPreimage.Hash() - _, err = db.AddInvoice(invoice2, inv2Hash) - util.RequireNoErr(t, err) - - // Querying for each should succeed. Here we use hash+addr refs since - // the lookup will fail if the hash and addr point to different - // invoices, so if both succeed we can be assured they aren't included - // in the payment address index. - ref1 := InvoiceRefByHashAndAddr(inv1Hash, BlankPayAddr) - dbInv1, err := db.LookupInvoice(ref1) - util.RequireNoErr(t, err) - require.Equal(t, invoice1, &dbInv1) - - ref2 := InvoiceRefByHashAndAddr(inv2Hash, BlankPayAddr) - dbInv2, err := db.LookupInvoice(ref2) - util.RequireNoErr(t, err) - require.Equal(t, invoice2, &dbInv2) -} - -// TestInvRefEquivocation asserts that retrieving or updating an invoice using -// an equivocating InvoiceRef results in ErrInvRefEquivocation. -func TestInvRefEquivocation(t *testing.T) { - db, cleanUp, err := MakeTestDB() - defer cleanUp() - util.RequireNoErr(t, err) - - // Add two random invoices. 
- invoice1, err := randInvoice(1000) - util.RequireNoErr(t, err) - - inv1Hash := invoice1.Terms.PaymentPreimage.Hash() - _, err = db.AddInvoice(invoice1, inv1Hash) - util.RequireNoErr(t, err) - - invoice2, err := randInvoice(2000) - util.RequireNoErr(t, err) - - inv2Hash := invoice2.Terms.PaymentPreimage.Hash() - _, err = db.AddInvoice(invoice2, inv2Hash) - util.RequireNoErr(t, err) - - // Now, query using invoice 1's payment address, but invoice 2's payment - // hash. We expect an error since the invref points to multiple - // invoices. - ref := InvoiceRefByHashAndAddr(inv2Hash, invoice1.Terms.PaymentAddr) - _, err = db.LookupInvoice(ref) - util.RequireErr(t, err, ErrInvRefEquivocation) - - // The same error should be returned when updating an equivocating - // reference. - nop := func(_ *Invoice) (*InvoiceUpdateDesc, er.R) { - return nil, nil - } - _, err = db.UpdateInvoice(ref, nop) - util.RequireErr(t, err, ErrInvRefEquivocation) -} - -// TestInvoiceCancelSingleHtlc tests that a single htlc can be canceled on the -// invoice. -func TestInvoiceCancelSingleHtlc(t *testing.T) { - t.Parallel() - - db, cleanUp, err := MakeTestDB() - defer cleanUp() - if err != nil { - t.Fatalf("unable to make test db: %v", err) - } - - preimage := lntypes.Preimage{1} - paymentHash := preimage.Hash() - - testInvoice := &Invoice{ - Htlcs: map[CircuitKey]*InvoiceHTLC{}, - Terms: ContractTerm{ - Value: lnwire.NewMSatFromSatoshis(10000), - Features: emptyFeatures, - PaymentPreimage: &preimage, - }, - } - - if _, err := db.AddInvoice(testInvoice, paymentHash); err != nil { - t.Fatalf("unable to find invoice: %v", err) - } - - // Accept an htlc on this invoice. 
- key := CircuitKey{ChanID: lnwire.NewShortChanIDFromInt(1), HtlcID: 4} - htlc := HtlcAcceptDesc{ - Amt: 500, - CustomRecords: make(record.CustomSet), - } - - ref := InvoiceRefByHash(paymentHash) - invoice, err := db.UpdateInvoice(ref, - func(invoice *Invoice) (*InvoiceUpdateDesc, er.R) { - return &InvoiceUpdateDesc{ - AddHtlcs: map[CircuitKey]*HtlcAcceptDesc{ - key: &htlc, - }, - }, nil - }) - if err != nil { - t.Fatalf("unable to add invoice htlc: %v", err) - } - if len(invoice.Htlcs) != 1 { - t.Fatalf("expected the htlc to be added") - } - if invoice.Htlcs[key].State != HtlcStateAccepted { - t.Fatalf("expected htlc in state accepted") - } - - // Cancel the htlc again. - invoice, err = db.UpdateInvoice(ref, - func(invoice *Invoice) (*InvoiceUpdateDesc, er.R) { - return &InvoiceUpdateDesc{ - CancelHtlcs: map[CircuitKey]struct{}{ - key: {}, - }, - }, nil - }) - if err != nil { - t.Fatalf("unable to cancel htlc: %v", err) - } - if len(invoice.Htlcs) != 1 { - t.Fatalf("expected the htlc to be present") - } - if invoice.Htlcs[key].State != HtlcStateCanceled { - t.Fatalf("expected htlc in state canceled") - } -} - -// TestInvoiceTimeSeries tests that newly added invoices invoices, as well as -// settled invoices are added to the database are properly placed in the add -// add or settle index which serves as an event time series. -func TestInvoiceAddTimeSeries(t *testing.T) { - t.Parallel() - - db, cleanUp, err := MakeTestDB(OptionClock(testClock)) - defer cleanUp() - if err != nil { - t.Fatalf("unable to make test db: %v", err) - } - - _, err = db.InvoicesAddedSince(0) - util.RequireNoErr(t, err) - - // We'll start off by creating 20 random invoices, and inserting them - // into the database. 
- const numInvoices = 20 - amt := lnwire.NewMSatFromSatoshis(1000) - invoices := make([]Invoice, numInvoices) - for i := 0; i < len(invoices); i++ { - invoice, err := randInvoice(amt) - if err != nil { - t.Fatalf("unable to create invoice: %v", err) - } - - paymentHash := invoice.Terms.PaymentPreimage.Hash() - - if _, err := db.AddInvoice(invoice, paymentHash); err != nil { - t.Fatalf("unable to add invoice %v", err) - } - - invoices[i] = *invoice - } - - // With the invoices constructed, we'll now create a series of queries - // that we'll use to assert expected return values of - // InvoicesAddedSince. - addQueries := []struct { - sinceAddIndex uint64 - - resp []Invoice - }{ - // If we specify a value of zero, we shouldn't get any invoices - // back. - { - sinceAddIndex: 0, - }, - - // If we specify a value well beyond the number of inserted - // invoices, we shouldn't get any invoices back. - { - sinceAddIndex: 99999999, - }, - - // Using an index of 1 should result in all values, but the - // first one being returned. - { - sinceAddIndex: 1, - resp: invoices[1:], - }, - - // If we use an index of 10, then we should retrieve the - // reaming 10 invoices. - { - sinceAddIndex: 10, - resp: invoices[10:], - }, - } - - for i, query := range addQueries { - resp, err := db.InvoicesAddedSince(query.sinceAddIndex) - if err != nil { - t.Fatalf("unable to query: %v", err) - } - - require.Equal(t, len(query.resp), len(resp)) - - for j := 0; j < len(query.resp); j++ { - require.Equal(t, - query.resp[j], resp[j], - fmt.Sprintf("test: #%v, item: #%v", i, j), - ) - } - } - - _, err = db.InvoicesSettledSince(0) - util.RequireNoErr(t, err) - - var settledInvoices []Invoice - var settleIndex uint64 = 1 - // We'll now only settle the latter half of each of those invoices. 
- for i := 10; i < len(invoices); i++ { - invoice := &invoices[i] - - paymentHash := invoice.Terms.PaymentPreimage.Hash() - - ref := InvoiceRefByHash(paymentHash) - _, err := db.UpdateInvoice( - ref, getUpdateInvoice(invoice.Terms.Value), - ) - if err != nil { - t.Fatalf("unable to settle invoice: %v", err) - } - - // Create the settled invoice for the expectation set. - settleTestInvoice(invoice, settleIndex) - settleIndex++ - - settledInvoices = append(settledInvoices, *invoice) - } - - // We'll now prepare an additional set of queries to ensure the settle - // time series has properly been maintained in the database. - settleQueries := []struct { - sinceSettleIndex uint64 - - resp []Invoice - }{ - // If we specify a value of zero, we shouldn't get any settled - // invoices back. - { - sinceSettleIndex: 0, - }, - - // If we specify a value well beyond the number of settled - // invoices, we shouldn't get any invoices back. - { - sinceSettleIndex: 99999999, - }, - - // Using an index of 1 should result in the final 10 invoices - // being returned, as we only settled those. - { - sinceSettleIndex: 1, - resp: settledInvoices[1:], - }, - } - - for i, query := range settleQueries { - resp, err := db.InvoicesSettledSince(query.sinceSettleIndex) - if err != nil { - t.Fatalf("unable to query: %v", err) - } - - require.Equal(t, len(query.resp), len(resp)) - - for j := 0; j < len(query.resp); j++ { - require.Equal(t, - query.resp[j], resp[j], - fmt.Sprintf("test: #%v, item: #%v", i, j), - ) - } - } -} - -// TestScanInvoices tests that ScanInvoices scans trough all stored invoices -// correctly. 
-func TestScanInvoices(t *testing.T) { - t.Parallel() - - db, cleanup, err := MakeTestDB() - defer cleanup() - if err != nil { - t.Fatalf("unable to make test db: %v", err) - } - - var invoices map[lntypes.Hash]*Invoice - callCount := 0 - resetCount := 0 - - // reset is used to reset/initialize results and is called once - // upon calling ScanInvoices and when the underlying transaction is - // retried. - reset := func() { - invoices = make(map[lntypes.Hash]*Invoice) - callCount = 0 - resetCount++ - - } - - scanFunc := func(paymentHash lntypes.Hash, invoice *Invoice) er.R { - invoices[paymentHash] = invoice - callCount++ - - return nil - } - - // With an empty DB we expect to not scan any invoices. - util.RequireNoErr(t, db.ScanInvoices(scanFunc, reset)) - require.Equal(t, 0, len(invoices)) - require.Equal(t, 0, callCount) - require.Equal(t, 1, resetCount) - - numInvoices := 5 - testInvoices := make(map[lntypes.Hash]*Invoice) - - // Now populate the DB and check if we can get all invoices with their - // payment hashes as expected. - for i := 1; i <= numInvoices; i++ { - invoice, err := randInvoice(lnwire.MilliSatoshi(i)) - util.RequireNoErr(t, err) - - paymentHash := invoice.Terms.PaymentPreimage.Hash() - testInvoices[paymentHash] = invoice - - _, err = db.AddInvoice(invoice, paymentHash) - util.RequireNoErr(t, err) - } - - resetCount = 0 - util.RequireNoErr(t, db.ScanInvoices(scanFunc, reset)) - require.Equal(t, numInvoices, callCount) - require.Equal(t, testInvoices, invoices) - require.Equal(t, 1, resetCount) -} - -// TestDuplicateSettleInvoice tests that if we add a new invoice and settle it -// twice, then the second time we also receive the invoice that we settled as a -// return argument. 
-func TestDuplicateSettleInvoice(t *testing.T) { - t.Parallel() - - db, cleanUp, err := MakeTestDB(OptionClock(testClock)) - defer cleanUp() - if err != nil { - t.Fatalf("unable to make test db: %v", err) - } - - // We'll start out by creating an invoice and writing it to the DB. - amt := lnwire.NewMSatFromSatoshis(1000) - invoice, err := randInvoice(amt) - if err != nil { - t.Fatalf("unable to create invoice: %v", err) - } - - payHash := invoice.Terms.PaymentPreimage.Hash() - - if _, err := db.AddInvoice(invoice, payHash); err != nil { - t.Fatalf("unable to add invoice %v", err) - } - - // With the invoice in the DB, we'll now attempt to settle the invoice. - ref := InvoiceRefByHash(payHash) - dbInvoice, err := db.UpdateInvoice(ref, getUpdateInvoice(amt)) - if err != nil { - t.Fatalf("unable to settle invoice: %v", err) - } - - // We'll update what we expect the settle invoice to be so that our - // comparison below has the correct assumption. - invoice.SettleIndex = 1 - invoice.State = ContractSettled - invoice.AmtPaid = amt - invoice.SettleDate = dbInvoice.SettleDate - invoice.Htlcs = map[CircuitKey]*InvoiceHTLC{ - {}: { - Amt: amt, - AcceptTime: time.Unix(1, 0), - ResolveTime: time.Unix(1, 0), - State: HtlcStateSettled, - CustomRecords: make(record.CustomSet), - }, - } - - // We should get back the exact same invoice that we just inserted. - require.Equal(t, invoice, dbInvoice, "wrong invoice after settle") - - // If we try to settle the invoice again, then we should get the very - // same invoice back, but with an error this time. 
- dbInvoice, err = db.UpdateInvoice(ref, getUpdateInvoice(amt)) - if !ErrInvoiceAlreadySettled.Is(err) { - t.Fatalf("expected ErrInvoiceAlreadySettled") - } - - if dbInvoice == nil { - t.Fatalf("invoice from db is nil after settle!") - } - - invoice.SettleDate = dbInvoice.SettleDate - require.Equal(t, invoice, dbInvoice, "wrong invoice after second settle") -} - -// TestQueryInvoices ensures that we can properly query the invoice database for -// invoices using different types of queries. -func TestQueryInvoices(t *testing.T) { - t.Parallel() - - db, cleanUp, err := MakeTestDB(OptionClock(testClock)) - defer cleanUp() - if err != nil { - t.Fatalf("unable to make test db: %v", err) - } - - // To begin the test, we'll add 50 invoices to the database. We'll - // assume that the index of the invoice within the database is the same - // as the amount of the invoice itself. - const numInvoices = 50 - var settleIndex uint64 = 1 - var invoices []Invoice - var pendingInvoices []Invoice - - for i := 1; i <= numInvoices; i++ { - amt := lnwire.MilliSatoshi(i) - invoice, err := randInvoice(amt) - if err != nil { - t.Fatalf("unable to create invoice: %v", err) - } - - paymentHash := invoice.Terms.PaymentPreimage.Hash() - - if _, err := db.AddInvoice(invoice, paymentHash); err != nil { - t.Fatalf("unable to add invoice: %v", err) - } - - // We'll only settle half of all invoices created. - if i%2 == 0 { - ref := InvoiceRefByHash(paymentHash) - _, err := db.UpdateInvoice(ref, getUpdateInvoice(amt)) - if err != nil { - t.Fatalf("unable to settle invoice: %v", err) - } - - // Create the settled invoice for the expectation set. - settleTestInvoice(invoice, settleIndex) - settleIndex++ - } else { - pendingInvoices = append(pendingInvoices, *invoice) - } - - invoices = append(invoices, *invoice) - } - - // The test will consist of several queries along with their respective - // expected response. Each query response should match its expected one. 
- testCases := []struct { - query InvoiceQuery - expected []Invoice - }{ - // Fetch all invoices with a single query. - { - query: InvoiceQuery{ - NumMaxInvoices: numInvoices, - }, - expected: invoices, - }, - // Fetch all invoices with a single query, reversed. - { - query: InvoiceQuery{ - Reversed: true, - NumMaxInvoices: numInvoices, - }, - expected: invoices, - }, - // Fetch the first 25 invoices. - { - query: InvoiceQuery{ - NumMaxInvoices: numInvoices / 2, - }, - expected: invoices[:numInvoices/2], - }, - // Fetch the first 10 invoices, but this time iterating - // backwards. - { - query: InvoiceQuery{ - IndexOffset: 11, - Reversed: true, - NumMaxInvoices: numInvoices, - }, - expected: invoices[:10], - }, - // Fetch the last 40 invoices. - { - query: InvoiceQuery{ - IndexOffset: 10, - NumMaxInvoices: numInvoices, - }, - expected: invoices[10:], - }, - // Fetch all but the first invoice. - { - query: InvoiceQuery{ - IndexOffset: 1, - NumMaxInvoices: numInvoices, - }, - expected: invoices[1:], - }, - // Fetch one invoice, reversed, with index offset 3. This - // should give us the second invoice in the array. - { - query: InvoiceQuery{ - IndexOffset: 3, - Reversed: true, - NumMaxInvoices: 1, - }, - expected: invoices[1:2], - }, - // Same as above, at index 2. - { - query: InvoiceQuery{ - IndexOffset: 2, - Reversed: true, - NumMaxInvoices: 1, - }, - expected: invoices[0:1], - }, - // Fetch one invoice, at index 1, reversed. Since invoice#1 is - // the very first, there won't be any left in a reverse search, - // so we expect no invoices to be returned. - { - query: InvoiceQuery{ - IndexOffset: 1, - Reversed: true, - NumMaxInvoices: 1, - }, - expected: nil, - }, - // Same as above, but don't restrict the number of invoices to - // 1. - { - query: InvoiceQuery{ - IndexOffset: 1, - Reversed: true, - NumMaxInvoices: numInvoices, - }, - expected: nil, - }, - // Fetch one invoice, reversed, with no offset set. We expect - // the last invoice in the response. 
- { - query: InvoiceQuery{ - Reversed: true, - NumMaxInvoices: 1, - }, - expected: invoices[numInvoices-1:], - }, - // Fetch one invoice, reversed, the offset set at numInvoices+1. - // We expect this to return the last invoice. - { - query: InvoiceQuery{ - IndexOffset: numInvoices + 1, - Reversed: true, - NumMaxInvoices: 1, - }, - expected: invoices[numInvoices-1:], - }, - // Same as above, at offset numInvoices. - { - query: InvoiceQuery{ - IndexOffset: numInvoices, - Reversed: true, - NumMaxInvoices: 1, - }, - expected: invoices[numInvoices-2 : numInvoices-1], - }, - // Fetch one invoice, at no offset (same as offset 0). We - // expect the first invoice only in the response. - { - query: InvoiceQuery{ - NumMaxInvoices: 1, - }, - expected: invoices[:1], - }, - // Same as above, at offset 1. - { - query: InvoiceQuery{ - IndexOffset: 1, - NumMaxInvoices: 1, - }, - expected: invoices[1:2], - }, - // Same as above, at offset 2. - { - query: InvoiceQuery{ - IndexOffset: 2, - NumMaxInvoices: 1, - }, - expected: invoices[2:3], - }, - // Same as above, at offset numInvoices-1. Expect the last - // invoice to be returned. - { - query: InvoiceQuery{ - IndexOffset: numInvoices - 1, - NumMaxInvoices: 1, - }, - expected: invoices[numInvoices-1:], - }, - // Same as above, at offset numInvoices. No invoices should be - // returned, as there are no invoices after this offset. - { - query: InvoiceQuery{ - IndexOffset: numInvoices, - NumMaxInvoices: 1, - }, - expected: nil, - }, - // Fetch all pending invoices with a single query. - { - query: InvoiceQuery{ - PendingOnly: true, - NumMaxInvoices: numInvoices, - }, - expected: pendingInvoices, - }, - // Fetch the first 12 pending invoices. - { - query: InvoiceQuery{ - PendingOnly: true, - NumMaxInvoices: numInvoices / 4, - }, - expected: pendingInvoices[:len(pendingInvoices)/2], - }, - // Fetch the first 5 pending invoices, but this time iterating - // backwards. 
- { - query: InvoiceQuery{ - IndexOffset: 10, - PendingOnly: true, - Reversed: true, - NumMaxInvoices: numInvoices, - }, - // Since we seek to the invoice with index 10 and - // iterate backwards, there should only be 5 pending - // invoices before it as every other invoice within the - // index is settled. - expected: pendingInvoices[:5], - }, - // Fetch the last 15 invoices. - { - query: InvoiceQuery{ - IndexOffset: 20, - PendingOnly: true, - NumMaxInvoices: numInvoices, - }, - // Since we seek to the invoice with index 20, there are - // 30 invoices left. From these 30, only 15 of them are - // still pending. - expected: pendingInvoices[len(pendingInvoices)-15:], - }, - // Fetch all invoices paginating backwards, with an index offset - // that is beyond our last offset. We expect all invoices to be - // returned. - { - query: InvoiceQuery{ - IndexOffset: numInvoices * 2, - PendingOnly: false, - Reversed: true, - NumMaxInvoices: numInvoices, - }, - expected: invoices, - }, - } - - for i, testCase := range testCases { - response, err := db.QueryInvoices(testCase.query) - if err != nil { - t.Fatalf("unable to query invoice database: %v", err) - } - - require.Equal(t, len(testCase.expected), len(response.Invoices)) - - for j, expected := range testCase.expected { - require.Equal(t, - expected, response.Invoices[j], - fmt.Sprintf("test: #%v, item: #%v", i, j), - ) - } - } -} - -// getUpdateInvoice returns an invoice update callback that, when called, -// settles the invoice with the given amount. 
-func getUpdateInvoice(amt lnwire.MilliSatoshi) InvoiceUpdateCallback { - return func(invoice *Invoice) (*InvoiceUpdateDesc, er.R) { - if invoice.State == ContractSettled { - return nil, ErrInvoiceAlreadySettled.Default() - } - - noRecords := make(record.CustomSet) - - update := &InvoiceUpdateDesc{ - State: &InvoiceStateUpdateDesc{ - Preimage: invoice.Terms.PaymentPreimage, - NewState: ContractSettled, - }, - AddHtlcs: map[CircuitKey]*HtlcAcceptDesc{ - {}: { - Amt: amt, - CustomRecords: noRecords, - }, - }, - } - - return update, nil - } -} - -// TestCustomRecords tests that custom records are properly recorded in the -// invoice database. -func TestCustomRecords(t *testing.T) { - t.Parallel() - - db, cleanUp, err := MakeTestDB() - defer cleanUp() - if err != nil { - t.Fatalf("unable to make test db: %v", err) - } - - preimage := lntypes.Preimage{1} - paymentHash := preimage.Hash() - - testInvoice := &Invoice{ - Htlcs: map[CircuitKey]*InvoiceHTLC{}, - Terms: ContractTerm{ - Value: lnwire.NewMSatFromSatoshis(10000), - Features: emptyFeatures, - PaymentPreimage: &preimage, - }, - } - - if _, err := db.AddInvoice(testInvoice, paymentHash); err != nil { - t.Fatalf("unable to add invoice: %v", err) - } - - // Accept an htlc with custom records on this invoice. - key := CircuitKey{ChanID: lnwire.NewShortChanIDFromInt(1), HtlcID: 4} - - records := record.CustomSet{ - 100000: []byte{}, - 100001: []byte{1, 2}, - } - - ref := InvoiceRefByHash(paymentHash) - _, err = db.UpdateInvoice(ref, - func(invoice *Invoice) (*InvoiceUpdateDesc, er.R) { - return &InvoiceUpdateDesc{ - AddHtlcs: map[CircuitKey]*HtlcAcceptDesc{ - key: { - Amt: 500, - CustomRecords: records, - }, - }, - }, nil - }, - ) - if err != nil { - t.Fatalf("unable to add invoice htlc: %v", err) - } - - // Retrieve the invoice from that database and verify that the custom - // records are present. 
- dbInvoice, err := db.LookupInvoice(ref) - if err != nil { - t.Fatalf("unable to lookup invoice: %v", err) - } - - if len(dbInvoice.Htlcs) != 1 { - t.Fatalf("expected the htlc to be added") - } - - require.Equal(t, - records, dbInvoice.Htlcs[key].CustomRecords, - "invalid custom records", - ) -} - -// TestInvoiceRef asserts that the proper identifiers are returned from an -// InvoiceRef depending on the constructor used. -func TestInvoiceRef(t *testing.T) { - payHash := lntypes.Hash{0x01} - payAddr := [32]byte{0x02} - - // An InvoiceRef by hash should return the provided hash and a nil - // payment addr. - refByHash := InvoiceRefByHash(payHash) - require.Equal(t, payHash, refByHash.PayHash()) - require.Equal(t, (*[32]byte)(nil), refByHash.PayAddr()) - - // An InvoiceRef by hash and addr should return the payment hash and - // payment addr passed to the constructor. - refByHashAndAddr := InvoiceRefByHashAndAddr(payHash, payAddr) - require.Equal(t, payHash, refByHashAndAddr.PayHash()) - require.Equal(t, &payAddr, refByHashAndAddr.PayAddr()) -} - -// TestDeleteInvoices tests that deleting a list of invoices will succeed -// if all delete references are valid, or will fail otherwise. -func TestDeleteInvoices(t *testing.T) { - t.Parallel() - - db, cleanup, err := MakeTestDB() - defer cleanup() - util.RequireNoErr(t, err, "unable to make test db") - - // Add some invoices to the test db. - numInvoices := 3 - invoicesToDelete := make([]InvoiceDeleteRef, numInvoices) - - for i := 0; i < numInvoices; i++ { - invoice, err := randInvoice(lnwire.MilliSatoshi(i + 1)) - util.RequireNoErr(t, err) - - paymentHash := invoice.Terms.PaymentPreimage.Hash() - addIndex, err := db.AddInvoice(invoice, paymentHash) - util.RequireNoErr(t, err) - - // Settle the second invoice. 
- if i == 1 { - invoice, err = db.UpdateInvoice( - InvoiceRefByHash(paymentHash), - getUpdateInvoice(invoice.Terms.Value), - ) - util.RequireNoErr(t, err, "unable to settle invoice") - } - - // store the delete ref for later. - invoicesToDelete[i] = InvoiceDeleteRef{ - PayHash: paymentHash, - PayAddr: &invoice.Terms.PaymentAddr, - AddIndex: addIndex, - SettleIndex: invoice.SettleIndex, - } - } - - // assertInvoiceCount asserts that the number of invoices equals - // to the passed count. - assertInvoiceCount := func(count int) { - // Query to collect all invoices. - query := InvoiceQuery{ - IndexOffset: 0, - NumMaxInvoices: math.MaxUint64, - } - - // Check that we really have 3 invoices. - response, err := db.QueryInvoices(query) - util.RequireNoErr(t, err) - require.Equal(t, count, len(response.Invoices)) - } - - // XOR one byte of one of the references' hash and attempt to delete. - invoicesToDelete[0].PayHash[2] ^= 3 - util.RequireErr(t, db.DeleteInvoice(invoicesToDelete)) - assertInvoiceCount(3) - - // Restore the hash. - invoicesToDelete[0].PayHash[2] ^= 3 - - // XOR one byte of one of the references' payment address and attempt - // to delete. - invoicesToDelete[1].PayAddr[5] ^= 7 - util.RequireErr(t, db.DeleteInvoice(invoicesToDelete)) - assertInvoiceCount(3) - - // Restore the payment address. - invoicesToDelete[1].PayAddr[5] ^= 7 - - // XOR the second invoice's payment settle index as it is settled, and - // attempt to delete. - invoicesToDelete[1].SettleIndex ^= 11 - util.RequireErr(t, db.DeleteInvoice(invoicesToDelete)) - assertInvoiceCount(3) - - // Restore the settle index. - invoicesToDelete[1].SettleIndex ^= 11 - - // XOR the add index for one of the references and attempt to delete. - invoicesToDelete[2].AddIndex ^= 13 - util.RequireErr(t, db.DeleteInvoice(invoicesToDelete)) - assertInvoiceCount(3) - - // Restore the add index. - invoicesToDelete[2].AddIndex ^= 13 - - // Delete should succeed with all the valid references. 
- util.RequireNoErr(t, db.DeleteInvoice(invoicesToDelete)) - assertInvoiceCount(0) -} diff --git a/lnd/channeldb/invoices.go b/lnd/channeldb/invoices.go deleted file mode 100644 index e4539cf8..00000000 --- a/lnd/channeldb/invoices.go +++ /dev/null @@ -1,1876 +0,0 @@ -package channeldb - -import ( - "bytes" - "fmt" - "io" - "time" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/lnd/htlcswitch/hop" - "github.com/pkt-cash/pktd/lnd/lntypes" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/record" - "github.com/pkt-cash/pktd/lnd/tlv" -) - -var ( - // unknownPreimage is an all-zeroes preimage that indicates that the - // preimage for this invoice is not yet known. - unknownPreimage lntypes.Preimage - - // BlankPayAddr is a sentinel payment address for legacy invoices. - // Invoices with this payment address are special-cased in the insertion - // logic to prevent being indexed in the payment address index, - // otherwise they would cause collisions after the first insertion. - BlankPayAddr [32]byte - - // invoiceBucket is the name of the bucket within the database that - // stores all data related to invoices no matter their final state. - // Within the invoice bucket, each invoice is keyed by its invoice ID - // which is a monotonically increasing uint32. - invoiceBucket = []byte("invoices") - - // paymentHashIndexBucket is the name of the sub-bucket within the - // invoiceBucket which indexes all invoices by their payment hash. The - // payment hash is the sha256 of the invoice's payment preimage. This - // index is used to detect duplicates, and also to provide a fast path - // for looking up incoming HTLCs to determine if we're able to settle - // them fully. 
- // - // maps: payHash => invoiceKey - invoiceIndexBucket = []byte("paymenthashes") - - // payAddrIndexBucket is the name of the top-level bucket that maps - // payment addresses to their invoice number. This can be used - // to efficiently query or update non-legacy invoices. Note that legacy - // invoices will not be included in this index since they all have the - // same, all-zero payment address, however all newly generated invoices - // will end up in this index. - // - // maps: payAddr => invoiceKey - payAddrIndexBucket = []byte("pay-addr-index") - - // numInvoicesKey is the name of key which houses the auto-incrementing - // invoice ID which is essentially used as a primary key. With each - // invoice inserted, the primary key is incremented by one. This key is - // stored within the invoiceIndexBucket. Within the invoiceBucket - // invoices are uniquely identified by the invoice ID. - numInvoicesKey = []byte("nik") - - // addIndexBucket is an index bucket that we'll use to create a - // monotonically increasing set of add indexes. Each time we add a new - // invoice, this sequence number will be incremented and then populated - // within the new invoice. - // - // In addition to this sequence number, we map: - // - // addIndexNo => invoiceKey - addIndexBucket = []byte("invoice-add-index") - - // settleIndexBucket is an index bucket that we'll use to create a - // monotonically increasing integer for tracking a "settle index". Each - // time an invoice is settled, this sequence number will be incremented - // as populate within the newly settled invoice. - // - // In addition to this sequence number, we map: - // - // settleIndexNo => invoiceKey - settleIndexBucket = []byte("invoice-settle-index") - - // ErrInvoiceAlreadySettled is returned when the invoice is already - // settled. 
- ErrInvoiceAlreadySettled = Err.CodeWithDetail("ErrInvoiceAlreadySettled", "invoice already settled") - - // ErrInvoiceAlreadyCanceled is returned when the invoice is already - // canceled. - ErrInvoiceAlreadyCanceled = Err.CodeWithDetail("ErrInvoiceAlreadyCanceled", "invoice already canceled") - - // ErrInvoiceAlreadyAccepted is returned when the invoice is already - // accepted. - ErrInvoiceAlreadyAccepted = Err.CodeWithDetail("ErrInvoiceAlreadyAccepted", "invoice already accepted") - - // ErrInvoiceStillOpen is returned when the invoice is still open. - ErrInvoiceStillOpen = Err.CodeWithDetail("ErrInvoiceStillOpen", "invoice still open") - - // ErrInvoiceCannotOpen is returned when an attempt is made to move an - // invoice to the open state. - ErrInvoiceCannotOpen = Err.CodeWithDetail("ErrInvoiceCannotOpen", "cannot move invoice to open") - - // ErrInvoiceCannotAccept is returned when an attempt is made to accept - // an invoice while the invoice is not in the open state. - ErrInvoiceCannotAccept = Err.CodeWithDetail("ErrInvoiceCannotAccept", "cannot accept invoice") - - // ErrInvoicePreimageMismatch is returned when the preimage doesn't - // match the invoice hash. - ErrInvoicePreimageMismatch = Err.CodeWithDetail("ErrInvoicePreimageMismatch", "preimage does not match") -) - -const ( - // MaxMemoSize is maximum size of the memo field within invoices stored - // in the database. - MaxMemoSize = 1024 - - // MaxPaymentRequestSize is the max size of a payment request for - // this invoice. - // TODO(halseth): determine the max length payment request when field - // lengths are final. - MaxPaymentRequestSize = 4096 - - // A set of tlv type definitions used to serialize invoice htlcs to the - // database. - // - // NOTE: A migration should be added whenever this list changes. This - // prevents against the database being rolled back to an older - // format where the surrounding logic might assume a different set of - // fields are known. 
- chanIDType tlv.Type = 1 - htlcIDType tlv.Type = 3 - amtType tlv.Type = 5 - acceptHeightType tlv.Type = 7 - acceptTimeType tlv.Type = 9 - resolveTimeType tlv.Type = 11 - expiryHeightType tlv.Type = 13 - htlcStateType tlv.Type = 15 - mppTotalAmtType tlv.Type = 17 - - // A set of tlv type definitions used to serialize invoice bodiees. - // - // NOTE: A migration should be added whenever this list changes. This - // prevents against the database being rolled back to an older - // format where the surrounding logic might assume a different set of - // fields are known. - memoType tlv.Type = 0 - payReqType tlv.Type = 1 - createTimeType tlv.Type = 2 - settleTimeType tlv.Type = 3 - addIndexType tlv.Type = 4 - settleIndexType tlv.Type = 5 - preimageType tlv.Type = 6 - valueType tlv.Type = 7 - cltvDeltaType tlv.Type = 8 - expiryType tlv.Type = 9 - paymentAddrType tlv.Type = 10 - featuresType tlv.Type = 11 - invStateType tlv.Type = 12 - amtPaidType tlv.Type = 13 - hodlInvoiceType tlv.Type = 14 -) - -// InvoiceRef is a composite identifier for invoices. Invoices can be referenced -// by various combinations of payment hash and payment addr, in certain contexts -// only some of these are known. An InvoiceRef and its constructors thus -// encapsulate the valid combinations of query parameters that can be supplied -// to LookupInvoice and UpdateInvoice. -type InvoiceRef struct { - // payHash is the payment hash of the target invoice. All invoices are - // currently indexed by payment hash. This value will be used as a - // fallback when no payment address is known. - payHash lntypes.Hash - - // payAddr is the payment addr of the target invoice. Newer invoices - // (0.11 and up) are indexed by payment address in addition to payment - // hash, but pre 0.8 invoices do not have one at all. When this value is - // known it will be used as the primary identifier, falling back to - // payHash if no value is known. 
- payAddr *[32]byte -} - -// InvoiceRefByHash creates an InvoiceRef that queries for an invoice only by -// its payment hash. -func InvoiceRefByHash(payHash lntypes.Hash) InvoiceRef { - return InvoiceRef{ - payHash: payHash, - } -} - -// InvoiceRefByHashAndAddr creates an InvoiceRef that first queries for an -// invoice by the provided payment address, falling back to the payment hash if -// the payment address is unknown. -func InvoiceRefByHashAndAddr(payHash lntypes.Hash, - payAddr [32]byte) InvoiceRef { - - return InvoiceRef{ - payHash: payHash, - payAddr: &payAddr, - } -} - -// PayHash returns the target invoice's payment hash. -func (r InvoiceRef) PayHash() lntypes.Hash { - return r.payHash -} - -// PayAddr returns the optional payment address of the target invoice. -// -// NOTE: This value may be nil. -func (r InvoiceRef) PayAddr() *[32]byte { - if r.payAddr != nil { - addr := *r.payAddr - return &addr - } - return nil -} - -// String returns a human-readable representation of an InvoiceRef. -func (r InvoiceRef) String() string { - if r.payAddr != nil { - return fmt.Sprintf("(pay_hash=%v, pay_addr=%x)", r.payHash, *r.payAddr) - } - return fmt.Sprintf("(pay_hash=%v)", r.payHash) -} - -// ContractState describes the state the invoice is in. -type ContractState uint8 - -const ( - // ContractOpen means the invoice has only been created. - ContractOpen ContractState = 0 - - // ContractSettled means the htlc is settled and the invoice has been paid. - ContractSettled ContractState = 1 - - // ContractCanceled means the invoice has been canceled. - ContractCanceled ContractState = 2 - - // ContractAccepted means the HTLC has been accepted but not settled yet. - ContractAccepted ContractState = 3 -) - -// String returns a human readable identifier for the ContractState type. 
-func (c ContractState) String() string { - switch c { - case ContractOpen: - return "Open" - case ContractSettled: - return "Settled" - case ContractCanceled: - return "Canceled" - case ContractAccepted: - return "Accepted" - } - - return "Unknown" -} - -// ContractTerm is a companion struct to the Invoice struct. This struct houses -// the necessary conditions required before the invoice can be considered fully -// settled by the payee. -type ContractTerm struct { - // FinalCltvDelta is the minimum required number of blocks before htlc - // expiry when the invoice is accepted. - FinalCltvDelta int32 - - // Expiry defines how long after creation this invoice should expire. - Expiry time.Duration - - // PaymentPreimage is the preimage which is to be revealed in the - // occasion that an HTLC paying to the hash of this preimage is - // extended. Set to nil if the preimage isn't known yet. - PaymentPreimage *lntypes.Preimage - - // Value is the expected amount of milli-satoshis to be paid to an HTLC - // which can be satisfied by the above preimage. - Value lnwire.MilliSatoshi - - // PaymentAddr is a randomly generated value include in the MPP record - // by the sender to prevent probing of the receiver. - PaymentAddr [32]byte - - // Features is the feature vectors advertised on the payment request. - Features *lnwire.FeatureVector -} - -// String returns a human-readable description of the prominent contract terms. -func (c ContractTerm) String() string { - return fmt.Sprintf("amt=%v, expiry=%v, final_cltv_delta=%v", c.Value, - c.Expiry, c.FinalCltvDelta) -} - -// Invoice is a payment invoice generated by a payee in order to request -// payment for some good or service. The inclusion of invoices within Lightning -// creates a payment work flow for merchants very similar to that of the -// existing financial system within PayPal, etc. 
Invoices are added to the -// database when a payment is requested, then can be settled manually once the -// payment is received at the upper layer. For record keeping purposes, -// invoices are never deleted from the database, instead a bit is toggled -// denoting the invoice has been fully settled. Within the database, all -// invoices must have a unique payment hash which is generated by taking the -// sha256 of the payment preimage. -type Invoice struct { - // Memo is an optional memo to be stored along side an invoice. The - // memo may contain further details pertaining to the invoice itself, - // or any other message which fits within the size constraints. - Memo []byte - - // PaymentRequest is the encoded payment request for this invoice. For - // spontaneous (keysend) payments, this field will be empty. - PaymentRequest []byte - - // CreationDate is the exact time the invoice was created. - CreationDate time.Time - - // SettleDate is the exact time the invoice was settled. - SettleDate time.Time - - // Terms are the contractual payment terms of the invoice. Once all the - // terms have been satisfied by the payer, then the invoice can be - // considered fully fulfilled. - // - // TODO(roasbeef): later allow for multiple terms to fulfill the final - // invoice: payment fragmentation, etc. - Terms ContractTerm - - // AddIndex is an auto-incrementing integer that acts as a - // monotonically increasing sequence number for all invoices created. - // Clients can then use this field as a "checkpoint" of sorts when - // implementing a streaming RPC to notify consumers of instances where - // an invoice has been added before they re-connected. - // - // NOTE: This index starts at 1. - AddIndex uint64 - - // SettleIndex is an auto-incrementing integer that acts as a - // monotonically increasing sequence number for all settled invoices. 
- // Clients can then use this field as a "checkpoint" of sorts when - // implementing a streaming RPC to notify consumers of instances where - // an invoice has been settled before they re-connected. - // - // NOTE: This index starts at 1. - SettleIndex uint64 - - // State describes the state the invoice is in. - State ContractState - - // AmtPaid is the final amount that we ultimately accepted for pay for - // this invoice. We specify this value independently as it's possible - // that the invoice originally didn't specify an amount, or the sender - // overpaid. - AmtPaid lnwire.MilliSatoshi - - // Htlcs records all htlcs that paid to this invoice. Some of these - // htlcs may have been marked as canceled. - Htlcs map[CircuitKey]*InvoiceHTLC - - // HodlInvoice indicates whether the invoice should be held in the - // Accepted state or be settled right away. - HodlInvoice bool -} - -// HtlcState defines the states an htlc paying to an invoice can be in. -type HtlcState uint8 - -const ( - // HtlcStateAccepted indicates the htlc is locked-in, but not resolved. - HtlcStateAccepted HtlcState = iota - - // HtlcStateCanceled indicates the htlc is canceled back to the - // sender. - HtlcStateCanceled - - // HtlcStateSettled indicates the htlc is settled. - HtlcStateSettled -) - -// InvoiceHTLC contains details about an htlc paying to this invoice. -type InvoiceHTLC struct { - // Amt is the amount that is carried by this htlc. - Amt lnwire.MilliSatoshi - - // MppTotalAmt is a field for mpp that indicates the expected total - // amount. - MppTotalAmt lnwire.MilliSatoshi - - // AcceptHeight is the block height at which the invoice registry - // decided to accept this htlc as a payment to the invoice. At this - // height, the invoice cltv delay must have been met. - AcceptHeight uint32 - - // AcceptTime is the wall clock time at which the invoice registry - // decided to accept the htlc. 
- AcceptTime time.Time - - // ResolveTime is the wall clock time at which the invoice registry - // decided to settle the htlc. - ResolveTime time.Time - - // Expiry is the expiry height of this htlc. - Expiry uint32 - - // State indicates the state the invoice htlc is currently in. A - // canceled htlc isn't just removed from the invoice htlcs map, because - // we need AcceptHeight to properly cancel the htlc back. - State HtlcState - - // CustomRecords contains the custom key/value pairs that accompanied - // the htlc. - CustomRecords record.CustomSet -} - -// HtlcAcceptDesc describes the details of a newly accepted htlc. -type HtlcAcceptDesc struct { - // AcceptHeight is the block height at which this htlc was accepted. - AcceptHeight int32 - - // Amt is the amount that is carried by this htlc. - Amt lnwire.MilliSatoshi - - // MppTotalAmt is a field for mpp that indicates the expected total - // amount. - MppTotalAmt lnwire.MilliSatoshi - - // Expiry is the expiry height of this htlc. - Expiry uint32 - - // CustomRecords contains the custom key/value pairs that accompanied - // the htlc. - CustomRecords record.CustomSet -} - -// InvoiceUpdateDesc describes the changes that should be applied to the -// invoice. -type InvoiceUpdateDesc struct { - // State is the new state that this invoice should progress to. If nil, - // the state is left unchanged. - State *InvoiceStateUpdateDesc - - // CancelHtlcs describes the htlcs that need to be canceled. - CancelHtlcs map[CircuitKey]struct{} - - // AddHtlcs describes the newly accepted htlcs that need to be added to - // the invoice. - AddHtlcs map[CircuitKey]*HtlcAcceptDesc -} - -// InvoiceStateUpdateDesc describes an invoice-level state transition. -type InvoiceStateUpdateDesc struct { - // NewState is the new state that this invoice should progress to. - NewState ContractState - - // Preimage must be set to the preimage when NewState is settled. 
- Preimage *lntypes.Preimage -} - -// InvoiceUpdateCallback is a callback used in the db transaction to update the -// invoice. -type InvoiceUpdateCallback = func(invoice *Invoice) (*InvoiceUpdateDesc, er.R) - -func validateInvoice(i *Invoice, paymentHash lntypes.Hash) er.R { - // Avoid conflicts with all-zeroes magic value in the database. - if paymentHash == unknownPreimage.Hash() { - return er.Errorf("cannot use hash of all-zeroes preimage") - } - - if len(i.Memo) > MaxMemoSize { - return er.Errorf("max length a memo is %v, and invoice "+ - "of length %v was provided", MaxMemoSize, len(i.Memo)) - } - if len(i.PaymentRequest) > MaxPaymentRequestSize { - return er.Errorf("max length of payment request is %v, length "+ - "provided was %v", MaxPaymentRequestSize, - len(i.PaymentRequest)) - } - if i.Terms.Features == nil { - return er.New("invoice must have a feature vector") - } - - if i.Terms.PaymentPreimage == nil && !i.HodlInvoice { - return er.New("non-hodl invoices must have a preimage") - } - return nil -} - -// IsPending returns ture if the invoice is in ContractOpen state. -func (i *Invoice) IsPending() bool { - return i.State == ContractOpen || i.State == ContractAccepted -} - -// AddInvoice inserts the targeted invoice into the database. If the invoice has -// *any* payment hashes which already exists within the database, then the -// insertion will be aborted and rejected due to the strict policy banning any -// duplicate payment hashes. A side effect of this function is that it sets -// AddIndex on newInvoice. 
-func (d *DB) AddInvoice(newInvoice *Invoice, paymentHash lntypes.Hash) ( - uint64, er.R) { - - if err := validateInvoice(newInvoice, paymentHash); err != nil { - return 0, err - } - - var invoiceAddIndex uint64 - err := kvdb.Update(d, func(tx kvdb.RwTx) er.R { - invoices, err := tx.CreateTopLevelBucket(invoiceBucket) - if err != nil { - return err - } - - invoiceIndex, err := invoices.CreateBucketIfNotExists( - invoiceIndexBucket, - ) - if err != nil { - return err - } - addIndex, err := invoices.CreateBucketIfNotExists( - addIndexBucket, - ) - if err != nil { - return err - } - - // Ensure that an invoice an identical payment hash doesn't - // already exist within the index. - if invoiceIndex.Get(paymentHash[:]) != nil { - return ErrDuplicateInvoice.Default() - } - - // Check that we aren't inserting an invoice with a duplicate - // payment address. The all-zeros payment address is - // special-cased to support legacy keysend invoices which don't - // assign one. This is safe since later we also will avoid - // indexing them and avoid collisions. - payAddrIndex := tx.ReadWriteBucket(payAddrIndexBucket) - if newInvoice.Terms.PaymentAddr != BlankPayAddr { - if payAddrIndex.Get(newInvoice.Terms.PaymentAddr[:]) != nil { - return ErrDuplicatePayAddr.Default() - } - } - - // If the current running payment ID counter hasn't yet been - // created, then create it now. 
- var invoiceNum uint32 - invoiceCounter := invoiceIndex.Get(numInvoicesKey) - if invoiceCounter == nil { - var scratch [4]byte - byteOrder.PutUint32(scratch[:], invoiceNum) - err := invoiceIndex.Put(numInvoicesKey, scratch[:]) - if err != nil { - return err - } - } else { - invoiceNum = byteOrder.Uint32(invoiceCounter) - } - - newIndex, errr := putInvoice( - invoices, invoiceIndex, payAddrIndex, addIndex, - newInvoice, invoiceNum, paymentHash, - ) - if errr != nil { - return errr - } - - invoiceAddIndex = newIndex - return nil - }, func() { - invoiceAddIndex = 0 - }) - if err != nil { - return 0, err - } - - return invoiceAddIndex, err -} - -// InvoicesAddedSince can be used by callers to seek into the event time series -// of all the invoices added in the database. The specified sinceAddIndex -// should be the highest add index that the caller knows of. This method will -// return all invoices with an add index greater than the specified -// sinceAddIndex. -// -// NOTE: The index starts from 1, as a result. We enforce that specifying a -// value below the starting index value is a noop. -func (d *DB) InvoicesAddedSince(sinceAddIndex uint64) ([]Invoice, er.R) { - var newInvoices []Invoice - - // If an index of zero was specified, then in order to maintain - // backwards compat, we won't send out any new invoices. - if sinceAddIndex == 0 { - return newInvoices, nil - } - - var startIndex [8]byte - byteOrder.PutUint64(startIndex[:], sinceAddIndex) - - err := kvdb.View(d, func(tx kvdb.RTx) er.R { - invoices := tx.ReadBucket(invoiceBucket) - if invoices == nil { - return nil - } - - addIndex := invoices.NestedReadBucket(addIndexBucket) - if addIndex == nil { - return nil - } - - // We'll now run through each entry in the add index starting - // at our starting index. We'll continue until we reach the - // very end of the current key space. 
- invoiceCursor := addIndex.ReadCursor() - - // We'll seek to the starting index, then manually advance the - // cursor in order to skip the entry with the since add index. - invoiceCursor.Seek(startIndex[:]) - addSeqNo, invoiceKey := invoiceCursor.Next() - - for ; addSeqNo != nil && bytes.Compare(addSeqNo, startIndex[:]) > 0; addSeqNo, invoiceKey = invoiceCursor.Next() { - - // For each key found, we'll look up the actual - // invoice, then accumulate it into our return value. - invoice, err := fetchInvoice(invoiceKey, invoices) - if err != nil { - return err - } - - newInvoices = append(newInvoices, invoice) - } - - return nil - }, func() { - newInvoices = nil - }) - if err != nil { - return nil, err - } - - return newInvoices, nil -} - -// LookupInvoice attempts to look up an invoice according to its 32 byte -// payment hash. If an invoice which can settle the HTLC identified by the -// passed payment hash isn't found, then an error is returned. Otherwise, the -// full invoice is returned. Before setting the incoming HTLC, the values -// SHOULD be checked to ensure the payer meets the agreed upon contractual -// terms of the payment. -func (d *DB) LookupInvoice(ref InvoiceRef) (Invoice, er.R) { - var invoice Invoice - err := kvdb.View(d, func(tx kvdb.RTx) er.R { - invoices := tx.ReadBucket(invoiceBucket) - if invoices == nil { - return ErrNoInvoicesCreated.Default() - } - invoiceIndex := invoices.NestedReadBucket(invoiceIndexBucket) - if invoiceIndex == nil { - return ErrNoInvoicesCreated.Default() - } - payAddrIndex := tx.ReadBucket(payAddrIndexBucket) - - // Retrieve the invoice number for this invoice using the - // provided invoice reference. - invoiceNum, err := fetchInvoiceNumByRef( - invoiceIndex, payAddrIndex, ref, - ) - if err != nil { - return err - } - - // An invoice was found, retrieve the remainder of the invoice - // body. 
- i, err := fetchInvoice(invoiceNum, invoices) - if err != nil { - return err - } - invoice = i - - return nil - }, func() {}) - if err != nil { - return invoice, err - } - - return invoice, nil -} - -// fetchInvoiceNumByRef retrieve the invoice number for the provided invoice -// reference. The payment address will be treated as the primary key, falling -// back to the payment hash if nothing is found for the payment address. An -// error is returned if the invoice is not found. -func fetchInvoiceNumByRef(invoiceIndex, payAddrIndex kvdb.RBucket, - ref InvoiceRef) ([]byte, er.R) { - - payHash := ref.PayHash() - payAddr := ref.PayAddr() - - var ( - invoiceNumByHash = invoiceIndex.Get(payHash[:]) - invoiceNumByAddr []byte - ) - if payAddr != nil { - // Only allow lookups for payment address if it is not a blank - // payment address, which is a special-cased value for legacy - // keysend invoices. - if *payAddr != BlankPayAddr { - invoiceNumByAddr = payAddrIndex.Get(payAddr[:]) - } - } - - switch { - - // If payment address and payment hash both reference an existing - // invoice, ensure they reference the _same_ invoice. - case invoiceNumByAddr != nil && invoiceNumByHash != nil: - if !bytes.Equal(invoiceNumByAddr, invoiceNumByHash) { - return nil, ErrInvRefEquivocation.Default() - } - - return invoiceNumByAddr, nil - - // If we were only able to reference the invoice by hash, return the - // corresponding invoice number. This can happen when no payment address - // was provided, or if it didn't match anything in our records. - case invoiceNumByHash != nil: - return invoiceNumByHash, nil - - // Otherwise we don't know of the target invoice. - default: - return nil, ErrInvoiceNotFound.Default() - } -} - -// ScanInvoices scans trough all invoices and calls the passed scanFunc for -// for each invoice with its respective payment hash. 
Additionally a reset() -// closure is passed which is used to reset/initialize partial results and also -// to signal if the kvdb.View transaction has been retried. -func (d *DB) ScanInvoices( - scanFunc func(lntypes.Hash, *Invoice) er.R, reset func()) er.R { - - return kvdb.View(d, func(tx kvdb.RTx) er.R { - invoices := tx.ReadBucket(invoiceBucket) - if invoices == nil { - return ErrNoInvoicesCreated.Default() - } - - invoiceIndex := invoices.NestedReadBucket(invoiceIndexBucket) - if invoiceIndex == nil { - // Mask the error if there's no invoice - // index as that simply means there are no - // invoices added yet to the DB. In this case - // we simply return an empty list. - return nil - } - - return invoiceIndex.ForEach(func(k, v []byte) er.R { - // Skip the special numInvoicesKey as that does not - // point to a valid invoice. - if bytes.Equal(k, numInvoicesKey) { - return nil - } - - if v == nil { - return nil - } - - invoice, err := fetchInvoice(v, invoices) - if err != nil { - return err - } - - var paymentHash lntypes.Hash - copy(paymentHash[:], k) - - return scanFunc(paymentHash, &invoice) - }) - }, reset) -} - -// InvoiceQuery represents a query to the invoice database. The query allows a -// caller to retrieve all invoices starting from a particular add index and -// limit the number of results returned. -type InvoiceQuery struct { - // IndexOffset is the offset within the add indices to start at. This - // can be used to start the response at a particular invoice. - IndexOffset uint64 - - // NumMaxInvoices is the maximum number of invoices that should be - // starting from the add index. - NumMaxInvoices uint64 - - // PendingOnly, if set, returns unsettled invoices starting from the - // add index. - PendingOnly bool - - // Reversed, if set, indicates that the invoices returned should start - // from the IndexOffset and go backwards. - Reversed bool -} - -// InvoiceSlice is the response to a invoice query. 
It includes the original -// query, the set of invoices that match the query, and an integer which -// represents the offset index of the last item in the set of returned invoices. -// This integer allows callers to resume their query using this offset in the -// event that the query's response exceeds the maximum number of returnable -// invoices. -type InvoiceSlice struct { - InvoiceQuery - - // Invoices is the set of invoices that matched the query above. - Invoices []Invoice - - // FirstIndexOffset is the index of the first element in the set of - // returned Invoices above. Callers can use this to resume their query - // in the event that the slice has too many events to fit into a single - // response. - FirstIndexOffset uint64 - - // LastIndexOffset is the index of the last element in the set of - // returned Invoices above. Callers can use this to resume their query - // in the event that the slice has too many events to fit into a single - // response. - LastIndexOffset uint64 -} - -// QueryInvoices allows a caller to query the invoice database for invoices -// within the specified add index range. -func (d *DB) QueryInvoices(q InvoiceQuery) (InvoiceSlice, er.R) { - var resp InvoiceSlice - - err := kvdb.View(d, func(tx kvdb.RTx) er.R { - // If the bucket wasn't found, then there aren't any invoices - // within the database yet, so we can simply exit. - invoices := tx.ReadBucket(invoiceBucket) - if invoices == nil { - return ErrNoInvoicesCreated.Default() - } - - // Get the add index bucket which we will use to iterate through - // our indexed invoices. - invoiceAddIndex := invoices.NestedReadBucket(addIndexBucket) - if invoiceAddIndex == nil { - return ErrNoInvoicesCreated.Default() - } - - // Create a paginator which reads from our add index bucket with - // the parameters provided by the invoice query. 
- paginator := newPaginator( - invoiceAddIndex.ReadCursor(), q.Reversed, q.IndexOffset, - q.NumMaxInvoices, - ) - - // accumulateInvoices looks up an invoice based on the index we - // are given, adds it to our set of invoices if it has the right - // characteristics for our query and returns the number of items - // we have added to our set of invoices. - accumulateInvoices := func(_, indexValue []byte) (bool, er.R) { - invoice, err := fetchInvoice(indexValue, invoices) - if err != nil { - return false, err - } - - // Skip any settled or canceled invoices if the caller - // is only interested in pending ones. - if q.PendingOnly && !invoice.IsPending() { - return false, nil - } - - // At this point, we've exhausted the offset, so we'll - // begin collecting invoices found within the range. - resp.Invoices = append(resp.Invoices, invoice) - return true, nil - } - - // Query our paginator using accumulateInvoices to build up a - // set of invoices. - if err := paginator.query(accumulateInvoices); err != nil { - return err - } - - // If we iterated through the add index in reverse order, then - // we'll need to reverse the slice of invoices to return them in - // forward order. - if q.Reversed { - numInvoices := len(resp.Invoices) - for i := 0; i < numInvoices/2; i++ { - opposite := numInvoices - i - 1 - resp.Invoices[i], resp.Invoices[opposite] = - resp.Invoices[opposite], resp.Invoices[i] - } - } - - return nil - }, func() { - resp = InvoiceSlice{ - InvoiceQuery: q, - } - }) - if err != nil && !ErrNoInvoicesCreated.Is(err) { - return resp, err - } - - // Finally, record the indexes of the first and last invoices returned - // so that the caller can resume from this point later on. - if len(resp.Invoices) > 0 { - resp.FirstIndexOffset = resp.Invoices[0].AddIndex - resp.LastIndexOffset = resp.Invoices[len(resp.Invoices)-1].AddIndex - } - - return resp, nil -} - -// UpdateInvoice attempts to update an invoice corresponding to the passed -// payment hash. 
If an invoice matching the passed payment hash doesn't exist -// within the database, then the action will fail with a "not found" error. -// -// The update is performed inside the same database transaction that fetches the -// invoice and is therefore atomic. The fields to update are controlled by the -// supplied callback. -func (d *DB) UpdateInvoice(ref InvoiceRef, - callback InvoiceUpdateCallback) (*Invoice, er.R) { - - var updatedInvoice *Invoice - err := kvdb.Update(d, func(tx kvdb.RwTx) er.R { - invoices, err := tx.CreateTopLevelBucket(invoiceBucket) - if err != nil { - return err - } - invoiceIndex, err := invoices.CreateBucketIfNotExists( - invoiceIndexBucket, - ) - if err != nil { - return err - } - settleIndex, err := invoices.CreateBucketIfNotExists( - settleIndexBucket, - ) - if err != nil { - return err - } - payAddrIndex := tx.ReadBucket(payAddrIndexBucket) - - // Retrieve the invoice number for this invoice using the - // provided invoice reference. - invoiceNum, errr := fetchInvoiceNumByRef( - invoiceIndex, payAddrIndex, ref, - ) - if errr != nil { - return errr - - } - payHash := ref.PayHash() - updatedInvoice, errr = d.updateInvoice( - payHash, invoices, settleIndex, invoiceNum, - callback, - ) - - return errr - }, func() { - updatedInvoice = nil - }) - - return updatedInvoice, err -} - -// InvoicesSettledSince can be used by callers to catch up any settled invoices -// they missed within the settled invoice time series. We'll return all known -// settled invoice that have a settle index higher than the passed -// sinceSettleIndex. -// -// NOTE: The index starts from 1, as a result. We enforce that specifying a -// value below the starting index value is a noop. -func (d *DB) InvoicesSettledSince(sinceSettleIndex uint64) ([]Invoice, er.R) { - var settledInvoices []Invoice - - // If an index of zero was specified, then in order to maintain - // backwards compat, we won't send out any new invoices. 
- if sinceSettleIndex == 0 { - return settledInvoices, nil - } - - var startIndex [8]byte - byteOrder.PutUint64(startIndex[:], sinceSettleIndex) - - err := kvdb.View(d, func(tx kvdb.RTx) er.R { - invoices := tx.ReadBucket(invoiceBucket) - if invoices == nil { - return nil - } - - settleIndex := invoices.NestedReadBucket(settleIndexBucket) - if settleIndex == nil { - return nil - } - - // We'll now run through each entry in the add index starting - // at our starting index. We'll continue until we reach the - // very end of the current key space. - invoiceCursor := settleIndex.ReadCursor() - - // We'll seek to the starting index, then manually advance the - // cursor in order to skip the entry with the since add index. - invoiceCursor.Seek(startIndex[:]) - seqNo, invoiceKey := invoiceCursor.Next() - - for ; seqNo != nil && bytes.Compare(seqNo, startIndex[:]) > 0; seqNo, invoiceKey = invoiceCursor.Next() { - - // For each key found, we'll look up the actual - // invoice, then accumulate it into our return value. - invoice, err := fetchInvoice(invoiceKey, invoices) - if err != nil { - return err - } - - settledInvoices = append(settledInvoices, invoice) - } - - return nil - }, func() { - settledInvoices = nil - }) - if err != nil { - return nil, err - } - - return settledInvoices, nil -} - -func putInvoice(invoices, invoiceIndex, payAddrIndex, addIndex kvdb.RwBucket, - i *Invoice, invoiceNum uint32, paymentHash lntypes.Hash) ( - uint64, er.R) { - - // Create the invoice key which is just the big-endian representation - // of the invoice number. - var invoiceKey [4]byte - byteOrder.PutUint32(invoiceKey[:], invoiceNum) - - // Increment the num invoice counter index so the next invoice bares - // the proper ID. - var scratch [4]byte - invoiceCounter := invoiceNum + 1 - byteOrder.PutUint32(scratch[:], invoiceCounter) - if err := invoiceIndex.Put(numInvoicesKey, scratch[:]); err != nil { - return 0, err - } - - // Add the payment hash to the invoice index. 
This will let us quickly - // identify if we can settle an incoming payment, and also to possibly - // allow a single invoice to have multiple payment installations. - err := invoiceIndex.Put(paymentHash[:], invoiceKey[:]) - if err != nil { - return 0, err - } - // Add the invoice to the payment address index, but only if the invoice - // has a non-zero payment address. The all-zero payment address is still - // in use by legacy keysend, so we special-case here to avoid - // collisions. - if i.Terms.PaymentAddr != BlankPayAddr { - err = payAddrIndex.Put(i.Terms.PaymentAddr[:], invoiceKey[:]) - if err != nil { - return 0, err - } - } - - // Next, we'll obtain the next add invoice index (sequence - // number), so we can properly place this invoice within this - // event stream. - nextAddSeqNo, errr := addIndex.NextSequence() - if errr != nil { - return 0, errr - } - - // With the next sequence obtained, we'll updating the event series in - // the add index bucket to map this current add counter to the index of - // this new invoice. - var seqNoBytes [8]byte - byteOrder.PutUint64(seqNoBytes[:], nextAddSeqNo) - if err := addIndex.Put(seqNoBytes[:], invoiceKey[:]); err != nil { - return 0, err - } - - i.AddIndex = nextAddSeqNo - - // Finally, serialize the invoice itself to be written to the disk. - var buf bytes.Buffer - if err := serializeInvoice(&buf, i); err != nil { - return 0, err - } - - if err := invoices.Put(invoiceKey[:], buf.Bytes()); err != nil { - return 0, err - } - - return nextAddSeqNo, nil -} - -// serializeInvoice serializes an invoice to a writer. -// -// Note: this function is in use for a migration. Before making changes that -// would modify the on disk format, make a copy of the original code and store -// it with the migration. 
-func serializeInvoice(w io.Writer, i *Invoice) er.R { - creationDateBytes, errr := i.CreationDate.MarshalBinary() - if errr != nil { - return er.E(errr) - } - - settleDateBytes, errr := i.SettleDate.MarshalBinary() - if errr != nil { - return er.E(errr) - } - - var fb bytes.Buffer - err := i.Terms.Features.EncodeBase256(&fb) - if err != nil { - return err - } - featureBytes := fb.Bytes() - - preimage := [32]byte(unknownPreimage) - if i.Terms.PaymentPreimage != nil { - preimage = *i.Terms.PaymentPreimage - if preimage == unknownPreimage { - return er.New("cannot use all-zeroes preimage") - } - } - value := uint64(i.Terms.Value) - cltvDelta := uint32(i.Terms.FinalCltvDelta) - expiry := uint64(i.Terms.Expiry) - - amtPaid := uint64(i.AmtPaid) - state := uint8(i.State) - - var hodlInvoice uint8 - if i.HodlInvoice { - hodlInvoice = 1 - } - - tlvStream, err := tlv.NewStream( - // Memo and payreq. - tlv.MakePrimitiveRecord(memoType, &i.Memo), - tlv.MakePrimitiveRecord(payReqType, &i.PaymentRequest), - - // Add/settle metadata. - tlv.MakePrimitiveRecord(createTimeType, &creationDateBytes), - tlv.MakePrimitiveRecord(settleTimeType, &settleDateBytes), - tlv.MakePrimitiveRecord(addIndexType, &i.AddIndex), - tlv.MakePrimitiveRecord(settleIndexType, &i.SettleIndex), - - // Terms. - tlv.MakePrimitiveRecord(preimageType, &preimage), - tlv.MakePrimitiveRecord(valueType, &value), - tlv.MakePrimitiveRecord(cltvDeltaType, &cltvDelta), - tlv.MakePrimitiveRecord(expiryType, &expiry), - tlv.MakePrimitiveRecord(paymentAddrType, &i.Terms.PaymentAddr), - tlv.MakePrimitiveRecord(featuresType, &featureBytes), - - // Invoice state. 
- tlv.MakePrimitiveRecord(invStateType, &state), - tlv.MakePrimitiveRecord(amtPaidType, &amtPaid), - - tlv.MakePrimitiveRecord(hodlInvoiceType, &hodlInvoice), - ) - if err != nil { - return err - } - - var b bytes.Buffer - if err = tlvStream.Encode(&b); err != nil { - return err - } - - err = util.WriteBin(w, byteOrder, uint64(b.Len())) - if err != nil { - return err - } - - if _, err = util.Write(w, b.Bytes()); err != nil { - return err - } - - return serializeHtlcs(w, i.Htlcs) -} - -// serializeHtlcs serializes a map containing circuit keys and invoice htlcs to -// a writer. -func serializeHtlcs(w io.Writer, htlcs map[CircuitKey]*InvoiceHTLC) er.R { - for key, htlc := range htlcs { - // Encode the htlc in a tlv stream. - chanID := key.ChanID.ToUint64() - amt := uint64(htlc.Amt) - mppTotalAmt := uint64(htlc.MppTotalAmt) - acceptTime := uint64(htlc.AcceptTime.UnixNano()) - resolveTime := uint64(htlc.ResolveTime.UnixNano()) - state := uint8(htlc.State) - - var records []tlv.Record - records = append(records, - tlv.MakePrimitiveRecord(chanIDType, &chanID), - tlv.MakePrimitiveRecord(htlcIDType, &key.HtlcID), - tlv.MakePrimitiveRecord(amtType, &amt), - tlv.MakePrimitiveRecord( - acceptHeightType, &htlc.AcceptHeight, - ), - tlv.MakePrimitiveRecord(acceptTimeType, &acceptTime), - tlv.MakePrimitiveRecord(resolveTimeType, &resolveTime), - tlv.MakePrimitiveRecord(expiryHeightType, &htlc.Expiry), - tlv.MakePrimitiveRecord(htlcStateType, &state), - tlv.MakePrimitiveRecord(mppTotalAmtType, &mppTotalAmt), - ) - - // Convert the custom records to tlv.Record types that are ready - // for serialization. - customRecords := tlv.MapToRecords(htlc.CustomRecords) - - // Append the custom records. Their ids are in the experimental - // range and sorted, so there is no need to sort again. - records = append(records, customRecords...) - - tlvStream, err := tlv.NewStream(records...) 
- if err != nil { - return err - } - - var b bytes.Buffer - if err := tlvStream.Encode(&b); err != nil { - return err - } - - // Write the length of the tlv stream followed by the stream - // bytes. - err = util.WriteBin(w, byteOrder, uint64(b.Len())) - if err != nil { - return err - } - - if _, err := util.Write(w, b.Bytes()); err != nil { - return err - } - } - - return nil -} - -func fetchInvoice(invoiceNum []byte, invoices kvdb.RBucket) (Invoice, er.R) { - invoiceBytes := invoices.Get(invoiceNum) - if invoiceBytes == nil { - return Invoice{}, ErrInvoiceNotFound.Default() - } - - invoiceReader := bytes.NewReader(invoiceBytes) - - return deserializeInvoice(invoiceReader) -} - -func deserializeInvoice(r io.Reader) (Invoice, er.R) { - var ( - preimageBytes [32]byte - value uint64 - cltvDelta uint32 - expiry uint64 - amtPaid uint64 - state uint8 - hodlInvoice uint8 - - creationDateBytes []byte - settleDateBytes []byte - featureBytes []byte - ) - - var i Invoice - tlvStream, err := tlv.NewStream( - // Memo and payreq. - tlv.MakePrimitiveRecord(memoType, &i.Memo), - tlv.MakePrimitiveRecord(payReqType, &i.PaymentRequest), - - // Add/settle metadata. - tlv.MakePrimitiveRecord(createTimeType, &creationDateBytes), - tlv.MakePrimitiveRecord(settleTimeType, &settleDateBytes), - tlv.MakePrimitiveRecord(addIndexType, &i.AddIndex), - tlv.MakePrimitiveRecord(settleIndexType, &i.SettleIndex), - - // Terms. - tlv.MakePrimitiveRecord(preimageType, &preimageBytes), - tlv.MakePrimitiveRecord(valueType, &value), - tlv.MakePrimitiveRecord(cltvDeltaType, &cltvDelta), - tlv.MakePrimitiveRecord(expiryType, &expiry), - tlv.MakePrimitiveRecord(paymentAddrType, &i.Terms.PaymentAddr), - tlv.MakePrimitiveRecord(featuresType, &featureBytes), - - // Invoice state. 
- tlv.MakePrimitiveRecord(invStateType, &state), - tlv.MakePrimitiveRecord(amtPaidType, &amtPaid), - - tlv.MakePrimitiveRecord(hodlInvoiceType, &hodlInvoice), - ) - if err != nil { - return i, err - } - - var bodyLen int64 - err = util.ReadBin(r, byteOrder, &bodyLen) - if err != nil { - return i, err - } - - lr := io.LimitReader(r, bodyLen) - if err = tlvStream.Decode(lr); err != nil { - return i, err - } - - preimage := lntypes.Preimage(preimageBytes) - if preimage != unknownPreimage { - i.Terms.PaymentPreimage = &preimage - } - - i.Terms.Value = lnwire.MilliSatoshi(value) - i.Terms.FinalCltvDelta = int32(cltvDelta) - i.Terms.Expiry = time.Duration(expiry) - i.AmtPaid = lnwire.MilliSatoshi(amtPaid) - i.State = ContractState(state) - - if hodlInvoice != 0 { - i.HodlInvoice = true - } - - errr := i.CreationDate.UnmarshalBinary(creationDateBytes) - if errr != nil { - return i, er.E(errr) - } - - errr = i.SettleDate.UnmarshalBinary(settleDateBytes) - if errr != nil { - return i, er.E(errr) - } - - rawFeatures := lnwire.NewRawFeatureVector() - err = rawFeatures.DecodeBase256( - bytes.NewReader(featureBytes), len(featureBytes), - ) - if err != nil { - return i, err - } - - i.Terms.Features = lnwire.NewFeatureVector( - rawFeatures, lnwire.Features, - ) - - i.Htlcs, err = deserializeHtlcs(r) - return i, err -} - -// deserializeHtlcs reads a list of invoice htlcs from a reader and returns it -// as a map. -func deserializeHtlcs(r io.Reader) (map[CircuitKey]*InvoiceHTLC, er.R) { - htlcs := make(map[CircuitKey]*InvoiceHTLC) - - for { - // Read the length of the tlv stream for this htlc. - var streamLen int64 - if err := util.ReadBin(r, byteOrder, &streamLen); err != nil { - if er.EOF.Is(err) { - break - } - - return nil, err - } - - // Limit the reader so that it stops at the end of this htlc's - // stream. - htlcReader := io.LimitReader(r, streamLen) - - // Decode the contents into the htlc fields. 
- var ( - htlc InvoiceHTLC - key CircuitKey - chanID uint64 - state uint8 - acceptTime, resolveTime uint64 - amt, mppTotalAmt uint64 - ) - tlvStream, err := tlv.NewStream( - tlv.MakePrimitiveRecord(chanIDType, &chanID), - tlv.MakePrimitiveRecord(htlcIDType, &key.HtlcID), - tlv.MakePrimitiveRecord(amtType, &amt), - tlv.MakePrimitiveRecord( - acceptHeightType, &htlc.AcceptHeight, - ), - tlv.MakePrimitiveRecord(acceptTimeType, &acceptTime), - tlv.MakePrimitiveRecord(resolveTimeType, &resolveTime), - tlv.MakePrimitiveRecord(expiryHeightType, &htlc.Expiry), - tlv.MakePrimitiveRecord(htlcStateType, &state), - tlv.MakePrimitiveRecord(mppTotalAmtType, &mppTotalAmt), - ) - if err != nil { - return nil, err - } - - parsedTypes, err := tlvStream.DecodeWithParsedTypes(htlcReader) - if err != nil { - return nil, err - } - - key.ChanID = lnwire.NewShortChanIDFromInt(chanID) - htlc.AcceptTime = time.Unix(0, int64(acceptTime)) - htlc.ResolveTime = time.Unix(0, int64(resolveTime)) - htlc.State = HtlcState(state) - htlc.Amt = lnwire.MilliSatoshi(amt) - htlc.MppTotalAmt = lnwire.MilliSatoshi(mppTotalAmt) - - // Reconstruct the custom records fields from the parsed types - // map return from the tlv parser. - htlc.CustomRecords = hop.NewCustomRecords(parsedTypes) - - htlcs[key] = &htlc - } - - return htlcs, nil -} - -// copySlice allocates a new slice and copies the source into it. -func copySlice(src []byte) []byte { - dest := make([]byte, len(src)) - copy(dest, src) - return dest -} - -// copyInvoiceHTLC makes a deep copy of the supplied invoice HTLC. -func copyInvoiceHTLC(src *InvoiceHTLC) *InvoiceHTLC { - result := *src - - // Make a copy of the CustomSet map. - result.CustomRecords = make(record.CustomSet) - for k, v := range src.CustomRecords { - result.CustomRecords[k] = v - } - - return &result -} - -// copyInvoice makes a deep copy of the supplied invoice. 
-func copyInvoice(src *Invoice) *Invoice { - dest := Invoice{ - Memo: copySlice(src.Memo), - PaymentRequest: copySlice(src.PaymentRequest), - CreationDate: src.CreationDate, - SettleDate: src.SettleDate, - Terms: src.Terms, - AddIndex: src.AddIndex, - SettleIndex: src.SettleIndex, - State: src.State, - AmtPaid: src.AmtPaid, - Htlcs: make( - map[CircuitKey]*InvoiceHTLC, len(src.Htlcs), - ), - HodlInvoice: src.HodlInvoice, - } - - dest.Terms.Features = src.Terms.Features.Clone() - - if src.Terms.PaymentPreimage != nil { - preimage := *src.Terms.PaymentPreimage - dest.Terms.PaymentPreimage = &preimage - } - - for k, v := range src.Htlcs { - dest.Htlcs[k] = copyInvoiceHTLC(v) - } - - return &dest -} - -// updateInvoice fetches the invoice, obtains the update descriptor from the -// callback and applies the updates in a single db transaction. -func (d *DB) updateInvoice(hash lntypes.Hash, invoices, settleIndex kvdb.RwBucket, - invoiceNum []byte, callback InvoiceUpdateCallback) (*Invoice, er.R) { - - invoice, err := fetchInvoice(invoiceNum, invoices) - if err != nil { - return nil, err - } - - // Create deep copy to prevent any accidental modification in the - // callback. - invoiceCopy := copyInvoice(&invoice) - - // Call the callback and obtain the update descriptor. - update, err := callback(invoiceCopy) - if err != nil { - return &invoice, err - } - - // If there is nothing to update, return early. - if update == nil { - return &invoice, nil - } - - now := d.clock.Now() - - // Update invoice state if the update descriptor indicates an invoice - // state change. - if update.State != nil { - err := updateInvoiceState(&invoice, hash, *update.State) - if err != nil { - return nil, err - } - - if update.State.NewState == ContractSettled { - err := setSettleMetaFields( - settleIndex, invoiceNum, &invoice, now, - ) - if err != nil { - return nil, err - } - } - } - - // Process add actions from update descriptor. 
- for key, htlcUpdate := range update.AddHtlcs { - if _, exists := invoice.Htlcs[key]; exists { - return nil, er.Errorf("duplicate add of htlc %v", key) - } - - // Force caller to supply htlc without custom records in a - // consistent way. - if htlcUpdate.CustomRecords == nil { - return nil, er.New("nil custom records map") - } - - htlc := &InvoiceHTLC{ - Amt: htlcUpdate.Amt, - MppTotalAmt: htlcUpdate.MppTotalAmt, - Expiry: htlcUpdate.Expiry, - AcceptHeight: uint32(htlcUpdate.AcceptHeight), - AcceptTime: now, - State: HtlcStateAccepted, - CustomRecords: htlcUpdate.CustomRecords, - } - - invoice.Htlcs[key] = htlc - } - - // Align htlc states with invoice state and recalculate amount paid. - var ( - amtPaid lnwire.MilliSatoshi - cancelHtlcs = update.CancelHtlcs - ) - for key, htlc := range invoice.Htlcs { - // Check whether this htlc needs to be canceled. If it does, - // update the htlc state to Canceled. - _, cancel := cancelHtlcs[key] - if cancel { - // Consistency check to verify that there is no overlap - // between the add and cancel sets. - if _, added := update.AddHtlcs[key]; added { - return nil, er.Errorf("added htlc %v canceled", - key) - } - - err := cancelSingleHtlc(now, htlc, invoice.State) - if err != nil { - return nil, err - } - - // Delete processed cancel action, so that we can check - // later that there are no actions left. - delete(cancelHtlcs, key) - - continue - } - - // The invoice state may have changed and this could have - // implications for the states of the individual htlcs. Align - // the htlc state with the current invoice state. - err := updateHtlc(now, htlc, invoice.State) - if err != nil { - return nil, err - } - - // Update the running amount paid to this invoice. We don't - // include accepted htlcs when the invoice is still open. 
- if invoice.State != ContractOpen && - (htlc.State == HtlcStateAccepted || - htlc.State == HtlcStateSettled) { - - amtPaid += htlc.Amt - } - } - invoice.AmtPaid = amtPaid - - // Verify that we didn't get an action for htlcs that are not present on - // the invoice. - if len(cancelHtlcs) > 0 { - return nil, er.New("cancel action on non-existent htlc(s)") - } - - // Reserialize and update invoice. - var buf bytes.Buffer - if err := serializeInvoice(&buf, &invoice); err != nil { - return nil, err - } - - if err := invoices.Put(invoiceNum[:], buf.Bytes()); err != nil { - return nil, err - } - - return &invoice, nil -} - -// updateInvoiceState validates and processes an invoice state update. -func updateInvoiceState(invoice *Invoice, hash lntypes.Hash, - update InvoiceStateUpdateDesc) er.R { - - // Returning to open is never allowed from any state. - if update.NewState == ContractOpen { - return ErrInvoiceCannotOpen.Default() - } - - switch invoice.State { - - // Once a contract is accepted, we can only transition to settled or - // canceled. Forbid transitioning back into this state. Otherwise this - // state is identical to ContractOpen, so we fallthrough to apply the - // same checks that we apply to open invoices. - case ContractAccepted: - if update.NewState == ContractAccepted { - return ErrInvoiceCannotAccept.Default() - } - - fallthrough - - // If a contract is open, permit a state transition to accepted, settled - // or canceled. The only restriction is on transitioning to settled - // where we ensure the preimage is valid. - case ContractOpen: - if update.NewState == ContractSettled { - // Validate preimage. - switch { - case update.Preimage != nil: - if update.Preimage.Hash() != hash { - return ErrInvoicePreimageMismatch.Default() - } - invoice.Terms.PaymentPreimage = update.Preimage - - case invoice.Terms.PaymentPreimage == nil: - return er.New("unknown preimage") - } - } - - // Once settled, we are in a terminal state. 
- case ContractSettled: - return ErrInvoiceAlreadySettled.Default() - - // Once canceled, we are in a terminal state. - case ContractCanceled: - return ErrInvoiceAlreadyCanceled.Default() - - default: - return er.New("unknown state transition") - } - - invoice.State = update.NewState - - return nil -} - -// cancelSingleHtlc validates cancelation of a single htlc and update its state. -func cancelSingleHtlc(resolveTime time.Time, htlc *InvoiceHTLC, - invState ContractState) er.R { - - // It is only possible to cancel individual htlcs on an open invoice. - if invState != ContractOpen { - return er.Errorf("htlc canceled on invoice in "+ - "state %v", invState) - } - - // It is only possible if the htlc is still pending. - if htlc.State != HtlcStateAccepted { - return er.Errorf("htlc canceled in state %v", - htlc.State) - } - - htlc.State = HtlcStateCanceled - htlc.ResolveTime = resolveTime - - return nil -} - -// updateHtlc aligns the state of an htlc with the given invoice state. -func updateHtlc(resolveTime time.Time, htlc *InvoiceHTLC, - invState ContractState) er.R { - - switch invState { - - case ContractSettled: - if htlc.State == HtlcStateAccepted { - htlc.State = HtlcStateSettled - htlc.ResolveTime = resolveTime - } - - case ContractCanceled: - switch htlc.State { - - case HtlcStateAccepted: - htlc.State = HtlcStateCanceled - htlc.ResolveTime = resolveTime - - case HtlcStateSettled: - return er.Errorf("cannot have a settled htlc with " + - "invoice in state canceled") - } - - case ContractOpen, ContractAccepted: - if htlc.State == HtlcStateSettled { - return er.Errorf("cannot have a settled htlc with "+ - "invoice in state %v", invState) - } - - default: - return er.New("unknown state transition") - } - - return nil -} - -// setSettleMetaFields updates the metadata associated with settlement of an -// invoice. 
-func setSettleMetaFields(settleIndex kvdb.RwBucket, invoiceNum []byte, - invoice *Invoice, now time.Time) er.R { - - // Now that we know the invoice hasn't already been settled, we'll - // update the settle index so we can place this settle event in the - // proper location within our time series. - nextSettleSeqNo, err := settleIndex.NextSequence() - if err != nil { - return err - } - - var seqNoBytes [8]byte - byteOrder.PutUint64(seqNoBytes[:], nextSettleSeqNo) - if err := settleIndex.Put(seqNoBytes[:], invoiceNum); err != nil { - return err - } - - invoice.SettleDate = now - invoice.SettleIndex = nextSettleSeqNo - - return nil -} - -// InvoiceDeleteRef holds a refererence to an invoice to be deleted. -type InvoiceDeleteRef struct { - // PayHash is the payment hash of the target invoice. All invoices are - // currently indexed by payment hash. - PayHash lntypes.Hash - - // PayAddr is the payment addr of the target invoice. Newer invoices - // (0.11 and up) are indexed by payment address in addition to payment - // hash, but pre 0.8 invoices do not have one at all. - PayAddr *[32]byte - - // AddIndex is the add index of the invoice. - AddIndex uint64 - - // SettleIndex is the settle index of the invoice. - SettleIndex uint64 -} - -// DeleteInvoice attempts to delete the passed invoices from the database in -// one transaction. The passed delete references hold all keys required to -// delete the invoices without also needing to deserialze them. 
-func (d *DB) DeleteInvoice(invoicesToDelete []InvoiceDeleteRef) er.R { - err := kvdb.Update(d, func(tx kvdb.RwTx) er.R { - invoices := tx.ReadWriteBucket(invoiceBucket) - if invoices == nil { - return ErrNoInvoicesCreated.Default() - } - - invoiceIndex := invoices.NestedReadWriteBucket( - invoiceIndexBucket, - ) - if invoiceIndex == nil { - return ErrNoInvoicesCreated.Default() - } - - invoiceAddIndex := invoices.NestedReadWriteBucket( - addIndexBucket, - ) - if invoiceAddIndex == nil { - return ErrNoInvoicesCreated.Default() - } - // settleIndex can be nil, as the bucket is created lazily - // when the first invoice is settled. - settleIndex := invoices.NestedReadWriteBucket(settleIndexBucket) - - payAddrIndex := tx.ReadWriteBucket(payAddrIndexBucket) - - for _, ref := range invoicesToDelete { - // Fetch the invoice key for using it to check for - // consistency and also to delete from the invoice index. - invoiceKey := invoiceIndex.Get(ref.PayHash[:]) - if invoiceKey == nil { - return ErrInvoiceNotFound.Default() - } - - err := invoiceIndex.Delete(ref.PayHash[:]) - if err != nil { - return err - } - - // Delete payment address index reference if there's a - // valid payment address passed. - if ref.PayAddr != nil { - // To ensure consistency check that the already - // fetched invoice key matches the one in the - // payment address index. - key := payAddrIndex.Get(ref.PayAddr[:]) - if !bytes.Equal(key, invoiceKey) { - return er.Errorf("unknown invoice") - } - - // Delete from the payment address index. - err := payAddrIndex.Delete(ref.PayAddr[:]) - if err != nil { - return err - } - } - - var addIndexKey [8]byte - byteOrder.PutUint64(addIndexKey[:], ref.AddIndex) - - // To ensure consistency check that the key stored in - // the add index also matches the previously fetched - // invoice key. - key := invoiceAddIndex.Get(addIndexKey[:]) - if !bytes.Equal(key, invoiceKey) { - return er.Errorf("unknown invoice") - } - - // Remove from the add index. 
- err = invoiceAddIndex.Delete(addIndexKey[:]) - if err != nil { - return err - } - - // Remove from the settle index if available and - // if the invoice is settled. - if settleIndex != nil && ref.SettleIndex > 0 { - var settleIndexKey [8]byte - byteOrder.PutUint64( - settleIndexKey[:], ref.SettleIndex, - ) - - // To ensure consistency check that the already - // fetched invoice key matches the one in the - // settle index - key := settleIndex.Get(settleIndexKey[:]) - if !bytes.Equal(key, invoiceKey) { - return er.Errorf("unknown invoice") - } - - err = settleIndex.Delete(settleIndexKey[:]) - if err != nil { - return err - } - } - - // Finally remove the serialized invoice from the - // invoice bucket. - err = invoices.Delete(invoiceKey) - if err != nil { - return err - } - } - - return nil - }, func() {}) - - return err -} diff --git a/lnd/channeldb/kvdb/backend.go b/lnd/channeldb/kvdb/backend.go deleted file mode 100644 index f186a7d8..00000000 --- a/lnd/channeldb/kvdb/backend.go +++ /dev/null @@ -1,250 +0,0 @@ -package kvdb - -import ( - "encoding/binary" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "time" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/pktlog/log" - _ "github.com/pkt-cash/pktd/pktwallet/walletdb/bdb" // Import to register backend. -) - -const ( - // DefaultTempDBFileName is the default name of the temporary bolt DB - // file that we'll use to atomically compact the primary DB file on - // startup. - DefaultTempDBFileName = "temp-dont-use.db" - - // LastCompactionFileNameSuffix is the suffix we append to the file name - // of a database file to record the timestamp when the last compaction - // occurred. - LastCompactionFileNameSuffix = ".last-compacted" -) - -var ( - byteOrder = binary.BigEndian -) - -// fileExists returns true if the file exists, and false otherwise. 
-func fileExists(path string) bool { - if _, err := os.Stat(path); err != nil { - if os.IsNotExist(err) { - return false - } - } - - return true -} - -// BoltBackendConfig is a struct that holds settings specific to the bolt -// database backend. -type BoltBackendConfig struct { - // DBPath is the directory path in which the database file should be - // stored. - DBPath string - - // DBFileName is the name of the database file. - DBFileName string - - // NoFreelistSync, if true, prevents the database from syncing its - // freelist to disk, resulting in improved performance at the expense of - // increased startup time. - NoFreelistSync bool - - // AutoCompact specifies if a Bolt based database backend should be - // automatically compacted on startup (if the minimum age of the - // database file is reached). This will require additional disk space - // for the compacted copy of the database but will result in an overall - // lower database size after the compaction. - AutoCompact bool - - // AutoCompactMinAge specifies the minimum time that must have passed - // since a bolt database file was last compacted for the compaction to - // be considered again. - AutoCompactMinAge time.Duration -} - -// GetBoltBackend opens (or creates if doesn't exits) a bbolt backed database -// and returns a kvdb.Backend wrapping it. -func GetBoltBackend(cfg *BoltBackendConfig) (Backend, er.R) { - dbFilePath := filepath.Join(cfg.DBPath, cfg.DBFileName) - - // Is this a new database? - if !fileExists(dbFilePath) { - if !fileExists(cfg.DBPath) { - if err := os.MkdirAll(cfg.DBPath, 0700); err != nil { - return nil, er.E(err) - } - } - - return Create(BoltBackendName, dbFilePath, cfg.NoFreelistSync) - } - - // This is an existing database. We might want to compact it on startup - // to free up some space. 
- if cfg.AutoCompact { - if err := compactAndSwap(cfg); err != nil { - return nil, err - } - } - - return Open(BoltBackendName, dbFilePath, cfg.NoFreelistSync) -} - -// compactAndSwap will attempt to write a new temporary DB file to disk with -// the compacted database content, then atomically swap (via rename) the old -// file for the new file by updating the name of the new file to the old. -func compactAndSwap(cfg *BoltBackendConfig) er.R { - sourceName := cfg.DBFileName - - // If the main DB file isn't set, then we can't proceed. - if sourceName == "" { - return er.Errorf("cannot compact DB with empty name") - } - sourceFilePath := filepath.Join(cfg.DBPath, sourceName) - tempDestFilePath := filepath.Join(cfg.DBPath, DefaultTempDBFileName) - - // Let's find out how long ago the last compaction of the source file - // occurred and possibly skip compacting it again now. - lastCompactionDate, err := lastCompactionDate(sourceFilePath) - if err != nil { - return er.Errorf("cannot determine last compaction date of "+ - "source DB file: %v", err) - } - compactAge := time.Since(lastCompactionDate) - if cfg.AutoCompactMinAge != 0 && compactAge <= cfg.AutoCompactMinAge { - log.Infof("Not compacting database file at %v, it was last "+ - "compacted at %v (%v ago), min age is set to %v", - sourceFilePath, lastCompactionDate, - compactAge.Truncate(time.Second), cfg.AutoCompactMinAge) - return nil - } - - log.Infof("Compacting database file at %v", sourceFilePath) - - // If the old temporary DB file still exists, then we'll delete it - // before proceeding. - if _, err := os.Stat(tempDestFilePath); err == nil { - log.Infof("Found old temp DB @ %v, removing before swap", - tempDestFilePath) - - err = os.Remove(tempDestFilePath) - if err != nil { - return er.Errorf("unable to remove old temp DB file: "+ - "%v", err) - } - } - - // Now that we know the staging area is clear, we'll create the new - // temporary DB file and close it before we write the new DB to it. 
- tempFile, errr := os.Create(tempDestFilePath) - if errr != nil { - return er.Errorf("unable to create temp DB file: %v", errr) - } - if err := tempFile.Close(); err != nil { - return er.Errorf("unable to close file: %v", err) - } - - // With the file created, we'll start the compaction and remove the - // temporary file all together once this method exits. - defer func() { - // This will only succeed if the rename below fails. If the - // compaction is successful, the file won't exist on exit - // anymore so no need to log an error here. - _ = os.Remove(tempDestFilePath) - }() - c := &compacter{ - srcPath: sourceFilePath, - dstPath: tempDestFilePath, - } - initialSize, newSize, err := c.execute() - if err != nil { - return er.Errorf("error during compact: %v", err) - } - - log.Infof("DB compaction of %v successful, %d -> %d bytes (gain=%.2fx)", - sourceFilePath, initialSize, newSize, - float64(initialSize)/float64(newSize)) - - // We try to store the current timestamp in a file with the suffix - // .last-compacted so we can figure out how long ago the last compaction - // was. But since this shouldn't fail the compaction process itself, we - // only log the error. Worst case if this file cannot be written is that - // we compact on every startup. - err = updateLastCompactionDate(sourceFilePath) - if err != nil { - log.Warnf("Could not update last compaction timestamp in "+ - "%s%s: %v", sourceFilePath, - LastCompactionFileNameSuffix, err) - } - - log.Infof("Swapping old DB file from %v to %v", tempDestFilePath, - sourceFilePath) - - // Finally, we'll attempt to atomically rename the temporary file to - // the main back up file. If this succeeds, then we'll only have a - // single file on disk once this method exits. - return er.E(os.Rename(tempDestFilePath, sourceFilePath)) -} - -// lastCompactionDate returns the date the given database file was last -// compacted or a zero time.Time if no compaction was recorded before. 
The -// compaction date is read from a file in the same directory and with the same -// name as the DB file, but with the suffix ".last-compacted". -func lastCompactionDate(dbFile string) (time.Time, er.R) { - zeroTime := time.Unix(0, 0) - - tsFile := fmt.Sprintf("%s%s", dbFile, LastCompactionFileNameSuffix) - if !fileExists(tsFile) { - return zeroTime, nil - } - - tsBytes, err := ioutil.ReadFile(tsFile) - if err != nil { - return zeroTime, er.E(err) - } - - tsNano := byteOrder.Uint64(tsBytes) - return time.Unix(0, int64(tsNano)), nil -} - -// updateLastCompactionDate stores the current time as a timestamp in a file -// in the same directory and with the same name as the DB file, but with the -// suffix ".last-compacted". -func updateLastCompactionDate(dbFile string) er.R { - var tsBytes [8]byte - byteOrder.PutUint64(tsBytes[:], uint64(time.Now().UnixNano())) - - tsFile := fmt.Sprintf("%s%s", dbFile, LastCompactionFileNameSuffix) - return er.E(ioutil.WriteFile(tsFile, tsBytes[:], 0600)) -} - -// GetTestBackend opens (or creates if doesn't exist) a bbolt or etcd -// backed database (for testing), and returns a kvdb.Backend and a cleanup -// func. Whether to create/open bbolt or embedded etcd database is based -// on the TestBackend constant which is conditionally compiled with build tag. -// The passed path is used to hold all db files, while the name is only used -// for bbolt. 
-func GetTestBackend(path, name string) (Backend, func(), er.R) { - empty := func() {} - - if TestBackend == BoltBackendName { - db, err := GetBoltBackend(&BoltBackendConfig{ - DBPath: path, - DBFileName: name, - NoFreelistSync: true, - }) - if err != nil { - return nil, nil, err - } - return db, empty, nil - } else if TestBackend == EtcdBackendName { - return GetEtcdTestBackend(path, name) - } - - return nil, nil, er.Errorf("unknown backend") -} diff --git a/lnd/channeldb/kvdb/bolt_compact.go b/lnd/channeldb/kvdb/bolt_compact.go deleted file mode 100644 index 6154b031..00000000 --- a/lnd/channeldb/kvdb/bolt_compact.go +++ /dev/null @@ -1,247 +0,0 @@ -// The code in this file is an adapted version of the bbolt compact command -// implemented in this file: -// https://github.com/etcd-io/bbolt/blob/master/cmd/bbolt/main.go - -package kvdb - -import ( - "os" - "path" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/healthcheck" - "github.com/pkt-cash/pktd/pktlog/log" - "go.etcd.io/bbolt" -) - -const ( - // defaultResultFileSizeMultiplier is the default multiplier we apply to - // the current database size to calculate how big it could possibly get - // after compacting, in case the database is already at its optimal size - // and compaction causes it to grow. This should normally not be the - // case but we really want to avoid not having enough disk space for the - // compaction, so we apply a safety margin of 10%. - defaultResultFileSizeMultiplier = float64(1.1) - - // defaultTxMaxSize is the default maximum number of operations that - // are allowed to be executed in a single transaction. - defaultTxMaxSize = 65536 - - // bucketFillSize is the fill size setting that is used for each new - // bucket that is created in the compacted database. This setting is not - // persisted and is therefore only effective for the compaction itself. 
- // Because during the compaction we only append data a fill percent of - // 100% is optimal for performance. - bucketFillSize = 1.0 -) - -type compacter struct { - srcPath string - dstPath string - txMaxSize int64 -} - -// execute opens the source and destination databases and then compacts the -// source into destination and returns the size of both files as a result. -func (cmd *compacter) execute() (int64, int64, er.R) { - if cmd.txMaxSize == 0 { - cmd.txMaxSize = defaultTxMaxSize - } - - // Ensure source file exists. - fi, errr := os.Stat(cmd.srcPath) - if errr != nil { - return 0, 0, er.Errorf("error determining source database "+ - "size: %v", errr) - } - initialSize := fi.Size() - marginSize := float64(initialSize) * defaultResultFileSizeMultiplier - - // Before opening any of the databases, let's first make sure we have - // enough free space on the destination file system to create a full - // copy of the source DB (worst-case scenario if the compaction doesn't - // actually shrink the file size). - destFolder := path.Dir(cmd.dstPath) - freeSpace, err := healthcheck.AvailableDiskSpace(destFolder) - if err != nil { - return 0, 0, er.Errorf("error determining free disk space on "+ - "%s: %v", destFolder, err) - } - log.Debugf("Free disk space on compaction destination file system: "+ - "%d bytes", freeSpace) - if freeSpace < uint64(marginSize) { - return 0, 0, er.Errorf("could not start compaction, "+ - "destination folder %s only has %d bytes of free disk "+ - "space available while we need at least %d for worst-"+ - "case compaction", destFolder, freeSpace, initialSize) - } - - // Open source database. We open it in read only mode to avoid (and fix) - // possible freelist sync problems. 
- src, errr := bbolt.Open(cmd.srcPath, 0444, &bbolt.Options{ - ReadOnly: true, - }) - if errr != nil { - return 0, 0, er.Errorf("error opening source database: %v", - errr) - } - defer func() { - if err := src.Close(); err != nil { - log.Errorf("Compact error: closing source DB: %v", err) - } - }() - - // Open destination database. - dst, errr := bbolt.Open(cmd.dstPath, fi.Mode(), nil) - if errr != nil { - return 0, 0, er.Errorf("error opening destination database: "+ - "%v", errr) - } - defer func() { - if err := dst.Close(); err != nil { - log.Errorf("Compact error: closing dest DB: %v", err) - } - }() - - // Run compaction. - if err := cmd.compact(dst, src); err != nil { - return 0, 0, er.Errorf("error running compaction: %v", err) - } - - // Report stats on new size. - fi, errr = os.Stat(cmd.dstPath) - if errr != nil { - return 0, 0, er.Errorf("error determining destination "+ - "database size: %v", errr) - } else if fi.Size() == 0 { - return 0, 0, er.Errorf("zero db size") - } - - return initialSize, fi.Size(), nil -} - -// compact tries to create a compacted copy of the source database in a new -// destination database. -func (cmd *compacter) compact(dst, src *bbolt.DB) er.R { - // Commit regularly, or we'll run out of memory for large datasets if - // using one transaction. - var size int64 - tx, err := dst.Begin(true) - if err != nil { - return er.E(err) - } - defer func() { - _ = tx.Rollback() - }() - - if err := cmd.walk(src, func(keys [][]byte, k, v []byte, seq uint64) er.R { - // On each key/value, check if we have exceeded tx size. - sz := int64(len(k) + len(v)) - if size+sz > cmd.txMaxSize && cmd.txMaxSize != 0 { - // Commit previous transaction. - if err := tx.Commit(); err != nil { - return er.E(err) - } - - // Start new transaction. - tx, err = dst.Begin(true) - if err != nil { - return er.E(err) - } - size = 0 - } - size += sz - - // Create bucket on the root transaction if this is the first - // level. 
- nk := len(keys) - if nk == 0 { - bkt, err := tx.CreateBucket(k) - if err != nil { - return er.E(err) - } - if err := bkt.SetSequence(seq); err != nil { - return er.E(err) - } - return nil - } - - // Create buckets on subsequent levels, if necessary. - b := tx.Bucket(keys[0]) - if nk > 1 { - for _, k := range keys[1:] { - b = b.Bucket(k) - } - } - - // Fill the entire page for best compaction. - b.FillPercent = bucketFillSize - - // If there is no value then this is a bucket call. - if v == nil { - bkt, err := b.CreateBucket(k) - if err != nil { - return er.E(err) - } - if err := bkt.SetSequence(seq); err != nil { - return er.E(err) - } - return nil - } - - // Otherwise treat it as a key/value pair. - return er.E(b.Put(k, v)) - }); err != nil { - return err - } - - return er.E(tx.Commit()) -} - -// walkFunc is the type of the function called for keys (buckets and "normal" -// values) discovered by Walk. keys is the list of keys to descend to the bucket -// owning the discovered key/value pair k/v. -type walkFunc func(keys [][]byte, k, v []byte, seq uint64) er.R - -// walk walks recursively the bolt database db, calling walkFn for each key it -// finds. -func (cmd *compacter) walk(db *bbolt.DB, walkFn walkFunc) er.R { - return er.E(db.View(func(tx *bbolt.Tx) error { - return tx.ForEach(func(name []byte, b *bbolt.Bucket) error { - // This will log the top level buckets only to give the - // user some sense of progress. - log.Debugf("Compacting top level bucket %s", name) - - return er.Native(cmd.walkBucket( - b, nil, name, nil, b.Sequence(), walkFn, - )) - }) - })) -} - -// walkBucket recursively walks through a bucket. -func (cmd *compacter) walkBucket(b *bbolt.Bucket, keyPath [][]byte, k, v []byte, - seq uint64, fn walkFunc) er.R { - - // Execute callback. - if err := fn(keyPath, k, v, seq); err != nil { - return err - } - - // If this is not a bucket then stop. - if v != nil { - return nil - } - - // Iterate over each child key/value. 
- keyPath = append(keyPath, k) - return er.E(b.ForEach(func(k, v []byte) error { - if v == nil { - bkt := b.Bucket(k) - return er.Native(cmd.walkBucket( - bkt, keyPath, k, nil, bkt.Sequence(), fn, - )) - } - return er.Native(cmd.walkBucket(b, keyPath, k, v, b.Sequence(), fn)) - })) -} diff --git a/lnd/channeldb/kvdb/config.go b/lnd/channeldb/kvdb/config.go deleted file mode 100644 index 9ea50adc..00000000 --- a/lnd/channeldb/kvdb/config.go +++ /dev/null @@ -1,48 +0,0 @@ -package kvdb - -import "time" - -const ( - // BoltBackendName is the name of the backend that should be passed into - // kvdb.Create to initialize a new instance of kvdb.Backend backed by a - // live instance of bbolt. - BoltBackendName = "bdb" - - // EtcdBackendName is the name of the backend that should be passed into - // kvdb.Create to initialize a new instance of kvdb.Backend backed by a - // live instance of etcd. - EtcdBackendName = "etcd" - - // DefaultBoltAutoCompactMinAge is the default minimum time that must - // have passed since a bolt database file was last compacted for the - // compaction to be considered again. - DefaultBoltAutoCompactMinAge = time.Hour * 24 * 7 -) - -// BoltConfig holds bolt configuration. -type BoltConfig struct { - SyncFreelist bool `long:"nofreelistsync" description:"Whether the databases used within lnd should sync their freelist to disk. This is disabled by default resulting in improved memory performance during operation, but with an increase in startup time."` - - AutoCompact bool `long:"auto-compact" description:"Whether the databases used within lnd should automatically be compacted on every startup (and if the database has the configured minimum age). This is disabled by default because it requires additional disk space to be available during the compaction that is freed afterwards. 
In general compaction leads to smaller database files."` - - AutoCompactMinAge time.Duration `long:"auto-compact-min-age" description:"How long ago the last compaction of a database file must be for it to be considered for auto compaction again. Can be set to 0 to compact on every startup."` -} - -// EtcdConfig holds etcd configuration. -type EtcdConfig struct { - Embedded bool `long:"embedded" description:"Use embedded etcd instance instead of the external one."` - - Host string `long:"host" description:"Etcd database host."` - - User string `long:"user" description:"Etcd database user."` - - Pass string `long:"pass" description:"Password for the database user."` - - CertFile string `long:"cert_file" description:"Path to the TLS certificate for etcd RPC."` - - KeyFile string `long:"key_file" description:"Path to the TLS private key for etcd RPC."` - - InsecureSkipVerify bool `long:"insecure_skip_verify" description:"Whether we intend to skip TLS verification"` - - CollectStats bool `long:"collect_stats" description:"Whether to collect etcd commit stats."` -} diff --git a/lnd/channeldb/kvdb/etcd/bucket.go b/lnd/channeldb/kvdb/etcd/bucket.go deleted file mode 100644 index 8a1ff071..00000000 --- a/lnd/channeldb/kvdb/etcd/bucket.go +++ /dev/null @@ -1,92 +0,0 @@ -// +build kvdb_etcd - -package etcd - -import ( - "crypto/sha256" -) - -const ( - bucketIDLength = 32 -) - -var ( - valuePostfix = []byte{0x00} - bucketPostfix = []byte{0xFF} - sequencePrefix = []byte("$seq$") -) - -// makeBucketID returns a deterministic key for the passed byte slice. -// Currently it returns the sha256 hash of the slice. -func makeBucketID(key []byte) [bucketIDLength]byte { - return sha256.Sum256(key) -} - -// isValidBucketID checks if the passed slice is the required length to be a -// valid bucket id. -func isValidBucketID(s []byte) bool { - return len(s) == bucketIDLength -} - -// makeKey concatenates parent, key and postfix into one byte slice. 
-// The postfix indicates the use of this key (whether bucket or value), while -// parent refers to the parent bucket. -func makeKey(parent, key, postfix []byte) []byte { - keyBuf := make([]byte, len(parent)+len(key)+len(postfix)) - copy(keyBuf, parent) - copy(keyBuf[len(parent):], key) - copy(keyBuf[len(parent)+len(key):], postfix) - - return keyBuf -} - -// makeBucketKey returns a bucket key from the passed parent bucket id and -// the key. -func makeBucketKey(parent []byte, key []byte) []byte { - return makeKey(parent, key, bucketPostfix) -} - -// makeValueKey returns a value key from the passed parent bucket id and -// the key. -func makeValueKey(parent []byte, key []byte) []byte { - return makeKey(parent, key, valuePostfix) -} - -// makeSequenceKey returns a sequence key of the passed parent bucket id. -func makeSequenceKey(parent []byte) []byte { - keyBuf := make([]byte, len(sequencePrefix)+len(parent)) - copy(keyBuf, sequencePrefix) - copy(keyBuf[len(sequencePrefix):], parent) - return keyBuf -} - -// isBucketKey returns true if the passed key is a bucket key, meaning it -// keys a bucket name. -func isBucketKey(key string) bool { - if len(key) < bucketIDLength+1 { - return false - } - - return key[len(key)-1] == bucketPostfix[0] -} - -// getKey chops out the key from the raw key (by removing the bucket id -// prefixing the key and the postfix indicating whether it is a bucket or -// a value key) -func getKey(rawKey string) []byte { - return []byte(rawKey[bucketIDLength : len(rawKey)-1]) -} - -// getKeyVal chops out the key from the raw key (by removing the bucket id -// prefixing the key and the postfix indicating whether it is a bucket or -// a value key) and also returns the appropriate value for the key, which is -// nil in case of buckets (or the set value otherwise). 
-func getKeyVal(kv *KV) ([]byte, []byte) { - var val []byte - - if !isBucketKey(kv.key) { - val = []byte(kv.val) - } - - return getKey(kv.key), val -} diff --git a/lnd/channeldb/kvdb/etcd/bucket_test.go b/lnd/channeldb/kvdb/etcd/bucket_test.go deleted file mode 100644 index e68821f1..00000000 --- a/lnd/channeldb/kvdb/etcd/bucket_test.go +++ /dev/null @@ -1,42 +0,0 @@ -// +build kvdb_etcd - -package etcd - -// bkey is a helper functon used in tests to create a bucket key from passed -// bucket list. -func bkey(buckets ...string) string { - var bucketKey []byte - - rootID := makeBucketID([]byte("")) - parent := rootID[:] - - for _, bucketName := range buckets { - bucketKey = makeBucketKey(parent, []byte(bucketName)) - id := makeBucketID(bucketKey) - parent = id[:] - } - - return string(bucketKey) -} - -// bval is a helper function used in tests to create a bucket value (the value -// for a bucket key) from the passed bucket list. -func bval(buckets ...string) string { - id := makeBucketID([]byte(bkey(buckets...))) - return string(id[:]) -} - -// vkey is a helper function used in tests to create a value key from the -// passed key and bucket list. -func vkey(key string, buckets ...string) string { - rootID := makeBucketID([]byte("")) - bucket := rootID[:] - - for _, bucketName := range buckets { - bucketKey := makeBucketKey(bucket, []byte(bucketName)) - id := makeBucketID(bucketKey) - bucket = id[:] - } - - return string(makeValueKey(bucket, []byte(key))) -} diff --git a/lnd/channeldb/kvdb/etcd/commit_queue.go b/lnd/channeldb/kvdb/etcd/commit_queue.go deleted file mode 100644 index f0384565..00000000 --- a/lnd/channeldb/kvdb/etcd/commit_queue.go +++ /dev/null @@ -1,150 +0,0 @@ -// +build kvdb_etcd - -package etcd - -import ( - "context" - "sync" -) - -// commitQueueSize is the maximum number of commits we let to queue up. All -// remaining commits will block on commitQueue.Add(). 
-const commitQueueSize = 100 - -// commitQueue is a simple execution queue to manage conflicts for transactions -// and thereby reduce the number of times conflicting transactions need to be -// retried. When a new transaction is added to the queue, we first upgrade the -// read/write counts in the queue's own accounting to decide whether the new -// transaction has any conflicting dependencies. If the transaction does not -// conflict with any other, then it is comitted immediately, otherwise it'll be -// queued up for later exection. -// The algorithm is described in: http://www.cs.umd.edu/~abadi/papers/vll-vldb13.pdf -type commitQueue struct { - ctx context.Context - mx sync.Mutex - readerMap map[string]int - writerMap map[string]int - - commitMutex sync.RWMutex - queue chan (func()) - wg sync.WaitGroup -} - -// NewCommitQueue creates a new commit queue, with the passed abort context. -func NewCommitQueue(ctx context.Context) *commitQueue { - q := &commitQueue{ - ctx: ctx, - readerMap: make(map[string]int), - writerMap: make(map[string]int), - queue: make(chan func(), commitQueueSize), - } - - // Start the queue consumer loop. - q.wg.Add(1) - go q.mainLoop() - - return q -} - -// Wait waits for the queue to stop (after the queue context has been canceled). -func (c *commitQueue) Wait() { - c.wg.Wait() -} - -// Add increases lock counts and queues up tx commit closure for execution. -// Transactions that don't have any conflicts are executed immediately by -// "downgrading" the count mutex to allow concurrency. -func (c *commitQueue) Add(commitLoop func(), rset readSet, wset writeSet) { - c.mx.Lock() - blocked := false - - // Mark as blocked if there's any writer changing any of the keys in - // the read set. Do not increment the reader counts yet as we'll need to - // use the original reader counts when scanning through the write set. 
- for key := range rset { - if c.writerMap[key] > 0 { - blocked = true - break - } - } - - // Mark as blocked if there's any writer or reader for any of the keys - // in the write set. - for key := range wset { - blocked = blocked || c.readerMap[key] > 0 || c.writerMap[key] > 0 - - // Increment the writer count. - c.writerMap[key] += 1 - } - - // Finally we can increment the reader counts for keys in the read set. - for key := range rset { - c.readerMap[key] += 1 - } - - if blocked { - // Add the transaction to the queue if conflicts with an already - // queued one. - c.mx.Unlock() - - select { - case c.queue <- commitLoop: - case <-c.ctx.Done(): - } - } else { - // To make sure we don't add a new tx to the queue that depends - // on this "unblocked" tx, grab the commitMutex before lifting - // the mutex guarding the lock maps. - c.commitMutex.RLock() - c.mx.Unlock() - - // At this point we're safe to execute the "unblocked" tx, as - // we cannot execute blocked tx that may have been read from the - // queue until the commitMutex is held. - commitLoop() - - c.commitMutex.RUnlock() - } -} - -// Done decreases lock counts of the keys in the read/write sets. -func (c *commitQueue) Done(rset readSet, wset writeSet) { - c.mx.Lock() - defer c.mx.Unlock() - - for key := range rset { - c.readerMap[key] -= 1 - if c.readerMap[key] == 0 { - delete(c.readerMap, key) - } - } - - for key := range wset { - c.writerMap[key] -= 1 - if c.writerMap[key] == 0 { - delete(c.writerMap, key) - } - } -} - -// mainLoop executes queued transaction commits for transactions that have -// dependencies. The queue ensures that the top element doesn't conflict with -// any other transactions and therefore can be executed freely. -func (c *commitQueue) mainLoop() { - defer c.wg.Done() - - for { - select { - case top := <-c.queue: - // Execute the next blocked transaction. As it is - // the top element in the queue it means that it doesn't - // depend on any other transactions anymore. 
- c.commitMutex.Lock() - top() - c.commitMutex.Unlock() - - case <-c.ctx.Done(): - return - } - } -} diff --git a/lnd/channeldb/kvdb/etcd/commit_queue_test.go b/lnd/channeldb/kvdb/etcd/commit_queue_test.go deleted file mode 100644 index 16ff7100..00000000 --- a/lnd/channeldb/kvdb/etcd/commit_queue_test.go +++ /dev/null @@ -1,115 +0,0 @@ -// +build kvdb_etcd - -package etcd - -import ( - "context" - "sync" - "sync/atomic" - "testing" - "time" - - "github.com/stretchr/testify/require" -) - -// TestCommitQueue tests that non-conflicting transactions commit concurrently, -// while conflicting transactions are queued up. -func TestCommitQueue(t *testing.T) { - // The duration of each commit. - const commitDuration = time.Millisecond * 500 - const numCommits = 4 - - var wg sync.WaitGroup - commits := make([]string, numCommits) - idx := int32(-1) - - commit := func(tag string, sleep bool) func() { - return func() { - defer wg.Done() - - // Update our log of commit order. Avoid blocking - // by preallocating the commit log and increasing - // the log index atomically. - i := atomic.AddInt32(&idx, 1) - commits[i] = tag - - if sleep { - time.Sleep(commitDuration) - } - } - } - - // Helper function to create a read set from the passed keys. - makeReadSet := func(keys []string) readSet { - rs := make(map[string]stmGet) - - for _, key := range keys { - rs[key] = stmGet{} - } - - return rs - } - - // Helper function to create a write set from the passed keys. 
- makeWriteSet := func(keys []string) writeSet { - ws := make(map[string]stmPut) - - for _, key := range keys { - ws[key] = stmPut{} - } - - return ws - } - - ctx := context.Background() - ctx, cancel := context.WithCancel(ctx) - q := NewCommitQueue(ctx) - defer q.Wait() - defer cancel() - - wg.Add(numCommits) - t1 := time.Now() - - // Tx1: reads: key1, key2, writes: key3, conflict: none - q.Add( - commit("free", true), - makeReadSet([]string{"key1", "key2"}), - makeWriteSet([]string{"key3"}), - ) - // Tx2: reads: key1, key2, writes: key3, conflict: Tx1 - q.Add( - commit("blocked1", false), - makeReadSet([]string{"key1", "key2"}), - makeWriteSet([]string{"key3"}), - ) - // Tx3: reads: key1, writes: key4, conflict: none - q.Add( - commit("free", true), - makeReadSet([]string{"key1", "key2"}), - makeWriteSet([]string{"key4"}), - ) - // Tx4: reads: key2, writes: key4 conflict: Tx3 - q.Add( - commit("blocked2", false), - makeReadSet([]string{"key2"}), - makeWriteSet([]string{"key4"}), - ) - - // Wait for all commits. - wg.Wait() - t2 := time.Now() - - // Expected total execution time: delta. - // 2 * commitDuration <= delta < 3 * commitDuration - delta := t2.Sub(t1) - require.LessOrEqual(t, int64(commitDuration*2), int64(delta)) - require.Greater(t, int64(commitDuration*3), int64(delta)) - - // Expect that the non-conflicting "free" transactions are executed - // before the blocking ones, and the blocking ones are executed in - // the order of addition. 
- require.Equal(t, - []string{"free", "free", "blocked1", "blocked2"}, - commits, - ) -} diff --git a/lnd/channeldb/kvdb/etcd/db.go b/lnd/channeldb/kvdb/etcd/db.go deleted file mode 100644 index 0c0b8d0c..00000000 --- a/lnd/channeldb/kvdb/etcd/db.go +++ /dev/null @@ -1,311 +0,0 @@ -// +build kvdb_etcd - -package etcd - -import ( - "context" - "fmt" - "io" - "runtime" - "sync" - "time" - - "github.com/coreos/etcd/clientv3" - "github.com/coreos/etcd/pkg/transport" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/pktwallet/walletdb" -) - -const ( - // etcdConnectionTimeout is the timeout until successful connection to - // the etcd instance. - etcdConnectionTimeout = 10 * time.Second - - // etcdLongTimeout is a timeout for longer taking etcd operatons. - etcdLongTimeout = 30 * time.Second -) - -// callerStats holds commit stats for a specific caller. Currently it only -// holds the max stat, meaning that for a particular caller the largest -// commit set is recorded. -type callerStats struct { - count int - commitStats CommitStats -} - -func (s callerStats) String() string { - return fmt.Sprintf("count: %d, retries: %d, rset: %d, wset: %d", - s.count, s.commitStats.Retries, s.commitStats.Rset, - s.commitStats.Wset) -} - -// commitStatsCollector collects commit stats for commits succeeding -// and also for commits failing. -type commitStatsCollector struct { - sync.RWMutex - succ map[string]*callerStats - fail map[string]*callerStats -} - -// newCommitStatsColletor creates a new commitStatsCollector instance. -func newCommitStatsColletor() *commitStatsCollector { - return &commitStatsCollector{ - succ: make(map[string]*callerStats), - fail: make(map[string]*callerStats), - } -} - -// PrintStats returns collected stats pretty printed into a string. 
-func (c *commitStatsCollector) PrintStats() string { - c.RLock() - defer c.RUnlock() - - s := "\nFailure:\n" - for k, v := range c.fail { - s += fmt.Sprintf("%s\t%s\n", k, v) - } - - s += "\nSuccess:\n" - for k, v := range c.succ { - s += fmt.Sprintf("%s\t%s\n", k, v) - } - - return s -} - -// updateStatsMap updatess commit stats map for a caller. -func updateStatMap( - caller string, stats CommitStats, m map[string]*callerStats) { - - if _, ok := m[caller]; !ok { - m[caller] = &callerStats{} - } - - curr := m[caller] - curr.count++ - - // Update only if the total commit set is greater or equal. - currTotal := curr.commitStats.Rset + curr.commitStats.Wset - if currTotal <= (stats.Rset + stats.Wset) { - curr.commitStats = stats - } -} - -// callback is an STM commit stats callback passed which can be passed -// using a WithCommitStatsCallback to the STM upon construction. -func (c *commitStatsCollector) callback(succ bool, stats CommitStats) { - caller := "unknown" - - // Get the caller. As this callback is called from - // the backend interface that means we need to ascend - // 4 frames in the callstack. - _, file, no, ok := runtime.Caller(4) - if ok { - caller = fmt.Sprintf("%s#%d", file, no) - } - - c.Lock() - defer c.Unlock() - - if succ { - updateStatMap(caller, stats, c.succ) - } else { - updateStatMap(caller, stats, c.fail) - } -} - -// db holds a reference to the etcd client connection. -type db struct { - config BackendConfig - cli *clientv3.Client - commitStatsCollector *commitStatsCollector - txQueue *commitQueue -} - -// Enforce db implements the walletdb.DB interface. -var _ walletdb.DB = (*db)(nil) - -// BackendConfig holds and etcd backend config and connection parameters. -type BackendConfig struct { - // Ctx is the context we use to cancel operations upon exit. - Ctx context.Context - - // Host holds the peer url of the etcd instance. - Host string - - // User is the username for the etcd peer. 
- User string - - // Pass is the password for the etcd peer. - Pass string - - // CertFile holds the path to the TLS certificate for etcd RPC. - CertFile string - - // KeyFile holds the path to the TLS private key for etcd RPC. - KeyFile string - - // InsecureSkipVerify should be set to true if we intend to - // skip TLS verification. - InsecureSkipVerify bool - - // Prefix the hash of the prefix will be used as the root - // bucket id. This enables key space separation similar to - // name spaces. - Prefix string - - // CollectCommitStats indicates wheter to commit commit stats. - CollectCommitStats bool -} - -// newEtcdBackend returns a db object initialized with the passed backend -// config. If etcd connection cannot be estabished, then returns error. -func newEtcdBackend(config BackendConfig) (*db, er.R) { - if config.Ctx == nil { - config.Ctx = context.Background() - } - - tlsInfo := transport.TLSInfo{ - CertFile: config.CertFile, - KeyFile: config.KeyFile, - InsecureSkipVerify: config.InsecureSkipVerify, - } - - tlsConfig, err := tlsInfo.ClientConfig() - if err != nil { - return nil, err - } - - cli, err := clientv3.New(clientv3.Config{ - Context: config.Ctx, - Endpoints: []string{config.Host}, - DialTimeout: etcdConnectionTimeout, - Username: config.User, - Password: config.Pass, - TLS: tlsConfig, - MaxCallSendMsgSize: 16384*1024 - 1, - }) - - if err != nil { - return nil, err - } - - backend := &db{ - cli: cli, - config: config, - txQueue: NewCommitQueue(config.Ctx), - } - - if config.CollectCommitStats { - backend.commitStatsCollector = newCommitStatsColletor() - } - - return backend, nil -} - -// getSTMOptions creats all STM options based on the backend config. 
-func (db *db) getSTMOptions() []STMOptionFunc { - opts := []STMOptionFunc{ - WithAbortContext(db.config.Ctx), - } - - if db.config.CollectCommitStats { - opts = append(opts, - WithCommitStatsCallback(db.commitStatsCollector.callback), - ) - } - - return opts -} - -// View opens a database read transaction and executes the function f with the -// transaction passed as a parameter. After f exits, the transaction is rolled -// back. If f errors, its error is returned, not a rollback error (if any -// occur). The passed reset function is called before the start of the -// transaction and can be used to reset intermediate state. As callers may -// expect retries of the f closure (depending on the database backend used), the -// reset function will be called before each retry respectively. -func (db *db) View(f func(tx walletdb.ReadTx) error, reset func()) er.R { - apply := func(stm STM) er.R { - reset() - return f(newReadWriteTx(stm, db.config.Prefix)) - } - - return RunSTM(db.cli, apply, db.txQueue, db.getSTMOptions()...) -} - -// Update opens a database read/write transaction and executes the function f -// with the transaction passed as a parameter. After f exits, if f did not -// error, the transaction is committed. Otherwise, if f did error, the -// transaction is rolled back. If the rollback fails, the original error -// returned by f is still returned. If the commit fails, the commit error is -// returned. As callers may expect retries of the f closure, the reset function -// will be called before each retry respectively. -func (db *db) Update(f func(tx walletdb.ReadWriteTx) error, reset func()) er.R { - apply := func(stm STM) er.R { - reset() - return f(newReadWriteTx(stm, db.config.Prefix)) - } - - return RunSTM(db.cli, apply, db.txQueue, db.getSTMOptions()...) -} - -// PrintStats returns all collected stats pretty printed into a string. 
-func (db *db) PrintStats() string { - if db.commitStatsCollector != nil { - return db.commitStatsCollector.PrintStats() - } - - return "" -} - -// BeginReadWriteTx opens a database read+write transaction. -func (db *db) BeginReadWriteTx() (walletdb.ReadWriteTx, er.R) { - return newReadWriteTx( - NewSTM(db.cli, db.txQueue, db.getSTMOptions()...), - db.config.Prefix, - ), nil -} - -// BeginReadTx opens a database read transaction. -func (db *db) BeginReadTx() (walletdb.ReadTx, er.R) { - return newReadWriteTx( - NewSTM(db.cli, db.txQueue, db.getSTMOptions()...), - db.config.Prefix, - ), nil -} - -// Copy writes a copy of the database to the provided writer. This call will -// start a read-only transaction to perform all operations. -// This function is part of the walletdb.Db interface implementation. -func (db *db) Copy(w io.Writer) er.R { - ctx, cancel := context.WithTimeout(db.config.Ctx, etcdLongTimeout) - defer cancel() - - readCloser, err := db.cli.Snapshot(ctx) - if err != nil { - return err - } - - _, err = io.Copy(w, readCloser) - - return err -} - -// Close cleanly shuts down the database and syncs all data. -// This function is part of the walletdb.Db interface implementation. -func (db *db) Close() er.R { - return db.cli.Close() -} - -// Batch opens a database read/write transaction and executes the function f -// with the transaction passed as a parameter. After f exits, if f did not -// error, the transaction is committed. Otherwise, if f did error, the -// transaction is rolled back. If the rollback fails, the original error -// returned by f is still returned. If the commit fails, the commit error is -// returned. -// -// Batch is only useful when there are multiple goroutines calling it. 
-func (db *db) Batch(apply func(tx walletdb.ReadWriteTx) er.R) er.R { - return db.Update(apply, func() {}) -} diff --git a/lnd/channeldb/kvdb/etcd/db_test.go b/lnd/channeldb/kvdb/etcd/db_test.go deleted file mode 100644 index 90ac734b..00000000 --- a/lnd/channeldb/kvdb/etcd/db_test.go +++ /dev/null @@ -1,76 +0,0 @@ -// +build kvdb_etcd - -package etcd - -import ( - "bytes" - "context" - "testing" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/pktwallet/walletdb" - "github.com/stretchr/testify/require" -) - -func TestCopy(t *testing.T) { - t.Parallel() - - f := NewEtcdTestFixture(t) - defer f.Cleanup() - - db, err := newEtcdBackend(f.BackendConfig()) - util.RequireNoErr(t, err) - - err = db.Update(func(tx walletdb.ReadWriteTx) er.R { - // "apple" - apple, err := tx.CreateTopLevelBucket([]byte("apple")) - util.RequireNoErr(t, err) - require.NotNil(t, apple) - - util.RequireNoErr(t, apple.Put([]byte("key"), []byte("val"))) - return nil - }, func() {}) - - // Expect non-zero copy. - var buf bytes.Buffer - - util.RequireNoErr(t, db.Copy(&buf)) - require.Greater(t, buf.Len(), 0) - require.Nil(t, err) - - expected := map[string]string{ - bkey("apple"): bval("apple"), - vkey("key", "apple"): "val", - } - require.Equal(t, expected, f.Dump()) -} - -func TestAbortContext(t *testing.T) { - t.Parallel() - - f := NewEtcdTestFixture(t) - defer f.Cleanup() - - ctx, cancel := context.WithCancel(context.Background()) - - config := f.BackendConfig() - config.Ctx = ctx - - // Pass abort context and abort right away. - db, err := newEtcdBackend(config) - util.RequireNoErr(t, err) - cancel() - - // Expect that the update will fail. - err = db.Update(func(tx walletdb.ReadWriteTx) er.R { - _, err := tx.CreateTopLevelBucket([]byte("bucket")) - util.RequireErr(t, err, "context canceled") - - return nil - }, func() {}) - - util.RequireErr(t, err, "context canceled") - - // No changes in the DB. 
- require.Equal(t, map[string]string{}, f.Dump()) -} diff --git a/lnd/channeldb/kvdb/etcd/driver.go b/lnd/channeldb/kvdb/etcd/driver.go deleted file mode 100644 index 4b4886f3..00000000 --- a/lnd/channeldb/kvdb/etcd/driver.go +++ /dev/null @@ -1,68 +0,0 @@ -// +build kvdb_etcd - -package etcd - -import ( - "fmt" - - "github.com/pkt-cash/pktd/pktwallet/walletdb" -) - -const ( - dbType = "etcd" -) - -// parseArgs parses the arguments from the walletdb Open/Create methods. -func parseArgs(funcName string, args ...interface{}) (*BackendConfig, er.R) { - if len(args) != 1 { - return nil, er.Errorf("invalid number of arguments to %s.%s -- "+ - "expected: etcd.BackendConfig", - dbType, funcName, - ) - } - - config, ok := args[0].(BackendConfig) - if !ok { - return nil, er.Errorf("argument to %s.%s is invalid -- "+ - "expected: etcd.BackendConfig", - dbType, funcName, - ) - } - - return &config, nil -} - -// createDBDriver is the callback provided during driver registration that -// creates, initializes, and opens a database for use. -func createDBDriver(args ...interface{}) (walletdb.DB, er.R) { - config, err := parseArgs("Create", args...) - if err != nil { - return nil, err - } - - return newEtcdBackend(*config) -} - -// openDBDriver is the callback provided during driver registration that opens -// an existing database for use. -func openDBDriver(args ...interface{}) (walletdb.DB, er.R) { - config, err := parseArgs("Open", args...) - if err != nil { - return nil, err - } - - return newEtcdBackend(*config) -} - -func init() { - // Register the driver. 
- driver := walletdb.Driver{ - DbType: dbType, - Create: createDBDriver, - Open: openDBDriver, - } - if err := walletdb.RegisterDriver(driver); err != nil { - panic(fmt.Sprintf("Failed to regiser database driver '%s': %v", - dbType, err)) - } -} diff --git a/lnd/channeldb/kvdb/etcd/driver_test.go b/lnd/channeldb/kvdb/etcd/driver_test.go deleted file mode 100644 index 59983dc3..00000000 --- a/lnd/channeldb/kvdb/etcd/driver_test.go +++ /dev/null @@ -1,31 +0,0 @@ -// +build kvdb_etcd - -package etcd - -import ( - "testing" - - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/pktwallet/walletdb" - "github.com/stretchr/testify/require" -) - -func TestOpenCreateFailure(t *testing.T) { - t.Parallel() - - db, err := walletdb.Open(dbType) - util.RequireErr(t, err) - require.Nil(t, db) - - db, err = walletdb.Open(dbType, "wrong") - util.RequireErr(t, err) - require.Nil(t, db) - - db, err = walletdb.Create(dbType) - util.RequireErr(t, err) - require.Nil(t, db) - - db, err = walletdb.Create(dbType, "wrong") - util.RequireErr(t, err) - require.Nil(t, db) -} diff --git a/lnd/channeldb/kvdb/etcd/embed.go b/lnd/channeldb/kvdb/etcd/embed.go deleted file mode 100644 index 195396d5..00000000 --- a/lnd/channeldb/kvdb/etcd/embed.go +++ /dev/null @@ -1,81 +0,0 @@ -// +build kvdb_etcd - -package etcd - -import ( - "context" - "fmt" - "net" - "net/url" - "time" - - "github.com/coreos/etcd/embed" -) - -const ( - // readyTimeout is the time until the embedded etcd instance should start. - readyTimeout = 10 * time.Second -) - -// getFreePort returns a random open TCP port. -func getFreePort() int { - ln, err := net.Listen("tcp", "[::]:0") - if err != nil { - panic(err) - } - - port := ln.Addr().(*net.TCPAddr).Port - - err = ln.Close() - if err != nil { - panic(err) - } - - return port -} - -// NewEmbeddedEtcdInstance creates an embedded etcd instance for testing, -// listening on random open ports. 
Returns the backend config and a cleanup -// func that will stop the etcd instance. -func NewEmbeddedEtcdInstance(path string) (*BackendConfig, func(), er.R) { - cfg := embed.NewConfig() - cfg.Dir = path - - // To ensure that we can submit large transactions. - cfg.MaxTxnOps = 8192 - cfg.MaxRequestBytes = 16384 * 1024 - - // Listen on random free ports. - clientURL := fmt.Sprintf("127.0.0.1:%d", getFreePort()) - peerURL := fmt.Sprintf("127.0.0.1:%d", getFreePort()) - cfg.LCUrls = []url.URL{{Host: clientURL}} - cfg.LPUrls = []url.URL{{Host: peerURL}} - - etcd, err := embed.StartEtcd(cfg) - if err != nil { - return nil, nil, err - } - - select { - case <-etcd.Server.ReadyNotify(): - case <-time.After(readyTimeout): - etcd.Close() - return nil, nil, - er.Errorf("etcd failed to start after: %v", readyTimeout) - } - - ctx, cancel := context.WithCancel(context.Background()) - - connConfig := &BackendConfig{ - Ctx: ctx, - Host: "http://" + peerURL, - User: "user", - Pass: "pass", - InsecureSkipVerify: true, - } - - return connConfig, func() { - cancel() - etcd.Close() - }, nil -} diff --git a/lnd/channeldb/kvdb/etcd/fixture_test.go b/lnd/channeldb/kvdb/etcd/fixture_test.go deleted file mode 100644 index 56526906..00000000 --- a/lnd/channeldb/kvdb/etcd/fixture_test.go +++ /dev/null @@ -1,129 +0,0 @@ -// +build kvdb_etcd - -package etcd - -import ( - "context" - "io/ioutil" - "os" - "testing" - "time" - - "github.com/coreos/etcd/clientv3" -) - -const ( - // testEtcdTimeout is used for all RPC calls initiated by the test fixture. - testEtcdTimeout = 5 * time.Second -) - -// EtcdTestFixture holds internal state of the etcd test fixture. -type EtcdTestFixture struct { - t *testing.T - cli *clientv3.Client - config *BackendConfig - cleanup func() -} - -// NewTestEtcdInstance creates an embedded etcd instance for testing, listening -// on random open ports. Returns the connection config and a cleanup func that -// will stop the etcd instance. 
-func NewTestEtcdInstance(t *testing.T, path string) (*BackendConfig, func()) { - t.Helper() - - config, cleanup, err := NewEmbeddedEtcdInstance(path) - if err != nil { - t.Fatalf("error while staring embedded etcd instance: %v", err) - } - - return config, cleanup -} - -// NewTestEtcdTestFixture creates a new etcd-test fixture. This is helper -// object to facilitate etcd tests and ensure pre and post conditions. -func NewEtcdTestFixture(t *testing.T) *EtcdTestFixture { - tmpDir, err := ioutil.TempDir("", "etcd") - if err != nil { - t.Fatalf("unable to create temp dir: %v", err) - } - - config, etcdCleanup := NewTestEtcdInstance(t, tmpDir) - - cli, err := clientv3.New(clientv3.Config{ - Endpoints: []string{config.Host}, - Username: config.User, - Password: config.Pass, - }) - if err != nil { - os.RemoveAll(tmpDir) - t.Fatalf("unable to create etcd test fixture: %v", err) - } - - return &EtcdTestFixture{ - t: t, - cli: cli, - config: config, - cleanup: func() { - etcdCleanup() - os.RemoveAll(tmpDir) - }, - } -} - -// Put puts a string key/value into the test etcd database. -func (f *EtcdTestFixture) Put(key, value string) { - ctx, cancel := context.WithTimeout(context.TODO(), testEtcdTimeout) - defer cancel() - - _, err := f.cli.Put(ctx, key, value) - if err != nil { - f.t.Fatalf("etcd test fixture failed to put: %v", err) - } -} - -// Get queries a key and returns the stored value from the test etcd database. -func (f *EtcdTestFixture) Get(key string) string { - ctx, cancel := context.WithTimeout(context.TODO(), testEtcdTimeout) - defer cancel() - - resp, err := f.cli.Get(ctx, key) - if err != nil { - f.t.Fatalf("etcd test fixture failed to put: %v", err) - } - - if len(resp.Kvs) > 0 { - return string(resp.Kvs[0].Value) - } - - return "" -} - -// Dump scans and returns all key/values from the test etcd database. 
-func (f *EtcdTestFixture) Dump() map[string]string { - ctx, cancel := context.WithTimeout(context.TODO(), testEtcdTimeout) - defer cancel() - - resp, err := f.cli.Get(ctx, "", clientv3.WithPrefix()) - if err != nil { - f.t.Fatalf("etcd test fixture failed to put: %v", err) - } - - result := make(map[string]string) - for _, kv := range resp.Kvs { - result[string(kv.Key)] = string(kv.Value) - } - - return result -} - -// BackendConfig returns the backend config for connecting to theembedded -// etcd instance. -func (f *EtcdTestFixture) BackendConfig() BackendConfig { - return *f.config -} - -// Cleanup should be called at test fixture teardown to stop the embedded -// etcd instance and remove all temp db files form the filesystem. -func (f *EtcdTestFixture) Cleanup() { - f.cleanup() -} diff --git a/lnd/channeldb/kvdb/etcd/readwrite_bucket.go b/lnd/channeldb/kvdb/etcd/readwrite_bucket.go deleted file mode 100644 index 373f90b9..00000000 --- a/lnd/channeldb/kvdb/etcd/readwrite_bucket.go +++ /dev/null @@ -1,357 +0,0 @@ -// +build kvdb_etcd - -package etcd - -import ( - "strconv" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/pktwallet/walletdb" -) - -// readWriteBucket stores the bucket id and the buckets transaction. -type readWriteBucket struct { - // id is used to identify the bucket and is created by - // hashing the parent id with the bucket key. For each key/value, - // sub-bucket or the bucket sequence the bucket id is used with the - // appropriate prefix to prefix the key. - id []byte - - // tx holds the parent transaction. - tx *readWriteTx -} - -// newReadWriteBucket creates a new rw bucket with the passed transaction -// and bucket id. -func newReadWriteBucket(tx *readWriteTx, key, id []byte) *readWriteBucket { - return &readWriteBucket{ - id: id, - tx: tx, - } -} - -// NestedReadBucket retrieves a nested read bucket with the given key. -// Returns nil if the bucket does not exist. 
-func (b *readWriteBucket) NestedReadBucket(key []byte) walletdb.ReadBucket { - return b.NestedReadWriteBucket(key) -} - -// ForEach invokes the passed function with every key/value pair in -// the bucket. This includes nested buckets, in which case the value -// is nil, but it does not include the key/value pairs within those -// nested buckets. -func (b *readWriteBucket) ForEach(cb func(k, v []byte) er.R) er.R { - prefix := string(b.id) - - // Get the first matching key that is in the bucket. - kv, err := b.tx.stm.First(prefix) - if err != nil { - return err - } - - for kv != nil { - key, val := getKeyVal(kv) - - if err := cb(key, val); err != nil { - return err - } - - // Step to the next key. - kv, err = b.tx.stm.Next(prefix, kv.key) - if err != nil { - return err - } - } - - return nil -} - -// Get returns the value for the given key. Returns nil if the key does -// not exist in this bucket. -func (b *readWriteBucket) Get(key []byte) []byte { - // Return nil if the key is empty. - if len(key) == 0 { - return nil - } - - // Fetch the associated value. - val, err := b.tx.stm.Get(string(makeValueKey(b.id, key))) - if err != nil { - // TODO: we should return the error once the - // kvdb inteface is extended. - return nil - } - - if val == nil { - return nil - } - - return val -} - -func (b *readWriteBucket) ReadCursor() walletdb.ReadCursor { - return newReadWriteCursor(b) -} - -// NestedReadWriteBucket retrieves a nested bucket with the given key. -// Returns nil if the bucket does not exist. -func (b *readWriteBucket) NestedReadWriteBucket(key []byte) walletdb.ReadWriteBucket { - if len(key) == 0 { - return nil - } - - // Get the bucket id (and return nil if bucket doesn't exist). - bucketKey := makeBucketKey(b.id, key) - bucketVal, err := b.tx.stm.Get(string(bucketKey)) - if err != nil { - // TODO: we should return the error once the - // kvdb inteface is extended. 
- return nil - } - - if !isValidBucketID(bucketVal) { - return nil - } - - // Return the bucket with the fetched bucket id. - return newReadWriteBucket(b.tx, bucketKey, bucketVal) -} - -// assertNoValue checks if the value for the passed key exists. -func (b *readWriteBucket) assertNoValue(key []byte) er.R { - val, err := b.tx.stm.Get(string(makeValueKey(b.id, key))) - if err != nil { - return err - } - - if val != nil { - return walletdb.ErrIncompatibleValue.Default() - } - - return nil -} - -// CreateBucket creates and returns a new nested bucket with the given -// key. Returns ErrBucketExists if the bucket already exists, -// ErrBucketNameRequired if the key is empty, or ErrIncompatibleValue -// if the key value is otherwise invalid for the particular database -// implementation. Other errors are possible depending on the -// implementation. -func (b *readWriteBucket) CreateBucket(key []byte) ( - walletdb.ReadWriteBucket, er.R) { - - if len(key) == 0 { - return nil, walletdb.ErrBucketNameRequired.Default() - } - - // Check if the bucket already exists. - bucketKey := makeBucketKey(b.id, key) - - bucketVal, err := b.tx.stm.Get(string(bucketKey)) - if err != nil { - return nil, err - } - - if isValidBucketID(bucketVal) { - return nil, walletdb.ErrBucketExists.Default() - } - - if err := b.assertNoValue(key); err != nil { - return nil, err - } - - // Create a deterministic bucket id from the bucket key. - newID := makeBucketID(bucketKey) - - // Create the bucket. - b.tx.stm.Put(string(bucketKey), string(newID[:])) - - return newReadWriteBucket(b.tx, bucketKey, newID[:]), nil -} - -// CreateBucketIfNotExists creates and returns a new nested bucket with -// the given key if it does not already exist. Returns -// ErrBucketNameRequired if the key is empty or ErrIncompatibleValue -// if the key value is otherwise invalid for the particular database -// backend. Other errors are possible depending on the implementation. 
-func (b *readWriteBucket) CreateBucketIfNotExists(key []byte) ( - walletdb.ReadWriteBucket, er.R) { - - if len(key) == 0 { - return nil, walletdb.ErrBucketNameRequired.Default() - } - - // Check for the bucket and create if it doesn't exist. - bucketKey := makeBucketKey(b.id, key) - - bucketVal, err := b.tx.stm.Get(string(bucketKey)) - if err != nil { - return nil, err - } - - if !isValidBucketID(bucketVal) { - if err := b.assertNoValue(key); err != nil { - return nil, err - } - - newID := makeBucketID(bucketKey) - b.tx.stm.Put(string(bucketKey), string(newID[:])) - - return newReadWriteBucket(b.tx, bucketKey, newID[:]), nil - } - - // Otherwise return the bucket with the fetched bucket id. - return newReadWriteBucket(b.tx, bucketKey, bucketVal), nil -} - -// DeleteNestedBucket deletes the nested bucket and its sub-buckets -// pointed to by the passed key. All values in the bucket and sub-buckets -// will be deleted as well. -func (b *readWriteBucket) DeleteNestedBucket(key []byte) er.R { - // TODO shouldn't empty key return ErrBucketNameRequired ? - if len(key) == 0 { - return walletdb.ErrIncompatibleValue.Default() - } - - // Get the bucket first. - bucketKey := string(makeBucketKey(b.id, key)) - - bucketVal, err := b.tx.stm.Get(bucketKey) - if err != nil { - return err - } - - if !isValidBucketID(bucketVal) { - return walletdb.ErrBucketNotFound.Default() - } - - // Enqueue the top level bucket id. - queue := [][]byte{bucketVal} - - // Traverse the buckets breadth first. - for len(queue) != 0 { - if !isValidBucketID(queue[0]) { - return walletdb.ErrBucketNotFound.Default() - } - - id := queue[0] - queue = queue[1:] - - kv, err := b.tx.stm.First(string(id)) - if err != nil { - return err - } - - for kv != nil { - b.tx.stm.Del(kv.key) - - if isBucketKey(kv.key) { - queue = append(queue, []byte(kv.val)) - } - - kv, err = b.tx.stm.Next(string(id), kv.key) - if err != nil { - return err - } - } - - // Finally delete the sequence key for the bucket. 
- b.tx.stm.Del(string(makeSequenceKey(id))) - } - - // Delete the top level bucket and sequence key. - b.tx.stm.Del(bucketKey) - b.tx.stm.Del(string(makeSequenceKey(bucketVal))) - - return nil -} - -// Put updates the value for the passed key. -// Returns ErrKeyRequred if te passed key is empty. -func (b *readWriteBucket) Put(key, value []byte) er.R { - if len(key) == 0 { - return walletdb.ErrKeyRequired.Default() - } - - val, err := b.tx.stm.Get(string(makeBucketKey(b.id, key))) - if err != nil { - return err - } - - if val != nil { - return walletdb.ErrIncompatibleValue.Default() - } - - // Update the transaction with the new value. - b.tx.stm.Put(string(makeValueKey(b.id, key)), string(value)) - - return nil -} - -// Delete deletes the key/value pointed to by the passed key. -// Returns ErrKeyRequred if the passed key is empty. -func (b *readWriteBucket) Delete(key []byte) er.R { - if key == nil { - return nil - } - if len(key) == 0 { - return walletdb.ErrKeyRequired.Default() - } - - // Update the transaction to delete the key/value. - b.tx.stm.Del(string(makeValueKey(b.id, key))) - - return nil -} - -// ReadWriteCursor returns a new read-write cursor for this bucket. -func (b *readWriteBucket) ReadWriteCursor() walletdb.ReadWriteCursor { - return newReadWriteCursor(b) -} - -// Tx returns the buckets transaction. -func (b *readWriteBucket) Tx() walletdb.ReadWriteTx { - return b.tx -} - -// NextSequence returns an autoincrementing sequence number for this bucket. -// Note that this is not a thread safe function and as such it must not be used -// for synchronization. -func (b *readWriteBucket) NextSequence() (uint64, er.R) { - seq := b.Sequence() + 1 - - return seq, b.SetSequence(seq) -} - -// SetSequence updates the sequence number for the bucket. -func (b *readWriteBucket) SetSequence(v uint64) er.R { - // Convert the number to string. - val := strconv.FormatUint(v, 10) - - // Update the transaction with the new value for the sequence key. 
- b.tx.stm.Put(string(makeSequenceKey(b.id)), val) - - return nil -} - -// Sequence returns the current sequence number for this bucket without -// incrementing it. -func (b *readWriteBucket) Sequence() uint64 { - val, err := b.tx.stm.Get(string(makeSequenceKey(b.id))) - if err != nil { - // TODO: This update kvdb interface such that error - // may be returned here. - return 0 - } - - if val == nil { - // If the sequence number is not yet - // stored, then take the default value. - return 0 - } - - // Otherwise try to parse a 64 bit unsigned integer from the value. - num, _ := strconv.ParseUint(string(val), 10, 64) - - return num -} diff --git a/lnd/channeldb/kvdb/etcd/readwrite_bucket_test.go b/lnd/channeldb/kvdb/etcd/readwrite_bucket_test.go deleted file mode 100644 index dd846986..00000000 --- a/lnd/channeldb/kvdb/etcd/readwrite_bucket_test.go +++ /dev/null @@ -1,523 +0,0 @@ -// +build kvdb_etcd - -package etcd - -import ( - "math" - "testing" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/pktwallet/walletdb" - "github.com/stretchr/testify/require" -) - -func TestBucketCreation(t *testing.T) { - t.Parallel() - - f := NewEtcdTestFixture(t) - defer f.Cleanup() - - db, err := newEtcdBackend(f.BackendConfig()) - util.RequireNoErr(t, err) - - err = db.Update(func(tx walletdb.ReadWriteTx) er.R { - // empty bucket name - b, err := tx.CreateTopLevelBucket(nil) - util.RequireErr(t, walletdb.ErrBucketNameRequired, err) - require.Nil(t, b) - - // empty bucket name - b, err = tx.CreateTopLevelBucket([]byte("")) - util.RequireErr(t, walletdb.ErrBucketNameRequired, err) - require.Nil(t, b) - - // "apple" - apple, err := tx.CreateTopLevelBucket([]byte("apple")) - util.RequireNoErr(t, err) - require.NotNil(t, apple) - - // Check bucket tx. 
- require.Equal(t, tx, apple.Tx()) - - // "apple" already created - b, err = tx.CreateTopLevelBucket([]byte("apple")) - util.RequireNoErr(t, err) - require.NotNil(t, b) - - // "apple/banana" - banana, err := apple.CreateBucket([]byte("banana")) - util.RequireNoErr(t, err) - require.NotNil(t, banana) - - banana, err = apple.CreateBucketIfNotExists([]byte("banana")) - util.RequireNoErr(t, err) - require.NotNil(t, banana) - - // Try creating "apple/banana" again - b, err = apple.CreateBucket([]byte("banana")) - util.RequireErr(t, walletdb.ErrBucketExists, err) - require.Nil(t, b) - - // "apple/mango" - mango, err := apple.CreateBucket([]byte("mango")) - require.Nil(t, err) - require.NotNil(t, mango) - - // "apple/banana/pear" - pear, err := banana.CreateBucket([]byte("pear")) - require.Nil(t, err) - require.NotNil(t, pear) - - // empty bucket - require.Nil(t, apple.NestedReadWriteBucket(nil)) - require.Nil(t, apple.NestedReadWriteBucket([]byte(""))) - - // "apple/pear" doesn't exist - require.Nil(t, apple.NestedReadWriteBucket([]byte("pear"))) - - // "apple/banana" exits - require.NotNil(t, apple.NestedReadWriteBucket([]byte("banana"))) - require.NotNil(t, apple.NestedReadBucket([]byte("banana"))) - return nil - }, func() {}) - - require.Nil(t, err) - - expected := map[string]string{ - bkey("apple"): bval("apple"), - bkey("apple", "banana"): bval("apple", "banana"), - bkey("apple", "mango"): bval("apple", "mango"), - bkey("apple", "banana", "pear"): bval("apple", "banana", "pear"), - } - require.Equal(t, expected, f.Dump()) -} - -func TestBucketDeletion(t *testing.T) { - t.Parallel() - - f := NewEtcdTestFixture(t) - defer f.Cleanup() - - db, err := newEtcdBackend(f.BackendConfig()) - util.RequireNoErr(t, err) - - err = db.Update(func(tx walletdb.ReadWriteTx) er.R { - // "apple" - apple, err := tx.CreateTopLevelBucket([]byte("apple")) - require.Nil(t, err) - require.NotNil(t, apple) - - // "apple/banana" - banana, err := apple.CreateBucket([]byte("banana")) - 
require.Nil(t, err) - require.NotNil(t, banana) - - kvs := []KV{{"key1", "val1"}, {"key2", "val2"}, {"key3", "val3"}} - - for _, kv := range kvs { - util.RequireNoErr(t, banana.Put([]byte(kv.key), []byte(kv.val))) - require.Equal(t, []byte(kv.val), banana.Get([]byte(kv.key))) - } - - // Delete a k/v from "apple/banana" - util.RequireNoErr(t, banana.Delete([]byte("key2"))) - // Try getting/putting/deleting invalid k/v's. - require.Nil(t, banana.Get(nil)) - util.RequireErr(t, walletdb.ErrKeyRequired, banana.Put(nil, []byte("val"))) - util.RequireErr(t, walletdb.ErrKeyRequired, banana.Delete(nil)) - - // Try deleting a k/v that doesn't exist. - util.RequireNoErr(t, banana.Delete([]byte("nokey"))) - - // "apple/pear" - pear, err := apple.CreateBucket([]byte("pear")) - require.Nil(t, err) - require.NotNil(t, pear) - - // Put some values into "apple/pear" - for _, kv := range kvs { - require.Nil(t, pear.Put([]byte(kv.key), []byte(kv.val))) - require.Equal(t, []byte(kv.val), pear.Get([]byte(kv.key))) - } - - // Create nested bucket "apple/pear/cherry" - cherry, err := pear.CreateBucket([]byte("cherry")) - require.Nil(t, err) - require.NotNil(t, cherry) - - // Put some values into "apple/pear/cherry" - for _, kv := range kvs { - util.RequireNoErr(t, cherry.Put([]byte(kv.key), []byte(kv.val))) - } - - // Read back values in "apple/pear/cherry" trough a read bucket. - cherryReadBucket := pear.NestedReadBucket([]byte("cherry")) - for _, kv := range kvs { - require.Equal( - t, []byte(kv.val), - cherryReadBucket.Get([]byte(kv.key)), - ) - } - - // Try deleting some invalid buckets. - util.RequireErr(t, - walletdb.ErrBucketNameRequired, apple.DeleteNestedBucket(nil), - ) - - // Try deleting a non existing bucket. 
- util.RequireErr( - t, - walletdb.ErrBucketNotFound, - apple.DeleteNestedBucket([]byte("missing")), - ) - - // Delete "apple/pear" - require.Nil(t, apple.DeleteNestedBucket([]byte("pear"))) - - // "apple/pear" deleted - require.Nil(t, apple.NestedReadWriteBucket([]byte("pear"))) - - // "apple/pear/cherry" deleted - require.Nil(t, pear.NestedReadWriteBucket([]byte("cherry"))) - - // Values deleted too. - for _, kv := range kvs { - require.Nil(t, pear.Get([]byte(kv.key))) - require.Nil(t, cherry.Get([]byte(kv.key))) - } - - // "aple/banana" exists - require.NotNil(t, apple.NestedReadWriteBucket([]byte("banana"))) - return nil - }, func() {}) - - require.Nil(t, err) - - expected := map[string]string{ - bkey("apple"): bval("apple"), - bkey("apple", "banana"): bval("apple", "banana"), - vkey("key1", "apple", "banana"): "val1", - vkey("key3", "apple", "banana"): "val3", - } - require.Equal(t, expected, f.Dump()) -} - -func TestBucketForEach(t *testing.T) { - t.Parallel() - - f := NewEtcdTestFixture(t) - defer f.Cleanup() - - db, err := newEtcdBackend(f.BackendConfig()) - util.RequireNoErr(t, err) - - err = db.Update(func(tx walletdb.ReadWriteTx) er.R { - // "apple" - apple, err := tx.CreateTopLevelBucket([]byte("apple")) - require.Nil(t, err) - require.NotNil(t, apple) - - // "apple/banana" - banana, err := apple.CreateBucket([]byte("banana")) - require.Nil(t, err) - require.NotNil(t, banana) - - kvs := []KV{{"key1", "val1"}, {"key2", "val2"}, {"key3", "val3"}} - - // put some values into "apple" and "apple/banana" too - for _, kv := range kvs { - require.Nil(t, apple.Put([]byte(kv.key), []byte(kv.val))) - require.Equal(t, []byte(kv.val), apple.Get([]byte(kv.key))) - - require.Nil(t, banana.Put([]byte(kv.key), []byte(kv.val))) - require.Equal(t, []byte(kv.val), banana.Get([]byte(kv.key))) - } - - got := make(map[string]string) - err = apple.ForEach(func(key, val []byte) er.R { - got[string(key)] = string(val) - return nil - }) - - expected := map[string]string{ - 
"key1": "val1", - "key2": "val2", - "key3": "val3", - "banana": "", - } - - util.RequireNoErr(t, err) - require.Equal(t, expected, got) - - got = make(map[string]string) - err = banana.ForEach(func(key, val []byte) er.R { - got[string(key)] = string(val) - return nil - }) - - util.RequireNoErr(t, err) - // remove the sub-bucket key - delete(expected, "banana") - require.Equal(t, expected, got) - - return nil - }, func() {}) - - require.Nil(t, err) - - expected := map[string]string{ - bkey("apple"): bval("apple"), - bkey("apple", "banana"): bval("apple", "banana"), - vkey("key1", "apple"): "val1", - vkey("key2", "apple"): "val2", - vkey("key3", "apple"): "val3", - vkey("key1", "apple", "banana"): "val1", - vkey("key2", "apple", "banana"): "val2", - vkey("key3", "apple", "banana"): "val3", - } - require.Equal(t, expected, f.Dump()) -} - -func TestBucketForEachWithError(t *testing.T) { - t.Parallel() - - f := NewEtcdTestFixture(t) - defer f.Cleanup() - - db, err := newEtcdBackend(f.BackendConfig()) - util.RequireNoErr(t, err) - - err = db.Update(func(tx walletdb.ReadWriteTx) er.R { - // "apple" - apple, err := tx.CreateTopLevelBucket([]byte("apple")) - require.Nil(t, err) - require.NotNil(t, apple) - - // "apple/banana" - banana, err := apple.CreateBucket([]byte("banana")) - require.Nil(t, err) - require.NotNil(t, banana) - - // "apple/pear" - pear, err := apple.CreateBucket([]byte("pear")) - require.Nil(t, err) - require.NotNil(t, pear) - - kvs := []KV{{"key1", "val1"}, {"key2", "val2"}} - - // Put some values into "apple" and "apple/banana" too. - for _, kv := range kvs { - require.Nil(t, apple.Put([]byte(kv.key), []byte(kv.val))) - require.Equal(t, []byte(kv.val), apple.Get([]byte(kv.key))) - } - - got := make(map[string]string) - i := 0 - // Error while iterating value keys. 
- err = apple.ForEach(func(key, val []byte) er.R { - if i == 2 { - return er.Errorf("error") - } - - got[string(key)] = string(val) - i++ - return nil - }) - - expected := map[string]string{ - "banana": "", - "key1": "val1", - } - - require.Equal(t, expected, got) - util.RequireErr(t, err) - - got = make(map[string]string) - i = 0 - // Erro while iterating buckets. - err = apple.ForEach(func(key, val []byte) er.R { - if i == 3 { - return er.Errorf("error") - } - - got[string(key)] = string(val) - i++ - return nil - }) - - expected = map[string]string{ - "banana": "", - "key1": "val1", - "key2": "val2", - } - - require.Equal(t, expected, got) - util.RequireErr(t, err) - return nil - }, func() {}) - - require.Nil(t, err) - - expected := map[string]string{ - bkey("apple"): bval("apple"), - bkey("apple", "banana"): bval("apple", "banana"), - bkey("apple", "pear"): bval("apple", "pear"), - vkey("key1", "apple"): "val1", - vkey("key2", "apple"): "val2", - } - require.Equal(t, expected, f.Dump()) -} - -func TestBucketSequence(t *testing.T) { - t.Parallel() - - f := NewEtcdTestFixture(t) - defer f.Cleanup() - - db, err := newEtcdBackend(f.BackendConfig()) - util.RequireNoErr(t, err) - - err = db.Update(func(tx walletdb.ReadWriteTx) er.R { - apple, err := tx.CreateTopLevelBucket([]byte("apple")) - require.Nil(t, err) - require.NotNil(t, apple) - - banana, err := apple.CreateBucket([]byte("banana")) - require.Nil(t, err) - require.NotNil(t, banana) - - require.Equal(t, uint64(0), apple.Sequence()) - require.Equal(t, uint64(0), banana.Sequence()) - - require.Nil(t, apple.SetSequence(math.MaxUint64)) - require.Equal(t, uint64(math.MaxUint64), apple.Sequence()) - - for i := uint64(0); i < uint64(5); i++ { - s, err := apple.NextSequence() - require.Nil(t, err) - require.Equal(t, i, s) - } - - return nil - }, func() {}) - - require.Nil(t, err) -} - -// TestKeyClash tests that one cannot create a bucket if a value with the same -// key exists and the same is true in reverse: that 
a value cannot be put if -// a bucket with the same key exists. -func TestKeyClash(t *testing.T) { - t.Parallel() - - f := NewEtcdTestFixture(t) - defer f.Cleanup() - - db, err := newEtcdBackend(f.BackendConfig()) - util.RequireNoErr(t, err) - - // First: - // put: /apple/key -> val - // create bucket: /apple/banana - err = db.Update(func(tx walletdb.ReadWriteTx) er.R { - apple, err := tx.CreateTopLevelBucket([]byte("apple")) - require.Nil(t, err) - require.NotNil(t, apple) - - util.RequireNoErr(t, apple.Put([]byte("key"), []byte("val"))) - - banana, err := apple.CreateBucket([]byte("banana")) - require.Nil(t, err) - require.NotNil(t, banana) - - return nil - }, func() {}) - - require.Nil(t, err) - - // Next try to: - // put: /apple/banana -> val => will fail (as /apple/banana is a bucket) - // create bucket: /apple/key => will fail (as /apple/key is a value) - err = db.Update(func(tx walletdb.ReadWriteTx) er.R { - apple, err := tx.CreateTopLevelBucket([]byte("apple")) - require.Nil(t, err) - require.NotNil(t, apple) - - util.RequireErr(t, - walletdb.ErrIncompatibleValue, - apple.Put([]byte("banana"), []byte("val")), - ) - - b, err := apple.CreateBucket([]byte("key")) - require.Nil(t, b) - util.RequireErr(t, walletdb.ErrIncompatibleValue, b) - - b, err = apple.CreateBucketIfNotExists([]byte("key")) - require.Nil(t, b) - util.RequireErr(t, walletdb.ErrIncompatibleValue, b) - - return nil - }, func() {}) - - require.Nil(t, err) - - // Except that the only existing items in the db are: - // bucket: /apple - // bucket: /apple/banana - // value: /apple/key -> val - expected := map[string]string{ - bkey("apple"): bval("apple"), - bkey("apple", "banana"): bval("apple", "banana"), - vkey("key", "apple"): "val", - } - require.Equal(t, expected, f.Dump()) - -} - -// TestBucketCreateDelete tests that creating then deleting then creating a -// bucket suceeds. 
-func TestBucketCreateDelete(t *testing.T) { - t.Parallel() - f := NewEtcdTestFixture(t) - defer f.Cleanup() - - db, err := newEtcdBackend(f.BackendConfig()) - util.RequireNoErr(t, err) - - err = db.Update(func(tx walletdb.ReadWriteTx) er.R { - apple, err := tx.CreateTopLevelBucket([]byte("apple")) - util.RequireNoErr(t, err) - require.NotNil(t, apple) - - banana, err := apple.CreateBucket([]byte("banana")) - util.RequireNoErr(t, err) - require.NotNil(t, banana) - - return nil - }, func() {}) - util.RequireNoErr(t, err) - - err = db.Update(func(tx walletdb.ReadWriteTx) er.R { - apple := tx.ReadWriteBucket([]byte("apple")) - require.NotNil(t, apple) - util.RequireNoErr(t, apple.DeleteNestedBucket([]byte("banana"))) - - return nil - }, func() {}) - util.RequireNoErr(t, err) - - err = db.Update(func(tx walletdb.ReadWriteTx) er.R { - apple := tx.ReadWriteBucket([]byte("apple")) - require.NotNil(t, apple) - util.RequireNoErr(t, apple.Put([]byte("banana"), []byte("value"))) - - return nil - }, func() {}) - util.RequireNoErr(t, err) - - expected := map[string]string{ - vkey("banana", "apple"): "value", - bkey("apple"): bval("apple"), - } - require.Equal(t, expected, f.Dump()) -} diff --git a/lnd/channeldb/kvdb/etcd/readwrite_cursor.go b/lnd/channeldb/kvdb/etcd/readwrite_cursor.go deleted file mode 100644 index 251b4c67..00000000 --- a/lnd/channeldb/kvdb/etcd/readwrite_cursor.go +++ /dev/null @@ -1,143 +0,0 @@ -// +build kvdb_etcd - -package etcd - -// readWriteCursor holds a reference to the cursors bucket, the value -// prefix and the current key used while iterating. -type readWriteCursor struct { - // bucket holds the reference to the parent bucket. - bucket *readWriteBucket - - // prefix holds the value prefix which is in front of each - // value key in the bucket. - prefix string - - // currKey holds the current key of the cursor. 
- currKey string -} - -func newReadWriteCursor(bucket *readWriteBucket) *readWriteCursor { - return &readWriteCursor{ - bucket: bucket, - prefix: string(bucket.id), - } -} - -// First positions the cursor at the first key/value pair and returns -// the pair. -func (c *readWriteCursor) First() (key, value []byte) { - // Get the first key with the value prefix. - kv, err := c.bucket.tx.stm.First(c.prefix) - if err != nil { - // TODO: revise this once kvdb interface supports errors - return nil, nil - } - - if kv != nil { - c.currKey = kv.key - return getKeyVal(kv) - } - - return nil, nil -} - -// Last positions the cursor at the last key/value pair and returns the -// pair. -func (c *readWriteCursor) Last() (key, value []byte) { - kv, err := c.bucket.tx.stm.Last(c.prefix) - if err != nil { - // TODO: revise this once kvdb interface supports errors - return nil, nil - } - - if kv != nil { - c.currKey = kv.key - return getKeyVal(kv) - } - - return nil, nil -} - -// Next moves the cursor one key/value pair forward and returns the new -// pair. -func (c *readWriteCursor) Next() (key, value []byte) { - kv, err := c.bucket.tx.stm.Next(c.prefix, c.currKey) - if err != nil { - // TODO: revise this once kvdb interface supports errors - return nil, nil - } - - if kv != nil { - c.currKey = kv.key - return getKeyVal(kv) - } - - return nil, nil -} - -// Prev moves the cursor one key/value pair backward and returns the new -// pair. -func (c *readWriteCursor) Prev() (key, value []byte) { - kv, err := c.bucket.tx.stm.Prev(c.prefix, c.currKey) - if err != nil { - // TODO: revise this once kvdb interface supports errors - return nil, nil - } - - if kv != nil { - c.currKey = kv.key - return getKeyVal(kv) - } - - return nil, nil -} - -// Seek positions the cursor at the passed seek key. If the key does -// not exist, the cursor is moved to the next key after seek. Returns -// the new pair. 
-func (c *readWriteCursor) Seek(seek []byte) (key, value []byte) { - // Return nil if trying to seek to an empty key. - if seek == nil { - return nil, nil - } - - // Seek to the first key with prefix + seek. If that key is not present - // STM will seek to the next matching key with prefix. - kv, err := c.bucket.tx.stm.Seek(c.prefix, c.prefix+string(seek)) - if err != nil { - // TODO: revise this once kvdb interface supports errors - return nil, nil - } - - if kv != nil { - c.currKey = kv.key - return getKeyVal(kv) - } - - return nil, nil -} - -// Delete removes the current key/value pair the cursor is at without -// invalidating the cursor. Returns ErrIncompatibleValue if attempted -// when the cursor points to a nested bucket. -func (c *readWriteCursor) Delete() er.R { - // Get the next key after the current one. We could do this - // after deletion too but it's one step more efficient here. - nextKey, err := c.bucket.tx.stm.Next(c.prefix, c.currKey) - if err != nil { - return err - } - - if isBucketKey(c.currKey) { - c.bucket.DeleteNestedBucket(getKey(c.currKey)) - } else { - c.bucket.Delete(getKey(c.currKey)) - } - - if nextKey != nil { - // Set current key to the next one. 
- c.currKey = nextKey.key - } - - return nil -} diff --git a/lnd/channeldb/kvdb/etcd/readwrite_cursor_test.go b/lnd/channeldb/kvdb/etcd/readwrite_cursor_test.go deleted file mode 100644 index 7fc3a3c2..00000000 --- a/lnd/channeldb/kvdb/etcd/readwrite_cursor_test.go +++ /dev/null @@ -1,369 +0,0 @@ -// +build kvdb_etcd - -package etcd - -import ( - "testing" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/pktwallet/walletdb" - "github.com/stretchr/testify/require" -) - -func TestReadCursorEmptyInterval(t *testing.T) { - t.Parallel() - - f := NewEtcdTestFixture(t) - defer f.Cleanup() - - db, err := newEtcdBackend(f.BackendConfig()) - util.RequireNoErr(t, err) - - err = db.Update(func(tx walletdb.ReadWriteTx) er.R { - b, err := tx.CreateTopLevelBucket([]byte("apple")) - util.RequireNoErr(t, err) - require.NotNil(t, b) - - return nil - }, func() {}) - util.RequireNoErr(t, err) - - err = db.View(func(tx walletdb.ReadTx) er.R { - b := tx.ReadBucket([]byte("apple")) - require.NotNil(t, b) - - cursor := b.ReadCursor() - k, v := cursor.First() - require.Nil(t, k) - require.Nil(t, v) - - k, v = cursor.Next() - require.Nil(t, k) - require.Nil(t, v) - - k, v = cursor.Last() - require.Nil(t, k) - require.Nil(t, v) - - k, v = cursor.Prev() - require.Nil(t, k) - require.Nil(t, v) - - return nil - }, func() {}) - util.RequireNoErr(t, err) -} - -func TestReadCursorNonEmptyInterval(t *testing.T) { - t.Parallel() - - f := NewEtcdTestFixture(t) - defer f.Cleanup() - - db, err := newEtcdBackend(f.BackendConfig()) - util.RequireNoErr(t, err) - - testKeyValues := []KV{ - {"b", "1"}, - {"c", "2"}, - {"da", "3"}, - {"e", "4"}, - } - - err = db.Update(func(tx walletdb.ReadWriteTx) er.R { - b, err := tx.CreateTopLevelBucket([]byte("apple")) - util.RequireNoErr(t, err) - require.NotNil(t, b) - - for _, kv := range testKeyValues { - util.RequireNoErr(t, b.Put([]byte(kv.key), []byte(kv.val))) - } - return nil - }, func() {}) - - util.RequireNoErr(t, err) - - err = 
db.View(func(tx walletdb.ReadTx) er.R { - b := tx.ReadBucket([]byte("apple")) - require.NotNil(t, b) - - // Iterate from the front. - var kvs []KV - cursor := b.ReadCursor() - k, v := cursor.First() - - for k != nil && v != nil { - kvs = append(kvs, KV{string(k), string(v)}) - k, v = cursor.Next() - } - require.Equal(t, testKeyValues, kvs) - - // Iterate from the back. - kvs = []KV{} - k, v = cursor.Last() - - for k != nil && v != nil { - kvs = append(kvs, KV{string(k), string(v)}) - k, v = cursor.Prev() - } - require.Equal(t, reverseKVs(testKeyValues), kvs) - - // Random access - perm := []int{3, 0, 2, 1} - for _, i := range perm { - k, v := cursor.Seek([]byte(testKeyValues[i].key)) - require.Equal(t, []byte(testKeyValues[i].key), k) - require.Equal(t, []byte(testKeyValues[i].val), v) - } - - // Seek to nonexisting key. - k, v = cursor.Seek(nil) - require.Nil(t, k) - require.Nil(t, v) - - k, v = cursor.Seek([]byte("x")) - require.Nil(t, k) - require.Nil(t, v) - - return nil - }, func() {}) - - util.RequireNoErr(t, err) -} - -func TestReadWriteCursor(t *testing.T) { - t.Parallel() - - f := NewEtcdTestFixture(t) - defer f.Cleanup() - - db, err := newEtcdBackend(f.BackendConfig()) - util.RequireNoErr(t, err) - - testKeyValues := []KV{ - {"b", "1"}, - {"c", "2"}, - {"da", "3"}, - {"e", "4"}, - } - - count := len(testKeyValues) - - // Pre-store the first half of the interval. - util.RequireNoErr(t, db.Update(func(tx walletdb.ReadWriteTx) er.R { - b, err := tx.CreateTopLevelBucket([]byte("apple")) - util.RequireNoErr(t, err) - require.NotNil(t, b) - - for i := 0; i < count/2; i++ { - err = b.Put( - []byte(testKeyValues[i].key), - []byte(testKeyValues[i].val), - ) - util.RequireNoErr(t, err) - } - return nil - }, func() {})) - - err = db.Update(func(tx walletdb.ReadWriteTx) er.R { - b := tx.ReadWriteBucket([]byte("apple")) - require.NotNil(t, b) - - // Store the second half of the interval. 
- for i := count / 2; i < count; i++ { - err = b.Put( - []byte(testKeyValues[i].key), - []byte(testKeyValues[i].val), - ) - util.RequireNoErr(t, err) - } - - cursor := b.ReadWriteCursor() - - // First on valid interval. - fk, fv := cursor.First() - require.Equal(t, []byte("b"), fk) - require.Equal(t, []byte("1"), fv) - - // Prev(First()) = nil - k, v := cursor.Prev() - require.Nil(t, k) - require.Nil(t, v) - - // Last on valid interval. - lk, lv := cursor.Last() - require.Equal(t, []byte("e"), lk) - require.Equal(t, []byte("4"), lv) - - // Next(Last()) = nil - k, v = cursor.Next() - require.Nil(t, k) - require.Nil(t, v) - - // Delete first item, then add an item before the - // deleted one. Check that First/Next will "jump" - // over the deleted item and return the new first. - _, _ = cursor.First() - util.RequireNoErr(t, cursor.Delete()) - util.RequireNoErr(t, b.Put([]byte("a"), []byte("0"))) - fk, fv = cursor.First() - - require.Equal(t, []byte("a"), fk) - require.Equal(t, []byte("0"), fv) - - k, v = cursor.Next() - require.Equal(t, []byte("c"), k) - require.Equal(t, []byte("2"), v) - - // Similarly test that a new end is returned if - // the old end is deleted first. - _, _ = cursor.Last() - util.RequireNoErr(t, cursor.Delete()) - util.RequireNoErr(t, b.Put([]byte("f"), []byte("5"))) - - lk, lv = cursor.Last() - require.Equal(t, []byte("f"), lk) - require.Equal(t, []byte("5"), lv) - - k, v = cursor.Prev() - require.Equal(t, []byte("da"), k) - require.Equal(t, []byte("3"), v) - - // Overwrite k/v in the middle of the interval. - util.RequireNoErr(t, b.Put([]byte("c"), []byte("3"))) - k, v = cursor.Prev() - require.Equal(t, []byte("c"), k) - require.Equal(t, []byte("3"), v) - - // Insert new key/values. 
- util.RequireNoErr(t, b.Put([]byte("cx"), []byte("x"))) - util.RequireNoErr(t, b.Put([]byte("cy"), []byte("y"))) - - k, v = cursor.Next() - require.Equal(t, []byte("cx"), k) - require.Equal(t, []byte("x"), v) - - k, v = cursor.Next() - require.Equal(t, []byte("cy"), k) - require.Equal(t, []byte("y"), v) - - expected := []KV{ - {"a", "0"}, - {"c", "3"}, - {"cx", "x"}, - {"cy", "y"}, - {"da", "3"}, - {"f", "5"}, - } - - // Iterate from the front. - var kvs []KV - k, v = cursor.First() - - for k != nil && v != nil { - kvs = append(kvs, KV{string(k), string(v)}) - k, v = cursor.Next() - } - require.Equal(t, expected, kvs) - - // Iterate from the back. - kvs = []KV{} - k, v = cursor.Last() - - for k != nil && v != nil { - kvs = append(kvs, KV{string(k), string(v)}) - k, v = cursor.Prev() - } - require.Equal(t, reverseKVs(expected), kvs) - - return nil - }, func() {}) - - util.RequireNoErr(t, err) - - expected := map[string]string{ - bkey("apple"): bval("apple"), - vkey("a", "apple"): "0", - vkey("c", "apple"): "3", - vkey("cx", "apple"): "x", - vkey("cy", "apple"): "y", - vkey("da", "apple"): "3", - vkey("f", "apple"): "5", - } - require.Equal(t, expected, f.Dump()) -} - -// TestReadWriteCursorWithBucketAndValue tests that cursors are able to iterate -// over both bucket and value keys if both are present in the iterated bucket. -func TestReadWriteCursorWithBucketAndValue(t *testing.T) { - t.Parallel() - - f := NewEtcdTestFixture(t) - defer f.Cleanup() - - db, err := newEtcdBackend(f.BackendConfig()) - util.RequireNoErr(t, err) - - // Pre-store the first half of the interval. 
- util.RequireNoErr(t, db.Update(func(tx walletdb.ReadWriteTx) er.R { - b, err := tx.CreateTopLevelBucket([]byte("apple")) - util.RequireNoErr(t, err) - require.NotNil(t, b) - - util.RequireNoErr(t, b.Put([]byte("key"), []byte("val"))) - - b1, err := b.CreateBucket([]byte("banana")) - util.RequireNoErr(t, err) - require.NotNil(t, b1) - - b2, err := b.CreateBucket([]byte("pear")) - util.RequireNoErr(t, err) - require.NotNil(t, b2) - - return nil - }, func() {})) - - err = db.View(func(tx walletdb.ReadTx) er.R { - b := tx.ReadBucket([]byte("apple")) - require.NotNil(t, b) - - cursor := b.ReadCursor() - - // First on valid interval. - k, v := cursor.First() - require.Equal(t, []byte("banana"), k) - require.Nil(t, v) - - k, v = cursor.Next() - require.Equal(t, []byte("key"), k) - require.Equal(t, []byte("val"), v) - - k, v = cursor.Last() - require.Equal(t, []byte("pear"), k) - require.Nil(t, v) - - k, v = cursor.Seek([]byte("k")) - require.Equal(t, []byte("key"), k) - require.Equal(t, []byte("val"), v) - - k, v = cursor.Seek([]byte("banana")) - require.Equal(t, []byte("banana"), k) - require.Nil(t, v) - - k, v = cursor.Next() - require.Equal(t, []byte("key"), k) - require.Equal(t, []byte("val"), v) - - return nil - }, func() {}) - - util.RequireNoErr(t, err) - - expected := map[string]string{ - bkey("apple"): bval("apple"), - bkey("apple", "banana"): bval("apple", "banana"), - bkey("apple", "pear"): bval("apple", "pear"), - vkey("key", "apple"): "val", - } - require.Equal(t, expected, f.Dump()) -} diff --git a/lnd/channeldb/kvdb/etcd/readwrite_tx.go b/lnd/channeldb/kvdb/etcd/readwrite_tx.go deleted file mode 100644 index 5d10c463..00000000 --- a/lnd/channeldb/kvdb/etcd/readwrite_tx.go +++ /dev/null @@ -1,100 +0,0 @@ -// +build kvdb_etcd - -package etcd - -import ( - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/pktwallet/walletdb" -) - -// readWriteTx holds a reference to the STM transaction. 
-type readWriteTx struct { - // stm is the reference to the parent STM. - stm STM - - // rootBucketID holds the sha256 hash of the root bucket id, which is used - // for key space spearation. - rootBucketID [bucketIDLength]byte - - // active is true if the transaction hasn't been committed yet. - active bool -} - -// newReadWriteTx creates an rw transaction with the passed STM. -func newReadWriteTx(stm STM, prefix string) *readWriteTx { - return &readWriteTx{ - stm: stm, - active: true, - rootBucketID: makeBucketID([]byte(prefix)), - } -} - -// rooBucket is a helper function to return the always present -// pseudo root bucket. -func rootBucket(tx *readWriteTx) *readWriteBucket { - return newReadWriteBucket(tx, tx.rootBucketID[:], tx.rootBucketID[:]) -} - -// ReadBucket opens the root bucket for read only access. If the bucket -// described by the key does not exist, nil is returned. -func (tx *readWriteTx) ReadBucket(key []byte) walletdb.ReadBucket { - return rootBucket(tx).NestedReadWriteBucket(key) -} - -// Rollback closes the transaction, discarding changes (if any) if the -// database was modified by a write transaction. -func (tx *readWriteTx) Rollback() er.R { - // If the transaction has been closed roolback will fail. - if !tx.active { - return walletdb.ErrTxClosed.Default() - } - - // Rollback the STM and set the tx to inactive. - tx.stm.Rollback() - tx.active = false - - return nil -} - -// ReadWriteBucket opens the root bucket for read/write access. If the -// bucket described by the key does not exist, nil is returned. -func (tx *readWriteTx) ReadWriteBucket(key []byte) walletdb.ReadWriteBucket { - return rootBucket(tx).NestedReadWriteBucket(key) -} - -// CreateTopLevelBucket creates the top level bucket for a key if it -// does not exist. The newly-created bucket it returned. 
-func (tx *readWriteTx) CreateTopLevelBucket(key []byte) (walletdb.ReadWriteBucket, er.R) { - return rootBucket(tx).CreateBucketIfNotExists(key) -} - -// DeleteTopLevelBucket deletes the top level bucket for a key. This -// errors if the bucket can not be found or the key keys a single value -// instead of a bucket. -func (tx *readWriteTx) DeleteTopLevelBucket(key []byte) er.R { - return rootBucket(tx).DeleteNestedBucket(key) -} - -// Commit commits the transaction if not already committed. Will return -// error if the underlying STM fails. -func (tx *readWriteTx) Commit() er.R { - // Commit will fail if the transaction is already committed. - if !tx.active { - return walletdb.ErrTxClosed.Default() - } - - // Try committing the transaction. - if err := tx.stm.Commit(); err != nil { - return err - } - - // Mark the transaction as not active after commit. - tx.active = false - - return nil -} - -// OnCommit sets the commit callback (overriding if already set). -func (tx *readWriteTx) OnCommit(cb func()) { - tx.stm.OnCommit(cb) -} diff --git a/lnd/channeldb/kvdb/etcd/readwrite_tx_test.go b/lnd/channeldb/kvdb/etcd/readwrite_tx_test.go deleted file mode 100644 index 40a2bfc9..00000000 --- a/lnd/channeldb/kvdb/etcd/readwrite_tx_test.go +++ /dev/null @@ -1,158 +0,0 @@ -// +build kvdb_etcd - -package etcd - -import ( - "testing" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/pktwallet/walletdb" - "github.com/stretchr/testify/require" -) - -func TestTxManualCommit(t *testing.T) { - t.Parallel() - - f := NewEtcdTestFixture(t) - defer f.Cleanup() - - db, err := newEtcdBackend(f.BackendConfig()) - util.RequireNoErr(t, err) - - tx, err := db.BeginReadWriteTx() - util.RequireNoErr(t, err) - require.NotNil(t, tx) - - committed := false - - tx.OnCommit(func() { - committed = true - }) - - apple, err := tx.CreateTopLevelBucket([]byte("apple")) - util.RequireNoErr(t, err) - require.NotNil(t, apple) - 
util.RequireNoErr(t, apple.Put([]byte("testKey"), []byte("testVal"))) - - banana, err := tx.CreateTopLevelBucket([]byte("banana")) - util.RequireNoErr(t, err) - require.NotNil(t, banana) - util.RequireNoErr(t, banana.Put([]byte("testKey"), []byte("testVal"))) - util.RequireNoErr(t, tx.DeleteTopLevelBucket([]byte("banana"))) - - util.RequireNoErr(t, tx.Commit()) - require.True(t, committed) - - expected := map[string]string{ - bkey("apple"): bval("apple"), - vkey("testKey", "apple"): "testVal", - } - require.Equal(t, expected, f.Dump()) -} - -func TestTxRollback(t *testing.T) { - t.Parallel() - - f := NewEtcdTestFixture(t) - defer f.Cleanup() - - db, err := newEtcdBackend(f.BackendConfig()) - util.RequireNoErr(t, err) - - tx, err := db.BeginReadWriteTx() - require.Nil(t, err) - require.NotNil(t, tx) - - apple, err := tx.CreateTopLevelBucket([]byte("apple")) - require.Nil(t, err) - require.NotNil(t, apple) - - util.RequireNoErr(t, apple.Put([]byte("testKey"), []byte("testVal"))) - - util.RequireNoErr(t, tx.Rollback()) - util.RequireErr(t, walletdb.ErrTxClosed, tx.Commit()) - require.Equal(t, map[string]string{}, f.Dump()) -} - -func TestChangeDuringManualTx(t *testing.T) { - t.Parallel() - - f := NewEtcdTestFixture(t) - defer f.Cleanup() - - db, err := newEtcdBackend(f.BackendConfig()) - util.RequireNoErr(t, err) - - tx, err := db.BeginReadWriteTx() - require.Nil(t, err) - require.NotNil(t, tx) - - apple, err := tx.CreateTopLevelBucket([]byte("apple")) - require.Nil(t, err) - require.NotNil(t, apple) - - util.RequireNoErr(t, apple.Put([]byte("testKey"), []byte("testVal"))) - - // Try overwriting the bucket key. 
- f.Put(bkey("apple"), "banana") - - // TODO: translate error - require.NotNil(t, tx.Commit()) - require.Equal(t, map[string]string{ - bkey("apple"): "banana", - }, f.Dump()) -} - -func TestChangeDuringUpdate(t *testing.T) { - t.Parallel() - - f := NewEtcdTestFixture(t) - defer f.Cleanup() - - db, err := newEtcdBackend(f.BackendConfig()) - util.RequireNoErr(t, err) - - count := 0 - - err = db.Update(func(tx walletdb.ReadWriteTx) er.R { - apple, err := tx.CreateTopLevelBucket([]byte("apple")) - util.RequireNoErr(t, err) - require.NotNil(t, apple) - - util.RequireNoErr(t, apple.Put([]byte("key"), []byte("value"))) - - if count == 0 { - f.Put(vkey("key", "apple"), "new_value") - f.Put(vkey("key2", "apple"), "value2") - } - - cursor := apple.ReadCursor() - k, v := cursor.First() - require.Equal(t, []byte("key"), k) - require.Equal(t, []byte("value"), v) - require.Equal(t, v, apple.Get([]byte("key"))) - - k, v = cursor.Next() - if count == 0 { - require.Nil(t, k) - require.Nil(t, v) - } else { - require.Equal(t, []byte("key2"), k) - require.Equal(t, []byte("value2"), v) - } - - count++ - return nil - }, func() {}) - - require.Nil(t, err) - require.Equal(t, count, 2) - - expected := map[string]string{ - bkey("apple"): bval("apple"), - vkey("key", "apple"): "value", - vkey("key2", "apple"): "value2", - } - require.Equal(t, expected, f.Dump()) -} diff --git a/lnd/channeldb/kvdb/etcd/stm.go b/lnd/channeldb/kvdb/etcd/stm.go deleted file mode 100644 index de111297..00000000 --- a/lnd/channeldb/kvdb/etcd/stm.go +++ /dev/null @@ -1,806 +0,0 @@ -// +build kvdb_etcd - -package etcd - -import ( - "context" - "fmt" - "math" - "strings" - - v3 "github.com/coreos/etcd/clientv3" - "github.com/pkt-cash/pktd/btcutil/er" -) - -type CommitStats struct { - Rset int - Wset int - Retries int -} - -// KV stores a key/value pair. -type KV struct { - key string - val string -} - -// STM is an interface for software transactional memory. 
-// All calls that return error will do so only if STM is manually handled and -// abort the apply closure otherwise. In both case the returned error is a -// DatabaseError. -type STM interface { - // Get returns the value for a key and inserts the key in the txn's read - // set. Returns nil if there's no matching key, or the key is empty. - Get(key string) ([]byte, er.R) - - // Put adds a value for a key to the txn's write set. - Put(key, val string) - - // Del adds a delete operation for the key to the txn's write set. - Del(key string) - - // First returns the first k/v that begins with prefix or nil if there's - // no such k/v pair. If the key is found it is inserted to the txn's - // read set. Returns nil if there's no match. - First(prefix string) (*KV, er.R) - - // Last returns the last k/v that begins with prefix or nil if there's - // no such k/v pair. If the key is found it is inserted to the txn's - // read set. Returns nil if there's no match. - Last(prefix string) (*KV, er.R) - - // Prev returns the previous k/v before key that begins with prefix or - // nil if there's no such k/v. If the key is found it is inserted to the - // read set. Returns nil if there's no match. - Prev(prefix, key string) (*KV, er.R) - - // Next returns the next k/v after key that begins with prefix or nil - // if there's no such k/v. If the key is found it is inserted to the - // txn's read set. Returns nil if there's no match. - Next(prefix, key string) (*KV, er.R) - - // Seek will return k/v at key beginning with prefix. If the key doesn't - // exists Seek will return the next k/v after key beginning with prefix. - // If a matching k/v is found it is inserted to the txn's read set. Returns - // nil if there's no match. - Seek(prefix, key string) (*KV, er.R) - - // OnCommit calls the passed callback func upon commit. - OnCommit(func()) - - // Commit attempts to apply the txn's changes to the server. 
- // Commit may return CommitError if transaction is outdated and needs retry. - Commit() er.R - - // Rollback emties the read and write sets such that a subsequent commit - // won't alter the database. - Rollback() -} - -// CommitError is used to check if there was an error -// due to stale data in the transaction. -type CommitError struct{} - -// Error returns a static string for CommitError for -// debugging/logging purposes. -func (e CommitError) Error() string { - return "commit failed" -} - -// DatabaseError is used to wrap errors that are not -// related to stale data in the transaction. -type DatabaseError struct { - msg string - err error -} - -// Unwrap returns the wrapped error in a DatabaseError. -func (e *DatabaseError) Unwrap() er.R { - return e.err -} - -// Error simply converts DatabaseError to a string that -// includes both the message and the wrapped error. -func (e DatabaseError) Error() string { - return fmt.Sprintf("etcd error: %v - %v", e.msg, e.err) -} - -// stmGet is the result of a read operation, -// a value and the mod revision of the key/value. -type stmGet struct { - val string - rev int64 -} - -// readSet stores all reads done in an STM. -type readSet map[string]stmGet - -// stmPut stores a value and an operation (put/delete). -type stmPut struct { - val string - op v3.Op -} - -// writeSet stroes all writes done in an STM. -type writeSet map[string]stmPut - -// stm implements repeatable-read software transactional memory -// over etcd. -type stm struct { - // client is an etcd client handling all RPC communications - // to the etcd instance/cluster. - client *v3.Client - - // manual is set to true for manual transactions which don't - // execute in the STM run loop. - manual bool - - // txQueue is lightweight contention manager, which is used to detect - // transaction conflicts and reduce retries. - txQueue *commitQueue - - // options stores optional settings passed by the user. 
- options *STMOptions - - // prefetch hold prefetched key values and revisions. - prefetch readSet - - // rset holds read key values and revisions. - rset readSet - - // wset holds overwritten keys and their values. - wset writeSet - - // getOpts are the opts used for gets. - getOpts []v3.OpOption - - // revision stores the snapshot revision after first read. - revision int64 - - // onCommit gets called upon commit. - onCommit func() -} - -// STMOptions can be used to pass optional settings -// when an STM is created. -type STMOptions struct { - // ctx holds an externally provided abort context. - ctx context.Context - commitStatsCallback func(bool, CommitStats) -} - -// STMOptionFunc is a function that updates the passed STMOptions. -type STMOptionFunc func(*STMOptions) - -// WithAbortContext specifies the context for permanently -// aborting the transaction. -func WithAbortContext(ctx context.Context) STMOptionFunc { - return func(so *STMOptions) { - so.ctx = ctx - } -} - -func WithCommitStatsCallback(cb func(bool, CommitStats)) STMOptionFunc { - return func(so *STMOptions) { - so.commitStatsCallback = cb - } -} - -// RunSTM runs the apply function by creating an STM using serializable snapshot -// isolation, passing it to the apply and handling commit errors and retries. -func RunSTM(cli *v3.Client, apply func(STM) error, txQueue *commitQueue, - so ...STMOptionFunc) er.R { - - return runSTM(makeSTM(cli, false, txQueue, so...), apply) -} - -// NewSTM creates a new STM instance, using serializable snapshot isolation. -func NewSTM(cli *v3.Client, txQueue *commitQueue, so ...STMOptionFunc) STM { - return makeSTM(cli, true, txQueue, so...) -} - -// makeSTM is the actual constructor of the stm. It first apply all passed -// options then creates the stm object and resets it before returning. -func makeSTM(cli *v3.Client, manual bool, txQueue *commitQueue, - so ...STMOptionFunc) *stm { - - opts := &STMOptions{ - ctx: cli.Ctx(), - } - - // Apply all functional options. 
- for _, fo := range so { - fo(opts) - } - - s := &stm{ - client: cli, - manual: manual, - txQueue: txQueue, - options: opts, - prefetch: make(map[string]stmGet), - } - - // Reset read and write set. - s.Rollback() - - return s -} - -// runSTM implements the run loop of the STM, running the apply func, catching -// errors and handling commit. The loop will quit on every error except -// CommitError which is used to indicate a necessary retry. -func runSTM(s *stm, apply func(STM) error) er.R { - var ( - retries int - stats CommitStats - executeErr error - ) - - done := make(chan struct{}) - - execute := func() { - defer close(done) - - for { - select { - // Check if the STM is aborted and break the retry loop - // if it is. - case <-s.options.ctx.Done(): - executeErr = er.Errorf("aborted") - return - - default: - } - - stats, executeErr = s.commit() - - // Re-apply only upon commit error (meaning the - // keys were changed). - if _, ok := executeErr.(CommitError); !ok { - // Anything that's not a CommitError - // aborts the transaction. - return - } - - // Rollback before trying to re-apply. - s.Rollback() - retries++ - - // Re-apply the transaction closure. - if executeErr = apply(s); executeErr != nil { - return - } - } - } - - // Run the tx closure to construct the read and write sets. - // Also we expect that if there are no conflicting transactions - // in the queue, then we only run apply once. - if preApplyErr := apply(s); preApplyErr != nil { - return preApplyErr - } - - // Queue up the transaction for execution. - s.txQueue.Add(execute, s.rset, s.wset) - - // Wait for the transaction to execute, or break if aborted. - select { - case <-done: - case <-s.options.ctx.Done(): - } - - s.txQueue.Done(s.rset, s.wset) - - if s.options.commitStatsCallback != nil { - stats.Retries = retries - s.options.commitStatsCallback(executeErr == nil, stats) - } - - return executeErr -} - -// add inserts a txn response to the read set. 
This is useful when the txn -// fails due to conflict where the txn response can be used to prefetch -// key/values. -func (rs readSet) add(txnResp *v3.TxnResponse) { - for _, resp := range txnResp.Responses { - getResp := (*v3.GetResponse)(resp.GetResponseRange()) - for _, kv := range getResp.Kvs { - rs[string(kv.Key)] = stmGet{ - val: string(kv.Value), - rev: kv.ModRevision, - } - } - } -} - -// gets is a helper to create an op slice for transaction -// construction. -func (rs readSet) gets() []v3.Op { - ops := make([]v3.Op, 0, len(rs)) - - for k := range rs { - ops = append(ops, v3.OpGet(k)) - } - - return ops -} - -// cmps returns a compare list which will serve as a precondition testing that -// the values in the read set didn't change. -func (rs readSet) cmps() []v3.Cmp { - cmps := make([]v3.Cmp, 0, len(rs)) - for key, getValue := range rs { - cmps = append(cmps, v3.Compare( - v3.ModRevision(key), "=", getValue.rev, - )) - } - - return cmps -} - -// cmps returns a cmp list testing no writes have happened past rev. -func (ws writeSet) cmps(rev int64) []v3.Cmp { - cmps := make([]v3.Cmp, 0, len(ws)) - for key := range ws { - cmps = append(cmps, v3.Compare(v3.ModRevision(key), "<", rev)) - } - - return cmps -} - -// puts is the list of ops for all pending writes. -func (ws writeSet) puts() []v3.Op { - puts := make([]v3.Op, 0, len(ws)) - for _, v := range ws { - puts = append(puts, v.op) - } - - return puts -} - -// fetch is a helper to fetch key/value given options. If a value is returned -// then fetch will try to fix the STM's snapshot revision (if not already set). -// We'll also cache the returned key/value in the read set. 
-func (s *stm) fetch(key string, opts ...v3.OpOption) ([]KV, er.R) { - resp, err := s.client.Get( - s.options.ctx, key, append(opts, s.getOpts...)..., - ) - if err != nil { - return nil, DatabaseError{ - msg: "stm.fetch() failed", - err: err, - } - } - - // Set revison and serializable options upon first fetch - // for any subsequent fetches. - if s.getOpts == nil { - s.revision = resp.Header.Revision - s.getOpts = []v3.OpOption{ - v3.WithRev(s.revision), - v3.WithSerializable(), - } - } - - if len(resp.Kvs) == 0 { - // Add assertion to the read set which will extend our commit - // constraint such that the commit will fail if the key is - // present in the database. - s.rset[key] = stmGet{ - rev: 0, - } - } - - var result []KV - - // Fill the read set with key/values returned. - for _, kv := range resp.Kvs { - // Remove from prefetch. - key := string(kv.Key) - val := string(kv.Value) - - delete(s.prefetch, key) - - // Add to read set. - s.rset[key] = stmGet{ - val: val, - rev: kv.ModRevision, - } - - result = append(result, KV{key, val}) - } - - return result, nil -} - -// Get returns the value for key. If there's no such -// key/value in the database or the passed key is empty -// Get will return nil. -func (s *stm) Get(key string) ([]byte, er.R) { - if key == "" { - return nil, nil - } - - // Return freshly written value if present. - if put, ok := s.wset[key]; ok { - if put.op.IsDelete() { - return nil, nil - } - - return []byte(put.val), nil - } - - // Populate read set if key is present in - // the prefetch set. - if getValue, ok := s.prefetch[key]; ok { - delete(s.prefetch, key) - - // Use the prefetched value only if it is for - // an existing key. - if getValue.rev != 0 { - s.rset[key] = getValue - } - } - - // Return value if alread in read set. - if getValue, ok := s.rset[key]; ok { - // Return the value if the rset contains an existing key. 
- if getValue.rev != 0 { - return []byte(getValue.val), nil - } else { - return nil, nil - } - } - - // Fetch and return value. - kvs, err := s.fetch(key) - if err != nil { - return nil, err - } - - if len(kvs) > 0 { - return []byte(kvs[0].val), nil - } - - // Return empty result if key not in DB. - return nil, nil -} - -// First returns the first key/value matching prefix. If there's no key starting -// with prefix, Last will return nil. -func (s *stm) First(prefix string) (*KV, er.R) { - return s.next(prefix, prefix, true) -} - -// Last returns the last key/value with prefix. If there's no key starting with -// prefix, Last will return nil. -func (s *stm) Last(prefix string) (*KV, er.R) { - // As we don't know the full range, fetch the last - // key/value with this prefix first. - resp, err := s.fetch(prefix, v3.WithLastKey()...) - if err != nil { - return nil, err - } - - var ( - kv KV - found bool - ) - - if len(resp) > 0 { - kv = resp[0] - found = true - } - - // Now make sure there's nothing in the write set - // that is a better match, meaning it has the same - // prefix but is greater or equal than the current - // best candidate. Note that this is not efficient - // when the write set is large! - for k, put := range s.wset { - if put.op.IsDelete() { - continue - } - - if strings.HasPrefix(k, prefix) && k >= kv.key { - kv.key = k - kv.val = put.val - found = true - } - } - - if found { - return &kv, nil - } - - return nil, nil -} - -// Prev returns the prior key/value before key (with prefix). If there's no such -// key Next will return nil. -func (s *stm) Prev(prefix, startKey string) (*KV, er.R) { - var result KV - - fetchKey := startKey - matchFound := false - - for { - // Ask etcd to retrieve one key that is a - // match in descending order from the passed key. - opts := []v3.OpOption{ - v3.WithRange(fetchKey), - v3.WithSort(v3.SortByKey, v3.SortDescend), - v3.WithLimit(1), - } - - kvs, err := s.fetch(prefix, opts...) 
- if err != nil { - return nil, err - } - - if len(kvs) == 0 { - break - } - - kv := &kvs[0] - - // WithRange and WithPrefix can't be used - // together, so check prefix here. If the - // returned key no longer has the prefix, - // then break out. - if !strings.HasPrefix(kv.key, prefix) { - break - } - - // Fetch the prior key if this is deleted. - if put, ok := s.wset[kv.key]; ok && put.op.IsDelete() { - fetchKey = kv.key - continue - } - - result = *kv - matchFound = true - - break - } - - // Closre holding all checks to find a possibly - // better match. - matches := func(key string) bool { - if !strings.HasPrefix(key, prefix) { - return false - } - - if !matchFound { - return key < startKey - } - - // matchFound == true - return result.key <= key && key < startKey - } - - // Now go trough the write set and check - // if there's an even better match. - for k, put := range s.wset { - if !put.op.IsDelete() && matches(k) { - result.key = k - result.val = put.val - matchFound = true - } - } - - if !matchFound { - return nil, nil - } - - return &result, nil -} - -// Next returns the next key/value after key (with prefix). If there's no such -// key Next will return nil. -func (s *stm) Next(prefix string, key string) (*KV, er.R) { - return s.next(prefix, key, false) -} - -// Seek "seeks" to the key (with prefix). If the key doesn't exists it'll get -// the next key with the same prefix. If no key fills this criteria, Seek will -// return nil. -func (s *stm) Seek(prefix, key string) (*KV, er.R) { - return s.next(prefix, key, true) -} - -// next will try to retrieve the next match that has prefix and starts with the -// passed startKey. If includeStartKey is set to true, it'll return the value -// of startKey (essentially implementing seek). 
-func (s *stm) next(prefix, startKey string, includeStartKey bool) (*KV, er.R) { - var result KV - - fetchKey := startKey - firstFetch := true - matchFound := false - - for { - // Ask etcd to retrieve one key that is a - // match in ascending order from the passed key. - opts := []v3.OpOption{ - v3.WithFromKey(), - v3.WithSort(v3.SortByKey, v3.SortAscend), - v3.WithLimit(1), - } - - // By default we include the start key too - // if it is a full match. - if includeStartKey && firstFetch { - firstFetch = false - } else { - // If we'd like to retrieve the first key - // after the start key. - fetchKey += "\x00" - } - - kvs, err := s.fetch(fetchKey, opts...) - if err != nil { - return nil, err - } - - if len(kvs) == 0 { - break - } - - kv := &kvs[0] - // WithRange and WithPrefix can't be used - // together, so check prefix here. If the - // returned key no longer has the prefix, - // then break the fetch loop. - if !strings.HasPrefix(kv.key, prefix) { - break - } - - // Move on to fetch starting with the next - // key if this one is marked deleted. - if put, ok := s.wset[kv.key]; ok && put.op.IsDelete() { - fetchKey = kv.key - continue - } - - result = *kv - matchFound = true - - break - } - - // Closure holding all checks to find a possibly - // better match. - matches := func(k string) bool { - if !strings.HasPrefix(k, prefix) { - return false - } - - if includeStartKey && !matchFound { - return startKey <= k - } - - if !includeStartKey && !matchFound { - return startKey < k - } - - if includeStartKey && matchFound { - return startKey <= k && k <= result.key - } - - // !includeStartKey && matchFound. - return startKey < k && k <= result.key - } - - // Now go trough the write set and check - // if there's an even better match. 
- for k, put := range s.wset { - if !put.op.IsDelete() && matches(k) { - result.key = k - result.val = put.val - matchFound = true - } - } - - if !matchFound { - return nil, nil - } - - return &result, nil -} - -// Put sets the value of the passed key. The actual put will happen upon commit. -func (s *stm) Put(key, val string) { - s.wset[key] = stmPut{ - val: val, - op: v3.OpPut(key, val), - } -} - -// Del marks a key as deleted. The actual delete will happen upon commit. -func (s *stm) Del(key string) { - s.wset[key] = stmPut{ - val: "", - op: v3.OpDelete(key), - } -} - -// OnCommit sets the callback that is called upon committing the STM -// transaction. -func (s *stm) OnCommit(cb func()) { - s.onCommit = cb -} - -// commit builds the final transaction and tries to execute it. If commit fails -// because the keys have changed return a CommitError, otherwise return a -// DatabaseError. -func (s *stm) commit() (CommitStats, er.R) { - rset := s.rset.cmps() - wset := s.wset.cmps(s.revision + 1) - - stats := CommitStats{ - Rset: len(rset), - Wset: len(wset), - } - - // Create the compare set. - cmps := append(rset, wset...) - // Create a transaction with the optional abort context. - txn := s.client.Txn(s.options.ctx) - - // If the compare set holds, try executing the puts. - txn = txn.If(cmps...) - txn = txn.Then(s.wset.puts()...) - - // Prefetch keys in case of conflict to save - // a round trip to etcd. - txn = txn.Else(s.rset.gets()...) - - txnresp, err := txn.Commit() - if err != nil { - return stats, DatabaseError{ - msg: "stm.Commit() failed", - err: err, - } - } - - // Call the commit callback if the transaction - // was successful. - if txnresp.Succeeded { - if s.onCommit != nil { - s.onCommit() - } - - return stats, nil - } - - // Load prefetch before if commit failed. - s.rset.add(txnresp) - s.prefetch = s.rset - - // Return CommitError indicating that the transaction - // can be retried. 
- return stats, CommitError{} -} - -// Commit simply calls commit and the commit stats callback if set. -func (s *stm) Commit() er.R { - stats, err := s.commit() - - if s.options.commitStatsCallback != nil { - s.options.commitStatsCallback(err == nil, stats) - } - - return err -} - -// Rollback resets the STM. This is useful for uncommitted transaction rollback -// and also used in the STM main loop to reset state if commit fails. -func (s *stm) Rollback() { - s.rset = make(map[string]stmGet) - s.wset = make(map[string]stmPut) - s.getOpts = nil - s.revision = math.MaxInt64 - 1 -} diff --git a/lnd/channeldb/kvdb/etcd/stm_test.go b/lnd/channeldb/kvdb/etcd/stm_test.go deleted file mode 100644 index 1d1bd63a..00000000 --- a/lnd/channeldb/kvdb/etcd/stm_test.go +++ /dev/null @@ -1,366 +0,0 @@ -// +build kvdb_etcd - -package etcd - -import ( - "errors" - "testing" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/stretchr/testify/require" -) - -func reverseKVs(a []KV) []KV { - for i, j := 0, len(a)-1; i < j; i, j = i+1, j-1 { - a[i], a[j] = a[j], a[i] - } - - return a -} - -func TestPutToEmpty(t *testing.T) { - t.Parallel() - - f := NewEtcdTestFixture(t) - txQueue := NewCommitQueue(f.config.Ctx) - defer func() { - f.Cleanup() - txQueue.Wait() - }() - - db, err := newEtcdBackend(f.BackendConfig()) - util.RequireNoErr(t, err) - - apply := func(stm STM) er.R { - stm.Put("123", "abc") - return nil - } - - err = RunSTM(db.cli, apply, txQueue) - util.RequireNoErr(t, err) - - require.Equal(t, "abc", f.Get("123")) -} - -func TestGetPutDel(t *testing.T) { - t.Parallel() - - f := NewEtcdTestFixture(t) - txQueue := NewCommitQueue(f.config.Ctx) - defer func() { - f.Cleanup() - txQueue.Wait() - }() - - testKeyValues := []KV{ - {"a", "1"}, - {"b", "2"}, - {"c", "3"}, - {"d", "4"}, - {"e", "5"}, - } - - for _, kv := range testKeyValues { - f.Put(kv.key, kv.val) - } - - db, err := newEtcdBackend(f.BackendConfig()) - util.RequireNoErr(t, 
err) - - apply := func(stm STM) er.R { - // Get some non existing keys. - v, err := stm.Get("") - util.RequireNoErr(t, err) - require.Nil(t, v) - - v, err = stm.Get("x") - util.RequireNoErr(t, err) - require.Nil(t, v) - - // Get all existing keys. - for _, kv := range testKeyValues { - v, err = stm.Get(kv.key) - util.RequireNoErr(t, err) - require.Equal(t, []byte(kv.val), v) - } - - // Overwrite, then delete an existing key. - stm.Put("c", "6") - - v, err = stm.Get("c") - util.RequireNoErr(t, err) - require.Equal(t, []byte("6"), v) - - stm.Del("c") - - v, err = stm.Get("c") - util.RequireNoErr(t, err) - require.Nil(t, v) - - // Re-add the deleted key. - stm.Put("c", "7") - - v, err = stm.Get("c") - util.RequireNoErr(t, err) - require.Equal(t, []byte("7"), v) - - // Add a new key. - stm.Put("x", "x") - - v, err = stm.Get("x") - util.RequireNoErr(t, err) - require.Equal(t, []byte("x"), v) - - return nil - } - - err = RunSTM(db.cli, apply, txQueue) - util.RequireNoErr(t, err) - - require.Equal(t, "1", f.Get("a")) - require.Equal(t, "2", f.Get("b")) - require.Equal(t, "7", f.Get("c")) - require.Equal(t, "4", f.Get("d")) - require.Equal(t, "5", f.Get("e")) - require.Equal(t, "x", f.Get("x")) -} - -func TestFirstLastNextPrev(t *testing.T) { - t.Parallel() - - f := NewEtcdTestFixture(t) - txQueue := NewCommitQueue(f.config.Ctx) - defer func() { - f.Cleanup() - txQueue.Wait() - }() - - testKeyValues := []KV{ - {"kb", "1"}, - {"kc", "2"}, - {"kda", "3"}, - {"ke", "4"}, - {"w", "w"}, - } - for _, kv := range testKeyValues { - f.Put(kv.key, kv.val) - } - - db, err := newEtcdBackend(f.BackendConfig()) - util.RequireNoErr(t, err) - - apply := func(stm STM) er.R { - // First/Last on valid multi item interval. - kv, err := stm.First("k") - util.RequireNoErr(t, err) - require.Equal(t, &KV{"kb", "1"}, kv) - - kv, err = stm.Last("k") - util.RequireNoErr(t, err) - require.Equal(t, &KV{"ke", "4"}, kv) - - // First/Last on single item interval. 
- kv, err = stm.First("w") - util.RequireNoErr(t, err) - require.Equal(t, &KV{"w", "w"}, kv) - - kv, err = stm.Last("w") - util.RequireNoErr(t, err) - require.Equal(t, &KV{"w", "w"}, kv) - - // Next/Prev on start/end. - kv, err = stm.Next("k", "ke") - util.RequireNoErr(t, err) - require.Nil(t, kv) - - kv, err = stm.Prev("k", "kb") - util.RequireNoErr(t, err) - require.Nil(t, kv) - - // Next/Prev in the middle. - kv, err = stm.Next("k", "kc") - util.RequireNoErr(t, err) - require.Equal(t, &KV{"kda", "3"}, kv) - - kv, err = stm.Prev("k", "ke") - util.RequireNoErr(t, err) - require.Equal(t, &KV{"kda", "3"}, kv) - - // Delete first item, then add an item before the - // deleted one. Check that First/Next will "jump" - // over the deleted item and return the new first. - stm.Del("kb") - stm.Put("ka", "0") - - kv, err = stm.First("k") - util.RequireNoErr(t, err) - require.Equal(t, &KV{"ka", "0"}, kv) - - kv, err = stm.Prev("k", "kc") - util.RequireNoErr(t, err) - require.Equal(t, &KV{"ka", "0"}, kv) - - // Similarly test that a new end is returned if - // the old end is deleted first. - stm.Del("ke") - stm.Put("kf", "5") - - kv, err = stm.Last("k") - util.RequireNoErr(t, err) - require.Equal(t, &KV{"kf", "5"}, kv) - - kv, err = stm.Next("k", "kda") - util.RequireNoErr(t, err) - require.Equal(t, &KV{"kf", "5"}, kv) - - // Overwrite one in the middle. - stm.Put("kda", "6") - - kv, err = stm.Next("k", "kc") - util.RequireNoErr(t, err) - require.Equal(t, &KV{"kda", "6"}, kv) - - // Add three in the middle, then delete one. - stm.Put("kdb", "7") - stm.Put("kdc", "8") - stm.Put("kdd", "9") - stm.Del("kdc") - - // Check that stepping from first to last returns - // the expected sequence. 
- var kvs []KV - - curr, err := stm.First("k") - util.RequireNoErr(t, err) - - for curr != nil { - kvs = append(kvs, *curr) - curr, err = stm.Next("k", curr.key) - util.RequireNoErr(t, err) - } - - expected := []KV{ - {"ka", "0"}, - {"kc", "2"}, - {"kda", "6"}, - {"kdb", "7"}, - {"kdd", "9"}, - {"kf", "5"}, - } - require.Equal(t, expected, kvs) - - // Similarly check that stepping from last to first - // returns the expected sequence. - kvs = []KV{} - - curr, err = stm.Last("k") - util.RequireNoErr(t, err) - - for curr != nil { - kvs = append(kvs, *curr) - curr, err = stm.Prev("k", curr.key) - util.RequireNoErr(t, err) - } - - expected = reverseKVs(expected) - require.Equal(t, expected, kvs) - - return nil - } - - err = RunSTM(db.cli, apply, txQueue) - util.RequireNoErr(t, err) - - require.Equal(t, "0", f.Get("ka")) - require.Equal(t, "2", f.Get("kc")) - require.Equal(t, "6", f.Get("kda")) - require.Equal(t, "7", f.Get("kdb")) - require.Equal(t, "9", f.Get("kdd")) - require.Equal(t, "5", f.Get("kf")) - require.Equal(t, "w", f.Get("w")) -} - -func TestCommitError(t *testing.T) { - t.Parallel() - - f := NewEtcdTestFixture(t) - txQueue := NewCommitQueue(f.config.Ctx) - defer func() { - f.Cleanup() - txQueue.Wait() - }() - - db, err := newEtcdBackend(f.BackendConfig()) - util.RequireNoErr(t, err) - - // Preset DB state. - f.Put("123", "xyz") - - // Count the number of applies. - cnt := 0 - - apply := func(stm STM) er.R { - // STM must have the key/value. - val, err := stm.Get("123") - util.RequireNoErr(t, err) - - if cnt == 0 { - require.Equal(t, []byte("xyz"), val) - - // Put a conflicting key/value during the first apply. 
- f.Put("123", "def") - } - - // We'd expect to - stm.Put("123", "abc") - - cnt++ - return nil - } - - err = RunSTM(db.cli, apply, txQueue) - util.RequireNoErr(t, err) - require.Equal(t, 2, cnt) - - require.Equal(t, "abc", f.Get("123")) -} - -func TestManualTxError(t *testing.T) { - t.Parallel() - - f := NewEtcdTestFixture(t) - txQueue := NewCommitQueue(f.config.Ctx) - defer func() { - f.Cleanup() - txQueue.Wait() - }() - - db, err := newEtcdBackend(f.BackendConfig()) - util.RequireNoErr(t, err) - - // Preset DB state. - f.Put("123", "xyz") - - stm := NewSTM(db.cli, txQueue) - - val, err := stm.Get("123") - util.RequireNoErr(t, err) - require.Equal(t, []byte("xyz"), val) - - // Put a conflicting key/value. - f.Put("123", "def") - - // Should still get the original version. - val, err = stm.Get("123") - util.RequireNoErr(t, err) - require.Equal(t, []byte("xyz"), val) - - // Commit will fail with CommitError. - err = stm.Commit() - var e CommitError - require.True(t, errors.As(err, &e)) - - // We expect that the transacton indeed did not commit. - require.Equal(t, "def", f.Get("123")) -} diff --git a/lnd/channeldb/kvdb/etcd/walletdb_interface_test.go b/lnd/channeldb/kvdb/etcd/walletdb_interface_test.go deleted file mode 100644 index aeb06d72..00000000 --- a/lnd/channeldb/kvdb/etcd/walletdb_interface_test.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build kvdb_etcd - -package etcd - -import ( - "testing" - - "github.com/pkt-cash/pktd/pktwallet/walletdb/walletdbtest" -) - -// TestWalletDBInterface performs the WalletDB interface test suite for the -// etcd database driver. 
-func TestWalletDBInterface(t *testing.T) { - f := NewEtcdTestFixture(t) - defer f.Cleanup() - walletdbtest.TestInterface(t, dbType, f.BackendConfig()) -} diff --git a/lnd/channeldb/kvdb/interface.go b/lnd/channeldb/kvdb/interface.go deleted file mode 100644 index ffb855c0..00000000 --- a/lnd/channeldb/kvdb/interface.go +++ /dev/null @@ -1,142 +0,0 @@ -package kvdb - -import ( - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/pktwallet/walletdb" - _ "github.com/pkt-cash/pktd/pktwallet/walletdb/bdb" // Import to register backend. -) - -// Update opens a database read/write transaction and executes the function f -// with the transaction passed as a parameter. After f exits, if f did not -// error, the transaction is committed. Otherwise, if f did error, the -// transaction is rolled back. If the rollback fails, the original error -// returned by f is still returned. If the commit fails, the commit error is -// returned. As callers may expect retries of the f closure (depending on the -// database backend used), the reset function will be called before each retry -// respectively. -func Update(db Backend, f func(tx RwTx) er.R, reset func()) er.R { - if extendedDB, ok := db.(ExtendedBackend); ok { - return extendedDB.Update(f, reset) - } - - reset() - return walletdb.Update(db, f) -} - -// View opens a database read transaction and executes the function f with the -// transaction passed as a parameter. After f exits, the transaction is rolled -// back. If f errors, its error is returned, not a rollback error (if any -// occur). The passed reset function is called before the start of the -// transaction and can be used to reset intermediate state. As callers may -// expect retries of the f closure (depending on the database backend used), the -// reset function will be called before each retry respectively. 
-func View(db Backend, f func(tx RTx) er.R, reset func()) er.R { - if extendedDB, ok := db.(ExtendedBackend); ok { - return extendedDB.View(f, reset) - } - - // Since we know that walletdb simply calls into bbolt which never - // retries transactions, we'll call the reset function here before View. - reset() - - return walletdb.View(db, f) -} - -// Batch is identical to the Update call, but it attempts to combine several -// individual Update transactions into a single write database transaction on -// an optimistic basis. This only has benefits if multiple goroutines call -// Batch. -var Batch = walletdb.Batch - -// Create initializes and opens a database for the specified type. The -// arguments are specific to the database type driver. See the documentation -// for the database driver for further details. -// -// ErrDbUnknownType will be returned if the database type is not registered. -var Create = walletdb.Create - -// Backend represents an ACID database. All database access is performed -// through read or read+write transactions. -type Backend = walletdb.DB - -// ExtendedBackend is and interface that supports View and Update and also able -// to collect database access patterns. -type ExtendedBackend interface { - Backend - - // PrintStats returns all collected stats pretty printed into a string. - PrintStats() string - - // View opens a database read transaction and executes the function f - // with the transaction passed as a parameter. After f exits, the - // transaction is rolled back. If f errors, its error is returned, not a - // rollback error (if any occur). The passed reset function is called - // before the start of the transaction and can be used to reset - // intermediate state. As callers may expect retries of the f closure - // (depending on the database backend used), the reset function will be - //called before each retry respectively. 
- View(f func(tx walletdb.ReadTx) er.R, reset func()) er.R - - // Update opens a database read/write transaction and executes the - // function f with the transaction passed as a parameter. After f exits, - // if f did not error, the transaction is committed. Otherwise, if f did - // error, the transaction is rolled back. If the rollback fails, the - // original error returned by f is still returned. If the commit fails, - // the commit error is returned. As callers may expect retries of the f - // closure (depending on the database backend used), the reset function - // will be called before each retry respectively. - Update(f func(tx walletdb.ReadWriteTx) er.R, reset func()) er.R -} - -// Open opens an existing database for the specified type. The arguments are -// specific to the database type driver. See the documentation for the database -// driver for further details. -// -// ErrDbUnknownType will be returned if the database type is not registered. -var Open = walletdb.Open - -// Driver defines a structure for backend drivers to use when they registered -// themselves as a backend which implements the Backend interface. -type Driver = walletdb.Driver - -// RBucket represents a bucket (a hierarchical structure within the -// database) that is only allowed to perform read operations. -type RBucket = walletdb.ReadBucket - -// RCursor represents a bucket cursor that can be positioned at the start or -// end of the bucket's key/value pairs and iterate over pairs in the bucket. -// This type is only allowed to perform database read operations. -type RCursor = walletdb.ReadCursor - -// RTx represents a database transaction that can only be used for reads. If -// a database update must occur, use a RwTx. -type RTx = walletdb.ReadTx - -// RwBucket represents a bucket (a hierarchical structure within the database) -// that is allowed to perform both read and write operations. 
-type RwBucket = walletdb.ReadWriteBucket - -// RwCursor represents a bucket cursor that can be positioned at the start or -// end of the bucket's key/value pairs and iterate over pairs in the bucket. -// This abstraction is allowed to perform both database read and write -// operations. -type RwCursor = walletdb.ReadWriteCursor - -// ReadWriteTx represents a database transaction that can be used for both -// reads and writes. When only reads are necessary, consider using a RTx -// instead. -type RwTx = walletdb.ReadWriteTx - -var ( - // ErrBucketNotFound is returned when trying to access a bucket that - // has not been created yet. - ErrBucketNotFound = walletdb.ErrBucketNotFound - - // ErrBucketExists is returned when creating a bucket that already - // exists. - ErrBucketExists = walletdb.ErrBucketExists - - // ErrDatabaseNotOpen is returned when a database instance is accessed - // before it is opened or after it is closed. - ErrDatabaseNotOpen = walletdb.ErrDbNotOpen -) diff --git a/lnd/channeldb/kvdb/kvdb_etcd.go b/lnd/channeldb/kvdb/kvdb_etcd.go deleted file mode 100644 index d923d414..00000000 --- a/lnd/channeldb/kvdb/kvdb_etcd.go +++ /dev/null @@ -1,54 +0,0 @@ -// +build kvdb_etcd - -package kvdb - -import ( - "context" - - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb/etcd" -) - -// TestBackend is conditionally set to etcd when the kvdb_etcd build tag is -// defined, allowing testing our database code with etcd backend. -const TestBackend = EtcdBackendName - -// GetEtcdBackend returns an etcd backend configured according to the -// passed etcdConfig. -func GetEtcdBackend(ctx context.Context, prefix string, - etcdConfig *EtcdConfig) (Backend, er.R) { - - // Config translation is needed here in order to keep the - // etcd package fully independent from the rest of the source tree. 
- backendConfig := etcd.BackendConfig{ - Ctx: ctx, - Host: etcdConfig.Host, - User: etcdConfig.User, - Pass: etcdConfig.Pass, - CertFile: etcdConfig.CertFile, - KeyFile: etcdConfig.KeyFile, - InsecureSkipVerify: etcdConfig.InsecureSkipVerify, - Prefix: prefix, - CollectCommitStats: etcdConfig.CollectStats, - } - - return Open(EtcdBackendName, backendConfig) -} - -// GetEtcdTestBackend creates an embedded etcd backend for testing -// storig the database at the passed path. -func GetEtcdTestBackend(path, name string) (Backend, func(), er.R) { - empty := func() {} - - config, cleanup, err := etcd.NewEmbeddedEtcdInstance(path) - if err != nil { - return nil, empty, err - } - - backend, err := Open(EtcdBackendName, *config) - if err != nil { - cleanup() - return nil, empty, err - } - - return backend, cleanup, nil -} diff --git a/lnd/channeldb/kvdb/kvdb_no_etcd.go b/lnd/channeldb/kvdb/kvdb_no_etcd.go deleted file mode 100644 index edba337b..00000000 --- a/lnd/channeldb/kvdb/kvdb_no_etcd.go +++ /dev/null @@ -1,30 +0,0 @@ -// +build !kvdb_etcd - -package kvdb - -import ( - "context" - - "github.com/pkt-cash/pktd/btcutil/er" -) - -// TestBackend is conditionally set to bdb when the kvdb_etcd build tag is -// not defined, allowing testing our database code with bolt backend. -const TestBackend = BoltBackendName - -var errEtcdNotAvailable = er.GenericErrorType.CodeWithDetail( - "errEtcdNotAvailable", - "etcd backend not available") - -// GetEtcdBackend is a stub returning nil and errEtcdNotAvailable error. -func GetEtcdBackend(ctx context.Context, prefix string, - etcdConfig *EtcdConfig) (Backend, er.R) { - - return nil, errEtcdNotAvailable.Default() -} - -// GetTestEtcdBackend is a stub returning nil, an empty closure and an -// errEtcdNotAvailable error. 
-func GetEtcdTestBackend(path, name string) (Backend, func(), er.R) { - return nil, func() {}, errEtcdNotAvailable.Default() -} diff --git a/lnd/channeldb/legacy_serialization.go b/lnd/channeldb/legacy_serialization.go deleted file mode 100644 index 6ff54b32..00000000 --- a/lnd/channeldb/legacy_serialization.go +++ /dev/null @@ -1,57 +0,0 @@ -package channeldb - -import ( - "io" - - "github.com/pkt-cash/pktd/btcutil/er" -) - -// deserializeCloseChannelSummaryV6 reads the v6 database format for -// ChannelCloseSummary. -// -// NOTE: deprecated, only for migration. -func deserializeCloseChannelSummaryV6(r io.Reader) (*ChannelCloseSummary, er.R) { - c := &ChannelCloseSummary{} - - err := ReadElements(r, - &c.ChanPoint, &c.ShortChanID, &c.ChainHash, &c.ClosingTXID, - &c.CloseHeight, &c.RemotePub, &c.Capacity, &c.SettledBalance, - &c.TimeLockedBalance, &c.CloseType, &c.IsPending, - ) - if err != nil { - return nil, err - } - - // We'll now check to see if the channel close summary was encoded with - // any of the additional optional fields. - err = ReadElements(r, &c.RemoteCurrentRevocation) - switch { - case er.EOF.Is(err): - return c, nil - - // If we got a non-eof error, then we know there's an actually issue. - // Otherwise, it may have been the case that this summary didn't have - // the set of optional fields. - case err != nil: - return nil, err - } - - if err := readChanConfig(r, &c.LocalChanConfig); err != nil { - return nil, err - } - - // Finally, we'll attempt to read the next unrevoked commitment point - // for the remote party. If we closed the channel before receiving a - // funding locked message, then this can be nil. As a result, we'll use - // the same technique to read the field, only if there's still data - // left in the buffer. - err = ReadElements(r, &c.RemoteNextRevocation) - if err != nil && !er.EOF.Is(err) { - // If we got a non-eof error, then we know there's an actually - // issue. 
Otherwise, it may have been the case that this - // summary didn't have the set of optional fields. - return nil, err - } - - return c, nil -} diff --git a/lnd/channeldb/meta.go b/lnd/channeldb/meta.go deleted file mode 100644 index 78adf908..00000000 --- a/lnd/channeldb/meta.go +++ /dev/null @@ -1,83 +0,0 @@ -package channeldb - -import ( - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" -) - -var ( - // metaBucket stores all the meta information concerning the state of - // the database. - metaBucket = []byte("metadata") - - // dbVersionKey is a boltdb key and it's used for storing/retrieving - // current database version. - dbVersionKey = []byte("dbp") -) - -// Meta structure holds the database meta information. -type Meta struct { - // DbVersionNumber is the current schema version of the database. - DbVersionNumber uint32 -} - -// FetchMeta fetches the meta data from boltdb and returns filled meta -// structure. -func (d *DB) FetchMeta(tx kvdb.RTx) (*Meta, er.R) { - var meta *Meta - - err := kvdb.View(d, func(tx kvdb.RTx) er.R { - return fetchMeta(meta, tx) - }, func() { - meta = &Meta{} - }) - if err != nil { - return nil, err - } - - return meta, nil -} - -// fetchMeta is an internal helper function used in order to allow callers to -// re-use a database transaction. See the publicly exported FetchMeta method -// for more information. -func fetchMeta(meta *Meta, tx kvdb.RTx) er.R { - metaBucket := tx.ReadBucket(metaBucket) - if metaBucket == nil { - return ErrMetaNotFound.Default() - } - - data := metaBucket.Get(dbVersionKey) - if data == nil { - meta.DbVersionNumber = getLatestDBVersion(dbVersions) - } else { - meta.DbVersionNumber = byteOrder.Uint32(data) - } - - return nil -} - -// PutMeta writes the passed instance of the database met-data struct to disk. 
-func (d *DB) PutMeta(meta *Meta) er.R { - return kvdb.Update(d, func(tx kvdb.RwTx) er.R { - return putMeta(meta, tx) - }, func() {}) -} - -// putMeta is an internal helper function used in order to allow callers to -// re-use a database transaction. See the publicly exported PutMeta method for -// more information. -func putMeta(meta *Meta, tx kvdb.RwTx) er.R { - metaBucket, err := tx.CreateTopLevelBucket(metaBucket) - if err != nil { - return err - } - - return putDbVersion(metaBucket, meta) -} - -func putDbVersion(metaBucket kvdb.RwBucket, meta *Meta) er.R { - scratch := make([]byte, 4) - byteOrder.PutUint32(scratch, meta.DbVersionNumber) - return metaBucket.Put(dbVersionKey, scratch) -} diff --git a/lnd/channeldb/meta_test.go b/lnd/channeldb/meta_test.go deleted file mode 100644 index 095cb624..00000000 --- a/lnd/channeldb/meta_test.go +++ /dev/null @@ -1,508 +0,0 @@ -package channeldb - -import ( - "bytes" - "io/ioutil" - "os" - "testing" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/pktlog/log" -) - -// applyMigration is a helper test function that encapsulates the general steps -// which are needed to properly check the result of applying migration function. -func applyMigration(t *testing.T, beforeMigration, afterMigration func(d *DB), - migrationFunc migration, shouldFail bool, dryRun bool) { - - cdb, cleanUp, err := MakeTestDB() - defer cleanUp() - if err != nil { - t.Fatal(err) - } - cdb.dryRun = dryRun - - // Create a test node that will be our source node. - testNode, err := createTestVertex(cdb) - if err != nil { - t.Fatal(err) - } - graph := cdb.ChannelGraph() - if err := graph.SetSourceNode(testNode); err != nil { - t.Fatal(err) - } - - // beforeMigration usually used for populating the database - // with test data. - beforeMigration(cdb) - - // Create test meta info with zero database version and put it on disk. 
- // Than creating the version list pretending that new version was added. - meta := &Meta{DbVersionNumber: 0} - if err := cdb.PutMeta(meta); err != nil { - t.Fatalf("unable to store meta data: %v", err) - } - - versions := []version{ - { - number: 0, - migration: nil, - }, - { - number: 1, - migration: migrationFunc, - }, - } - - defer func() { - if r := recover(); r != nil { - if dryRun && !ErrDryRunMigrationOK.Is(err) { - t.Fatalf("expected dry run migration OK") - } - err = er.Errorf("%v", r) - } - - if err == nil && shouldFail { - t.Fatal("error wasn't received on migration stage") - } else if err != nil && !shouldFail { - t.Fatalf("error was received on migration stage: %v", err) - } - - // afterMigration usually used for checking the database state and - // throwing the error if something went wrong. - afterMigration(cdb) - }() - - // Sync with the latest version - applying migration function. - err = cdb.syncVersions(versions) - if err != nil { - log.Error(err) - } -} - -// TestVersionFetchPut checks the propernces of fetch/put methods -// and also initialization of meta data in case if don't have any in -// database. -func TestVersionFetchPut(t *testing.T) { - t.Parallel() - - db, cleanUp, err := MakeTestDB() - defer cleanUp() - if err != nil { - t.Fatal(err) - } - - meta, err := db.FetchMeta(nil) - if err != nil { - t.Fatal(err) - } - - if meta.DbVersionNumber != getLatestDBVersion(dbVersions) { - t.Fatal("initialization of meta information wasn't performed") - } - - newVersion := getLatestDBVersion(dbVersions) + 1 - meta.DbVersionNumber = newVersion - - if err := db.PutMeta(meta); err != nil { - t.Fatalf("update of meta failed %v", err) - } - - meta, err = db.FetchMeta(nil) - if err != nil { - t.Fatal(err) - } - - if meta.DbVersionNumber != newVersion { - t.Fatal("update of meta information wasn't performed") - } -} - -// TestOrderOfMigrations checks that migrations are applied in proper order. 
-func TestOrderOfMigrations(t *testing.T) { - t.Parallel() - - appliedMigration := -1 - versions := []version{ - {0, nil}, - {1, nil}, - {2, func(tx kvdb.RwTx) er.R { - appliedMigration = 2 - return nil - }}, - {3, func(tx kvdb.RwTx) er.R { - appliedMigration = 3 - return nil - }}, - } - - // Retrieve the migration that should be applied to db, as far as - // current version is 1, we skip zero and first versions. - migrations, _ := getMigrationsToApply(versions, 1) - - if len(migrations) != 2 { - t.Fatal("incorrect number of migrations to apply") - } - - // Apply first migration. - migrations[0](nil) - - // Check that first migration corresponds to the second version. - if appliedMigration != 2 { - t.Fatal("incorrect order of applying migrations") - } - - // Apply second migration. - migrations[1](nil) - - // Check that second migration corresponds to the third version. - if appliedMigration != 3 { - t.Fatal("incorrect order of applying migrations") - } -} - -// TestGlobalVersionList checks that there is no mistake in global version list -// in terms of version ordering. -func TestGlobalVersionList(t *testing.T) { - t.Parallel() - - if dbVersions == nil { - t.Fatal("can't find versions list") - } - - if len(dbVersions) == 0 { - t.Fatal("db versions list is empty") - } - - prev := dbVersions[0].number - for i := 1; i < len(dbVersions); i++ { - version := dbVersions[i].number - - if version == prev { - t.Fatal("duplicates db versions") - } - if version < prev { - t.Fatal("order of db versions is wrong") - } - - prev = version - } -} - -// TestMigrationWithPanic asserts that if migration logic panics, we will return -// to the original state unaltered. 
-func TestMigrationWithPanic(t *testing.T) { - t.Parallel() - - bucketPrefix := []byte("somebucket") - keyPrefix := []byte("someprefix") - beforeMigration := []byte("beforemigration") - afterMigration := []byte("aftermigration") - - beforeMigrationFunc := func(d *DB) { - // Insert data in database and in order then make sure that the - // key isn't changes in case of panic or fail. - err := kvdb.Update(d, func(tx kvdb.RwTx) er.R { - bucket, err := tx.CreateTopLevelBucket(bucketPrefix) - if err != nil { - return err - } - - return bucket.Put(keyPrefix, beforeMigration) - }, func() {}) - if err != nil { - t.Fatalf("unable to insert: %v", err) - } - } - - // Create migration function which changes the initially created data and - // throw the panic, in this case we pretending that something goes. - migrationWithPanic := func(tx kvdb.RwTx) er.R { - bucket, err := tx.CreateTopLevelBucket(bucketPrefix) - if err != nil { - return err - } - - bucket.Put(keyPrefix, afterMigration) - panic("panic!") - } - - // Check that version of database and data wasn't changed. - afterMigrationFunc := func(d *DB) { - meta, err := d.FetchMeta(nil) - if err != nil { - t.Fatal(err) - } - - if meta.DbVersionNumber != 0 { - t.Fatal("migration panicked but version is changed") - } - - err = kvdb.Update(d, func(tx kvdb.RwTx) er.R { - bucket, err := tx.CreateTopLevelBucket(bucketPrefix) - if err != nil { - return err - } - - value := bucket.Get(keyPrefix) - if !bytes.Equal(value, beforeMigration) { - return er.New("migration failed but data is " + - "changed") - } - - return nil - }, func() {}) - if err != nil { - t.Fatal(err) - } - } - - applyMigration(t, - beforeMigrationFunc, - afterMigrationFunc, - migrationWithPanic, - true, - false) -} - -// TestMigrationWithFatal asserts that migrations which fail do not modify the -// database. 
-func TestMigrationWithFatal(t *testing.T) { - t.Parallel() - - bucketPrefix := []byte("somebucket") - keyPrefix := []byte("someprefix") - beforeMigration := []byte("beforemigration") - afterMigration := []byte("aftermigration") - - beforeMigrationFunc := func(d *DB) { - err := kvdb.Update(d, func(tx kvdb.RwTx) er.R { - bucket, err := tx.CreateTopLevelBucket(bucketPrefix) - if err != nil { - return err - } - - return bucket.Put(keyPrefix, beforeMigration) - }, func() {}) - if err != nil { - t.Fatalf("unable to insert pre migration key: %v", err) - } - } - - // Create migration function which changes the initially created data and - // return the error, in this case we pretending that something goes - // wrong. - migrationWithFatal := func(tx kvdb.RwTx) er.R { - bucket, err := tx.CreateTopLevelBucket(bucketPrefix) - if err != nil { - return err - } - - bucket.Put(keyPrefix, afterMigration) - return er.New("some error") - } - - // Check that version of database and initial data wasn't changed. - afterMigrationFunc := func(d *DB) { - meta, err := d.FetchMeta(nil) - if err != nil { - t.Fatal(err) - } - - if meta.DbVersionNumber != 0 { - t.Fatal("migration failed but version is changed") - } - - err = kvdb.Update(d, func(tx kvdb.RwTx) er.R { - bucket, err := tx.CreateTopLevelBucket(bucketPrefix) - if err != nil { - return err - } - - value := bucket.Get(keyPrefix) - if !bytes.Equal(value, beforeMigration) { - return er.New("migration failed but data is " + - "changed") - } - - return nil - }, func() {}) - if err != nil { - t.Fatal(err) - } - } - - applyMigration(t, - beforeMigrationFunc, - afterMigrationFunc, - migrationWithFatal, - true, - false) -} - -// TestMigrationWithoutErrors asserts that a successful migration has its -// changes applied to the database. 
-func TestMigrationWithoutErrors(t *testing.T) { - t.Parallel() - - bucketPrefix := []byte("somebucket") - keyPrefix := []byte("someprefix") - beforeMigration := []byte("beforemigration") - afterMigration := []byte("aftermigration") - - // Populate database with initial data. - beforeMigrationFunc := func(d *DB) { - err := kvdb.Update(d, func(tx kvdb.RwTx) er.R { - bucket, err := tx.CreateTopLevelBucket(bucketPrefix) - if err != nil { - return err - } - - return bucket.Put(keyPrefix, beforeMigration) - }, func() {}) - if err != nil { - t.Fatalf("unable to update db pre migration: %v", err) - } - } - - // Create migration function which changes the initially created data. - migrationWithoutErrors := func(tx kvdb.RwTx) er.R { - bucket, err := tx.CreateTopLevelBucket(bucketPrefix) - if err != nil { - return err - } - - bucket.Put(keyPrefix, afterMigration) - return nil - } - - // Check that version of database and data was properly changed. - afterMigrationFunc := func(d *DB) { - meta, err := d.FetchMeta(nil) - if err != nil { - t.Fatal(err) - } - - if meta.DbVersionNumber != 1 { - t.Fatal("version number isn't changed after " + - "successfully applied migration") - } - - err = kvdb.Update(d, func(tx kvdb.RwTx) er.R { - bucket, err := tx.CreateTopLevelBucket(bucketPrefix) - if err != nil { - return err - } - - value := bucket.Get(keyPrefix) - if !bytes.Equal(value, afterMigration) { - return er.New("migration wasn't applied " + - "properly") - } - - return nil - }, func() {}) - if err != nil { - t.Fatal(err) - } - } - - applyMigration(t, - beforeMigrationFunc, - afterMigrationFunc, - migrationWithoutErrors, - false, - false) -} - -// TestMigrationReversion tests after performing a migration to a higher -// database version, opening the database with a lower latest db version returns -// ErrDBReversion. 
-func TestMigrationReversion(t *testing.T) { - t.Parallel() - - tempDirName, errr := ioutil.TempDir("", "channeldb") - defer func() { - os.RemoveAll(tempDirName) - }() - if errr != nil { - t.Fatalf("unable to create temp dir: %v", errr) - } - - backend, cleanup, err := kvdb.GetTestBackend(tempDirName, "cdb") - if err != nil { - t.Fatalf("unable to get test db backend: %v", err) - } - - cdb, err := CreateWithBackend(backend) - if err != nil { - cleanup() - t.Fatalf("unable to open channeldb: %v", err) - } - - // Update the database metadata to point to one more than the highest - // known version. - err = kvdb.Update(cdb, func(tx kvdb.RwTx) er.R { - newMeta := &Meta{ - DbVersionNumber: getLatestDBVersion(dbVersions) + 1, - } - - return putMeta(newMeta, tx) - }, func() {}) - - // Close the database. Even if we succeeded, our next step is to reopen. - cdb.Close() - cleanup() - - if err != nil { - t.Fatalf("unable to increase db version: %v", err) - } - - backend, cleanup, err = kvdb.GetTestBackend(tempDirName, "cdb") - if err != nil { - t.Fatalf("unable to get test db backend: %v", err) - } - defer cleanup() - - _, err = CreateWithBackend(backend) - if !ErrDBReversion.Is(err) { - t.Fatalf("unexpected error when opening channeldb, "+ - "want: %v, got: %v", ErrDBReversion, err) - } -} - -// TestMigrationDryRun ensures that opening the database in dry run migration -// mode will fail and not commit the migration. -func TestMigrationDryRun(t *testing.T) { - t.Parallel() - - // Nothing to do, will inspect version number. - beforeMigrationFunc := func(d *DB) {} - - // Check that version of database version is not modified. 
- afterMigrationFunc := func(d *DB) { - err := kvdb.View(d, func(tx kvdb.RTx) er.R { - meta, err := d.FetchMeta(nil) - if err != nil { - t.Fatal(err) - } - - if meta.DbVersionNumber != 0 { - t.Fatal("dry run migration was not aborted") - } - - return nil - }, func() {}) - if err != nil { - t.Fatalf("unable to apply after func: %v", err) - } - } - - applyMigration(t, - beforeMigrationFunc, - afterMigrationFunc, - func(kvdb.RwTx) er.R { return nil }, - true, - true) -} diff --git a/lnd/channeldb/migration/create_tlb.go b/lnd/channeldb/migration/create_tlb.go deleted file mode 100644 index aad0a210..00000000 --- a/lnd/channeldb/migration/create_tlb.go +++ /dev/null @@ -1,27 +0,0 @@ -package migration - -import ( - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/pktlog/log" -) - -// CreateTLB creates a new top-level bucket with the passed bucket identifier. -func CreateTLB(bucket []byte) func(kvdb.RwTx) er.R { - return func(tx kvdb.RwTx) er.R { - log.Infof("Creating top-level bucket: \"%s\" ...", bucket) - - if tx.ReadBucket(bucket) != nil { - return er.Errorf("top-level bucket \"%s\" "+ - "already exists", bucket) - } - - _, err := tx.CreateTopLevelBucket(bucket) - if err != nil { - return err - } - - log.Infof("Created top-level bucket: \"%s\"", bucket) - return nil - } -} diff --git a/lnd/channeldb/migration/create_tlb_test.go b/lnd/channeldb/migration/create_tlb_test.go deleted file mode 100644 index dc32c011..00000000 --- a/lnd/channeldb/migration/create_tlb_test.go +++ /dev/null @@ -1,57 +0,0 @@ -package migration_test - -import ( - "testing" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/lnd/channeldb/migration" - "github.com/pkt-cash/pktd/lnd/channeldb/migtest" -) - -// TestCreateTLB asserts that a CreateTLB properly initializes a new top-level -// bucket, and that it succeeds even if the bucket already exists. 
It would -// probably be better if the latter failed, but the kvdb abstraction doesn't -// support this. -func TestCreateTLB(t *testing.T) { - newBucket := []byte("hello") - - tests := []struct { - name string - beforeMigration func(kvdb.RwTx) er.R - shouldFail bool - }{ - { - name: "already exists", - beforeMigration: func(tx kvdb.RwTx) er.R { - _, err := tx.CreateTopLevelBucket(newBucket) - return err - }, - shouldFail: true, - }, - { - name: "does not exist", - beforeMigration: func(_ kvdb.RwTx) er.R { return nil }, - shouldFail: false, - }, - } - - for _, test := range tests { - test := test - t.Run(test.name, func(t *testing.T) { - migtest.ApplyMigration( - t, - test.beforeMigration, - func(tx kvdb.RwTx) er.R { - if tx.ReadBucket(newBucket) != nil { - return nil - } - return er.Errorf("bucket \"%s\" not "+ - "created", newBucket) - }, - migration.CreateTLB(newBucket), - test.shouldFail, - ) - }) - } -} diff --git a/lnd/channeldb/migration12/invoices.go b/lnd/channeldb/migration12/invoices.go deleted file mode 100644 index 3474494e..00000000 --- a/lnd/channeldb/migration12/invoices.go +++ /dev/null @@ -1,320 +0,0 @@ -package migration12 - -import ( - "bytes" - "encoding/binary" - "io" - "time" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/lnd/lntypes" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/tlv" - "github.com/pkt-cash/pktd/wire" -) - -const ( - // MaxMemoSize is maximum size of the memo field within invoices stored - // in the database. - MaxMemoSize = 1024 - - // maxReceiptSize is the maximum size of the payment receipt stored - // within the database along side incoming/outgoing invoices. - maxReceiptSize = 1024 - - // MaxPaymentRequestSize is the max size of a payment request for - // this invoice. - // TODO(halseth): determine the max length payment request when field - // lengths are final. 
- MaxPaymentRequestSize = 4096 - - memoType tlv.Type = 0 - payReqType tlv.Type = 1 - createTimeType tlv.Type = 2 - settleTimeType tlv.Type = 3 - addIndexType tlv.Type = 4 - settleIndexType tlv.Type = 5 - preimageType tlv.Type = 6 - valueType tlv.Type = 7 - cltvDeltaType tlv.Type = 8 - expiryType tlv.Type = 9 - paymentAddrType tlv.Type = 10 - featuresType tlv.Type = 11 - invStateType tlv.Type = 12 - amtPaidType tlv.Type = 13 -) - -var ( - // invoiceBucket is the name of the bucket within the database that - // stores all data related to invoices no matter their final state. - // Within the invoice bucket, each invoice is keyed by its invoice ID - // which is a monotonically increasing uint32. - invoiceBucket = []byte("invoices") - - // Big endian is the preferred byte order, due to cursor scans over - // integer keys iterating in order. - byteOrder = binary.BigEndian -) - -// ContractState describes the state the invoice is in. -type ContractState uint8 - -// ContractTerm is a companion struct to the Invoice struct. This struct houses -// the necessary conditions required before the invoice can be considered fully -// settled by the payee. -type ContractTerm struct { - // PaymentPreimage is the preimage which is to be revealed in the - // occasion that an HTLC paying to the hash of this preimage is - // extended. - PaymentPreimage lntypes.Preimage - - // Value is the expected amount of milli-satoshis to be paid to an HTLC - // which can be satisfied by the above preimage. - Value lnwire.MilliSatoshi - - // State describes the state the invoice is in. - State ContractState - - // PaymentAddr is a randomly generated value include in the MPP record - // by the sender to prevent probing of the receiver. - PaymentAddr [32]byte - - // Features is the feature vectors advertised on the payment request. - Features *lnwire.FeatureVector -} - -// Invoice is a payment invoice generated by a payee in order to request -// payment for some good or service. 
The inclusion of invoices within Lightning -// creates a payment work flow for merchants very similar to that of the -// existing financial system within PayPal, etc. Invoices are added to the -// database when a payment is requested, then can be settled manually once the -// payment is received at the upper layer. For record keeping purposes, -// invoices are never deleted from the database, instead a bit is toggled -// denoting the invoice has been fully settled. Within the database, all -// invoices must have a unique payment hash which is generated by taking the -// sha256 of the payment preimage. -type Invoice struct { - // Memo is an optional memo to be stored along side an invoice. The - // memo may contain further details pertaining to the invoice itself, - // or any other message which fits within the size constraints. - Memo []byte - - // PaymentRequest is an optional field where a payment request created - // for this invoice can be stored. - PaymentRequest []byte - - // FinalCltvDelta is the minimum required number of blocks before htlc - // expiry when the invoice is accepted. - FinalCltvDelta int32 - - // Expiry defines how long after creation this invoice should expire. - Expiry time.Duration - - // CreationDate is the exact time the invoice was created. - CreationDate time.Time - - // SettleDate is the exact time the invoice was settled. - SettleDate time.Time - - // Terms are the contractual payment terms of the invoice. Once all the - // terms have been satisfied by the payer, then the invoice can be - // considered fully fulfilled. - // - // TODO(roasbeef): later allow for multiple terms to fulfill the final - // invoice: payment fragmentation, etc. - Terms ContractTerm - - // AddIndex is an auto-incrementing integer that acts as a - // monotonically increasing sequence number for all invoices created. 
- // Clients can then use this field as a "checkpoint" of sorts when - // implementing a streaming RPC to notify consumers of instances where - // an invoice has been added before they re-connected. - // - // NOTE: This index starts at 1. - AddIndex uint64 - - // SettleIndex is an auto-incrementing integer that acts as a - // monotonically increasing sequence number for all settled invoices. - // Clients can then use this field as a "checkpoint" of sorts when - // implementing a streaming RPC to notify consumers of instances where - // an invoice has been settled before they re-connected. - // - // NOTE: This index starts at 1. - SettleIndex uint64 - - // AmtPaid is the final amount that we ultimately accepted for pay for - // this invoice. We specify this value independently as it's possible - // that the invoice originally didn't specify an amount, or the sender - // overpaid. - AmtPaid lnwire.MilliSatoshi - - // Htlcs records all htlcs that paid to this invoice. Some of these - // htlcs may have been marked as canceled. - Htlcs []byte -} - -// LegacyDeserializeInvoice decodes an invoice from the passed io.Reader using -// the pre-TLV serialization. 
-func LegacyDeserializeInvoice(r io.Reader) (Invoice, er.R) { - var err er.R - invoice := Invoice{} - - // TODO(roasbeef): use read full everywhere - invoice.Memo, err = wire.ReadVarBytes(r, 0, MaxMemoSize, "") - if err != nil { - return invoice, err - } - _, err = wire.ReadVarBytes(r, 0, maxReceiptSize, "") - if err != nil { - return invoice, err - } - - invoice.PaymentRequest, err = wire.ReadVarBytes(r, 0, MaxPaymentRequestSize, "") - if err != nil { - return invoice, err - } - - if err := util.ReadBin(r, byteOrder, &invoice.FinalCltvDelta); err != nil { - return invoice, err - } - - var expiry int64 - if err := util.ReadBin(r, byteOrder, &expiry); err != nil { - return invoice, err - } - invoice.Expiry = time.Duration(expiry) - - birthBytes, err := wire.ReadVarBytes(r, 0, 300, "birth") - if err != nil { - return invoice, err - } - if err := invoice.CreationDate.UnmarshalBinary(birthBytes); err != nil { - return invoice, er.E(err) - } - - settledBytes, err := wire.ReadVarBytes(r, 0, 300, "settled") - if err != nil { - return invoice, err - } - if err := invoice.SettleDate.UnmarshalBinary(settledBytes); err != nil { - return invoice, er.E(err) - } - - if _, err := util.ReadFull(r, invoice.Terms.PaymentPreimage[:]); err != nil { - return invoice, err - } - var scratch [8]byte - if _, err := util.ReadFull(r, scratch[:]); err != nil { - return invoice, err - } - invoice.Terms.Value = lnwire.MilliSatoshi(byteOrder.Uint64(scratch[:])) - - if err := util.ReadBin(r, byteOrder, &invoice.Terms.State); err != nil { - return invoice, err - } - - if err := util.ReadBin(r, byteOrder, &invoice.AddIndex); err != nil { - return invoice, err - } - if err := util.ReadBin(r, byteOrder, &invoice.SettleIndex); err != nil { - return invoice, err - } - if err := util.ReadBin(r, byteOrder, &invoice.AmtPaid); err != nil { - return invoice, err - } - - invoice.Htlcs, err = deserializeHtlcs(r) - if err != nil { - return Invoice{}, err - } - - return invoice, nil -} - -// deserializeHtlcs 
reads a list of invoice htlcs from a reader and returns it -// as a flattened byte slice. -func deserializeHtlcs(r io.Reader) ([]byte, er.R) { - var b bytes.Buffer - _, err := io.Copy(&b, r) - return b.Bytes(), er.E(err) -} - -// SerializeInvoice serializes an invoice to a writer. -// -// nolint: dupl -func SerializeInvoice(w io.Writer, i *Invoice) er.R { - creationDateBytes, errr := i.CreationDate.MarshalBinary() - if errr != nil { - return er.E(errr) - } - - settleDateBytes, errr := i.SettleDate.MarshalBinary() - if errr != nil { - return er.E(errr) - } - - var fb bytes.Buffer - err := i.Terms.Features.EncodeBase256(&fb) - if err != nil { - return err - } - featureBytes := fb.Bytes() - - preimage := [32]byte(i.Terms.PaymentPreimage) - value := uint64(i.Terms.Value) - cltvDelta := uint32(i.FinalCltvDelta) - expiry := uint64(i.Expiry) - - amtPaid := uint64(i.AmtPaid) - state := uint8(i.Terms.State) - - tlvStream, err := tlv.NewStream( - // Memo and payreq. - tlv.MakePrimitiveRecord(memoType, &i.Memo), - tlv.MakePrimitiveRecord(payReqType, &i.PaymentRequest), - - // Add/settle metadata. - tlv.MakePrimitiveRecord(createTimeType, &creationDateBytes), - tlv.MakePrimitiveRecord(settleTimeType, &settleDateBytes), - tlv.MakePrimitiveRecord(addIndexType, &i.AddIndex), - tlv.MakePrimitiveRecord(settleIndexType, &i.SettleIndex), - - // Terms. - tlv.MakePrimitiveRecord(preimageType, &preimage), - tlv.MakePrimitiveRecord(valueType, &value), - tlv.MakePrimitiveRecord(cltvDeltaType, &cltvDelta), - tlv.MakePrimitiveRecord(expiryType, &expiry), - tlv.MakePrimitiveRecord(paymentAddrType, &i.Terms.PaymentAddr), - tlv.MakePrimitiveRecord(featuresType, &featureBytes), - - // Invoice state. 
- tlv.MakePrimitiveRecord(invStateType, &state), - tlv.MakePrimitiveRecord(amtPaidType, &amtPaid), - ) - if err != nil { - return err - } - - var b bytes.Buffer - if err = tlvStream.Encode(&b); err != nil { - return err - } - - err = util.WriteBin(w, byteOrder, uint64(b.Len())) - if err != nil { - return err - } - - if _, err = util.Write(w, b.Bytes()); err != nil { - return err - } - - return serializeHtlcs(w, i.Htlcs) -} - -// serializeHtlcs writes a serialized list of invoice htlcs into a writer. -func serializeHtlcs(w io.Writer, htlcs []byte) er.R { - _, err := util.Write(w, htlcs) - return err -} diff --git a/lnd/channeldb/migration12/migration.go b/lnd/channeldb/migration12/migration.go deleted file mode 100644 index 28d9fec3..00000000 --- a/lnd/channeldb/migration12/migration.go +++ /dev/null @@ -1,76 +0,0 @@ -package migration12 - -import ( - "bytes" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/pktlog/log" -) - -var emptyFeatures = lnwire.NewFeatureVector(nil, nil) - -// MigrateInvoiceTLV migrates all existing invoice bodies over to be serialized -// in a single TLV stream. In the process, we drop the Receipt field and add -// PaymentAddr and Features to the invoice Terms. -func MigrateInvoiceTLV(tx kvdb.RwTx) er.R { - log.Infof("Migrating invoice bodies to TLV, " + - "adding payment addresses and feature vectors.") - - invoiceB := tx.ReadWriteBucket(invoiceBucket) - if invoiceB == nil { - return nil - } - - type keyedInvoice struct { - key []byte - invoice Invoice - } - - // Read in all existing invoices using the old format. - var invoices []keyedInvoice - err := invoiceB.ForEach(func(k, v []byte) er.R { - if v == nil { - return nil - } - - invoiceReader := bytes.NewReader(v) - invoice, err := LegacyDeserializeInvoice(invoiceReader) - if err != nil { - return err - } - - // Insert an empty feature vector on all old payments. 
- invoice.Terms.Features = emptyFeatures - - invoices = append(invoices, keyedInvoice{ - key: k, - invoice: invoice, - }) - - return nil - }) - if err != nil { - return err - } - - // Write out each one under its original key using TLV. - for _, ki := range invoices { - var b bytes.Buffer - errr := SerializeInvoice(&b, &ki.invoice) - if errr != nil { - return errr - } - - err = invoiceB.Put(ki.key, b.Bytes()) - if err != nil { - return err - } - } - - log.Infof("Migration to TLV invoice bodies, " + - "payment address, and features complete!") - - return nil -} diff --git a/lnd/channeldb/migration12/migration_test.go b/lnd/channeldb/migration12/migration_test.go deleted file mode 100644 index 6e9a20d7..00000000 --- a/lnd/channeldb/migration12/migration_test.go +++ /dev/null @@ -1,206 +0,0 @@ -package migration12_test - -import ( - "bytes" - "testing" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/lnd/channeldb/migration12" - "github.com/pkt-cash/pktd/lnd/channeldb/migtest" - "github.com/pkt-cash/pktd/lnd/lntypes" -) - -var ( - // invoiceBucket is the name of the bucket within the database that - // stores all data related to invoices no matter their final state. - // Within the invoice bucket, each invoice is keyed by its invoice ID - // which is a monotonically increasing uint32. 
- invoiceBucket = []byte("invoices") - - preimage = lntypes.Preimage{ - 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, - 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, - 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, - 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, - } - - hash = preimage.Hash() - - beforeInvoice0Htlcs = []byte{ - 0x0b, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, 0x72, - 0x6c, 0x64, 0x09, 0x62, 0x79, 0x65, 0x20, 0x77, 0x6f, 0x72, - 0x6c, 0x64, 0x06, 0x70, 0x61, 0x79, 0x72, 0x65, 0x71, 0x00, - 0x00, 0x00, 0x20, 0x00, 0x00, 0x4e, 0x94, 0x91, 0x4f, 0x00, - 0x00, 0x0f, 0x01, 0x00, 0x00, 0x00, 0x0e, 0x77, 0xc4, 0xd3, - 0xd5, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x20, 0x0f, 0x01, 0x00, - 0x00, 0x00, 0x0e, 0x77, 0xd5, 0xc8, 0x1c, 0x00, 0x00, 0x00, - 0x00, 0xfe, 0x20, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, - 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, - 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, - 0x42, 0x42, 0x42, 0x42, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x03, 0xe8, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xa4, - } - - afterInvoice0Htlcs = []byte{ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb8, 0x00, 0x0b, - 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, 0x72, 0x6c, - 0x64, 0x01, 0x06, 0x70, 0x61, 0x79, 0x72, 0x65, 0x71, 0x02, - 0x0f, 0x01, 0x00, 0x00, 0x00, 0x0e, 0x77, 0xc4, 0xd3, 0xd5, - 0x00, 0x00, 0x00, 0x00, 0xfe, 0x20, 0x03, 0x0f, 0x01, 0x00, - 0x00, 0x00, 0x0e, 0x77, 0xd5, 0xc8, 0x1c, 0x00, 0x00, 0x00, - 0x00, 0xfe, 0x20, 0x04, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x05, 0x05, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x06, 0x06, 0x20, 0x42, 0x42, 0x42, 0x42, 0x42, - 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, - 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, - 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x07, 0x08, 0x00, - 0x00, 0x00, 0x00, 0x00, 
0x00, 0x03, 0xe8, 0x08, 0x04, 0x00, - 0x00, 0x00, 0x20, 0x09, 0x08, 0x00, 0x00, 0x4e, 0x94, 0x91, - 0x4f, 0x00, 0x00, 0x0a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b, 0x00, 0x0c, - 0x01, 0x03, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0xa4, - } - - testHtlc = []byte{ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x41, - 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, - 0x03, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, - 0x05, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x64, - 0x07, 0x04, 0x00, 0x00, 0x00, 0x58, 0x09, 0x08, 0x00, 0x13, - 0xbc, 0xbf, 0x72, 0x4e, 0x1e, 0x00, 0x0b, 0x08, 0x00, 0x17, - 0xaf, 0x4c, 0x22, 0xc4, 0x24, 0x00, 0x0d, 0x04, 0x00, 0x00, - 0x23, 0x1d, 0x0f, 0x01, 0x02, - } - - beforeInvoice1Htlc = append([]byte{ - 0x0b, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, 0x72, - 0x6c, 0x64, 0x09, 0x62, 0x79, 0x65, 0x20, 0x77, 0x6f, 0x72, - 0x6c, 0x64, 0x06, 0x70, 0x61, 0x79, 0x72, 0x65, 0x71, 0x00, - 0x00, 0x00, 0x20, 0x00, 0x00, 0x4e, 0x94, 0x91, 0x4f, 0x00, - 0x00, 0x0f, 0x01, 0x00, 0x00, 0x00, 0x0e, 0x77, 0xc4, 0xd3, - 0xd5, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x20, 0x0f, 0x01, 0x00, - 0x00, 0x00, 0x0e, 0x77, 0xd5, 0xc8, 0x1c, 0x00, 0x00, 0x00, - 0x00, 0xfe, 0x20, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, - 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, - 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, - 0x42, 0x42, 0x42, 0x42, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x03, 0xe8, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xa4, - }, testHtlc...) 
- - afterInvoice1Htlc = append([]byte{ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb8, 0x00, 0x0b, - 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, 0x72, 0x6c, - 0x64, 0x01, 0x06, 0x70, 0x61, 0x79, 0x72, 0x65, 0x71, 0x02, - 0x0f, 0x01, 0x00, 0x00, 0x00, 0x0e, 0x77, 0xc4, 0xd3, 0xd5, - 0x00, 0x00, 0x00, 0x00, 0xfe, 0x20, 0x03, 0x0f, 0x01, 0x00, - 0x00, 0x00, 0x0e, 0x77, 0xd5, 0xc8, 0x1c, 0x00, 0x00, 0x00, - 0x00, 0xfe, 0x20, 0x04, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x05, 0x05, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x06, 0x06, 0x20, 0x42, 0x42, 0x42, 0x42, 0x42, - 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, - 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, - 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x07, 0x08, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xe8, 0x08, 0x04, 0x00, - 0x00, 0x00, 0x20, 0x09, 0x08, 0x00, 0x00, 0x4e, 0x94, 0x91, - 0x4f, 0x00, 0x00, 0x0a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b, 0x00, 0x0c, - 0x01, 0x03, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0xa4, - }, testHtlc...) -) - -type migrationTest struct { - name string - beforeMigration func(kvdb.RwTx) er.R - afterMigration func(kvdb.RwTx) er.R -} - -var migrationTests = []migrationTest{ - { - name: "no invoices", - beforeMigration: func(kvdb.RwTx) er.R { return nil }, - afterMigration: func(kvdb.RwTx) er.R { return nil }, - }, - { - name: "zero htlcs", - beforeMigration: genBeforeMigration(beforeInvoice0Htlcs), - afterMigration: genAfterMigration(afterInvoice0Htlcs), - }, - { - name: "one htlc", - beforeMigration: genBeforeMigration(beforeInvoice1Htlc), - afterMigration: genAfterMigration(afterInvoice1Htlc), - }, -} - -// genBeforeMigration creates a closure that inserts an invoice serialized under -// the old format under the test payment hash. 
-func genBeforeMigration(beforeBytes []byte) func(kvdb.RwTx) er.R { - return func(tx kvdb.RwTx) er.R { - invoices, err := tx.CreateTopLevelBucket( - invoiceBucket, - ) - if err != nil { - return err - } - - return invoices.Put(hash[:], beforeBytes) - } -} - -// genAfterMigration creates a closure that verifies the tlv invoice migration -// succeeded, but comparing the resulting encoding of the invoice to the -// expected serialization. In addition, the decoded invoice is compared against -// the expected invoice for equality. -func genAfterMigration(afterBytes []byte) func(kvdb.RwTx) er.R { - return func(tx kvdb.RwTx) er.R { - invoices := tx.ReadWriteBucket(invoiceBucket) - if invoices == nil { - return er.Errorf("invoice bucket not found") - } - - // Fetch the new invoice bytes and check that they match our - // expected serialization. - invoiceBytes := invoices.Get(hash[:]) - if !bytes.Equal(invoiceBytes, afterBytes) { - return er.Errorf("invoice bytes mismatch, "+ - "want: %x, got: %x", - invoiceBytes, afterBytes) - } - - return nil - } -} - -// TestTLVInvoiceMigration executes a suite of migration tests for moving -// invoices to use TLV for their bodies. In the process, feature bits and -// payment addresses are added to the invoice while the receipt field is -// dropped. We test a few different invoices with a varying number of HTLCs, as -// well as the case where there are no invoices present. -// -// NOTE: The test vectors each include a receipt that is not present on the -// final struct, but verifies that the field is properly removed. 
-func TestTLVInvoiceMigration(t *testing.T) { - for _, test := range migrationTests { - test := test - t.Run(test.name, func(t *testing.T) { - migtest.ApplyMigration( - t, - test.beforeMigration, - test.afterMigration, - migration12.MigrateInvoiceTLV, - false, - ) - }) - } -} diff --git a/lnd/channeldb/migration13/migration.go b/lnd/channeldb/migration13/migration.go deleted file mode 100644 index 916d95bb..00000000 --- a/lnd/channeldb/migration13/migration.go +++ /dev/null @@ -1,203 +0,0 @@ -package migration13 - -import ( - "encoding/binary" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/pktlog/log" -) - -var ( - paymentsRootBucket = []byte("payments-root-bucket") - - // paymentCreationInfoKey is a key used in the payment's sub-bucket to - // store the creation info of the payment. - paymentCreationInfoKey = []byte("payment-creation-info") - - // paymentFailInfoKey is a key used in the payment's sub-bucket to - // store information about the reason a payment failed. - paymentFailInfoKey = []byte("payment-fail-info") - - // paymentAttemptInfoKey is a key used in the payment's sub-bucket to - // store the info about the latest attempt that was done for the - // payment in question. - paymentAttemptInfoKey = []byte("payment-attempt-info") - - // paymentSettleInfoKey is a key used in the payment's sub-bucket to - // store the settle info of the payment. - paymentSettleInfoKey = []byte("payment-settle-info") - - // paymentHtlcsBucket is a bucket where we'll store the information - // about the HTLCs that were attempted for a payment. - paymentHtlcsBucket = []byte("payment-htlcs-bucket") - - // htlcAttemptInfoKey is a key used in a HTLC's sub-bucket to store the - // info about the attempt that was done for the HTLC in question. - htlcAttemptInfoKey = []byte("htlc-attempt-info") - - // htlcSettleInfoKey is a key used in a HTLC's sub-bucket to store the - // settle info, if any. 
- htlcSettleInfoKey = []byte("htlc-settle-info") - - // htlcFailInfoKey is a key used in a HTLC's sub-bucket to store - // failure information, if any. - htlcFailInfoKey = []byte("htlc-fail-info") - - byteOrder = binary.BigEndian -) - -// MigrateMPP migrates the payments to a new structure that accommodates for mpp -// payments. -func MigrateMPP(tx kvdb.RwTx) er.R { - log.Infof("Migrating payments to mpp structure") - - // Iterate over all payments and store their indexing keys. This is - // needed, because no modifications are allowed inside a Bucket.ForEach - // loop. - paymentsBucket := tx.ReadWriteBucket(paymentsRootBucket) - if paymentsBucket == nil { - return nil - } - - var paymentKeys [][]byte - err := paymentsBucket.ForEach(func(k, v []byte) er.R { - paymentKeys = append(paymentKeys, k) - return nil - }) - if err != nil { - return err - } - - // With all keys retrieved, start the migration. - for _, k := range paymentKeys { - bucket := paymentsBucket.NestedReadWriteBucket(k) - - // We only expect sub-buckets to be found in - // this top-level bucket. - if bucket == nil { - return er.Errorf("non bucket element in " + - "payments bucket") - } - - // Fetch old format creation info. - creationInfo := bucket.Get(paymentCreationInfoKey) - if creationInfo == nil { - return er.Errorf("creation info not found") - } - - // Make a copy because bbolt doesn't allow this value to be - // changed in-place. - newCreationInfo := make([]byte, len(creationInfo)) - copy(newCreationInfo, creationInfo) - - // Convert to nano seconds. - timeBytes := newCreationInfo[32+8 : 32+8+8] - time := byteOrder.Uint64(timeBytes) - timeNs := time * 1000000000 - byteOrder.PutUint64(timeBytes, timeNs) - - // Write back new format creation info. - err := bucket.Put(paymentCreationInfoKey, newCreationInfo) - if err != nil { - return err - } - - // No migration needed if there is no attempt stored. 
- attemptInfo := bucket.Get(paymentAttemptInfoKey) - if attemptInfo == nil { - continue - } - - // Delete attempt info on the payment level. - if err := bucket.Delete(paymentAttemptInfoKey); err != nil { - return err - } - - // Save attempt id for later use. - attemptID := attemptInfo[:8] - - // Discard attempt id. It will become a bucket key in the new - // structure. - attemptInfo = attemptInfo[8:] - - // Append unknown (zero) attempt time. - var zero [8]byte - attemptInfo = append(attemptInfo, zero[:]...) - - // Create bucket that contains all htlcs. - htlcsBucket, err := bucket.CreateBucket(paymentHtlcsBucket) - if err != nil { - return err - } - - // Create an htlc for this attempt. - htlcBucket, err := htlcsBucket.CreateBucket(attemptID) - if err != nil { - return err - } - - // Save migrated attempt info. - err = htlcBucket.Put(htlcAttemptInfoKey, attemptInfo) - if err != nil { - return err - } - - // Migrate settle info. - settleInfo := bucket.Get(paymentSettleInfoKey) - if settleInfo != nil { - // Payment-level settle info can be deleted. - err := bucket.Delete(paymentSettleInfoKey) - if err != nil { - return err - } - - // Append unknown (zero) settle time. - settleInfo = append(settleInfo, zero[:]...) - - // Save settle info. - err = htlcBucket.Put(htlcSettleInfoKey, settleInfo) - if err != nil { - return err - } - - // Migration for settled htlc completed. - continue - } - - // If there is no payment-level failure reason, the payment is - // still in flight and nothing else needs to be migrated. - // Otherwise the payment-level failure reason can remain - // unchanged. - inFlight := bucket.Get(paymentFailInfoKey) == nil - if inFlight { - continue - } - - // The htlc failed. Add htlc fail info with reason unknown. We - // don't have access to the original failure reason anymore. - failInfo := []byte{ - // Fail time unknown. - 0, 0, 0, 0, 0, 0, 0, 0, - - // Zero length wire message. - 0, - - // Failure reason unknown. 
- 0, - - // Failure source index zero. - 0, 0, 0, 0, - } - - // Save fail info. - err = htlcBucket.Put(htlcFailInfoKey, failInfo) - if err != nil { - return err - } - } - - log.Infof("Migration of payments to mpp structure complete!") - - return nil -} diff --git a/lnd/channeldb/migration13/migration_test.go b/lnd/channeldb/migration13/migration_test.go deleted file mode 100644 index 101c008c..00000000 --- a/lnd/channeldb/migration13/migration_test.go +++ /dev/null @@ -1,124 +0,0 @@ -package migration13 - -import ( - "testing" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/lnd/channeldb/migtest" -) - -var ( - hex = migtest.Hex - - zeroTime = hex("0000000000000000") - noFailureMessage = hex("00") - failureReasonUnknown = hex("00") - zeroFailureSourceIdx = hex("00000000") - - hash1 = hex("02acee76ebd53d00824410cf6adecad4f50334dac702bd5a2d3ba01b91709f0e") - creationInfoAmt1 = hex("00000000004c4b40") - creationInfoTime1 = hex("000000005e4fb7ab") // 1582282667 (decimal) - creationInfoTimeNano1 = hex("15f565b3cccaee00") // 1582282667000000000 (decimal) - creationInfoPayReq1 = hex("00000000") - attemptInfo1 = hex("2997a72e129fc9d638ef2fa4e233567d808d4f18a4f087637582427962eb3bf800005ce600000000004c4b402102ec12e83eafe27ce6d03bbe0c0de4b79fe2b9934615c8aa7693f73d2e41b089700000000121028c2dd128c7a6c1a0fceb3e3eb5ed55e0a0ae1a939eb786b097322d830d47db75005ca4000001000000005ce600000000004c4b400000000000") - attemptID1 = hex("0000000000000001") - paymentID1 = hex("0000000000000001") - - hash2 = hex("62eb3f0a48f954e495d0c14ac63df04a67cefa59dafdbcd3d5046d1f5647840c") - preimage2 = hex("479593b7d3cbb45beb22d448451a2f3619b2095adfb38f4d92e9886e96534368") - attemptID2 = hex("00000000000003e8") - paymentID2 = hex("0000000000000002") - attemptInfo2 = 
hex("8de663f9bb4b8d1ebdb496d22dc1cb657a346215607308549f41b01e2adf2ce900005ce600000000005b8d802102ec12e83eafe27ce6d03bbe0c0de4b79fe2b9934615c8aa7693f73d2e41b089700000000121028c2dd128c7a6c1a0fceb3e3eb5ed55e0a0ae1a939eb786b097322d830d47db75005ca4000001000000005ce600000000005b8d8000000000010000000000000008233d281e2cbe01f0b82dd6750967c9233426b98ae6549c696365f57f86f942a3795b8d80") - creationInfoAmt2 = hex("00000000005b8d80") - creationInfoTime2 = hex("000000005e4fb97f") // 1582283135 (decimal) - creationInfoTimeNano2 = hex("15F56620C3C43600") // 1582283135000000000 (decimal) - creationInfoPayReq2 = hex("000000fc6c6e62637274363075317030796c7774367070357674346e377a6a676c39327766397773633939767630307366666e7561376a656d74376d6535373471336b3337346a387373787164717163717a70677370353835357075743937713863747374776b7735796b306a667278736e746e7a6878326a77786a636d3937346c636437327a3564757339717939717371653872336b3578733379367868667366366d6a6e706d717172306661797a677a63336a6b663571787a6c376866787a6666763578667a7679647564327275767974706571787072376868796830726a747574373033333274737774686661616e303773766b6667716b7174667275") - - hash3 = hex("62eb3f0a48f954e495d0c14ac63df04a67cefa59dafdbcd3d5046d1f5647840d") - attemptInfo3 = hex("53ce0a4c1507cc5ea00ec88b76bd43a3978ac13605497030b821af6ce9c110f300005ce600000000006acfc02102ec12e83eafe27ce6d03bbe0c0de4b79fe2b9934615c8aa7693f73d2e41b089700000000121028c2dd128c7a6c1a0fceb3e3eb5ed55e0a0ae1a939eb786b097322d830d47db75005ca4000001000000005ce600000000006acfc000000000010000000000000008233044f235354472318b381fad3e21eb5a58f5099918868b0610e7b7bcb7a4adc96acfc0") - attemptID3 = hex("00000000000003e9") - paymentID3 = hex("0000000000000003") - creationInfoAmt3 = hex("00000000006acfc0") - creationInfoTime3 = hex("000000005e4fb98d") // 1582283149 - creationInfoTimeNano3 = hex("15F56624063B4200") // 1582283149000000000 (decimal) - creationInfoPayReq3 = 
hex("000000fc6c6e62637274373075317030796c7776327070357674346e377a6a676c39327766397773633939767630307366666e7561376a656d74376d6535373471336b3337346a387373787364717163717a706773703578707a307964663467336572727a656372376b6e7567307474667630327a7665727a72676b70737375376d6d6564617934687973397179397173717774656479336e666c323534787a36787a75763974746767757a647473356e617a7461616a6735667772686438396b336d70753971726d7a6c3779637a306e30666e6e763077753032726632706e64636c393761646c667636376a7a6e7063677477356434366771323571326e32") - - // pre is the data in the payments root bucket in database version 12 format. - pre = map[string]interface{}{ - // A failed payment. - hash1: map[string]interface{}{ - "payment-attempt-info": attemptID1 + attemptInfo1, - "payment-creation-info": hash1 + creationInfoAmt1 + creationInfoTime1 + creationInfoPayReq1, - "payment-fail-info": hex("03"), - "payment-sequence-key": paymentID1, - }, - - // A settled payment. - hash2: map[string]interface{}{ - "payment-attempt-info": attemptID2 + attemptInfo2, - "payment-creation-info": hash2 + creationInfoAmt2 + creationInfoTime2 + creationInfoPayReq2, - "payment-sequence-key": paymentID2, - "payment-settle-info": preimage2, - }, - - // An in-flight payment. - hash3: map[string]interface{}{ - "payment-attempt-info": attemptID3 + attemptInfo3, - "payment-creation-info": hash3 + creationInfoAmt3 + creationInfoTime3 + creationInfoPayReq3, - "payment-sequence-key": paymentID3, - }, - } - - // post is the expected data after migration. 
- post = map[string]interface{}{ - hash1: map[string]interface{}{ - "payment-creation-info": hash1 + creationInfoAmt1 + creationInfoTimeNano1 + creationInfoPayReq1, - "payment-fail-info": hex("03"), - "payment-htlcs-bucket": map[string]interface{}{ - attemptID1: map[string]interface{}{ - "htlc-attempt-info": attemptInfo1 + zeroTime, - "htlc-fail-info": zeroTime + noFailureMessage + failureReasonUnknown + zeroFailureSourceIdx, - }, - }, - "payment-sequence-key": paymentID1, - }, - hash2: map[string]interface{}{ - "payment-creation-info": hash2 + creationInfoAmt2 + creationInfoTimeNano2 + creationInfoPayReq2, - "payment-htlcs-bucket": map[string]interface{}{ - attemptID2: map[string]interface{}{ - "htlc-attempt-info": attemptInfo2 + zeroTime, - "htlc-settle-info": preimage2 + zeroTime, - }, - }, - "payment-sequence-key": paymentID2, - }, - hash3: map[string]interface{}{ - "payment-creation-info": hash3 + creationInfoAmt3 + creationInfoTimeNano3 + creationInfoPayReq3, - "payment-htlcs-bucket": map[string]interface{}{ - attemptID3: map[string]interface{}{ - "htlc-attempt-info": attemptInfo3 + zeroTime, - }, - }, - "payment-sequence-key": paymentID3, - }, - } -) - -// TestMigrateMpp asserts that the database is properly migrated to the mpp -// payment structure. 
-func TestMigrateMpp(t *testing.T) { - var paymentsRootBucket = []byte("payments-root-bucket") - - migtest.ApplyMigration( - t, - func(tx kvdb.RwTx) er.R { - return migtest.RestoreDB(tx, paymentsRootBucket, pre) - }, - func(tx kvdb.RwTx) er.R { - return migtest.VerifyDB(tx, paymentsRootBucket, post) - }, - MigrateMPP, - false, - ) -} diff --git a/lnd/channeldb/migration16/migration.go b/lnd/channeldb/migration16/migration.go deleted file mode 100644 index 22c4868b..00000000 --- a/lnd/channeldb/migration16/migration.go +++ /dev/null @@ -1,192 +0,0 @@ -package migration16 - -import ( - "bytes" - "encoding/binary" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/pktlog/log" - "github.com/pkt-cash/pktd/wire" -) - -var ( - paymentsRootBucket = []byte("payments-root-bucket") - - paymentSequenceKey = []byte("payment-sequence-key") - - duplicatePaymentsBucket = []byte("payment-duplicate-bucket") - - paymentsIndexBucket = []byte("payments-index-bucket") - - byteOrder = binary.BigEndian -) - -// paymentIndexType indicates the type of index we have recorded in the payment -// indexes bucket. -type paymentIndexType uint8 - -// paymentIndexTypeHash is a payment index type which indicates that we have -// created an index of payment sequence number to payment hash. -const paymentIndexTypeHash paymentIndexType = 0 - -// paymentIndex stores all the information we require to create an index by -// sequence number for a payment. -type paymentIndex struct { - // paymentHash is the hash of the payment, which is its key in the - // payment root bucket. - paymentHash []byte - - // sequenceNumbers is the set of sequence numbers associated with this - // payment hash. There will be more than one sequence number in the - // case where duplicate payments are present. 
- sequenceNumbers [][]byte -} - -// MigrateSequenceIndex migrates the payments db to contain a new bucket which -// provides an index from sequence number to payment hash. This is required -// for more efficient sequential lookup of payments, which are keyed by payment -// hash before this migration. -func MigrateSequenceIndex(tx kvdb.RwTx) er.R { - log.Infof("Migrating payments to add sequence number index") - - // Get a list of indices we need to write. - indexList, err := getPaymentIndexList(tx) - if err != nil { - return err - } - - // Create the top level bucket that we will use to index payments in. - bucket, err := tx.CreateTopLevelBucket(paymentsIndexBucket) - if err != nil { - return err - } - - // Write an index for each of our payments. - for _, index := range indexList { - // Write indexes for each of our sequence numbers. - for _, seqNr := range index.sequenceNumbers { - err := putIndex(bucket, seqNr, index.paymentHash) - if err != nil { - return err - } - } - } - - return nil -} - -// putIndex performs a sanity check that ensures we are not writing duplicate -// indexes to disk then creates the index provided. -func putIndex(bucket kvdb.RwBucket, sequenceNr, paymentHash []byte) er.R { - // Add a sanity check that we do not already have an entry with - // this sequence number. - existingEntry := bucket.Get(sequenceNr) - if existingEntry != nil { - return er.Errorf("sequence number: %x duplicated", - sequenceNr) - } - - bytes, err := serializePaymentIndexEntry(paymentHash) - if err != nil { - return err - } - - return bucket.Put(sequenceNr, bytes) -} - -// serializePaymentIndexEntry serializes a payment hash typed index. The value -// produced contains a payment index type (which can be used in future to -// signal different payment index types) and the payment hash. 
-func serializePaymentIndexEntry(hash []byte) ([]byte, er.R) { - var b bytes.Buffer - - err := util.WriteBin(&b, byteOrder, paymentIndexTypeHash) - if err != nil { - return nil, err - } - - if err := wire.WriteVarBytes(&b, 0, hash); err != nil { - return nil, err - } - - return b.Bytes(), nil -} - -// getPaymentIndexList gets a list of indices we need to write for our current -// set of payments. -func getPaymentIndexList(tx kvdb.RTx) ([]paymentIndex, er.R) { - // Iterate over all payments and store their indexing keys. This is - // needed, because no modifications are allowed inside a Bucket.ForEach - // loop. - paymentsBucket := tx.ReadBucket(paymentsRootBucket) - if paymentsBucket == nil { - return nil, nil - } - - var indexList []paymentIndex - err := paymentsBucket.ForEach(func(k, v []byte) er.R { - // Get the bucket which contains the payment, fail if the key - // does not have a bucket. - bucket := paymentsBucket.NestedReadBucket(k) - if bucket == nil { - return er.Errorf("non bucket element in " + - "payments bucket") - } - seqBytes := bucket.Get(paymentSequenceKey) - if seqBytes == nil { - return er.Errorf("nil sequence number bytes") - } - - seqNrs, err := fetchSequenceNumbers(bucket) - if err != nil { - return err - } - - // Create an index object with our payment hash and sequence - // numbers and append it to our set of indexes. - index := paymentIndex{ - paymentHash: k, - sequenceNumbers: seqNrs, - } - - indexList = append(indexList, index) - return nil - }) - if err != nil { - return nil, err - } - - return indexList, nil -} - -// fetchSequenceNumbers fetches all the sequence numbers associated with a -// payment, including those belonging to any duplicate payments. 
-func fetchSequenceNumbers(paymentBucket kvdb.RBucket) ([][]byte, er.R) { - seqNum := paymentBucket.Get(paymentSequenceKey) - if seqNum == nil { - return nil, er.New("expected sequence number") - } - - sequenceNumbers := [][]byte{seqNum} - - // Get the duplicate payments bucket, if it has no duplicates, just - // return early with the payment sequence number. - duplicates := paymentBucket.NestedReadBucket(duplicatePaymentsBucket) - if duplicates == nil { - return sequenceNumbers, nil - } - - // If we do have duplicated, they are keyed by sequence number, so we - // iterate through the duplicates bucket and add them to our set of - // sequence numbers. - if err := duplicates.ForEach(func(k, v []byte) er.R { - sequenceNumbers = append(sequenceNumbers, k) - return nil - }); err != nil { - return nil, err - } - - return sequenceNumbers, nil -} diff --git a/lnd/channeldb/migration16/migration_test.go b/lnd/channeldb/migration16/migration_test.go deleted file mode 100644 index d20e80ac..00000000 --- a/lnd/channeldb/migration16/migration_test.go +++ /dev/null @@ -1,145 +0,0 @@ -package migration16 - -import ( - "testing" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/lnd/channeldb/migtest" -) - -var ( - hexStr = migtest.Hex - - hash1Str = "02acee76ebd53d00824410cf6adecad4f50334dac702bd5a2d3ba01b91709f0e" - hash1 = hexStr(hash1Str) - paymentID1 = hexStr("0000000000000001") - - hash2Str = "62eb3f0a48f954e495d0c14ac63df04a67cefa59dafdbcd3d5046d1f5647840c" - hash2 = hexStr(hash2Str) - paymentID2 = hexStr("0000000000000002") - - paymentID3 = hexStr("0000000000000003") - - // pre is the data in the payments root bucket in database version 13 format. - pre = map[string]interface{}{ - // A payment without duplicates. - hash1: map[string]interface{}{ - "payment-sequence-key": paymentID1, - }, - - // A payment with a duplicate. 
- hash2: map[string]interface{}{ - "payment-sequence-key": paymentID2, - "payment-duplicate-bucket": map[string]interface{}{ - paymentID3: map[string]interface{}{ - "payment-sequence-key": paymentID3, - }, - }, - }, - } - - preFails = map[string]interface{}{ - // A payment without duplicates. - hash1: map[string]interface{}{ - "payment-sequence-key": paymentID1, - "payment-duplicate-bucket": map[string]interface{}{ - paymentID1: map[string]interface{}{ - "payment-sequence-key": paymentID1, - }, - }, - }, - } - - // post is the expected data after migration. - post = map[string]interface{}{ - paymentID1: paymentHashIndex(hash1Str), - paymentID2: paymentHashIndex(hash2Str), - paymentID3: paymentHashIndex(hash2Str), - } -) - -// paymentHashIndex produces a string that represents the value we expect for -// our payment indexes from a hex encoded payment hash string. -func paymentHashIndex(hashStr string) string { - hash, err := util.DecodeHex(hashStr) - if err != nil { - panic(err) - } - - bytes, err := serializePaymentIndexEntry(hash) - if err != nil { - panic(err) - } - - return string(bytes) -} - -// MigrateSequenceIndex asserts that the database is properly migrated to -// contain a payments index. -func TestMigrateSequenceIndex(t *testing.T) { - tests := []struct { - name string - shouldFail bool - pre map[string]interface{} - post map[string]interface{} - }{ - { - name: "migration ok", - shouldFail: false, - pre: pre, - post: post, - }, - { - name: "duplicate sequence number", - shouldFail: true, - pre: preFails, - post: post, - }, - { - name: "no payments", - shouldFail: false, - pre: nil, - post: nil, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - // Before the migration we have a payments bucket. - before := func(tx kvdb.RwTx) er.R { - return migtest.RestoreDB( - tx, paymentsRootBucket, test.pre, - ) - } - - // After the migration, we should have an untouched - // payments bucket and a new index bucket. 
- after := func(tx kvdb.RwTx) er.R { - if err := migtest.VerifyDB( - tx, paymentsRootBucket, test.pre, - ); err != nil { - return err - } - - // If we expect our migration to fail, we don't - // expect an index bucket. - if test.shouldFail { - return nil - } - - return migtest.VerifyDB( - tx, paymentsIndexBucket, test.post, - ) - } - - migtest.ApplyMigration( - t, before, after, MigrateSequenceIndex, - test.shouldFail, - ) - }) - } -} diff --git a/lnd/channeldb/migration_01_to_11/addr.go b/lnd/channeldb/migration_01_to_11/addr.go deleted file mode 100644 index 3af04fd6..00000000 --- a/lnd/channeldb/migration_01_to_11/addr.go +++ /dev/null @@ -1,221 +0,0 @@ -package migration_01_to_11 - -import ( - "encoding/binary" - "io" - "net" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/lnd/tor" -) - -// addressType specifies the network protocol and version that should be used -// when connecting to a node at a particular address. -type addressType uint8 - -const ( - // tcp4Addr denotes an IPv4 TCP address. - tcp4Addr addressType = 0 - - // tcp6Addr denotes an IPv6 TCP address. - tcp6Addr addressType = 1 - - // v2OnionAddr denotes a version 2 Tor onion service address. - v2OnionAddr addressType = 2 - - // v3OnionAddr denotes a version 3 Tor (prop224) onion service address. - v3OnionAddr addressType = 3 -) - -// encodeTCPAddr serializes a TCP address into its compact raw bytes -// representation. 
-func encodeTCPAddr(w io.Writer, addr *net.TCPAddr) er.R { - var ( - addrType byte - ip []byte - ) - - if addr.IP.To4() != nil { - addrType = byte(tcp4Addr) - ip = addr.IP.To4() - } else { - addrType = byte(tcp6Addr) - ip = addr.IP.To16() - } - - if ip == nil { - return er.Errorf("unable to encode IP %v", addr.IP) - } - - if _, err := util.Write(w, []byte{addrType}); err != nil { - return err - } - - if _, err := util.Write(w, ip); err != nil { - return err - } - - var port [2]byte - byteOrder.PutUint16(port[:], uint16(addr.Port)) - if _, err := util.Write(w, port[:]); err != nil { - return err - } - - return nil -} - -// encodeOnionAddr serializes an onion address into its compact raw bytes -// representation. -func encodeOnionAddr(w io.Writer, addr *tor.OnionAddr) er.R { - var suffixIndex int - hostLen := len(addr.OnionService) - switch hostLen { - case tor.V2Len: - if _, err := util.Write(w, []byte{byte(v2OnionAddr)}); err != nil { - return err - } - suffixIndex = tor.V2Len - tor.OnionSuffixLen - case tor.V3Len: - if _, err := util.Write(w, []byte{byte(v3OnionAddr)}); err != nil { - return err - } - suffixIndex = tor.V3Len - tor.OnionSuffixLen - default: - return er.New("unknown onion service length") - } - - suffix := addr.OnionService[suffixIndex:] - if suffix != tor.OnionSuffix { - return er.Errorf("invalid suffix \"%v\"", suffix) - } - - host, err := tor.Base32Encoding.DecodeString( - addr.OnionService[:suffixIndex], - ) - if err != nil { - return er.E(err) - } - - // Sanity check the decoded length. 
- switch { - case hostLen == tor.V2Len && len(host) != tor.V2DecodedLen: - return er.Errorf("onion service %v decoded to invalid host %x", - addr.OnionService, host) - - case hostLen == tor.V3Len && len(host) != tor.V3DecodedLen: - return er.Errorf("onion service %v decoded to invalid host %x", - addr.OnionService, host) - } - - if _, err := util.Write(w, host); err != nil { - return err - } - - var port [2]byte - byteOrder.PutUint16(port[:], uint16(addr.Port)) - if _, err := util.Write(w, port[:]); err != nil { - return err - } - - return nil -} - -// deserializeAddr reads the serialized raw representation of an address and -// deserializes it into the actual address. This allows us to avoid address -// resolution within the channeldb package. -func deserializeAddr(r io.Reader) (net.Addr, er.R) { - var addrType [1]byte - if _, err := r.Read(addrType[:]); err != nil { - return nil, er.E(err) - } - - var address net.Addr - switch addressType(addrType[0]) { - case tcp4Addr: - var ip [4]byte - if _, err := r.Read(ip[:]); err != nil { - return nil, er.E(err) - } - - var port [2]byte - if _, err := r.Read(port[:]); err != nil { - return nil, er.E(err) - } - - address = &net.TCPAddr{ - IP: net.IP(ip[:]), - Port: int(binary.BigEndian.Uint16(port[:])), - } - case tcp6Addr: - var ip [16]byte - if _, err := r.Read(ip[:]); err != nil { - return nil, er.E(err) - } - - var port [2]byte - if _, err := r.Read(port[:]); err != nil { - return nil, er.E(err) - } - - address = &net.TCPAddr{ - IP: net.IP(ip[:]), - Port: int(binary.BigEndian.Uint16(port[:])), - } - case v2OnionAddr: - var h [tor.V2DecodedLen]byte - if _, err := r.Read(h[:]); err != nil { - return nil, er.E(err) - } - - var p [2]byte - if _, err := r.Read(p[:]); err != nil { - return nil, er.E(err) - } - - onionService := tor.Base32Encoding.EncodeToString(h[:]) - onionService += tor.OnionSuffix - port := int(binary.BigEndian.Uint16(p[:])) - - address = &tor.OnionAddr{ - OnionService: onionService, - Port: port, - } - 
case v3OnionAddr: - var h [tor.V3DecodedLen]byte - if _, err := r.Read(h[:]); err != nil { - return nil, er.E(err) - } - - var p [2]byte - if _, err := r.Read(p[:]); err != nil { - return nil, er.E(err) - } - - onionService := tor.Base32Encoding.EncodeToString(h[:]) - onionService += tor.OnionSuffix - port := int(binary.BigEndian.Uint16(p[:])) - - address = &tor.OnionAddr{ - OnionService: onionService, - Port: port, - } - default: - return nil, ErrUnknownAddressType.Default() - } - - return address, nil -} - -// serializeAddr serializes an address into its raw bytes representation so that -// it can be deserialized without requiring address resolution. -func serializeAddr(w io.Writer, address net.Addr) er.R { - switch addr := address.(type) { - case *net.TCPAddr: - return encodeTCPAddr(w, addr) - case *tor.OnionAddr: - return encodeOnionAddr(w, addr) - default: - return ErrUnknownAddressType.Default() - } -} diff --git a/lnd/channeldb/migration_01_to_11/channel.go b/lnd/channeldb/migration_01_to_11/channel.go deleted file mode 100644 index 4f2c6010..00000000 --- a/lnd/channeldb/migration_01_to_11/channel.go +++ /dev/null @@ -1,751 +0,0 @@ -package migration_01_to_11 - -import ( - "fmt" - "io" - "strconv" - "strings" - "sync" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/lnd/keychain" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/shachain" - "github.com/pkt-cash/pktd/wire" -) - -var ( - // closedChannelBucket stores summarization information concerning - // previously open, but now closed channels. - closedChannelBucket = []byte("closed-chan-bucket") - - // openChanBucket stores all the currently open channels. This bucket - // has a second, nested bucket which is keyed by a node's ID. Within - // that node ID bucket, all attributes required to track, update, and - // close a channel are stored. 
- // - // openChan -> nodeID -> chanPoint - // - // TODO(roasbeef): flesh out comment - openChannelBucket = []byte("open-chan-bucket") -) - -// ChannelType is an enum-like type that describes one of several possible -// channel types. Each open channel is associated with a particular type as the -// channel type may determine how higher level operations are conducted such as -// fee negotiation, channel closing, the format of HTLCs, etc. -// TODO(roasbeef): split up per-chain? -type ChannelType uint8 - -const ( - // NOTE: iota isn't used here for this enum needs to be stable - // long-term as it will be persisted to the database. - - // SingleFunder represents a channel wherein one party solely funds the - // entire capacity of the channel. - SingleFunder ChannelType = 0 -) - -// ChannelConstraints represents a set of constraints meant to allow a node to -// limit their exposure, enact flow control and ensure that all HTLCs are -// economically relevant. This struct will be mirrored for both sides of the -// channel, as each side will enforce various constraints that MUST be adhered -// to for the life time of the channel. The parameters for each of these -// constraints are static for the duration of the channel, meaning the channel -// must be torn down for them to change. -type ChannelConstraints struct { - // DustLimit is the threshold (in satoshis) below which any outputs - // should be trimmed. When an output is trimmed, it isn't materialized - // as an actual output, but is instead burned to miner's fees. - DustLimit btcutil.Amount - - // ChanReserve is an absolute reservation on the channel for the - // owner of this set of constraints. This means that the current - // settled balance for this node CANNOT dip below the reservation - // amount. This acts as a defense against costless attacks when - // either side no longer has any skin in the game. 
- ChanReserve btcutil.Amount - - // MaxPendingAmount is the maximum pending HTLC value that the - // owner of these constraints can offer the remote node at a - // particular time. - MaxPendingAmount lnwire.MilliSatoshi - - // MinHTLC is the minimum HTLC value that the owner of these - // constraints can offer the remote node. If any HTLCs below this - // amount are offered, then the HTLC will be rejected. This, in - // tandem with the dust limit allows a node to regulate the - // smallest HTLC that it deems economically relevant. - MinHTLC lnwire.MilliSatoshi - - // MaxAcceptedHtlcs is the maximum number of HTLCs that the owner of - // this set of constraints can offer the remote node. This allows each - // node to limit their over all exposure to HTLCs that may need to be - // acted upon in the case of a unilateral channel closure or a contract - // breach. - MaxAcceptedHtlcs uint16 - - // CsvDelay is the relative time lock delay expressed in blocks. Any - // settled outputs that pay to the owner of this channel configuration - // MUST ensure that the delay branch uses this value as the relative - // time lock. Similarly, any HTLC's offered by this node should use - // this value as well. - CsvDelay uint16 -} - -// ChannelConfig is a struct that houses the various configuration opens for -// channels. Each side maintains an instance of this configuration file as it -// governs: how the funding and commitment transaction to be created, the -// nature of HTLC's allotted, the keys to be used for delivery, and relative -// time lock parameters. -type ChannelConfig struct { - // ChannelConstraints is the set of constraints that must be upheld for - // the duration of the channel for the owner of this channel - // configuration. Constraints govern a number of flow control related - // parameters, also including the smallest HTLC that will be accepted - // by a participant. 
- ChannelConstraints - - // MultiSigKey is the key to be used within the 2-of-2 output script - // for the owner of this channel config. - MultiSigKey keychain.KeyDescriptor - - // RevocationBasePoint is the base public key to be used when deriving - // revocation keys for the remote node's commitment transaction. This - // will be combined along with a per commitment secret to derive a - // unique revocation key for each state. - RevocationBasePoint keychain.KeyDescriptor - - // PaymentBasePoint is the base public key to be used when deriving - // the key used within the non-delayed pay-to-self output on the - // commitment transaction for a node. This will be combined with a - // tweak derived from the per-commitment point to ensure unique keys - // for each commitment transaction. - PaymentBasePoint keychain.KeyDescriptor - - // DelayBasePoint is the base public key to be used when deriving the - // key used within the delayed pay-to-self output on the commitment - // transaction for a node. This will be combined with a tweak derived - // from the per-commitment point to ensure unique keys for each - // commitment transaction. - DelayBasePoint keychain.KeyDescriptor - - // HtlcBasePoint is the base public key to be used when deriving the - // local HTLC key. The derived key (combined with the tweak derived - // from the per-commitment point) is used within the "to self" clause - // within any HTLC output scripts. - HtlcBasePoint keychain.KeyDescriptor -} - -// ChannelCommitment is a snapshot of the commitment state at a particular -// point in the commitment chain. With each state transition, a snapshot of the -// current state along with all non-settled HTLCs are recorded. These snapshots -// detail the state of the _remote_ party's commitment at a particular state -// number. For ourselves (the local node) we ONLY store our most recent -// (unrevoked) state for safety purposes. 
-type ChannelCommitment struct { - // CommitHeight is the update number that this ChannelDelta represents - // the total number of commitment updates to this point. This can be - // viewed as sort of a "commitment height" as this number is - // monotonically increasing. - CommitHeight uint64 - - // LocalLogIndex is the cumulative log index index of the local node at - // this point in the commitment chain. This value will be incremented - // for each _update_ added to the local update log. - LocalLogIndex uint64 - - // LocalHtlcIndex is the current local running HTLC index. This value - // will be incremented for each outgoing HTLC the local node offers. - LocalHtlcIndex uint64 - - // RemoteLogIndex is the cumulative log index index of the remote node - // at this point in the commitment chain. This value will be - // incremented for each _update_ added to the remote update log. - RemoteLogIndex uint64 - - // RemoteHtlcIndex is the current remote running HTLC index. This value - // will be incremented for each outgoing HTLC the remote node offers. - RemoteHtlcIndex uint64 - - // LocalBalance is the current available settled balance within the - // channel directly spendable by us. - LocalBalance lnwire.MilliSatoshi - - // RemoteBalance is the current available settled balance within the - // channel directly spendable by the remote node. - RemoteBalance lnwire.MilliSatoshi - - // CommitFee is the amount calculated to be paid in fees for the - // current set of commitment transactions. The fee amount is persisted - // with the channel in order to allow the fee amount to be removed and - // recalculated with each channel state update, including updates that - // happen after a system restart. - CommitFee btcutil.Amount - - // FeePerKw is the min satoshis/kilo-weight that should be paid within - // the commitment transaction for the entire duration of the channel's - // lifetime. 
This field may be updated during normal operation of the - // channel as on-chain conditions change. - // - // TODO(halseth): make this SatPerKWeight. Cannot be done atm because - // this will cause the import cycle lnwallet<->channeldb. Fee - // estimation stuff should be in its own package. - FeePerKw btcutil.Amount - - // CommitTx is the latest version of the commitment state, broadcast - // able by us. - CommitTx *wire.MsgTx - - // CommitSig is one half of the signature required to fully complete - // the script for the commitment transaction above. This is the - // signature signed by the remote party for our version of the - // commitment transactions. - CommitSig []byte - - // Htlcs is the set of HTLC's that are pending at this particular - // commitment height. - Htlcs []HTLC - - // TODO(roasbeef): pending commit pointer? - // * lets just walk through -} - -// ChannelStatus is a bit vector used to indicate whether an OpenChannel is in -// the default usable state, or a state where it shouldn't be used. -type ChannelStatus uint8 - -var ( - // ChanStatusDefault is the normal state of an open channel. - ChanStatusDefault ChannelStatus - - // ChanStatusBorked indicates that the channel has entered an - // irreconcilable state, triggered by a state desynchronization or - // channel breach. Channels in this state should never be added to the - // htlc switch. - ChanStatusBorked ChannelStatus = 1 - - // ChanStatusCommitBroadcasted indicates that a commitment for this - // channel has been broadcasted. - ChanStatusCommitBroadcasted ChannelStatus = 1 << 1 - - // ChanStatusLocalDataLoss indicates that we have lost channel state - // for this channel, and broadcasting our latest commitment might be - // considered a breach. - // - // TODO(halseh): actually enforce that we are not force closing such a - // channel. 
- ChanStatusLocalDataLoss ChannelStatus = 1 << 2 - - // ChanStatusRestored is a status flag that signals that the channel - // has been restored, and doesn't have all the fields a typical channel - // will have. - ChanStatusRestored ChannelStatus = 1 << 3 -) - -// chanStatusStrings maps a ChannelStatus to a human friendly string that -// describes that status. -var chanStatusStrings = map[ChannelStatus]string{ - ChanStatusDefault: "ChanStatusDefault", - ChanStatusBorked: "ChanStatusBorked", - ChanStatusCommitBroadcasted: "ChanStatusCommitBroadcasted", - ChanStatusLocalDataLoss: "ChanStatusLocalDataLoss", - ChanStatusRestored: "ChanStatusRestored", -} - -// orderedChanStatusFlags is an in-order list of all that channel status flags. -var orderedChanStatusFlags = []ChannelStatus{ - ChanStatusDefault, - ChanStatusBorked, - ChanStatusCommitBroadcasted, - ChanStatusLocalDataLoss, - ChanStatusRestored, -} - -// String returns a human-readable representation of the ChannelStatus. -func (c ChannelStatus) String() string { - // If no flags are set, then this is the default case. - if c == 0 { - return chanStatusStrings[ChanStatusDefault] - } - - // Add individual bit flags. - statusStr := "" - for _, flag := range orderedChanStatusFlags { - if c&flag == flag { - statusStr += chanStatusStrings[flag] + "|" - c -= flag - } - } - - // Remove anything to the right of the final bar, including it as well. - statusStr = strings.TrimRight(statusStr, "|") - - // Add any remaining flags which aren't accounted for as hex. - if c != 0 { - statusStr += "|0x" + strconv.FormatUint(uint64(c), 16) - } - - // If this was purely an unknown flag, then remove the extra bar at the - // start of the string. - statusStr = strings.TrimLeft(statusStr, "|") - - return statusStr -} - -// OpenChannel encapsulates the persistent and dynamic state of an open channel -// with a remote node. An open channel supports several options for on-disk -// serialization depending on the exact context. 
Full (upon channel creation) -// state commitments, and partial (due to a commitment update) writes are -// supported. Each partial write due to a state update appends the new update -// to an on-disk log, which can then subsequently be queried in order to -// "time-travel" to a prior state. -type OpenChannel struct { - // ChanType denotes which type of channel this is. - ChanType ChannelType - - // ChainHash is a hash which represents the blockchain that this - // channel will be opened within. This value is typically the genesis - // hash. In the case that the original chain went through a contentious - // hard-fork, then this value will be tweaked using the unique fork - // point on each branch. - ChainHash chainhash.Hash - - // FundingOutpoint is the outpoint of the final funding transaction. - // This value uniquely and globally identifies the channel within the - // target blockchain as specified by the chain hash parameter. - FundingOutpoint wire.OutPoint - - // ShortChannelID encodes the exact location in the chain in which the - // channel was initially confirmed. This includes: the block height, - // transaction index, and the output within the target transaction. - ShortChannelID lnwire.ShortChannelID - - // IsPending indicates whether a channel's funding transaction has been - // confirmed. - IsPending bool - - // IsInitiator is a bool which indicates if we were the original - // initiator for the channel. This value may affect how higher levels - // negotiate fees, or close the channel. - IsInitiator bool - - // FundingBroadcastHeight is the height in which the funding - // transaction was broadcast. This value can be used by higher level - // sub-systems to determine if a channel is stale and/or should have - // been confirmed before a certain height. 
- FundingBroadcastHeight uint32 - - // NumConfsRequired is the number of confirmations a channel's funding - // transaction must have received in order to be considered available - // for normal transactional use. - NumConfsRequired uint16 - - // ChannelFlags holds the flags that were sent as part of the - // open_channel message. - ChannelFlags lnwire.FundingFlag - - // IdentityPub is the identity public key of the remote node this - // channel has been established with. - IdentityPub *btcec.PublicKey - - // Capacity is the total capacity of this channel. - Capacity btcutil.Amount - - // TotalMSatSent is the total number of milli-satoshis we've sent - // within this channel. - TotalMSatSent lnwire.MilliSatoshi - - // TotalMSatReceived is the total number of milli-satoshis we've - // received within this channel. - TotalMSatReceived lnwire.MilliSatoshi - - // LocalChanCfg is the channel configuration for the local node. - LocalChanCfg ChannelConfig - - // RemoteChanCfg is the channel configuration for the remote node. - RemoteChanCfg ChannelConfig - - // LocalCommitment is the current local commitment state for the local - // party. This is stored distinct from the state of the remote party - // as there are certain asymmetric parameters which affect the - // structure of each commitment. - LocalCommitment ChannelCommitment - - // RemoteCommitment is the current remote commitment state for the - // remote party. This is stored distinct from the state of the local - // party as there are certain asymmetric parameters which affect the - // structure of each commitment. - RemoteCommitment ChannelCommitment - - // RemoteCurrentRevocation is the current revocation for their - // commitment transaction. However, since this the derived public key, - // we don't yet have the private key so we aren't yet able to verify - // that it's actually in the hash chain. 
- RemoteCurrentRevocation *btcec.PublicKey - - // RemoteNextRevocation is the revocation key to be used for the *next* - // commitment transaction we create for the local node. Within the - // specification, this value is referred to as the - // per-commitment-point. - RemoteNextRevocation *btcec.PublicKey - - // RevocationProducer is used to generate the revocation in such a way - // that remote side might store it efficiently and have the ability to - // restore the revocation by index if needed. Current implementation of - // secret producer is shachain producer. - RevocationProducer shachain.Producer - - // RevocationStore is used to efficiently store the revocations for - // previous channels states sent to us by remote side. Current - // implementation of secret store is shachain store. - RevocationStore shachain.Store - - // FundingTxn is the transaction containing this channel's funding - // outpoint. Upon restarts, this txn will be rebroadcast if the channel - // is found to be pending. - // - // NOTE: This value will only be populated for single-funder channels - // for which we are the initiator. - FundingTxn *wire.MsgTx - - // TODO(roasbeef): eww - Db *DB - - // TODO(roasbeef): just need to store local and remote HTLC's? - - sync.RWMutex -} - -// ShortChanID returns the current ShortChannelID of this channel. -func (c *OpenChannel) ShortChanID() lnwire.ShortChannelID { - c.RLock() - defer c.RUnlock() - - return c.ShortChannelID -} - -// HTLC is the on-disk representation of a hash time-locked contract. HTLCs are -// contained within ChannelDeltas which encode the current state of the -// commitment between state updates. -// -// TODO(roasbeef): save space by using smaller ints at tail end? -type HTLC struct { - // Signature is the signature for the second level covenant transaction - // for this HTLC. 
The second level transaction is a timeout tx in the - // case that this is an outgoing HTLC, and a success tx in the case - // that this is an incoming HTLC. - // - // TODO(roasbeef): make [64]byte instead? - Signature []byte - - // RHash is the payment hash of the HTLC. - RHash [32]byte - - // Amt is the amount of milli-satoshis this HTLC escrows. - Amt lnwire.MilliSatoshi - - // RefundTimeout is the absolute timeout on the HTLC that the sender - // must wait before reclaiming the funds in limbo. - RefundTimeout uint32 - - // OutputIndex is the output index for this particular HTLC output - // within the commitment transaction. - OutputIndex int32 - - // Incoming denotes whether we're the receiver or the sender of this - // HTLC. - Incoming bool - - // OnionBlob is an opaque blob which is used to complete multi-hop - // routing. - OnionBlob []byte - - // HtlcIndex is the HTLC counter index of this active, outstanding - // HTLC. This differs from the LogIndex, as the HtlcIndex is only - // incremented for each offered HTLC, while they LogIndex is - // incremented for each update (includes settle+fail). - HtlcIndex uint64 - - // LogIndex is the cumulative log index of this HTLC. This differs - // from the HtlcIndex as this will be incremented for each new log - // update added. - LogIndex uint64 -} - -// CircuitKey is used by a channel to uniquely identify the HTLCs it receives -// from the switch, and is used to purge our in-memory state of HTLCs that have -// already been processed by a link. Two list of CircuitKeys are included in -// each CommitDiff to allow a link to determine which in-memory htlcs directed -// the opening and closing of circuits in the switch's circuit map. -type CircuitKey struct { - // ChanID is the short chanid indicating the HTLC's origin. - // - // NOTE: It is fine for this value to be blank, as this indicates a - // locally-sourced payment. 
- ChanID lnwire.ShortChannelID - - // HtlcID is the unique htlc index predominately assigned by links, - // though can also be assigned by switch in the case of locally-sourced - // payments. - HtlcID uint64 -} - -// String returns a string representation of the CircuitKey. -func (k CircuitKey) String() string { - return fmt.Sprintf("(Chan ID=%s, HTLC ID=%d)", k.ChanID, k.HtlcID) -} - -// ClosureType is an enum like structure that details exactly _how_ a channel -// was closed. Three closure types are currently possible: none, cooperative, -// local force close, remote force close, and (remote) breach. -type ClosureType uint8 - -const ( - // RemoteForceClose indicates that the remote peer has unilaterally - // broadcast their current commitment state on-chain. - RemoteForceClose ClosureType = 4 -) - -// ChannelCloseSummary contains the final state of a channel at the point it -// was closed. Once a channel is closed, all the information pertaining to that -// channel within the openChannelBucket is deleted, and a compact summary is -// put in place instead. -type ChannelCloseSummary struct { - // ChanPoint is the outpoint for this channel's funding transaction, - // and is used as a unique identifier for the channel. - ChanPoint wire.OutPoint - - // ShortChanID encodes the exact location in the chain in which the - // channel was initially confirmed. This includes: the block height, - // transaction index, and the output within the target transaction. - ShortChanID lnwire.ShortChannelID - - // ChainHash is the hash of the genesis block that this channel resides - // within. - ChainHash chainhash.Hash - - // ClosingTXID is the txid of the transaction which ultimately closed - // this channel. - ClosingTXID chainhash.Hash - - // RemotePub is the public key of the remote peer that we formerly had - // a channel with. - RemotePub *btcec.PublicKey - - // Capacity was the total capacity of the channel. 
- Capacity btcutil.Amount - - // CloseHeight is the height at which the funding transaction was - // spent. - CloseHeight uint32 - - // SettledBalance is our total balance settled balance at the time of - // channel closure. This _does not_ include the sum of any outputs that - // have been time-locked as a result of the unilateral channel closure. - SettledBalance btcutil.Amount - - // TimeLockedBalance is the sum of all the time-locked outputs at the - // time of channel closure. If we triggered the force closure of this - // channel, then this value will be non-zero if our settled output is - // above the dust limit. If we were on the receiving side of a channel - // force closure, then this value will be non-zero if we had any - // outstanding outgoing HTLC's at the time of channel closure. - TimeLockedBalance btcutil.Amount - - // CloseType details exactly _how_ the channel was closed. Five closure - // types are possible: cooperative, local force, remote force, breach - // and funding canceled. - CloseType ClosureType - - // IsPending indicates whether this channel is in the 'pending close' - // state, which means the channel closing transaction has been - // confirmed, but not yet been fully resolved. In the case of a channel - // that has been cooperatively closed, it will go straight into the - // fully resolved state as soon as the closing transaction has been - // confirmed. However, for channels that have been force closed, they'll - // stay marked as "pending" until _all_ the pending funds have been - // swept. - IsPending bool - - // RemoteCurrentRevocation is the current revocation for their - // commitment transaction. However, since this is the derived public key, - // we don't yet have the private key so we aren't yet able to verify - // that it's actually in the hash chain. 
- RemoteCurrentRevocation *btcec.PublicKey - - // RemoteNextRevocation is the revocation key to be used for the *next* - // commitment transaction we create for the local node. Within the - // specification, this value is referred to as the - // per-commitment-point. - RemoteNextRevocation *btcec.PublicKey - - // LocalChanCfg is the channel configuration for the local node. - LocalChanConfig ChannelConfig - - // LastChanSyncMsg is the ChannelReestablish message for this channel - // for the state at the point where it was closed. - LastChanSyncMsg *lnwire.ChannelReestablish -} - -func serializeChannelCloseSummary(w io.Writer, cs *ChannelCloseSummary) er.R { - err := WriteElements(w, - cs.ChanPoint, cs.ShortChanID, cs.ChainHash, cs.ClosingTXID, - cs.CloseHeight, cs.RemotePub, cs.Capacity, cs.SettledBalance, - cs.TimeLockedBalance, cs.CloseType, cs.IsPending, - ) - if err != nil { - return err - } - - // If this is a close channel summary created before the addition of - // the new fields, then we can exit here. - if cs.RemoteCurrentRevocation == nil { - return WriteElements(w, false) - } - - // If fields are present, write boolean to indicate this, and continue. - if err := WriteElements(w, true); err != nil { - return err - } - - if err := WriteElements(w, cs.RemoteCurrentRevocation); err != nil { - return err - } - - if err := writeChanConfig(w, &cs.LocalChanConfig); err != nil { - return err - } - - // The RemoteNextRevocation field is optional, as it's possible for a - // channel to be closed before we learn of the next unrevoked - // revocation point for the remote party. Write a boolen indicating - // whether this field is present or not. - if err := WriteElements(w, cs.RemoteNextRevocation != nil); err != nil { - return err - } - - // Write the field, if present. - if cs.RemoteNextRevocation != nil { - if err = WriteElements(w, cs.RemoteNextRevocation); err != nil { - return err - } - } - - // Write whether the channel sync message is present. 
- if err := WriteElements(w, cs.LastChanSyncMsg != nil); err != nil { - return err - } - - // Write the channel sync message, if present. - if cs.LastChanSyncMsg != nil { - if err := WriteElements(w, cs.LastChanSyncMsg); err != nil { - return err - } - } - - return nil -} - -func deserializeCloseChannelSummary(r io.Reader) (*ChannelCloseSummary, er.R) { - c := &ChannelCloseSummary{} - - err := ReadElements(r, - &c.ChanPoint, &c.ShortChanID, &c.ChainHash, &c.ClosingTXID, - &c.CloseHeight, &c.RemotePub, &c.Capacity, &c.SettledBalance, - &c.TimeLockedBalance, &c.CloseType, &c.IsPending, - ) - if err != nil { - return nil, err - } - - // We'll now check to see if the channel close summary was encoded with - // any of the additional optional fields. - var hasNewFields bool - err = ReadElements(r, &hasNewFields) - if err != nil { - return nil, err - } - - // If fields are not present, we can return. - if !hasNewFields { - return c, nil - } - - // Otherwise read the new fields. - if err := ReadElements(r, &c.RemoteCurrentRevocation); err != nil { - return nil, err - } - - if err := readChanConfig(r, &c.LocalChanConfig); err != nil { - return nil, err - } - - // Finally, we'll attempt to read the next unrevoked commitment point - // for the remote party. If we closed the channel before receiving a - // funding locked message then this might not be present. A boolean - // indicating whether the field is present will come first. - var hasRemoteNextRevocation bool - err = ReadElements(r, &hasRemoteNextRevocation) - if err != nil { - return nil, err - } - - // If this field was written, read it. - if hasRemoteNextRevocation { - err = ReadElements(r, &c.RemoteNextRevocation) - if err != nil { - return nil, err - } - } - - // Check if we have a channel sync message to read. 
- var hasChanSyncMsg bool - err = ReadElements(r, &hasChanSyncMsg) - if er.Wrapped(err) == io.EOF { - return c, nil - } else if err != nil { - return nil, err - } - - // If a chan sync message is present, read it. - if hasChanSyncMsg { - // We must pass in reference to a lnwire.Message for the codec - // to support it. - var msg lnwire.Message - if err := ReadElements(r, &msg); err != nil { - return nil, err - } - - chanSync, ok := msg.(*lnwire.ChannelReestablish) - if !ok { - return nil, er.New("unable cast db Message to " + - "ChannelReestablish") - } - c.LastChanSyncMsg = chanSync - } - - return c, nil -} - -func writeChanConfig(b io.Writer, c *ChannelConfig) er.R { - return WriteElements(b, - c.DustLimit, c.MaxPendingAmount, c.ChanReserve, c.MinHTLC, - c.MaxAcceptedHtlcs, c.CsvDelay, c.MultiSigKey, - c.RevocationBasePoint, c.PaymentBasePoint, c.DelayBasePoint, - c.HtlcBasePoint, - ) -} - -func readChanConfig(b io.Reader, c *ChannelConfig) er.R { - return ReadElements(b, - &c.DustLimit, &c.MaxPendingAmount, &c.ChanReserve, - &c.MinHTLC, &c.MaxAcceptedHtlcs, &c.CsvDelay, - &c.MultiSigKey, &c.RevocationBasePoint, - &c.PaymentBasePoint, &c.DelayBasePoint, - &c.HtlcBasePoint, - ) -} diff --git a/lnd/channeldb/migration_01_to_11/channel_test.go b/lnd/channeldb/migration_01_to_11/channel_test.go deleted file mode 100644 index f65bec8f..00000000 --- a/lnd/channeldb/migration_01_to_11/channel_test.go +++ /dev/null @@ -1,222 +0,0 @@ -package migration_01_to_11 - -import ( - "bytes" - "io/ioutil" - "math/rand" - "os" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/lnd/keychain" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/shachain" - _ "github.com/pkt-cash/pktd/pktwallet/walletdb/bdb" - "github.com/pkt-cash/pktd/wire" -) - -var ( - key = [chainhash.HashSize]byte{ - 0x81, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 
0xda, - 0x68, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17, - 0xd, 0xe7, 0x93, 0xe4, 0xb7, 0x25, 0xb8, 0x4d, - 0x1e, 0xb, 0x4c, 0xf9, 0x9e, 0xc5, 0x8c, 0xe9, - } - rev = [chainhash.HashSize]byte{ - 0x51, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda, - 0x48, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17, - 0x2d, 0xe7, 0x93, 0xe4, - } - testTx = &wire.MsgTx{ - Version: 1, - TxIn: []*wire.TxIn{ - { - PreviousOutPoint: wire.OutPoint{ - Hash: chainhash.Hash{}, - Index: 0xffffffff, - }, - SignatureScript: []byte{0x04, 0x31, 0xdc, 0x00, 0x1b, 0x01, 0x62}, - Sequence: 0xffffffff, - }, - }, - TxOut: []*wire.TxOut{ - { - Value: 5000000000, - PkScript: []byte{ - 0x41, // OP_DATA_65 - 0x04, 0xd6, 0x4b, 0xdf, 0xd0, 0x9e, 0xb1, 0xc5, - 0xfe, 0x29, 0x5a, 0xbd, 0xeb, 0x1d, 0xca, 0x42, - 0x81, 0xbe, 0x98, 0x8e, 0x2d, 0xa0, 0xb6, 0xc1, - 0xc6, 0xa5, 0x9d, 0xc2, 0x26, 0xc2, 0x86, 0x24, - 0xe1, 0x81, 0x75, 0xe8, 0x51, 0xc9, 0x6b, 0x97, - 0x3d, 0x81, 0xb0, 0x1c, 0xc3, 0x1f, 0x04, 0x78, - 0x34, 0xbc, 0x06, 0xd6, 0xd6, 0xed, 0xf6, 0x20, - 0xd1, 0x84, 0x24, 0x1a, 0x6a, 0xed, 0x8b, 0x63, - 0xa6, // 65-byte signature - 0xac, // OP_CHECKSIG - }, - }, - }, - LockTime: 5, - } - privKey, pubKey = btcec.PrivKeyFromBytes(btcec.S256(), key[:]) -) - -// makeTestDB creates a new instance of the ChannelDB for testing purposes. A -// callback which cleans up the created temporary directories is also returned -// and intended to be executed after the test completes. -func makeTestDB() (*DB, func(), er.R) { - // First, create a temporary directory to be used for the duration of - // this test. - tempDirName, errr := ioutil.TempDir("", "channeldb") - if errr != nil { - return nil, nil, er.E(errr) - } - - // Next, create channeldb for the first time. 
- cdb, err := Open(tempDirName) - if err != nil { - return nil, nil, err - } - - cleanUp := func() { - cdb.Close() - os.RemoveAll(tempDirName) - } - - return cdb, cleanUp, nil -} - -func createTestChannelState(cdb *DB) (*OpenChannel, er.R) { - // Simulate 1000 channel updates. - producer, err := shachain.NewRevocationProducerFromBytes(key[:]) - if err != nil { - return nil, err - } - store := shachain.NewRevocationStore() - for i := 0; i < 1; i++ { - preImage, err := producer.AtIndex(uint64(i)) - if err != nil { - return nil, err - } - - if err := store.AddNextEntry(preImage); err != nil { - return nil, err - } - } - - localCfg := ChannelConfig{ - ChannelConstraints: ChannelConstraints{ - DustLimit: btcutil.Amount(rand.Int63()), - MaxPendingAmount: lnwire.MilliSatoshi(rand.Int63()), - ChanReserve: btcutil.Amount(rand.Int63()), - MinHTLC: lnwire.MilliSatoshi(rand.Int63()), - MaxAcceptedHtlcs: uint16(rand.Int31()), - CsvDelay: uint16(rand.Int31()), - }, - MultiSigKey: keychain.KeyDescriptor{ - PubKey: privKey.PubKey(), - }, - RevocationBasePoint: keychain.KeyDescriptor{ - PubKey: privKey.PubKey(), - }, - PaymentBasePoint: keychain.KeyDescriptor{ - PubKey: privKey.PubKey(), - }, - DelayBasePoint: keychain.KeyDescriptor{ - PubKey: privKey.PubKey(), - }, - HtlcBasePoint: keychain.KeyDescriptor{ - PubKey: privKey.PubKey(), - }, - } - remoteCfg := ChannelConfig{ - ChannelConstraints: ChannelConstraints{ - DustLimit: btcutil.Amount(rand.Int63()), - MaxPendingAmount: lnwire.MilliSatoshi(rand.Int63()), - ChanReserve: btcutil.Amount(rand.Int63()), - MinHTLC: lnwire.MilliSatoshi(rand.Int63()), - MaxAcceptedHtlcs: uint16(rand.Int31()), - CsvDelay: uint16(rand.Int31()), - }, - MultiSigKey: keychain.KeyDescriptor{ - PubKey: privKey.PubKey(), - KeyLocator: keychain.KeyLocator{ - Family: keychain.KeyFamilyMultiSig, - Index: 9, - }, - }, - RevocationBasePoint: keychain.KeyDescriptor{ - PubKey: privKey.PubKey(), - KeyLocator: keychain.KeyLocator{ - Family: 
keychain.KeyFamilyRevocationBase, - Index: 8, - }, - }, - PaymentBasePoint: keychain.KeyDescriptor{ - PubKey: privKey.PubKey(), - KeyLocator: keychain.KeyLocator{ - Family: keychain.KeyFamilyPaymentBase, - Index: 7, - }, - }, - DelayBasePoint: keychain.KeyDescriptor{ - PubKey: privKey.PubKey(), - KeyLocator: keychain.KeyLocator{ - Family: keychain.KeyFamilyDelayBase, - Index: 6, - }, - }, - HtlcBasePoint: keychain.KeyDescriptor{ - PubKey: privKey.PubKey(), - KeyLocator: keychain.KeyLocator{ - Family: keychain.KeyFamilyHtlcBase, - Index: 5, - }, - }, - } - - chanID := lnwire.NewShortChanIDFromInt(uint64(rand.Int63())) - - return &OpenChannel{ - ChanType: SingleFunder, - ChainHash: key, - FundingOutpoint: wire.OutPoint{Hash: key, Index: rand.Uint32()}, - ShortChannelID: chanID, - IsInitiator: true, - IsPending: true, - IdentityPub: pubKey, - Capacity: btcutil.Amount(10000), - LocalChanCfg: localCfg, - RemoteChanCfg: remoteCfg, - TotalMSatSent: 8, - TotalMSatReceived: 2, - LocalCommitment: ChannelCommitment{ - CommitHeight: 0, - LocalBalance: lnwire.MilliSatoshi(9000), - RemoteBalance: lnwire.MilliSatoshi(3000), - CommitFee: btcutil.Amount(rand.Int63()), - FeePerKw: btcutil.Amount(5000), - CommitTx: testTx, - CommitSig: bytes.Repeat([]byte{1}, 71), - }, - RemoteCommitment: ChannelCommitment{ - CommitHeight: 0, - LocalBalance: lnwire.MilliSatoshi(3000), - RemoteBalance: lnwire.MilliSatoshi(9000), - CommitFee: btcutil.Amount(rand.Int63()), - FeePerKw: btcutil.Amount(5000), - CommitTx: testTx, - CommitSig: bytes.Repeat([]byte{1}, 71), - }, - NumConfsRequired: 4, - RemoteCurrentRevocation: privKey.PubKey(), - RemoteNextRevocation: privKey.PubKey(), - RevocationProducer: producer, - RevocationStore: store, - Db: cdb, - FundingTxn: testTx, - }, nil -} diff --git a/lnd/channeldb/migration_01_to_11/codec.go b/lnd/channeldb/migration_01_to_11/codec.go deleted file mode 100644 index 7892cb2d..00000000 --- a/lnd/channeldb/migration_01_to_11/codec.go +++ /dev/null @@ -1,449 +0,0 
@@ -package migration_01_to_11 - -import ( - "fmt" - "io" - "net" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/lnd/keychain" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/shachain" - "github.com/pkt-cash/pktd/wire" -) - -// writeOutpoint writes an outpoint to the passed writer using the minimal -// amount of bytes possible. -func writeOutpoint(w io.Writer, o *wire.OutPoint) er.R { - if _, err := util.Write(w, o.Hash[:]); err != nil { - return err - } - if err := util.WriteBin(w, byteOrder, o.Index); err != nil { - return err - } - - return nil -} - -// readOutpoint reads an outpoint from the passed reader that was previously -// written using the writeOutpoint struct. -func readOutpoint(r io.Reader, o *wire.OutPoint) er.R { - if _, err := util.ReadFull(r, o.Hash[:]); err != nil { - return err - } - if err := util.ReadBin(r, byteOrder, &o.Index); err != nil { - return err - } - - return nil -} - -// UnknownElementType is an error returned when the codec is unable to encode or -// decode a particular type. -type UnknownElementType struct { - method string - element interface{} -} - -// Error returns the name of the method that encountered the error, as well as -// the type that was unsupported. -func (e UnknownElementType) Error() string { - return fmt.Sprintf("Unknown type in %s: %T", e.method, e.element) -} - -// WriteElement is a one-stop shop to write the big endian representation of -// any element which is to be serialized for storage on disk. The passed -// io.Writer should be backed by an appropriately sized byte slice, or be able -// to dynamically expand to accommodate additional data. 
-func WriteElement(w io.Writer, element interface{}) er.R { - switch e := element.(type) { - case keychain.KeyDescriptor: - if err := util.WriteBin(w, byteOrder, e.Family); err != nil { - return err - } - if err := util.WriteBin(w, byteOrder, e.Index); err != nil { - return err - } - - if e.PubKey != nil { - if err := util.WriteBin(w, byteOrder, true); err != nil { - return er.Errorf("error writing serialized element: %s", err) - } - - return WriteElement(w, e.PubKey) - } - - return util.WriteBin(w, byteOrder, false) - case ChannelType: - if err := util.WriteBin(w, byteOrder, e); err != nil { - return err - } - - case chainhash.Hash: - if _, err := util.Write(w, e[:]); err != nil { - return err - } - - case wire.OutPoint: - return writeOutpoint(w, &e) - - case lnwire.ShortChannelID: - if err := util.WriteBin(w, byteOrder, e.ToUint64()); err != nil { - return err - } - - case lnwire.ChannelID: - if _, err := util.Write(w, e[:]); err != nil { - return err - } - - case int64, uint64: - if err := util.WriteBin(w, byteOrder, e); err != nil { - return err - } - - case uint32: - if err := util.WriteBin(w, byteOrder, e); err != nil { - return err - } - - case int32: - if err := util.WriteBin(w, byteOrder, e); err != nil { - return err - } - - case uint16: - if err := util.WriteBin(w, byteOrder, e); err != nil { - return err - } - - case uint8: - if err := util.WriteBin(w, byteOrder, e); err != nil { - return err - } - - case bool: - if err := util.WriteBin(w, byteOrder, e); err != nil { - return err - } - - case btcutil.Amount: - if err := util.WriteBin(w, byteOrder, uint64(e)); err != nil { - return err - } - - case lnwire.MilliSatoshi: - if err := util.WriteBin(w, byteOrder, uint64(e)); err != nil { - return err - } - - case *btcec.PrivateKey: - b := e.Serialize() - if _, err := util.Write(w, b); err != nil { - return err - } - - case *btcec.PublicKey: - b := e.SerializeCompressed() - if _, err := util.Write(w, b); err != nil { - return err - } - - case 
shachain.Producer: - return e.Encode(w) - - case shachain.Store: - return e.Encode(w) - - case *wire.MsgTx: - return e.Serialize(w) - - case [32]byte: - if _, err := util.Write(w, e[:]); err != nil { - return err - } - - case []byte: - if err := wire.WriteVarBytes(w, 0, e); err != nil { - return err - } - - case lnwire.Message: - if _, err := lnwire.WriteMessage(w, e, 0); err != nil { - return err - } - - case ChannelStatus: - if err := util.WriteBin(w, byteOrder, e); err != nil { - return err - } - - case ClosureType: - if err := util.WriteBin(w, byteOrder, e); err != nil { - return err - } - - case lnwire.FundingFlag: - if err := util.WriteBin(w, byteOrder, e); err != nil { - return err - } - - case net.Addr: - if err := serializeAddr(w, e); err != nil { - return err - } - - case []net.Addr: - if err := WriteElement(w, uint32(len(e))); err != nil { - return err - } - - for _, addr := range e { - if err := serializeAddr(w, addr); err != nil { - return err - } - } - - default: - return er.E(UnknownElementType{"WriteElement", e}) - } - - return nil -} - -// WriteElements is writes each element in the elements slice to the passed -// io.Writer using WriteElement. -func WriteElements(w io.Writer, elements ...interface{}) er.R { - for _, element := range elements { - err := WriteElement(w, element) - if err != nil { - return err - } - } - return nil -} - -// ReadElement is a one-stop utility function to deserialize any datastructure -// encoded using the serialization format of the database. 
-func ReadElement(r io.Reader, element interface{}) er.R { - switch e := element.(type) { - case *keychain.KeyDescriptor: - if err := util.ReadBin(r, byteOrder, &e.Family); err != nil { - return err - } - if err := util.ReadBin(r, byteOrder, &e.Index); err != nil { - return err - } - - var hasPubKey bool - if err := util.ReadBin(r, byteOrder, &hasPubKey); err != nil { - return err - } - - if hasPubKey { - return ReadElement(r, &e.PubKey) - } - - case *ChannelType: - if err := util.ReadBin(r, byteOrder, e); err != nil { - return err - } - - case *chainhash.Hash: - if _, err := util.ReadFull(r, e[:]); err != nil { - return err - } - - case *wire.OutPoint: - return readOutpoint(r, e) - - case *lnwire.ShortChannelID: - var a uint64 - if err := util.ReadBin(r, byteOrder, &a); err != nil { - return err - } - *e = lnwire.NewShortChanIDFromInt(a) - - case *lnwire.ChannelID: - if _, err := util.ReadFull(r, e[:]); err != nil { - return err - } - - case *int64, *uint64: - if err := util.ReadBin(r, byteOrder, e); err != nil { - return err - } - - case *uint32: - if err := util.ReadBin(r, byteOrder, e); err != nil { - return err - } - - case *int32: - if err := util.ReadBin(r, byteOrder, e); err != nil { - return err - } - - case *uint16: - if err := util.ReadBin(r, byteOrder, e); err != nil { - return err - } - - case *uint8: - if err := util.ReadBin(r, byteOrder, e); err != nil { - return err - } - - case *bool: - if err := util.ReadBin(r, byteOrder, e); err != nil { - return err - } - - case *btcutil.Amount: - var a uint64 - if err := util.ReadBin(r, byteOrder, &a); err != nil { - return err - } - - *e = btcutil.Amount(a) - - case *lnwire.MilliSatoshi: - var a uint64 - if err := util.ReadBin(r, byteOrder, &a); err != nil { - return err - } - - *e = lnwire.MilliSatoshi(a) - - case **btcec.PrivateKey: - var b [btcec.PrivKeyBytesLen]byte - if _, err := util.ReadFull(r, b[:]); err != nil { - return err - } - - priv, _ := btcec.PrivKeyFromBytes(btcec.S256(), b[:]) - *e = priv - - 
case **btcec.PublicKey: - var b [btcec.PubKeyBytesLenCompressed]byte - if _, err := util.ReadFull(r, b[:]); err != nil { - return err - } - - pubKey, err := btcec.ParsePubKey(b[:], btcec.S256()) - if err != nil { - return err - } - *e = pubKey - - case *shachain.Producer: - var root [32]byte - if _, err := util.ReadFull(r, root[:]); err != nil { - return err - } - - // TODO(roasbeef): remove - producer, err := shachain.NewRevocationProducerFromBytes(root[:]) - if err != nil { - return err - } - - *e = producer - - case *shachain.Store: - store, err := shachain.NewRevocationStoreFromBytes(r) - if err != nil { - return err - } - - *e = store - - case **wire.MsgTx: - tx := wire.NewMsgTx(2) - if err := tx.Deserialize(r); err != nil { - return err - } - - *e = tx - - case *[32]byte: - if _, err := util.ReadFull(r, e[:]); err != nil { - return err - } - - case *[]byte: - bytes, err := wire.ReadVarBytes(r, 0, 66000, "[]byte") - if err != nil { - return err - } - - *e = bytes - - case *lnwire.Message: - msg, err := lnwire.ReadMessage(r, 0) - if err != nil { - return err - } - - *e = msg - - case *ChannelStatus: - if err := util.ReadBin(r, byteOrder, e); err != nil { - return err - } - - case *ClosureType: - if err := util.ReadBin(r, byteOrder, e); err != nil { - return err - } - - case *lnwire.FundingFlag: - if err := util.ReadBin(r, byteOrder, e); err != nil { - return err - } - - case *net.Addr: - addr, err := deserializeAddr(r) - if err != nil { - return err - } - *e = addr - - case *[]net.Addr: - var numAddrs uint32 - if err := ReadElement(r, &numAddrs); err != nil { - return err - } - - *e = make([]net.Addr, numAddrs) - for i := uint32(0); i < numAddrs; i++ { - addr, err := deserializeAddr(r) - if err != nil { - return err - } - (*e)[i] = addr - } - - default: - return er.E(UnknownElementType{"ReadElement", e}) - } - - return nil -} - -// ReadElements deserializes a variable number of elements into the passed -// io.Reader, with each element being deserialized 
according to the ReadElement -// function. -func ReadElements(r io.Reader, elements ...interface{}) er.R { - for _, element := range elements { - err := ReadElement(r, element) - if err != nil { - return err - } - } - return nil -} diff --git a/lnd/channeldb/migration_01_to_11/db.go b/lnd/channeldb/migration_01_to_11/db.go deleted file mode 100644 index 50e072d2..00000000 --- a/lnd/channeldb/migration_01_to_11/db.go +++ /dev/null @@ -1,218 +0,0 @@ -package migration_01_to_11 - -import ( - "bytes" - "encoding/binary" - "os" - "path/filepath" - "time" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" -) - -const ( - dbName = "channel.db" - dbFilePermission = 0600 -) - -// migration is a function which takes a prior outdated version of the database -// instances and mutates the key/bucket structure to arrive at a more -// up-to-date version of the database. -type migration func(tx kvdb.RwTx) er.R - -var ( - // Big endian is the preferred byte order, due to cursor scans over - // integer keys iterating in order. - byteOrder = binary.BigEndian -) - -// DB is the primary datastore for the lnd daemon. The database stores -// information related to nodes, routing data, open/closed channels, fee -// schedules, and reputation data. -type DB struct { - kvdb.Backend - dbPath string - graph *ChannelGraph - now func() time.Time -} - -// Open opens an existing channeldb. Any necessary schemas migrations due to -// updates will take place as necessary. -func Open(dbPath string, modifiers ...OptionModifier) (*DB, er.R) { - path := filepath.Join(dbPath, dbName) - - if !fileExists(path) { - if err := createChannelDB(dbPath); err != nil { - return nil, err - } - } - - opts := DefaultOptions() - for _, modifier := range modifiers { - modifier(&opts) - } - - // Specify bbolt freelist options to reduce heap pressure in case the - // freelist grows to be very large. 
- bdb, err := kvdb.Open(kvdb.BoltBackendName, path, opts.NoFreelistSync) - if err != nil { - return nil, err - } - - chanDB := &DB{ - Backend: bdb, - dbPath: dbPath, - now: time.Now, - } - chanDB.graph = newChannelGraph( - chanDB, opts.RejectCacheSize, opts.ChannelCacheSize, - ) - - return chanDB, nil -} - -// createChannelDB creates and initializes a fresh version of channeldb. In -// the case that the target path has not yet been created or doesn't yet exist, -// then the path is created. Additionally, all required top-level buckets used -// within the database are created. -func createChannelDB(dbPath string) er.R { - if !fileExists(dbPath) { - if err := os.MkdirAll(dbPath, 0700); err != nil { - return er.E(err) - } - } - - path := filepath.Join(dbPath, dbName) - bdb, err := kvdb.Create(kvdb.BoltBackendName, path, false) - if err != nil { - return err - } - - errr := kvdb.Update(bdb, func(tx kvdb.RwTx) er.R { - if _, err := tx.CreateTopLevelBucket(openChannelBucket); err != nil { - return err - } - if _, err := tx.CreateTopLevelBucket(closedChannelBucket); err != nil { - return err - } - - if _, err := tx.CreateTopLevelBucket(invoiceBucket); err != nil { - return err - } - - if _, err := tx.CreateTopLevelBucket(paymentBucket); err != nil { - return err - } - - nodes, err := tx.CreateTopLevelBucket(nodeBucket) - if err != nil { - return err - } - _, err = nodes.CreateBucket(aliasIndexBucket) - if err != nil { - return err - } - _, err = nodes.CreateBucket(nodeUpdateIndexBucket) - if err != nil { - return err - } - - edges, err := tx.CreateTopLevelBucket(edgeBucket) - if err != nil { - return err - } - if _, err := edges.CreateBucket(edgeIndexBucket); err != nil { - return err - } - if _, err := edges.CreateBucket(edgeUpdateIndexBucket); err != nil { - return err - } - if _, err := edges.CreateBucket(channelPointBucket); err != nil { - return err - } - if _, err := edges.CreateBucket(zombieBucket); err != nil { - return err - } - - graphMeta, err := 
tx.CreateTopLevelBucket(graphMetaBucket) - if err != nil { - return err - } - _, err = graphMeta.CreateBucket(pruneLogBucket) - if err != nil { - return err - } - - if _, err := tx.CreateTopLevelBucket(metaBucket); err != nil { - return err - } - - meta := &Meta{ - DbVersionNumber: 0, - } - return putMeta(meta, tx) - }, func() {}) - if errr != nil { - return er.Errorf("unable to create new channeldb") - } - - return bdb.Close() -} - -// fileExists returns true if the file exists, and false otherwise. -func fileExists(path string) bool { - if _, err := os.Stat(path); err != nil { - if os.IsNotExist(err) { - return false - } - } - - return true -} - -// FetchClosedChannels attempts to fetch all closed channels from the database. -// The pendingOnly bool toggles if channels that aren't yet fully closed should -// be returned in the response or not. When a channel was cooperatively closed, -// it becomes fully closed after a single confirmation. When a channel was -// forcibly closed, it will become fully closed after _all_ the pending funds -// (if any) have been swept. -func (d *DB) FetchClosedChannels(pendingOnly bool) ([]*ChannelCloseSummary, er.R) { - var chanSummaries []*ChannelCloseSummary - - if err := kvdb.View(d, func(tx kvdb.RTx) er.R { - closeBucket := tx.ReadBucket(closedChannelBucket) - if closeBucket == nil { - return ErrNoClosedChannels.Default() - } - - return closeBucket.ForEach(func(chanID []byte, summaryBytes []byte) er.R { - summaryReader := bytes.NewReader(summaryBytes) - chanSummary, err := deserializeCloseChannelSummary(summaryReader) - if err != nil { - return err - } - - // If the query specified to only include pending - // channels, then we'll skip any channels which aren't - // currently pending. 
- if !chanSummary.IsPending && pendingOnly { - return nil - } - - chanSummaries = append(chanSummaries, chanSummary) - return nil - }) - }, func() { - chanSummaries = nil - }); err != nil { - return nil, err - } - - return chanSummaries, nil -} - -// ChannelGraph returns a new instance of the directed channel graph. -func (d *DB) ChannelGraph() *ChannelGraph { - return d.graph -} diff --git a/lnd/channeldb/migration_01_to_11/error.go b/lnd/channeldb/migration_01_to_11/error.go deleted file mode 100644 index 69cee0de..00000000 --- a/lnd/channeldb/migration_01_to_11/error.go +++ /dev/null @@ -1,63 +0,0 @@ -package migration_01_to_11 - -import "github.com/pkt-cash/pktd/btcutil/er" - -var ( - Err = er.NewErrorType("migration_01_to_11") - // ErrNoInvoicesCreated is returned when we don't have invoices in - // our database to return. - ErrNoInvoicesCreated = Err.CodeWithDetail("ErrNoInvoicesCreated", - "there are no existing invoices") - - // ErrNoPaymentsCreated is returned when bucket of payments hasn't been - // created. - ErrNoPaymentsCreated = Err.CodeWithDetail("ErrNoPaymentsCreated", - "there are no existing payments") - - // ErrGraphNotFound is returned when at least one of the components of - // graph doesn't exist. - ErrGraphNotFound = Err.CodeWithDetail("ErrGraphNotFound", - "graph bucket not initialized") - - // ErrSourceNodeNotSet is returned if the source node of the graph - // hasn't been added The source node is the center node within a - // star-graph. - ErrSourceNodeNotSet = Err.CodeWithDetail("ErrSourceNodeNotSet", - "source node does not exist") - - // ErrGraphNodeNotFound is returned when we're unable to find the target - // node. - ErrGraphNodeNotFound = Err.CodeWithDetail("ErrGraphNodeNotFound", - "unable to find node") - - // ErrEdgeNotFound is returned when an edge for the target chanID - // can't be found. 
- ErrEdgeNotFound = Err.CodeWithDetail("ErrEdgeNotFound", - "edge not found") - - // ErrUnknownAddressType is returned when a node's addressType is not - // an expected value. - ErrUnknownAddressType = Err.CodeWithDetail("ErrUnknownAddressType", - "address type cannot be resolved") - - // ErrNoClosedChannels is returned when a node is queries for all the - // channels it has closed, but it hasn't yet closed any channels. - ErrNoClosedChannels = Err.CodeWithDetail("ErrNoClosedChannels", - "no channel have been closed yet") - - // ErrEdgePolicyOptionalFieldNotFound is an error returned if a channel - // policy field is not found in the db even though its message flags - // indicate it should be. - ErrEdgePolicyOptionalFieldNotFound = Err.CodeWithDetail("ErrEdgePolicyOptionalFieldNotFound", - "optional field not present") -) - -// ErrTooManyExtraOpaqueBytes creates an error which should be returned if the -// caller attempts to write an announcement message which bares too many extra -// opaque bytes. We limit this value in order to ensure that we don't waste -// disk space due to nodes unnecessarily padding out their announcements with -// garbage data. 
-func ErrTooManyExtraOpaqueBytes(numBytes int) er.R { - return er.Errorf("max allowed number of opaque bytes is %v, received "+ - "%v bytes", MaxAllowedExtraOpaqueBytes, numBytes) -} diff --git a/lnd/channeldb/migration_01_to_11/graph.go b/lnd/channeldb/migration_01_to_11/graph.go deleted file mode 100644 index 07b6d246..00000000 --- a/lnd/channeldb/migration_01_to_11/graph.go +++ /dev/null @@ -1,1181 +0,0 @@ -package migration_01_to_11 - -import ( - "bytes" - "image/color" - "io" - "net" - "time" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/wire" -) - -var ( - // nodeBucket is a bucket which houses all the vertices or nodes within - // the channel graph. This bucket has a single-sub bucket which adds an - // additional index from pubkey -> alias. Within the top-level of this - // bucket, the key space maps a node's compressed public key to the - // serialized information for that node. Additionally, there's a - // special key "source" which stores the pubkey of the source node. The - // source node is used as the starting point for all graph/queries and - // traversals. The graph is formed as a star-graph with the source node - // at the center. - // - // maps: pubKey -> nodeInfo - // maps: source -> selfPubKey - nodeBucket = []byte("graph-node") - - // nodeUpdateIndexBucket is a sub-bucket of the nodeBucket. This bucket - // will be used to quickly look up the "freshness" of a node's last - // update to the network. The bucket only contains keys, and no values, - // it's mapping: - // - // maps: updateTime || nodeID -> nil - nodeUpdateIndexBucket = []byte("graph-node-update-index") - - // sourceKey is a special key that resides within the nodeBucket. 
The - // sourceKey maps a key to the public key of the "self node". - sourceKey = []byte("source") - - // aliasIndexBucket is a sub-bucket that's nested within the main - // nodeBucket. This bucket maps the public key of a node to its - // current alias. This bucket is provided as it can be used within a - // future UI layer to add an additional degree of confirmation. - aliasIndexBucket = []byte("alias") - - // edgeBucket is a bucket which houses all of the edge or channel - // information within the channel graph. This bucket essentially acts - // as an adjacency list, which in conjunction with a range scan, can be - // used to iterate over all the incoming and outgoing edges for a - // particular node. Key in the bucket use a prefix scheme which leads - // with the node's public key and sends with the compact edge ID. - // For each chanID, there will be two entries within the bucket, as the - // graph is directed: nodes may have different policies w.r.t to fees - // for their respective directions. - // - // maps: pubKey || chanID -> channel edge policy for node - edgeBucket = []byte("graph-edge") - - // unknownPolicy is represented as an empty slice. It is - // used as the value in edgeBucket for unknown channel edge policies. - // Unknown policies are still stored in the database to enable efficient - // lookup of incoming channel edges. - unknownPolicy = []byte{} - - // edgeIndexBucket is an index which can be used to iterate all edges - // in the bucket, grouping them according to their in/out nodes. - // Additionally, the items in this bucket also contain the complete - // edge information for a channel. The edge information includes the - // capacity of the channel, the nodes that made the channel, etc. This - // bucket resides within the edgeBucket above. 
Creation of an edge - // proceeds in two phases: first the edge is added to the edge index, - // afterwards the edgeBucket can be updated with the latest details of - // the edge as they are announced on the network. - // - // maps: chanID -> pubKey1 || pubKey2 || restofEdgeInfo - edgeIndexBucket = []byte("edge-index") - - // edgeUpdateIndexBucket is a sub-bucket of the main edgeBucket. This - // bucket contains an index which allows us to gauge the "freshness" of - // a channel's last updates. - // - // maps: updateTime || chanID -> nil - edgeUpdateIndexBucket = []byte("edge-update-index") - - // channelPointBucket maps a channel's full outpoint (txid:index) to - // its short 8-byte channel ID. This bucket resides within the - // edgeBucket above, and can be used to quickly remove an edge due to - // the outpoint being spent, or to query for existence of a channel. - // - // maps: outPoint -> chanID - channelPointBucket = []byte("chan-index") - - // zombieBucket is a sub-bucket of the main edgeBucket bucket - // responsible for maintaining an index of zombie channels. Each entry - // exists within the bucket as follows: - // - // maps: chanID -> pubKey1 || pubKey2 - // - // The chanID represents the channel ID of the edge that is marked as a - // zombie and is used as the key, which maps to the public keys of the - // edge's participants. - zombieBucket = []byte("zombie-index") - - // disabledEdgePolicyBucket is a sub-bucket of the main edgeBucket bucket - // responsible for maintaining an index of disabled edge policies. Each - // entry exists within the bucket as follows: - // - // maps: -> []byte{} - // - // The chanID represents the channel ID of the edge and the direction is - // one byte representing the direction of the edge. The main purpose of - // this index is to allow pruning disabled channels in a fast way without - // the need to iterate all over the graph. 
- disabledEdgePolicyBucket = []byte("disabled-edge-policy-index") - - // graphMetaBucket is a top-level bucket which stores various meta-deta - // related to the on-disk channel graph. Data stored in this bucket - // includes the block to which the graph has been synced to, the total - // number of channels, etc. - graphMetaBucket = []byte("graph-meta") - - // pruneLogBucket is a bucket within the graphMetaBucket that stores - // a mapping from the block height to the hash for the blocks used to - // prune the graph. - // Once a new block is discovered, any channels that have been closed - // (by spending the outpoint) can safely be removed from the graph, and - // the block is added to the prune log. We need to keep such a log for - // the case where a reorg happens, and we must "rewind" the state of the - // graph by removing channels that were previously confirmed. In such a - // case we'll remove all entries from the prune log with a block height - // that no longer exists. - pruneLogBucket = []byte("prune-log") -) - -const ( - // MaxAllowedExtraOpaqueBytes is the largest amount of opaque bytes that - // we'll permit to be written to disk. We limit this as otherwise, it - // would be possible for a node to create a ton of updates and slowly - // fill our disk, and also waste bandwidth due to relaying. - MaxAllowedExtraOpaqueBytes = 10000 -) - -// ChannelGraph is a persistent, on-disk graph representation of the Lightning -// Network. This struct can be used to implement path finding algorithms on top -// of, and also to update a node's view based on information received from the -// p2p network. Internally, the graph is stored using a modified adjacency list -// representation with some added object interaction possible with each -// serialized edge/node. The graph is stored is directed, meaning that are two -// edges stored for each channel: an inbound/outbound edge for each node pair. 
-// Nodes, edges, and edge information can all be added to the graph -// independently. Edge removal results in the deletion of all edge information -// for that edge. -type ChannelGraph struct { - db *DB -} - -// newChannelGraph allocates a new ChannelGraph backed by a DB instance. The -// returned instance has its own unique reject cache and channel cache. -func newChannelGraph(db *DB, rejectCacheSize, chanCacheSize int) *ChannelGraph { - return &ChannelGraph{ - db: db, - } -} - -// SourceNode returns the source node of the graph. The source node is treated -// as the center node within a star-graph. This method may be used to kick off -// a path finding algorithm in order to explore the reachability of another -// node based off the source node. -func (c *ChannelGraph) SourceNode() (*LightningNode, er.R) { - var source *LightningNode - err := kvdb.View(c.db, func(tx kvdb.RTx) er.R { - // First grab the nodes bucket which stores the mapping from - // pubKey to node information. - nodes := tx.ReadBucket(nodeBucket) - if nodes == nil { - return ErrGraphNotFound.Default() - } - - node, err := c.sourceNode(nodes) - if err != nil { - return err - } - source = node - - return nil - }, func() { - source = nil - }) - if err != nil { - return nil, err - } - - return source, nil -} - -// sourceNode uses an existing database transaction and returns the source node -// of the graph. The source node is treated as the center node within a -// star-graph. This method may be used to kick off a path finding algorithm in -// order to explore the reachability of another node based off the source node. -func (c *ChannelGraph) sourceNode(nodes kvdb.RBucket) (*LightningNode, er.R) { - selfPub := nodes.Get(sourceKey) - if selfPub == nil { - return nil, ErrSourceNodeNotSet.Default() - } - - // With the pubKey of the source node retrieved, we're able to - // fetch the full node information. 
- node, err := fetchLightningNode(nodes, selfPub) - if err != nil { - return nil, err - } - node.db = c.db - - return &node, nil -} - -// SetSourceNode sets the source node within the graph database. The source -// node is to be used as the center of a star-graph within path finding -// algorithms. -func (c *ChannelGraph) SetSourceNode(node *LightningNode) er.R { - nodePubBytes := node.PubKeyBytes[:] - - return kvdb.Update(c.db, func(tx kvdb.RwTx) er.R { - // First grab the nodes bucket which stores the mapping from - // pubKey to node information. - nodes, err := tx.CreateTopLevelBucket(nodeBucket) - if err != nil { - return err - } - - // Next we create the mapping from source to the targeted - // public key. - if err := nodes.Put(sourceKey, nodePubBytes); err != nil { - return err - } - - // Finally, we commit the information of the lightning node - // itself. - return addLightningNode(tx, node) - }, func() {}) -} - -func addLightningNode(tx kvdb.RwTx, node *LightningNode) er.R { - nodes, err := tx.CreateTopLevelBucket(nodeBucket) - if err != nil { - return err - } - - aliases, err := nodes.CreateBucketIfNotExists(aliasIndexBucket) - if err != nil { - return err - } - - updateIndex, err := nodes.CreateBucketIfNotExists( - nodeUpdateIndexBucket, - ) - if err != nil { - return err - } - - return putLightningNode(nodes, aliases, updateIndex, node) -} - -// updateEdgePolicy attempts to update an edge's policy within the relevant -// buckets using an existing database transaction. The returned boolean will be -// true if the updated policy belongs to node1, and false if the policy belonged -// to node2. 
-func updateEdgePolicy(tx kvdb.RwTx, edge *ChannelEdgePolicy) (bool, er.R) { - edges, err := tx.CreateTopLevelBucket(edgeBucket) - if err != nil { - return false, ErrEdgeNotFound.Default() - - } - edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket) - if edgeIndex == nil { - return false, ErrEdgeNotFound.Default() - } - nodes, err := tx.CreateTopLevelBucket(nodeBucket) - if err != nil { - return false, err - } - - // Create the channelID key be converting the channel ID - // integer into a byte slice. - var chanID [8]byte - byteOrder.PutUint64(chanID[:], edge.ChannelID) - - // With the channel ID, we then fetch the value storing the two - // nodes which connect this channel edge. - nodeInfo := edgeIndex.Get(chanID[:]) - if nodeInfo == nil { - return false, ErrEdgeNotFound.Default() - } - - // Depending on the flags value passed above, either the first - // or second edge policy is being updated. - var fromNode, toNode []byte - var isUpdate1 bool - if edge.ChannelFlags&lnwire.ChanUpdateDirection == 0 { - fromNode = nodeInfo[:33] - toNode = nodeInfo[33:66] - isUpdate1 = true - } else { - fromNode = nodeInfo[33:66] - toNode = nodeInfo[:33] - isUpdate1 = false - } - - // Finally, with the direction of the edge being updated - // identified, we update the on-disk edge representation. - errr := putChanEdgePolicy(edges, nodes, edge, fromNode, toNode) - if errr != nil { - return false, errr - } - - return isUpdate1, nil -} - -// LightningNode represents an individual vertex/node within the channel graph. -// A node is connected to other nodes by one or more channel edges emanating -// from it. As the graph is directed, a node will also have an incoming edge -// attached to it for each outgoing edge. -type LightningNode struct { - // PubKeyBytes is the raw bytes of the public key of the target node. - PubKeyBytes [33]byte - pubKey *btcec.PublicKey - - // HaveNodeAnnouncement indicates whether we received a node - // announcement for this particular node. 
If true, the remaining fields - // will be set, if false only the PubKey is known for this node. - HaveNodeAnnouncement bool - - // LastUpdate is the last time the vertex information for this node has - // been updated. - LastUpdate time.Time - - // Address is the TCP address this node is reachable over. - Addresses []net.Addr - - // Color is the selected color for the node. - Color color.RGBA - - // Alias is a nick-name for the node. The alias can be used to confirm - // a node's identity or to serve as a short ID for an address book. - Alias string - - // AuthSigBytes is the raw signature under the advertised public key - // which serves to authenticate the attributes announced by this node. - AuthSigBytes []byte - - // Features is the list of protocol features supported by this node. - Features *lnwire.FeatureVector - - // ExtraOpaqueData is the set of data that was appended to this - // message, some of which we may not actually know how to iterate or - // parse. By holding onto this data, we ensure that we're able to - // properly validate the set of signatures that cover these new fields, - // and ensure we're able to make upgrades to the network in a forwards - // compatible manner. - ExtraOpaqueData []byte - - db *DB - - // TODO(roasbeef): discovery will need storage to keep it's last IP - // address and re-announce if interface changes? - - // TODO(roasbeef): add update method and fetch? -} - -// PubKey is the node's long-term identity public key. This key will be used to -// authenticated any advertisements/updates sent by the node. -// -// NOTE: By having this method to access an attribute, we ensure we only need -// to fully deserialize the pubkey if absolutely necessary. 
-func (l *LightningNode) PubKey() (*btcec.PublicKey, er.R) { - if l.pubKey != nil { - return l.pubKey, nil - } - - key, err := btcec.ParsePubKey(l.PubKeyBytes[:], btcec.S256()) - if err != nil { - return nil, err - } - l.pubKey = key - - return key, nil -} - -// ChannelEdgeInfo represents a fully authenticated channel along with all its -// unique attributes. Once an authenticated channel announcement has been -// processed on the network, then an instance of ChannelEdgeInfo encapsulating -// the channels attributes is stored. The other portions relevant to routing -// policy of a channel are stored within a ChannelEdgePolicy for each direction -// of the channel. -type ChannelEdgeInfo struct { - // ChannelID is the unique channel ID for the channel. The first 3 - // bytes are the block height, the next 3 the index within the block, - // and the last 2 bytes are the output index for the channel. - ChannelID uint64 - - // ChainHash is the hash that uniquely identifies the chain that this - // channel was opened within. - // - // TODO(roasbeef): need to modify db keying for multi-chain - // * must add chain hash to prefix as well - ChainHash chainhash.Hash - - // NodeKey1Bytes is the raw public key of the first node. - NodeKey1Bytes [33]byte - - // NodeKey2Bytes is the raw public key of the first node. - NodeKey2Bytes [33]byte - - // BitcoinKey1Bytes is the raw public key of the first node. - BitcoinKey1Bytes [33]byte - - // BitcoinKey2Bytes is the raw public key of the first node. - BitcoinKey2Bytes [33]byte - - // Features is an opaque byte slice that encodes the set of channel - // specific features that this channel edge supports. - Features []byte - - // AuthProof is the authentication proof for this channel. This proof - // contains a set of signatures binding four identities, which attests - // to the legitimacy of the advertised channel. - AuthProof *ChannelAuthProof - - // ChannelPoint is the funding outpoint of the channel. 
This can be - // used to uniquely identify the channel within the channel graph. - ChannelPoint wire.OutPoint - - // Capacity is the total capacity of the channel, this is determined by - // the value output in the outpoint that created this channel. - Capacity btcutil.Amount - - // ExtraOpaqueData is the set of data that was appended to this - // message, some of which we may not actually know how to iterate or - // parse. By holding onto this data, we ensure that we're able to - // properly validate the set of signatures that cover these new fields, - // and ensure we're able to make upgrades to the network in a forwards - // compatible manner. - ExtraOpaqueData []byte -} - -// ChannelAuthProof is the authentication proof (the signature portion) for a -// channel. Using the four signatures contained in the struct, and some -// auxiliary knowledge (the funding script, node identities, and outpoint) nodes -// on the network are able to validate the authenticity and existence of a -// channel. Each of these signatures signs the following digest: chanID || -// nodeID1 || nodeID2 || bitcoinKey1|| bitcoinKey2 || 2-byte-feature-len || -// features. -type ChannelAuthProof struct { - // NodeSig1Bytes are the raw bytes of the first node signature encoded - // in DER format. - NodeSig1Bytes []byte - - // NodeSig2Bytes are the raw bytes of the second node signature - // encoded in DER format. - NodeSig2Bytes []byte - - // BitcoinSig1Bytes are the raw bytes of the first bitcoin signature - // encoded in DER format. - BitcoinSig1Bytes []byte - - // BitcoinSig2Bytes are the raw bytes of the second bitcoin signature - // encoded in DER format. - BitcoinSig2Bytes []byte -} - -// IsEmpty check is the authentication proof is empty Proof is empty if at -// least one of the signatures are equal to nil. 
-func (c *ChannelAuthProof) IsEmpty() bool { - return len(c.NodeSig1Bytes) == 0 || - len(c.NodeSig2Bytes) == 0 || - len(c.BitcoinSig1Bytes) == 0 || - len(c.BitcoinSig2Bytes) == 0 -} - -// ChannelEdgePolicy represents a *directed* edge within the channel graph. For -// each channel in the database, there are two distinct edges: one for each -// possible direction of travel along the channel. The edges themselves hold -// information concerning fees, and minimum time-lock information which is -// utilized during path finding. -type ChannelEdgePolicy struct { - // SigBytes is the raw bytes of the signature of the channel edge - // policy. We'll only parse these if the caller needs to access the - // signature for validation purposes. Do not set SigBytes directly, but - // use SetSigBytes instead to make sure that the cache is invalidated. - SigBytes []byte - - // ChannelID is the unique channel ID for the channel. The first 3 - // bytes are the block height, the next 3 the index within the block, - // and the last 2 bytes are the output index for the channel. - ChannelID uint64 - - // LastUpdate is the last time an authenticated edge for this channel - // was received. - LastUpdate time.Time - - // MessageFlags is a bitfield which indicates the presence of optional - // fields (like max_htlc) in the policy. - MessageFlags lnwire.ChanUpdateMsgFlags - - // ChannelFlags is a bitfield which signals the capabilities of the - // channel as well as the directed edge this update applies to. - ChannelFlags lnwire.ChanUpdateChanFlags - - // TimeLockDelta is the number of blocks this node will subtract from - // the expiry of an incoming HTLC. This value expresses the time buffer - // the node would like to HTLC exchanges. - TimeLockDelta uint16 - - // MinHTLC is the smallest value HTLC this node will accept, expressed - // in millisatoshi. - MinHTLC lnwire.MilliSatoshi - - // MaxHTLC is the largest value HTLC this node will accept, expressed - // in millisatoshi. 
- MaxHTLC lnwire.MilliSatoshi - - // FeeBaseMSat is the base HTLC fee that will be charged for forwarding - // ANY HTLC, expressed in mSAT's. - FeeBaseMSat lnwire.MilliSatoshi - - // FeeProportionalMillionths is the rate that the node will charge for - // HTLCs for each millionth of a satoshi forwarded. - FeeProportionalMillionths lnwire.MilliSatoshi - - // Node is the LightningNode that this directed edge leads to. Using - // this pointer the channel graph can further be traversed. - Node *LightningNode - - // ExtraOpaqueData is the set of data that was appended to this - // message, some of which we may not actually know how to iterate or - // parse. By holding onto this data, we ensure that we're able to - // properly validate the set of signatures that cover these new fields, - // and ensure we're able to make upgrades to the network in a forwards - // compatible manner. - ExtraOpaqueData []byte -} - -// IsDisabled determines whether the edge has the disabled bit set. -func (c *ChannelEdgePolicy) IsDisabled() bool { - return c.ChannelFlags&lnwire.ChanUpdateDisabled == - lnwire.ChanUpdateDisabled -} - -func putLightningNode(nodeBucket kvdb.RwBucket, aliasBucket kvdb.RwBucket, - updateIndex kvdb.RwBucket, node *LightningNode) er.R { - - var ( - scratch [16]byte - b bytes.Buffer - ) - - pub, err := node.PubKey() - if err != nil { - return err - } - nodePub := pub.SerializeCompressed() - - // If the node has the update time set, write it, else write 0. - updateUnix := uint64(0) - if node.LastUpdate.Unix() > 0 { - updateUnix = uint64(node.LastUpdate.Unix()) - } - - byteOrder.PutUint64(scratch[:8], updateUnix) - if _, err := b.Write(scratch[:8]); err != nil { - return er.E(err) - } - - if _, err := b.Write(nodePub); err != nil { - return er.E(err) - } - - // If we got a node announcement for this node, we will have the rest - // of the data available. If not we don't have more data to write. - if !node.HaveNodeAnnouncement { - // Write HaveNodeAnnouncement=0. 
- byteOrder.PutUint16(scratch[:2], 0) - if _, err := b.Write(scratch[:2]); err != nil { - return er.E(err) - } - - return nodeBucket.Put(nodePub, b.Bytes()) - } - - // Write HaveNodeAnnouncement=1. - byteOrder.PutUint16(scratch[:2], 1) - if _, err := b.Write(scratch[:2]); err != nil { - return er.E(err) - } - - if err := util.WriteBin(&b, byteOrder, node.Color.R); err != nil { - return err - } - if err := util.WriteBin(&b, byteOrder, node.Color.G); err != nil { - return err - } - if err := util.WriteBin(&b, byteOrder, node.Color.B); err != nil { - return err - } - - if err := wire.WriteVarString(&b, 0, node.Alias); err != nil { - return err - } - - if err := node.Features.Encode(&b); err != nil { - return err - } - - numAddresses := uint16(len(node.Addresses)) - byteOrder.PutUint16(scratch[:2], numAddresses) - if _, err := b.Write(scratch[:2]); err != nil { - return er.E(err) - } - - for _, address := range node.Addresses { - if err := serializeAddr(&b, address); err != nil { - return err - } - } - - sigLen := len(node.AuthSigBytes) - if sigLen > 80 { - return er.Errorf("max sig len allowed is 80, had %v", - sigLen) - } - - err = wire.WriteVarBytes(&b, 0, node.AuthSigBytes) - if err != nil { - return err - } - - if len(node.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes { - return ErrTooManyExtraOpaqueBytes(len(node.ExtraOpaqueData)) - } - err = wire.WriteVarBytes(&b, 0, node.ExtraOpaqueData) - if err != nil { - return err - } - - if err := aliasBucket.Put(nodePub, []byte(node.Alias)); err != nil { - return err - } - - // With the alias bucket updated, we'll now update the index that - // tracks the time series of node updates. - var indexKey [8 + 33]byte - byteOrder.PutUint64(indexKey[:8], updateUnix) - copy(indexKey[8:], nodePub) - - // If there was already an old index entry for this node, then we'll - // delete the old one before we write the new entry. 
- if nodeBytes := nodeBucket.Get(nodePub); nodeBytes != nil { - // Extract out the old update time to we can reconstruct the - // prior index key to delete it from the index. - oldUpdateTime := nodeBytes[:8] - - var oldIndexKey [8 + 33]byte - copy(oldIndexKey[:8], oldUpdateTime) - copy(oldIndexKey[8:], nodePub) - - if err := updateIndex.Delete(oldIndexKey[:]); err != nil { - return err - } - } - - if err := updateIndex.Put(indexKey[:], nil); err != nil { - return err - } - - return nodeBucket.Put(nodePub, b.Bytes()) -} - -func fetchLightningNode(nodeBucket kvdb.RBucket, - nodePub []byte) (LightningNode, er.R) { - - nodeBytes := nodeBucket.Get(nodePub) - if nodeBytes == nil { - return LightningNode{}, ErrGraphNodeNotFound.Default() - } - - nodeReader := bytes.NewReader(nodeBytes) - return deserializeLightningNode(nodeReader) -} - -func deserializeLightningNode(r io.Reader) (LightningNode, er.R) { - var ( - node LightningNode - scratch [8]byte - err er.R - ) - - if _, err := r.Read(scratch[:]); err != nil { - return LightningNode{}, er.E(err) - } - - unix := int64(byteOrder.Uint64(scratch[:])) - node.LastUpdate = time.Unix(unix, 0) - - if _, err := util.ReadFull(r, node.PubKeyBytes[:]); err != nil { - return LightningNode{}, err - } - - if _, err := r.Read(scratch[:2]); err != nil { - return LightningNode{}, er.E(err) - } - - hasNodeAnn := byteOrder.Uint16(scratch[:2]) - if hasNodeAnn == 1 { - node.HaveNodeAnnouncement = true - } else { - node.HaveNodeAnnouncement = false - } - - // The rest of the data is optional, and will only be there if we got a node - // announcement for this node. - if !node.HaveNodeAnnouncement { - return node, nil - } - - // We did get a node announcement for this node, so we'll have the rest - // of the data available. 
- if err := util.ReadBin(r, byteOrder, &node.Color.R); err != nil { - return LightningNode{}, err - } - if err := util.ReadBin(r, byteOrder, &node.Color.G); err != nil { - return LightningNode{}, err - } - if err := util.ReadBin(r, byteOrder, &node.Color.B); err != nil { - return LightningNode{}, err - } - - node.Alias, err = wire.ReadVarString(r, 0) - if err != nil { - return LightningNode{}, err - } - - fv := lnwire.NewFeatureVector(nil, nil) - err = fv.Decode(r) - if err != nil { - return LightningNode{}, err - } - node.Features = fv - - if _, err := r.Read(scratch[:2]); err != nil { - return LightningNode{}, er.E(err) - } - numAddresses := int(byteOrder.Uint16(scratch[:2])) - - var addresses []net.Addr - for i := 0; i < numAddresses; i++ { - address, err := deserializeAddr(r) - if err != nil { - return LightningNode{}, err - } - addresses = append(addresses, address) - } - node.Addresses = addresses - - node.AuthSigBytes, err = wire.ReadVarBytes(r, 0, 80, "sig") - if err != nil { - return LightningNode{}, err - } - - // We'll try and see if there are any opaque bytes left, if not, then - // we'll ignore the EOF error and return the node as is. 
- node.ExtraOpaqueData, err = wire.ReadVarBytes( - r, 0, MaxAllowedExtraOpaqueBytes, "blob", - ) - switch { - case er.Wrapped(err) == io.ErrUnexpectedEOF: - case er.Wrapped(err) == io.EOF: - case err != nil: - return LightningNode{}, err - } - - return node, nil -} - -func deserializeChanEdgeInfo(r io.Reader) (ChannelEdgeInfo, er.R) { - var ( - err er.R - edgeInfo ChannelEdgeInfo - ) - - if _, err := util.ReadFull(r, edgeInfo.NodeKey1Bytes[:]); err != nil { - return ChannelEdgeInfo{}, err - } - if _, err := util.ReadFull(r, edgeInfo.NodeKey2Bytes[:]); err != nil { - return ChannelEdgeInfo{}, err - } - if _, err := util.ReadFull(r, edgeInfo.BitcoinKey1Bytes[:]); err != nil { - return ChannelEdgeInfo{}, err - } - if _, err := util.ReadFull(r, edgeInfo.BitcoinKey2Bytes[:]); err != nil { - return ChannelEdgeInfo{}, err - } - - edgeInfo.Features, err = wire.ReadVarBytes(r, 0, 900, "features") - if err != nil { - return ChannelEdgeInfo{}, err - } - - proof := &ChannelAuthProof{} - - proof.NodeSig1Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs") - if err != nil { - return ChannelEdgeInfo{}, err - } - proof.NodeSig2Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs") - if err != nil { - return ChannelEdgeInfo{}, err - } - proof.BitcoinSig1Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs") - if err != nil { - return ChannelEdgeInfo{}, err - } - proof.BitcoinSig2Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs") - if err != nil { - return ChannelEdgeInfo{}, err - } - - if !proof.IsEmpty() { - edgeInfo.AuthProof = proof - } - - edgeInfo.ChannelPoint = wire.OutPoint{} - if err := readOutpoint(r, &edgeInfo.ChannelPoint); err != nil { - return ChannelEdgeInfo{}, err - } - if err := util.ReadBin(r, byteOrder, &edgeInfo.Capacity); err != nil { - return ChannelEdgeInfo{}, err - } - if err := util.ReadBin(r, byteOrder, &edgeInfo.ChannelID); err != nil { - return ChannelEdgeInfo{}, err - } - - if _, err := util.ReadFull(r, edgeInfo.ChainHash[:]); err != nil { - return ChannelEdgeInfo{}, err 
- } - - // We'll try and see if there are any opaque bytes left, if not, then - // we'll ignore the EOF error and return the edge as is. - edgeInfo.ExtraOpaqueData, err = wire.ReadVarBytes( - r, 0, MaxAllowedExtraOpaqueBytes, "blob", - ) - switch { - case er.Wrapped(err) == io.ErrUnexpectedEOF: - case er.Wrapped(err) == io.EOF: - case err != nil: - return ChannelEdgeInfo{}, err - } - - return edgeInfo, nil -} - -func putChanEdgePolicy(edges, nodes kvdb.RwBucket, edge *ChannelEdgePolicy, - from, to []byte) er.R { - - var edgeKey [33 + 8]byte - copy(edgeKey[:], from) - byteOrder.PutUint64(edgeKey[33:], edge.ChannelID) - - var b bytes.Buffer - if err := serializeChanEdgePolicy(&b, edge, to); err != nil { - return err - } - - // Before we write out the new edge, we'll create a new entry in the - // update index in order to keep it fresh. - updateUnix := uint64(edge.LastUpdate.Unix()) - var indexKey [8 + 8]byte - byteOrder.PutUint64(indexKey[:8], updateUnix) - byteOrder.PutUint64(indexKey[8:], edge.ChannelID) - - updateIndex, err := edges.CreateBucketIfNotExists(edgeUpdateIndexBucket) - if err != nil { - return err - } - - // If there was already an entry for this edge, then we'll need to - // delete the old one to ensure we don't leave around any after-images. - // An unknown policy value does not have a update time recorded, so - // it also does not need to be removed. - if edgeBytes := edges.Get(edgeKey[:]); edgeBytes != nil && - !bytes.Equal(edgeBytes[:], unknownPolicy) { - - // In order to delete the old entry, we'll need to obtain the - // *prior* update time in order to delete it. To do this, we'll - // need to deserialize the existing policy within the database - // (now outdated by the new one), and delete its corresponding - // entry within the update index. We'll ignore any - // ErrEdgePolicyOptionalFieldNotFound error, as we only need - // the channel ID and update time to delete the entry. 
- // TODO(halseth): get rid of these invalid policies in a - // migration. - oldEdgePolicy, err := deserializeChanEdgePolicy( - bytes.NewReader(edgeBytes), nodes, - ) - if err != nil && !ErrEdgePolicyOptionalFieldNotFound.Is(err) { - return err - } - - oldUpdateTime := uint64(oldEdgePolicy.LastUpdate.Unix()) - - var oldIndexKey [8 + 8]byte - byteOrder.PutUint64(oldIndexKey[:8], oldUpdateTime) - byteOrder.PutUint64(oldIndexKey[8:], edge.ChannelID) - - if err := updateIndex.Delete(oldIndexKey[:]); err != nil { - return err - } - } - - if err := updateIndex.Put(indexKey[:], nil); err != nil { - return err - } - - updateEdgePolicyDisabledIndex( - edges, edge.ChannelID, - edge.ChannelFlags&lnwire.ChanUpdateDirection > 0, - edge.IsDisabled(), - ) - - return edges.Put(edgeKey[:], b.Bytes()[:]) -} - -// updateEdgePolicyDisabledIndex is used to update the disabledEdgePolicyIndex -// bucket by either add a new disabled ChannelEdgePolicy or remove an existing -// one. -// The direction represents the direction of the edge and disabled is used for -// deciding whether to remove or add an entry to the bucket. -// In general a channel is disabled if two entries for the same chanID exist -// in this bucket. -// Maintaining the bucket this way allows a fast retrieval of disabled -// channels, for example when prune is needed. -func updateEdgePolicyDisabledIndex(edges kvdb.RwBucket, chanID uint64, - direction bool, disabled bool) er.R { - - var disabledEdgeKey [8 + 1]byte - byteOrder.PutUint64(disabledEdgeKey[0:], chanID) - if direction { - disabledEdgeKey[8] = 1 - } - - disabledEdgePolicyIndex, err := edges.CreateBucketIfNotExists( - disabledEdgePolicyBucket, - ) - if err != nil { - return err - } - - if disabled { - return disabledEdgePolicyIndex.Put(disabledEdgeKey[:], []byte{}) - } - - return disabledEdgePolicyIndex.Delete(disabledEdgeKey[:]) -} - -// putChanEdgePolicyUnknown marks the edge policy as unknown -// in the edges bucket. 
-func putChanEdgePolicyUnknown(edges kvdb.RwBucket, channelID uint64, - from []byte) er.R { - - var edgeKey [33 + 8]byte - copy(edgeKey[:], from) - byteOrder.PutUint64(edgeKey[33:], channelID) - - if edges.Get(edgeKey[:]) != nil { - return er.Errorf("Cannot write unknown policy for channel %v "+ - " when there is already a policy present", channelID) - } - - return edges.Put(edgeKey[:], unknownPolicy) -} - -func fetchChanEdgePolicy(edges kvdb.RBucket, chanID []byte, - nodePub []byte, nodes kvdb.RBucket) (*ChannelEdgePolicy, er.R) { - - var edgeKey [33 + 8]byte - copy(edgeKey[:], nodePub) - copy(edgeKey[33:], chanID[:]) - - edgeBytes := edges.Get(edgeKey[:]) - if edgeBytes == nil { - return nil, ErrEdgeNotFound.Default() - } - - // No need to deserialize unknown policy. - if bytes.Equal(edgeBytes[:], unknownPolicy) { - return nil, nil - } - - edgeReader := bytes.NewReader(edgeBytes) - - ep, err := deserializeChanEdgePolicy(edgeReader, nodes) - switch { - // If the db policy was missing an expected optional field, we return - // nil as if the policy was unknown. 
- case ErrEdgePolicyOptionalFieldNotFound.Is(err): - return nil, nil - - case err != nil: - return nil, err - } - - return ep, nil -} - -func serializeChanEdgePolicy(w io.Writer, edge *ChannelEdgePolicy, - to []byte) er.R { - - err := wire.WriteVarBytes(w, 0, edge.SigBytes) - if err != nil { - return err - } - - if err := util.WriteBin(w, byteOrder, edge.ChannelID); err != nil { - return err - } - - var scratch [8]byte - updateUnix := uint64(edge.LastUpdate.Unix()) - byteOrder.PutUint64(scratch[:], updateUnix) - if _, err := util.Write(w, scratch[:]); err != nil { - return err - } - - if err := util.WriteBin(w, byteOrder, edge.MessageFlags); err != nil { - return err - } - if err := util.WriteBin(w, byteOrder, edge.ChannelFlags); err != nil { - return err - } - if err := util.WriteBin(w, byteOrder, edge.TimeLockDelta); err != nil { - return err - } - if err := util.WriteBin(w, byteOrder, uint64(edge.MinHTLC)); err != nil { - return err - } - if err := util.WriteBin(w, byteOrder, uint64(edge.FeeBaseMSat)); err != nil { - return err - } - if err := util.WriteBin(w, byteOrder, uint64(edge.FeeProportionalMillionths)); err != nil { - return err - } - - if _, err := util.Write(w, to); err != nil { - return err - } - - // If the max_htlc field is present, we write it. To be compatible with - // older versions that wasn't aware of this field, we write it as part - // of the opaque data. - // TODO(halseth): clean up when moving to TLV. 
- var opaqueBuf bytes.Buffer - if edge.MessageFlags.HasMaxHtlc() { - err := util.WriteBin(&opaqueBuf, byteOrder, uint64(edge.MaxHTLC)) - if err != nil { - return err - } - } - - if len(edge.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes { - return ErrTooManyExtraOpaqueBytes(len(edge.ExtraOpaqueData)) - } - if _, err := opaqueBuf.Write(edge.ExtraOpaqueData); err != nil { - return er.E(err) - } - - if err := wire.WriteVarBytes(w, 0, opaqueBuf.Bytes()); err != nil { - return err - } - return nil -} - -func deserializeChanEdgePolicy(r io.Reader, - nodes kvdb.RBucket) (*ChannelEdgePolicy, er.R) { - - edge := &ChannelEdgePolicy{} - - var err er.R - edge.SigBytes, err = wire.ReadVarBytes(r, 0, 80, "sig") - if err != nil { - return nil, err - } - - if err := util.ReadBin(r, byteOrder, &edge.ChannelID); err != nil { - return nil, err - } - - var scratch [8]byte - if _, err := r.Read(scratch[:]); err != nil { - return nil, er.E(err) - } - unix := int64(byteOrder.Uint64(scratch[:])) - edge.LastUpdate = time.Unix(unix, 0) - - if err := util.ReadBin(r, byteOrder, &edge.MessageFlags); err != nil { - return nil, err - } - if err := util.ReadBin(r, byteOrder, &edge.ChannelFlags); err != nil { - return nil, err - } - if err := util.ReadBin(r, byteOrder, &edge.TimeLockDelta); err != nil { - return nil, err - } - - var n uint64 - if err := util.ReadBin(r, byteOrder, &n); err != nil { - return nil, err - } - edge.MinHTLC = lnwire.MilliSatoshi(n) - - if err := util.ReadBin(r, byteOrder, &n); err != nil { - return nil, err - } - edge.FeeBaseMSat = lnwire.MilliSatoshi(n) - - if err := util.ReadBin(r, byteOrder, &n); err != nil { - return nil, err - } - edge.FeeProportionalMillionths = lnwire.MilliSatoshi(n) - - var pub [33]byte - if _, err := r.Read(pub[:]); err != nil { - return nil, er.E(err) - } - - node, err := fetchLightningNode(nodes, pub[:]) - if err != nil { - return nil, er.Errorf("unable to fetch node: %x, %v", - pub[:], err) - } - edge.Node = &node - - // We'll try and see if 
there are any opaque bytes left, if not, then - // we'll ignore the EOF error and return the edge as is. - edge.ExtraOpaqueData, err = wire.ReadVarBytes( - r, 0, MaxAllowedExtraOpaqueBytes, "blob", - ) - switch { - case er.Wrapped(err) == io.ErrUnexpectedEOF: - case er.Wrapped(err) == io.EOF: - case err != nil: - return nil, err - } - - // See if optional fields are present. - if edge.MessageFlags.HasMaxHtlc() { - // The max_htlc field should be at the beginning of the opaque - // bytes. - opq := edge.ExtraOpaqueData - - // If the max_htlc field is not present, it might be old data - // stored before this field was validated. We'll return the - // edge along with an error. - if len(opq) < 8 { - return edge, ErrEdgePolicyOptionalFieldNotFound.Default() - } - - maxHtlc := byteOrder.Uint64(opq[:8]) - edge.MaxHTLC = lnwire.MilliSatoshi(maxHtlc) - - // Exclude the parsed field from the rest of the opaque data. - edge.ExtraOpaqueData = opq[8:] - } - - return edge, nil -} diff --git a/lnd/channeldb/migration_01_to_11/graph_test.go b/lnd/channeldb/migration_01_to_11/graph_test.go deleted file mode 100644 index a403c6ec..00000000 --- a/lnd/channeldb/migration_01_to_11/graph_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package migration_01_to_11 - -import ( - "image/color" - "math/big" - prand "math/rand" - "net" - "time" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -var ( - testAddr = &net.TCPAddr{IP: (net.IP)([]byte{0xA, 0x0, 0x0, 0x1}), - Port: 9000} - anotherAddr, _ = net.ResolveTCPAddr("tcp", - "[2001:db8:85a3:0:0:8a2e:370:7334]:80") - testAddrs = []net.Addr{testAddr, anotherAddr} - - testSig = &btcec.Signature{ - R: new(big.Int), - S: new(big.Int), - } - _, _ = testSig.R.SetString("63724406601629180062774974542967536251589935445068131219452686511677818569431", 10) - _, _ = testSig.S.SetString("18801056069249825825291287104931333862866033135609736119018462340006816851118", 10) - - testFeatures = 
lnwire.NewFeatureVector(nil, nil) -) - -func createLightningNode(db *DB, priv *btcec.PrivateKey) (*LightningNode, er.R) { - updateTime := prand.Int63() - - pub := priv.PubKey().SerializeCompressed() - n := &LightningNode{ - HaveNodeAnnouncement: true, - AuthSigBytes: testSig.Serialize(), - LastUpdate: time.Unix(updateTime, 0), - Color: color.RGBA{1, 2, 3, 0}, - Alias: "kek" + string(pub[:]), - Features: testFeatures, - Addresses: testAddrs, - db: db, - } - copy(n.PubKeyBytes[:], priv.PubKey().SerializeCompressed()) - - return n, nil -} - -func createTestVertex(db *DB) (*LightningNode, er.R) { - priv, err := btcec.NewPrivateKey(btcec.S256()) - if err != nil { - return nil, err - } - - return createLightningNode(db, priv) -} diff --git a/lnd/channeldb/migration_01_to_11/invoices.go b/lnd/channeldb/migration_01_to_11/invoices.go deleted file mode 100644 index d3639edc..00000000 --- a/lnd/channeldb/migration_01_to_11/invoices.go +++ /dev/null @@ -1,552 +0,0 @@ -package migration_01_to_11 - -import ( - "bytes" - "io" - "time" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/lnd/lntypes" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/tlv" - "github.com/pkt-cash/pktd/wire" -) - -var ( - - // invoiceBucket is the name of the bucket within the database that - // stores all data related to invoices no matter their final state. - // Within the invoice bucket, each invoice is keyed by its invoice ID - // which is a monotonically increasing uint32. - invoiceBucket = []byte("invoices") - - // addIndexBucket is an index bucket that we'll use to create a - // monotonically increasing set of add indexes. Each time we add a new - // invoice, this sequence number will be incremented and then populated - // within the new invoice. 
- // - // In addition to this sequence number, we map: - // - // addIndexNo => invoiceKey - addIndexBucket = []byte("invoice-add-index") - - // settleIndexBucket is an index bucket that we'll use to create a - // monotonically increasing integer for tracking a "settle index". Each - // time an invoice is settled, this sequence number will be incremented - // as populate within the newly settled invoice. - // - // In addition to this sequence number, we map: - // - // settleIndexNo => invoiceKey - settleIndexBucket = []byte("invoice-settle-index") -) - -const ( - // MaxMemoSize is maximum size of the memo field within invoices stored - // in the database. - MaxMemoSize = 1024 - - // MaxReceiptSize is the maximum size of the payment receipt stored - // within the database along side incoming/outgoing invoices. - MaxReceiptSize = 1024 - - // MaxPaymentRequestSize is the max size of a payment request for - // this invoice. - // TODO(halseth): determine the max length payment request when field - // lengths are final. - MaxPaymentRequestSize = 4096 - - // A set of tlv type definitions used to serialize invoice htlcs to the - // database. - chanIDType tlv.Type = 1 - htlcIDType tlv.Type = 3 - amtType tlv.Type = 5 - acceptHeightType tlv.Type = 7 - acceptTimeType tlv.Type = 9 - resolveTimeType tlv.Type = 11 - expiryHeightType tlv.Type = 13 - stateType tlv.Type = 15 -) - -// ContractState describes the state the invoice is in. -type ContractState uint8 - -const ( - // ContractOpen means the invoice has only been created. - ContractOpen ContractState = 0 - - // ContractSettled means the htlc is settled and the invoice has been - // paid. - ContractSettled ContractState = 1 - - // ContractCanceled means the invoice has been canceled. - ContractCanceled ContractState = 2 - - // ContractAccepted means the HTLC has been accepted but not settled - // yet. - ContractAccepted ContractState = 3 -) - -// String returns a human readable identifier for the ContractState type. 
-func (c ContractState) String() string { - switch c { - case ContractOpen: - return "Open" - case ContractSettled: - return "Settled" - case ContractCanceled: - return "Canceled" - case ContractAccepted: - return "Accepted" - } - - return "Unknown" -} - -// ContractTerm is a companion struct to the Invoice struct. This struct houses -// the necessary conditions required before the invoice can be considered fully -// settled by the payee. -type ContractTerm struct { - // PaymentPreimage is the preimage which is to be revealed in the - // occasion that an HTLC paying to the hash of this preimage is - // extended. - PaymentPreimage lntypes.Preimage - - // Value is the expected amount of milli-satoshis to be paid to an HTLC - // which can be satisfied by the above preimage. - Value lnwire.MilliSatoshi - - // State describes the state the invoice is in. - State ContractState -} - -// Invoice is a payment invoice generated by a payee in order to request -// payment for some good or service. The inclusion of invoices within Lightning -// creates a payment work flow for merchants very similar to that of the -// existing financial system within PayPal, etc. Invoices are added to the -// database when a payment is requested, then can be settled manually once the -// payment is received at the upper layer. For record keeping purposes, -// invoices are never deleted from the database, instead a bit is toggled -// denoting the invoice has been fully settled. Within the database, all -// invoices must have a unique payment hash which is generated by taking the -// sha256 of the payment preimage. -type Invoice struct { - // Memo is an optional memo to be stored along side an invoice. The - // memo may contain further details pertaining to the invoice itself, - // or any other message which fits within the size constraints. - Memo []byte - - // Receipt is an optional field dedicated for storing a - // cryptographically binding receipt of payment. 
- // - // TODO(roasbeef): document scheme. - Receipt []byte - - // PaymentRequest is an optional field where a payment request created - // for this invoice can be stored. - PaymentRequest []byte - - // FinalCltvDelta is the minimum required number of blocks before htlc - // expiry when the invoice is accepted. - FinalCltvDelta int32 - - // Expiry defines how long after creation this invoice should expire. - Expiry time.Duration - - // CreationDate is the exact time the invoice was created. - CreationDate time.Time - - // SettleDate is the exact time the invoice was settled. - SettleDate time.Time - - // Terms are the contractual payment terms of the invoice. Once all the - // terms have been satisfied by the payer, then the invoice can be - // considered fully fulfilled. - // - // TODO(roasbeef): later allow for multiple terms to fulfill the final - // invoice: payment fragmentation, etc. - Terms ContractTerm - - // AddIndex is an auto-incrementing integer that acts as a - // monotonically increasing sequence number for all invoices created. - // Clients can then use this field as a "checkpoint" of sorts when - // implementing a streaming RPC to notify consumers of instances where - // an invoice has been added before they re-connected. - // - // NOTE: This index starts at 1. - AddIndex uint64 - - // SettleIndex is an auto-incrementing integer that acts as a - // monotonically increasing sequence number for all settled invoices. - // Clients can then use this field as a "checkpoint" of sorts when - // implementing a streaming RPC to notify consumers of instances where - // an invoice has been settled before they re-connected. - // - // NOTE: This index starts at 1. - SettleIndex uint64 - - // AmtPaid is the final amount that we ultimately accepted for pay for - // this invoice. We specify this value independently as it's possible - // that the invoice originally didn't specify an amount, or the sender - // overpaid. 
- AmtPaid lnwire.MilliSatoshi - - // Htlcs records all htlcs that paid to this invoice. Some of these - // htlcs may have been marked as canceled. - Htlcs map[CircuitKey]*InvoiceHTLC -} - -// HtlcState defines the states an htlc paying to an invoice can be in. -type HtlcState uint8 - -// InvoiceHTLC contains details about an htlc paying to this invoice. -type InvoiceHTLC struct { - // Amt is the amount that is carried by this htlc. - Amt lnwire.MilliSatoshi - - // AcceptHeight is the block height at which the invoice registry - // decided to accept this htlc as a payment to the invoice. At this - // height, the invoice cltv delay must have been met. - AcceptHeight uint32 - - // AcceptTime is the wall clock time at which the invoice registry - // decided to accept the htlc. - AcceptTime time.Time - - // ResolveTime is the wall clock time at which the invoice registry - // decided to settle the htlc. - ResolveTime time.Time - - // Expiry is the expiry height of this htlc. - Expiry uint32 - - // State indicates the state the invoice htlc is currently in. A - // canceled htlc isn't just removed from the invoice htlcs map, because - // we need AcceptHeight to properly cancel the htlc back. - State HtlcState -} - -func validateInvoice(i *Invoice) er.R { - if len(i.Memo) > MaxMemoSize { - return er.Errorf("max length a memo is %v, and invoice "+ - "of length %v was provided", MaxMemoSize, len(i.Memo)) - } - if len(i.Receipt) > MaxReceiptSize { - return er.Errorf("max length a receipt is %v, and invoice "+ - "of length %v was provided", MaxReceiptSize, - len(i.Receipt)) - } - if len(i.PaymentRequest) > MaxPaymentRequestSize { - return er.Errorf("max length of payment request is %v, length "+ - "provided was %v", MaxPaymentRequestSize, - len(i.PaymentRequest)) - } - return nil -} - -// FetchAllInvoices returns all invoices currently stored within the database. 
-// If the pendingOnly param is true, then only unsettled invoices will be -// returned, skipping all invoices that are fully settled. -func (d *DB) FetchAllInvoices(pendingOnly bool) ([]Invoice, er.R) { - var invoices []Invoice - - err := kvdb.View(d, func(tx kvdb.RTx) er.R { - invoiceB := tx.ReadBucket(invoiceBucket) - if invoiceB == nil { - return ErrNoInvoicesCreated.Default() - } - - // Iterate through the entire key space of the top-level - // invoice bucket. If key with a non-nil value stores the next - // invoice ID which maps to the corresponding invoice. - return invoiceB.ForEach(func(k, v []byte) er.R { - if v == nil { - return nil - } - - invoiceReader := bytes.NewReader(v) - invoice, err := deserializeInvoice(invoiceReader) - if err != nil { - return err - } - - if pendingOnly && - invoice.Terms.State == ContractSettled { - - return nil - } - - invoices = append(invoices, invoice) - - return nil - }) - }, func() { - invoices = nil - }) - if err != nil { - return nil, err - } - - return invoices, nil -} - -// serializeInvoice serializes an invoice to a writer. -// -// Note: this function is in use for a migration. Before making changes that -// would modify the on disk format, make a copy of the original code and store -// it with the migration. 
-func serializeInvoice(w io.Writer, i *Invoice) er.R { - if err := wire.WriteVarBytes(w, 0, i.Memo[:]); err != nil { - return err - } - if err := wire.WriteVarBytes(w, 0, i.Receipt[:]); err != nil { - return err - } - if err := wire.WriteVarBytes(w, 0, i.PaymentRequest[:]); err != nil { - return err - } - - if err := util.WriteBin(w, byteOrder, i.FinalCltvDelta); err != nil { - return err - } - - if err := util.WriteBin(w, byteOrder, int64(i.Expiry)); err != nil { - return err - } - - birthBytes, err := i.CreationDate.MarshalBinary() - if err != nil { - return er.E(err) - } - - if err := wire.WriteVarBytes(w, 0, birthBytes); err != nil { - return err - } - - settleBytes, err := i.SettleDate.MarshalBinary() - if err != nil { - return er.E(err) - } - - if err := wire.WriteVarBytes(w, 0, settleBytes); err != nil { - return err - } - - if _, err := util.Write(w, i.Terms.PaymentPreimage[:]); err != nil { - return err - } - - var scratch [8]byte - byteOrder.PutUint64(scratch[:], uint64(i.Terms.Value)) - if _, err := util.Write(w, scratch[:]); err != nil { - return err - } - - if err := util.WriteBin(w, byteOrder, i.Terms.State); err != nil { - return err - } - - if err := util.WriteBin(w, byteOrder, i.AddIndex); err != nil { - return err - } - if err := util.WriteBin(w, byteOrder, i.SettleIndex); err != nil { - return err - } - if err := util.WriteBin(w, byteOrder, int64(i.AmtPaid)); err != nil { - return err - } - - if err := serializeHtlcs(w, i.Htlcs); err != nil { - return err - } - - return nil -} - -// serializeHtlcs serializes a map containing circuit keys and invoice htlcs to -// a writer. -func serializeHtlcs(w io.Writer, htlcs map[CircuitKey]*InvoiceHTLC) er.R { - for key, htlc := range htlcs { - // Encode the htlc in a tlv stream. 
- chanID := key.ChanID.ToUint64() - amt := uint64(htlc.Amt) - acceptTime := uint64(htlc.AcceptTime.UnixNano()) - resolveTime := uint64(htlc.ResolveTime.UnixNano()) - state := uint8(htlc.State) - - tlvStream, err := tlv.NewStream( - tlv.MakePrimitiveRecord(chanIDType, &chanID), - tlv.MakePrimitiveRecord(htlcIDType, &key.HtlcID), - tlv.MakePrimitiveRecord(amtType, &amt), - tlv.MakePrimitiveRecord( - acceptHeightType, &htlc.AcceptHeight, - ), - tlv.MakePrimitiveRecord(acceptTimeType, &acceptTime), - tlv.MakePrimitiveRecord(resolveTimeType, &resolveTime), - tlv.MakePrimitiveRecord(expiryHeightType, &htlc.Expiry), - tlv.MakePrimitiveRecord(stateType, &state), - ) - if err != nil { - return err - } - - var b bytes.Buffer - if err := tlvStream.Encode(&b); err != nil { - return err - } - - // Write the length of the tlv stream followed by the stream - // bytes. - err = util.WriteBin(w, byteOrder, uint64(b.Len())) - if err != nil { - return err - } - - if _, err := util.Write(w, b.Bytes()); err != nil { - return err - } - } - - return nil -} - -func deserializeInvoice(r io.Reader) (Invoice, er.R) { - var err er.R - invoice := Invoice{} - - // TODO(roasbeef): use read full everywhere - invoice.Memo, err = wire.ReadVarBytes(r, 0, MaxMemoSize, "") - if err != nil { - return invoice, err - } - invoice.Receipt, err = wire.ReadVarBytes(r, 0, MaxReceiptSize, "") - if err != nil { - return invoice, err - } - - invoice.PaymentRequest, err = wire.ReadVarBytes(r, 0, MaxPaymentRequestSize, "") - if err != nil { - return invoice, err - } - - if err := util.ReadBin(r, byteOrder, &invoice.FinalCltvDelta); err != nil { - return invoice, err - } - - var expiry int64 - if err := util.ReadBin(r, byteOrder, &expiry); err != nil { - return invoice, err - } - invoice.Expiry = time.Duration(expiry) - - birthBytes, err := wire.ReadVarBytes(r, 0, 300, "birth") - if err != nil { - return invoice, err - } - if err := invoice.CreationDate.UnmarshalBinary(birthBytes); err != nil { - return invoice, 
er.E(err) - } - - settledBytes, err := wire.ReadVarBytes(r, 0, 300, "settled") - if err != nil { - return invoice, err - } - if err := invoice.SettleDate.UnmarshalBinary(settledBytes); err != nil { - return invoice, er.E(err) - } - - if _, err := util.ReadFull(r, invoice.Terms.PaymentPreimage[:]); err != nil { - return invoice, err - } - var scratch [8]byte - if _, err := util.ReadFull(r, scratch[:]); err != nil { - return invoice, err - } - invoice.Terms.Value = lnwire.MilliSatoshi(byteOrder.Uint64(scratch[:])) - - if err := util.ReadBin(r, byteOrder, &invoice.Terms.State); err != nil { - return invoice, err - } - - if err := util.ReadBin(r, byteOrder, &invoice.AddIndex); err != nil { - return invoice, err - } - if err := util.ReadBin(r, byteOrder, &invoice.SettleIndex); err != nil { - return invoice, err - } - if err := util.ReadBin(r, byteOrder, &invoice.AmtPaid); err != nil { - return invoice, err - } - - invoice.Htlcs, err = deserializeHtlcs(r) - if err != nil { - return Invoice{}, err - } - - return invoice, nil -} - -// deserializeHtlcs reads a list of invoice htlcs from a reader and returns it -// as a map. -func deserializeHtlcs(r io.Reader) (map[CircuitKey]*InvoiceHTLC, er.R) { - htlcs := make(map[CircuitKey]*InvoiceHTLC, 0) - - for { - // Read the length of the tlv stream for this htlc. - var streamLen uint64 - if err := util.ReadBin(r, byteOrder, &streamLen); err != nil { - if er.Wrapped(err) == io.EOF { - break - } - - return nil, err - } - - streamBytes := make([]byte, streamLen) - if _, err := r.Read(streamBytes); err != nil { - return nil, er.E(err) - } - streamReader := bytes.NewReader(streamBytes) - - // Decode the contents into the htlc fields. 
- var ( - htlc InvoiceHTLC - key CircuitKey - chanID uint64 - state uint8 - acceptTime, resolveTime uint64 - amt uint64 - ) - tlvStream, err := tlv.NewStream( - tlv.MakePrimitiveRecord(chanIDType, &chanID), - tlv.MakePrimitiveRecord(htlcIDType, &key.HtlcID), - tlv.MakePrimitiveRecord(amtType, &amt), - tlv.MakePrimitiveRecord( - acceptHeightType, &htlc.AcceptHeight, - ), - tlv.MakePrimitiveRecord(acceptTimeType, &acceptTime), - tlv.MakePrimitiveRecord(resolveTimeType, &resolveTime), - tlv.MakePrimitiveRecord(expiryHeightType, &htlc.Expiry), - tlv.MakePrimitiveRecord(stateType, &state), - ) - if err != nil { - return nil, err - } - - if err := tlvStream.Decode(streamReader); err != nil { - return nil, err - } - - key.ChanID = lnwire.NewShortChanIDFromInt(chanID) - htlc.AcceptTime = time.Unix(0, int64(acceptTime)) - htlc.ResolveTime = time.Unix(0, int64(resolveTime)) - htlc.State = HtlcState(state) - htlc.Amt = lnwire.MilliSatoshi(amt) - - htlcs[key] = &htlc - } - - return htlcs, nil -} diff --git a/lnd/channeldb/migration_01_to_11/legacy_serialization.go b/lnd/channeldb/migration_01_to_11/legacy_serialization.go deleted file mode 100644 index d96aa6bc..00000000 --- a/lnd/channeldb/migration_01_to_11/legacy_serialization.go +++ /dev/null @@ -1,57 +0,0 @@ -package migration_01_to_11 - -import ( - "io" - - "github.com/pkt-cash/pktd/btcutil/er" -) - -// deserializeCloseChannelSummaryV6 reads the v6 database format for -// ChannelCloseSummary. -// -// NOTE: deprecated, only for migration. -func deserializeCloseChannelSummaryV6(r io.Reader) (*ChannelCloseSummary, er.R) { - c := &ChannelCloseSummary{} - - err := ReadElements(r, - &c.ChanPoint, &c.ShortChanID, &c.ChainHash, &c.ClosingTXID, - &c.CloseHeight, &c.RemotePub, &c.Capacity, &c.SettledBalance, - &c.TimeLockedBalance, &c.CloseType, &c.IsPending, - ) - if err != nil { - return nil, err - } - - // We'll now check to see if the channel close summary was encoded with - // any of the additional optional fields. 
- err = ReadElements(r, &c.RemoteCurrentRevocation) - switch { - case er.Wrapped(err) == io.EOF: - return c, nil - - // If we got a non-eof error, then we know there's an actually issue. - // Otherwise, it may have been the case that this summary didn't have - // the set of optional fields. - case err != nil: - return nil, err - } - - if err := readChanConfig(r, &c.LocalChanConfig); err != nil { - return nil, err - } - - // Finally, we'll attempt to read the next unrevoked commitment point - // for the remote party. If we closed the channel before receiving a - // funding locked message, then this can be nil. As a result, we'll use - // the same technique to read the field, only if there's still data - // left in the buffer. - err = ReadElements(r, &c.RemoteNextRevocation) - if err != nil && er.Wrapped(err) != io.EOF { - // If we got a non-eof error, then we know there's an actually - // issue. Otherwise, it may have been the case that this - // summary didn't have the set of optional fields. - return nil, err - } - - return c, nil -} diff --git a/lnd/channeldb/migration_01_to_11/meta.go b/lnd/channeldb/migration_01_to_11/meta.go deleted file mode 100644 index 6f778a07..00000000 --- a/lnd/channeldb/migration_01_to_11/meta.go +++ /dev/null @@ -1,40 +0,0 @@ -package migration_01_to_11 - -import ( - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" -) - -var ( - // metaBucket stores all the meta information concerning the state of - // the database. - metaBucket = []byte("metadata") - - // dbVersionKey is a boltdb key and it's used for storing/retrieving - // current database version. - dbVersionKey = []byte("dbp") -) - -// Meta structure holds the database meta information. -type Meta struct { - // DbVersionNumber is the current schema version of the database. - DbVersionNumber uint32 -} - -// putMeta is an internal helper function used in order to allow callers to -// re-use a database transaction. 
See the publicly exported PutMeta method for -// more information. -func putMeta(meta *Meta, tx kvdb.RwTx) er.R { - metaBucket, err := tx.CreateTopLevelBucket(metaBucket) - if err != nil { - return err - } - - return putDbVersion(metaBucket, meta) -} - -func putDbVersion(metaBucket kvdb.RwBucket, meta *Meta) er.R { - scratch := make([]byte, 4) - byteOrder.PutUint32(scratch, meta.DbVersionNumber) - return metaBucket.Put(dbVersionKey, scratch) -} diff --git a/lnd/channeldb/migration_01_to_11/meta_test.go b/lnd/channeldb/migration_01_to_11/meta_test.go deleted file mode 100644 index ba230e5a..00000000 --- a/lnd/channeldb/migration_01_to_11/meta_test.go +++ /dev/null @@ -1,59 +0,0 @@ -package migration_01_to_11 - -import ( - "testing" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/pktlog/log" -) - -// applyMigration is a helper test function that encapsulates the general steps -// which are needed to properly check the result of applying migration function. -func applyMigration(t *testing.T, beforeMigration, afterMigration func(d *DB), - migrationFunc migration, shouldFail bool) { - - cdb, cleanUp, err := makeTestDB() - defer cleanUp() - if err != nil { - t.Fatal(err) - } - - // Create a test node that will be our source node. - testNode, err := createTestVertex(cdb) - if err != nil { - t.Fatal(err) - } - graph := cdb.ChannelGraph() - if err := graph.SetSourceNode(testNode); err != nil { - t.Fatal(err) - } - - // beforeMigration usually used for populating the database - // with test data. - beforeMigration(cdb) - - defer func() { - if r := recover(); r != nil { - err = er.Errorf("%v", r) - } - - if err == nil && shouldFail { - t.Fatal("error wasn't received on migration stage") - } else if err != nil && !shouldFail { - t.Fatalf("error was received on migration stage: %v", err) - } - - // afterMigration usually used for checking the database state and - // throwing the error if something went wrong. 
- afterMigration(cdb) - }() - - // Apply migration. - err = kvdb.Update(cdb, func(tx kvdb.RwTx) er.R { - return migrationFunc(tx) - }, func() {}) - if err != nil { - log.Error(err) - } -} diff --git a/lnd/channeldb/migration_01_to_11/migration_09_legacy_serialization.go b/lnd/channeldb/migration_01_to_11/migration_09_legacy_serialization.go deleted file mode 100644 index 355489f4..00000000 --- a/lnd/channeldb/migration_01_to_11/migration_09_legacy_serialization.go +++ /dev/null @@ -1,503 +0,0 @@ -package migration_01_to_11 - -import ( - "bytes" - "encoding/binary" - "io" - "sort" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/lnd/lntypes" - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -var ( - // paymentBucket is the name of the bucket within the database that - // stores all data related to payments. - // - // Within the payments bucket, each invoice is keyed by its invoice ID - // which is a monotonically increasing uint64. BoltDB's sequence - // feature is used for generating monotonically increasing id. - // - // NOTE: Deprecated. Kept around for migration purposes. - paymentBucket = []byte("payments") - - // paymentStatusBucket is the name of the bucket within the database - // that stores the status of a payment indexed by the payment's - // preimage. - // - // NOTE: Deprecated. Kept around for migration purposes. - paymentStatusBucket = []byte("payment-status") -) - -// outgoingPayment represents a successful payment between the daemon and a -// remote node. Details such as the total fee paid, and the time of the payment -// are stored. -// -// NOTE: Deprecated. Kept around for migration purposes. -type outgoingPayment struct { - Invoice - - // Fee is the total fee paid for the payment in milli-satoshis. 
- Fee lnwire.MilliSatoshi - - // TotalTimeLock is the total cumulative time-lock in the HTLC extended - // from the second-to-last hop to the destination. - TimeLockLength uint32 - - // Path encodes the path the payment took through the network. The path - // excludes the outgoing node and consists of the hex-encoded - // compressed public key of each of the nodes involved in the payment. - Path [][33]byte - - // PaymentPreimage is the preImage of a successful payment. This is used - // to calculate the PaymentHash as well as serve as a proof of payment. - PaymentPreimage [32]byte -} - -// addPayment saves a successful payment to the database. It is assumed that -// all payment are sent using unique payment hashes. -// -// NOTE: Deprecated. Kept around for migration purposes. -func (db *DB) addPayment(payment *outgoingPayment) er.R { - // Validate the field of the inner voice within the outgoing payment, - // these must also adhere to the same constraints as regular invoices. - if err := validateInvoice(&payment.Invoice); err != nil { - return err - } - - // We first serialize the payment before starting the database - // transaction so we can avoid creating a DB payment in the case of a - // serialization error. - var b bytes.Buffer - if err := serializeOutgoingPayment(&b, payment); err != nil { - return err - } - paymentBytes := b.Bytes() - - return kvdb.Update(db, func(tx kvdb.RwTx) er.R { - payments, err := tx.CreateTopLevelBucket(paymentBucket) - if err != nil { - return err - } - - // Obtain the new unique sequence number for this payment. - paymentID, err := payments.NextSequence() - if err != nil { - return err - } - - // We use BigEndian for keys as it orders keys in - // ascending order. This allows bucket scans to order payments - // in the order in which they were created. 
- paymentIDBytes := make([]byte, 8) - binary.BigEndian.PutUint64(paymentIDBytes, paymentID) - - return payments.Put(paymentIDBytes, paymentBytes) - }, func() {}) -} - -// fetchAllPayments returns all outgoing payments in DB. -// -// NOTE: Deprecated. Kept around for migration purposes. -func (db *DB) fetchAllPayments() ([]*outgoingPayment, er.R) { - var payments []*outgoingPayment - - err := kvdb.View(db, func(tx kvdb.RTx) er.R { - bucket := tx.ReadBucket(paymentBucket) - if bucket == nil { - return ErrNoPaymentsCreated.Default() - } - - return bucket.ForEach(func(k, v []byte) er.R { - // If the value is nil, then we ignore it as it may be - // a sub-bucket. - if v == nil { - return nil - } - - r := bytes.NewReader(v) - payment, err := deserializeOutgoingPayment(r) - if err != nil { - return err - } - - payments = append(payments, payment) - return nil - }) - }, func() { - payments = nil - }) - if err != nil { - return nil, err - } - - return payments, nil -} - -// fetchPaymentStatus returns the payment status for outgoing payment. -// If status of the payment isn't found, it will default to "StatusUnknown". -// -// NOTE: Deprecated. Kept around for migration purposes. -func (db *DB) fetchPaymentStatus(paymentHash [32]byte) (PaymentStatus, er.R) { - var paymentStatus = StatusUnknown - err := kvdb.View(db, func(tx kvdb.RTx) er.R { - var err er.R - paymentStatus, err = fetchPaymentStatusTx(tx, paymentHash) - return err - }, func() { - paymentStatus = StatusUnknown - }) - if err != nil { - return StatusUnknown, err - } - - return paymentStatus, nil -} - -// fetchPaymentStatusTx is a helper method that returns the payment status for -// outgoing payment. If status of the payment isn't found, it will default to -// "StatusUnknown". It accepts the boltdb transactions such that this method -// can be composed into other atomic operations. -// -// NOTE: Deprecated. Kept around for migration purposes. 
-func fetchPaymentStatusTx(tx kvdb.RTx, paymentHash [32]byte) (PaymentStatus, er.R) { - // The default status for all payments that aren't recorded in database. - var paymentStatus = StatusUnknown - - bucket := tx.ReadBucket(paymentStatusBucket) - if bucket == nil { - return paymentStatus, nil - } - - paymentStatusBytes := bucket.Get(paymentHash[:]) - if paymentStatusBytes == nil { - return paymentStatus, nil - } - - paymentStatus.FromBytes(paymentStatusBytes) - - return paymentStatus, nil -} - -func serializeOutgoingPayment(w io.Writer, p *outgoingPayment) er.R { - var scratch [8]byte - - if err := serializeInvoiceLegacy(w, &p.Invoice); err != nil { - return err - } - - byteOrder.PutUint64(scratch[:], uint64(p.Fee)) - if _, err := util.Write(w, scratch[:]); err != nil { - return err - } - - // First write out the length of the bytes to prefix the value. - pathLen := uint32(len(p.Path)) - byteOrder.PutUint32(scratch[:4], pathLen) - if _, err := util.Write(w, scratch[:4]); err != nil { - return err - } - - // Then with the path written, we write out the series of public keys - // involved in the path. 
- for _, hop := range p.Path { - if _, err := util.Write(w, hop[:]); err != nil { - return err - } - } - - byteOrder.PutUint32(scratch[:4], p.TimeLockLength) - if _, err := util.Write(w, scratch[:4]); err != nil { - return err - } - - if _, err := util.Write(w, p.PaymentPreimage[:]); err != nil { - return err - } - - return nil -} - -func deserializeOutgoingPayment(r io.Reader) (*outgoingPayment, er.R) { - var scratch [8]byte - - p := &outgoingPayment{} - - inv, err := deserializeInvoiceLegacy(r) - if err != nil { - return nil, err - } - p.Invoice = inv - - if _, err := r.Read(scratch[:]); err != nil { - return nil, er.E(err) - } - p.Fee = lnwire.MilliSatoshi(byteOrder.Uint64(scratch[:])) - - if _, err := r.Read(scratch[:4]); err != nil { - return nil, er.E(err) - } - pathLen := byteOrder.Uint32(scratch[:4]) - - path := make([][33]byte, pathLen) - for i := uint32(0); i < pathLen; i++ { - if _, err := r.Read(path[i][:]); err != nil { - return nil, er.E(err) - } - } - p.Path = path - - if _, err := r.Read(scratch[:4]); err != nil { - return nil, er.E(err) - } - p.TimeLockLength = byteOrder.Uint32(scratch[:4]) - - if _, err := r.Read(p.PaymentPreimage[:]); err != nil { - return nil, er.E(err) - } - - return p, nil -} - -// serializePaymentAttemptInfoMigration9 is the serializePaymentAttemptInfo -// version as existed when migration #9 was created. We keep this around, along -// with the methods below to ensure that clients that upgrade will use the -// correct version of this method. 
-func serializePaymentAttemptInfoMigration9(w io.Writer, a *PaymentAttemptInfo) er.R { - if err := WriteElements(w, a.PaymentID, a.SessionKey); err != nil { - return err - } - - if err := serializeRouteMigration9(w, a.Route); err != nil { - return err - } - - return nil -} - -func serializeHopMigration9(w io.Writer, h *Hop) er.R { - if err := WriteElements(w, - h.PubKeyBytes[:], h.ChannelID, h.OutgoingTimeLock, - h.AmtToForward, - ); err != nil { - return err - } - - return nil -} - -func serializeRouteMigration9(w io.Writer, r Route) er.R { - if err := WriteElements(w, - r.TotalTimeLock, r.TotalAmount, r.SourcePubKey[:], - ); err != nil { - return err - } - - if err := WriteElements(w, uint32(len(r.Hops))); err != nil { - return err - } - - for _, h := range r.Hops { - if err := serializeHopMigration9(w, h); err != nil { - return err - } - } - - return nil -} - -func deserializePaymentAttemptInfoMigration9(r io.Reader) (*PaymentAttemptInfo, er.R) { - a := &PaymentAttemptInfo{} - err := ReadElements(r, &a.PaymentID, &a.SessionKey) - if err != nil { - return nil, err - } - a.Route, err = deserializeRouteMigration9(r) - if err != nil { - return nil, err - } - return a, nil -} - -func deserializeRouteMigration9(r io.Reader) (Route, er.R) { - rt := Route{} - if err := ReadElements(r, - &rt.TotalTimeLock, &rt.TotalAmount, - ); err != nil { - return rt, err - } - - var pub []byte - if err := ReadElements(r, &pub); err != nil { - return rt, err - } - copy(rt.SourcePubKey[:], pub) - - var numHops uint32 - if err := ReadElements(r, &numHops); err != nil { - return rt, err - } - - var hops []*Hop - for i := uint32(0); i < numHops; i++ { - hop, err := deserializeHopMigration9(r) - if err != nil { - return rt, err - } - hops = append(hops, hop) - } - rt.Hops = hops - - return rt, nil -} - -func deserializeHopMigration9(r io.Reader) (*Hop, er.R) { - h := &Hop{} - - var pub []byte - if err := ReadElements(r, &pub); err != nil { - return nil, err - } - copy(h.PubKeyBytes[:], pub) 
- - if err := ReadElements(r, - &h.ChannelID, &h.OutgoingTimeLock, &h.AmtToForward, - ); err != nil { - return nil, err - } - - return h, nil -} - -// fetchPaymentsMigration9 returns all sent payments found in the DB using the -// payment attempt info format that was present as of migration #9. We need -// this as otherwise, the current FetchPayments version will use the latest -// decoding format. Note that we only need this for the -// TestOutgoingPaymentsMigration migration test case. -func (db *DB) fetchPaymentsMigration9() ([]*Payment, er.R) { - var payments []*Payment - - err := kvdb.View(db, func(tx kvdb.RTx) er.R { - paymentsBucket := tx.ReadBucket(paymentsRootBucket) - if paymentsBucket == nil { - return nil - } - - return paymentsBucket.ForEach(func(k, v []byte) er.R { - bucket := paymentsBucket.NestedReadBucket(k) - if bucket == nil { - // We only expect sub-buckets to be found in - // this top-level bucket. - return er.Errorf("non bucket element in " + - "payments bucket") - } - - p, err := fetchPaymentMigration9(bucket) - if err != nil { - return err - } - - payments = append(payments, p) - - // For older versions of lnd, duplicate payments to a - // payment has was possible. These will be found in a - // sub-bucket indexed by their sequence number if - // available. - dup := bucket.NestedReadBucket(paymentDuplicateBucket) - if dup == nil { - return nil - } - - return dup.ForEach(func(k, v []byte) er.R { - subBucket := dup.NestedReadBucket(k) - if subBucket == nil { - // We one bucket for each duplicate to - // be found. - return er.Errorf("non bucket element" + - "in duplicate bucket") - } - - p, err := fetchPaymentMigration9(subBucket) - if err != nil { - return err - } - - payments = append(payments, p) - return nil - }) - }) - }, func() { - payments = nil - }) - if err != nil { - return nil, err - } - - // Before returning, sort the payments by their sequence number. 
- sort.Slice(payments, func(i, j int) bool { - return payments[i].sequenceNum < payments[j].sequenceNum - }) - - return payments, nil -} - -func fetchPaymentMigration9(bucket kvdb.RBucket) (*Payment, er.R) { - var ( - err er.R - p = &Payment{} - ) - - seqBytes := bucket.Get(paymentSequenceKey) - if seqBytes == nil { - return nil, er.Errorf("sequence number not found") - } - - p.sequenceNum = binary.BigEndian.Uint64(seqBytes) - - // Get the payment status. - p.Status = fetchPaymentStatus(bucket) - - // Get the PaymentCreationInfo. - b := bucket.Get(paymentCreationInfoKey) - if b == nil { - return nil, er.Errorf("creation info not found") - } - - r := bytes.NewReader(b) - p.Info, err = deserializePaymentCreationInfo(r) - if err != nil { - return nil, err - - } - - // Get the PaymentAttemptInfo. This can be unset. - b = bucket.Get(paymentAttemptInfoKey) - if b != nil { - r = bytes.NewReader(b) - p.Attempt, err = deserializePaymentAttemptInfoMigration9(r) - if err != nil { - return nil, err - } - } - - // Get the payment preimage. This is only found for - // completed payments. - b = bucket.Get(paymentSettleInfoKey) - if b != nil { - var preimg lntypes.Preimage - copy(preimg[:], b[:]) - p.PaymentPreimage = &preimg - } - - // Get failure reason if available. 
- b = bucket.Get(paymentFailInfoKey) - if b != nil { - reason := FailureReason(b[0]) - p.Failure = &reason - } - - return p, nil -} diff --git a/lnd/channeldb/migration_01_to_11/migration_10_route_tlv_records.go b/lnd/channeldb/migration_01_to_11/migration_10_route_tlv_records.go deleted file mode 100644 index 3d146878..00000000 --- a/lnd/channeldb/migration_01_to_11/migration_10_route_tlv_records.go +++ /dev/null @@ -1,237 +0,0 @@ -package migration_01_to_11 - -import ( - "bytes" - "io" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/pktlog/log" -) - -// MigrateRouteSerialization migrates the way we serialize routes across the -// entire database. At the time of writing of this migration, this includes our -// payment attempts, as well as the payment results in mission control. -func MigrateRouteSerialization(tx kvdb.RwTx) er.R { - // First, we'll do all the payment attempts. - rootPaymentBucket := tx.ReadWriteBucket(paymentsRootBucket) - if rootPaymentBucket == nil { - return nil - } - - // As we can't mutate a bucket while we're iterating over it with - // ForEach, we'll need to collect all the known payment hashes in - // memory first. - var payHashes [][]byte - err := rootPaymentBucket.ForEach(func(k, v []byte) er.R { - if v != nil { - return nil - } - - payHashes = append(payHashes, k) - return nil - }) - if err != nil { - return err - } - - // Now that we have all the payment hashes, we can carry out the - // migration itself. - for _, payHash := range payHashes { - payHashBucket := rootPaymentBucket.NestedReadWriteBucket(payHash) - - // First, we'll migrate the main (non duplicate) payment to - // this hash. - err := migrateAttemptEncoding(tx, payHashBucket) - if err != nil { - return err - } - - // Now that we've migrated the main payment, we'll also check - // for any duplicate payments to the same payment hash. 
- dupBucket := payHashBucket.NestedReadWriteBucket(paymentDuplicateBucket) - - // If there's no dup bucket, then we can move on to the next - // payment. - if dupBucket == nil { - continue - } - - // Otherwise, we'll now iterate through all the duplicate pay - // hashes and migrate those. - var dupSeqNos [][]byte - err = dupBucket.ForEach(func(k, v []byte) er.R { - dupSeqNos = append(dupSeqNos, k) - return nil - }) - if err != nil { - return err - } - - // Now in this second pass, we'll re-serialize their duplicate - // payment attempts under the new encoding. - for _, seqNo := range dupSeqNos { - dupPayHashBucket := dupBucket.NestedReadWriteBucket(seqNo) - err := migrateAttemptEncoding(tx, dupPayHashBucket) - if err != nil { - return err - } - } - } - - log.Infof("Migration of route/hop serialization complete!") - - log.Infof("Migrating to new mission control store by clearing " + - "existing data") - - resultsKey := []byte("missioncontrol-results") - err = tx.DeleteTopLevelBucket(resultsKey) - if err != nil && !kvdb.ErrBucketNotFound.Is(err) { - return err - } - - log.Infof("Migration to new mission control completed!") - - return nil -} - -// migrateAttemptEncoding migrates payment attempts using the legacy format to -// the new format. -func migrateAttemptEncoding(tx kvdb.RwTx, payHashBucket kvdb.RwBucket) er.R { - payAttemptBytes := payHashBucket.Get(paymentAttemptInfoKey) - if payAttemptBytes == nil { - return nil - } - - // For our migration, we'll first read out the existing payment attempt - // using the legacy serialization of the attempt. - payAttemptReader := bytes.NewReader(payAttemptBytes) - payAttempt, err := deserializePaymentAttemptInfoLegacy( - payAttemptReader, - ) - if err != nil { - return err - } - - // Now that we have the old attempts, we'll explicitly mark this as - // needing a legacy payload, since after this migration, the modern - // payload will be the default if signalled. 
- for _, hop := range payAttempt.Route.Hops { - hop.LegacyPayload = true - } - - // Finally, we'll write out the payment attempt using the new encoding. - var b bytes.Buffer - err = serializePaymentAttemptInfo(&b, payAttempt) - if err != nil { - return err - } - - return payHashBucket.Put(paymentAttemptInfoKey, b.Bytes()) -} - -func deserializePaymentAttemptInfoLegacy(r io.Reader) (*PaymentAttemptInfo, er.R) { - a := &PaymentAttemptInfo{} - err := ReadElements(r, &a.PaymentID, &a.SessionKey) - if err != nil { - return nil, err - } - a.Route, err = deserializeRouteLegacy(r) - if err != nil { - return nil, err - } - return a, nil -} - -func serializePaymentAttemptInfoLegacy(w io.Writer, a *PaymentAttemptInfo) er.R { - if err := WriteElements(w, a.PaymentID, a.SessionKey); err != nil { - return err - } - - if err := serializeRouteLegacy(w, a.Route); err != nil { - return err - } - - return nil -} - -func deserializeHopLegacy(r io.Reader) (*Hop, er.R) { - h := &Hop{} - - var pub []byte - if err := ReadElements(r, &pub); err != nil { - return nil, err - } - copy(h.PubKeyBytes[:], pub) - - if err := ReadElements(r, - &h.ChannelID, &h.OutgoingTimeLock, &h.AmtToForward, - ); err != nil { - return nil, err - } - - return h, nil -} - -func serializeHopLegacy(w io.Writer, h *Hop) er.R { - if err := WriteElements(w, - h.PubKeyBytes[:], h.ChannelID, h.OutgoingTimeLock, - h.AmtToForward, - ); err != nil { - return err - } - - return nil -} - -func deserializeRouteLegacy(r io.Reader) (Route, er.R) { - rt := Route{} - if err := ReadElements(r, - &rt.TotalTimeLock, &rt.TotalAmount, - ); err != nil { - return rt, err - } - - var pub []byte - if err := ReadElements(r, &pub); err != nil { - return rt, err - } - copy(rt.SourcePubKey[:], pub) - - var numHops uint32 - if err := ReadElements(r, &numHops); err != nil { - return rt, err - } - - var hops []*Hop - for i := uint32(0); i < numHops; i++ { - hop, err := deserializeHopLegacy(r) - if err != nil { - return rt, err - } - hops = 
append(hops, hop) - } - rt.Hops = hops - - return rt, nil -} - -func serializeRouteLegacy(w io.Writer, r Route) er.R { - if err := WriteElements(w, - r.TotalTimeLock, r.TotalAmount, r.SourcePubKey[:], - ); err != nil { - return err - } - - if err := WriteElements(w, uint32(len(r.Hops))); err != nil { - return err - } - - for _, h := range r.Hops { - if err := serializeHopLegacy(w, h); err != nil { - return err - } - } - - return nil -} diff --git a/lnd/channeldb/migration_01_to_11/migration_11_invoices.go b/lnd/channeldb/migration_01_to_11/migration_11_invoices.go deleted file mode 100644 index 8af06bee..00000000 --- a/lnd/channeldb/migration_01_to_11/migration_11_invoices.go +++ /dev/null @@ -1,231 +0,0 @@ -package migration_01_to_11 - -import ( - "bytes" - "io" - - litecoinCfg "github.com/ltcsuite/ltcd/chaincfg" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - bitcoinCfg "github.com/pkt-cash/pktd/chaincfg" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/lnd/channeldb/migration_01_to_11/zpay32" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/pktlog/log" - "github.com/pkt-cash/pktd/wire" -) - -// MigrateInvoices adds invoice htlcs and a separate cltv delta field to the -// invoices. -func MigrateInvoices(tx kvdb.RwTx) er.R { - log.Infof("Migrating invoices to new invoice format") - - invoiceB := tx.ReadWriteBucket(invoiceBucket) - if invoiceB == nil { - return nil - } - - // Iterate through the entire key space of the top-level invoice bucket. - // If key with a non-nil value stores the next invoice ID which maps to - // the corresponding invoice. Store those keys first, because it isn't - // safe to modify the bucket inside a ForEach loop. 
- var invoiceKeys [][]byte - err := invoiceB.ForEach(func(k, v []byte) er.R { - if v == nil { - return nil - } - - invoiceKeys = append(invoiceKeys, k) - - return nil - }) - if err != nil { - return err - } - - nets := []*bitcoinCfg.Params{ - &bitcoinCfg.MainNetParams, &bitcoinCfg.SimNetParams, - &bitcoinCfg.RegressionNetParams, &bitcoinCfg.TestNet3Params, - } - - ltcNets := []*litecoinCfg.Params{ - &litecoinCfg.MainNetParams, &litecoinCfg.SimNetParams, - &litecoinCfg.RegressionNetParams, &litecoinCfg.TestNet4Params, - } - for _, net := range ltcNets { - var convertedNet bitcoinCfg.Params - convertedNet.Bech32HRPSegwit = net.Bech32HRPSegwit - nets = append(nets, &convertedNet) - } - - // Iterate over all stored keys and migrate the invoices. - for _, k := range invoiceKeys { - v := invoiceB.Get(k) - - // Deserialize the invoice with the deserializing function that - // was in use for this version of the database. - invoiceReader := bytes.NewReader(v) - invoice, err := deserializeInvoiceLegacy(invoiceReader) - if err != nil { - return err - } - - if invoice.Terms.State == ContractAccepted { - return er.Errorf("cannot upgrade with invoice(s) " + - "in accepted state, see release notes") - } - - // Try to decode the payment request for every possible net to - // avoid passing a the active network to channeldb. This would - // be a layering violation, while this migration is only running - // once and will likely be removed in the future. - var payReq *zpay32.Invoice - for _, net := range nets { - payReq, err = zpay32.Decode( - string(invoice.PaymentRequest), net, - ) - if err == nil { - break - } - } - if payReq == nil { - return er.Errorf("cannot decode payreq") - } - invoice.FinalCltvDelta = int32(payReq.MinFinalCLTVExpiry()) - invoice.Expiry = payReq.Expiry() - - // Serialize the invoice in the new format and use it to replace - // the old invoice in the database. 
- var buf bytes.Buffer - if err := serializeInvoice(&buf, &invoice); err != nil { - return err - } - - err = invoiceB.Put(k, buf.Bytes()) - if err != nil { - return err - } - } - - log.Infof("Migration of invoices completed!") - return nil -} - -func deserializeInvoiceLegacy(r io.Reader) (Invoice, er.R) { - var err er.R - invoice := Invoice{} - - // TODO(roasbeef): use read full everywhere - invoice.Memo, err = wire.ReadVarBytes(r, 0, MaxMemoSize, "") - if err != nil { - return invoice, err - } - invoice.Receipt, err = wire.ReadVarBytes(r, 0, MaxReceiptSize, "") - if err != nil { - return invoice, err - } - - invoice.PaymentRequest, err = wire.ReadVarBytes(r, 0, MaxPaymentRequestSize, "") - if err != nil { - return invoice, err - } - - birthBytes, err := wire.ReadVarBytes(r, 0, 300, "birth") - if err != nil { - return invoice, err - } - if err := invoice.CreationDate.UnmarshalBinary(birthBytes); err != nil { - return invoice, er.E(err) - } - - settledBytes, err := wire.ReadVarBytes(r, 0, 300, "settled") - if err != nil { - return invoice, err - } - if err := invoice.SettleDate.UnmarshalBinary(settledBytes); err != nil { - return invoice, er.E(err) - } - - if _, err := util.ReadFull(r, invoice.Terms.PaymentPreimage[:]); err != nil { - return invoice, err - } - var scratch [8]byte - if _, err := util.ReadFull(r, scratch[:]); err != nil { - return invoice, err - } - invoice.Terms.Value = lnwire.MilliSatoshi(byteOrder.Uint64(scratch[:])) - - if err := util.ReadBin(r, byteOrder, &invoice.Terms.State); err != nil { - return invoice, err - } - - if err := util.ReadBin(r, byteOrder, &invoice.AddIndex); err != nil { - return invoice, err - } - if err := util.ReadBin(r, byteOrder, &invoice.SettleIndex); err != nil { - return invoice, err - } - if err := util.ReadBin(r, byteOrder, &invoice.AmtPaid); err != nil { - return invoice, err - } - - return invoice, nil -} - -// serializeInvoiceLegacy serializes an invoice in the format of the previous db -// version. 
-func serializeInvoiceLegacy(w io.Writer, i *Invoice) er.R { - if err := wire.WriteVarBytes(w, 0, i.Memo[:]); err != nil { - return err - } - if err := wire.WriteVarBytes(w, 0, i.Receipt[:]); err != nil { - return err - } - if err := wire.WriteVarBytes(w, 0, i.PaymentRequest[:]); err != nil { - return err - } - - birthBytes, errr := i.CreationDate.MarshalBinary() - if errr != nil { - return er.E(errr) - } - - if err := wire.WriteVarBytes(w, 0, birthBytes); err != nil { - return err - } - - settleBytes, errr := i.SettleDate.MarshalBinary() - if errr != nil { - return er.E(errr) - } - - if err := wire.WriteVarBytes(w, 0, settleBytes); err != nil { - return err - } - - if _, err := util.Write(w, i.Terms.PaymentPreimage[:]); err != nil { - return err - } - - var scratch [8]byte - byteOrder.PutUint64(scratch[:], uint64(i.Terms.Value)) - if _, err := util.Write(w, scratch[:]); err != nil { - return err - } - - if err := util.WriteBin(w, byteOrder, i.Terms.State); err != nil { - return err - } - - if err := util.WriteBin(w, byteOrder, i.AddIndex); err != nil { - return err - } - if err := util.WriteBin(w, byteOrder, i.SettleIndex); err != nil { - return err - } - if err := util.WriteBin(w, byteOrder, int64(i.AmtPaid)); err != nil { - return err - } - - return nil -} diff --git a/lnd/channeldb/migration_01_to_11/migration_11_invoices_test.go b/lnd/channeldb/migration_01_to_11/migration_11_invoices_test.go deleted file mode 100644 index ff7264d5..00000000 --- a/lnd/channeldb/migration_01_to_11/migration_11_invoices_test.go +++ /dev/null @@ -1,184 +0,0 @@ -package migration_01_to_11 - -import ( - "bytes" - "testing" - "time" - - litecoinCfg "github.com/ltcsuite/ltcd/chaincfg" - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil/er" - bitcoinCfg "github.com/pkt-cash/pktd/chaincfg" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/lnd/zpay32" -) - -var ( - testPrivKeyBytes = []byte{ - 0x2b, 0xd8, 0x06, 0xc9, 0x7f, 0x0e, 0x00, 0xaf, - 
0x1a, 0x1f, 0xc3, 0x32, 0x8f, 0xa7, 0x63, 0xa9, - 0x26, 0x97, 0x23, 0xc8, 0xdb, 0x8f, 0xac, 0x4f, - 0x93, 0xaf, 0x71, 0xdb, 0x18, 0x6d, 0x6e, 0x90, - } - - testCltvDelta = int32(50) -) - -// beforeMigrationFuncV11 insert the test invoices in the database. -func beforeMigrationFuncV11(t *testing.T, d *DB, invoices []Invoice) { - err := kvdb.Update(d, func(tx kvdb.RwTx) er.R { - invoicesBucket, err := tx.CreateTopLevelBucket( - invoiceBucket, - ) - if err != nil { - return err - } - - invoiceNum := uint32(1) - for _, invoice := range invoices { - var invoiceKey [4]byte - byteOrder.PutUint32(invoiceKey[:], invoiceNum) - invoiceNum++ - - var buf bytes.Buffer - err := serializeInvoiceLegacy(&buf, &invoice) // nolint:scopelint - if err != nil { - return err - } - - err = invoicesBucket.Put( - invoiceKey[:], buf.Bytes(), - ) - if err != nil { - return err - } - } - - return nil - }, func() {}) - if err != nil { - t.Fatal(err) - } -} - -// TestMigrateInvoices checks that invoices are migrated correctly. -func TestMigrateInvoices(t *testing.T) { - t.Parallel() - - payReqBtc, err := getPayReq(&bitcoinCfg.MainNetParams) - if err != nil { - t.Fatal(err) - } - - var ltcNetParams bitcoinCfg.Params - ltcNetParams.Bech32HRPSegwit = litecoinCfg.MainNetParams.Bech32HRPSegwit - payReqLtc, err := getPayReq(<cNetParams) - if err != nil { - t.Fatal(err) - } - - invoices := []Invoice{ - { - PaymentRequest: []byte(payReqBtc), - }, - { - PaymentRequest: []byte(payReqLtc), - }, - } - - // Verify that all invoices were migrated. 
- afterMigrationFunc := func(d *DB) { - dbInvoices, err := d.FetchAllInvoices(false) - if err != nil { - t.Fatalf("unable to fetch invoices: %v", err) - } - - if len(invoices) != len(dbInvoices) { - t.Fatalf("expected %d invoices, got %d", len(invoices), - len(dbInvoices)) - } - - for _, dbInvoice := range dbInvoices { - if dbInvoice.FinalCltvDelta != testCltvDelta { - t.Fatal("incorrect final cltv delta") - } - if dbInvoice.Expiry != 3600*time.Second { - t.Fatal("incorrect expiry") - } - if len(dbInvoice.Htlcs) != 0 { - t.Fatal("expected no htlcs after migration") - } - } - } - - applyMigration(t, - func(d *DB) { beforeMigrationFuncV11(t, d, invoices) }, - afterMigrationFunc, - MigrateInvoices, - false) -} - -// TestMigrateInvoicesHodl checks that a hodl invoice in the accepted state -// fails the migration. -func TestMigrateInvoicesHodl(t *testing.T) { - t.Parallel() - - payReqBtc, err := getPayReq(&bitcoinCfg.MainNetParams) - if err != nil { - t.Fatal(err) - } - - invoices := []Invoice{ - { - PaymentRequest: []byte(payReqBtc), - Terms: ContractTerm{ - State: ContractAccepted, - }, - }, - } - - applyMigration(t, - func(d *DB) { beforeMigrationFuncV11(t, d, invoices) }, - func(d *DB) {}, - MigrateInvoices, - true) -} - -// signDigestCompact generates a test signature to be used in the generation of -// test payment requests. -func signDigestCompact(hash []byte) ([]byte, er.R) { - // Should the signature reference a compressed public key or not. - isCompressedKey := true - - privKey, _ := btcec.PrivKeyFromBytes(btcec.S256(), testPrivKeyBytes) - - // btcec.SignCompact returns a pubkey-recoverable signature - sig, err := btcec.SignCompact( - btcec.S256(), privKey, hash, isCompressedKey, - ) - if err != nil { - return nil, er.Errorf("can't sign the hash: %v", err) - } - - return sig, nil -} - -// getPayReq creates a payment request for the given net. 
-func getPayReq(net *bitcoinCfg.Params) (string, er.R) { - options := []func(*zpay32.Invoice){ - zpay32.CLTVExpiry(uint64(testCltvDelta)), - zpay32.Description("test"), - } - - payReq, err := zpay32.NewInvoice( - net, [32]byte{}, time.Unix(1, 0), options..., - ) - if err != nil { - return "", err - } - return payReq.Encode( - zpay32.MessageSigner{ - SignCompact: signDigestCompact, - }, - ) -} diff --git a/lnd/channeldb/migration_01_to_11/migrations.go b/lnd/channeldb/migration_01_to_11/migrations.go deleted file mode 100644 index ab3c1499..00000000 --- a/lnd/channeldb/migration_01_to_11/migrations.go +++ /dev/null @@ -1,939 +0,0 @@ -package migration_01_to_11 - -import ( - "bytes" - "crypto/sha256" - "encoding/binary" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/pktlog/log" -) - -// MigrateNodeAndEdgeUpdateIndex is a migration function that will update the -// database from version 0 to version 1. In version 1, we add two new indexes -// (one for nodes and one for edges) to keep track of the last time a node or -// edge was updated on the network. These new indexes allow us to implement the -// new graph sync protocol added. -func MigrateNodeAndEdgeUpdateIndex(tx kvdb.RwTx) er.R { - // First, we'll populating the node portion of the new index. Before we - // can add new values to the index, we'll first create the new bucket - // where these items will be housed. 
- nodes, err := tx.CreateTopLevelBucket(nodeBucket) - if err != nil { - return er.Errorf("unable to create node bucket: %v", err) - } - nodeUpdateIndex, err := nodes.CreateBucketIfNotExists( - nodeUpdateIndexBucket, - ) - if err != nil { - return er.Errorf("unable to create node update index: %v", err) - } - - log.Infof("Populating new node update index bucket") - - // Now that we know the bucket has been created, we'll iterate over the - // entire node bucket so we can add the (updateTime || nodePub) key - // into the node update index. - err = nodes.ForEach(func(nodePub, nodeInfo []byte) er.R { - if len(nodePub) != 33 { - return nil - } - - log.Tracef("Adding %x to node update index", nodePub) - - // The first 8 bytes of a node's serialize data is the update - // time, so we can extract that without decoding the entire - // structure. - updateTime := nodeInfo[:8] - - // Now that we have the update time, we can construct the key - // to insert into the index. - var indexKey [8 + 33]byte - copy(indexKey[:8], updateTime) - copy(indexKey[8:], nodePub) - - return nodeUpdateIndex.Put(indexKey[:], nil) - }) - if err != nil { - return er.Errorf("unable to update node indexes: %v", err) - } - - log.Infof("Populating new edge update index bucket") - - // With the set of nodes updated, we'll now update all edges to have a - // corresponding entry in the edge update index. - edges, err := tx.CreateTopLevelBucket(edgeBucket) - if err != nil { - return er.Errorf("unable to create edge bucket: %v", err) - } - edgeUpdateIndex, err := edges.CreateBucketIfNotExists( - edgeUpdateIndexBucket, - ) - if err != nil { - return er.Errorf("unable to create edge update index: %v", err) - } - - // We'll now run through each edge policy in the database, and update - // the index to ensure each edge has the proper record. 
- err = edges.ForEach(func(edgeKey, edgePolicyBytes []byte) er.R { - if len(edgeKey) != 41 { - return nil - } - - // Now that we know this is the proper record, we'll grab the - // channel ID (last 8 bytes of the key), and then decode the - // edge policy so we can access the update time. - chanID := edgeKey[33:] - edgePolicyReader := bytes.NewReader(edgePolicyBytes) - - edgePolicy, err := deserializeChanEdgePolicy( - edgePolicyReader, nodes, - ) - if err != nil { - return err - } - - log.Tracef("Adding chan_id=%v to edge update index", - edgePolicy.ChannelID) - - // We'll now construct the index key using the channel ID, and - // the last time it was updated: (updateTime || chanID). - var indexKey [8 + 8]byte - byteOrder.PutUint64( - indexKey[:], uint64(edgePolicy.LastUpdate.Unix()), - ) - copy(indexKey[8:], chanID) - - return edgeUpdateIndex.Put(indexKey[:], nil) - }) - if err != nil { - return er.Errorf("unable to update edge indexes: %v", err) - } - - log.Infof("Migration to node and edge update indexes complete!") - - return nil -} - -// MigrateInvoiceTimeSeries is a database migration that assigns all existing -// invoices an index in the add and/or the settle index. Additionally, all -// existing invoices will have their bytes padded out in order to encode the -// add+settle index as well as the amount paid. -func MigrateInvoiceTimeSeries(tx kvdb.RwTx) er.R { - invoices, err := tx.CreateTopLevelBucket(invoiceBucket) - if err != nil { - return err - } - - addIndex, err := invoices.CreateBucketIfNotExists( - addIndexBucket, - ) - if err != nil { - return err - } - settleIndex, err := invoices.CreateBucketIfNotExists( - settleIndexBucket, - ) - if err != nil { - return err - } - - log.Infof("Migrating invoice database to new time series format") - - // Now that we have all the buckets we need, we'll run through each - // invoice in the database, and update it to reflect the new format - // expected post migration. 
- // NOTE: we store the converted invoices and put them back into the - // database after the loop, since modifying the bucket within the - // ForEach loop is not safe. - var invoicesKeys [][]byte - var invoicesValues [][]byte - err = invoices.ForEach(func(invoiceNum, invoiceBytes []byte) er.R { - // If this is a sub bucket, then we'll skip it. - if invoiceBytes == nil { - return nil - } - - // First, we'll make a copy of the encoded invoice bytes. - invoiceBytesCopy := make([]byte, len(invoiceBytes)) - copy(invoiceBytesCopy, invoiceBytes) - - // With the bytes copied over, we'll append 24 additional - // bytes. We do this so we can decode the invoice under the new - // serialization format. - padding := bytes.Repeat([]byte{0}, 24) - invoiceBytesCopy = append(invoiceBytesCopy, padding...) - - invoiceReader := bytes.NewReader(invoiceBytesCopy) - invoice, errr := deserializeInvoiceLegacy(invoiceReader) - if errr != nil { - return er.Errorf("unable to decode invoice: %v", errr) - } - - // Now that we have the fully decoded invoice, we can update - // the various indexes that we're added, and finally the - // invoice itself before re-inserting it. - - // First, we'll get the new sequence in the addIndex in order - // to create the proper mapping. - nextAddSeqNo, err := addIndex.NextSequence() - if err != nil { - return err - } - var seqNoBytes [8]byte - byteOrder.PutUint64(seqNoBytes[:], nextAddSeqNo) - err = addIndex.Put(seqNoBytes[:], invoiceNum[:]) - if err != nil { - return err - } - - log.Tracef("Adding invoice (preimage=%x, add_index=%v) to add "+ - "time series", invoice.Terms.PaymentPreimage[:], - nextAddSeqNo) - - // Next, we'll check if the invoice has been settled or not. If - // so, then we'll also add it to the settle index. 
- var nextSettleSeqNo uint64 - if invoice.Terms.State == ContractSettled { - nextSettleSeqNo, err = settleIndex.NextSequence() - if err != nil { - return err - } - - var seqNoBytes [8]byte - byteOrder.PutUint64(seqNoBytes[:], nextSettleSeqNo) - err := settleIndex.Put(seqNoBytes[:], invoiceNum) - if err != nil { - return err - } - - invoice.AmtPaid = invoice.Terms.Value - - log.Tracef("Adding invoice (preimage=%x, "+ - "settle_index=%v) to add time series", - invoice.Terms.PaymentPreimage[:], - nextSettleSeqNo) - } - - // Finally, we'll update the invoice itself with the new - // indexing information as well as the amount paid if it has - // been settled or not. - invoice.AddIndex = nextAddSeqNo - invoice.SettleIndex = nextSettleSeqNo - - // We've fully migrated an invoice, so we'll now update the - // invoice in-place. - var b bytes.Buffer - if err := serializeInvoiceLegacy(&b, &invoice); err != nil { - return err - } - - // Save the key and value pending update for after the ForEach - // is done. - invoicesKeys = append(invoicesKeys, invoiceNum) - invoicesValues = append(invoicesValues, b.Bytes()) - return nil - }) - if err != nil { - return err - } - - // Now put the converted invoices into the DB. - for i := range invoicesKeys { - key := invoicesKeys[i] - value := invoicesValues[i] - if err := invoices.Put(key, value); err != nil { - return err - } - } - - log.Infof("Migration to invoice time series index complete!") - - return nil -} - -// MigrateInvoiceTimeSeriesOutgoingPayments is a follow up to the -// migrateInvoiceTimeSeries migration. As at the time of writing, the -// OutgoingPayment struct embeddeds an instance of the Invoice struct. As a -// result, we also need to migrate the internal invoice to the new format. 
-func MigrateInvoiceTimeSeriesOutgoingPayments(tx kvdb.RwTx) er.R { - payBucket := tx.ReadWriteBucket(paymentBucket) - if payBucket == nil { - return nil - } - - log.Infof("Migrating invoice database to new outgoing payment format") - - // We store the keys and values we want to modify since it is not safe - // to modify them directly within the ForEach loop. - var paymentKeys [][]byte - var paymentValues [][]byte - err := payBucket.ForEach(func(payID, paymentBytes []byte) er.R { - log.Tracef("Migrating payment %x", payID[:]) - - // The internal invoices for each payment only contain a - // populated contract term, and creation date, as a result, - // most of the bytes will be "empty". - - // We'll calculate the end of the invoice index assuming a - // "minimal" index that's embedded within the greater - // OutgoingPayment. The breakdown is: - // 3 bytes empty var bytes, 16 bytes creation date, 16 bytes - // settled date, 32 bytes payment pre-image, 8 bytes value, 1 - // byte settled. - endOfInvoiceIndex := 1 + 1 + 1 + 16 + 16 + 32 + 8 + 1 - - // We'll now extract the prefix of the pure invoice embedded - // within. - invoiceBytes := paymentBytes[:endOfInvoiceIndex] - - // With the prefix extracted, we'll copy over the invoice, and - // also add padding for the new 24 bytes of fields, and finally - // append the remainder of the outgoing payment. - paymentCopy := make([]byte, len(invoiceBytes)) - copy(paymentCopy[:], invoiceBytes) - - padding := bytes.Repeat([]byte{0}, 24) - paymentCopy = append(paymentCopy, padding...) - paymentCopy = append( - paymentCopy, paymentBytes[endOfInvoiceIndex:]..., - ) - - // At this point, we now have the new format of the outgoing - // payments, we'll attempt to deserialize it to ensure the - // bytes are properly formatted. 
- paymentReader := bytes.NewReader(paymentCopy) - _, err := deserializeOutgoingPayment(paymentReader) - if err != nil { - return er.Errorf("unable to deserialize payment: %v", err) - } - - // Now that we know the modifications was successful, we'll - // store it to our slice of keys and values, and write it back - // to disk in the new format after the ForEach loop is over. - paymentKeys = append(paymentKeys, payID) - paymentValues = append(paymentValues, paymentCopy) - return nil - }) - if err != nil { - return err - } - - // Finally store the updated payments to the bucket. - for i := range paymentKeys { - key := paymentKeys[i] - value := paymentValues[i] - if err := payBucket.Put(key, value); err != nil { - return err - } - } - - log.Infof("Migration to outgoing payment invoices complete!") - - return nil -} - -// MigrateEdgePolicies is a migration function that will update the edges -// bucket. It ensure that edges with unknown policies will also have an entry -// in the bucket. After the migration, there will be two edge entries for -// every channel, regardless of whether the policies are known. -func MigrateEdgePolicies(tx kvdb.RwTx) er.R { - nodes := tx.ReadWriteBucket(nodeBucket) - if nodes == nil { - return nil - } - - edges := tx.ReadWriteBucket(edgeBucket) - if edges == nil { - return nil - } - - edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket) - if edgeIndex == nil { - return nil - } - - // checkKey gets the policy from the database with a low-level call - // so that it is still possible to distinguish between unknown and - // not present. 
- checkKey := func(channelId uint64, keyBytes []byte) er.R { - var channelID [8]byte - byteOrder.PutUint64(channelID[:], channelId) - - _, err := fetchChanEdgePolicy(edges, - channelID[:], keyBytes, nodes) - - if ErrEdgeNotFound.Is(err) { - log.Tracef("Adding unknown edge policy present for node %x, channel %v", - keyBytes, channelId) - - err := putChanEdgePolicyUnknown(edges, channelId, keyBytes) - if err != nil { - return err - } - - return nil - } - - return err - } - - // Iterate over all channels and check both edge policies. - err := edgeIndex.ForEach(func(chanID, edgeInfoBytes []byte) er.R { - infoReader := bytes.NewReader(edgeInfoBytes) - edgeInfo, err := deserializeChanEdgeInfo(infoReader) - if err != nil { - return err - } - - for _, key := range [][]byte{edgeInfo.NodeKey1Bytes[:], - edgeInfo.NodeKey2Bytes[:]} { - - if err := checkKey(edgeInfo.ChannelID, key); err != nil { - return err - } - } - - return nil - }) - - if err != nil { - return er.Errorf("unable to update edge policies: %v", err) - } - - log.Infof("Migration of edge policies complete!") - - return nil -} - -// PaymentStatusesMigration is a database migration intended for adding payment -// statuses for each existing payment entity in bucket to be able control -// transitions of statuses and prevent cases such as double payment -func PaymentStatusesMigration(tx kvdb.RwTx) er.R { - // Get the bucket dedicated to storing statuses of payments, - // where a key is payment hash, value is payment status. - paymentStatuses, err := tx.CreateTopLevelBucket(paymentStatusBucket) - if err != nil { - return err - } - - log.Infof("Migrating database to support payment statuses") - - circuitAddKey := []byte("circuit-adds") - circuits := tx.ReadWriteBucket(circuitAddKey) - if circuits != nil { - log.Infof("Marking all known circuits with status InFlight") - - err = circuits.ForEach(func(k, v []byte) er.R { - // Parse the first 8 bytes as the short chan ID for the - // circuit. 
We'll skip all short chan IDs are not - // locally initiated, which includes all non-zero short - // chan ids. - chanID := binary.BigEndian.Uint64(k[:8]) - if chanID != 0 { - return nil - } - - // The payment hash is the third item in the serialized - // payment circuit. The first two items are an AddRef - // (10 bytes) and the incoming circuit key (16 bytes). - const payHashOffset = 10 + 16 - - paymentHash := v[payHashOffset : payHashOffset+32] - - return paymentStatuses.Put( - paymentHash[:], StatusInFlight.Bytes(), - ) - }) - if err != nil { - return err - } - } - - log.Infof("Marking all existing payments with status Completed") - - // Get the bucket dedicated to storing payments - bucket := tx.ReadWriteBucket(paymentBucket) - if bucket == nil { - return nil - } - - // For each payment in the bucket, deserialize the payment and mark it - // as completed. - err = bucket.ForEach(func(k, v []byte) er.R { - // Ignores if it is sub-bucket. - if v == nil { - return nil - } - - r := bytes.NewReader(v) - payment, err := deserializeOutgoingPayment(r) - if err != nil { - return err - } - - // Calculate payment hash for current payment. - paymentHash := sha256.Sum256(payment.PaymentPreimage[:]) - - // Update status for current payment to completed. If it fails, - // the migration is aborted and the payment bucket is returned - // to its previous state. - return paymentStatuses.Put(paymentHash[:], StatusSucceeded.Bytes()) - }) - if err != nil { - return err - } - - log.Infof("Migration of payment statuses complete!") - - return nil -} - -// MigratePruneEdgeUpdateIndex is a database migration that attempts to resolve -// some lingering bugs with regards to edge policies and their update index. -// Stale entries within the edge update index were not being properly pruned due -// to a miscalculation on the offset of an edge's policy last update. 
This -// migration also fixes the case where the public keys within edge policies were -// being serialized with an extra byte, causing an even greater error when -// attempting to perform the offset calculation described earlier. -func MigratePruneEdgeUpdateIndex(tx kvdb.RwTx) er.R { - // To begin the migration, we'll retrieve the update index bucket. If it - // does not exist, we have nothing left to do so we can simply exit. - edges := tx.ReadWriteBucket(edgeBucket) - if edges == nil { - return nil - } - edgeUpdateIndex := edges.NestedReadWriteBucket(edgeUpdateIndexBucket) - if edgeUpdateIndex == nil { - return nil - } - - // Retrieve some buckets that will be needed later on. These should - // already exist given the assumption that the buckets above do as - // well. - edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket) - if err != nil { - return er.Errorf("error creating edge index bucket: %s", err) - } - if edgeIndex == nil { - return er.Errorf("unable to create/fetch edge index " + - "bucket") - } - nodes, err := tx.CreateTopLevelBucket(nodeBucket) - if err != nil { - return er.Errorf("unable to make node bucket") - } - - log.Info("Migrating database to properly prune edge update index") - - // We'll need to properly prune all the outdated entries within the edge - // update index. To do so, we'll gather all of the existing policies - // within the graph to re-populate them later on. - var edgeKeys [][]byte - err = edges.ForEach(func(edgeKey, edgePolicyBytes []byte) er.R { - // All valid entries are indexed by a public key (33 bytes) - // followed by a channel ID (8 bytes), so we'll skip any entries - // with keys that do not match this. 
- if len(edgeKey) != 33+8 { - return nil - } - - edgeKeys = append(edgeKeys, edgeKey) - - return nil - }) - if err != nil { - return er.Errorf("unable to gather existing edge policies: %v", - err) - } - - log.Info("Constructing set of edge update entries to purge.") - - // Build the set of keys that we will remove from the edge update index. - // This will include all keys contained within the bucket. - var updateKeysToRemove [][]byte - err = edgeUpdateIndex.ForEach(func(updKey, _ []byte) er.R { - updateKeysToRemove = append(updateKeysToRemove, updKey) - return nil - }) - if err != nil { - return er.Errorf("unable to gather existing edge updates: %v", - err) - } - - log.Infof("Removing %d entries from edge update index.", - len(updateKeysToRemove)) - - // With the set of keys contained in the edge update index constructed, - // we'll proceed in purging all of them from the index. - for _, updKey := range updateKeysToRemove { - if err := edgeUpdateIndex.Delete(updKey); err != nil { - return err - } - } - - log.Infof("Repopulating edge update index with %d valid entries.", - len(edgeKeys)) - - // For each edge key, we'll retrieve the policy, deserialize it, and - // re-add it to the different buckets. By doing so, we'll ensure that - // all existing edge policies are serialized correctly within their - // respective buckets and that the correct entries are populated within - // the edge update index. - for _, edgeKey := range edgeKeys { - edgePolicyBytes := edges.Get(edgeKey) - - // Skip any entries with unknown policies as there will not be - // any entries for them in the edge update index. 
- if bytes.Equal(edgePolicyBytes[:], unknownPolicy) { - continue - } - - edgePolicy, err := deserializeChanEdgePolicy( - bytes.NewReader(edgePolicyBytes), nodes, - ) - if err != nil { - return err - } - - _, err = updateEdgePolicy(tx, edgePolicy) - if err != nil { - return err - } - } - - log.Info("Migration to properly prune edge update index complete!") - - return nil -} - -// MigrateOptionalChannelCloseSummaryFields migrates the serialized format of -// ChannelCloseSummary to a format where optional fields' presence is indicated -// with boolean markers. -func MigrateOptionalChannelCloseSummaryFields(tx kvdb.RwTx) er.R { - closedChanBucket := tx.ReadWriteBucket(closedChannelBucket) - if closedChanBucket == nil { - return nil - } - - log.Info("Migrating to new closed channel format...") - - // We store the converted keys and values and put them back into the - // database after the loop, since modifying the bucket within the - // ForEach loop is not safe. - var closedChansKeys [][]byte - var closedChansValues [][]byte - err := closedChanBucket.ForEach(func(chanID, summary []byte) er.R { - r := bytes.NewReader(summary) - - // Read the old (v6) format from the database. - c, err := deserializeCloseChannelSummaryV6(r) - if err != nil { - return err - } - - // Serialize using the new format, and put back into the - // bucket. - var b bytes.Buffer - if err := serializeChannelCloseSummary(&b, c); err != nil { - return err - } - - // Now that we know the modifications was successful, we'll - // Store the key and value to our slices, and write it back to - // disk in the new format after the ForEach loop is over. - closedChansKeys = append(closedChansKeys, chanID) - closedChansValues = append(closedChansValues, b.Bytes()) - return nil - }) - if err != nil { - return er.Errorf("unable to update closed channels: %v", err) - } - - // Now put the new format back into the DB. 
- for i := range closedChansKeys { - key := closedChansKeys[i] - value := closedChansValues[i] - if err := closedChanBucket.Put(key, value); err != nil { - return err - } - } - - log.Info("Migration to new closed channel format complete!") - - return nil -} - -var messageStoreBucket = []byte("message-store") - -// MigrateGossipMessageStoreKeys migrates the key format for gossip messages -// found in the message store to a new one that takes into consideration the of -// the message being stored. -func MigrateGossipMessageStoreKeys(tx kvdb.RwTx) er.R { - // We'll start by retrieving the bucket in which these messages are - // stored within. If there isn't one, there's nothing left for us to do - // so we can avoid the migration. - messageStore := tx.ReadWriteBucket(messageStoreBucket) - if messageStore == nil { - return nil - } - - log.Info("Migrating to the gossip message store new key format") - - // Otherwise we'll proceed with the migration. We'll start by coalescing - // all the current messages within the store, which are indexed by the - // public key of the peer which they should be sent to, followed by the - // short channel ID of the channel for which the message belongs to. We - // should only expect to find channel announcement signatures as that - // was the only support message type previously. - msgs := make(map[[33 + 8]byte]*lnwire.AnnounceSignatures) - err := messageStore.ForEach(func(k, v []byte) er.R { - var msgKey [33 + 8]byte - copy(msgKey[:], k) - - msg := &lnwire.AnnounceSignatures{} - if err := msg.Decode(bytes.NewReader(v), 0); err != nil { - return err - } - - msgs[msgKey] = msg - - return nil - - }) - if err != nil { - return err - } - - // Then, we'll go over all of our messages, remove their previous entry, - // and add another with the new key format. Once we've done this for - // every message, we can consider the migration complete. 
- for oldMsgKey, msg := range msgs { - if err := messageStore.Delete(oldMsgKey[:]); err != nil { - return err - } - - // Construct the new key for which we'll find this message with - // in the store. It'll be the same as the old, but we'll also - // include the message type. - var msgType [2]byte - binary.BigEndian.PutUint16(msgType[:], uint16(msg.MsgType())) - newMsgKey := append(oldMsgKey[:], msgType[:]...) - - // Serialize the message with its wire encoding. - var b bytes.Buffer - if _, err := lnwire.WriteMessage(&b, msg, 0); err != nil { - return err - } - - if err := messageStore.Put(newMsgKey, b.Bytes()); err != nil { - return err - } - } - - log.Info("Migration to the gossip message store new key format complete!") - - return nil -} - -// MigrateOutgoingPayments moves the OutgoingPayments into a new bucket format -// where they all reside in a top-level bucket indexed by the payment hash. In -// this sub-bucket we store information relevant to this payment, such as the -// payment status. -// -// Since the router cannot handle resumed payments that have the status -// InFlight (we have no PaymentAttemptInfo available for pre-migration -// payments) we delete those statuses, so only Completed payments remain in the -// new bucket structure. -func MigrateOutgoingPayments(tx kvdb.RwTx) er.R { - log.Infof("Migrating outgoing payments to new bucket structure") - - oldPayments := tx.ReadWriteBucket(paymentBucket) - - // Return early if there are no payments to migrate. - if oldPayments == nil { - log.Infof("No outgoing payments found, nothing to migrate.") - return nil - } - - newPayments, err := tx.CreateTopLevelBucket(paymentsRootBucket) - if err != nil { - return err - } - - // Helper method to get the source pubkey. We define it such that we - // only attempt to fetch it if needed. 
- sourcePub := func() ([33]byte, er.R) { - var pub [33]byte - nodes := tx.ReadWriteBucket(nodeBucket) - if nodes == nil { - return pub, ErrGraphNotFound.Default() - } - - selfPub := nodes.Get(sourceKey) - if selfPub == nil { - return pub, ErrSourceNodeNotSet.Default() - } - copy(pub[:], selfPub[:]) - return pub, nil - } - - err = oldPayments.ForEach(func(k, v []byte) er.R { - // Ignores if it is sub-bucket. - if v == nil { - return nil - } - - // Read the old payment format. - r := bytes.NewReader(v) - payment, err := deserializeOutgoingPayment(r) - if err != nil { - return err - } - - // Calculate payment hash from the payment preimage. - paymentHash := sha256.Sum256(payment.PaymentPreimage[:]) - - // Now create and add a PaymentCreationInfo to the bucket. - c := &PaymentCreationInfo{ - PaymentHash: paymentHash, - Value: payment.Terms.Value, - CreationDate: payment.CreationDate, - PaymentRequest: payment.PaymentRequest, - } - - var infoBuf bytes.Buffer - if err := serializePaymentCreationInfo(&infoBuf, c); err != nil { - return err - } - - sourcePubKey, err := sourcePub() - if err != nil { - return err - } - - // Do the same for the PaymentAttemptInfo. - totalAmt := payment.Terms.Value + payment.Fee - rt := Route{ - TotalTimeLock: payment.TimeLockLength, - TotalAmount: totalAmt, - SourcePubKey: sourcePubKey, - Hops: []*Hop{}, - } - for _, hop := range payment.Path { - rt.Hops = append(rt.Hops, &Hop{ - PubKeyBytes: hop, - AmtToForward: totalAmt, - }) - } - - // Since the old format didn't store the fee for individual - // hops, we let the last hop eat the whole fee for the total to - // add up. - if len(rt.Hops) > 0 { - rt.Hops[len(rt.Hops)-1].AmtToForward = payment.Terms.Value - } - - // Since we don't have the session key for old payments, we - // create a random one to be able to serialize the attempt - // info. - priv, _ := btcec.NewPrivateKey(btcec.S256()) - s := &PaymentAttemptInfo{ - PaymentID: 0, // unknown. - SessionKey: priv, // unknown. 
- Route: rt, - } - - var attemptBuf bytes.Buffer - if err := serializePaymentAttemptInfoMigration9(&attemptBuf, s); err != nil { - return err - } - - // Reuse the existing payment sequence number. - var seqNum [8]byte - copy(seqNum[:], k) - - // Create a bucket indexed by the payment hash. - bucket, err := newPayments.CreateBucket(paymentHash[:]) - - // If the bucket already exists, it means that we are migrating - // from a database containing duplicate payments to a payment - // hash. To keep this information, we store such duplicate - // payments in a sub-bucket. - if kvdb.ErrBucketExists.Is(err) { - pHashBucket := newPayments.NestedReadWriteBucket(paymentHash[:]) - - // Create a bucket for duplicate payments within this - // payment hash's bucket. - dup, err := pHashBucket.CreateBucketIfNotExists( - paymentDuplicateBucket, - ) - if err != nil { - return err - } - - // Each duplicate will get its own sub-bucket within - // this bucket, so use their sequence number to index - // them by. - bucket, err = dup.CreateBucket(seqNum[:]) - if err != nil { - return err - } - - } else if err != nil { - return err - } - - // Store the payment's information to the bucket. - err = bucket.Put(paymentSequenceKey, seqNum[:]) - if err != nil { - return err - } - - err = bucket.Put(paymentCreationInfoKey, infoBuf.Bytes()) - if err != nil { - return err - } - - err = bucket.Put(paymentAttemptInfoKey, attemptBuf.Bytes()) - if err != nil { - return err - } - - err = bucket.Put(paymentSettleInfoKey, payment.PaymentPreimage[:]) - if err != nil { - return err - } - - return nil - }) - if err != nil { - return err - } - - // To continue producing unique sequence numbers, we set the sequence - // of the new bucket to that of the old one. - seq := oldPayments.Sequence() - if err := newPayments.SetSequence(seq); err != nil { - return err - } - - // Now we delete the old buckets. Deleting the payment status buckets - // deletes all payment statuses other than Complete. 
- err = tx.DeleteTopLevelBucket(paymentStatusBucket) - if err != nil && !kvdb.ErrBucketNotFound.Is(err) { - return err - } - - // Finally delete the old payment bucket. - err = tx.DeleteTopLevelBucket(paymentBucket) - if err != nil && !kvdb.ErrBucketNotFound.Is(err) { - return err - } - - log.Infof("Migration of outgoing payment bucket structure completed!") - return nil -} diff --git a/lnd/channeldb/migration_01_to_11/migrations_test.go b/lnd/channeldb/migration_01_to_11/migrations_test.go deleted file mode 100644 index 0d2edbe2..00000000 --- a/lnd/channeldb/migration_01_to_11/migrations_test.go +++ /dev/null @@ -1,932 +0,0 @@ -package migration_01_to_11 - -import ( - "bytes" - "crypto/sha256" - "encoding/binary" - "math/rand" - "reflect" - "testing" - "time" - - "github.com/davecgh/go-spew/spew" - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/lnd/lntypes" - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -// TestPaymentStatusesMigration checks that already completed payments will have -// their payment statuses set to Completed after the migration. -func TestPaymentStatusesMigration(t *testing.T) { - t.Parallel() - - fakePayment := makeFakePayment() - paymentHash := sha256.Sum256(fakePayment.PaymentPreimage[:]) - - // Add fake payment to test database, verifying that it was created, - // that we have only one payment, and its status is not "Completed". 
- beforeMigrationFunc := func(d *DB) { - if err := d.addPayment(fakePayment); err != nil { - t.Fatalf("unable to add payment: %v", err) - } - - payments, err := d.fetchAllPayments() - if err != nil { - t.Fatalf("unable to fetch payments: %v", err) - } - - if len(payments) != 1 { - t.Fatalf("wrong qty of paymets: expected 1, got %v", - len(payments)) - } - - paymentStatus, err := d.fetchPaymentStatus(paymentHash) - if err != nil { - t.Fatalf("unable to fetch payment status: %v", err) - } - - // We should receive default status if we have any in database. - if paymentStatus != StatusUnknown { - t.Fatalf("wrong payment status: expected %v, got %v", - StatusUnknown.String(), paymentStatus.String()) - } - - // Lastly, we'll add a locally-sourced circuit and - // non-locally-sourced circuit to the circuit map. The - // locally-sourced payment should end up with an InFlight - // status, while the other should remain unchanged, which - // defaults to Grounded. - err = kvdb.Update(d, func(tx kvdb.RwTx) er.R { - circuits, err := tx.CreateTopLevelBucket( - []byte("circuit-adds"), - ) - if err != nil { - return err - } - - groundedKey := make([]byte, 16) - binary.BigEndian.PutUint64(groundedKey[:8], 1) - binary.BigEndian.PutUint64(groundedKey[8:], 1) - - // Generated using TestHalfCircuitSerialization with nil - // ErrorEncrypter, which is the case for locally-sourced - // payments. No payment status should end up being set - // for this circuit, since the short channel id of the - // key is non-zero (e.g., a forwarded circuit). This - // will default it to Grounded. 
- groundedCircuit := []byte{ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x01, - // start payment hash - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - // end payment hash - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, - 0x42, 0x40, 0x00, - } - - err = circuits.Put(groundedKey, groundedCircuit) - if err != nil { - return err - } - - inFlightKey := make([]byte, 16) - binary.BigEndian.PutUint64(inFlightKey[:8], 0) - binary.BigEndian.PutUint64(inFlightKey[8:], 1) - - // Generated using TestHalfCircuitSerialization with nil - // ErrorEncrypter, which is not the case for forwarded - // payments, but should have no impact on the - // correctness of the test. The payment status for this - // circuit should be set to InFlight, since the short - // channel id in the key is 0 (sourceHop). - inFlightCircuit := []byte{ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x01, - // start payment hash - 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - // end payment hash - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, - 0x42, 0x40, 0x00, - } - - return circuits.Put(inFlightKey, inFlightCircuit) - }, func() {}) - if err != nil { - t.Fatalf("unable to add circuit map entry: %v", err) - } - } - - // Verify that the created payment status is "Completed" for our one - // fake payment. - afterMigrationFunc := func(d *DB) { - // Check that our completed payments were migrated. 
- paymentStatus, err := d.fetchPaymentStatus(paymentHash) - if err != nil { - t.Fatalf("unable to fetch payment status: %v", err) - } - - if paymentStatus != StatusSucceeded { - t.Fatalf("wrong payment status: expected %v, got %v", - StatusSucceeded.String(), paymentStatus.String()) - } - - inFlightHash := [32]byte{ - 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - } - - // Check that the locally sourced payment was transitioned to - // InFlight. - paymentStatus, err = d.fetchPaymentStatus(inFlightHash) - if err != nil { - t.Fatalf("unable to fetch payment status: %v", err) - } - - if paymentStatus != StatusInFlight { - t.Fatalf("wrong payment status: expected %v, got %v", - StatusInFlight.String(), paymentStatus.String()) - } - - groundedHash := [32]byte{ - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - } - - // Check that non-locally sourced payments remain in the default - // Grounded state. - paymentStatus, err = d.fetchPaymentStatus(groundedHash) - if err != nil { - t.Fatalf("unable to fetch payment status: %v", err) - } - - if paymentStatus != StatusUnknown { - t.Fatalf("wrong payment status: expected %v, got %v", - StatusUnknown.String(), paymentStatus.String()) - } - } - - applyMigration(t, - beforeMigrationFunc, - afterMigrationFunc, - PaymentStatusesMigration, - false) -} - -// TestMigrateOptionalChannelCloseSummaryFields properly converts a -// ChannelCloseSummary to the v7 format, where optional fields have their -// presence indicated with boolean markers. 
-func TestMigrateOptionalChannelCloseSummaryFields(t *testing.T) { - t.Parallel() - - chanState, err := createTestChannelState(nil) - if err != nil { - t.Fatalf("unable to create channel state: %v", err) - } - - var chanPointBuf bytes.Buffer - err = writeOutpoint(&chanPointBuf, &chanState.FundingOutpoint) - if err != nil { - t.Fatalf("unable to write outpoint: %v", err) - } - - chanID := chanPointBuf.Bytes() - - testCases := []struct { - closeSummary *ChannelCloseSummary - oldSerialization func(c *ChannelCloseSummary) []byte - }{ - { - // A close summary where none of the new fields are - // set. - closeSummary: &ChannelCloseSummary{ - ChanPoint: chanState.FundingOutpoint, - ShortChanID: chanState.ShortChanID(), - ChainHash: chanState.ChainHash, - ClosingTXID: testTx.TxHash(), - CloseHeight: 100, - RemotePub: chanState.IdentityPub, - Capacity: chanState.Capacity, - SettledBalance: btcutil.Amount(50000), - CloseType: RemoteForceClose, - IsPending: true, - - // The last fields will be unset. - RemoteCurrentRevocation: nil, - LocalChanConfig: ChannelConfig{}, - RemoteNextRevocation: nil, - }, - - // In the old format the last field written is the - // IsPendingField. It should be converted by adding an - // extra boolean marker at the end to indicate that the - // remaining fields are not there. - oldSerialization: func(cs *ChannelCloseSummary) []byte { - var buf bytes.Buffer - err := WriteElements(&buf, cs.ChanPoint, - cs.ShortChanID, cs.ChainHash, - cs.ClosingTXID, cs.CloseHeight, - cs.RemotePub, cs.Capacity, - cs.SettledBalance, cs.TimeLockedBalance, - cs.CloseType, cs.IsPending, - ) - if err != nil { - t.Fatal(err) - } - - // For the old format, these are all the fields - // that are written. - return buf.Bytes() - }, - }, - { - // A close summary where the new fields are present, - // but the optional RemoteNextRevocation field is not - // set. 
- closeSummary: &ChannelCloseSummary{ - ChanPoint: chanState.FundingOutpoint, - ShortChanID: chanState.ShortChanID(), - ChainHash: chanState.ChainHash, - ClosingTXID: testTx.TxHash(), - CloseHeight: 100, - RemotePub: chanState.IdentityPub, - Capacity: chanState.Capacity, - SettledBalance: btcutil.Amount(50000), - CloseType: RemoteForceClose, - IsPending: true, - RemoteCurrentRevocation: chanState.RemoteCurrentRevocation, - LocalChanConfig: chanState.LocalChanCfg, - - // RemoteNextRevocation is optional, and here - // it is not set. - RemoteNextRevocation: nil, - }, - - // In the old format the last field written is the - // LocalChanConfig. This indicates that the optional - // RemoteNextRevocation field is not present. It should - // be converted by adding boolean markers for all these - // fields. - oldSerialization: func(cs *ChannelCloseSummary) []byte { - var buf bytes.Buffer - err := WriteElements(&buf, cs.ChanPoint, - cs.ShortChanID, cs.ChainHash, - cs.ClosingTXID, cs.CloseHeight, - cs.RemotePub, cs.Capacity, - cs.SettledBalance, cs.TimeLockedBalance, - cs.CloseType, cs.IsPending, - ) - if err != nil { - t.Fatal(err) - } - - err = WriteElements(&buf, cs.RemoteCurrentRevocation) - if err != nil { - t.Fatal(err) - } - - err = writeChanConfig(&buf, &cs.LocalChanConfig) - if err != nil { - t.Fatal(err) - } - - // RemoteNextRevocation is not written. - return buf.Bytes() - }, - }, - { - // A close summary where all fields are present. 
- closeSummary: &ChannelCloseSummary{ - ChanPoint: chanState.FundingOutpoint, - ShortChanID: chanState.ShortChanID(), - ChainHash: chanState.ChainHash, - ClosingTXID: testTx.TxHash(), - CloseHeight: 100, - RemotePub: chanState.IdentityPub, - Capacity: chanState.Capacity, - SettledBalance: btcutil.Amount(50000), - CloseType: RemoteForceClose, - IsPending: true, - RemoteCurrentRevocation: chanState.RemoteCurrentRevocation, - LocalChanConfig: chanState.LocalChanCfg, - - // RemoteNextRevocation is optional, and in - // this case we set it. - RemoteNextRevocation: chanState.RemoteNextRevocation, - }, - - // In the old format all the fields are written. It - // should be converted by adding boolean markers for - // all these fields. - oldSerialization: func(cs *ChannelCloseSummary) []byte { - var buf bytes.Buffer - err := WriteElements(&buf, cs.ChanPoint, - cs.ShortChanID, cs.ChainHash, - cs.ClosingTXID, cs.CloseHeight, - cs.RemotePub, cs.Capacity, - cs.SettledBalance, cs.TimeLockedBalance, - cs.CloseType, cs.IsPending, - ) - if err != nil { - t.Fatal(err) - } - - err = WriteElements(&buf, cs.RemoteCurrentRevocation) - if err != nil { - t.Fatal(err) - } - - err = writeChanConfig(&buf, &cs.LocalChanConfig) - if err != nil { - t.Fatal(err) - } - - err = WriteElements(&buf, cs.RemoteNextRevocation) - if err != nil { - t.Fatal(err) - } - - return buf.Bytes() - }, - }, - } - - for _, test := range testCases { - - // Before the migration we must add the old format to the DB. - beforeMigrationFunc := func(d *DB) { - - // Get the old serialization format for this test's - // close summary, and it to the closed channel bucket. 
- old := test.oldSerialization(test.closeSummary) - err = kvdb.Update(d, func(tx kvdb.RwTx) er.R { - closedChanBucket, err := tx.CreateTopLevelBucket( - closedChannelBucket, - ) - if err != nil { - return err - } - return closedChanBucket.Put(chanID, old) - }, func() {}) - if err != nil { - t.Fatalf("unable to add old serialization: %v", - err) - } - } - - // After the migration it should be found in the new format. - afterMigrationFunc := func(d *DB) { - // We generate the new serialized version, to check - // against what is found in the DB. - var b bytes.Buffer - err = serializeChannelCloseSummary(&b, test.closeSummary) - if err != nil { - t.Fatalf("unable to serialize: %v", err) - } - newSerialization := b.Bytes() - - var dbSummary []byte - err = kvdb.View(d, func(tx kvdb.RTx) er.R { - closedChanBucket := tx.ReadBucket(closedChannelBucket) - if closedChanBucket == nil { - return er.New("unable to find bucket") - } - - // Get the serialized verision from the DB and - // make sure it matches what we expected. - dbSummary = closedChanBucket.Get(chanID) - if !bytes.Equal(dbSummary, newSerialization) { - return er.Errorf("unexpected new " + - "serialization") - } - return nil - }, func() { - dbSummary = nil - }) - if err != nil { - t.Fatalf("unable to view DB: %v", err) - } - - // Finally we fetch the deserialized summary from the - // DB and check that it is equal to our original one. 
- dbChannels, err := d.FetchClosedChannels(false) - if err != nil { - t.Fatalf("unable to fetch closed channels: %v", - err) - } - - if len(dbChannels) != 1 { - t.Fatalf("expected 1 closed channels, found %v", - len(dbChannels)) - } - - dbChan := dbChannels[0] - if !reflect.DeepEqual(dbChan, test.closeSummary) { - dbChan.RemotePub.Curve = nil - test.closeSummary.RemotePub.Curve = nil - t.Fatalf("not equal: %v vs %v", - spew.Sdump(dbChan), - spew.Sdump(test.closeSummary)) - } - - } - - applyMigration(t, - beforeMigrationFunc, - afterMigrationFunc, - MigrateOptionalChannelCloseSummaryFields, - false) - } -} - -// TestMigrateGossipMessageStoreKeys ensures that the migration to the new -// gossip message store key format is successful/unsuccessful under various -// scenarios. -func TestMigrateGossipMessageStoreKeys(t *testing.T) { - t.Parallel() - - // Construct the message which we'll use to test the migration, along - // with its old and new key formats. - shortChanID := lnwire.ShortChannelID{BlockHeight: 10} - msg := &lnwire.AnnounceSignatures{ShortChannelID: shortChanID} - - var oldMsgKey [33 + 8]byte - copy(oldMsgKey[:33], pubKey.SerializeCompressed()) - binary.BigEndian.PutUint64(oldMsgKey[33:41], shortChanID.ToUint64()) - - var newMsgKey [33 + 8 + 2]byte - copy(newMsgKey[:41], oldMsgKey[:]) - binary.BigEndian.PutUint16(newMsgKey[41:43], uint16(msg.MsgType())) - - // Before the migration, we'll create the bucket where the messages - // should live and insert them. - beforeMigration := func(db *DB) { - var b bytes.Buffer - if err := msg.Encode(&b, 0); err != nil { - t.Fatalf("unable to serialize message: %v", err) - } - - err := kvdb.Update(db, func(tx kvdb.RwTx) er.R { - messageStore, err := tx.CreateTopLevelBucket( - messageStoreBucket, - ) - if err != nil { - return err - } - - return messageStore.Put(oldMsgKey[:], b.Bytes()) - }, func() {}) - if err != nil { - t.Fatal(err) - } - } - - // After the migration, we'll make sure that: - // 1. 
We cannot find the message under its old key. - // 2. We can find the message under its new key. - // 3. The message matches the original. - afterMigration := func(db *DB) { - var rawMsg []byte - err := kvdb.View(db, func(tx kvdb.RTx) er.R { - messageStore := tx.ReadBucket(messageStoreBucket) - if messageStore == nil { - return er.New("message store bucket not " + - "found") - } - rawMsg = messageStore.Get(oldMsgKey[:]) - if rawMsg != nil { - t.Fatal("expected to not find message under " + - "old key, but did") - } - rawMsg = messageStore.Get(newMsgKey[:]) - if rawMsg == nil { - return er.Errorf("expected to find message " + - "under new key, but didn't") - } - - return nil - }, func() { - rawMsg = nil - }) - if err != nil { - t.Fatal(err) - } - - gotMsg, errr := lnwire.ReadMessage(bytes.NewReader(rawMsg), 0) - if errr != nil { - t.Fatalf("unable to deserialize raw message: %v", errr) - } - if !reflect.DeepEqual(msg, gotMsg) { - t.Fatalf("expected message: %v\ngot message: %v", - spew.Sdump(msg), spew.Sdump(gotMsg)) - } - } - - applyMigration( - t, beforeMigration, afterMigration, - MigrateGossipMessageStoreKeys, false, - ) -} - -// TestOutgoingPaymentsMigration checks that OutgoingPayments are migrated to a -// new bucket structure after the migration. -func TestOutgoingPaymentsMigration(t *testing.T) { - t.Parallel() - - const numPayments = 4 - var oldPayments []*outgoingPayment - - // Add fake payments to test database, verifying that it was created. - beforeMigrationFunc := func(d *DB) { - for i := 0; i < numPayments; i++ { - var p *outgoingPayment - var err er.R - - // We fill the database with random payments. For the - // very last one we'll use a duplicate of the first, to - // ensure we are able to handle migration from a - // database that has copies. 
- if i < numPayments-1 { - p, err = makeRandomFakePayment() - if err != nil { - t.Fatalf("unable to create payment: %v", - err) - } - } else { - p = oldPayments[0] - } - - if err := d.addPayment(p); err != nil { - t.Fatalf("unable to add payment: %v", err) - } - - oldPayments = append(oldPayments, p) - } - - payments, err := d.fetchAllPayments() - if err != nil { - t.Fatalf("unable to fetch payments: %v", err) - } - - if len(payments) != numPayments { - t.Fatalf("wrong qty of paymets: expected %d got %v", - numPayments, len(payments)) - } - } - - // Verify that all payments were migrated. - afterMigrationFunc := func(d *DB) { - sentPayments, err := d.fetchPaymentsMigration9() - if err != nil { - t.Fatalf("unable to fetch sent payments: %v", err) - } - - if len(sentPayments) != numPayments { - t.Fatalf("expected %d payments, got %d", numPayments, - len(sentPayments)) - } - - graph := d.ChannelGraph() - sourceNode, err := graph.SourceNode() - if err != nil { - t.Fatalf("unable to fetch source node: %v", err) - } - - for i, p := range sentPayments { - // The payment status should be Completed. - if p.Status != StatusSucceeded { - t.Fatalf("expected Completed, got %v", p.Status) - } - - // Check that the sequence number is preserved. They - // start counting at 1. - if p.sequenceNum != uint64(i+1) { - t.Fatalf("expected seqnum %d, got %d", i, - p.sequenceNum) - } - - // Order of payments should be be preserved. - old := oldPayments[i] - - // Check the individial fields. 
- if p.Info.Value != old.Terms.Value { - t.Fatalf("value mismatch") - } - - if p.Info.CreationDate != old.CreationDate { - t.Fatalf("date mismatch") - } - - if !bytes.Equal(p.Info.PaymentRequest, old.PaymentRequest) { - t.Fatalf("payreq mismatch") - } - - if *p.PaymentPreimage != old.PaymentPreimage { - t.Fatalf("preimage mismatch") - } - - if p.Attempt.Route.TotalFees() != old.Fee { - t.Fatalf("Fee mismatch") - } - - if p.Attempt.Route.TotalAmount != old.Fee+old.Terms.Value { - t.Fatalf("Total amount mismatch") - } - - if p.Attempt.Route.TotalTimeLock != old.TimeLockLength { - t.Fatalf("timelock mismatch") - } - - if p.Attempt.Route.SourcePubKey != sourceNode.PubKeyBytes { - t.Fatalf("source mismatch: %x vs %x", - p.Attempt.Route.SourcePubKey[:], - sourceNode.PubKeyBytes[:]) - } - - for i, hop := range old.Path { - if hop != p.Attempt.Route.Hops[i].PubKeyBytes { - t.Fatalf("path mismatch") - } - } - } - - // Finally, check that the payment sequence number is updated - // to reflect the migrated payments. 
- err = kvdb.Update(d, func(tx kvdb.RwTx) er.R { - payments := tx.ReadWriteBucket(paymentsRootBucket) - if payments == nil { - return er.Errorf("payments bucket not found") - } - - seq := payments.Sequence() - if seq != numPayments { - return er.Errorf("expected sequence to be "+ - "%d, got %d", numPayments, seq) - } - - return nil - }, func() {}) - if err != nil { - t.Fatal(err) - } - } - - applyMigration(t, - beforeMigrationFunc, - afterMigrationFunc, - MigrateOutgoingPayments, - false) -} - -func makeRandPaymentCreationInfo() (*PaymentCreationInfo, er.R) { - var payHash lntypes.Hash - if _, err := rand.Read(payHash[:]); err != nil { - return nil, er.E(err) - } - - return &PaymentCreationInfo{ - PaymentHash: payHash, - Value: lnwire.MilliSatoshi(rand.Int63()), - CreationDate: time.Now(), - PaymentRequest: []byte("test"), - }, nil -} - -// TestPaymentRouteSerialization tests that we're able to properly migrate -// existing payments on disk that contain the traversed routes to the new -// routing format which supports the TLV payloads. We also test that the -// migration is able to handle duplicate payment attempts. -func TestPaymentRouteSerialization(t *testing.T) { - t.Parallel() - - legacyHop1 := &Hop{ - PubKeyBytes: NewVertex(pub), - ChannelID: 12345, - OutgoingTimeLock: 111, - LegacyPayload: true, - AmtToForward: 555, - } - legacyHop2 := &Hop{ - PubKeyBytes: NewVertex(pub), - ChannelID: 12345, - OutgoingTimeLock: 111, - LegacyPayload: true, - AmtToForward: 555, - } - legacyRoute := Route{ - TotalTimeLock: 123, - TotalAmount: 1234567, - SourcePubKey: NewVertex(pub), - Hops: []*Hop{legacyHop1, legacyHop2}, - } - - const numPayments = 4 - var oldPayments []*Payment - - sharedPayAttempt := PaymentAttemptInfo{ - PaymentID: 1, - SessionKey: priv, - Route: legacyRoute, - } - - // We'll first add a series of fake payments, using the existing legacy - // serialization format. 
- beforeMigrationFunc := func(d *DB) { - err := kvdb.Update(d, func(tx kvdb.RwTx) er.R { - paymentsBucket, err := tx.CreateTopLevelBucket( - paymentsRootBucket, - ) - if err != nil { - t.Fatalf("unable to create new payments "+ - "bucket: %v", err) - } - - for i := 0; i < numPayments; i++ { - var seqNum [8]byte - byteOrder.PutUint64(seqNum[:], uint64(i)) - - // All payments will be randomly generated, - // other than the final payment. We'll force - // the final payment to re-use an existing - // payment hash so we can insert it into the - // duplicate payment hash bucket. - var payInfo *PaymentCreationInfo - if i < numPayments-1 { - payInfo, err = makeRandPaymentCreationInfo() - if err != nil { - t.Fatalf("unable to create "+ - "payment: %v", err) - } - } else { - payInfo = oldPayments[0].Info - } - - // Next, legacy encoded when needed, we'll - // serialize the info and the attempt. - var payInfoBytes bytes.Buffer - errr := serializePaymentCreationInfo( - &payInfoBytes, payInfo, - ) - if errr != nil { - t.Fatalf("unable to encode pay "+ - "info: %v", errr) - } - var payAttemptBytes bytes.Buffer - errr = serializePaymentAttemptInfoLegacy( - &payAttemptBytes, &sharedPayAttempt, - ) - if errr != nil { - t.Fatalf("unable to encode payment attempt: "+ - "%v", errr) - } - - // Before we write to disk, we'll need to fetch - // the proper bucket. If this is the duplicate - // payment, then we'll grab the dup bucket, - // otherwise, we'll use the top level bucket. 
- var payHashBucket kvdb.RwBucket - if i < numPayments-1 { - payHashBucket, err = paymentsBucket.CreateBucket( - payInfo.PaymentHash[:], - ) - if err != nil { - t.Fatalf("unable to create payments bucket: %v", err) - } - } else { - payHashBucket = paymentsBucket.NestedReadWriteBucket( - payInfo.PaymentHash[:], - ) - dupPayBucket, err := payHashBucket.CreateBucket( - paymentDuplicateBucket, - ) - if err != nil { - t.Fatalf("unable to create "+ - "dup hash bucket: %v", err) - } - - payHashBucket, err = dupPayBucket.CreateBucket( - seqNum[:], - ) - if err != nil { - t.Fatalf("unable to make dup "+ - "bucket: %v", err) - } - } - - err = payHashBucket.Put(paymentSequenceKey, seqNum[:]) - if err != nil { - t.Fatalf("unable to write seqno: %v", err) - } - - err = payHashBucket.Put( - paymentCreationInfoKey, payInfoBytes.Bytes(), - ) - if err != nil { - t.Fatalf("unable to write creation "+ - "info: %v", err) - } - - err = payHashBucket.Put( - paymentAttemptInfoKey, payAttemptBytes.Bytes(), - ) - if err != nil { - t.Fatalf("unable to write attempt "+ - "info: %v", err) - } - - oldPayments = append(oldPayments, &Payment{ - Info: payInfo, - Attempt: &sharedPayAttempt, - }) - } - - return nil - }, func() { - oldPayments = nil - }) - if err != nil { - t.Fatalf("unable to create test payments: %v", err) - } - } - - afterMigrationFunc := func(d *DB) { - newPayments, err := d.FetchPayments() - if err != nil { - t.Fatalf("unable to fetch new payments: %v", err) - } - - if len(newPayments) != numPayments { - t.Fatalf("expected %d payments, got %d", numPayments, - len(newPayments)) - } - - for i, p := range newPayments { - // Order of payments should be be preserved. 
- old := oldPayments[i] - - if p.Attempt.PaymentID != old.Attempt.PaymentID { - t.Fatalf("wrong pay ID: expected %v, got %v", - p.Attempt.PaymentID, - old.Attempt.PaymentID) - } - - if p.Attempt.Route.TotalFees() != old.Attempt.Route.TotalFees() { - t.Fatalf("Fee mismatch") - } - - if p.Attempt.Route.TotalAmount != old.Attempt.Route.TotalAmount { - t.Fatalf("Total amount mismatch") - } - - if p.Attempt.Route.TotalTimeLock != old.Attempt.Route.TotalTimeLock { - t.Fatalf("timelock mismatch") - } - - if p.Attempt.Route.SourcePubKey != old.Attempt.Route.SourcePubKey { - t.Fatalf("source mismatch: %x vs %x", - p.Attempt.Route.SourcePubKey[:], - old.Attempt.Route.SourcePubKey[:]) - } - - for i, hop := range p.Attempt.Route.Hops { - if !reflect.DeepEqual(hop, legacyRoute.Hops[i]) { - t.Fatalf("hop mismatch") - } - } - } - } - - applyMigration(t, - beforeMigrationFunc, - afterMigrationFunc, - MigrateRouteSerialization, - false) -} - -// TestNotCoveredMigrations only references migrations that are not referenced -// anywhere else in this package. This prevents false positives when linting -// with unused. -func TestNotCoveredMigrations(t *testing.T) { - _ = MigrateNodeAndEdgeUpdateIndex - _ = MigrateInvoiceTimeSeries - _ = MigrateInvoiceTimeSeriesOutgoingPayments - _ = MigrateEdgePolicies - _ = MigratePruneEdgeUpdateIndex -} diff --git a/lnd/channeldb/migration_01_to_11/options.go b/lnd/channeldb/migration_01_to_11/options.go deleted file mode 100644 index 03b287e0..00000000 --- a/lnd/channeldb/migration_01_to_11/options.go +++ /dev/null @@ -1,41 +0,0 @@ -package migration_01_to_11 - -const ( - // DefaultRejectCacheSize is the default number of rejectCacheEntries to - // cache for use in the rejection cache of incoming gossip traffic. This - // produces a cache size of around 1MB. - DefaultRejectCacheSize = 50000 - - // DefaultChannelCacheSize is the default number of ChannelEdges cached - // in order to reply to gossip queries. 
This produces a cache size of - // around 40MB. - DefaultChannelCacheSize = 20000 -) - -// Options holds parameters for tuning and customizing a channeldb.DB. -type Options struct { - // RejectCacheSize is the maximum number of rejectCacheEntries to hold - // in the rejection cache. - RejectCacheSize int - - // ChannelCacheSize is the maximum number of ChannelEdges to hold in the - // channel cache. - ChannelCacheSize int - - // NoFreelistSync, if true, prevents the database from syncing its - // freelist to disk, resulting in improved performance at the expense of - // increased startup time. - NoFreelistSync bool -} - -// DefaultOptions returns an Options populated with default values. -func DefaultOptions() Options { - return Options{ - RejectCacheSize: DefaultRejectCacheSize, - ChannelCacheSize: DefaultChannelCacheSize, - NoFreelistSync: true, - } -} - -// OptionModifier is a function signature for modifying the default Options. -type OptionModifier func(*Options) diff --git a/lnd/channeldb/migration_01_to_11/payment_control.go b/lnd/channeldb/migration_01_to_11/payment_control.go deleted file mode 100644 index 0525aaa1..00000000 --- a/lnd/channeldb/migration_01_to_11/payment_control.go +++ /dev/null @@ -1,21 +0,0 @@ -package migration_01_to_11 - -import "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - -// fetchPaymentStatus fetches the payment status of the payment. If the payment -// isn't found, it will default to "StatusUnknown". 
-func fetchPaymentStatus(bucket kvdb.RBucket) PaymentStatus { - if bucket.Get(paymentSettleInfoKey) != nil { - return StatusSucceeded - } - - if bucket.Get(paymentFailInfoKey) != nil { - return StatusFailed - } - - if bucket.Get(paymentCreationInfoKey) != nil { - return StatusInFlight - } - - return StatusUnknown -} diff --git a/lnd/channeldb/migration_01_to_11/payments.go b/lnd/channeldb/migration_01_to_11/payments.go deleted file mode 100644 index 39b5c0ca..00000000 --- a/lnd/channeldb/migration_01_to_11/payments.go +++ /dev/null @@ -1,623 +0,0 @@ -package migration_01_to_11 - -import ( - "bytes" - "encoding/binary" - "io" - "sort" - "time" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/lnd/lntypes" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/tlv" - "github.com/pkt-cash/pktd/wire" -) - -var ( - // paymentsRootBucket is the name of the top-level bucket within the - // database that stores all data related to payments. Within this - // bucket, each payment hash its own sub-bucket keyed by its payment - // hash. - // - // Bucket hierarchy: - // - // root-bucket - // | - // |-- - // | |--sequence-key: - // | |--creation-info-key: - // | |--attempt-info-key: - // | |--settle-info-key: - // | |--fail-info-key: - // | | - // | |--duplicate-bucket (only for old, completed payments) - // | | - // | |-- - // | | |--sequence-key: - // | | |--creation-info-key: - // | | |--attempt-info-key: - // | | |--settle-info-key: - // | | |--fail-info-key: - // | | - // | |-- - // | | | - // | ... ... - // | - // |-- - // | | - // | ... - // ... - // - paymentsRootBucket = []byte("payments-root-bucket") - - // paymentDublicateBucket is the name of a optional sub-bucket within - // the payment hash bucket, that is used to hold duplicate payments to - // a payment hash. 
This is needed to support information from earlier - // versions of lnd, where it was possible to pay to a payment hash more - // than once. - paymentDuplicateBucket = []byte("payment-duplicate-bucket") - - // paymentSequenceKey is a key used in the payment's sub-bucket to - // store the sequence number of the payment. - paymentSequenceKey = []byte("payment-sequence-key") - - // paymentCreationInfoKey is a key used in the payment's sub-bucket to - // store the creation info of the payment. - paymentCreationInfoKey = []byte("payment-creation-info") - - // paymentAttemptInfoKey is a key used in the payment's sub-bucket to - // store the info about the latest attempt that was done for the - // payment in question. - paymentAttemptInfoKey = []byte("payment-attempt-info") - - // paymentSettleInfoKey is a key used in the payment's sub-bucket to - // store the settle info of the payment. - paymentSettleInfoKey = []byte("payment-settle-info") - - // paymentFailInfoKey is a key used in the payment's sub-bucket to - // store information about the reason a payment failed. - paymentFailInfoKey = []byte("payment-fail-info") -) - -// FailureReason encodes the reason a payment ultimately failed. -type FailureReason byte - -const ( - // FailureReasonTimeout indicates that the payment did timeout before a - // successful payment attempt was made. - FailureReasonTimeout FailureReason = 0 - - // FailureReasonNoRoute indicates no successful route to the - // destination was found during path finding. - FailureReasonNoRoute FailureReason = 1 - - // FailureReasonError indicates that an unexpected error happened during - // payment. - FailureReasonError FailureReason = 2 - - // FailureReasonIncorrectPaymentDetails indicates that either the hash - // is unknown or the final cltv delta or amount is incorrect. - FailureReasonIncorrectPaymentDetails FailureReason = 3 - - // TODO(halseth): cancel state. 
- - // TODO(joostjager): Add failure reasons for: - // LocalLiquidityInsufficient, RemoteCapacityInsufficient. -) - -// String returns a human readable FailureReason -func (r FailureReason) String() string { - switch r { - case FailureReasonTimeout: - return "timeout" - case FailureReasonNoRoute: - return "no_route" - case FailureReasonError: - return "error" - case FailureReasonIncorrectPaymentDetails: - return "incorrect_payment_details" - } - - return "unknown" -} - -// PaymentStatus represent current status of payment -type PaymentStatus byte - -const ( - // StatusUnknown is the status where a payment has never been initiated - // and hence is unknown. - StatusUnknown PaymentStatus = 0 - - // StatusInFlight is the status where a payment has been initiated, but - // a response has not been received. - StatusInFlight PaymentStatus = 1 - - // StatusSucceeded is the status where a payment has been initiated and - // the payment was completed successfully. - StatusSucceeded PaymentStatus = 2 - - // StatusFailed is the status where a payment has been initiated and a - // failure result has come back. - StatusFailed PaymentStatus = 3 -) - -// Bytes returns status as slice of bytes. -func (ps PaymentStatus) Bytes() []byte { - return []byte{byte(ps)} -} - -// FromBytes sets status from slice of bytes. -func (ps *PaymentStatus) FromBytes(status []byte) er.R { - if len(status) != 1 { - return er.New("payment status is empty") - } - - switch PaymentStatus(status[0]) { - case StatusUnknown, StatusInFlight, StatusSucceeded, StatusFailed: - *ps = PaymentStatus(status[0]) - default: - return er.New("unknown payment status") - } - - return nil -} - -// String returns readable representation of payment status. 
-func (ps PaymentStatus) String() string { - switch ps { - case StatusUnknown: - return "Unknown" - case StatusInFlight: - return "In Flight" - case StatusSucceeded: - return "Succeeded" - case StatusFailed: - return "Failed" - default: - return "Unknown" - } -} - -// PaymentCreationInfo is the information necessary to have ready when -// initiating a payment, moving it into state InFlight. -type PaymentCreationInfo struct { - // PaymentHash is the hash this payment is paying to. - PaymentHash lntypes.Hash - - // Value is the amount we are paying. - Value lnwire.MilliSatoshi - - // CreatingDate is the time when this payment was initiated. - CreationDate time.Time - - // PaymentRequest is the full payment request, if any. - PaymentRequest []byte -} - -// PaymentAttemptInfo contains information about a specific payment attempt for -// a given payment. This information is used by the router to handle any errors -// coming back after an attempt is made, and to query the switch about the -// status of a payment. For settled payment this will be the information for -// the succeeding payment attempt. -type PaymentAttemptInfo struct { - // PaymentID is the unique ID used for this attempt. - PaymentID uint64 - - // SessionKey is the ephemeral key used for this payment attempt. - SessionKey *btcec.PrivateKey - - // Route is the route attempted to send the HTLC. - Route Route -} - -// Payment is a wrapper around a payment's PaymentCreationInfo, -// PaymentAttemptInfo, and preimage. All payments will have the -// PaymentCreationInfo set, the PaymentAttemptInfo will be set only if at least -// one payment attempt has been made, while only completed payments will have a -// non-zero payment preimage. -type Payment struct { - // sequenceNum is a unique identifier used to sort the payments in - // order of creation. - sequenceNum uint64 - - // Status is the current PaymentStatus of this payment. 
- Status PaymentStatus - - // Info holds all static information about this payment, and is - // populated when the payment is initiated. - Info *PaymentCreationInfo - - // Attempt is the information about the last payment attempt made. - // - // NOTE: Can be nil if no attempt is yet made. - Attempt *PaymentAttemptInfo - - // PaymentPreimage is the preimage of a successful payment. This serves - // as a proof of payment. It will only be non-nil for settled payments. - // - // NOTE: Can be nil if payment is not settled. - PaymentPreimage *lntypes.Preimage - - // Failure is a failure reason code indicating the reason the payment - // failed. It is only non-nil for failed payments. - // - // NOTE: Can be nil if payment is not failed. - Failure *FailureReason -} - -// FetchPayments returns all sent payments found in the DB. -func (db *DB) FetchPayments() ([]*Payment, er.R) { - var payments []*Payment - - err := kvdb.View(db, func(tx kvdb.RTx) er.R { - paymentsBucket := tx.ReadBucket(paymentsRootBucket) - if paymentsBucket == nil { - return nil - } - - return paymentsBucket.ForEach(func(k, v []byte) er.R { - bucket := paymentsBucket.NestedReadBucket(k) - if bucket == nil { - // We only expect sub-buckets to be found in - // this top-level bucket. - return er.Errorf("non bucket element in " + - "payments bucket") - } - - p, err := fetchPayment(bucket) - if err != nil { - return err - } - - payments = append(payments, p) - - // For older versions of lnd, duplicate payments to a - // payment has was possible. These will be found in a - // sub-bucket indexed by their sequence number if - // available. - dup := bucket.NestedReadBucket(paymentDuplicateBucket) - if dup == nil { - return nil - } - - return dup.ForEach(func(k, v []byte) er.R { - subBucket := dup.NestedReadBucket(k) - if subBucket == nil { - // We one bucket for each duplicate to - // be found. 
- return er.Errorf("non bucket element" + - "in duplicate bucket") - } - - p, err := fetchPayment(subBucket) - if err != nil { - return err - } - - payments = append(payments, p) - return nil - }) - }) - }, func() { - payments = nil - }) - if err != nil { - return nil, err - } - - // Before returning, sort the payments by their sequence number. - sort.Slice(payments, func(i, j int) bool { - return payments[i].sequenceNum < payments[j].sequenceNum - }) - - return payments, nil -} - -func fetchPayment(bucket kvdb.RBucket) (*Payment, er.R) { - var ( - err er.R - p = &Payment{} - ) - - seqBytes := bucket.Get(paymentSequenceKey) - if seqBytes == nil { - return nil, er.Errorf("sequence number not found") - } - - p.sequenceNum = binary.BigEndian.Uint64(seqBytes) - - // Get the payment status. - p.Status = fetchPaymentStatus(bucket) - - // Get the PaymentCreationInfo. - b := bucket.Get(paymentCreationInfoKey) - if b == nil { - return nil, er.Errorf("creation info not found") - } - - r := bytes.NewReader(b) - p.Info, err = deserializePaymentCreationInfo(r) - if err != nil { - return nil, err - - } - - // Get the PaymentAttemptInfo. This can be unset. - b = bucket.Get(paymentAttemptInfoKey) - if b != nil { - r = bytes.NewReader(b) - p.Attempt, err = deserializePaymentAttemptInfo(r) - if err != nil { - return nil, err - } - } - - // Get the payment preimage. This is only found for - // completed payments. - b = bucket.Get(paymentSettleInfoKey) - if b != nil { - var preimg lntypes.Preimage - copy(preimg[:], b[:]) - p.PaymentPreimage = &preimg - } - - // Get failure reason if available. 
- b = bucket.Get(paymentFailInfoKey) - if b != nil { - reason := FailureReason(b[0]) - p.Failure = &reason - } - - return p, nil -} - -func serializePaymentCreationInfo(w io.Writer, c *PaymentCreationInfo) er.R { - var scratch [8]byte - - if _, err := util.Write(w, c.PaymentHash[:]); err != nil { - return err - } - - byteOrder.PutUint64(scratch[:], uint64(c.Value)) - if _, err := util.Write(w, scratch[:]); err != nil { - return err - } - - byteOrder.PutUint64(scratch[:], uint64(c.CreationDate.Unix())) - if _, err := util.Write(w, scratch[:]); err != nil { - return err - } - - byteOrder.PutUint32(scratch[:4], uint32(len(c.PaymentRequest))) - if _, err := util.Write(w, scratch[:4]); err != nil { - return err - } - - if _, err := util.Write(w, c.PaymentRequest[:]); err != nil { - return err - } - - return nil -} - -func deserializePaymentCreationInfo(r io.Reader) (*PaymentCreationInfo, er.R) { - var scratch [8]byte - - c := &PaymentCreationInfo{} - - if _, err := util.ReadFull(r, c.PaymentHash[:]); err != nil { - return nil, err - } - - if _, err := util.ReadFull(r, scratch[:]); err != nil { - return nil, err - } - c.Value = lnwire.MilliSatoshi(byteOrder.Uint64(scratch[:])) - - if _, err := util.ReadFull(r, scratch[:]); err != nil { - return nil, err - } - c.CreationDate = time.Unix(int64(byteOrder.Uint64(scratch[:])), 0) - - if _, err := util.ReadFull(r, scratch[:4]); err != nil { - return nil, err - } - - reqLen := uint32(byteOrder.Uint32(scratch[:4])) - payReq := make([]byte, reqLen) - if reqLen > 0 { - if _, err := util.ReadFull(r, payReq[:]); err != nil { - return nil, err - } - } - c.PaymentRequest = payReq - - return c, nil -} - -func serializePaymentAttemptInfo(w io.Writer, a *PaymentAttemptInfo) er.R { - if err := WriteElements(w, a.PaymentID, a.SessionKey); err != nil { - return err - } - - if err := SerializeRoute(w, a.Route); err != nil { - return err - } - - return nil -} - -func deserializePaymentAttemptInfo(r io.Reader) (*PaymentAttemptInfo, er.R) { - a 
:= &PaymentAttemptInfo{} - err := ReadElements(r, &a.PaymentID, &a.SessionKey) - if err != nil { - return nil, err - } - a.Route, err = DeserializeRoute(r) - if err != nil { - return nil, err - } - return a, nil -} - -func serializeHop(w io.Writer, h *Hop) er.R { - if err := WriteElements(w, - h.PubKeyBytes[:], h.ChannelID, h.OutgoingTimeLock, - h.AmtToForward, - ); err != nil { - return err - } - - if err := util.WriteBin(w, byteOrder, h.LegacyPayload); err != nil { - return err - } - - // For legacy payloads, we don't need to write any TLV records, so - // we'll write a zero indicating the our serialized TLV map has no - // records. - if h.LegacyPayload { - return WriteElements(w, uint32(0)) - } - - // Otherwise, we'll transform our slice of records into a map of the - // raw bytes, then serialize them in-line with a length (number of - // elements) prefix. - mapRecords, err := tlv.RecordsToMap(h.TLVRecords) - if err != nil { - return err - } - - numRecords := uint32(len(mapRecords)) - if err := WriteElements(w, numRecords); err != nil { - return err - } - - for recordType, rawBytes := range mapRecords { - if err := WriteElements(w, recordType); err != nil { - return err - } - - if err := wire.WriteVarBytes(w, 0, rawBytes); err != nil { - return err - } - } - - return nil -} - -// maxOnionPayloadSize is the largest Sphinx payload possible, so we don't need -// to read/write a TLV stream larger than this. -const maxOnionPayloadSize = 1300 - -func deserializeHop(r io.Reader) (*Hop, er.R) { - h := &Hop{} - - var pub []byte - if err := ReadElements(r, &pub); err != nil { - return nil, err - } - copy(h.PubKeyBytes[:], pub) - - if err := ReadElements(r, - &h.ChannelID, &h.OutgoingTimeLock, &h.AmtToForward, - ); err != nil { - return nil, err - } - - // TODO(roasbeef): change field to allow LegacyPayload false to be the - // legacy default? 
- err := util.ReadBin(r, byteOrder, &h.LegacyPayload) - if err != nil { - return nil, err - } - - var numElements uint32 - if err := ReadElements(r, &numElements); err != nil { - return nil, err - } - - // If there're no elements, then we can return early. - if numElements == 0 { - return h, nil - } - - tlvMap := make(map[uint64][]byte) - for i := uint32(0); i < numElements; i++ { - var tlvType uint64 - if err := ReadElements(r, &tlvType); err != nil { - return nil, err - } - - rawRecordBytes, err := wire.ReadVarBytes( - r, 0, maxOnionPayloadSize, "tlv", - ) - if err != nil { - return nil, err - } - - tlvMap[tlvType] = rawRecordBytes - } - - h.TLVRecords = tlv.MapToRecords(tlvMap) - - return h, nil -} - -// SerializeRoute serializes a route. -func SerializeRoute(w io.Writer, r Route) er.R { - if err := WriteElements(w, - r.TotalTimeLock, r.TotalAmount, r.SourcePubKey[:], - ); err != nil { - return err - } - - if err := WriteElements(w, uint32(len(r.Hops))); err != nil { - return err - } - - for _, h := range r.Hops { - if err := serializeHop(w, h); err != nil { - return err - } - } - - return nil -} - -// DeserializeRoute deserializes a route. 
-func DeserializeRoute(r io.Reader) (Route, er.R) { - rt := Route{} - if err := ReadElements(r, - &rt.TotalTimeLock, &rt.TotalAmount, - ); err != nil { - return rt, err - } - - var pub []byte - if err := ReadElements(r, &pub); err != nil { - return rt, err - } - copy(rt.SourcePubKey[:], pub) - - var numHops uint32 - if err := ReadElements(r, &numHops); err != nil { - return rt, err - } - - var hops []*Hop - for i := uint32(0); i < numHops; i++ { - hop, err := deserializeHop(r) - if err != nil { - return rt, err - } - hops = append(hops, hop) - } - rt.Hops = hops - - return rt, nil -} diff --git a/lnd/channeldb/migration_01_to_11/payments_test.go b/lnd/channeldb/migration_01_to_11/payments_test.go deleted file mode 100644 index 3e7bfe5f..00000000 --- a/lnd/channeldb/migration_01_to_11/payments_test.go +++ /dev/null @@ -1,108 +0,0 @@ -package migration_01_to_11 - -import ( - "bytes" - "math/rand" - "time" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -var ( - priv, _ = btcec.NewPrivateKey(btcec.S256()) - pub = priv.PubKey() -) - -func makeFakePayment() *outgoingPayment { - fakeInvoice := &Invoice{ - // Use single second precision to avoid false positive test - // failures due to the monotonic time component. 
- CreationDate: time.Unix(time.Now().Unix(), 0), - Memo: []byte("fake memo"), - Receipt: []byte("fake receipt"), - PaymentRequest: []byte(""), - } - - copy(fakeInvoice.Terms.PaymentPreimage[:], rev[:]) - fakeInvoice.Terms.Value = lnwire.NewMSatFromSatoshis(10000) - - fakePath := make([][33]byte, 3) - for i := 0; i < 3; i++ { - copy(fakePath[i][:], bytes.Repeat([]byte{byte(i)}, 33)) - } - - fakePayment := &outgoingPayment{ - Invoice: *fakeInvoice, - Fee: 101, - Path: fakePath, - TimeLockLength: 1000, - } - copy(fakePayment.PaymentPreimage[:], rev[:]) - return fakePayment -} - -// randomBytes creates random []byte with length in range [minLen, maxLen) -func randomBytes(minLen, maxLen int) ([]byte, er.R) { - randBuf := make([]byte, minLen+rand.Intn(maxLen-minLen)) - - if _, err := rand.Read(randBuf); err != nil { - return nil, er.Errorf("Internal error. "+ - "Cannot generate random string: %v", err) - } - - return randBuf, nil -} - -func makeRandomFakePayment() (*outgoingPayment, er.R) { - var err er.R - fakeInvoice := &Invoice{ - // Use single second precision to avoid false positive test - // failures due to the monotonic time component. 
- CreationDate: time.Unix(time.Now().Unix(), 0), - } - - fakeInvoice.Memo, err = randomBytes(1, 50) - if err != nil { - return nil, err - } - - fakeInvoice.Receipt, err = randomBytes(1, 50) - if err != nil { - return nil, err - } - - fakeInvoice.PaymentRequest, err = randomBytes(1, 50) - if err != nil { - return nil, err - } - - preImg, err := randomBytes(32, 33) - if err != nil { - return nil, err - } - copy(fakeInvoice.Terms.PaymentPreimage[:], preImg) - - fakeInvoice.Terms.Value = lnwire.MilliSatoshi(rand.Intn(10000)) - - fakePathLen := 1 + rand.Intn(5) - fakePath := make([][33]byte, fakePathLen) - for i := 0; i < fakePathLen; i++ { - b, err := randomBytes(33, 34) - if err != nil { - return nil, err - } - copy(fakePath[i][:], b) - } - - fakePayment := &outgoingPayment{ - Invoice: *fakeInvoice, - Fee: lnwire.MilliSatoshi(rand.Intn(1001)), - Path: fakePath, - TimeLockLength: uint32(rand.Intn(10000)), - } - copy(fakePayment.PaymentPreimage[:], fakeInvoice.Terms.PaymentPreimage[:]) - - return fakePayment, nil -} diff --git a/lnd/channeldb/migration_01_to_11/route.go b/lnd/channeldb/migration_01_to_11/route.go deleted file mode 100644 index 58253d19..00000000 --- a/lnd/channeldb/migration_01_to_11/route.go +++ /dev/null @@ -1,331 +0,0 @@ -package migration_01_to_11 - -import ( - "bytes" - "encoding/binary" - "fmt" - "io" - "strconv" - "strings" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - sphinx "github.com/pkt-cash/pktd/lightning-onion" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/record" - "github.com/pkt-cash/pktd/lnd/tlv" -) - -// VertexSize is the size of the array to store a vertex. -const VertexSize = 33 - -// ErrNoRouteHopsProvided is returned when a caller attempts to construct a new -// sphinx packet, but provides an empty set of hops for each route. 
-var ErrNoRouteHopsProvided = Err.CodeWithDetail("ErrNoRouteHopsProvided", "empty route hops provided") - -// Vertex is a simple alias for the serialization of a compressed Bitcoin -// public key. -type Vertex [VertexSize]byte - -// NewVertex returns a new Vertex given a public key. -func NewVertex(pub *btcec.PublicKey) Vertex { - var v Vertex - copy(v[:], pub.SerializeCompressed()) - return v -} - -// NewVertexFromBytes returns a new Vertex based on a serialized pubkey in a -// byte slice. -func NewVertexFromBytes(b []byte) (Vertex, er.R) { - vertexLen := len(b) - if vertexLen != VertexSize { - return Vertex{}, er.Errorf("invalid vertex length of %v, "+ - "want %v", vertexLen, VertexSize) - } - - var v Vertex - copy(v[:], b) - return v, nil -} - -// NewVertexFromStr returns a new Vertex given its hex-encoded string format. -func NewVertexFromStr(v string) (Vertex, er.R) { - // Return error if hex string is of incorrect length. - if len(v) != VertexSize*2 { - return Vertex{}, er.Errorf("invalid vertex string length of "+ - "%v, want %v", len(v), VertexSize*2) - } - - vertex, err := util.DecodeHex(v) - if err != nil { - return Vertex{}, err - } - - return NewVertexFromBytes(vertex) -} - -// String returns a human readable version of the Vertex which is the -// hex-encoding of the serialized compressed public key. -func (v Vertex) String() string { - return fmt.Sprintf("%x", v[:]) -} - -// Hop represents an intermediate or final node of the route. This naming -// is in line with the definition given in BOLT #4: Onion Routing Protocol. -// The struct houses the channel along which this hop can be reached and -// the values necessary to create the HTLC that needs to be sent to the -// next hop. It is also used to encode the per-hop payload included within -// the Sphinx packet. -type Hop struct { - // PubKeyBytes is the raw bytes of the public key of the target node. - PubKeyBytes Vertex - - // ChannelID is the unique channel ID for the channel. 
The first 3 - // bytes are the block height, the next 3 the index within the block, - // and the last 2 bytes are the output index for the channel. - ChannelID uint64 - - // OutgoingTimeLock is the timelock value that should be used when - // crafting the _outgoing_ HTLC from this hop. - OutgoingTimeLock uint32 - - // AmtToForward is the amount that this hop will forward to the next - // hop. This value is less than the value that the incoming HTLC - // carries as a fee will be subtracted by the hop. - AmtToForward lnwire.MilliSatoshi - - // TLVRecords if non-nil are a set of additional TLV records that - // should be included in the forwarding instructions for this node. - TLVRecords []tlv.Record - - // LegacyPayload if true, then this signals that this node doesn't - // understand the new TLV payload, so we must instead use the legacy - // payload. - LegacyPayload bool -} - -// PackHopPayload writes to the passed io.Writer, the series of byes that can -// be placed directly into the per-hop payload (EOB) for this hop. This will -// include the required routing fields, as well as serializing any of the -// passed optional TLVRecords. nextChanID is the unique channel ID that -// references the _outgoing_ channel ID that follows this hop. This field -// follows the same semantics as the NextAddress field in the onion: it should -// be set to zero to indicate the terminal hop. -func (h *Hop) PackHopPayload(w io.Writer, nextChanID uint64) er.R { - // If this is a legacy payload, then we'll exit here as this method - // shouldn't be called. - if h.LegacyPayload == true { - return er.Errorf("cannot pack hop payloads for legacy " + - "payloads") - } - - // Otherwise, we'll need to make a new stream that includes our - // required routing fields, as well as these optional values. - var records []tlv.Record - - // Every hop must have an amount to forward and CLTV expiry. 
- amt := uint64(h.AmtToForward) - records = append(records, - record.NewAmtToFwdRecord(&amt), - record.NewLockTimeRecord(&h.OutgoingTimeLock), - ) - - // BOLT 04 says the next_hop_id should be omitted for the final hop, - // but present for all others. - // - // TODO(conner): test using hop.Exit once available - if nextChanID != 0 { - records = append(records, - record.NewNextHopIDRecord(&nextChanID), - ) - } - - // Append any custom types destined for this hop. - records = append(records, h.TLVRecords...) - - // To ensure we produce a canonical stream, we'll sort the records - // before encoding them as a stream in the hop payload. - tlv.SortRecords(records) - - tlvStream, err := tlv.NewStream(records...) - if err != nil { - return err - } - - return tlvStream.Encode(w) -} - -// Route represents a path through the channel graph which runs over one or -// more channels in succession. This struct carries all the information -// required to craft the Sphinx onion packet, and send the payment along the -// first hop in the path. A route is only selected as valid if all the channels -// have sufficient capacity to carry the initial payment amount after fees are -// accounted for. -type Route struct { - // TotalTimeLock is the cumulative (final) time lock across the entire - // route. This is the CLTV value that should be extended to the first - // hop in the route. All other hops will decrement the time-lock as - // advertised, leaving enough time for all hops to wait for or present - // the payment preimage to complete the payment. - TotalTimeLock uint32 - - // TotalAmount is the total amount of funds required to complete a - // payment over this route. This value includes the cumulative fees at - // each hop. As a result, the HTLC extended to the first-hop in the - // route will need to have at least this many satoshis, otherwise the - // route will fail at an intermediate node due to an insufficient - // amount of fees. 
- TotalAmount lnwire.MilliSatoshi - - // SourcePubKey is the pubkey of the node where this route originates - // from. - SourcePubKey Vertex - - // Hops contains details concerning the specific forwarding details at - // each hop. - Hops []*Hop -} - -// HopFee returns the fee charged by the route hop indicated by hopIndex. -func (r *Route) HopFee(hopIndex int) lnwire.MilliSatoshi { - var incomingAmt lnwire.MilliSatoshi - if hopIndex == 0 { - incomingAmt = r.TotalAmount - } else { - incomingAmt = r.Hops[hopIndex-1].AmtToForward - } - - // Fee is calculated as difference between incoming and outgoing amount. - return incomingAmt - r.Hops[hopIndex].AmtToForward -} - -// TotalFees is the sum of the fees paid at each hop within the final route. In -// the case of a one-hop payment, this value will be zero as we don't need to -// pay a fee to ourself. -func (r *Route) TotalFees() lnwire.MilliSatoshi { - if len(r.Hops) == 0 { - return 0 - } - - return r.TotalAmount - r.Hops[len(r.Hops)-1].AmtToForward -} - -// NewRouteFromHops creates a new Route structure from the minimally required -// information to perform the payment. It infers fee amounts and populates the -// node, chan and prev/next hop maps. -func NewRouteFromHops(amtToSend lnwire.MilliSatoshi, timeLock uint32, - sourceVertex Vertex, hops []*Hop) (*Route, er.R) { - - if len(hops) == 0 { - return nil, ErrNoRouteHopsProvided.Default() - } - - // First, we'll create a route struct and populate it with the fields - // for which the values are provided as arguments of this function. - // TotalFees is determined based on the difference between the amount - // that is send from the source and the final amount that is received - // by the destination. 
- route := &Route{ - SourcePubKey: sourceVertex, - Hops: hops, - TotalTimeLock: timeLock, - TotalAmount: amtToSend, - } - - return route, nil -} - -// ToSphinxPath converts a complete route into a sphinx PaymentPath that -// contains the per-hop paylods used to encoding the HTLC routing data for each -// hop in the route. This method also accepts an optional EOB payload for the -// final hop. -func (r *Route) ToSphinxPath() (*sphinx.PaymentPath, er.R) { - var path sphinx.PaymentPath - - // For each hop encoded within the route, we'll convert the hop struct - // to an OnionHop with matching per-hop payload within the path as used - // by the sphinx package. - for i, hop := range r.Hops { - pub, err := btcec.ParsePubKey( - hop.PubKeyBytes[:], btcec.S256(), - ) - if err != nil { - return nil, err - } - - // As a base case, the next hop is set to all zeroes in order - // to indicate that the "last hop" as no further hops after it. - nextHop := uint64(0) - - // If we aren't on the last hop, then we set the "next address" - // field to be the channel that directly follows it. - if i != len(r.Hops)-1 { - nextHop = r.Hops[i+1].ChannelID - } - - var payload sphinx.HopPayload - - // If this is the legacy payload, then we can just include the - // hop data as normal. - if hop.LegacyPayload { - // Before we encode this value, we'll pack the next hop - // into the NextAddress field of the hop info to ensure - // we point to the right now. - hopData := sphinx.HopData{ - ForwardAmount: uint64(hop.AmtToForward), - OutgoingCltv: hop.OutgoingTimeLock, - } - binary.BigEndian.PutUint64( - hopData.NextAddress[:], nextHop, - ) - - payload, err = sphinx.NewHopPayload(&hopData, nil) - if err != nil { - return nil, err - } - } else { - // For non-legacy payloads, we'll need to pack the - // routing information, along with any extra TLV - // information into the new per-hop payload format. 
- // We'll also pass in the chan ID of the hop this - // channel should be forwarded to so we can construct a - // valid payload. - var b bytes.Buffer - err := hop.PackHopPayload(&b, nextHop) - if err != nil { - return nil, err - } - - // TODO(roasbeef): make better API for NewHopPayload? - payload, err = sphinx.NewHopPayload(nil, b.Bytes()) - if err != nil { - return nil, err - } - } - - path[i] = sphinx.OnionHop{ - NodePub: *pub, - HopPayload: payload, - } - } - - return &path, nil -} - -// String returns a human readable representation of the route. -func (r *Route) String() string { - var b strings.Builder - - for i, hop := range r.Hops { - if i > 0 { - b.WriteString(",") - } - b.WriteString(strconv.FormatUint(hop.ChannelID, 10)) - } - - return fmt.Sprintf("amt=%v, fees=%v, tl=%v, chans=%v", - r.TotalAmount-r.TotalFees(), r.TotalFees(), r.TotalTimeLock, - b.String(), - ) -} diff --git a/lnd/channeldb/migration_01_to_11/zpay32/amountunits.go b/lnd/channeldb/migration_01_to_11/zpay32/amountunits.go deleted file mode 100644 index 2e1caea0..00000000 --- a/lnd/channeldb/migration_01_to_11/zpay32/amountunits.go +++ /dev/null @@ -1,158 +0,0 @@ -package zpay32 - -import ( - "strconv" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -var ( - // toMSat is a map from a unit to a function that converts an amount - // of that unit to millisatoshis. - toMSat = map[byte]func(uint64) (lnwire.MilliSatoshi, er.R){ - 'm': mBtcToMSat, - 'u': uBtcToMSat, - 'n': nBtcToMSat, - 'p': pBtcToMSat, - } - - // fromMSat is a map from a unit to a function that converts an amount - // in millisatoshis to an amount of that unit. - fromMSat = map[byte]func(lnwire.MilliSatoshi) (uint64, er.R){ - 'm': mSatToMBtc, - 'u': mSatToUBtc, - 'n': mSatToNBtc, - 'p': mSatToPBtc, - } -) - -// mBtcToMSat converts the given amount in milliBTC to millisatoshis. 
-func mBtcToMSat(m uint64) (lnwire.MilliSatoshi, er.R) { - return lnwire.MilliSatoshi(m) * 100000000, nil -} - -// uBtcToMSat converts the given amount in microBTC to millisatoshis. -func uBtcToMSat(u uint64) (lnwire.MilliSatoshi, er.R) { - return lnwire.MilliSatoshi(u * 100000), nil -} - -// nBtcToMSat converts the given amount in nanoBTC to millisatoshis. -func nBtcToMSat(n uint64) (lnwire.MilliSatoshi, er.R) { - return lnwire.MilliSatoshi(n * 100), nil -} - -// pBtcToMSat converts the given amount in picoBTC to millisatoshis. -func pBtcToMSat(p uint64) (lnwire.MilliSatoshi, er.R) { - if p < 10 { - return 0, er.Errorf("minimum amount is 10p") - } - if p%10 != 0 { - return 0, er.Errorf("amount %d pBTC not expressible in msat", - p) - } - return lnwire.MilliSatoshi(p / 10), nil -} - -// mSatToMBtc converts the given amount in millisatoshis to milliBTC. -func mSatToMBtc(msat lnwire.MilliSatoshi) (uint64, er.R) { - if msat%100000000 != 0 { - return 0, er.Errorf("%d msat not expressible "+ - "in mBTC", msat) - } - return uint64(msat / 100000000), nil -} - -// mSatToUBtc converts the given amount in millisatoshis to microBTC. -func mSatToUBtc(msat lnwire.MilliSatoshi) (uint64, er.R) { - if msat%100000 != 0 { - return 0, er.Errorf("%d msat not expressible "+ - "in uBTC", msat) - } - return uint64(msat / 100000), nil -} - -// mSatToNBtc converts the given amount in millisatoshis to nanoBTC. -func mSatToNBtc(msat lnwire.MilliSatoshi) (uint64, er.R) { - if msat%100 != 0 { - return 0, er.Errorf("%d msat not expressible in nBTC", msat) - } - return uint64(msat / 100), nil -} - -// mSatToPBtc converts the given amount in millisatoshis to picoBTC. -func mSatToPBtc(msat lnwire.MilliSatoshi) (uint64, er.R) { - return uint64(msat * 10), nil -} - -// decodeAmount returns the amount encoded by the provided string in -// millisatoshi. 
-func decodeAmount(amount string) (lnwire.MilliSatoshi, er.R) { - if len(amount) < 1 { - return 0, er.Errorf("amount must be non-empty") - } - - // If last character is a digit, then the amount can just be - // interpreted as BTC. - char := amount[len(amount)-1] - digit := char - '0' - if digit >= 0 && digit <= 9 { - btc, err := strconv.ParseUint(amount, 10, 64) - if err != nil { - return 0, er.E(err) - } - return lnwire.MilliSatoshi(btc) * mSatPerBtc, nil - } - - // If not a digit, it must be part of the known units. - conv, ok := toMSat[char] - if !ok { - return 0, er.Errorf("unknown multiplier %c", char) - } - - // Known unit. - num := amount[:len(amount)-1] - if len(num) < 1 { - return 0, er.Errorf("number must be non-empty") - } - - am, err := strconv.ParseUint(num, 10, 64) - if err != nil { - return 0, er.E(err) - } - - return conv(am) -} - -// encodeAmount encodes the provided millisatoshi amount using as few characters -// as possible. -func encodeAmount(msat lnwire.MilliSatoshi) (string, er.R) { - // If possible to express in BTC, that will always be the shortest - // representation. - if msat%mSatPerBtc == 0 { - return strconv.FormatInt(int64(msat/mSatPerBtc), 10), nil - } - - // Should always be expressible in pico BTC. - pico, err := fromMSat['p'](msat) - if err != nil { - return "", er.Errorf("unable to express %d msat as pBTC: %v", - msat, err) - } - shortened := strconv.FormatUint(pico, 10) + "p" - for unit, conv := range fromMSat { - am, err := conv(msat) - if err != nil { - // Not expressible using this unit. - continue - } - - // Save the shortest found representation. 
- str := strconv.FormatUint(am, 10) + string(unit) - if len(str) < len(shortened) { - shortened = str - } - } - - return shortened, nil -} diff --git a/lnd/channeldb/migration_01_to_11/zpay32/bech32.go b/lnd/channeldb/migration_01_to_11/zpay32/bech32.go deleted file mode 100644 index 209a8423..00000000 --- a/lnd/channeldb/migration_01_to_11/zpay32/bech32.go +++ /dev/null @@ -1,170 +0,0 @@ -package zpay32 - -import ( - "fmt" - "strings" - - "github.com/pkt-cash/pktd/btcutil/er" -) - -const charset = "qpzry9x8gf2tvdw0s3jn54khce6mua7l" - -var gen = []int{0x3b6a57b2, 0x26508e6d, 0x1ea119fa, 0x3d4233dd, 0x2a1462b3} - -// NOTE: This method it a slight modification of the method bech32.Decode found -// btcutil, allowing strings to be more than 90 characters. - -// decodeBech32 decodes a bech32 encoded string, returning the human-readable -// part and the data part excluding the checksum. -// Note: the data will be base32 encoded, that is each element of the returned -// byte array will encode 5 bits of data. Use the ConvertBits method to convert -// this to 8-bit representation. -func decodeBech32(bech string) (string, []byte, er.R) { - // The maximum allowed length for a bech32 string is 90. It must also - // be at least 8 characters, since it needs a non-empty HRP, a - // separator, and a 6 character checksum. - // NB: The 90 character check specified in BIP173 is skipped here, to - // allow strings longer than 90 characters. - if len(bech) < 8 { - return "", nil, er.Errorf("invalid bech32 string length %d", - len(bech)) - } - // Only ASCII characters between 33 and 126 are allowed. - for i := 0; i < len(bech); i++ { - if bech[i] < 33 || bech[i] > 126 { - return "", nil, er.Errorf("invalid character in "+ - "string: '%c'", bech[i]) - } - } - - // The characters must be either all lowercase or all uppercase. 
- lower := strings.ToLower(bech) - upper := strings.ToUpper(bech) - if bech != lower && bech != upper { - return "", nil, er.Errorf("string not all lowercase or all " + - "uppercase") - } - - // We'll work with the lowercase string from now on. - bech = lower - - // The string is invalid if the last '1' is non-existent, it is the - // first character of the string (no human-readable part) or one of the - // last 6 characters of the string (since checksum cannot contain '1'), - // or if the string is more than 90 characters in total. - one := strings.LastIndexByte(bech, '1') - if one < 1 || one+7 > len(bech) { - return "", nil, er.Errorf("invalid index of 1") - } - - // The human-readable part is everything before the last '1'. - hrp := bech[:one] - data := bech[one+1:] - - // Each character corresponds to the byte with value of the index in - // 'charset'. - decoded, err := toBytes(data) - if err != nil { - return "", nil, er.Errorf("failed converting data to bytes: "+ - "%v", err) - } - - if !bech32VerifyChecksum(hrp, decoded) { - moreInfo := "" - checksum := bech[len(bech)-6:] - expected, err := toChars(bech32Checksum(hrp, - decoded[:len(decoded)-6])) - if err == nil { - moreInfo = fmt.Sprintf("Expected %v, got %v.", - expected, checksum) - } - return "", nil, er.Errorf("checksum failed. " + moreInfo) - } - - // We exclude the last 6 bytes, which is the checksum. - return hrp, decoded[:len(decoded)-6], nil -} - -// toBytes converts each character in the string 'chars' to the value of the -// index of the corresponding character in 'charset'. 
-func toBytes(chars string) ([]byte, er.R) { - decoded := make([]byte, 0, len(chars)) - for i := 0; i < len(chars); i++ { - index := strings.IndexByte(charset, chars[i]) - if index < 0 { - return nil, er.Errorf("invalid character not part of "+ - "charset: %v", chars[i]) - } - decoded = append(decoded, byte(index)) - } - return decoded, nil -} - -// toChars converts the byte slice 'data' to a string where each byte in 'data' -// encodes the index of a character in 'charset'. -func toChars(data []byte) (string, er.R) { - result := make([]byte, 0, len(data)) - for _, b := range data { - if int(b) >= len(charset) { - return "", er.Errorf("invalid data byte: %v", b) - } - result = append(result, charset[b]) - } - return string(result), nil -} - -// For more details on the checksum calculation, please refer to BIP 173. -func bech32Checksum(hrp string, data []byte) []byte { - // Convert the bytes to list of integers, as this is needed for the - // checksum calculation. - integers := make([]int, len(data)) - for i, b := range data { - integers[i] = int(b) - } - values := append(bech32HrpExpand(hrp), integers...) - values = append(values, []int{0, 0, 0, 0, 0, 0}...) - polymod := bech32Polymod(values) ^ 1 - var res []byte - for i := 0; i < 6; i++ { - res = append(res, byte((polymod>>uint(5*(5-i)))&31)) - } - return res -} - -// For more details on the polymod calculation, please refer to BIP 173. -func bech32Polymod(values []int) int { - chk := 1 - for _, v := range values { - b := chk >> 25 - chk = (chk&0x1ffffff)<<5 ^ v - for i := 0; i < 5; i++ { - if (b>>uint(i))&1 == 1 { - chk ^= gen[i] - } - } - } - return chk -} - -// For more details on HRP expansion, please refer to BIP 173. 
-func bech32HrpExpand(hrp string) []int { - v := make([]int, 0, len(hrp)*2+1) - for i := 0; i < len(hrp); i++ { - v = append(v, int(hrp[i]>>5)) - } - v = append(v, 0) - for i := 0; i < len(hrp); i++ { - v = append(v, int(hrp[i]&31)) - } - return v -} - -// For more details on the checksum verification, please refer to BIP 173. -func bech32VerifyChecksum(hrp string, data []byte) bool { - integers := make([]int, len(data)) - for i, b := range data { - integers[i] = int(b) - } - concat := append(bech32HrpExpand(hrp), integers...) - return bech32Polymod(concat) == 1 -} diff --git a/lnd/channeldb/migration_01_to_11/zpay32/decode.go b/lnd/channeldb/migration_01_to_11/zpay32/decode.go deleted file mode 100644 index 56627098..00000000 --- a/lnd/channeldb/migration_01_to_11/zpay32/decode.go +++ /dev/null @@ -1,496 +0,0 @@ -package zpay32 - -import ( - "bytes" - "encoding/binary" - "strings" - "time" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/bech32" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/chaincfg" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -// Decode parses the provided encoded invoice and returns a decoded Invoice if -// it is valid by BOLT-0011 and matches the provided active network. -func Decode(invoice string, net *chaincfg.Params) (*Invoice, er.R) { - decodedInvoice := Invoice{} - - // Before bech32 decoding the invoice, make sure that it is not too large. - // This is done as an anti-DoS measure since bech32 decoding is expensive. - if len(invoice) > maxInvoiceLength { - return nil, ErrInvoiceTooLarge.Default() - } - - // Decode the invoice using the modified bech32 decoder. - hrp, data, err := decodeBech32(invoice) - if err != nil { - return nil, err - } - - // We expect the human-readable part to at least have ln + one char - // encoding the network. 
- if len(hrp) < 3 { - return nil, er.Errorf("hrp too short") - } - - // First two characters of HRP should be "ln". - if hrp[:2] != "ln" { - return nil, er.Errorf("prefix should be \"ln\"") - } - - // The next characters should be a valid prefix for a segwit BIP173 - // address that match the active network. - if !strings.HasPrefix(hrp[2:], net.Bech32HRPSegwit) { - return nil, er.Errorf( - "invoice not for current active network '%s'", net.Name) - } - decodedInvoice.Net = net - - // Optionally, if there's anything left of the HRP after ln + the segwit - // prefix, we try to decode this as the payment amount. - var netPrefixLength = len(net.Bech32HRPSegwit) + 2 - if len(hrp) > netPrefixLength { - amount, err := decodeAmount(hrp[netPrefixLength:]) - if err != nil { - return nil, err - } - decodedInvoice.MilliSat = &amount - } - - // Everything except the last 520 bits of the data encodes the invoice's - // timestamp and tagged fields. - if len(data) < signatureBase32Len { - return nil, er.New("short invoice") - } - invoiceData := data[:len(data)-signatureBase32Len] - - // Parse the timestamp and tagged fields, and fill the Invoice struct. - if err := parseData(&decodedInvoice, invoiceData, net); err != nil { - return nil, err - } - - // The last 520 bits (104 groups) make up the signature. - sigBase32 := data[len(data)-signatureBase32Len:] - sigBase256, err := bech32.ConvertBits(sigBase32, 5, 8, true) - if err != nil { - return nil, err - } - var sig lnwire.Sig - copy(sig[:], sigBase256[:64]) - recoveryID := sigBase256[64] - - // The signature is over the hrp + the data the invoice, encoded in - // base 256. - taggedDataBytes, err := bech32.ConvertBits(invoiceData, 5, 8, true) - if err != nil { - return nil, err - } - - toSign := append([]byte(hrp), taggedDataBytes...) - - // We expect the signature to be over the single SHA-256 hash of that - // data. 
- hash := chainhash.HashB(toSign) - - // If the destination pubkey was provided as a tagged field, use that - // to verify the signature, if not do public key recovery. - if decodedInvoice.Destination != nil { - signature, err := sig.ToSignature() - if err != nil { - return nil, er.Errorf("unable to deserialize "+ - "signature: %v", err) - } - if !signature.Verify(hash, decodedInvoice.Destination) { - return nil, er.Errorf("invalid invoice signature") - } - } else { - headerByte := recoveryID + 27 + 4 - compactSign := append([]byte{headerByte}, sig[:]...) - pubkey, _, err := btcec.RecoverCompact(btcec.S256(), - compactSign, hash) - if err != nil { - return nil, err - } - decodedInvoice.Destination = pubkey - } - - // If no feature vector was decoded, populate an empty one. - if decodedInvoice.Features == nil { - decodedInvoice.Features = lnwire.NewFeatureVector( - nil, lnwire.Features, - ) - } - - // Now that we have created the invoice, make sure it has the required - // fields set. - if err := validateInvoice(&decodedInvoice); err != nil { - return nil, err - } - - return &decodedInvoice, nil -} - -// parseData parses the data part of the invoice. It expects base32 data -// returned from the bech32.Decode method, except signature. -func parseData(invoice *Invoice, data []byte, net *chaincfg.Params) er.R { - // It must contain the timestamp, encoded using 35 bits (7 groups). - if len(data) < timestampBase32Len { - return er.Errorf("data too short: %d", len(data)) - } - - t, err := parseTimestamp(data[:timestampBase32Len]) - if err != nil { - return err - } - invoice.Timestamp = time.Unix(int64(t), 0) - - // The rest are tagged parts. - tagData := data[7:] - return parseTaggedFields(invoice, tagData, net) -} - -// parseTimestamp converts a 35-bit timestamp (encoded in base32) to uint64. 
-func parseTimestamp(data []byte) (uint64, er.R) { - if len(data) != timestampBase32Len { - return 0, er.Errorf("timestamp must be 35 bits, was %d", - len(data)*5) - } - - return base32ToUint64(data) -} - -// parseTaggedFields takes the base32 encoded tagged fields of the invoice, and -// fills the Invoice struct accordingly. -func parseTaggedFields(invoice *Invoice, fields []byte, net *chaincfg.Params) er.R { - index := 0 - for len(fields)-index > 0 { - // If there are less than 3 groups to read, there cannot be more - // interesting information, as we need the type (1 group) and - // length (2 groups). - // - // This means the last tagged field is broken. - if len(fields)-index < 3 { - return ErrBrokenTaggedField.Default() - } - - typ := fields[index] - dataLength, err := parseFieldDataLength(fields[index+1 : index+3]) - if err != nil { - return err - } - - // If we don't have enough field data left to read this length, - // return error. - if len(fields) < index+3+int(dataLength) { - return ErrInvalidFieldLength.Default() - } - base32Data := fields[index+3 : index+3+int(dataLength)] - - // Advance the index in preparation for the next iteration. - index += 3 + int(dataLength) - - switch typ { - case fieldTypeP: - if invoice.PaymentHash != nil { - // We skip the field if we have already seen a - // supported one. - continue - } - - invoice.PaymentHash, err = parse32Bytes(base32Data) - case fieldTypeS: - if invoice.PaymentAddr != nil { - // We skip the field if we have already seen a - // supported one. - continue - } - - invoice.PaymentAddr, err = parse32Bytes(base32Data) - case fieldTypeD: - if invoice.Description != nil { - // We skip the field if we have already seen a - // supported one. - continue - } - - invoice.Description, err = parseDescription(base32Data) - case fieldTypeN: - if invoice.Destination != nil { - // We skip the field if we have already seen a - // supported one. 
- continue - } - - invoice.Destination, err = parseDestination(base32Data) - case fieldTypeH: - if invoice.DescriptionHash != nil { - // We skip the field if we have already seen a - // supported one. - continue - } - - invoice.DescriptionHash, err = parse32Bytes(base32Data) - case fieldTypeX: - if invoice.expiry != nil { - // We skip the field if we have already seen a - // supported one. - continue - } - - invoice.expiry, err = parseExpiry(base32Data) - case fieldTypeC: - if invoice.minFinalCLTVExpiry != nil { - // We skip the field if we have already seen a - // supported one. - continue - } - - invoice.minFinalCLTVExpiry, err = parseMinFinalCLTVExpiry(base32Data) - case fieldTypeF: - if invoice.FallbackAddr != nil { - // We skip the field if we have already seen a - // supported one. - continue - } - - invoice.FallbackAddr, err = parseFallbackAddr(base32Data, net) - case fieldTypeR: - // An `r` field can be included in an invoice multiple - // times, so we won't skip it if we have already seen - // one. - routeHint, err := parseRouteHint(base32Data) - if err != nil { - return err - } - - invoice.RouteHints = append(invoice.RouteHints, routeHint) - case fieldType9: - if invoice.Features != nil { - // We skip the field if we have already seen a - // supported one. - continue - } - - invoice.Features, err = parseFeatures(base32Data) - default: - // Ignore unknown type. - } - - // Check if there was an error from parsing any of the tagged - // fields and return it. - if err != nil { - return err - } - } - - return nil -} - -// parseFieldDataLength converts the two byte slice into a uint16. -func parseFieldDataLength(data []byte) (uint16, er.R) { - if len(data) != 2 { - return 0, er.Errorf("data length must be 2 bytes, was %d", - len(data)) - } - - return uint16(data[0])<<5 | uint16(data[1]), nil -} - -// parse32Bytes converts a 256-bit value (encoded in base32) to *[32]byte. This -// can be used for payment hashes, description hashes, payment addresses, etc. 
-func parse32Bytes(data []byte) (*[32]byte, er.R) { - var paymentHash [32]byte - - // As BOLT-11 states, a reader must skip over the 32-byte fields if - // it does not have a length of 52, so avoid returning an error. - if len(data) != hashBase32Len { - return nil, nil - } - - hash, err := bech32.ConvertBits(data, 5, 8, false) - if err != nil { - return nil, err - } - - copy(paymentHash[:], hash) - - return &paymentHash, nil -} - -// parseDescription converts the data (encoded in base32) into a string to use -// as the description. -func parseDescription(data []byte) (*string, er.R) { - base256Data, err := bech32.ConvertBits(data, 5, 8, false) - if err != nil { - return nil, err - } - - description := string(base256Data) - - return &description, nil -} - -// parseDestination converts the data (encoded in base32) into a 33-byte public -// key of the payee node. -func parseDestination(data []byte) (*btcec.PublicKey, er.R) { - // As BOLT-11 states, a reader must skip over the destination field - // if it does not have a length of 53, so avoid returning an error. - if len(data) != pubKeyBase32Len { - return nil, nil - } - - base256Data, err := bech32.ConvertBits(data, 5, 8, false) - if err != nil { - return nil, err - } - - return btcec.ParsePubKey(base256Data, btcec.S256()) -} - -// parseExpiry converts the data (encoded in base32) into the expiry time. -func parseExpiry(data []byte) (*time.Duration, er.R) { - expiry, err := base32ToUint64(data) - if err != nil { - return nil, err - } - - duration := time.Duration(expiry) * time.Second - - return &duration, nil -} - -// parseMinFinalCLTVExpiry converts the data (encoded in base32) into a uint64 -// to use as the minFinalCLTVExpiry. -func parseMinFinalCLTVExpiry(data []byte) (*uint64, er.R) { - expiry, err := base32ToUint64(data) - if err != nil { - return nil, err - } - - return &expiry, nil -} - -// parseFallbackAddr converts the data (encoded in base32) into a fallback -// on-chain address. 
-func parseFallbackAddr(data []byte, net *chaincfg.Params) (btcutil.Address, er.R) { - // Checks if the data is empty or contains a version without an address. - if len(data) < 2 { - return nil, er.Errorf("empty fallback address field") - } - - var addr btcutil.Address - - version := data[0] - switch version { - case 0: - witness, err := bech32.ConvertBits(data[1:], 5, 8, false) - if err != nil { - return nil, err - } - - switch len(witness) { - case 20: - addr, err = btcutil.NewAddressWitnessPubKeyHash(witness, net) - case 32: - addr, err = btcutil.NewAddressWitnessScriptHash(witness, net) - default: - return nil, er.Errorf("unknown witness program length %d", - len(witness)) - } - - if err != nil { - return nil, err - } - case 17: - pubKeyHash, err := bech32.ConvertBits(data[1:], 5, 8, false) - if err != nil { - return nil, err - } - - addr, err = btcutil.NewAddressPubKeyHash(pubKeyHash, net) - if err != nil { - return nil, err - } - case 18: - scriptHash, err := bech32.ConvertBits(data[1:], 5, 8, false) - if err != nil { - return nil, err - } - - addr, err = btcutil.NewAddressScriptHashFromHash(scriptHash, net) - if err != nil { - return nil, err - } - default: - // Ignore unknown version. - } - - return addr, nil -} - -// parseRouteHint converts the data (encoded in base32) into an array containing -// one or more routing hop hints that represent a single route hint. -func parseRouteHint(data []byte) ([]HopHint, er.R) { - base256Data, err := bech32.ConvertBits(data, 5, 8, false) - if err != nil { - return nil, err - } - - // Check that base256Data is a multiple of hopHintLen. 
- if len(base256Data)%hopHintLen != 0 { - return nil, er.Errorf("expected length multiple of %d bytes, "+ - "got %d", hopHintLen, len(base256Data)) - } - - var routeHint []HopHint - - for len(base256Data) > 0 { - hopHint := HopHint{} - hopHint.NodeID, err = btcec.ParsePubKey(base256Data[:33], btcec.S256()) - if err != nil { - return nil, err - } - hopHint.ChannelID = binary.BigEndian.Uint64(base256Data[33:41]) - hopHint.FeeBaseMSat = binary.BigEndian.Uint32(base256Data[41:45]) - hopHint.FeeProportionalMillionths = binary.BigEndian.Uint32(base256Data[45:49]) - hopHint.CLTVExpiryDelta = binary.BigEndian.Uint16(base256Data[49:51]) - - routeHint = append(routeHint, hopHint) - - base256Data = base256Data[51:] - } - - return routeHint, nil -} - -// parseFeatures decodes any feature bits directly from the base32 -// representation. -func parseFeatures(data []byte) (*lnwire.FeatureVector, er.R) { - rawFeatures := lnwire.NewRawFeatureVector() - err := rawFeatures.DecodeBase32(bytes.NewReader(data), len(data)) - if err != nil { - return nil, err - } - - return lnwire.NewFeatureVector(rawFeatures, lnwire.Features), nil -} - -// base32ToUint64 converts a base32 encoded number to uint64. -func base32ToUint64(data []byte) (uint64, er.R) { - // Maximum that fits in uint64 is ceil(64 / 5) = 12 groups. - if len(data) > 13 { - return 0, er.Errorf("cannot parse data of length %d as uint64", - len(data)) - } - - val := uint64(0) - for i := 0; i < len(data); i++ { - val = val<<5 | uint64(data[i]) - } - return val, nil -} diff --git a/lnd/channeldb/migration_01_to_11/zpay32/hophint.go b/lnd/channeldb/migration_01_to_11/zpay32/hophint.go deleted file mode 100644 index e2c8d858..00000000 --- a/lnd/channeldb/migration_01_to_11/zpay32/hophint.go +++ /dev/null @@ -1,43 +0,0 @@ -package zpay32 - -import "github.com/pkt-cash/pktd/btcec" - -const ( - // DefaultFinalCLTVDelta is the default value to be used as the final - // CLTV delta for a route if one is unspecified. 
- DefaultFinalCLTVDelta = 9 -) - -// HopHint is a routing hint that contains the minimum information of a channel -// required for an intermediate hop in a route to forward the payment to the -// next. This should be ideally used for private channels, since they are not -// publicly advertised to the network for routing. -type HopHint struct { - // NodeID is the public key of the node at the start of the channel. - NodeID *btcec.PublicKey - - // ChannelID is the unique identifier of the channel. - ChannelID uint64 - - // FeeBaseMSat is the base fee of the channel in millisatoshis. - FeeBaseMSat uint32 - - // FeeProportionalMillionths is the fee rate, in millionths of a - // satoshi, for every satoshi sent through the channel. - FeeProportionalMillionths uint32 - - // CLTVExpiryDelta is the time-lock delta of the channel. - CLTVExpiryDelta uint16 -} - -// Copy returns a deep copy of the hop hint. -func (h HopHint) Copy() HopHint { - nodeID := *h.NodeID - return HopHint{ - NodeID: &nodeID, - ChannelID: h.ChannelID, - FeeBaseMSat: h.FeeBaseMSat, - FeeProportionalMillionths: h.FeeProportionalMillionths, - CLTVExpiryDelta: h.CLTVExpiryDelta, - } -} diff --git a/lnd/channeldb/migration_01_to_11/zpay32/invoice.go b/lnd/channeldb/migration_01_to_11/zpay32/invoice.go deleted file mode 100644 index 2ff684a1..00000000 --- a/lnd/channeldb/migration_01_to_11/zpay32/invoice.go +++ /dev/null @@ -1,374 +0,0 @@ -package zpay32 - -import ( - "time" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/chaincfg" - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -const ( - // mSatPerBtc is the number of millisatoshis in 1 BTC. - mSatPerBtc = 100000000000 - - // signatureBase32Len is the number of 5-bit groups needed to encode - // the 512 bit signature + 8 bit recovery ID. - signatureBase32Len = 104 - - // timestampBase32Len is the number of 5-bit groups needed to encode - // the 35-bit timestamp. 
- timestampBase32Len = 7 - - // hashBase32Len is the number of 5-bit groups needed to encode a - // 256-bit hash. Note that the last group will be padded with zeroes. - hashBase32Len = 52 - - // pubKeyBase32Len is the number of 5-bit groups needed to encode a - // 33-byte compressed pubkey. Note that the last group will be padded - // with zeroes. - pubKeyBase32Len = 53 - - // hopHintLen is the number of bytes needed to encode the hop hint of a - // single private route. - hopHintLen = 51 - - // The following byte values correspond to the supported field types. - // The field name is the character representing that 5-bit value in the - // bech32 string. - - // fieldTypeP is the field containing the payment hash. - fieldTypeP = 1 - - // fieldTypeD contains a short description of the payment. - fieldTypeD = 13 - - // fieldTypeN contains the pubkey of the target node. - fieldTypeN = 19 - - // fieldTypeH contains the hash of a description of the payment. - fieldTypeH = 23 - - // fieldTypeX contains the expiry in seconds of the invoice. - fieldTypeX = 6 - - // fieldTypeF contains a fallback on-chain address. - fieldTypeF = 9 - - // fieldTypeR contains extra routing information. - fieldTypeR = 3 - - // fieldTypeC contains an optional requested final CLTV delta. - fieldTypeC = 24 - - // fieldType9 contains one or more bytes for signaling features - // supported or required by the receiver. - fieldType9 = 5 - - // fieldTypeS contains a 32-byte payment address, which is a nonce - // included in the final hop's payload to prevent intermediaries from - // probing the recipient. - fieldTypeS = 16 - - // maxInvoiceLength is the maximum total length an invoice can have. - // This is chosen to be the maximum number of bytes that can fit into a - // single QR code: https://en.wikipedia.org/wiki/QR_code#Storage - maxInvoiceLength = 7089 - - // DefaultInvoiceExpiry is the default expiry duration from the creation - // timestamp if expiry is set to zero. 
- DefaultInvoiceExpiry = time.Hour -) - -var ( - Err = er.NewErrorType("lnd.zpay32") - // ErrInvoiceTooLarge is returned when an invoice exceeds - // maxInvoiceLength. - ErrInvoiceTooLarge = Err.CodeWithDetail("ErrInvoiceTooLarge", "invoice is too large") - - // ErrInvalidFieldLength is returned when a tagged field was specified - // with a length larger than the left over bytes of the data field. - ErrInvalidFieldLength = Err.CodeWithDetail("ErrInvalidFieldLength", "invalid field length") - - // ErrBrokenTaggedField is returned when the last tagged field is - // incorrectly formatted and doesn't have enough bytes to be read. - ErrBrokenTaggedField = Err.CodeWithDetail("ErrBrokenTaggedField", "last tagged field is broken") -) - -// MessageSigner is passed to the Encode method to provide a signature -// corresponding to the node's pubkey. -type MessageSigner struct { - // SignCompact signs the passed hash with the node's privkey. The - // returned signature should be 65 bytes, where the last 64 are the - // compact signature, and the first one is a header byte. This is the - // format returned by btcec.SignCompact. - SignCompact func(hash []byte) ([]byte, er.R) -} - -// Invoice represents a decoded invoice, or to-be-encoded invoice. Some of the -// fields are optional, and will only be non-nil if the invoice this was parsed -// from contains that field. When encoding, only the non-nil fields will be -// added to the encoded invoice. -type Invoice struct { - // Net specifies what network this Lightning invoice is meant for. - Net *chaincfg.Params - - // MilliSat specifies the amount of this invoice in millisatoshi. - // Optional. - MilliSat *lnwire.MilliSatoshi - - // Timestamp specifies the time this invoice was created. - // Mandatory - Timestamp time.Time - - // PaymentHash is the payment hash to be used for a payment to this - // invoice. 
- PaymentHash *[32]byte - - // PaymentAddr is the payment address to be used by payments to prevent - // probing of the destination. - PaymentAddr *[32]byte - - // Destination is the public key of the target node. This will always - // be set after decoding, and can optionally be set before encoding to - // include the pubkey as an 'n' field. If this is not set before - // encoding then the destination pubkey won't be added as an 'n' field, - // and the pubkey will be extracted from the signature during decoding. - Destination *btcec.PublicKey - - // minFinalCLTVExpiry is the value that the creator of the invoice - // expects to be used for the CLTV expiry of the HTLC extended to it in - // the last hop. - // - // NOTE: This value is optional, and should be set to nil if the - // invoice creator doesn't have a strong requirement on the CLTV expiry - // of the final HTLC extended to it. - // - // This field is un-exported and can only be read by the - // MinFinalCLTVExpiry() method. By forcing callers to read via this - // method, we can easily enforce the default if not specified. - minFinalCLTVExpiry *uint64 - - // Description is a short description of the purpose of this invoice. - // Optional. Non-nil iff DescriptionHash is nil. - Description *string - - // DescriptionHash is the SHA256 hash of a description of the purpose of - // this invoice. - // Optional. Non-nil iff Description is nil. - DescriptionHash *[32]byte - - // expiry specifies the timespan this invoice will be valid. - // Optional. If not set, a default expiry of 60 min will be implied. - // - // This field is unexported and can be read by the Expiry() method. This - // method makes sure the default expiry time is returned in case the - // field is not set. - expiry *time.Duration - - // FallbackAddr is an on-chain address that can be used for payment in - // case the Lightning payment fails. - // Optional. 
- FallbackAddr btcutil.Address - - // RouteHints represents one or more different route hints. Each route - // hint can be individually used to reach the destination. These usually - // represent private routes. - // - // NOTE: This is optional. - RouteHints [][]HopHint - - // Features represents an optional field used to signal optional or - // required support for features by the receiver. - Features *lnwire.FeatureVector -} - -// Amount is a functional option that allows callers of NewInvoice to set the -// amount in millisatoshis that the Invoice should encode. -func Amount(milliSat lnwire.MilliSatoshi) func(*Invoice) { - return func(i *Invoice) { - i.MilliSat = &milliSat - } -} - -// Destination is a functional option that allows callers of NewInvoice to -// explicitly set the pubkey of the Invoice's destination node. -func Destination(destination *btcec.PublicKey) func(*Invoice) { - return func(i *Invoice) { - i.Destination = destination - } -} - -// Description is a functional option that allows callers of NewInvoice to set -// the payment description of the created Invoice. -// -// NOTE: Must be used if and only if DescriptionHash is not used. -func Description(description string) func(*Invoice) { - return func(i *Invoice) { - i.Description = &description - } -} - -// CLTVExpiry is an optional value which allows the receiver of the payment to -// specify the delta between the current height and the HTLC extended to the -// receiver. -func CLTVExpiry(delta uint64) func(*Invoice) { - return func(i *Invoice) { - i.minFinalCLTVExpiry = &delta - } -} - -// DescriptionHash is a functional option that allows callers of NewInvoice to -// set the payment description hash of the created Invoice. -// -// NOTE: Must be used if and only if Description is not used. 
-func DescriptionHash(descriptionHash [32]byte) func(*Invoice) { - return func(i *Invoice) { - i.DescriptionHash = &descriptionHash - } -} - -// Expiry is a functional option that allows callers of NewInvoice to set the -// expiry of the created Invoice. If not set, a default expiry of 60 min will -// be implied. -func Expiry(expiry time.Duration) func(*Invoice) { - return func(i *Invoice) { - i.expiry = &expiry - } -} - -// FallbackAddr is a functional option that allows callers of NewInvoice to set -// the Invoice's fallback on-chain address that can be used for payment in case -// the Lightning payment fails -func FallbackAddr(fallbackAddr btcutil.Address) func(*Invoice) { - return func(i *Invoice) { - i.FallbackAddr = fallbackAddr - } -} - -// RouteHint is a functional option that allows callers of NewInvoice to add -// one or more hop hints that represent a private route to the destination. -func RouteHint(routeHint []HopHint) func(*Invoice) { - return func(i *Invoice) { - i.RouteHints = append(i.RouteHints, routeHint) - } -} - -// Features is a functional option that allows callers of NewInvoice to set the -// desired feature bits that are advertised on the invoice. If this option is -// not used, an empty feature vector will automatically be populated. -func Features(features *lnwire.FeatureVector) func(*Invoice) { - return func(i *Invoice) { - i.Features = features - } -} - -// PaymentAddr is a functional option that allows callers of NewInvoice to set -// the desired payment address tht is advertised on the invoice. -func PaymentAddr(addr [32]byte) func(*Invoice) { - return func(i *Invoice) { - i.PaymentAddr = &addr - } -} - -// NewInvoice creates a new Invoice object. The last parameter is a set of -// variadic arguments for setting optional fields of the invoice. -// -// NOTE: Either Description or DescriptionHash must be provided for the Invoice -// to be considered valid. 
-func NewInvoice(net *chaincfg.Params, paymentHash [32]byte, - timestamp time.Time, options ...func(*Invoice)) (*Invoice, er.R) { - - invoice := &Invoice{ - Net: net, - PaymentHash: &paymentHash, - Timestamp: timestamp, - } - - for _, option := range options { - option(invoice) - } - - // If no features were set, we'll populate an empty feature vector. - if invoice.Features == nil { - invoice.Features = lnwire.NewFeatureVector( - nil, lnwire.Features, - ) - } - - if err := validateInvoice(invoice); err != nil { - return nil, err - } - - return invoice, nil -} - -// Expiry returns the expiry time for this invoice. If expiry time is not set -// explicitly, the default 3600 second expiry will be returned. -func (invoice *Invoice) Expiry() time.Duration { - if invoice.expiry != nil { - return *invoice.expiry - } - - // If no expiry is set for this invoice, default is 3600 seconds. - return DefaultInvoiceExpiry -} - -// MinFinalCLTVExpiry returns the minimum final CLTV expiry delta as specified -// by the creator of the invoice. This value specifies the delta between the -// current height and the expiry height of the HTLC extended in the last hop. -func (invoice *Invoice) MinFinalCLTVExpiry() uint64 { - if invoice.minFinalCLTVExpiry != nil { - return *invoice.minFinalCLTVExpiry - } - - return DefaultFinalCLTVDelta -} - -// validateInvoice does a sanity check of the provided Invoice, making sure it -// has all the necessary fields set for it to be considered valid by BOLT-0011. -func validateInvoice(invoice *Invoice) er.R { - // The net must be set. - if invoice.Net == nil { - return er.Errorf("net params not set") - } - - // The invoice must contain a payment hash. - if invoice.PaymentHash == nil { - return er.Errorf("no payment hash found") - } - - // Either Description or DescriptionHash must be set, not both. 
- if invoice.Description != nil && invoice.DescriptionHash != nil { - return er.Errorf("both description and description hash set") - } - if invoice.Description == nil && invoice.DescriptionHash == nil { - return er.Errorf("neither description nor description hash set") - } - - // Check that we support the field lengths. - if len(invoice.PaymentHash) != 32 { - return er.Errorf("unsupported payment hash length: %d", - len(invoice.PaymentHash)) - } - - if invoice.DescriptionHash != nil && len(invoice.DescriptionHash) != 32 { - return er.Errorf("unsupported description hash length: %d", - len(invoice.DescriptionHash)) - } - - if invoice.Destination != nil && - len(invoice.Destination.SerializeCompressed()) != 33 { - return er.Errorf("unsupported pubkey length: %d", - len(invoice.Destination.SerializeCompressed())) - } - - // Ensure that all invoices have feature vectors. - if invoice.Features == nil { - return er.Errorf("missing feature vector") - } - - return nil -} diff --git a/lnd/channeldb/migtest/migtest.go b/lnd/channeldb/migtest/migtest.go deleted file mode 100644 index 5caebc72..00000000 --- a/lnd/channeldb/migtest/migtest.go +++ /dev/null @@ -1,93 +0,0 @@ -package migtest - -import ( - "io/ioutil" - "os" - "testing" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" -) - -// MakeDB creates a new instance of the ChannelDB for testing purposes. A -// callback which cleans up the created temporary directories is also returned -// and intended to be executed after the test completes. -func MakeDB() (kvdb.Backend, func(), er.R) { - // Create temporary database for mission control. 
- file, errr := ioutil.TempFile("", "*.db") - if errr != nil { - return nil, nil, er.E(errr) - } - - dbPath := file.Name() - db, err := kvdb.Open(kvdb.BoltBackendName, dbPath, true) - if err != nil { - return nil, nil, err - } - - cleanUp := func() { - db.Close() - os.RemoveAll(dbPath) - } - - return db, cleanUp, nil -} - -// ApplyMigration is a helper test function that encapsulates the general steps -// which are needed to properly check the result of applying migration function. -func ApplyMigration(t *testing.T, - beforeMigration, afterMigration, migrationFunc func(tx kvdb.RwTx) er.R, - shouldFail bool) { - - cdb, cleanUp, err := MakeDB() - defer cleanUp() - if err != nil { - t.Fatal(err) - } - - // beforeMigration usually used for populating the database - // with test data. - err = kvdb.Update(cdb, beforeMigration, func() {}) - if err != nil { - t.Fatal(err) - } - - defer func() { - if r := recover(); r != nil { - err = newError(r) - } - - if err == nil && shouldFail { - t.Fatal("error wasn't received on migration stage") - } else if err != nil && !shouldFail { - t.Fatalf("error was received on migration stage: %v", err) - } - - // afterMigration usually used for checking the database state and - // throwing the error if something went wrong. - err = kvdb.Update(cdb, afterMigration, func() {}) - if err != nil { - t.Fatal(err) - } - }() - - // Apply migration. 
- err = kvdb.Update(cdb, migrationFunc, func() {}) - if err != nil { - t.Logf("migration error: %v", err) - } -} - -func newError(e interface{}) er.R { - var err er.R - switch e := e.(type) { - case er.R: - err = e - case error: - err = er.E(e) - default: - err = er.Errorf("%v", e) - } - - return err -} diff --git a/lnd/channeldb/migtest/raw_db.go b/lnd/channeldb/migtest/raw_db.go deleted file mode 100644 index 449e85aa..00000000 --- a/lnd/channeldb/migtest/raw_db.go +++ /dev/null @@ -1,188 +0,0 @@ -package migtest - -import ( - "bytes" - "encoding/hex" - "fmt" - "strings" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" -) - -// DumpDB dumps go code describing the contents of the database to stdout. This -// function is only intended for use during development. -// -// Example output: -// -// map[string]interface{}{ -// hex("1234"): map[string]interface{}{ -// "human-readable": hex("102030"), -// hex("1111"): hex("5783492373"), -// }, -// } -func DumpDB(tx kvdb.RTx, rootKey []byte) er.R { - bucket := tx.ReadBucket(rootKey) - if bucket == nil { - return er.Errorf("bucket %v not found", string(rootKey)) - } - - return dumpBucket(bucket) -} - -func dumpBucket(bucket kvdb.RBucket) er.R { - fmt.Printf("map[string]interface{} {\n") - err := bucket.ForEach(func(k, v []byte) er.R { - key := toString(k) - fmt.Printf("%v: ", key) - - subBucket := bucket.NestedReadBucket(k) - if subBucket != nil { - err := dumpBucket(subBucket) - if err != nil { - return err - } - } else { - fmt.Print(toHex(v)) - } - fmt.Printf(",\n") - - return nil - }) - if err != nil { - return err - } - fmt.Printf("}") - - return nil -} - -// RestoreDB primes the database with the given data set. 
-func RestoreDB(tx kvdb.RwTx, rootKey []byte, data map[string]interface{}) er.R { - bucket, err := tx.CreateTopLevelBucket(rootKey) - if err != nil { - return err - } - - return restoreDB(bucket, data) -} - -func restoreDB(bucket kvdb.RwBucket, data map[string]interface{}) er.R { - for k, v := range data { - key := []byte(k) - - switch value := v.(type) { - - // Key contains value. - case string: - err := bucket.Put(key, []byte(value)) - if err != nil { - return err - } - - // Key contains a sub-bucket. - case map[string]interface{}: - subBucket, err := bucket.CreateBucket(key) - if err != nil { - return err - } - - if err := restoreDB(subBucket, value); err != nil { - return err - } - - default: - return er.New("invalid type") - } - } - - return nil -} - -// VerifyDB verifies the database against the given data set. -func VerifyDB(tx kvdb.RTx, rootKey []byte, data map[string]interface{}) er.R { - bucket := tx.ReadBucket(rootKey) - if bucket == nil { - return er.Errorf("bucket %v not found", string(rootKey)) - } - - return verifyDB(bucket, data) -} - -func verifyDB(bucket kvdb.RBucket, data map[string]interface{}) er.R { - for k, v := range data { - key := []byte(k) - - switch value := v.(type) { - - // Key contains value. - case string: - expectedValue := []byte(value) - dbValue := bucket.Get(key) - - if !bytes.Equal(dbValue, expectedValue) { - return er.New("value mismatch") - } - - // Key contains a sub-bucket. 
- case map[string]interface{}: - subBucket := bucket.NestedReadBucket(key) - if subBucket == nil { - return er.Errorf("bucket %v not found", k) - } - - err := verifyDB(subBucket, value) - if err != nil { - return err - } - - default: - return er.New("invalid type") - } - } - - keyCount := 0 - err := bucket.ForEach(func(k, v []byte) er.R { - keyCount++ - return nil - }) - if err != nil { - return err - } - if keyCount != len(data) { - return er.New("unexpected keys in database") - } - - return nil -} - -func toHex(v []byte) string { - if len(v) == 0 { - return "nil" - } - - return "hex(\"" + hex.EncodeToString(v) + "\")" -} - -func toString(v []byte) string { - readableChars := "abcdefghijklmnopqrstuvwxyz0123456789-" - - for _, c := range v { - if !strings.Contains(readableChars, string(c)) { - return toHex(v) - } - } - - return "\"" + string(v) + "\"" -} - -// Hex is a test helper function to convert readable hex arrays to raw byte -// strings. -func Hex(value string) string { - b, err := util.DecodeHex(value) - if err != nil { - panic(err) - } - return string(b) -} diff --git a/lnd/channeldb/mp_payment.go b/lnd/channeldb/mp_payment.go deleted file mode 100644 index 64e5af49..00000000 --- a/lnd/channeldb/mp_payment.go +++ /dev/null @@ -1,313 +0,0 @@ -package channeldb - -import ( - "bytes" - "io" - "time" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/lnd/lntypes" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/routing/route" - "github.com/pkt-cash/pktd/wire" -) - -// HTLCAttemptInfo contains static information about a specific HTLC attempt -// for a payment. This information is used by the router to handle any errors -// coming back after an attempt is made, and to query the switch about the -// status of the attempt. -type HTLCAttemptInfo struct { - // AttemptID is the unique ID used for this attempt. 
- AttemptID uint64 - - // SessionKey is the ephemeral key used for this attempt. - SessionKey *btcec.PrivateKey - - // Route is the route attempted to send the HTLC. - Route route.Route - - // AttemptTime is the time at which this HTLC was attempted. - AttemptTime time.Time -} - -// HTLCAttempt contains information about a specific HTLC attempt for a given -// payment. It contains the HTLCAttemptInfo used to send the HTLC, as well -// as a timestamp and any known outcome of the attempt. -type HTLCAttempt struct { - HTLCAttemptInfo - - // Settle is the preimage of a successful payment. This serves as a - // proof of payment. It will only be non-nil for settled payments. - // - // NOTE: Can be nil if payment is not settled. - Settle *HTLCSettleInfo - - // Fail is a failure reason code indicating the reason the payment - // failed. It is only non-nil for failed payments. - // - // NOTE: Can be nil if payment is not failed. - Failure *HTLCFailInfo -} - -// HTLCSettleInfo encapsulates the information that augments an HTLCAttempt in -// the event that the HTLC is successful. -type HTLCSettleInfo struct { - // Preimage is the preimage of a successful HTLC. This serves as a proof - // of payment. - Preimage lntypes.Preimage - - // SettleTime is the time at which this HTLC was settled. - SettleTime time.Time -} - -// HTLCFailReason is the reason an htlc failed. -type HTLCFailReason byte - -const ( - // HTLCFailUnknown is recorded for htlcs that failed with an unknown - // reason. - HTLCFailUnknown HTLCFailReason = 0 - - // HTLCFailUnknown is recorded for htlcs that had a failure message that - // couldn't be decrypted. - HTLCFailUnreadable HTLCFailReason = 1 - - // HTLCFailInternal is recorded for htlcs that failed because of an - // internal error. - HTLCFailInternal HTLCFailReason = 2 - - // HTLCFailMessage is recorded for htlcs that failed with a network - // failure message. 
- HTLCFailMessage HTLCFailReason = 3 -) - -// HTLCFailInfo encapsulates the information that augments an HTLCAttempt in the -// event that the HTLC fails. -type HTLCFailInfo struct { - // FailTime is the time at which this HTLC was failed. - FailTime time.Time - - // Message is the wire message that failed this HTLC. This field will be - // populated when the failure reason is HTLCFailMessage. - Message lnwire.FailureMessage - - // Reason is the failure reason for this HTLC. - Reason HTLCFailReason - - // The position in the path of the intermediate or final node that - // generated the failure message. Position zero is the sender node. This - // field will be populated when the failure reason is either - // HTLCFailMessage or HTLCFailUnknown. - FailureSourceIndex uint32 -} - -// MPPayment is a wrapper around a payment's PaymentCreationInfo and -// HTLCAttempts. All payments will have the PaymentCreationInfo set, any -// HTLCs made in attempts to be completed will populated in the HTLCs slice. -// Each populated HTLCAttempt represents an attempted HTLC, each of which may -// have the associated Settle or Fail struct populated if the HTLC is no longer -// in-flight. -type MPPayment struct { - // SequenceNum is a unique identifier used to sort the payments in - // order of creation. - SequenceNum uint64 - - // Info holds all static information about this payment, and is - // populated when the payment is initiated. - Info *PaymentCreationInfo - - // HTLCs holds the information about individual HTLCs that we send in - // order to make the payment. - HTLCs []HTLCAttempt - - // FailureReason is the failure reason code indicating the reason the - // payment failed. - // - // NOTE: Will only be set once the daemon has given up on the payment - // altogether. - FailureReason *FailureReason - - // Status is the current PaymentStatus of this payment. - Status PaymentStatus -} - -// TerminalInfo returns any HTLC settle info recorded. 
If no settle info is -// recorded, any payment level failure will be returned. If neither a settle -// nor a failure is recorded, both return values will be nil. -func (m *MPPayment) TerminalInfo() (*HTLCSettleInfo, *FailureReason) { - for _, h := range m.HTLCs { - if h.Settle != nil { - return h.Settle, nil - } - } - - return nil, m.FailureReason -} - -// SentAmt returns the sum of sent amount and fees for HTLCs that are either -// settled or still in flight. -func (m *MPPayment) SentAmt() (lnwire.MilliSatoshi, lnwire.MilliSatoshi) { - var sent, fees lnwire.MilliSatoshi - for _, h := range m.HTLCs { - if h.Failure != nil { - continue - } - - // The attempt was not failed, meaning the amount was - // potentially sent to the receiver. - sent += h.Route.ReceiverAmt() - fees += h.Route.TotalFees() - } - - return sent, fees -} - -// InFlightHTLCs returns the HTLCs that are still in-flight, meaning they have -// not been settled or failed. -func (m *MPPayment) InFlightHTLCs() []HTLCAttempt { - var inflights []HTLCAttempt - for _, h := range m.HTLCs { - if h.Settle != nil || h.Failure != nil { - continue - } - - inflights = append(inflights, h) - } - - return inflights -} - -// GetAttempt returns the specified htlc attempt on the payment. -func (m *MPPayment) GetAttempt(id uint64) (*HTLCAttempt, er.R) { - for _, htlc := range m.HTLCs { - htlc := htlc - if htlc.AttemptID == id { - return &htlc, nil - } - } - - return nil, er.New("htlc attempt not found on payment") -} - -// serializeHTLCSettleInfo serializes the details of a settled htlc. -func serializeHTLCSettleInfo(w io.Writer, s *HTLCSettleInfo) er.R { - if _, err := util.Write(w, s.Preimage[:]); err != nil { - return err - } - - if err := serializeTime(w, s.SettleTime); err != nil { - return err - } - - return nil -} - -// deserializeHTLCSettleInfo deserializes the details of a settled htlc. 
-func deserializeHTLCSettleInfo(r io.Reader) (*HTLCSettleInfo, er.R) { - s := &HTLCSettleInfo{} - if _, err := util.ReadFull(r, s.Preimage[:]); err != nil { - return nil, err - } - - var err er.R - s.SettleTime, err = deserializeTime(r) - if err != nil { - return nil, err - } - - return s, nil -} - -// serializeHTLCFailInfo serializes the details of a failed htlc including the -// wire failure. -func serializeHTLCFailInfo(w io.Writer, f *HTLCFailInfo) er.R { - if err := serializeTime(w, f.FailTime); err != nil { - return err - } - - // Write failure. If there is no failure message, write an empty - // byte slice. - var messageBytes bytes.Buffer - if f.Message != nil { - err := lnwire.EncodeFailureMessage(&messageBytes, f.Message, 0) - if err != nil { - return err - } - } - if err := wire.WriteVarBytes(w, 0, messageBytes.Bytes()); err != nil { - return err - } - - return WriteElements(w, byte(f.Reason), f.FailureSourceIndex) -} - -// deserializeHTLCFailInfo deserializes the details of a failed htlc including -// the wire failure. -func deserializeHTLCFailInfo(r io.Reader) (*HTLCFailInfo, er.R) { - f := &HTLCFailInfo{} - var err er.R - f.FailTime, err = deserializeTime(r) - if err != nil { - return nil, err - } - - // Read failure. - failureBytes, err := wire.ReadVarBytes( - r, 0, lnwire.FailureMessageLength, "failure", - ) - if err != nil { - return nil, err - } - if len(failureBytes) > 0 { - f.Message, err = lnwire.DecodeFailureMessage( - bytes.NewReader(failureBytes), 0, - ) - if err != nil { - return nil, err - } - } - - var reason byte - err = ReadElements(r, &reason, &f.FailureSourceIndex) - if err != nil { - return nil, err - } - f.Reason = HTLCFailReason(reason) - - return f, nil -} - -// deserializeTime deserializes time as unix nanoseconds. -func deserializeTime(r io.Reader) (time.Time, er.R) { - var scratch [8]byte - if _, err := util.ReadFull(r, scratch[:]); err != nil { - return time.Time{}, err - } - - // Convert to time.Time. 
Interpret unix nano time zero as a zero - // time.Time value. - unixNano := byteOrder.Uint64(scratch[:]) - if unixNano == 0 { - return time.Time{}, nil - } - - return time.Unix(0, int64(unixNano)), nil -} - -// serializeTime serializes time as unix nanoseconds. -func serializeTime(w io.Writer, t time.Time) er.R { - var scratch [8]byte - - // Convert to unix nano seconds, but only if time is non-zero. Calling - // UnixNano() on a zero time yields an undefined result. - var unixNano int64 - if !t.IsZero() { - unixNano = t.UnixNano() - } - - byteOrder.PutUint64(scratch[:], uint64(unixNano)) - _, err := util.Write(w, scratch[:]) - return err -} diff --git a/lnd/channeldb/nodes.go b/lnd/channeldb/nodes.go deleted file mode 100644 index 7d3acfc8..00000000 --- a/lnd/channeldb/nodes.go +++ /dev/null @@ -1,322 +0,0 @@ -package channeldb - -import ( - "bytes" - "io" - "net" - "time" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/wire/protocol" -) - -var ( - // nodeInfoBucket stores metadata pertaining to nodes that we've had - // direct channel-based correspondence with. This bucket allows one to - // query for all open channels pertaining to the node by exploring each - // node's sub-bucket within the openChanBucket. - nodeInfoBucket = []byte("nib") -) - -// LinkNode stores metadata related to node's that we have/had a direct -// channel open with. Information such as the Bitcoin network the node -// advertised, and its identity public key are also stored. Additionally, this -// struct and the bucket its stored within have store data similar to that of -// Bitcoin's addrmanager. The TCP address information stored within the struct -// can be used to establish persistent connections will all channel -// counterparties on daemon startup. -// -// TODO(roasbeef): also add current OnionKey plus rotation schedule? 
-// TODO(roasbeef): add bitfield for supported services -// * possibly add a wire.NetAddress type, type -type LinkNode struct { - // Network indicates the Bitcoin network that the LinkNode advertises - // for incoming channel creation. - Network protocol.BitcoinNet - - // IdentityPub is the node's current identity public key. Any - // channel/topology related information received by this node MUST be - // signed by this public key. - IdentityPub *btcec.PublicKey - - // LastSeen tracks the last time this node was seen within the network. - // A node should be marked as seen if the daemon either is able to - // establish an outgoing connection to the node or receives a new - // incoming connection from the node. This timestamp (stored in unix - // epoch) may be used within a heuristic which aims to determine when a - // channel should be unilaterally closed due to inactivity. - // - // TODO(roasbeef): replace with block hash/height? - // * possibly add a time-value metric into the heuristic? - LastSeen time.Time - - // Addresses is a list of IP address in which either we were able to - // reach the node over in the past, OR we received an incoming - // authenticated connection for the stored identity public key. - Addresses []net.Addr - - db *DB -} - -// NewLinkNode creates a new LinkNode from the provided parameters, which is -// backed by an instance of channeldb. -func (db *DB) NewLinkNode(bitNet protocol.BitcoinNet, pub *btcec.PublicKey, - addrs ...net.Addr) *LinkNode { - - return &LinkNode{ - Network: bitNet, - IdentityPub: pub, - LastSeen: time.Now(), - Addresses: addrs, - db: db, - } -} - -// UpdateLastSeen updates the last time this node was directly encountered on -// the Lightning Network. -func (l *LinkNode) UpdateLastSeen(lastSeen time.Time) er.R { - l.LastSeen = lastSeen - - return l.Sync() -} - -// AddAddress appends the specified TCP address to the list of known addresses -// this node is/was known to be reachable at. 
-func (l *LinkNode) AddAddress(addr net.Addr) er.R { - for _, a := range l.Addresses { - if a.String() == addr.String() { - return nil - } - } - - l.Addresses = append(l.Addresses, addr) - - return l.Sync() -} - -// Sync performs a full database sync which writes the current up-to-date data -// within the struct to the database. -func (l *LinkNode) Sync() er.R { - - // Finally update the database by storing the link node and updating - // any relevant indexes. - return kvdb.Update(l.db, func(tx kvdb.RwTx) er.R { - nodeMetaBucket := tx.ReadWriteBucket(nodeInfoBucket) - if nodeMetaBucket == nil { - return ErrLinkNodesNotFound.Default() - } - - return putLinkNode(nodeMetaBucket, l) - }, func() {}) -} - -// putLinkNode serializes then writes the encoded version of the passed link -// node into the nodeMetaBucket. This function is provided in order to allow -// the ability to re-use a database transaction across many operations. -func putLinkNode(nodeMetaBucket kvdb.RwBucket, l *LinkNode) er.R { - // First serialize the LinkNode into its raw-bytes encoding. - var b bytes.Buffer - if err := serializeLinkNode(&b, l); err != nil { - return err - } - - // Finally insert the link-node into the node metadata bucket keyed - // according to the its pubkey serialized in compressed form. - nodePub := l.IdentityPub.SerializeCompressed() - return nodeMetaBucket.Put(nodePub, b.Bytes()) -} - -// DeleteLinkNode removes the link node with the given identity from the -// database. 
-func (db *DB) DeleteLinkNode(identity *btcec.PublicKey) er.R { - return kvdb.Update(db, func(tx kvdb.RwTx) er.R { - return db.deleteLinkNode(tx, identity) - }, func() {}) -} - -func (db *DB) deleteLinkNode(tx kvdb.RwTx, identity *btcec.PublicKey) er.R { - nodeMetaBucket := tx.ReadWriteBucket(nodeInfoBucket) - if nodeMetaBucket == nil { - return ErrLinkNodesNotFound.Default() - } - - pubKey := identity.SerializeCompressed() - return nodeMetaBucket.Delete(pubKey) -} - -// FetchLinkNode attempts to lookup the data for a LinkNode based on a target -// identity public key. If a particular LinkNode for the passed identity public -// key cannot be found, then ErrNodeNotFound if returned. -func (db *DB) FetchLinkNode(identity *btcec.PublicKey) (*LinkNode, er.R) { - var linkNode *LinkNode - err := kvdb.View(db, func(tx kvdb.RTx) er.R { - node, err := fetchLinkNode(tx, identity) - if err != nil { - return err - } - - linkNode = node - return nil - }, func() { - linkNode = nil - }) - - return linkNode, err -} - -func fetchLinkNode(tx kvdb.RTx, targetPub *btcec.PublicKey) (*LinkNode, er.R) { - // First fetch the bucket for storing node metadata, bailing out early - // if it hasn't been created yet. - nodeMetaBucket := tx.ReadBucket(nodeInfoBucket) - if nodeMetaBucket == nil { - return nil, ErrLinkNodesNotFound.Default() - } - - // If a link node for that particular public key cannot be located, - // then exit early with an ErrNodeNotFound. - pubKey := targetPub.SerializeCompressed() - nodeBytes := nodeMetaBucket.Get(pubKey) - if nodeBytes == nil { - return nil, ErrNodeNotFound.Default() - } - - // Finally, decode and allocate a fresh LinkNode object to be returned - // to the caller. - nodeReader := bytes.NewReader(nodeBytes) - return deserializeLinkNode(nodeReader) -} - -// TODO(roasbeef): update link node addrs in server upon connection - -// FetchAllLinkNodes starts a new database transaction to fetch all nodes with -// whom we have active channels with. 
-func (db *DB) FetchAllLinkNodes() ([]*LinkNode, er.R) { - var linkNodes []*LinkNode - err := kvdb.View(db, func(tx kvdb.RTx) er.R { - nodes, err := db.fetchAllLinkNodes(tx) - if err != nil { - return err - } - - linkNodes = nodes - return nil - }, func() { - linkNodes = nil - }) - if err != nil { - return nil, err - } - - return linkNodes, nil -} - -// fetchAllLinkNodes uses an existing database transaction to fetch all nodes -// with whom we have active channels with. -func (db *DB) fetchAllLinkNodes(tx kvdb.RTx) ([]*LinkNode, er.R) { - nodeMetaBucket := tx.ReadBucket(nodeInfoBucket) - if nodeMetaBucket == nil { - return nil, ErrLinkNodesNotFound.Default() - } - - var linkNodes []*LinkNode - err := nodeMetaBucket.ForEach(func(k, v []byte) er.R { - if v == nil { - return nil - } - - nodeReader := bytes.NewReader(v) - linkNode, err := deserializeLinkNode(nodeReader) - if err != nil { - return err - } - - linkNodes = append(linkNodes, linkNode) - return nil - }) - if err != nil { - return nil, err - } - - return linkNodes, nil -} - -func serializeLinkNode(w io.Writer, l *LinkNode) er.R { - var buf [8]byte - - byteOrder.PutUint32(buf[:4], uint32(l.Network)) - if _, err := util.Write(w, buf[:4]); err != nil { - return err - } - - serializedID := l.IdentityPub.SerializeCompressed() - if _, err := util.Write(w, serializedID); err != nil { - return err - } - - seenUnix := uint64(l.LastSeen.Unix()) - byteOrder.PutUint64(buf[:], seenUnix) - if _, err := util.Write(w, buf[:]); err != nil { - return err - } - - numAddrs := uint32(len(l.Addresses)) - byteOrder.PutUint32(buf[:4], numAddrs) - if _, err := util.Write(w, buf[:4]); err != nil { - return err - } - - for _, addr := range l.Addresses { - if err := serializeAddr(w, addr); err != nil { - return err - } - } - - return nil -} - -func deserializeLinkNode(r io.Reader) (*LinkNode, er.R) { - var ( - err er.R - buf [8]byte - ) - - node := &LinkNode{} - - if _, err := util.ReadFull(r, buf[:4]); err != nil { - return nil, err - 
} - node.Network = protocol.BitcoinNet(byteOrder.Uint32(buf[:4])) - - var pub [33]byte - if _, err := util.ReadFull(r, pub[:]); err != nil { - return nil, err - } - node.IdentityPub, err = btcec.ParsePubKey(pub[:], btcec.S256()) - if err != nil { - return nil, err - } - - if _, err := util.ReadFull(r, buf[:]); err != nil { - return nil, err - } - node.LastSeen = time.Unix(int64(byteOrder.Uint64(buf[:])), 0) - - if _, err := util.ReadFull(r, buf[:4]); err != nil { - return nil, err - } - numAddrs := byteOrder.Uint32(buf[:4]) - - node.Addresses = make([]net.Addr, numAddrs) - for i := uint32(0); i < numAddrs; i++ { - addr, err := deserializeAddr(r) - if err != nil { - return nil, err - } - node.Addresses[i] = addr - } - - return node, nil -} diff --git a/lnd/channeldb/nodes_test.go b/lnd/channeldb/nodes_test.go deleted file mode 100644 index 156c61b4..00000000 --- a/lnd/channeldb/nodes_test.go +++ /dev/null @@ -1,140 +0,0 @@ -package channeldb - -import ( - "bytes" - "net" - "testing" - "time" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/wire/protocol" -) - -func TestLinkNodeEncodeDecode(t *testing.T) { - t.Parallel() - - cdb, cleanUp, err := MakeTestDB() - if err != nil { - t.Fatalf("unable to make test database: %v", err) - } - defer cleanUp() - - // First we'll create some initial data to use for populating our test - // LinkNode instances. - _, pub1 := btcec.PrivKeyFromBytes(btcec.S256(), key[:]) - _, pub2 := btcec.PrivKeyFromBytes(btcec.S256(), rev[:]) - addr1, errr := net.ResolveTCPAddr("tcp", "10.0.0.1:9000") - if errr != nil { - t.Fatalf("unable to create test addr: %v", errr) - } - addr2, errr := net.ResolveTCPAddr("tcp", "10.0.0.2:9000") - if errr != nil { - t.Fatalf("unable to create test addr: %v", errr) - } - - // Create two fresh link node instances with the above dummy data, then - // fully sync both instances to disk. 
- node1 := cdb.NewLinkNode(protocol.MainNet, pub1, addr1) - node2 := cdb.NewLinkNode(protocol.TestNet3, pub2, addr2) - if err := node1.Sync(); err != nil { - t.Fatalf("unable to sync node: %v", err) - } - if err := node2.Sync(); err != nil { - t.Fatalf("unable to sync node: %v", err) - } - - // Fetch all current link nodes from the database, they should exactly - // match the two created above. - originalNodes := []*LinkNode{node2, node1} - linkNodes, err := cdb.FetchAllLinkNodes() - if err != nil { - t.Fatalf("unable to fetch nodes: %v", err) - } - for i, node := range linkNodes { - if originalNodes[i].Network != node.Network { - t.Fatalf("node networks don't match: expected %v, got %v", - originalNodes[i].Network, node.Network) - } - - originalPubkey := originalNodes[i].IdentityPub.SerializeCompressed() - dbPubkey := node.IdentityPub.SerializeCompressed() - if !bytes.Equal(originalPubkey, dbPubkey) { - t.Fatalf("node pubkeys don't match: expected %x, got %x", - originalPubkey, dbPubkey) - } - if originalNodes[i].LastSeen.Unix() != node.LastSeen.Unix() { - t.Fatalf("last seen timestamps don't match: expected %v got %v", - originalNodes[i].LastSeen.Unix(), node.LastSeen.Unix()) - } - if originalNodes[i].Addresses[0].String() != node.Addresses[0].String() { - t.Fatalf("addresses don't match: expected %v, got %v", - originalNodes[i].Addresses, node.Addresses) - } - } - - // Next, we'll exercise the methods to append additional IP - // addresses, and also to update the last seen time. - if err := node1.UpdateLastSeen(time.Now()); err != nil { - t.Fatalf("unable to update last seen: %v", err) - } - if err := node1.AddAddress(addr2); err != nil { - t.Fatalf("unable to update addr: %v", err) - } - - // Fetch the same node from the database according to its public key. 
- node1DB, err := cdb.FetchLinkNode(pub1) - if err != nil { - t.Fatalf("unable to find node: %v", err) - } - - // Both the last seen timestamp and the list of reachable addresses for - // the node should be updated. - if node1DB.LastSeen.Unix() != node1.LastSeen.Unix() { - t.Fatalf("last seen timestamps don't match: expected %v got %v", - node1.LastSeen.Unix(), node1DB.LastSeen.Unix()) - } - if len(node1DB.Addresses) != 2 { - t.Fatalf("wrong length for node1 addresses: expected %v, got %v", - 2, len(node1DB.Addresses)) - } - if node1DB.Addresses[0].String() != addr1.String() { - t.Fatalf("wrong address for node: expected %v, got %v", - addr1.String(), node1DB.Addresses[0].String()) - } - if node1DB.Addresses[1].String() != addr2.String() { - t.Fatalf("wrong address for node: expected %v, got %v", - addr2.String(), node1DB.Addresses[1].String()) - } -} - -func TestDeleteLinkNode(t *testing.T) { - t.Parallel() - - cdb, cleanUp, err := MakeTestDB() - if err != nil { - t.Fatalf("unable to make test database: %v", err) - } - defer cleanUp() - - _, pubKey := btcec.PrivKeyFromBytes(btcec.S256(), key[:]) - addr := &net.TCPAddr{ - IP: net.ParseIP("127.0.0.1"), - Port: 1337, - } - linkNode := cdb.NewLinkNode(protocol.TestNet3, pubKey, addr) - if err := linkNode.Sync(); err != nil { - t.Fatalf("unable to write link node to db: %v", err) - } - - if _, err := cdb.FetchLinkNode(pubKey); err != nil { - t.Fatalf("unable to find link node: %v", err) - } - - if err := cdb.DeleteLinkNode(pubKey); err != nil { - t.Fatalf("unable to delete link node from db: %v", err) - } - - if _, err := cdb.FetchLinkNode(pubKey); err == nil { - t.Fatal("should not have found link node in db, but did") - } -} diff --git a/lnd/channeldb/options.go b/lnd/channeldb/options.go deleted file mode 100644 index 3103dad7..00000000 --- a/lnd/channeldb/options.go +++ /dev/null @@ -1,108 +0,0 @@ -package channeldb - -import ( - "time" - - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - 
"github.com/pkt-cash/pktd/lnd/clock" -) - -const ( - // DefaultRejectCacheSize is the default number of rejectCacheEntries to - // cache for use in the rejection cache of incoming gossip traffic. This - // produces a cache size of around 1MB. - DefaultRejectCacheSize = 50000 - - // DefaultChannelCacheSize is the default number of ChannelEdges cached - // in order to reply to gossip queries. This produces a cache size of - // around 40MB. - DefaultChannelCacheSize = 20000 -) - -// Options holds parameters for tuning and customizing a channeldb.DB. -type Options struct { - kvdb.BoltBackendConfig - - // RejectCacheSize is the maximum number of rejectCacheEntries to hold - // in the rejection cache. - RejectCacheSize int - - // ChannelCacheSize is the maximum number of ChannelEdges to hold in the - // channel cache. - ChannelCacheSize int - - // clock is the time source used by the database. - clock clock.Clock - - // dryRun will fail to commit a successful migration when opening the - // database if set to true. - dryRun bool -} - -// DefaultOptions returns an Options populated with default values. -func DefaultOptions() Options { - return Options{ - BoltBackendConfig: kvdb.BoltBackendConfig{ - NoFreelistSync: true, - AutoCompact: false, - AutoCompactMinAge: kvdb.DefaultBoltAutoCompactMinAge, - }, - RejectCacheSize: DefaultRejectCacheSize, - ChannelCacheSize: DefaultChannelCacheSize, - clock: clock.NewDefaultClock(), - } -} - -// OptionModifier is a function signature for modifying the default Options. -type OptionModifier func(*Options) - -// OptionSetRejectCacheSize sets the RejectCacheSize to n. -func OptionSetRejectCacheSize(n int) OptionModifier { - return func(o *Options) { - o.RejectCacheSize = n - } -} - -// OptionSetChannelCacheSize sets the ChannelCacheSize to n. -func OptionSetChannelCacheSize(n int) OptionModifier { - return func(o *Options) { - o.ChannelCacheSize = n - } -} - -// OptionSetSyncFreelist allows the database to sync its freelist. 
-func OptionSetSyncFreelist(b bool) OptionModifier { - return func(o *Options) { - o.NoFreelistSync = !b - } -} - -// OptionAutoCompact turns on automatic database compaction on startup. -func OptionAutoCompact() OptionModifier { - return func(o *Options) { - o.AutoCompact = true - } -} - -// OptionAutoCompactMinAge sets the minimum age for automatic database -// compaction. -func OptionAutoCompactMinAge(minAge time.Duration) OptionModifier { - return func(o *Options) { - o.AutoCompactMinAge = minAge - } -} - -// OptionClock sets a non-default clock dependency. -func OptionClock(clock clock.Clock) OptionModifier { - return func(o *Options) { - o.clock = clock - } -} - -// OptionDryRunMigration controls whether or not to intentially fail to commit a -// successful migration that occurs when opening the database. -func OptionDryRunMigration(dryRun bool) OptionModifier { - return func(o *Options) { - o.dryRun = dryRun - } -} diff --git a/lnd/channeldb/paginate.go b/lnd/channeldb/paginate.go deleted file mode 100644 index 1646160d..00000000 --- a/lnd/channeldb/paginate.go +++ /dev/null @@ -1,143 +0,0 @@ -package channeldb - -import ( - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" -) - -type paginator struct { - // cursor is the cursor which we are using to iterate through a bucket. - cursor kvdb.RCursor - - // reversed indicates whether we are paginating forwards or backwards. - reversed bool - - // indexOffset is the index from which we will begin querying. - indexOffset uint64 - - // totalItems is the total number of items we allow in our response. - totalItems uint64 -} - -// newPaginator returns a struct which can be used to query an indexed bucket -// in pages. 
-func newPaginator(c kvdb.RCursor, reversed bool, - indexOffset, totalItems uint64) paginator { - - return paginator{ - cursor: c, - reversed: reversed, - indexOffset: indexOffset, - totalItems: totalItems, - } -} - -// keyValueForIndex seeks our cursor to a given index and returns the key and -// value at that position. -func (p paginator) keyValueForIndex(index uint64) ([]byte, []byte) { - var keyIndex [8]byte - byteOrder.PutUint64(keyIndex[:], index) - return p.cursor.Seek(keyIndex[:]) -} - -// lastIndex returns the last value in our index, if our index is empty it -// returns 0. -func (p paginator) lastIndex() uint64 { - keyIndex, _ := p.cursor.Last() - if keyIndex == nil { - return 0 - } - - return byteOrder.Uint64(keyIndex) -} - -// nextKey is a helper closure to determine what key we should use next when -// we are iterating, depending on whether we are iterating forwards or in -// reverse. -func (p paginator) nextKey() ([]byte, []byte) { - if p.reversed { - return p.cursor.Prev() - } - return p.cursor.Next() -} - -// cursorStart gets the index key and value for the first item we are looking -// up, taking into account that we may be paginating in reverse. The index -// offset provided is *excusive* so we will start with the item after the offset -// for forwards queries, and the item before the index for backwards queries. -func (p paginator) cursorStart() ([]byte, []byte) { - indexKey, indexValue := p.keyValueForIndex(p.indexOffset + 1) - - // If the query is specifying reverse iteration, then we must - // handle a few offset cases. - if p.reversed { - switch { - - // This indicates the default case, where no offset was - // specified. In that case we just start from the last - // entry. - case p.indexOffset == 0: - indexKey, indexValue = p.cursor.Last() - - // This indicates the offset being set to the very - // first entry. 
Since there are no entries before - // this offset, and the direction is reversed, we can - // return without adding any invoices to the response. - case p.indexOffset == 1: - return nil, nil - - // If we have been given an index offset that is beyond our last - // index value, we just return the last indexed value in our set - // since we are querying in reverse. We do not cover the case - // where our index offset equals our last index value, because - // index offset is exclusive, so we would want to start at the - // value before our last index. - case p.indexOffset > p.lastIndex(): - return p.cursor.Last() - - // Otherwise we have an index offset which is within our set of - // indexed keys, and we want to start at the item before our - // offset. We seek to our index offset, then return the element - // before it. We do this rather than p.indexOffset-1 to account - // for indexes that have gaps. - default: - p.keyValueForIndex(p.indexOffset) - indexKey, indexValue = p.cursor.Prev() - } - } - - return indexKey, indexValue -} - -// query gets the start point for our index offset and iterates through keys -// in our index until we reach the total number of items required for the query -// or we run out of cursor values. This function takes a fetchAndAppend function -// which is responsible for looking up the entry at that index, adding the entry -// to its set of return items (if desired) and return a boolean which indicates -// whether the item was added. This is required to allow the paginator to -// determine when the response has the maximum number of required items. -func (p paginator) query(fetchAndAppend func(k, v []byte) (bool, er.R)) er.R { - indexKey, indexValue := p.cursorStart() - - var totalItems int - for ; indexKey != nil; indexKey, indexValue = p.nextKey() { - // If our current return payload exceeds the max number - // of invoices, then we'll exit now. 
- if uint64(totalItems) >= p.totalItems { - break - } - - added, err := fetchAndAppend(indexKey, indexValue) - if err != nil { - return err - } - - // If we added an item to our set in the latest fetch and append - // we increment our total count. - if added { - totalItems++ - } - } - - return nil -} diff --git a/lnd/channeldb/payment_control.go b/lnd/channeldb/payment_control.go deleted file mode 100644 index 30f2f376..00000000 --- a/lnd/channeldb/payment_control.go +++ /dev/null @@ -1,727 +0,0 @@ -package channeldb - -import ( - "bytes" - "encoding/binary" - "io" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/lnd/lntypes" -) - -var ( - // ErrAlreadyPaid signals we have already paid this payment hash. - ErrAlreadyPaid = Err.CodeWithDetail("ErrAlreadyPaid", "invoice is already paid") - - // ErrPaymentInFlight signals that payment for this payment hash is - // already "in flight" on the network. - ErrPaymentInFlight = Err.CodeWithDetail("ErrPaymentInFlight", "payment is in transition") - - // ErrPaymentNotInitiated is returned if the payment wasn't initiated. - ErrPaymentNotInitiated = Err.CodeWithDetail("ErrPaymentNotInitiated", "payment isn't initiated") - - // ErrPaymentAlreadySucceeded is returned in the event we attempt to - // change the status of a payment already succeeded. - ErrPaymentAlreadySucceeded = Err.CodeWithDetail("ErrPaymentAlreadySucceeded", "payment is already succeeded") - - // ErrPaymentAlreadyFailed is returned in the event we attempt to alter - // a failed payment. - ErrPaymentAlreadyFailed = Err.CodeWithDetail("ErrPaymentAlreadyFailed", "payment has already failed") - - // ErrUnknownPaymentStatus is returned when we do not recognize the - // existing state of a payment. 
- ErrUnknownPaymentStatus = Err.CodeWithDetail("ErrUnknownPaymentStatus", "unknown payment status") - - // ErrPaymentTerminal is returned if we attempt to alter a payment that - // already has reached a terminal condition. - ErrPaymentTerminal = Err.CodeWithDetail("ErrPaymentTerminal", "payment has reached terminal condition") - - // ErrAttemptAlreadySettled is returned if we try to alter an already - // settled HTLC attempt. - ErrAttemptAlreadySettled = Err.CodeWithDetail("ErrAttemptAlreadySettled", "attempt already settled") - - // ErrAttemptAlreadyFailed is returned if we try to alter an already - // failed HTLC attempt. - ErrAttemptAlreadyFailed = Err.CodeWithDetail("ErrAttemptAlreadyFailed", "attempt already failed") - - // ErrValueMismatch is returned if we try to register a non-MPP attempt - // with an amount that doesn't match the payment amount. - ErrValueMismatch = Err.CodeWithDetail("ErrValueMismatch", - "attempted value doesn't match payment amount") - - // ErrValueExceedsAmt is returned if we try to register an attempt that - // would take the total sent amount above the payment amount. - ErrValueExceedsAmt = Err.CodeWithDetail("ErrValueExceedsAmt", - "attempted value exceeds payment amount") - - // ErrNonMPPayment is returned if we try to register an MPP attempt for - // a payment that already has a non-MPP attempt regitered. - ErrNonMPPayment = Err.CodeWithDetail("ErrNonMPPayment", "payment has non-MPP attempts") - - // ErrMPPayment is returned if we try to register a non-MPP attempt for - // a payment that already has an MPP attempt regitered. - ErrMPPayment = Err.CodeWithDetail("ErrMPPayment", "payment has MPP attempts") - - // ErrMPPPaymentAddrMismatch is returned if we try to register an MPP - // shard where the payment address doesn't match existing shards. 
- ErrMPPPaymentAddrMismatch = Err.CodeWithDetail("ErrMPPPaymentAddrMismatch", "payment address mismatch") - - // ErrMPPTotalAmountMismatch is returned if we try to register an MPP - // shard where the total amount doesn't match existing shards. - ErrMPPTotalAmountMismatch = Err.CodeWithDetail("ErrMPPTotalAmountMismatch", "mp payment total amount mismatch") - - // errNoAttemptInfo is returned when no attempt info is stored yet. - errNoAttemptInfo = Err.CodeWithDetail("errNoAttemptInfo", "unable to find attempt info for "+ - "inflight payment") - - // errNoSequenceNrIndex is returned when an attempt to lookup a payment - // index is made for a sequence number that is not indexed. - errNoSequenceNrIndex = Err.CodeWithDetail("errNoSequenceNrIndex", "payment sequence number index "+ - "does not exist") -) - -// PaymentControl implements persistence for payments and payment attempts. -type PaymentControl struct { - db *DB -} - -// NewPaymentControl creates a new instance of the PaymentControl. -func NewPaymentControl(db *DB) *PaymentControl { - return &PaymentControl{ - db: db, - } -} - -// InitPayment checks or records the given PaymentCreationInfo with the DB, -// making sure it does not already exist as an in-flight payment. When this -// method returns successfully, the payment is guranteeed to be in the InFlight -// state. -func (p *PaymentControl) InitPayment(paymentHash lntypes.Hash, - info *PaymentCreationInfo) er.R { - - var b bytes.Buffer - if err := serializePaymentCreationInfo(&b, info); err != nil { - return err - } - infoBytes := b.Bytes() - - var updateErr er.R - err := kvdb.Batch(p.db.Backend, func(tx kvdb.RwTx) er.R { - // Reset the update error, to avoid carrying over an error - // from a previous execution of the batched db transaction. - updateErr = nil - - bucket, err := createPaymentBucket(tx, paymentHash) - if err != nil { - return err - } - - // Get the existing status of this payment, if any. 
- paymentStatus, err := fetchPaymentStatus(bucket) - if err != nil { - return err - } - - switch paymentStatus { - - // We allow retrying failed payments. - case StatusFailed: - - // This is a new payment that is being initialized for the - // first time. - case StatusUnknown: - - // We already have an InFlight payment on the network. We will - // disallow any new payments. - case StatusInFlight: - updateErr = ErrPaymentInFlight.Default() - return nil - - // We've already succeeded a payment to this payment hash, - // forbid the switch from sending another. - case StatusSucceeded: - updateErr = ErrAlreadyPaid.Default() - return nil - - default: - updateErr = ErrUnknownPaymentStatus.Default() - return nil - } - - // Obtain a new sequence number for this payment. This is used - // to sort the payments in order of creation, and also acts as - // a unique identifier for each payment. - sequenceNum, err := nextPaymentSequence(tx) - if err != nil { - return err - } - - // Before we set our new sequence number, we check whether this - // payment has a previously set sequence number and remove its - // index entry if it exists. This happens in the case where we - // have a previously attempted payment which was left in a state - // where we can retry. - seqBytes := bucket.Get(paymentSequenceKey) - if seqBytes != nil { - indexBucket := tx.ReadWriteBucket(paymentsIndexBucket) - if err := indexBucket.Delete(seqBytes); err != nil { - return err - } - } - - // Once we have obtained a sequence number, we add an entry - // to our index bucket which will map the sequence number to - // our payment hash. 
- err = createPaymentIndexEntry(tx, sequenceNum, info.PaymentHash) - if err != nil { - return err - } - - err = bucket.Put(paymentSequenceKey, sequenceNum) - if err != nil { - return err - } - - // Add the payment info to the bucket, which contains the - // static information for this payment - err = bucket.Put(paymentCreationInfoKey, infoBytes) - if err != nil { - return err - } - - // We'll delete any lingering HTLCs to start with, in case we - // are initializing a payment that was attempted earlier, but - // left in a state where we could retry. - if err := bucket.DeleteNestedBucket(paymentHtlcsBucket); err != nil && !kvdb.ErrBucketNotFound.Is(err) { - return err - } - - // Also delete any lingering failure info now that we are - // re-attempting. - return bucket.Delete(paymentFailInfoKey) - }) - if err != nil { - return err - } - - return updateErr -} - -// paymentIndexTypeHash is a payment index type which indicates that we have -// created an index of payment sequence number to payment hash. -type paymentIndexType uint8 - -// paymentIndexTypeHash is a payment index type which indicates that we have -// created an index of payment sequence number to payment hash. -const paymentIndexTypeHash paymentIndexType = 0 - -// createPaymentIndexEntry creates a payment hash typed index for a payment. The -// index produced contains a payment index type (which can be used in future to -// signal different payment index types) and the payment hash. -func createPaymentIndexEntry(tx kvdb.RwTx, sequenceNumber []byte, - hash lntypes.Hash) er.R { - - var b bytes.Buffer - if err := WriteElements(&b, paymentIndexTypeHash, hash[:]); err != nil { - return err - } - - indexes := tx.ReadWriteBucket(paymentsIndexBucket) - return indexes.Put(sequenceNumber, b.Bytes()) -} - -// deserializePaymentIndex deserializes a payment index entry. This function -// currently only supports deserialization of payment hash indexes, and will -// fail for other types. 
-func deserializePaymentIndex(r io.Reader) (lntypes.Hash, er.R) { - var ( - indexType paymentIndexType - paymentHash []byte - ) - - if err := ReadElements(r, &indexType, &paymentHash); err != nil { - return lntypes.Hash{}, err - } - - // While we only have on payment index type, we do not need to use our - // index type to deserialize the index. However, we sanity check that - // this type is as expected, since we had to read it out anyway. - if indexType != paymentIndexTypeHash { - return lntypes.Hash{}, er.Errorf("unknown payment index "+ - "type: %v", indexType) - } - - hash, err := lntypes.MakeHash(paymentHash) - if err != nil { - return lntypes.Hash{}, err - } - - return hash, nil -} - -// RegisterAttempt atomically records the provided HTLCAttemptInfo to the -// DB. -func (p *PaymentControl) RegisterAttempt(paymentHash lntypes.Hash, - attempt *HTLCAttemptInfo) (*MPPayment, er.R) { - - // Serialize the information before opening the db transaction. - var a bytes.Buffer - err := serializeHTLCAttemptInfo(&a, attempt) - if err != nil { - return nil, err - } - htlcInfoBytes := a.Bytes() - - htlcIDBytes := make([]byte, 8) - binary.BigEndian.PutUint64(htlcIDBytes, attempt.AttemptID) - - var payment *MPPayment - err = kvdb.Batch(p.db.Backend, func(tx kvdb.RwTx) er.R { - bucket, err := fetchPaymentBucketUpdate(tx, paymentHash) - if err != nil { - return err - } - - p, err := fetchPayment(bucket) - if err != nil { - return err - } - - // Ensure the payment is in-flight. - if err := ensureInFlight(p); err != nil { - return err - } - - // We cannot register a new attempt if the payment already has - // reached a terminal condition: - settle, fail := p.TerminalInfo() - if settle != nil || fail != nil { - return ErrPaymentTerminal.Default() - } - - // Make sure any existing shards match the new one with regards - // to MPP options. 
- mpp := attempt.Route.FinalHop().MPP - for _, h := range p.InFlightHTLCs() { - hMpp := h.Route.FinalHop().MPP - - switch { - - // We tried to register a non-MPP attempt for a MPP - // payment. - case mpp == nil && hMpp != nil: - return ErrMPPayment.Default() - - // We tried to register a MPP shard for a non-MPP - // payment. - case mpp != nil && hMpp == nil: - return ErrNonMPPayment.Default() - - // Non-MPP payment, nothing more to validate. - case mpp == nil: - continue - } - - // Check that MPP options match. - if mpp.PaymentAddr() != hMpp.PaymentAddr() { - return ErrMPPPaymentAddrMismatch.Default() - } - - if mpp.TotalMsat() != hMpp.TotalMsat() { - return ErrMPPTotalAmountMismatch.Default() - } - } - - // If this is a non-MPP attempt, it must match the total amount - // exactly. - amt := attempt.Route.ReceiverAmt() - if mpp == nil && amt != p.Info.Value { - return ErrValueMismatch.Default() - } - - // Ensure we aren't sending more than the total payment amount. - sentAmt, _ := p.SentAmt() - if sentAmt+amt > p.Info.Value { - return ErrValueExceedsAmt.Default() - } - - htlcsBucket, err := bucket.CreateBucketIfNotExists( - paymentHtlcsBucket, - ) - if err != nil { - return err - } - - // Create bucket for this attempt. Fail if the bucket already - // exists. - htlcBucket, err := htlcsBucket.CreateBucket(htlcIDBytes) - if err != nil { - return err - } - - err = htlcBucket.Put(htlcAttemptInfoKey, htlcInfoBytes) - if err != nil { - return err - } - - // Retrieve attempt info for the notification. - payment, err = fetchPayment(bucket) - return err - }) - if err != nil { - return nil, err - } - - return payment, err -} - -// SettleAttempt marks the given attempt settled with the preimage. If this is -// a multi shard payment, this might implicitly mean that the full payment -// succeeded. -// -// After invoking this method, InitPayment should always return an error to -// prevent us from making duplicate payments to the same payment hash. 
The -// provided preimage is atomically saved to the DB for record keeping. -func (p *PaymentControl) SettleAttempt(hash lntypes.Hash, - attemptID uint64, settleInfo *HTLCSettleInfo) (*MPPayment, er.R) { - - var b bytes.Buffer - if err := serializeHTLCSettleInfo(&b, settleInfo); err != nil { - return nil, err - } - settleBytes := b.Bytes() - - return p.updateHtlcKey(hash, attemptID, htlcSettleInfoKey, settleBytes) -} - -// FailAttempt marks the given payment attempt failed. -func (p *PaymentControl) FailAttempt(hash lntypes.Hash, - attemptID uint64, failInfo *HTLCFailInfo) (*MPPayment, er.R) { - - var b bytes.Buffer - if err := serializeHTLCFailInfo(&b, failInfo); err != nil { - return nil, err - } - failBytes := b.Bytes() - - return p.updateHtlcKey(hash, attemptID, htlcFailInfoKey, failBytes) -} - -// updateHtlcKey updates a database key for the specified htlc. -func (p *PaymentControl) updateHtlcKey(paymentHash lntypes.Hash, - attemptID uint64, key, value []byte) (*MPPayment, er.R) { - - htlcIDBytes := make([]byte, 8) - binary.BigEndian.PutUint64(htlcIDBytes, attemptID) - - var payment *MPPayment - err := kvdb.Batch(p.db.Backend, func(tx kvdb.RwTx) er.R { - payment = nil - - bucket, err := fetchPaymentBucketUpdate(tx, paymentHash) - if err != nil { - return err - } - - p, err := fetchPayment(bucket) - if err != nil { - return err - } - - // We can only update keys of in-flight payments. We allow - // updating keys even if the payment has reached a terminal - // condition, since the HTLC outcomes must still be updated. - if err := ensureInFlight(p); err != nil { - return err - } - - htlcsBucket := bucket.NestedReadWriteBucket(paymentHtlcsBucket) - if htlcsBucket == nil { - return er.Errorf("htlcs bucket not found") - } - - htlcBucket := htlcsBucket.NestedReadWriteBucket(htlcIDBytes) - if htlcBucket == nil { - return er.Errorf("HTLC with ID %v not registered", - attemptID) - } - - // Make sure the shard is not already failed or settled. 
- if htlcBucket.Get(htlcFailInfoKey) != nil { - return ErrAttemptAlreadyFailed.Default() - } - - if htlcBucket.Get(htlcSettleInfoKey) != nil { - return ErrAttemptAlreadySettled.Default() - } - - // Add or update the key for this htlc. - err = htlcBucket.Put(key, value) - if err != nil { - return err - } - - // Retrieve attempt info for the notification. - payment, err = fetchPayment(bucket) - return err - }) - if err != nil { - return nil, err - } - - return payment, err -} - -// Fail transitions a payment into the Failed state, and records the reason the -// payment failed. After invoking this method, InitPayment should return nil on -// its next call for this payment hash, allowing the switch to make a -// subsequent payment. -func (p *PaymentControl) Fail(paymentHash lntypes.Hash, - reason FailureReason) (*MPPayment, er.R) { - - var ( - updateErr er.R - payment *MPPayment - ) - err := kvdb.Batch(p.db.Backend, func(tx kvdb.RwTx) er.R { - // Reset the update error, to avoid carrying over an error - // from a previous execution of the batched db transaction. - updateErr = nil - payment = nil - - bucket, err := fetchPaymentBucketUpdate(tx, paymentHash) - if ErrPaymentNotInitiated.Is(err) { - updateErr = ErrPaymentNotInitiated.Default() - return nil - } else if err != nil { - return err - } - - // We mark the payent as failed as long as it is known. This - // lets the last attempt to fail with a terminal write its - // failure to the PaymentControl without synchronizing with - // other attempts. - paymentStatus, err := fetchPaymentStatus(bucket) - if err != nil { - return err - } - - if paymentStatus == StatusUnknown { - updateErr = ErrPaymentNotInitiated.Default() - return nil - } - - // Put the failure reason in the bucket for record keeping. - v := []byte{byte(reason)} - err = bucket.Put(paymentFailInfoKey, v) - if err != nil { - return err - } - - // Retrieve attempt info for the notification, if available. 
- payment, err = fetchPayment(bucket) - if err != nil { - return err - } - - return nil - }) - if err != nil { - return nil, err - } - - return payment, updateErr -} - -// FetchPayment returns information about a payment from the database. -func (p *PaymentControl) FetchPayment(paymentHash lntypes.Hash) ( - *MPPayment, er.R) { - - var payment *MPPayment - err := kvdb.View(p.db, func(tx kvdb.RTx) er.R { - bucket, err := fetchPaymentBucket(tx, paymentHash) - if err != nil { - return err - } - - payment, err = fetchPayment(bucket) - - return err - }, func() { - payment = nil - }) - if err != nil { - return nil, err - } - - return payment, nil -} - -// createPaymentBucket creates or fetches the sub-bucket assigned to this -// payment hash. -func createPaymentBucket(tx kvdb.RwTx, paymentHash lntypes.Hash) ( - kvdb.RwBucket, er.R) { - - payments, err := tx.CreateTopLevelBucket(paymentsRootBucket) - if err != nil { - return nil, err - } - - return payments.CreateBucketIfNotExists(paymentHash[:]) -} - -// fetchPaymentBucket fetches the sub-bucket assigned to this payment hash. If -// the bucket does not exist, it returns ErrPaymentNotInitiated. -func fetchPaymentBucket(tx kvdb.RTx, paymentHash lntypes.Hash) ( - kvdb.RBucket, er.R) { - - payments := tx.ReadBucket(paymentsRootBucket) - if payments == nil { - return nil, ErrPaymentNotInitiated.Default() - } - - bucket := payments.NestedReadBucket(paymentHash[:]) - if bucket == nil { - return nil, ErrPaymentNotInitiated.Default() - } - - return bucket, nil - -} - -// fetchPaymentBucketUpdate is identical to fetchPaymentBucket, but it returns a -// bucket that can be written to. 
-func fetchPaymentBucketUpdate(tx kvdb.RwTx, paymentHash lntypes.Hash) ( - kvdb.RwBucket, er.R) { - - payments := tx.ReadWriteBucket(paymentsRootBucket) - if payments == nil { - return nil, ErrPaymentNotInitiated.Default() - } - - bucket := payments.NestedReadWriteBucket(paymentHash[:]) - if bucket == nil { - return nil, ErrPaymentNotInitiated.Default() - } - - return bucket, nil -} - -// nextPaymentSequence returns the next sequence number to store for a new -// payment. -func nextPaymentSequence(tx kvdb.RwTx) ([]byte, er.R) { - payments, err := tx.CreateTopLevelBucket(paymentsRootBucket) - if err != nil { - return nil, err - } - - seq, errr := payments.NextSequence() - if errr != nil { - return nil, errr - } - - b := make([]byte, 8) - binary.BigEndian.PutUint64(b, seq) - return b, nil -} - -// fetchPaymentStatus fetches the payment status of the payment. If the payment -// isn't found, it will default to "StatusUnknown". -func fetchPaymentStatus(bucket kvdb.RBucket) (PaymentStatus, er.R) { - // Creation info should be set for all payments, regardless of state. - // If not, it is unknown. - if bucket.Get(paymentCreationInfoKey) == nil { - return StatusUnknown, nil - } - - payment, err := fetchPayment(bucket) - if err != nil { - return 0, err - } - - return payment.Status, nil -} - -// ensureInFlight checks whether the payment found in the given bucket has -// status InFlight, and returns an error otherwise. This should be used to -// ensure we only mark in-flight payments as succeeded or failed. -func ensureInFlight(payment *MPPayment) er.R { - paymentStatus := payment.Status - - switch { - - // The payment was indeed InFlight. - case paymentStatus == StatusInFlight: - return nil - - // Our records show the payment as unknown, meaning it never - // should have left the switch. - case paymentStatus == StatusUnknown: - return ErrPaymentNotInitiated.Default() - - // The payment succeeded previously. 
- case paymentStatus == StatusSucceeded: - return ErrPaymentAlreadySucceeded.Default() - - // The payment was already failed. - case paymentStatus == StatusFailed: - return ErrPaymentAlreadyFailed.Default() - - default: - return ErrUnknownPaymentStatus.Default() - } -} - -// InFlightPayment is a wrapper around the info for a payment that has status -// InFlight. -type InFlightPayment struct { - // Info is the PaymentCreationInfo of the in-flight payment. - Info *PaymentCreationInfo -} - -// FetchInFlightPayments returns all payments with status InFlight. -func (p *PaymentControl) FetchInFlightPayments() ([]*InFlightPayment, er.R) { - var inFlights []*InFlightPayment - err := kvdb.View(p.db, func(tx kvdb.RTx) er.R { - payments := tx.ReadBucket(paymentsRootBucket) - if payments == nil { - return nil - } - - return payments.ForEach(func(k, _ []byte) er.R { - bucket := payments.NestedReadBucket(k) - if bucket == nil { - return er.Errorf("non bucket element") - } - - // If the status is not InFlight, we can return early. - paymentStatus, err := fetchPaymentStatus(bucket) - if err != nil { - return err - } - - if paymentStatus != StatusInFlight { - return nil - } - - inFlight := &InFlightPayment{} - - // Get the CreationInfo. 
- inFlight.Info, err = fetchCreationInfo(bucket) - if err != nil { - return err - } - - inFlights = append(inFlights, inFlight) - return nil - }) - }, func() { - inFlights = nil - }) - if err != nil { - return nil, err - } - - return inFlights, nil -} diff --git a/lnd/channeldb/payment_control_test.go b/lnd/channeldb/payment_control_test.go deleted file mode 100644 index d1f16567..00000000 --- a/lnd/channeldb/payment_control_test.go +++ /dev/null @@ -1,1022 +0,0 @@ -package channeldb - -import ( - "bytes" - "crypto/rand" - "crypto/sha256" - "fmt" - "reflect" - "testing" - "time" - - "github.com/davecgh/go-spew/spew" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/lnd/lntypes" - "github.com/pkt-cash/pktd/lnd/record" - "github.com/pkt-cash/pktd/pktwallet/walletdb" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func genPreimage() ([32]byte, er.R) { - var preimage [32]byte - if _, err := util.ReadFull(rand.Reader, preimage[:]); err != nil { - return preimage, err - } - return preimage, nil -} - -func genInfo() (*PaymentCreationInfo, *HTLCAttemptInfo, - lntypes.Preimage, er.R) { - - preimage, err := genPreimage() - if err != nil { - return nil, nil, preimage, er.Errorf("unable to "+ - "generate preimage: %v", err) - } - - rhash := sha256.Sum256(preimage[:]) - return &PaymentCreationInfo{ - PaymentHash: rhash, - Value: testRoute.ReceiverAmt(), - CreationTime: time.Unix(time.Now().Unix(), 0), - PaymentRequest: []byte("hola"), - }, - &HTLCAttemptInfo{ - AttemptID: 0, - SessionKey: priv, - Route: *testRoute.Copy(), - }, preimage, nil -} - -// TestPaymentControlSwitchFail checks that payment status returns to Failed -// status after failing, and that InitPayment allows another HTLC for the -// same payment hash. 
-func TestPaymentControlSwitchFail(t *testing.T) { - t.Parallel() - - db, cleanup, err := MakeTestDB() - defer cleanup() - if err != nil { - t.Fatalf("unable to init db: %v", err) - } - - pControl := NewPaymentControl(db) - - info, attempt, preimg, err := genInfo() - if err != nil { - t.Fatalf("unable to generate htlc message: %v", err) - } - - // Sends base htlc message which initiate StatusInFlight. - err = pControl.InitPayment(info.PaymentHash, info) - if err != nil { - t.Fatalf("unable to send htlc message: %v", err) - } - - assertPaymentIndex(t, pControl, info.PaymentHash) - assertPaymentStatus(t, pControl, info.PaymentHash, StatusInFlight) - assertPaymentInfo( - t, pControl, info.PaymentHash, info, nil, nil, - ) - - // Fail the payment, which should moved it to Failed. - failReason := FailureReasonNoRoute - _, err = pControl.Fail(info.PaymentHash, failReason) - if err != nil { - t.Fatalf("unable to fail payment hash: %v", err) - } - - // Verify the status is indeed Failed. - assertPaymentStatus(t, pControl, info.PaymentHash, StatusFailed) - assertPaymentInfo( - t, pControl, info.PaymentHash, info, &failReason, nil, - ) - - // Lookup the payment so we can get its old sequence number before it is - // overwritten. - payment, err := pControl.FetchPayment(info.PaymentHash) - util.RequireNoErr(t, err) - - // Sends the htlc again, which should succeed since the prior payment - // failed. - err = pControl.InitPayment(info.PaymentHash, info) - if err != nil { - t.Fatalf("unable to send htlc message: %v", err) - } - - // Check that our index has been updated, and the old index has been - // removed. - assertPaymentIndex(t, pControl, info.PaymentHash) - assertNoIndex(t, pControl, payment.SequenceNum) - - assertPaymentStatus(t, pControl, info.PaymentHash, StatusInFlight) - assertPaymentInfo( - t, pControl, info.PaymentHash, info, nil, nil, - ) - - // Record a new attempt. In this test scenario, the attempt fails. 
- // However, this is not communicated to control tower in the current - // implementation. It only registers the initiation of the attempt. - _, err = pControl.RegisterAttempt(info.PaymentHash, attempt) - if err != nil { - t.Fatalf("unable to register attempt: %v", err) - } - - htlcReason := HTLCFailUnreadable - _, err = pControl.FailAttempt( - info.PaymentHash, attempt.AttemptID, - &HTLCFailInfo{ - Reason: htlcReason, - }, - ) - if err != nil { - t.Fatal(err) - } - assertPaymentStatus(t, pControl, info.PaymentHash, StatusInFlight) - - htlc := &htlcStatus{ - HTLCAttemptInfo: attempt, - failure: &htlcReason, - } - - assertPaymentInfo(t, pControl, info.PaymentHash, info, nil, htlc) - - // Record another attempt. - attempt.AttemptID = 1 - _, err = pControl.RegisterAttempt(info.PaymentHash, attempt) - if err != nil { - t.Fatalf("unable to send htlc message: %v", err) - } - assertPaymentStatus(t, pControl, info.PaymentHash, StatusInFlight) - - htlc = &htlcStatus{ - HTLCAttemptInfo: attempt, - } - - assertPaymentInfo( - t, pControl, info.PaymentHash, info, nil, htlc, - ) - - // Settle the attempt and verify that status was changed to - // StatusSucceeded. - payment, err = pControl.SettleAttempt( - info.PaymentHash, attempt.AttemptID, - &HTLCSettleInfo{ - Preimage: preimg, - }, - ) - if err != nil { - t.Fatalf("error shouldn't have been received, got: %v", err) - } - - if len(payment.HTLCs) != 2 { - t.Fatalf("payment should have two htlcs, got: %d", - len(payment.HTLCs)) - } - - err = assertRouteEqual(&payment.HTLCs[0].Route, &attempt.Route) - if err != nil { - t.Fatalf("unexpected route returned: %v vs %v: %v", - spew.Sdump(attempt.Route), - spew.Sdump(payment.HTLCs[0].Route), err) - } - - assertPaymentStatus(t, pControl, info.PaymentHash, StatusSucceeded) - - htlc.settle = &preimg - assertPaymentInfo( - t, pControl, info.PaymentHash, info, nil, htlc, - ) - - // Attempt a final payment, which should now fail since the prior - // payment succeed. 
- err = pControl.InitPayment(info.PaymentHash, info) - if !ErrAlreadyPaid.Is(err) { - t.Fatalf("unable to send htlc message: %v", err) - } -} - -// TestPaymentControlSwitchDoubleSend checks the ability of payment control to -// prevent double sending of htlc message, when message is in StatusInFlight. -func TestPaymentControlSwitchDoubleSend(t *testing.T) { - t.Parallel() - - db, cleanup, err := MakeTestDB() - defer cleanup() - - if err != nil { - t.Fatalf("unable to init db: %v", err) - } - - pControl := NewPaymentControl(db) - - info, attempt, preimg, err := genInfo() - if err != nil { - t.Fatalf("unable to generate htlc message: %v", err) - } - - // Sends base htlc message which initiate base status and move it to - // StatusInFlight and verifies that it was changed. - err = pControl.InitPayment(info.PaymentHash, info) - if err != nil { - t.Fatalf("unable to send htlc message: %v", err) - } - - assertPaymentIndex(t, pControl, info.PaymentHash) - assertPaymentStatus(t, pControl, info.PaymentHash, StatusInFlight) - assertPaymentInfo( - t, pControl, info.PaymentHash, info, nil, nil, - ) - - // Try to initiate double sending of htlc message with the same - // payment hash, should result in error indicating that payment has - // already been sent. - err = pControl.InitPayment(info.PaymentHash, info) - if !ErrPaymentInFlight.Is(err) { - t.Fatalf("payment control wrong behaviour: " + - "double sending must trigger ErrPaymentInFlight error") - } - - // Record an attempt. - _, err = pControl.RegisterAttempt(info.PaymentHash, attempt) - if err != nil { - t.Fatalf("unable to send htlc message: %v", err) - } - assertPaymentStatus(t, pControl, info.PaymentHash, StatusInFlight) - - htlc := &htlcStatus{ - HTLCAttemptInfo: attempt, - } - assertPaymentInfo( - t, pControl, info.PaymentHash, info, nil, htlc, - ) - - // Sends base htlc message which initiate StatusInFlight. 
- err = pControl.InitPayment(info.PaymentHash, info) - if !ErrPaymentInFlight.Is(err) { - t.Fatalf("payment control wrong behaviour: " + - "double sending must trigger ErrPaymentInFlight error") - } - - // After settling, the error should be ErrAlreadyPaid. - _, err = pControl.SettleAttempt( - info.PaymentHash, attempt.AttemptID, - &HTLCSettleInfo{ - Preimage: preimg, - }, - ) - if err != nil { - t.Fatalf("error shouldn't have been received, got: %v", err) - } - assertPaymentStatus(t, pControl, info.PaymentHash, StatusSucceeded) - - htlc.settle = &preimg - assertPaymentInfo(t, pControl, info.PaymentHash, info, nil, htlc) - - err = pControl.InitPayment(info.PaymentHash, info) - if !ErrAlreadyPaid.Is(err) { - t.Fatalf("unable to send htlc message: %v", err) - } -} - -// TestPaymentControlSuccessesWithoutInFlight checks that the payment -// control will disallow calls to Success when no payment is in flight. -func TestPaymentControlSuccessesWithoutInFlight(t *testing.T) { - t.Parallel() - - db, cleanup, err := MakeTestDB() - defer cleanup() - - if err != nil { - t.Fatalf("unable to init db: %v", err) - } - - pControl := NewPaymentControl(db) - - info, _, preimg, err := genInfo() - if err != nil { - t.Fatalf("unable to generate htlc message: %v", err) - } - - // Attempt to complete the payment should fail. - _, err = pControl.SettleAttempt( - info.PaymentHash, 0, - &HTLCSettleInfo{ - Preimage: preimg, - }, - ) - if !ErrPaymentNotInitiated.Is(err) { - t.Fatalf("expected ErrPaymentNotInitiated, got %v", err) - } - - assertPaymentStatus(t, pControl, info.PaymentHash, StatusUnknown) -} - -// TestPaymentControlFailsWithoutInFlight checks that a strict payment -// control will disallow calls to Fail when no payment is in flight. 
-func TestPaymentControlFailsWithoutInFlight(t *testing.T) { - t.Parallel() - - db, cleanup, err := MakeTestDB() - defer cleanup() - - if err != nil { - t.Fatalf("unable to init db: %v", err) - } - - pControl := NewPaymentControl(db) - - info, _, _, err := genInfo() - if err != nil { - t.Fatalf("unable to generate htlc message: %v", err) - } - - // Calling Fail should return an error. - _, err = pControl.Fail(info.PaymentHash, FailureReasonNoRoute) - if !ErrPaymentNotInitiated.Is(err) { - t.Fatalf("expected ErrPaymentNotInitiated, got %v", err) - } - - assertPaymentStatus(t, pControl, info.PaymentHash, StatusUnknown) -} - -// TestPaymentControlDeleteNonInFlight checks that calling DeletePayments only -// deletes payments from the database that are not in-flight. -func TestPaymentControlDeleteNonInFligt(t *testing.T) { - t.Parallel() - - db, cleanup, err := MakeTestDB() - defer cleanup() - - if err != nil { - t.Fatalf("unable to init db: %v", err) - } - - // Create a sequence number for duplicate payments that will not collide - // with the sequence numbers for the payments we create. These values - // start at 1, so 9999 is a safe bet for this test. - var duplicateSeqNr = 9999 - - pControl := NewPaymentControl(db) - - payments := []struct { - failed bool - success bool - hasDuplicate bool - }{ - { - failed: true, - success: false, - hasDuplicate: false, - }, - { - failed: false, - success: true, - hasDuplicate: false, - }, - { - failed: false, - success: false, - hasDuplicate: false, - }, - { - failed: false, - success: true, - hasDuplicate: true, - }, - } - - for _, p := range payments { - info, attempt, preimg, err := genInfo() - if err != nil { - t.Fatalf("unable to generate htlc message: %v", err) - } - - // Sends base htlc message which initiate StatusInFlight. 
- err = pControl.InitPayment(info.PaymentHash, info) - if err != nil { - t.Fatalf("unable to send htlc message: %v", err) - } - _, err = pControl.RegisterAttempt(info.PaymentHash, attempt) - if err != nil { - t.Fatalf("unable to send htlc message: %v", err) - } - - htlc := &htlcStatus{ - HTLCAttemptInfo: attempt, - } - - if p.failed { - // Fail the payment attempt. - htlcFailure := HTLCFailUnreadable - _, err := pControl.FailAttempt( - info.PaymentHash, attempt.AttemptID, - &HTLCFailInfo{ - Reason: htlcFailure, - }, - ) - if err != nil { - t.Fatalf("unable to fail htlc: %v", err) - } - - // Fail the payment, which should moved it to Failed. - failReason := FailureReasonNoRoute - _, err = pControl.Fail(info.PaymentHash, failReason) - if err != nil { - t.Fatalf("unable to fail payment hash: %v", err) - } - - // Verify the status is indeed Failed. - assertPaymentStatus(t, pControl, info.PaymentHash, StatusFailed) - - htlc.failure = &htlcFailure - assertPaymentInfo( - t, pControl, info.PaymentHash, info, - &failReason, htlc, - ) - } else if p.success { - // Verifies that status was changed to StatusSucceeded. - _, err := pControl.SettleAttempt( - info.PaymentHash, attempt.AttemptID, - &HTLCSettleInfo{ - Preimage: preimg, - }, - ) - if err != nil { - t.Fatalf("error shouldn't have been received, got: %v", err) - } - - assertPaymentStatus(t, pControl, info.PaymentHash, StatusSucceeded) - - htlc.settle = &preimg - assertPaymentInfo( - t, pControl, info.PaymentHash, info, nil, htlc, - ) - } else { - assertPaymentStatus(t, pControl, info.PaymentHash, StatusInFlight) - assertPaymentInfo( - t, pControl, info.PaymentHash, info, nil, htlc, - ) - } - - // If the payment is intended to have a duplicate payment, we - // add one. - if p.hasDuplicate { - appendDuplicatePayment( - t, pControl.db, info.PaymentHash, - uint64(duplicateSeqNr), - ) - duplicateSeqNr++ - } - } - - // Delete payments. 
- if err := db.DeletePayments(); err != nil { - t.Fatal(err) - } - - // This should leave the in-flight payment. - dbPayments, err := db.FetchPayments() - if err != nil { - t.Fatal(err) - } - - if len(dbPayments) != 1 { - t.Fatalf("expected one payment, got %d", len(dbPayments)) - } - - status := dbPayments[0].Status - if status != StatusInFlight { - t.Fatalf("expected in-fligth status, got %v", status) - } - - // Finally, check that we only have a single index left in the payment - // index bucket. - var indexCount int - err = kvdb.View(db, func(tx walletdb.ReadTx) er.R { - index := tx.ReadBucket(paymentsIndexBucket) - - return index.ForEach(func(k, v []byte) er.R { - indexCount++ - return nil - }) - }, func() { indexCount = 0 }) - util.RequireNoErr(t, err) - - require.Equal(t, 1, indexCount) -} - -// TestPaymentControlMultiShard checks the ability of payment control to -// have multiple in-flight HTLCs for a single payment. -func TestPaymentControlMultiShard(t *testing.T) { - t.Parallel() - - // We will register three HTLC attempts, and always fail the second - // one. We'll generate all combinations of settling/failing the first - // and third HTLC, and assert that the payment status end up as we - // expect. - type testCase struct { - settleFirst bool - settleLast bool - } - - var tests []testCase - for _, f := range []bool{true, false} { - for _, l := range []bool{true, false} { - tests = append(tests, testCase{f, l}) - } - } - - runSubTest := func(t *testing.T, test testCase) { - db, cleanup, err := MakeTestDB() - defer cleanup() - - if err != nil { - t.Fatalf("unable to init db: %v", err) - } - - pControl := NewPaymentControl(db) - - info, attempt, preimg, err := genInfo() - if err != nil { - t.Fatalf("unable to generate htlc message: %v", err) - } - - // Init the payment, moving it to the StatusInFlight state. 
- err = pControl.InitPayment(info.PaymentHash, info) - if err != nil { - t.Fatalf("unable to send htlc message: %v", err) - } - - assertPaymentIndex(t, pControl, info.PaymentHash) - assertPaymentStatus(t, pControl, info.PaymentHash, StatusInFlight) - assertPaymentInfo( - t, pControl, info.PaymentHash, info, nil, nil, - ) - - // Create three unique attempts we'll use for the test, and - // register them with the payment control. We set each - // attempts's value to one third of the payment amount, and - // populate the MPP options. - shardAmt := info.Value / 3 - attempt.Route.FinalHop().AmtToForward = shardAmt - attempt.Route.FinalHop().MPP = record.NewMPP( - info.Value, [32]byte{1}, - ) - - var attempts []*HTLCAttemptInfo - for i := uint64(0); i < 3; i++ { - a := *attempt - a.AttemptID = i - attempts = append(attempts, &a) - - _, err = pControl.RegisterAttempt(info.PaymentHash, &a) - if err != nil { - t.Fatalf("unable to send htlc message: %v", err) - } - assertPaymentStatus( - t, pControl, info.PaymentHash, StatusInFlight, - ) - - htlc := &htlcStatus{ - HTLCAttemptInfo: &a, - } - assertPaymentInfo( - t, pControl, info.PaymentHash, info, nil, htlc, - ) - } - - // For a fourth attempt, check that attempting to - // register it will fail since the total sent amount - // will be too large. - b := *attempt - b.AttemptID = 3 - _, err = pControl.RegisterAttempt(info.PaymentHash, &b) - if !ErrValueExceedsAmt.Is(err) { - t.Fatalf("expected ErrValueExceedsAmt, got: %v", - err) - } - - // Fail the second attempt. - a := attempts[1] - htlcFail := HTLCFailUnreadable - _, err = pControl.FailAttempt( - info.PaymentHash, a.AttemptID, - &HTLCFailInfo{ - Reason: htlcFail, - }, - ) - if err != nil { - t.Fatal(err) - } - - htlc := &htlcStatus{ - HTLCAttemptInfo: a, - failure: &htlcFail, - } - assertPaymentInfo( - t, pControl, info.PaymentHash, info, nil, htlc, - ) - - // Payment should still be in-flight. 
- assertPaymentStatus(t, pControl, info.PaymentHash, StatusInFlight) - - // Depending on the test case, settle or fail the first attempt. - a = attempts[0] - htlc = &htlcStatus{ - HTLCAttemptInfo: a, - } - - var firstFailReason *FailureReason - if test.settleFirst { - _, err := pControl.SettleAttempt( - info.PaymentHash, a.AttemptID, - &HTLCSettleInfo{ - Preimage: preimg, - }, - ) - if err != nil { - t.Fatalf("error shouldn't have been "+ - "received, got: %v", err) - } - - // Assert that the HTLC has had the preimage recorded. - htlc.settle = &preimg - assertPaymentInfo( - t, pControl, info.PaymentHash, info, nil, htlc, - ) - } else { - _, err := pControl.FailAttempt( - info.PaymentHash, a.AttemptID, - &HTLCFailInfo{ - Reason: htlcFail, - }, - ) - if err != nil { - t.Fatalf("error shouldn't have been "+ - "received, got: %v", err) - } - - // Assert the failure was recorded. - htlc.failure = &htlcFail - assertPaymentInfo( - t, pControl, info.PaymentHash, info, nil, htlc, - ) - - // We also record a payment level fail, to move it into - // a terminal state. - failReason := FailureReasonNoRoute - _, err = pControl.Fail(info.PaymentHash, failReason) - if err != nil { - t.Fatalf("unable to fail payment hash: %v", err) - } - - // Record the reason we failed the payment, such that - // we can assert this later in the test. - firstFailReason = &failReason - } - - // The payment should still be considered in-flight, since there - // is still an active HTLC. - assertPaymentStatus(t, pControl, info.PaymentHash, StatusInFlight) - - // Try to register yet another attempt. This should fail now - // that the payment has reached a terminal condition. - b = *attempt - b.AttemptID = 3 - _, err = pControl.RegisterAttempt(info.PaymentHash, &b) - if !ErrPaymentTerminal.Is(err) { - t.Fatalf("expected ErrPaymentTerminal, got: %v", err) - } - - assertPaymentStatus(t, pControl, info.PaymentHash, StatusInFlight) - - // Settle or fail the remaining attempt based on the testcase. 
- a = attempts[2] - htlc = &htlcStatus{ - HTLCAttemptInfo: a, - } - if test.settleLast { - // Settle the last outstanding attempt. - _, err = pControl.SettleAttempt( - info.PaymentHash, a.AttemptID, - &HTLCSettleInfo{ - Preimage: preimg, - }, - ) - if err != nil { - t.Fatalf("error shouldn't have been "+ - "received, got: %v", err) - } - - htlc.settle = &preimg - assertPaymentInfo( - t, pControl, info.PaymentHash, info, - firstFailReason, htlc, - ) - } else { - // Fail the attempt. - _, err := pControl.FailAttempt( - info.PaymentHash, a.AttemptID, - &HTLCFailInfo{ - Reason: htlcFail, - }, - ) - if err != nil { - t.Fatalf("error shouldn't have been "+ - "received, got: %v", err) - } - - // Assert the failure was recorded. - htlc.failure = &htlcFail - assertPaymentInfo( - t, pControl, info.PaymentHash, info, - firstFailReason, htlc, - ) - - // Check that we can override any perevious terminal - // failure. This is to allow multiple concurrent shard - // write a terminal failure to the database without - // syncing. - failReason := FailureReasonPaymentDetails - _, err = pControl.Fail(info.PaymentHash, failReason) - if err != nil { - t.Fatalf("unable to fail payment hash: %v", err) - } - } - - // If any of the two attempts settled, the payment should end - // up in the Succeeded state. If both failed the payment should - // also be Failed at this poinnt. - finalStatus := StatusFailed - expRegErr := ErrPaymentAlreadyFailed - if test.settleFirst || test.settleLast { - finalStatus = StatusSucceeded - expRegErr = ErrPaymentAlreadySucceeded - } - - assertPaymentStatus(t, pControl, info.PaymentHash, finalStatus) - - // Finally assert we cannot register more attempts. 
- _, err = pControl.RegisterAttempt(info.PaymentHash, &b) - if !expRegErr.Is(err) { - t.Fatalf("expected error %v, got: %v", expRegErr, err) - } - } - - for _, test := range tests { - test := test - subTest := fmt.Sprintf("first=%v, second=%v", - test.settleFirst, test.settleLast) - - t.Run(subTest, func(t *testing.T) { - runSubTest(t, test) - }) - } -} - -func TestPaymentControlMPPRecordValidation(t *testing.T) { - t.Parallel() - - db, cleanup, err := MakeTestDB() - defer cleanup() - - if err != nil { - t.Fatalf("unable to init db: %v", err) - } - - pControl := NewPaymentControl(db) - - info, attempt, _, err := genInfo() - if err != nil { - t.Fatalf("unable to generate htlc message: %v", err) - } - - // Init the payment. - err = pControl.InitPayment(info.PaymentHash, info) - if err != nil { - t.Fatalf("unable to send htlc message: %v", err) - } - - // Create three unique attempts we'll use for the test, and - // register them with the payment control. We set each - // attempts's value to one third of the payment amount, and - // populate the MPP options. - shardAmt := info.Value / 3 - attempt.Route.FinalHop().AmtToForward = shardAmt - attempt.Route.FinalHop().MPP = record.NewMPP( - info.Value, [32]byte{1}, - ) - - _, err = pControl.RegisterAttempt(info.PaymentHash, attempt) - if err != nil { - t.Fatalf("unable to send htlc message: %v", err) - } - - // Now try to register a non-MPP attempt, which should fail. - b := *attempt - b.AttemptID = 1 - b.Route.FinalHop().MPP = nil - _, err = pControl.RegisterAttempt(info.PaymentHash, &b) - if !ErrMPPayment.Is(err) { - t.Fatalf("expected ErrMPPayment, got: %v", err) - } - - // Try to register attempt one with a different payment address. 
- b.Route.FinalHop().MPP = record.NewMPP( - info.Value, [32]byte{2}, - ) - _, err = pControl.RegisterAttempt(info.PaymentHash, &b) - if !ErrMPPPaymentAddrMismatch.Is(err) { - t.Fatalf("expected ErrMPPPaymentAddrMismatch, got: %v", err) - } - - // Try registering one with a different total amount. - b.Route.FinalHop().MPP = record.NewMPP( - info.Value/2, [32]byte{1}, - ) - _, err = pControl.RegisterAttempt(info.PaymentHash, &b) - if !ErrMPPTotalAmountMismatch.Is(err) { - t.Fatalf("expected ErrMPPTotalAmountMismatch, got: %v", err) - } - - // Create and init a new payment. This time we'll check that we cannot - // register an MPP attempt if we already registered a non-MPP one. - info, attempt, _, err = genInfo() - if err != nil { - t.Fatalf("unable to generate htlc message: %v", err) - } - - err = pControl.InitPayment(info.PaymentHash, info) - if err != nil { - t.Fatalf("unable to send htlc message: %v", err) - } - - attempt.Route.FinalHop().MPP = nil - _, err = pControl.RegisterAttempt(info.PaymentHash, attempt) - if err != nil { - t.Fatalf("unable to send htlc message: %v", err) - } - - // Attempt to register an MPP attempt, which should fail. - b = *attempt - b.AttemptID = 1 - b.Route.FinalHop().MPP = record.NewMPP( - info.Value, [32]byte{1}, - ) - - _, err = pControl.RegisterAttempt(info.PaymentHash, &b) - if !ErrNonMPPayment.Is(err) { - t.Fatalf("expected ErrNonMPPayment, got: %v", err) - } -} - -// assertPaymentStatus retrieves the status of the payment referred to by hash -// and compares it with the expected state. 
-func assertPaymentStatus(t *testing.T, p *PaymentControl, - hash lntypes.Hash, expStatus PaymentStatus) { - - t.Helper() - - payment, err := p.FetchPayment(hash) - if expStatus == StatusUnknown && ErrPaymentNotInitiated.Is(err) { - return - } - if err != nil { - t.Fatal(err) - } - - if payment.Status != expStatus { - t.Fatalf("payment status mismatch: expected %v, got %v", - expStatus, payment.Status) - } -} - -type htlcStatus struct { - *HTLCAttemptInfo - settle *lntypes.Preimage - failure *HTLCFailReason -} - -// assertPaymentInfo retrieves the payment referred to by hash and verifies the -// expected values. -func assertPaymentInfo(t *testing.T, p *PaymentControl, hash lntypes.Hash, - c *PaymentCreationInfo, f *FailureReason, a *htlcStatus) { - - t.Helper() - - payment, err := p.FetchPayment(hash) - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(payment.Info, c) { - t.Fatalf("PaymentCreationInfos don't match: %v vs %v", - spew.Sdump(payment.Info), spew.Sdump(c)) - } - - if f != nil { - if *payment.FailureReason != *f { - t.Fatal("unexpected failure reason") - } - } else { - if payment.FailureReason != nil { - t.Fatal("unexpected failure reason") - } - } - - if a == nil { - if len(payment.HTLCs) > 0 { - t.Fatal("expected no htlcs") - } - return - } - - htlc := payment.HTLCs[a.AttemptID] - if err := assertRouteEqual(&htlc.Route, &a.Route); err != nil { - t.Fatal("routes do not match") - } - - if htlc.AttemptID != a.AttemptID { - t.Fatalf("unnexpected attempt ID %v, expected %v", - htlc.AttemptID, a.AttemptID) - } - - if a.failure != nil { - if htlc.Failure == nil { - t.Fatalf("expected HTLC to be failed") - } - - if htlc.Failure.Reason != *a.failure { - t.Fatalf("expected HTLC failure %v, had %v", - *a.failure, htlc.Failure.Reason) - } - } else if htlc.Failure != nil { - t.Fatalf("expected no HTLC failure") - } - - if a.settle != nil { - if htlc.Settle.Preimage != *a.settle { - t.Fatalf("Preimages don't match: %x vs %x", - htlc.Settle.Preimage, 
a.settle) - } - } else if htlc.Settle != nil { - t.Fatal("expected no settle info") - } -} - -// fetchPaymentIndexEntry gets the payment hash for the sequence number provided -// from our payment indexes bucket. -func fetchPaymentIndexEntry(_ *testing.T, p *PaymentControl, - sequenceNumber uint64) (*lntypes.Hash, er.R) { - - var hash lntypes.Hash - - if err := kvdb.View(p.db, func(tx walletdb.ReadTx) er.R { - indexBucket := tx.ReadBucket(paymentsIndexBucket) - key := make([]byte, 8) - byteOrder.PutUint64(key, sequenceNumber) - - indexValue := indexBucket.Get(key) - if indexValue == nil { - return errNoSequenceNrIndex.Default() - } - - r := bytes.NewReader(indexValue) - - var err er.R - hash, err = deserializePaymentIndex(r) - return err - }, func() { - hash = lntypes.Hash{} - }); err != nil { - return nil, err - } - - return &hash, nil -} - -// assertPaymentIndex looks up the index for a payment in the db and checks -// that its payment hash matches the expected hash passed in. -func assertPaymentIndex(t *testing.T, p *PaymentControl, - expectedHash lntypes.Hash) { - - // Lookup the payment so that we have its sequence number and check - // that is has correctly been indexed in the payment indexes bucket. - pmt, err := p.FetchPayment(expectedHash) - util.RequireNoErr(t, err) - - hash, err := fetchPaymentIndexEntry(t, p, pmt.SequenceNum) - util.RequireNoErr(t, err) - assert.Equal(t, expectedHash, *hash) -} - -// assertNoIndex checks that an index for the sequence number provided does not -// exist. 
-func assertNoIndex(t *testing.T, p *PaymentControl, seqNr uint64) { - _, err := fetchPaymentIndexEntry(t, p, seqNr) - require.True(t, errNoSequenceNrIndex.Is(err)) -} diff --git a/lnd/channeldb/payments.go b/lnd/channeldb/payments.go deleted file mode 100644 index 378f9f35..00000000 --- a/lnd/channeldb/payments.go +++ /dev/null @@ -1,1084 +0,0 @@ -package channeldb - -import ( - "bytes" - "encoding/binary" - "io" - "sort" - "time" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/lnd/lntypes" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/record" - "github.com/pkt-cash/pktd/lnd/routing/route" - "github.com/pkt-cash/pktd/lnd/tlv" - "github.com/pkt-cash/pktd/wire" -) - -var ( - // paymentsRootBucket is the name of the top-level bucket within the - // database that stores all data related to payments. Within this - // bucket, each payment hash its own sub-bucket keyed by its payment - // hash. - // - // Bucket hierarchy: - // - // root-bucket - // | - // |-- - // | |--sequence-key: - // | |--creation-info-key: - // | |--fail-info-key: <(optional) fail info> - // | | - // | |--payment-htlcs-bucket (shard-bucket) - // | | | - // | | |-- - // | | | |--htlc-attempt-info-key: - // | | | |--htlc-settle-info-key: <(optional) settle info> - // | | | |--htlc-fail-info-key: <(optional) fail info> - // | | | - // | | |-- - // | | | | - // | | ... ... - // | | - // | | - // | |--duplicate-bucket (only for old, completed payments) - // | | - // | |-- - // | | |--sequence-key: - // | | |--creation-info-key: - // | | |--attempt-info-key: - // | | |--settle-info-key: - // | | |--fail-info-key: - // | | - // | |-- - // | | | - // | ... ... - // | - // |-- - // | | - // | ... - // ... 
- // - paymentsRootBucket = []byte("payments-root-bucket") - - // paymentSequenceKey is a key used in the payment's sub-bucket to - // store the sequence number of the payment. - paymentSequenceKey = []byte("payment-sequence-key") - - // paymentCreationInfoKey is a key used in the payment's sub-bucket to - // store the creation info of the payment. - paymentCreationInfoKey = []byte("payment-creation-info") - - // paymentHtlcsBucket is a bucket where we'll store the information - // about the HTLCs that were attempted for a payment. - paymentHtlcsBucket = []byte("payment-htlcs-bucket") - - // htlcAttemptInfoKey is a key used in a HTLC's sub-bucket to store the - // info about the attempt that was done for the HTLC in question. - htlcAttemptInfoKey = []byte("htlc-attempt-info") - - // htlcSettleInfoKey is a key used in a HTLC's sub-bucket to store the - // settle info, if any. - htlcSettleInfoKey = []byte("htlc-settle-info") - - // htlcFailInfoKey is a key used in a HTLC's sub-bucket to store - // failure information, if any. - htlcFailInfoKey = []byte("htlc-fail-info") - - // paymentFailInfoKey is a key used in the payment's sub-bucket to - // store information about the reason a payment failed. - paymentFailInfoKey = []byte("payment-fail-info") - - // paymentsIndexBucket is the name of the top-level bucket within the - // database that stores an index of payment sequence numbers to its - // payment hash. - // payments-sequence-index-bucket - // |--: - // |--... - // |--: - paymentsIndexBucket = []byte("payments-index-bucket") -) - -var ( - // ErrNoSequenceNumber is returned if we lookup a payment which does - // not have a sequence number. - ErrNoSequenceNumber = Err.CodeWithDetail("ErrNoSequenceNumber", "sequence number not found") - - // ErrDuplicateNotFound is returned when we lookup a payment by its - // index and cannot find a payment with a matching sequence number. 
- ErrDuplicateNotFound = Err.CodeWithDetail("ErrDuplicateNotFound", "duplicate payment not found") - - // ErrNoDuplicateBucket is returned when we expect to find duplicates - // when looking up a payment from its index, but the payment does not - // have any. - ErrNoDuplicateBucket = Err.CodeWithDetail("ErrNoDuplicateBucket", "expected duplicate bucket") - - // ErrNoDuplicateNestedBucket is returned if we do not find duplicate - // payments in their own sub-bucket. - ErrNoDuplicateNestedBucket = Err.CodeWithDetail("ErrNoDuplicateNestedBucket", "nested duplicate bucket not "+ - "found") -) - -// FailureReason encodes the reason a payment ultimately failed. -type FailureReason byte - -const ( - // FailureReasonTimeout indicates that the payment did timeout before a - // successful payment attempt was made. - FailureReasonTimeout FailureReason = 0 - - // FailureReasonNoRoute indicates no successful route to the - // destination was found during path finding. - FailureReasonNoRoute FailureReason = 1 - - // FailureReasonError indicates that an unexpected error happened during - // payment. - FailureReasonError FailureReason = 2 - - // FailureReasonPaymentDetails indicates that either the hash is unknown - // or the final cltv delta or amount is incorrect. - FailureReasonPaymentDetails FailureReason = 3 - - // FailureReasonInsufficientBalance indicates that we didn't have enough - // balance to complete the payment. - FailureReasonInsufficientBalance FailureReason = 4 - - // TODO(halseth): cancel state. - - // TODO(joostjager): Add failure reasons for: - // LocalLiquidityInsufficient, RemoteCapacityInsufficient. -) - -// Error returns a human readable error string for the FailureReason. -func (r FailureReason) Error() string { - return r.String() -} - -// String returns a human readable FailureReason. 
-func (r FailureReason) String() string { - switch r { - case FailureReasonTimeout: - return "timeout" - case FailureReasonNoRoute: - return "no_route" - case FailureReasonError: - return "error" - case FailureReasonPaymentDetails: - return "incorrect_payment_details" - case FailureReasonInsufficientBalance: - return "insufficient_balance" - } - - return "unknown" -} - -// PaymentStatus represent current status of payment -type PaymentStatus byte - -const ( - // StatusUnknown is the status where a payment has never been initiated - // and hence is unknown. - StatusUnknown PaymentStatus = 0 - - // StatusInFlight is the status where a payment has been initiated, but - // a response has not been received. - StatusInFlight PaymentStatus = 1 - - // StatusSucceeded is the status where a payment has been initiated and - // the payment was completed successfully. - StatusSucceeded PaymentStatus = 2 - - // StatusFailed is the status where a payment has been initiated and a - // failure result has come back. - StatusFailed PaymentStatus = 3 -) - -// String returns readable representation of payment status. -func (ps PaymentStatus) String() string { - switch ps { - case StatusUnknown: - return "Unknown" - case StatusInFlight: - return "In Flight" - case StatusSucceeded: - return "Succeeded" - case StatusFailed: - return "Failed" - default: - return "Unknown" - } -} - -// PaymentCreationInfo is the information necessary to have ready when -// initiating a payment, moving it into state InFlight. -type PaymentCreationInfo struct { - // PaymentHash is the hash this payment is paying to. - PaymentHash lntypes.Hash - - // Value is the amount we are paying. - Value lnwire.MilliSatoshi - - // CreationTime is the time when this payment was initiated. - CreationTime time.Time - - // PaymentRequest is the full payment request, if any. - PaymentRequest []byte -} - -// FetchPayments returns all sent payments found in the DB. 
-// -// nolint: dupl -func (db *DB) FetchPayments() ([]*MPPayment, er.R) { - var payments []*MPPayment - - err := kvdb.View(db, func(tx kvdb.RTx) er.R { - paymentsBucket := tx.ReadBucket(paymentsRootBucket) - if paymentsBucket == nil { - return nil - } - - return paymentsBucket.ForEach(func(k, v []byte) er.R { - bucket := paymentsBucket.NestedReadBucket(k) - if bucket == nil { - // We only expect sub-buckets to be found in - // this top-level bucket. - return er.Errorf("non bucket element in " + - "payments bucket") - } - - p, err := fetchPayment(bucket) - if err != nil { - return err - } - - payments = append(payments, p) - - // For older versions of lnd, duplicate payments to a - // payment has was possible. These will be found in a - // sub-bucket indexed by their sequence number if - // available. - duplicatePayments, err := fetchDuplicatePayments(bucket) - if err != nil { - return err - } - - payments = append(payments, duplicatePayments...) - return nil - }) - }, func() { - payments = nil - }) - if err != nil { - return nil, err - } - - // Before returning, sort the payments by their sequence number. - sort.Slice(payments, func(i, j int) bool { - return payments[i].SequenceNum < payments[j].SequenceNum - }) - - return payments, nil -} - -func fetchCreationInfo(bucket kvdb.RBucket) (*PaymentCreationInfo, er.R) { - b := bucket.Get(paymentCreationInfoKey) - if b == nil { - return nil, er.Errorf("creation info not found") - } - - r := bytes.NewReader(b) - return deserializePaymentCreationInfo(r) -} - -func fetchPayment(bucket kvdb.RBucket) (*MPPayment, er.R) { - seqBytes := bucket.Get(paymentSequenceKey) - if seqBytes == nil { - return nil, er.Errorf("sequence number not found") - } - - sequenceNum := binary.BigEndian.Uint64(seqBytes) - - // Get the PaymentCreationInfo. 
- creationInfo, err := fetchCreationInfo(bucket) - if err != nil { - return nil, err - - } - - var htlcs []HTLCAttempt - htlcsBucket := bucket.NestedReadBucket(paymentHtlcsBucket) - if htlcsBucket != nil { - // Get the payment attempts. This can be empty. - htlcs, err = fetchHtlcAttempts(htlcsBucket) - if err != nil { - return nil, err - } - } - - // Get failure reason if available. - var failureReason *FailureReason - b := bucket.Get(paymentFailInfoKey) - if b != nil { - reason := FailureReason(b[0]) - failureReason = &reason - } - - // Go through all HTLCs for this payment, noting whether we have any - // settled HTLC, and any still in-flight. - var inflight, settled bool - for _, h := range htlcs { - if h.Failure != nil { - continue - } - - if h.Settle != nil { - settled = true - continue - } - - // If any of the HTLCs are not failed nor settled, we - // still have inflight HTLCs. - inflight = true - } - - // Use the DB state to determine the status of the payment. - var paymentStatus PaymentStatus - - switch { - - // If any of the the HTLCs did succeed and there are no HTLCs in - // flight, the payment succeeded. - case !inflight && settled: - paymentStatus = StatusSucceeded - - // If we have no in-flight HTLCs, and the payment failure is set, the - // payment is considered failed. - case !inflight && failureReason != nil: - paymentStatus = StatusFailed - - // Otherwise it is still in flight. - default: - paymentStatus = StatusInFlight - } - - return &MPPayment{ - SequenceNum: sequenceNum, - Info: creationInfo, - HTLCs: htlcs, - FailureReason: failureReason, - Status: paymentStatus, - }, nil -} - -// fetchHtlcAttempts retrives all htlc attempts made for the payment found in -// the given bucket. 
-func fetchHtlcAttempts(bucket kvdb.RBucket) ([]HTLCAttempt, er.R) { - htlcs := make([]HTLCAttempt, 0) - - err := bucket.ForEach(func(k, _ []byte) er.R { - aid := byteOrder.Uint64(k) - htlcBucket := bucket.NestedReadBucket(k) - - attemptInfo, err := fetchHtlcAttemptInfo( - htlcBucket, - ) - if err != nil { - return err - } - attemptInfo.AttemptID = aid - - htlc := HTLCAttempt{ - HTLCAttemptInfo: *attemptInfo, - } - - // Settle info might be nil. - htlc.Settle, err = fetchHtlcSettleInfo(htlcBucket) - if err != nil { - return err - } - - // Failure info might be nil. - htlc.Failure, err = fetchHtlcFailInfo(htlcBucket) - if err != nil { - return err - } - - htlcs = append(htlcs, htlc) - return nil - }) - if err != nil { - return nil, err - } - - return htlcs, nil -} - -// fetchHtlcAttemptInfo fetches the payment attempt info for this htlc from the -// bucket. -func fetchHtlcAttemptInfo(bucket kvdb.RBucket) (*HTLCAttemptInfo, er.R) { - b := bucket.Get(htlcAttemptInfoKey) - if b == nil { - return nil, errNoAttemptInfo.Default() - } - - r := bytes.NewReader(b) - return deserializeHTLCAttemptInfo(r) -} - -// fetchHtlcSettleInfo retrieves the settle info for the htlc. If the htlc isn't -// settled, nil is returned. -func fetchHtlcSettleInfo(bucket kvdb.RBucket) (*HTLCSettleInfo, er.R) { - b := bucket.Get(htlcSettleInfoKey) - if b == nil { - // Settle info is optional. - return nil, nil - } - - r := bytes.NewReader(b) - return deserializeHTLCSettleInfo(r) -} - -// fetchHtlcFailInfo retrieves the failure info for the htlc. If the htlc hasn't -// failed, nil is returned. -func fetchHtlcFailInfo(bucket kvdb.RBucket) (*HTLCFailInfo, er.R) { - b := bucket.Get(htlcFailInfoKey) - if b == nil { - // Fail info is optional. - return nil, nil - } - - r := bytes.NewReader(b) - return deserializeHTLCFailInfo(r) -} - -// PaymentsQuery represents a query to the payments database starting or ending -// at a certain offset index. The number of retrieved records can be limited. 
-type PaymentsQuery struct { - // IndexOffset determines the starting point of the payments query and - // is always exclusive. In normal order, the query starts at the next - // higher (available) index compared to IndexOffset. In reversed order, - // the query ends at the next lower (available) index compared to the - // IndexOffset. In the case of a zero index_offset, the query will start - // with the oldest payment when paginating forwards, or will end with - // the most recent payment when paginating backwards. - IndexOffset uint64 - - // MaxPayments is the maximal number of payments returned in the - // payments query. - MaxPayments uint64 - - // Reversed gives a meaning to the IndexOffset. If reversed is set to - // true, the query will fetch payments with indices lower than the - // IndexOffset, otherwise, it will return payments with indices greater - // than the IndexOffset. - Reversed bool - - // If IncludeIncomplete is true, then return payments that have not yet - // fully completed. This means that pending payments, as well as failed - // payments will show up if this field is set to true. - IncludeIncomplete bool -} - -// PaymentsResponse contains the result of a query to the payments database. -// It includes the set of payments that match the query and integers which -// represent the index of the first and last item returned in the series of -// payments. These integers allow callers to resume their query in the event -// that the query's response exceeds the max number of returnable events. -type PaymentsResponse struct { - // Payments is the set of payments returned from the database for the - // PaymentsQuery. - Payments []*MPPayment - - // FirstIndexOffset is the index of the first element in the set of - // returned MPPayments. Callers can use this to resume their query - // in the event that the slice has too many events to fit into a single - // response. The offset can be used to continue reverse pagination. 
- FirstIndexOffset uint64 - - // LastIndexOffset is the index of the last element in the set of - // returned MPPayments. Callers can use this to resume their query - // in the event that the slice has too many events to fit into a single - // response. The offset can be used to continue forward pagination. - LastIndexOffset uint64 -} - -// QueryPayments is a query to the payments database which is restricted -// to a subset of payments by the payments query, containing an offset -// index and a maximum number of returned payments. -func (db *DB) QueryPayments(query PaymentsQuery) (PaymentsResponse, er.R) { - var resp PaymentsResponse - - if err := kvdb.View(db, func(tx kvdb.RTx) er.R { - // Get the root payments bucket. - paymentsBucket := tx.ReadBucket(paymentsRootBucket) - if paymentsBucket == nil { - return nil - } - - // Get the index bucket which maps sequence number -> payment - // hash and duplicate bool. If we have a payments bucket, we - // should have an indexes bucket as well. - indexes := tx.ReadBucket(paymentsIndexBucket) - if indexes == nil { - return er.Errorf("index bucket does not exist") - } - - // accumulatePayments gets payments with the sequence number - // and hash provided and adds them to our list of payments if - // they meet the criteria of our query. It returns the number - // of payments that were added. - accumulatePayments := func(sequenceKey, hash []byte) (bool, er.R) { - - r := bytes.NewReader(hash) - paymentHash, err := deserializePaymentIndex(r) - if err != nil { - return false, err - } - - payment, err := fetchPaymentWithSequenceNumber( - tx, paymentHash, sequenceKey, - ) - if err != nil { - return false, err - } - - // To keep compatibility with the old API, we only - // return non-succeeded payments if requested. - if payment.Status != StatusSucceeded && - !query.IncludeIncomplete { - - return false, err - } - - // At this point, we've exhausted the offset, so we'll - // begin collecting invoices found within the range. 
- resp.Payments = append(resp.Payments, payment) - return true, nil - } - - // Create a paginator which reads from our sequence index bucket - // with the parameters provided by the payments query. - paginator := newPaginator( - indexes.ReadCursor(), query.Reversed, query.IndexOffset, - query.MaxPayments, - ) - - // Run a paginated query, adding payments to our response. - if err := paginator.query(accumulatePayments); err != nil { - return err - } - - return nil - }, func() { - resp = PaymentsResponse{} - }); err != nil { - return resp, err - } - - // Need to swap the payments slice order if reversed order. - if query.Reversed { - for l, r := 0, len(resp.Payments)-1; l < r; l, r = l+1, r-1 { - resp.Payments[l], resp.Payments[r] = - resp.Payments[r], resp.Payments[l] - } - } - - // Set the first and last index of the returned payments so that the - // caller can resume from this point later on. - if len(resp.Payments) > 0 { - resp.FirstIndexOffset = resp.Payments[0].SequenceNum - resp.LastIndexOffset = - resp.Payments[len(resp.Payments)-1].SequenceNum - } - - return resp, nil -} - -// fetchPaymentWithSequenceNumber get the payment which matches the payment hash -// *and* sequence number provided from the database. This is required because -// we previously had more than one payment per hash, so we have multiple indexes -// pointing to a single payment; we want to retrieve the correct one. -func fetchPaymentWithSequenceNumber(tx kvdb.RTx, paymentHash lntypes.Hash, - sequenceNumber []byte) (*MPPayment, er.R) { - - // We can now lookup the payment keyed by its hash in - // the payments root bucket. - bucket, err := fetchPaymentBucket(tx, paymentHash) - if err != nil { - return nil, err - } - - // A single payment hash can have multiple payments associated with it. - // We lookup our sequence number first, to determine whether this is - // the payment we are actually looking for. 
- seqBytes := bucket.Get(paymentSequenceKey) - if seqBytes == nil { - return nil, ErrNoSequenceNumber.Default() - } - - // If this top level payment has the sequence number we are looking for, - // return it. - if bytes.Equal(seqBytes, sequenceNumber) { - return fetchPayment(bucket) - } - - // If we were not looking for the top level payment, we are looking for - // one of our duplicate payments. We need to iterate through the seq - // numbers in this bucket to find the correct payments. If we do not - // find a duplicate payments bucket here, something is wrong. - dup := bucket.NestedReadBucket(duplicatePaymentsBucket) - if dup == nil { - return nil, ErrNoDuplicateBucket.Default() - } - - var duplicatePayment *MPPayment - err = dup.ForEach(func(k, v []byte) er.R { - subBucket := dup.NestedReadBucket(k) - if subBucket == nil { - // We one bucket for each duplicate to be found. - return ErrNoDuplicateNestedBucket.Default() - } - - seqBytes := subBucket.Get(duplicatePaymentSequenceKey) - if seqBytes == nil { - return err - } - - // If this duplicate payment is not the sequence number we are - // looking for, we can continue. - if !bytes.Equal(seqBytes, sequenceNumber) { - return nil - } - - duplicatePayment, err = fetchDuplicatePayment(subBucket) - if err != nil { - return err - } - - return nil - }) - if err != nil { - return nil, err - } - - // If none of the duplicate payments matched our sequence number, we - // failed to find the payment with this sequence number; something is - // wrong. - if duplicatePayment == nil { - return nil, ErrDuplicateNotFound.Default() - } - - return duplicatePayment, nil -} - -// DeletePayments deletes all completed and failed payments from the DB. -func (db *DB) DeletePayments() er.R { - return kvdb.Update(db, func(tx kvdb.RwTx) er.R { - payments := tx.ReadWriteBucket(paymentsRootBucket) - if payments == nil { - return nil - } - - var ( - // deleteBuckets is the set of payment buckets we need - // to delete. 
- deleteBuckets [][]byte - - // deleteIndexes is the set of indexes pointing to these - // payments that need to be deleted. - deleteIndexes [][]byte - ) - err := payments.ForEach(func(k, _ []byte) er.R { - bucket := payments.NestedReadWriteBucket(k) - if bucket == nil { - // We only expect sub-buckets to be found in - // this top-level bucket. - return er.Errorf("non bucket element in " + - "payments bucket") - } - - // If the status is InFlight, we cannot safely delete - // the payment information, so we return early. - paymentStatus, err := fetchPaymentStatus(bucket) - if err != nil { - return err - } - - // If the status is InFlight, we cannot safely delete - // the payment information, so we return early. - if paymentStatus == StatusInFlight { - return nil - } - - // Add the bucket to the set of buckets we can delete. - deleteBuckets = append(deleteBuckets, k) - - // Get all the sequence number associated with the - // payment, including duplicates. - seqNrs, err := fetchSequenceNumbers(bucket) - if err != nil { - return err - } - - deleteIndexes = append(deleteIndexes, seqNrs...) - - return nil - }) - if err != nil { - return err - } - - for _, k := range deleteBuckets { - if err := payments.DeleteNestedBucket(k); err != nil { - return err - } - } - - // Get our index bucket and delete all indexes pointing to the - // payments we are deleting. - indexBucket := tx.ReadWriteBucket(paymentsIndexBucket) - for _, k := range deleteIndexes { - if err := indexBucket.Delete(k); err != nil { - return err - } - } - - return nil - }, func() {}) -} - -// fetchSequenceNumbers fetches all the sequence numbers associated with a -// payment, including those belonging to any duplicate payments. 
-func fetchSequenceNumbers(paymentBucket kvdb.RBucket) ([][]byte, er.R) { - seqNum := paymentBucket.Get(paymentSequenceKey) - if seqNum == nil { - return nil, er.New("expected sequence number") - } - - sequenceNumbers := [][]byte{seqNum} - - // Get the duplicate payments bucket, if it has no duplicates, just - // return early with the payment sequence number. - duplicates := paymentBucket.NestedReadBucket(duplicatePaymentsBucket) - if duplicates == nil { - return sequenceNumbers, nil - } - - // If we do have duplicated, they are keyed by sequence number, so we - // iterate through the duplicates bucket and add them to our set of - // sequence numbers. - if err := duplicates.ForEach(func(k, v []byte) er.R { - sequenceNumbers = append(sequenceNumbers, k) - return nil - }); err != nil { - return nil, err - } - - return sequenceNumbers, nil -} - -// nolint: dupl -func serializePaymentCreationInfo(w io.Writer, c *PaymentCreationInfo) er.R { - var scratch [8]byte - - if _, err := util.Write(w, c.PaymentHash[:]); err != nil { - return err - } - - byteOrder.PutUint64(scratch[:], uint64(c.Value)) - if _, err := util.Write(w, scratch[:]); err != nil { - return err - } - - if err := serializeTime(w, c.CreationTime); err != nil { - return err - } - - byteOrder.PutUint32(scratch[:4], uint32(len(c.PaymentRequest))) - if _, err := util.Write(w, scratch[:4]); err != nil { - return err - } - - if _, err := util.Write(w, c.PaymentRequest[:]); err != nil { - return err - } - - return nil -} - -func deserializePaymentCreationInfo(r io.Reader) (*PaymentCreationInfo, er.R) { - var scratch [8]byte - - c := &PaymentCreationInfo{} - - if _, err := util.ReadFull(r, c.PaymentHash[:]); err != nil { - return nil, err - } - - if _, err := util.ReadFull(r, scratch[:]); err != nil { - return nil, err - } - c.Value = lnwire.MilliSatoshi(byteOrder.Uint64(scratch[:])) - - creationTime, err := deserializeTime(r) - if err != nil { - return nil, err - } - c.CreationTime = creationTime - - if _, err := 
util.ReadFull(r, scratch[:4]); err != nil { - return nil, err - } - - reqLen := uint32(byteOrder.Uint32(scratch[:4])) - payReq := make([]byte, reqLen) - if reqLen > 0 { - if _, err := util.ReadFull(r, payReq); err != nil { - return nil, err - } - } - c.PaymentRequest = payReq - - return c, nil -} - -func serializeHTLCAttemptInfo(w io.Writer, a *HTLCAttemptInfo) er.R { - if err := WriteElements(w, a.SessionKey); err != nil { - return err - } - - if err := SerializeRoute(w, a.Route); err != nil { - return err - } - - return serializeTime(w, a.AttemptTime) -} - -func deserializeHTLCAttemptInfo(r io.Reader) (*HTLCAttemptInfo, er.R) { - a := &HTLCAttemptInfo{} - err := ReadElements(r, &a.SessionKey) - if err != nil { - return nil, err - } - a.Route, err = DeserializeRoute(r) - if err != nil { - return nil, err - } - - a.AttemptTime, err = deserializeTime(r) - if err != nil { - return nil, err - } - - return a, nil -} - -func serializeHop(w io.Writer, h *route.Hop) er.R { - if err := WriteElements(w, - h.PubKeyBytes[:], - h.ChannelID, - h.OutgoingTimeLock, - h.AmtToForward, - ); err != nil { - return err - } - - if err := util.WriteBin(w, byteOrder, h.LegacyPayload); err != nil { - return err - } - - // For legacy payloads, we don't need to write any TLV records, so - // we'll write a zero indicating the our serialized TLV map has no - // records. - if h.LegacyPayload { - return WriteElements(w, uint32(0)) - } - - // Gather all non-primitive TLV records so that they can be serialized - // as a single blob. - // - // TODO(conner): add migration to unify all fields in a single TLV - // blobs. The split approach will cause headaches down the road as more - // fields are added, which we can avoid by having a single TLV stream - // for all payload fields. - var records []tlv.Record - if h.MPP != nil { - records = append(records, h.MPP.Record()) - } - - // Final sanity check to absolutely rule out custom records that are not - // custom and write into the standard range. 
- if err := h.CustomRecords.Validate(); err != nil { - return err - } - - // Convert custom records to tlv and add to the record list. - // MapToRecords sorts the list, so adding it here will keep the list - // canonical. - tlvRecords := tlv.MapToRecords(h.CustomRecords) - records = append(records, tlvRecords...) - - // Otherwise, we'll transform our slice of records into a map of the - // raw bytes, then serialize them in-line with a length (number of - // elements) prefix. - mapRecords, err := tlv.RecordsToMap(records) - if err != nil { - return err - } - - numRecords := uint32(len(mapRecords)) - if err := WriteElements(w, numRecords); err != nil { - return err - } - - for recordType, rawBytes := range mapRecords { - if err := WriteElements(w, recordType); err != nil { - return err - } - - if err := wire.WriteVarBytes(w, 0, rawBytes); err != nil { - return err - } - } - - return nil -} - -// maxOnionPayloadSize is the largest Sphinx payload possible, so we don't need -// to read/write a TLV stream larger than this. -const maxOnionPayloadSize = 1300 - -func deserializeHop(r io.Reader) (*route.Hop, er.R) { - h := &route.Hop{} - - var pub []byte - if err := ReadElements(r, &pub); err != nil { - return nil, err - } - copy(h.PubKeyBytes[:], pub) - - if err := ReadElements(r, - &h.ChannelID, &h.OutgoingTimeLock, &h.AmtToForward, - ); err != nil { - return nil, err - } - - // TODO(roasbeef): change field to allow LegacyPayload false to be the - // legacy default? - err := util.ReadBin(r, byteOrder, &h.LegacyPayload) - if err != nil { - return nil, err - } - - var numElements uint32 - if err := ReadElements(r, &numElements); err != nil { - return nil, err - } - - // If there're no elements, then we can return early. 
- if numElements == 0 { - return h, nil - } - - tlvMap := make(map[uint64][]byte) - for i := uint32(0); i < numElements; i++ { - var tlvType uint64 - if err := ReadElements(r, &tlvType); err != nil { - return nil, err - } - - rawRecordBytes, err := wire.ReadVarBytes( - r, 0, maxOnionPayloadSize, "tlv", - ) - if err != nil { - return nil, err - } - - tlvMap[tlvType] = rawRecordBytes - } - - // If the MPP type is present, remove it from the generic TLV map and - // parse it back into a proper MPP struct. - // - // TODO(conner): add migration to unify all fields in a single TLV - // blobs. The split approach will cause headaches down the road as more - // fields are added, which we can avoid by having a single TLV stream - // for all payload fields. - mppType := uint64(record.MPPOnionType) - if mppBytes, ok := tlvMap[mppType]; ok { - delete(tlvMap, mppType) - - var ( - mpp = &record.MPP{} - mppRec = mpp.Record() - r = bytes.NewReader(mppBytes) - ) - err := mppRec.Decode(r, uint64(len(mppBytes))) - if err != nil { - return nil, err - } - h.MPP = mpp - } - - h.CustomRecords = tlvMap - - return h, nil -} - -// SerializeRoute serializes a route. -func SerializeRoute(w io.Writer, r route.Route) er.R { - if err := WriteElements(w, - r.TotalTimeLock, r.TotalAmount, r.SourcePubKey[:], - ); err != nil { - return err - } - - if err := WriteElements(w, uint32(len(r.Hops))); err != nil { - return err - } - - for _, h := range r.Hops { - if err := serializeHop(w, h); err != nil { - return err - } - } - - return nil -} - -// DeserializeRoute deserializes a route. 
-func DeserializeRoute(r io.Reader) (route.Route, er.R) { - rt := route.Route{} - if err := ReadElements(r, - &rt.TotalTimeLock, &rt.TotalAmount, - ); err != nil { - return rt, err - } - - var pub []byte - if err := ReadElements(r, &pub); err != nil { - return rt, err - } - copy(rt.SourcePubKey[:], pub) - - var numHops uint32 - if err := ReadElements(r, &numHops); err != nil { - return rt, err - } - - var hops []*route.Hop - for i := uint32(0); i < numHops; i++ { - hop, err := deserializeHop(r) - if err != nil { - return rt, err - } - hops = append(hops, hop) - } - rt.Hops = hops - - return rt, nil -} diff --git a/lnd/channeldb/payments_test.go b/lnd/channeldb/payments_test.go deleted file mode 100644 index 7ef6868b..00000000 --- a/lnd/channeldb/payments_test.go +++ /dev/null @@ -1,715 +0,0 @@ -package channeldb - -import ( - "bytes" - "math" - "reflect" - "testing" - "time" - - "github.com/davecgh/go-spew/spew" - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/lnd/lntypes" - "github.com/pkt-cash/pktd/lnd/record" - "github.com/pkt-cash/pktd/lnd/routing/route" - "github.com/pkt-cash/pktd/pktwallet/walletdb" - "github.com/stretchr/testify/require" -) - -var ( - priv, _ = btcec.NewPrivateKey(btcec.S256()) - pub = priv.PubKey() - - testHop1 = &route.Hop{ - PubKeyBytes: route.NewVertex(pub), - ChannelID: 12345, - OutgoingTimeLock: 111, - AmtToForward: 555, - CustomRecords: record.CustomSet{ - 65536: []byte{}, - 80001: []byte{}, - }, - MPP: record.NewMPP(32, [32]byte{0x42}), - } - - testHop2 = &route.Hop{ - PubKeyBytes: route.NewVertex(pub), - ChannelID: 12345, - OutgoingTimeLock: 111, - AmtToForward: 555, - LegacyPayload: true, - } - - testRoute = route.Route{ - TotalTimeLock: 123, - TotalAmount: 1234567, - SourcePubKey: route.NewVertex(pub), - Hops: []*route.Hop{ - testHop2, - testHop1, - }, - } -) - -func makeFakeInfo() 
(*PaymentCreationInfo, *HTLCAttemptInfo) { - var preimg lntypes.Preimage - copy(preimg[:], rev[:]) - - c := &PaymentCreationInfo{ - PaymentHash: preimg.Hash(), - Value: 1000, - // Use single second precision to avoid false positive test - // failures due to the monotonic time component. - CreationTime: time.Unix(time.Now().Unix(), 0), - PaymentRequest: []byte(""), - } - - a := &HTLCAttemptInfo{ - AttemptID: 44, - SessionKey: priv, - Route: testRoute, - AttemptTime: time.Unix(100, 0), - } - return c, a -} - -func TestSentPaymentSerialization(t *testing.T) { - t.Parallel() - - c, s := makeFakeInfo() - - var b bytes.Buffer - if err := serializePaymentCreationInfo(&b, c); err != nil { - t.Fatalf("unable to serialize creation info: %v", err) - } - - newCreationInfo, err := deserializePaymentCreationInfo(&b) - if err != nil { - t.Fatalf("unable to deserialize creation info: %v", err) - } - - if !reflect.DeepEqual(c, newCreationInfo) { - t.Fatalf("Payments do not match after "+ - "serialization/deserialization %v vs %v", - spew.Sdump(c), spew.Sdump(newCreationInfo), - ) - } - - b.Reset() - if err := serializeHTLCAttemptInfo(&b, s); err != nil { - t.Fatalf("unable to serialize info: %v", err) - } - - newWireInfo, err := deserializeHTLCAttemptInfo(&b) - if err != nil { - t.Fatalf("unable to deserialize info: %v", err) - } - newWireInfo.AttemptID = s.AttemptID - - // First we verify all the records match up porperly, as they aren't - // able to be properly compared using reflect.DeepEqual. - err = assertRouteEqual(&s.Route, &newWireInfo.Route) - if err != nil { - t.Fatalf("Routes do not match after "+ - "serialization/deserialization: %v", err) - } - - // Clear routes to allow DeepEqual to compare the remaining fields. 
- newWireInfo.Route = route.Route{} - s.Route = route.Route{} - - if !reflect.DeepEqual(s, newWireInfo) { - s.SessionKey.Curve = nil - newWireInfo.SessionKey.Curve = nil - t.Fatalf("Payments do not match after "+ - "serialization/deserialization %v vs %v", - spew.Sdump(s), spew.Sdump(newWireInfo), - ) - } -} - -// assertRouteEquals compares to routes for equality and returns an error if -// they are not equal. -func assertRouteEqual(a, b *route.Route) er.R { - if !reflect.DeepEqual(a, b) { - return er.Errorf("HTLCAttemptInfos don't match: %v vs %v", - spew.Sdump(a), spew.Sdump(b)) - } - - return nil -} - -func TestRouteSerialization(t *testing.T) { - t.Parallel() - - var b bytes.Buffer - if err := SerializeRoute(&b, testRoute); err != nil { - t.Fatal(err) - } - - r := bytes.NewReader(b.Bytes()) - route2, err := DeserializeRoute(r) - if err != nil { - t.Fatal(err) - } - - // First we verify all the records match up porperly, as they aren't - // able to be properly compared using reflect.DeepEqual. - err = assertRouteEqual(&testRoute, &route2) - if err != nil { - t.Fatalf("routes not equal: \n%v vs \n%v", - spew.Sdump(testRoute), spew.Sdump(route2)) - } -} - -// deletePayment removes a payment with paymentHash from the payments database. -func deletePayment(t *testing.T, db *DB, paymentHash lntypes.Hash, seqNr uint64) { - t.Helper() - - err := kvdb.Update(db, func(tx kvdb.RwTx) er.R { - payments := tx.ReadWriteBucket(paymentsRootBucket) - - // Delete the payment bucket. - err := payments.DeleteNestedBucket(paymentHash[:]) - if err != nil { - return err - } - - key := make([]byte, 8) - byteOrder.PutUint64(key, seqNr) - - // Delete the index that references this payment. - indexes := tx.ReadWriteBucket(paymentsIndexBucket) - return indexes.Delete(key) - }, func() {}) - - if err != nil { - t.Fatalf("could not delete "+ - "payment: %v", err) - } -} - -// TestQueryPayments tests retrieval of payments with forwards and reversed -// queries. 
-func TestQueryPayments(t *testing.T) { - // Define table driven test for QueryPayments. - // Test payments have sequence indices [1, 3, 4, 5, 6, 7]. - // Note that the payment with index 7 has the same payment hash as 6, - // and is stored in a nested bucket within payment 6 rather than being - // its own entry in the payments bucket. We do this to test retrieval - // of legacy payments. - tests := []struct { - name string - query PaymentsQuery - firstIndex uint64 - lastIndex uint64 - - // expectedSeqNrs contains the set of sequence numbers we expect - // our query to return. - expectedSeqNrs []uint64 - }{ - { - name: "IndexOffset at the end of the payments range", - query: PaymentsQuery{ - IndexOffset: 7, - MaxPayments: 7, - Reversed: false, - IncludeIncomplete: true, - }, - firstIndex: 0, - lastIndex: 0, - expectedSeqNrs: nil, - }, - { - name: "query in forwards order, start at beginning", - query: PaymentsQuery{ - IndexOffset: 0, - MaxPayments: 2, - Reversed: false, - IncludeIncomplete: true, - }, - firstIndex: 1, - lastIndex: 3, - expectedSeqNrs: []uint64{1, 3}, - }, - { - name: "query in forwards order, start at end, overflow", - query: PaymentsQuery{ - IndexOffset: 6, - MaxPayments: 2, - Reversed: false, - IncludeIncomplete: true, - }, - firstIndex: 7, - lastIndex: 7, - expectedSeqNrs: []uint64{7}, - }, - { - name: "start at offset index outside of payments", - query: PaymentsQuery{ - IndexOffset: 20, - MaxPayments: 2, - Reversed: false, - IncludeIncomplete: true, - }, - firstIndex: 0, - lastIndex: 0, - expectedSeqNrs: nil, - }, - { - name: "overflow in forwards order", - query: PaymentsQuery{ - IndexOffset: 4, - MaxPayments: math.MaxUint64, - Reversed: false, - IncludeIncomplete: true, - }, - firstIndex: 5, - lastIndex: 7, - expectedSeqNrs: []uint64{5, 6, 7}, - }, - { - name: "start at offset index outside of payments, " + - "reversed order", - query: PaymentsQuery{ - IndexOffset: 9, - MaxPayments: 2, - Reversed: true, - IncludeIncomplete: true, - }, - 
firstIndex: 6, - lastIndex: 7, - expectedSeqNrs: []uint64{6, 7}, - }, - { - name: "query in reverse order, start at end", - query: PaymentsQuery{ - IndexOffset: 0, - MaxPayments: 2, - Reversed: true, - IncludeIncomplete: true, - }, - firstIndex: 6, - lastIndex: 7, - expectedSeqNrs: []uint64{6, 7}, - }, - { - name: "query in reverse order, starting in middle", - query: PaymentsQuery{ - IndexOffset: 4, - MaxPayments: 2, - Reversed: true, - IncludeIncomplete: true, - }, - firstIndex: 1, - lastIndex: 3, - expectedSeqNrs: []uint64{1, 3}, - }, - { - name: "query in reverse order, starting in middle, " + - "with underflow", - query: PaymentsQuery{ - IndexOffset: 4, - MaxPayments: 5, - Reversed: true, - IncludeIncomplete: true, - }, - firstIndex: 1, - lastIndex: 3, - expectedSeqNrs: []uint64{1, 3}, - }, - { - name: "all payments in reverse, order maintained", - query: PaymentsQuery{ - IndexOffset: 0, - MaxPayments: 7, - Reversed: true, - IncludeIncomplete: true, - }, - firstIndex: 1, - lastIndex: 7, - expectedSeqNrs: []uint64{1, 3, 4, 5, 6, 7}, - }, - { - name: "exclude incomplete payments", - query: PaymentsQuery{ - IndexOffset: 0, - MaxPayments: 7, - Reversed: false, - IncludeIncomplete: false, - }, - firstIndex: 0, - lastIndex: 0, - expectedSeqNrs: nil, - }, - { - name: "query payments at index gap", - query: PaymentsQuery{ - IndexOffset: 1, - MaxPayments: 7, - Reversed: false, - IncludeIncomplete: true, - }, - firstIndex: 3, - lastIndex: 7, - expectedSeqNrs: []uint64{3, 4, 5, 6, 7}, - }, - { - name: "query payments reverse before index gap", - query: PaymentsQuery{ - IndexOffset: 3, - MaxPayments: 7, - Reversed: true, - IncludeIncomplete: true, - }, - firstIndex: 1, - lastIndex: 1, - expectedSeqNrs: []uint64{1}, - }, - { - name: "query payments reverse on index gap", - query: PaymentsQuery{ - IndexOffset: 2, - MaxPayments: 7, - Reversed: true, - IncludeIncomplete: true, - }, - firstIndex: 1, - lastIndex: 1, - expectedSeqNrs: []uint64{1}, - }, - { - name: "query 
payments forward on index gap", - query: PaymentsQuery{ - IndexOffset: 2, - MaxPayments: 2, - Reversed: false, - IncludeIncomplete: true, - }, - firstIndex: 3, - lastIndex: 4, - expectedSeqNrs: []uint64{3, 4}, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - db, cleanup, err := MakeTestDB() - if err != nil { - t.Fatalf("unable to init db: %v", err) - } - defer cleanup() - - // Make a preliminary query to make sure it's ok to - // query when we have no payments. - resp, err := db.QueryPayments(tt.query) - util.RequireNoErr(t, err) - require.Len(t, resp.Payments, 0) - - // Populate the database with a set of test payments. - // We create 6 original payments, deleting the payment - // at index 2 so that we cover the case where sequence - // numbers are missing. We also add a duplicate payment - // to the last payment added to test the legacy case - // where we have duplicates in the nested duplicates - // bucket. - nonDuplicatePayments := 6 - pControl := NewPaymentControl(db) - - for i := 0; i < nonDuplicatePayments; i++ { - // Generate a test payment. - info, _, _, err := genInfo() - if err != nil { - t.Fatalf("unable to create test "+ - "payment: %v", err) - } - - // Create a new payment entry in the database. - err = pControl.InitPayment(info.PaymentHash, info) - if err != nil { - t.Fatalf("unable to initialize "+ - "payment in database: %v", err) - } - - // Immediately delete the payment with index 2. - if i == 1 { - pmt, err := pControl.FetchPayment( - info.PaymentHash, - ) - util.RequireNoErr(t, err) - - deletePayment(t, db, info.PaymentHash, - pmt.SequenceNum) - } - - // If we are on the last payment entry, add a - // duplicate payment with sequence number equal - // to the parent payment + 1. 
- if i == (nonDuplicatePayments - 1) { - pmt, err := pControl.FetchPayment( - info.PaymentHash, - ) - util.RequireNoErr(t, err) - - appendDuplicatePayment( - t, pControl.db, - info.PaymentHash, - pmt.SequenceNum+1, - ) - } - } - - // Fetch all payments in the database. - allPayments, err := db.FetchPayments() - if err != nil { - t.Fatalf("payments could not be fetched from "+ - "database: %v", err) - } - - if len(allPayments) != 6 { - t.Fatalf("Number of payments received does not "+ - "match expected one. Got %v, want %v.", - len(allPayments), 6) - } - - querySlice, err := db.QueryPayments(tt.query) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if tt.firstIndex != querySlice.FirstIndexOffset || - tt.lastIndex != querySlice.LastIndexOffset { - t.Errorf("First or last index does not match "+ - "expected index. Want (%d, %d), got (%d, %d).", - tt.firstIndex, tt.lastIndex, - querySlice.FirstIndexOffset, - querySlice.LastIndexOffset) - } - - if len(querySlice.Payments) != len(tt.expectedSeqNrs) { - t.Errorf("expected: %v payments, got: %v", - len(allPayments), len(querySlice.Payments)) - } - - for i, seqNr := range tt.expectedSeqNrs { - q := querySlice.Payments[i] - if seqNr != q.SequenceNum { - t.Errorf("sequence numbers do not match, "+ - "got %v, want %v", q.SequenceNum, seqNr) - } - } - }) - } -} - -// TestFetchPaymentWithSequenceNumber tests lookup of payments with their -// sequence number. It sets up one payment with no duplicates, and another with -// two duplicates in its duplicates bucket then uses these payments to test the -// case where a specific duplicate is not found and the duplicates bucket is not -// present when we expect it to be. -func TestFetchPaymentWithSequenceNumber(t *testing.T) { - db, cleanup, err := MakeTestDB() - util.RequireNoErr(t, err) - - defer cleanup() - - pControl := NewPaymentControl(db) - - // Generate a test payment which does not have duplicates. 
- noDuplicates, _, _, err := genInfo() - util.RequireNoErr(t, err) - - // Create a new payment entry in the database. - err = pControl.InitPayment(noDuplicates.PaymentHash, noDuplicates) - util.RequireNoErr(t, err) - - // Fetch the payment so we can get its sequence nr. - noDuplicatesPayment, err := pControl.FetchPayment( - noDuplicates.PaymentHash, - ) - util.RequireNoErr(t, err) - - // Generate a test payment which we will add duplicates to. - hasDuplicates, _, _, err := genInfo() - util.RequireNoErr(t, err) - - // Create a new payment entry in the database. - err = pControl.InitPayment(hasDuplicates.PaymentHash, hasDuplicates) - util.RequireNoErr(t, err) - - // Fetch the payment so we can get its sequence nr. - hasDuplicatesPayment, err := pControl.FetchPayment( - hasDuplicates.PaymentHash, - ) - util.RequireNoErr(t, err) - - // We declare the sequence numbers used here so that we can reference - // them in tests. - var ( - duplicateOneSeqNr = hasDuplicatesPayment.SequenceNum + 1 - duplicateTwoSeqNr = hasDuplicatesPayment.SequenceNum + 2 - ) - - // Add two duplicates to our second payment. 
- appendDuplicatePayment( - t, db, hasDuplicates.PaymentHash, duplicateOneSeqNr, - ) - appendDuplicatePayment( - t, db, hasDuplicates.PaymentHash, duplicateTwoSeqNr, - ) - - tests := []struct { - name string - paymentHash lntypes.Hash - sequenceNumber uint64 - expectedErr *er.ErrorCode - }{ - { - name: "lookup payment without duplicates", - paymentHash: noDuplicates.PaymentHash, - sequenceNumber: noDuplicatesPayment.SequenceNum, - expectedErr: nil, - }, - { - name: "lookup payment with duplicates", - paymentHash: hasDuplicates.PaymentHash, - sequenceNumber: hasDuplicatesPayment.SequenceNum, - expectedErr: nil, - }, - { - name: "lookup first duplicate", - paymentHash: hasDuplicates.PaymentHash, - sequenceNumber: duplicateOneSeqNr, - expectedErr: nil, - }, - { - name: "lookup second duplicate", - paymentHash: hasDuplicates.PaymentHash, - sequenceNumber: duplicateTwoSeqNr, - expectedErr: nil, - }, - { - name: "lookup non-existent duplicate", - paymentHash: hasDuplicates.PaymentHash, - sequenceNumber: 999999, - expectedErr: ErrDuplicateNotFound, - }, - { - name: "lookup duplicate, no duplicates bucket", - paymentHash: noDuplicates.PaymentHash, - sequenceNumber: duplicateTwoSeqNr, - expectedErr: ErrNoDuplicateBucket, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - err := kvdb.Update(db, - func(tx walletdb.ReadWriteTx) er.R { - - var seqNrBytes [8]byte - byteOrder.PutUint64( - seqNrBytes[:], test.sequenceNumber, - ) - - _, err := fetchPaymentWithSequenceNumber( - tx, test.paymentHash, seqNrBytes[:], - ) - return err - }, func() {}) - require.True(t, er.Cis(test.expectedErr, err)) - }) - } -} - -// appendDuplicatePayment adds a duplicate payment to an existing payment. Note -// that this function requires a unique sequence number. -// -// This code is *only* intended to replicate legacy duplicate payments in lnd, -// our current schema does not allow duplicates. 
-func appendDuplicatePayment(t *testing.T, db *DB, paymentHash lntypes.Hash, - seqNr uint64) { - - err := kvdb.Update(db, func(tx walletdb.ReadWriteTx) er.R { - bucket, err := fetchPaymentBucketUpdate( - tx, paymentHash, - ) - if err != nil { - return err - } - - // Create the duplicates bucket if it is not - // present. - dup, err := bucket.CreateBucketIfNotExists( - duplicatePaymentsBucket, - ) - if err != nil { - return err - } - - var sequenceKey [8]byte - byteOrder.PutUint64(sequenceKey[:], seqNr) - - // Create duplicate payments for the two dup - // sequence numbers we've setup. - putDuplicatePayment(t, dup, sequenceKey[:], paymentHash) - - // Finally, once we have created our entry we add an index for - // it. - err = createPaymentIndexEntry(tx, sequenceKey[:], paymentHash) - util.RequireNoErr(t, err) - - return nil - }, func() {}) - if err != nil { - t.Fatalf("could not create payment: %v", err) - } -} - -// putDuplicatePayment creates a duplicate payment in the duplicates bucket -// provided with the minimal information required for successful reading. -func putDuplicatePayment(t *testing.T, duplicateBucket kvdb.RwBucket, - sequenceKey []byte, paymentHash lntypes.Hash) { - - paymentBucket, err := duplicateBucket.CreateBucketIfNotExists( - sequenceKey, - ) - util.RequireNoErr(t, err) - - err = paymentBucket.Put(duplicatePaymentSequenceKey, sequenceKey) - util.RequireNoErr(t, err) - - // Generate fake information for the duplicate payment. - info, _, _, err := genInfo() - util.RequireNoErr(t, err) - - // Write the payment info to disk under the creation info key. This code - // is copied rather than using serializePaymentCreationInfo to ensure - // we always write in the legacy format used by duplicate payments. 
- var b bytes.Buffer - var scratch [8]byte - _, errr := b.Write(paymentHash[:]) - require.NoError(t, errr) - - byteOrder.PutUint64(scratch[:], uint64(info.Value)) - _, errr = b.Write(scratch[:]) - require.NoError(t, errr) - - err = serializeTime(&b, info.CreationTime) - util.RequireNoErr(t, err) - - byteOrder.PutUint32(scratch[:4], 0) - _, errr = b.Write(scratch[:4]) - require.NoError(t, errr) - - // Get the PaymentCreationInfo. - err = paymentBucket.Put(duplicatePaymentCreationInfoKey, b.Bytes()) - util.RequireNoErr(t, err) -} diff --git a/lnd/channeldb/peers.go b/lnd/channeldb/peers.go deleted file mode 100644 index 2e818a41..00000000 --- a/lnd/channeldb/peers.go +++ /dev/null @@ -1,122 +0,0 @@ -package channeldb - -import ( - "bytes" - "time" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/lnd/routing/route" -) - -var ( - // peersBucket is the name of a top level bucket in which we store - // information about our peers. Information for different peers is - // stored in buckets keyed by their public key. - // - // - // peers-bucket - // | - // |-- - // | |--flap-count-key: - // | - // |-- - // | |--flap-count-key: - peersBucket = []byte("peers-bucket") - - // flapCountKey is a key used in the peer pubkey sub-bucket that stores - // the timestamp of a peer's last flap count and its all time flap - // count. - flapCountKey = []byte("flap-count") -) - -var ( - // ErrNoPeerBucket is returned when we try to read entries for a peer - // that is not tracked. - ErrNoPeerBucket = Err.CodeWithDetail("ErrNoPeerBucket", "peer bucket not found") -) - -// FlapCount contains information about a peer's flap count. -type FlapCount struct { - // Count provides the total flap count for a peer. - Count uint32 - - // LastFlap is the timestamp of the last flap recorded for a peer. 
- LastFlap time.Time -} - -// WriteFlapCounts writes the flap count for a set of peers to disk, creating a -// bucket for the peer's pubkey if necessary. Note that this function overwrites -// the current value. -func (d *DB) WriteFlapCounts(flapCounts map[route.Vertex]*FlapCount) er.R { - return kvdb.Update(d, func(tx kvdb.RwTx) er.R { - // Run through our set of flap counts and record them for - // each peer, creating a bucket for the peer pubkey if required. - for peer, flapCount := range flapCounts { - peers := tx.ReadWriteBucket(peersBucket) - - peerBucket, err := peers.CreateBucketIfNotExists( - peer[:], - ) - if err != nil { - return err - } - - var b bytes.Buffer - errr := serializeTime(&b, flapCount.LastFlap) - if errr != nil { - return errr - } - - if errr = WriteElement(&b, flapCount.Count); errr != nil { - return errr - } - - err = peerBucket.Put(flapCountKey, b.Bytes()) - if err != nil { - return err - } - } - - return nil - }, func() {}) -} - -// ReadFlapCount attempts to read the flap count for a peer, failing if the -// peer is not found or we do not have flap count stored. 
-func (d *DB) ReadFlapCount(pubkey route.Vertex) (*FlapCount, er.R) { - var flapCount FlapCount - - if err := kvdb.View(d, func(tx kvdb.RTx) er.R { - peers := tx.ReadBucket(peersBucket) - - peerBucket := peers.NestedReadBucket(pubkey[:]) - if peerBucket == nil { - return ErrNoPeerBucket.Default() - } - - flapBytes := peerBucket.Get(flapCountKey) - if flapBytes == nil { - return er.Errorf("flap count not recorded for: %v", - pubkey) - } - - var ( - err er.R - r = bytes.NewReader(flapBytes) - ) - - flapCount.LastFlap, err = deserializeTime(r) - if err != nil { - return err - } - - return ReadElements(r, &flapCount.Count) - }, func() { - flapCount = FlapCount{} - }); err != nil { - return nil, err - } - - return &flapCount, nil -} diff --git a/lnd/channeldb/peers_test.go b/lnd/channeldb/peers_test.go deleted file mode 100644 index c90dbf5f..00000000 --- a/lnd/channeldb/peers_test.go +++ /dev/null @@ -1,51 +0,0 @@ -package channeldb - -import ( - "testing" - "time" - - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/lnd/routing/route" - "github.com/stretchr/testify/require" -) - -// TestFlapCount tests lookup and writing of flap count to disk. -func TestFlapCount(t *testing.T) { - db, cleanup, err := MakeTestDB() - util.RequireNoErr(t, err) - defer cleanup() - - // Try to read flap count for a peer that we have no records for. - _, err = db.ReadFlapCount(testPub) - require.True(t, ErrNoPeerBucket.Is(err)) - - var ( - testPub2 = route.Vertex{2, 2, 2} - peer1FlapCount = &FlapCount{ - Count: 20, - LastFlap: time.Unix(100, 23), - } - peer2FlapCount = &FlapCount{ - Count: 39, - LastFlap: time.Unix(200, 23), - } - ) - - peers := map[route.Vertex]*FlapCount{ - testPub: peer1FlapCount, - testPub2: peer2FlapCount, - } - - err = db.WriteFlapCounts(peers) - util.RequireNoErr(t, err) - - // Lookup flap count for our first pubkey. 
- count, err := db.ReadFlapCount(testPub) - util.RequireNoErr(t, err) - require.Equal(t, peer1FlapCount, count) - - // Lookup our flap count for the second peer. - count, err = db.ReadFlapCount(testPub2) - util.RequireNoErr(t, err) - require.Equal(t, peer2FlapCount, count) -} diff --git a/lnd/channeldb/reject_cache.go b/lnd/channeldb/reject_cache.go deleted file mode 100644 index acadb878..00000000 --- a/lnd/channeldb/reject_cache.go +++ /dev/null @@ -1,95 +0,0 @@ -package channeldb - -// rejectFlags is a compact representation of various metadata stored by the -// reject cache about a particular channel. -type rejectFlags uint8 - -const ( - // rejectFlagExists is a flag indicating whether the channel exists, - // i.e. the channel is open and has a recent channel update. If this - // flag is not set, the channel is either a zombie or unknown. - rejectFlagExists rejectFlags = 1 << iota - - // rejectFlagZombie is a flag indicating whether the channel is a - // zombie, i.e. the channel is open but has no recent channel updates. - rejectFlagZombie -) - -// packRejectFlags computes the rejectFlags corresponding to the passed boolean -// values indicating whether the edge exists or is a zombie. -func packRejectFlags(exists, isZombie bool) rejectFlags { - var flags rejectFlags - if exists { - flags |= rejectFlagExists - } - if isZombie { - flags |= rejectFlagZombie - } - - return flags -} - -// unpack returns the booleans packed into the rejectFlags. The first indicates -// if the edge exists in our graph, the second indicates if the edge is a -// zombie. -func (f rejectFlags) unpack() (bool, bool) { - return f&rejectFlagExists == rejectFlagExists, - f&rejectFlagZombie == rejectFlagZombie -} - -// rejectCacheEntry caches frequently accessed information about a channel, -// including the timestamps of its latest edge policies and whether or not the -// channel exists in the graph. 
-type rejectCacheEntry struct { - upd1Time int64 - upd2Time int64 - flags rejectFlags -} - -// rejectCache is an in-memory cache used to improve the performance of -// HasChannelEdge. It caches information about the whether or channel exists, as -// well as the most recent timestamps for each policy (if they exists). -type rejectCache struct { - n int - edges map[uint64]rejectCacheEntry -} - -// newRejectCache creates a new rejectCache with maximum capacity of n entries. -func newRejectCache(n int) *rejectCache { - return &rejectCache{ - n: n, - edges: make(map[uint64]rejectCacheEntry, n), - } -} - -// get returns the entry from the cache for chanid, if it exists. -func (c *rejectCache) get(chanid uint64) (rejectCacheEntry, bool) { - entry, ok := c.edges[chanid] - return entry, ok -} - -// insert adds the entry to the reject cache. If an entry for chanid already -// exists, it will be replaced with the new entry. If the entry doesn't exists, -// it will be inserted to the cache, performing a random eviction if the cache -// is at capacity. -func (c *rejectCache) insert(chanid uint64, entry rejectCacheEntry) { - // If entry exists, replace it. - if _, ok := c.edges[chanid]; ok { - c.edges[chanid] = entry - return - } - - // Otherwise, evict an entry at random and insert. - if len(c.edges) == c.n { - for id := range c.edges { - delete(c.edges, id) - break - } - } - c.edges[chanid] = entry -} - -// remove deletes an entry for chanid from the cache, if it exists. -func (c *rejectCache) remove(chanid uint64) { - delete(c.edges, chanid) -} diff --git a/lnd/channeldb/reject_cache_test.go b/lnd/channeldb/reject_cache_test.go deleted file mode 100644 index 6974f425..00000000 --- a/lnd/channeldb/reject_cache_test.go +++ /dev/null @@ -1,107 +0,0 @@ -package channeldb - -import ( - "reflect" - "testing" -) - -// TestRejectCache checks the behavior of the rejectCache with respect to insertion, -// eviction, and removal of cache entries. 
-func TestRejectCache(t *testing.T) { - const cacheSize = 100 - - // Create a new reject cache with the configured max size. - c := newRejectCache(cacheSize) - - // As a sanity check, assert that querying the empty cache does not - // return an entry. - _, ok := c.get(0) - if ok { - t.Fatalf("reject cache should be empty") - } - - // Now, fill up the cache entirely. - for i := uint64(0); i < cacheSize; i++ { - c.insert(i, entryForInt(i)) - } - - // Assert that the cache has all of the entries just inserted, since no - // eviction should occur until we try to surpass the max size. - assertHasEntries(t, c, 0, cacheSize) - - // Now, insert a new element that causes the cache to evict an element. - c.insert(cacheSize, entryForInt(cacheSize)) - - // Assert that the cache has this last entry, as the cache should evict - // some prior element and not the newly inserted one. - assertHasEntries(t, c, cacheSize, cacheSize) - - // Iterate over all inserted elements and construct a set of the evicted - // elements. - evicted := make(map[uint64]struct{}) - for i := uint64(0); i < cacheSize+1; i++ { - _, ok := c.get(i) - if !ok { - evicted[i] = struct{}{} - } - } - - // Assert that exactly one element has been evicted. - numEvicted := len(evicted) - if numEvicted != 1 { - t.Fatalf("expected one evicted entry, got: %d", numEvicted) - } - - // Remove the highest item which initially caused the eviction and - // reinsert the element that was evicted prior. - c.remove(cacheSize) - for i := range evicted { - c.insert(i, entryForInt(i)) - } - - // Since the removal created an extra slot, the last insertion should - // not have caused an eviction and the entries for all channels in the - // original set that filled the cache should be present. - assertHasEntries(t, c, 0, cacheSize) - - // Finally, reinsert the existing set back into the cache and test that - // the cache still has all the entries. 
If the randomized eviction were - // happening on inserts for existing cache items, we expect this to fail - // with high probability. - for i := uint64(0); i < cacheSize; i++ { - c.insert(i, entryForInt(i)) - } - assertHasEntries(t, c, 0, cacheSize) - -} - -// assertHasEntries queries the reject cache for all channels in the range [start, -// end), asserting that they exist and their value matches the entry produced by -// entryForInt. -func assertHasEntries(t *testing.T, c *rejectCache, start, end uint64) { - t.Helper() - - for i := start; i < end; i++ { - entry, ok := c.get(i) - if !ok { - t.Fatalf("reject cache should contain chan %d", i) - } - - expEntry := entryForInt(i) - if !reflect.DeepEqual(entry, expEntry) { - t.Fatalf("entry mismatch, want: %v, got: %v", - expEntry, entry) - } - } -} - -// entryForInt generates a unique rejectCacheEntry given an integer. -func entryForInt(i uint64) rejectCacheEntry { - exists := i%2 == 0 - isZombie := i%3 == 0 - return rejectCacheEntry{ - upd1Time: int64(2 * i), - upd2Time: int64(2*i + 1), - flags: packRejectFlags(exists, isZombie), - } -} diff --git a/lnd/channeldb/reports.go b/lnd/channeldb/reports.go deleted file mode 100644 index b521a1c2..00000000 --- a/lnd/channeldb/reports.go +++ /dev/null @@ -1,354 +0,0 @@ -package channeldb - -import ( - "bytes" - "io" - - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/lnd/tlv" - "github.com/pkt-cash/pktd/wire" -) - -var ( - // closeSummaryBucket is a top level bucket which holds additional - // information about channel closes. It nests channels by chainhash - // and channel point. - // [closeSummaryBucket] - // [chainHashBucket] - // [channelBucket] - // [resolversBucket] - closeSummaryBucket = []byte("close-summaries") - - // resolversBucket holds the outcome of a channel's resolvers. 
It is - // nested under a channel and chainhash bucket in the close summaries - // bucket. - resolversBucket = []byte("resolvers-bucket") -) - -var ( - // ErrNoChainHashBucket is returned when we have not created a bucket - // for the current chain hash. - ErrNoChainHashBucket = Err.CodeWithDetail("ErrNoChainHashBucket", "no chain hash bucket") - - // ErrNoChannelSummaries is returned when a channel is not found in the - // chain hash bucket. - ErrNoChannelSummaries = Err.CodeWithDetail("ErrNoChannelSummaries", "channel bucket not found") - - amountType tlv.Type = 1 - resolverType tlv.Type = 2 - outcomeType tlv.Type = 3 - spendTxIDType tlv.Type = 4 -) - -// ResolverType indicates the type of resolver that was resolved on chain. -type ResolverType uint8 - -const ( - // ResolverTypeAnchor represents a resolver for an anchor output. - ResolverTypeAnchor ResolverType = 0 - - // ResolverTypeIncomingHtlc represents resolution of an incoming htlc. - ResolverTypeIncomingHtlc ResolverType = 1 - - // ResolverTypeOutgoingHtlc represents resolution of an outgoing htlc. - ResolverTypeOutgoingHtlc ResolverType = 2 - - // ResolverTypeCommit represents resolution of our time locked commit - // when we force close. - ResolverTypeCommit ResolverType = 3 -) - -// ResolverOutcome indicates the outcome for the resolver that that the contract -// court reached. This state is not necessarily final, since htlcs on our own -// commitment are resolved across two resolvers. -type ResolverOutcome uint8 - -const ( - // ResolverOutcomeClaimed indicates that funds were claimed on chain. - ResolverOutcomeClaimed ResolverOutcome = 0 - - // ResolverOutcomeUnclaimed indicates that we did not claim our funds on - // chain. This may be the case for anchors that we did not sweep, or - // outputs that were not economical to sweep. - ResolverOutcomeUnclaimed ResolverOutcome = 1 - - // ResolverOutcomeAbandoned indicates that we did not attempt to claim - // an output on chain. 
This is the case for htlcs that we could not - // decode to claim, or invoice which we fail when an attempt is made - // to settle them on chain. - ResolverOutcomeAbandoned ResolverOutcome = 2 - - // ResolverOutcomeTimeout indicates that a contract was timed out on - // chain. - ResolverOutcomeTimeout ResolverOutcome = 3 - - // ResolverOutcomeFirstStage indicates that a htlc had to be claimed - // over two stages, with this outcome representing the confirmation - // of our success/timeout tx. - ResolverOutcomeFirstStage ResolverOutcome = 4 -) - -// ResolverReport provides an account of the outcome of a resolver. This differs -// from a ContractReport because it does not necessarily fully resolve the -// contract; each step of two stage htlc resolution is included. -type ResolverReport struct { - // OutPoint is the on chain outpoint that was spent as a result of this - // resolution. When an output is directly resolved (eg, commitment - // sweeps and single stage htlcs on the remote party's output) this - // is an output on the commitment tx that was broadcast. When we resolve - // across two stages (eg, htlcs on our own force close commit), the - // first stage outpoint is the output on our commitment and the second - // stage output is the spend from our htlc success/timeout tx. - OutPoint wire.OutPoint - - // Amount is the value of the output referenced above. - Amount btcutil.Amount - - // ResolverType indicates the type of resolution that occurred. - ResolverType - - // ResolverOutcome indicates the outcome of the resolver. - ResolverOutcome - - // SpendTxID is the transaction ID of the spending transaction that - // claimed the outpoint. This may be a sweep transaction, or a first - // stage success/timeout transaction. - SpendTxID *chainhash.Hash -} - -// PutResolverReport creates and commits a transaction that is used to write a -// resolver report to disk. 
-func (d *DB) PutResolverReport(tx kvdb.RwTx, chainHash chainhash.Hash, - channelOutpoint *wire.OutPoint, report *ResolverReport) er.R { - - putReportFunc := func(tx kvdb.RwTx) er.R { - return putReport(tx, chainHash, channelOutpoint, report) - } - - // If the transaction is nil, we'll create a new one. - if tx == nil { - return kvdb.Update(d, putReportFunc, func() {}) - } - - // Otherwise, we can write the report to disk using the existing - // transaction. - return putReportFunc(tx) -} - -// putReport puts a report in the bucket provided, with its outpoint as its key. -func putReport(tx kvdb.RwTx, chainHash chainhash.Hash, - channelOutpoint *wire.OutPoint, report *ResolverReport) er.R { - - channelBucket, err := fetchReportWriteBucket( - tx, chainHash, channelOutpoint, - ) - if err != nil { - return err - } - - // If the resolvers bucket does not exist yet, create it. - resolvers, err := channelBucket.CreateBucketIfNotExists( - resolversBucket, - ) - if err != nil { - return err - } - - var valueBuf bytes.Buffer - if err := serializeReport(&valueBuf, report); err != nil { - return err - } - - // Finally write our outpoint to be used as the key for this record. - var keyBuf bytes.Buffer - if err := writeOutpoint(&keyBuf, &report.OutPoint); err != nil { - return err - } - - return resolvers.Put(keyBuf.Bytes(), valueBuf.Bytes()) -} - -// serializeReport serialized a report using a TLV stream to allow for optional -// fields. -func serializeReport(w io.Writer, report *ResolverReport) er.R { - amt := uint64(report.Amount) - resolver := uint8(report.ResolverType) - outcome := uint8(report.ResolverOutcome) - - // Create a set of TLV records for the values we know to be present. - records := []tlv.Record{ - tlv.MakePrimitiveRecord(amountType, &amt), - tlv.MakePrimitiveRecord(resolverType, &resolver), - tlv.MakePrimitiveRecord(outcomeType, &outcome), - } - - // If our spend txid is non-nil, we add a tlv entry for it. 
- if report.SpendTxID != nil { - var spendBuf bytes.Buffer - err := WriteElement(&spendBuf, *report.SpendTxID) - if err != nil { - return err - } - spendBytes := spendBuf.Bytes() - - records = append(records, tlv.MakePrimitiveRecord( - spendTxIDType, &spendBytes, - )) - } - - // Create our stream and encode it. - tlvStream, err := tlv.NewStream(records...) - if err != nil { - return err - } - - return tlvStream.Encode(w) -} - -// FetchChannelReports fetches the set of reports for a channel. -func (d DB) FetchChannelReports(chainHash chainhash.Hash, - outPoint *wire.OutPoint) ([]*ResolverReport, er.R) { - - var reports []*ResolverReport - - if err := kvdb.View(d, func(tx kvdb.RTx) er.R { - chanBucket, err := fetchReportReadBucket( - tx, chainHash, outPoint, - ) - if err != nil { - return err - } - - // If there are no resolvers for this channel, we simply - // return nil, because nothing has been persisted yet. - resolvers := chanBucket.NestedReadBucket(resolversBucket) - if resolvers == nil { - return nil - } - - // Run through each resolution and add it to our set of - // resolutions. - return resolvers.ForEach(func(k, v []byte) er.R { - // Deserialize the contents of our field. - r := bytes.NewReader(v) - report, err := deserializeReport(r) - if err != nil { - return err - } - - // Once we have read our values out, set the outpoint - // on the report using the key. - r = bytes.NewReader(k) - if err := ReadElement(r, &report.OutPoint); err != nil { - return err - } - - reports = append(reports, report) - - return nil - }) - }, func() { - reports = nil - }); err != nil { - return nil, err - } - - return reports, nil -} - -// deserializeReport gets a resolver report from a tlv stream. The outpoint on -// the resolver will not be set because we key reports by their outpoint, and -// this function reads only the values saved in the stream. 
-func deserializeReport(r io.Reader) (*ResolverReport, er.R) { - var ( - resolver, outcome uint8 - amt uint64 - spentTx []byte - ) - - tlvStream, err := tlv.NewStream( - tlv.MakePrimitiveRecord(amountType, &amt), - tlv.MakePrimitiveRecord(resolverType, &resolver), - tlv.MakePrimitiveRecord(outcomeType, &outcome), - tlv.MakePrimitiveRecord(spendTxIDType, &spentTx), - ) - if err != nil { - return nil, err - } - - if err := tlvStream.Decode(r); err != nil { - return nil, err - } - - report := &ResolverReport{ - Amount: btcutil.Amount(amt), - ResolverOutcome: ResolverOutcome(outcome), - ResolverType: ResolverType(resolver), - } - - // If our spend tx is set, we set it on our report. - if len(spentTx) != 0 { - spendTx, err := chainhash.NewHash(spentTx) - if err != nil { - return nil, err - } - report.SpendTxID = spendTx - } - - return report, nil -} - -// fetchReportWriteBucket returns a write channel bucket within the reports -// top level bucket. If the channel's bucket does not yet exist, it will be -// created. -func fetchReportWriteBucket(tx kvdb.RwTx, chainHash chainhash.Hash, - outPoint *wire.OutPoint) (kvdb.RwBucket, er.R) { - - // Get the channel close summary bucket. - closedBucket := tx.ReadWriteBucket(closeSummaryBucket) - - // Create the chain hash bucket if it does not exist. - chainHashBkt, err := closedBucket.CreateBucketIfNotExists(chainHash[:]) - if err != nil { - return nil, err - } - - var chanPointBuf bytes.Buffer - if err := writeOutpoint(&chanPointBuf, outPoint); err != nil { - return nil, err - } - - return chainHashBkt.CreateBucketIfNotExists(chanPointBuf.Bytes()) -} - -// fetchReportReadBucket returns a read channel bucket within the reports -// top level bucket. If any bucket along the way does not exist, it will error. -func fetchReportReadBucket(tx kvdb.RTx, chainHash chainhash.Hash, - outPoint *wire.OutPoint) (kvdb.RBucket, er.R) { - - // First fetch the top level channel close summary bucket. 
- closeBucket := tx.ReadBucket(closeSummaryBucket) - - // Next we get the chain hash bucket for our current chain. - chainHashBucket := closeBucket.NestedReadBucket(chainHash[:]) - if chainHashBucket == nil { - return nil, ErrNoChainHashBucket.Default() - } - - // With the bucket for the node and chain fetched, we can now go down - // another level, for the channel itself. - var chanPointBuf bytes.Buffer - if err := writeOutpoint(&chanPointBuf, outPoint); err != nil { - return nil, err - } - - chanBucket := chainHashBucket.NestedReadBucket(chanPointBuf.Bytes()) - if chanBucket == nil { - return nil, ErrNoChannelSummaries.Default() - } - - return chanBucket, nil -} diff --git a/lnd/channeldb/reports_test.go b/lnd/channeldb/reports_test.go deleted file mode 100644 index 99d0c626..00000000 --- a/lnd/channeldb/reports_test.go +++ /dev/null @@ -1,220 +0,0 @@ -package channeldb - -import ( - "bytes" - "testing" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/wire" - "github.com/stretchr/testify/require" -) - -var ( - testChainHash = [chainhash.HashSize]byte{ - 0x51, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda, - 0x48, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17, - 0x2d, 0xe7, 0x93, 0xe4, - } - - testChanPoint1 = wire.OutPoint{ - Hash: chainhash.Hash{ - 0x51, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda, - 0x48, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17, - 0x2d, 0xe7, 0x93, 0xe4, - }, - Index: 1, - } -) - -// TestPersistReport tests the writing and retrieval of a report on disk with -// and without a spend txid. 
-func TestPersistReport(t *testing.T) { - tests := []struct { - name string - spendTxID *chainhash.Hash - }{ - { - name: "Non-nil spend txid", - spendTxID: &testChanPoint1.Hash, - }, - { - name: "Nil spend txid", - spendTxID: nil, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - db, cleanup, err := MakeTestDB() - util.RequireNoErr(t, err) - defer cleanup() - - channelOutpoint := testChanPoint1 - - testOutpoint := testChanPoint1 - testOutpoint.Index++ - - report := &ResolverReport{ - OutPoint: testOutpoint, - Amount: 2, - ResolverType: 1, - ResolverOutcome: 2, - SpendTxID: test.spendTxID, - } - - // Write report to disk, and ensure it is identical when - // it is read. - err = db.PutResolverReport( - nil, testChainHash, &channelOutpoint, report, - ) - util.RequireNoErr(t, err) - - reports, err := db.FetchChannelReports( - testChainHash, &channelOutpoint, - ) - util.RequireNoErr(t, err) - require.Equal(t, report, reports[0]) - }) - } -} - -// TestFetchChannelReadBucket tests retrieval of the reports bucket for a -// channel, testing that the appropriate error is returned based on the state -// of the existing bucket. -func TestFetchChannelReadBucket(t *testing.T) { - db, cleanup, err := MakeTestDB() - util.RequireNoErr(t, err) - defer cleanup() - - channelOutpoint := testChanPoint1 - - testOutpoint := testChanPoint1 - testOutpoint.Index++ - - // If we attempt to get reports when we do not have any present, we - // expect to fail because our chain hash bucket is not present. - _, err = db.FetchChannelReports( - testChainHash, &channelOutpoint, - ) - require.True(t, ErrNoChainHashBucket.Is(err)) - - // Finally we write a report to disk and check that we can fetch it. 
- report := &ResolverReport{ - OutPoint: testOutpoint, - Amount: 2, - ResolverOutcome: 1, - ResolverType: 2, - SpendTxID: nil, - } - - err = db.PutResolverReport( - nil, testChainHash, &channelOutpoint, report, - ) - util.RequireNoErr(t, err) - - // Now that the channel bucket exists, we expect the channel to be - // successfully fetched, with no reports. - reports, err := db.FetchChannelReports(testChainHash, &testChanPoint1) - util.RequireNoErr(t, err) - require.Equal(t, report, reports[0]) -} - -// TestFetchChannelWriteBucket tests the creation of missing buckets when -// retrieving the reports bucket. -func TestFetchChannelWriteBucket(t *testing.T) { - createReportsBucket := func(tx kvdb.RwTx) (kvdb.RwBucket, er.R) { - return tx.CreateTopLevelBucket(closedChannelBucket) - } - - createChainHashBucket := func(reports kvdb.RwBucket) (kvdb.RwBucket, - er.R) { - - return reports.CreateBucketIfNotExists(testChainHash[:]) - } - - createChannelBucket := func(chainHash kvdb.RwBucket) (kvdb.RwBucket, - er.R) { - - var chanPointBuf bytes.Buffer - err := writeOutpoint(&chanPointBuf, &testChanPoint1) - util.RequireNoErr(t, err) - - return chainHash.CreateBucketIfNotExists(chanPointBuf.Bytes()) - } - - tests := []struct { - name string - setup func(tx kvdb.RwTx) er.R - }{ - { - name: "no existing buckets", - setup: func(tx kvdb.RwTx) er.R { - return nil - }, - }, - { - name: "reports bucket exists", - setup: func(tx kvdb.RwTx) er.R { - _, err := createReportsBucket(tx) - return err - }, - }, - { - name: "chainhash bucket exists", - setup: func(tx kvdb.RwTx) er.R { - reports, err := createReportsBucket(tx) - if err != nil { - return err - } - - _, err = createChainHashBucket(reports) - return err - }, - }, - { - name: "channel bucket exists", - setup: func(tx kvdb.RwTx) er.R { - reports, err := createReportsBucket(tx) - if err != nil { - return err - } - - chainHash, err := createChainHashBucket(reports) - if err != nil { - return err - } - - _, err = 
createChannelBucket(chainHash) - return err - }, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - db, cleanup, err := MakeTestDB() - util.RequireNoErr(t, err) - defer cleanup() - - // Update our db to the starting state we expect. - err = kvdb.Update(db, test.setup, func() {}) - util.RequireNoErr(t, err) - - // Try to get our report bucket. - err = kvdb.Update(db, func(tx kvdb.RwTx) er.R { - _, err := fetchReportWriteBucket( - tx, testChainHash, &testChanPoint1, - ) - return err - }, func() {}) - util.RequireNoErr(t, err) - }) - } -} diff --git a/lnd/channeldb/waitingproof.go b/lnd/channeldb/waitingproof.go deleted file mode 100644 index 12c3caf7..00000000 --- a/lnd/channeldb/waitingproof.go +++ /dev/null @@ -1,256 +0,0 @@ -package channeldb - -import ( - "encoding/binary" - "sync" - - "io" - - "bytes" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -var ( - // waitingProofsBucketKey byte string name of the waiting proofs store. - waitingProofsBucketKey = []byte("waitingproofs") - - // ErrWaitingProofNotFound is returned if waiting proofs haven't been - // found by db. - ErrWaitingProofNotFound = Err.CodeWithDetail("ErrWaitingProofNotFound", "waiting proofs haven't been "+ - "found") - - // ErrWaitingProofAlreadyExist is returned if waiting proofs haven't been - // found by db. - ErrWaitingProofAlreadyExist = Err.CodeWithDetail("ErrWaitingProofAlreadyExist", "waiting proof with such "+ - "key already exist") -) - -// WaitingProofStore is the bold db map-like storage for half announcement -// signatures. The one responsibility of this storage is to be able to -// retrieve waiting proofs after client restart. -type WaitingProofStore struct { - // cache is used in order to reduce the number of redundant get - // calls, when object isn't stored in it. 
- cache map[WaitingProofKey]struct{} - db *DB - mu sync.RWMutex -} - -// NewWaitingProofStore creates new instance of proofs storage. -func NewWaitingProofStore(db *DB) (*WaitingProofStore, er.R) { - s := &WaitingProofStore{ - db: db, - } - - if err := s.ForAll(func(proof *WaitingProof) er.R { - s.cache[proof.Key()] = struct{}{} - return nil - }, func() { - s.cache = make(map[WaitingProofKey]struct{}) - }); err != nil && !ErrWaitingProofNotFound.Is(err) { - return nil, err - } - - return s, nil -} - -// Add adds new waiting proof in the storage. -func (s *WaitingProofStore) Add(proof *WaitingProof) er.R { - s.mu.Lock() - defer s.mu.Unlock() - - err := kvdb.Update(s.db, func(tx kvdb.RwTx) er.R { - var err er.R - var b bytes.Buffer - - // Get or create the bucket. - bucket, err := tx.CreateTopLevelBucket(waitingProofsBucketKey) - if err != nil { - return err - } - - // Encode the objects and place it in the bucket. - if err := proof.Encode(&b); err != nil { - return err - } - - key := proof.Key() - - return bucket.Put(key[:], b.Bytes()) - }, func() {}) - if err != nil { - return err - } - - // Knowing that the write succeeded, we can now update the in-memory - // cache with the proof's key. - s.cache[proof.Key()] = struct{}{} - - return nil -} - -// Remove removes the proof from storage by its key. -func (s *WaitingProofStore) Remove(key WaitingProofKey) er.R { - s.mu.Lock() - defer s.mu.Unlock() - - if _, ok := s.cache[key]; !ok { - return ErrWaitingProofNotFound.Default() - } - - err := kvdb.Update(s.db, func(tx kvdb.RwTx) er.R { - // Get or create the top bucket. - bucket := tx.ReadWriteBucket(waitingProofsBucketKey) - if bucket == nil { - return ErrWaitingProofNotFound.Default() - } - - return bucket.Delete(key[:]) - }, func() {}) - if err != nil { - return err - } - - // Since the proof was successfully deleted from the store, we can now - // remove it from the in-memory cache. 
- delete(s.cache, key) - - return nil -} - -// ForAll iterates thought all waiting proofs and passing the waiting proof -// in the given callback. -func (s *WaitingProofStore) ForAll(cb func(*WaitingProof) er.R, reset func()) er.R { - - return kvdb.View(s.db, func(tx kvdb.RTx) er.R { - bucket := tx.ReadBucket(waitingProofsBucketKey) - if bucket == nil { - return ErrWaitingProofNotFound.Default() - } - - // Iterate over objects buckets. - return bucket.ForEach(func(k, v []byte) er.R { - // Skip buckets fields. - if v == nil { - return nil - } - - r := bytes.NewReader(v) - proof := &WaitingProof{} - if err := proof.Decode(r); err != nil { - return err - } - - return cb(proof) - }) - }, reset) -} - -// Get returns the object which corresponds to the given index. -func (s *WaitingProofStore) Get(key WaitingProofKey) (*WaitingProof, er.R) { - var proof *WaitingProof - - s.mu.RLock() - defer s.mu.RUnlock() - - if _, ok := s.cache[key]; !ok { - return nil, ErrWaitingProofNotFound.Default() - } - - err := kvdb.View(s.db, func(tx kvdb.RTx) er.R { - bucket := tx.ReadBucket(waitingProofsBucketKey) - if bucket == nil { - return ErrWaitingProofNotFound.Default() - } - - // Iterate over objects buckets. - v := bucket.Get(key[:]) - if v == nil { - return ErrWaitingProofNotFound.Default() - } - - r := bytes.NewReader(v) - return proof.Decode(r) - }, func() { - proof = &WaitingProof{} - }) - - return proof, err -} - -// WaitingProofKey is the proof key which uniquely identifies the waiting -// proof object. The goal of this key is distinguish the local and remote -// proof for the same channel id. -type WaitingProofKey [9]byte - -// WaitingProof is the storable object, which encapsulate the half proof and -// the information about from which side this proof came. This structure is -// needed to make channel proof exchange persistent, so that after client -// restart we may receive remote/local half proof and process it. 
-type WaitingProof struct { - *lnwire.AnnounceSignatures - isRemote bool -} - -// NewWaitingProof constructs a new waiting prof instance. -func NewWaitingProof(isRemote bool, proof *lnwire.AnnounceSignatures) *WaitingProof { - return &WaitingProof{ - AnnounceSignatures: proof, - isRemote: isRemote, - } -} - -// OppositeKey returns the key which uniquely identifies opposite waiting proof. -func (p *WaitingProof) OppositeKey() WaitingProofKey { - var key [9]byte - binary.BigEndian.PutUint64(key[:8], p.ShortChannelID.ToUint64()) - - if !p.isRemote { - key[8] = 1 - } - return key -} - -// Key returns the key which uniquely identifies waiting proof. -func (p *WaitingProof) Key() WaitingProofKey { - var key [9]byte - binary.BigEndian.PutUint64(key[:8], p.ShortChannelID.ToUint64()) - - if p.isRemote { - key[8] = 1 - } - return key -} - -// Encode writes the internal representation of waiting proof in byte stream. -func (p *WaitingProof) Encode(w io.Writer) er.R { - if err := util.WriteBin(w, byteOrder, p.isRemote); err != nil { - return err - } - - if err := p.AnnounceSignatures.Encode(w, 0); err != nil { - return err - } - - return nil -} - -// Decode reads the data from the byte stream and initializes the -// waiting proof object with it. -func (p *WaitingProof) Decode(r io.Reader) er.R { - if err := util.ReadBin(r, byteOrder, &p.isRemote); err != nil { - return err - } - - msg := &lnwire.AnnounceSignatures{} - if err := msg.Decode(r, 0); err != nil { - return err - } - - (*p).AnnounceSignatures = msg - return nil -} diff --git a/lnd/channeldb/waitingproof_test.go b/lnd/channeldb/waitingproof_test.go deleted file mode 100644 index 44bec037..00000000 --- a/lnd/channeldb/waitingproof_test.go +++ /dev/null @@ -1,59 +0,0 @@ -package channeldb - -import ( - "testing" - - "reflect" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -// TestWaitingProofStore tests add/get/remove functions of the waiting proof -// storage. 
-func TestWaitingProofStore(t *testing.T) { - t.Parallel() - - db, cleanup, err := MakeTestDB() - if err != nil { - t.Fatalf("failed to make test database: %s", err) - } - defer cleanup() - - proof1 := NewWaitingProof(true, &lnwire.AnnounceSignatures{ - NodeSignature: wireSig, - BitcoinSignature: wireSig, - }) - - store, err := NewWaitingProofStore(db) - if err != nil { - t.Fatalf("unable to create the waiting proofs storage: %v", - err) - } - - if err := store.Add(proof1); err != nil { - t.Fatalf("unable add proof to storage: %v", err) - } - - proof2, err := store.Get(proof1.Key()) - if err != nil { - t.Fatalf("unable retrieve proof from storage: %v", err) - } - if !reflect.DeepEqual(proof1, proof2) { - t.Fatal("wrong proof retrieved") - } - - if _, err := store.Get(proof1.OppositeKey()); !ErrWaitingProofNotFound.Is(err) { - t.Fatalf("proof shouldn't be found: %v", err) - } - - if err := store.Remove(proof1.Key()); err != nil { - t.Fatalf("unable remove proof from storage: %v", err) - } - - if err := store.ForAll(func(proof *WaitingProof) er.R { - return er.New("storage should be empty") - }, func() {}); err != nil && !ErrWaitingProofNotFound.Is(err) { - t.Fatal(err) - } -} diff --git a/lnd/channeldb/witness_cache.go b/lnd/channeldb/witness_cache.go deleted file mode 100644 index bd79160f..00000000 --- a/lnd/channeldb/witness_cache.go +++ /dev/null @@ -1,232 +0,0 @@ -package channeldb - -import ( - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/lnd/lntypes" -) - -var ( - // ErrNoWitnesses is an error that's returned when no new witnesses have - // been added to the WitnessCache. 
- ErrNoWitnesses = Err.CodeWithDetail("ErrNoWitnesses", - "no witnesses") - - // ErrUnknownWitnessType is returned if a caller attempts to - ErrUnknownWitnessType = Err.CodeWithDetail("ErrUnknownWitnessType", - "unknown witness type") -) - -// WitnessType is enum that denotes what "type" of witness is being -// stored/retrieved. As the WitnessCache itself is agnostic and doesn't enforce -// any structure on added witnesses, we use this type to partition the -// witnesses on disk, and also to know how to map a witness to its look up key. -type WitnessType uint8 - -var ( - // Sha256HashWitness is a witness that is simply the pre image to a - // hash image. In order to map to its key, we'll use sha256. - Sha256HashWitness WitnessType = 1 -) - -// toDBKey is a helper method that maps a witness type to the key that we'll -// use to store it within the database. -func (w WitnessType) toDBKey() ([]byte, er.R) { - switch w { - - case Sha256HashWitness: - return []byte{byte(w)}, nil - - default: - return nil, ErrUnknownWitnessType.Default() - } -} - -var ( - // witnessBucketKey is the name of the bucket that we use to store all - // witnesses encountered. Within this bucket, we'll create a sub-bucket for - // each witness type. - witnessBucketKey = []byte("byte") -) - -// WitnessCache is a persistent cache of all witnesses we've encountered on the -// network. In the case of multi-hop, multi-step contracts, a cache of all -// witnesses can be useful in the case of partial contract resolution. If -// negotiations break down, we may be forced to locate the witness for a -// portion of the contract on-chain. In this case, we'll then add that witness -// to the cache so the incoming contract can fully resolve witness. -// Additionally, as one MUST always use a unique witness on the network, we may -// use this cache to detect duplicate witnesses. -// -// TODO(roasbeef): need expiry policy? -// * encrypt? 
-type WitnessCache struct { - db *DB -} - -// NewWitnessCache returns a new instance of the witness cache. -func (d *DB) NewWitnessCache() *WitnessCache { - return &WitnessCache{ - db: d, - } -} - -// witnessEntry is a key-value struct that holds each key -> witness pair, used -// when inserting records into the cache. -type witnessEntry struct { - key []byte - witness []byte -} - -// AddSha256Witnesses adds a batch of new sha256 preimages into the witness -// cache. This is an alias for AddWitnesses that uses Sha256HashWitness as the -// preimages' witness type. -func (w *WitnessCache) AddSha256Witnesses(preimages ...lntypes.Preimage) er.R { - // Optimistically compute the preimages' hashes before attempting to - // start the db transaction. - entries := make([]witnessEntry, 0, len(preimages)) - for i := range preimages { - hash := preimages[i].Hash() - entries = append(entries, witnessEntry{ - key: hash[:], - witness: preimages[i][:], - }) - } - - return w.addWitnessEntries(Sha256HashWitness, entries) -} - -// addWitnessEntries inserts the witnessEntry key-value pairs into the cache, -// using the appropriate witness type to segment the namespace of possible -// witness types. -func (w *WitnessCache) addWitnessEntries(wType WitnessType, - entries []witnessEntry) er.R { - - // Exit early if there are no witnesses to add. - if len(entries) == 0 { - return nil - } - - return kvdb.Batch(w.db.Backend, func(tx kvdb.RwTx) er.R { - witnessBucket, err := tx.CreateTopLevelBucket(witnessBucketKey) - if err != nil { - return err - } - - witnessTypeBucketKey, errr := wType.toDBKey() - if errr != nil { - return errr - } - witnessTypeBucket, err := witnessBucket.CreateBucketIfNotExists( - witnessTypeBucketKey, - ) - if err != nil { - return err - } - - for _, entry := range entries { - err = witnessTypeBucket.Put(entry.key, entry.witness) - if err != nil { - return err - } - } - - return nil - }) -} - -// LookupSha256Witness attempts to lookup the preimage for a sha256 hash. 
If -// the witness isn't found, ErrNoWitnesses will be returned. -func (w *WitnessCache) LookupSha256Witness(hash lntypes.Hash) (lntypes.Preimage, er.R) { - witness, err := w.lookupWitness(Sha256HashWitness, hash[:]) - if err != nil { - return lntypes.Preimage{}, err - } - - return lntypes.MakePreimage(witness) -} - -// lookupWitness attempts to lookup a witness according to its type and also -// its witness key. In the case that the witness isn't found, ErrNoWitnesses -// will be returned. -func (w *WitnessCache) lookupWitness(wType WitnessType, witnessKey []byte) ([]byte, er.R) { - var witness []byte - err := kvdb.View(w.db, func(tx kvdb.RTx) er.R { - witnessBucket := tx.ReadBucket(witnessBucketKey) - if witnessBucket == nil { - return ErrNoWitnesses.Default() - } - - witnessTypeBucketKey, err := wType.toDBKey() - if err != nil { - return err - } - witnessTypeBucket := witnessBucket.NestedReadBucket(witnessTypeBucketKey) - if witnessTypeBucket == nil { - return ErrNoWitnesses.Default() - } - - dbWitness := witnessTypeBucket.Get(witnessKey) - if dbWitness == nil { - return ErrNoWitnesses.Default() - } - - witness = make([]byte, len(dbWitness)) - copy(witness[:], dbWitness) - - return nil - }, func() { - witness = nil - }) - if err != nil { - return nil, err - } - - return witness, nil -} - -// DeleteSha256Witness attempts to delete a sha256 preimage identified by hash. -func (w *WitnessCache) DeleteSha256Witness(hash lntypes.Hash) er.R { - return w.deleteWitness(Sha256HashWitness, hash[:]) -} - -// deleteWitness attempts to delete a particular witness from the database. 
-func (w *WitnessCache) deleteWitness(wType WitnessType, witnessKey []byte) er.R { - return kvdb.Batch(w.db.Backend, func(tx kvdb.RwTx) er.R { - witnessBucket, err := tx.CreateTopLevelBucket(witnessBucketKey) - if err != nil { - return err - } - - witnessTypeBucketKey, errr := wType.toDBKey() - if errr != nil { - return errr - } - witnessTypeBucket, err := witnessBucket.CreateBucketIfNotExists( - witnessTypeBucketKey, - ) - if err != nil { - return err - } - - return witnessTypeBucket.Delete(witnessKey) - }) -} - -// DeleteWitnessClass attempts to delete an *entire* class of witnesses. After -// this function return with a non-nil error, -func (w *WitnessCache) DeleteWitnessClass(wType WitnessType) er.R { - return kvdb.Batch(w.db.Backend, func(tx kvdb.RwTx) er.R { - witnessBucket, err := tx.CreateTopLevelBucket(witnessBucketKey) - if err != nil { - return err - } - - witnessTypeBucketKey, errr := wType.toDBKey() - if errr != nil { - return errr - } - - return witnessBucket.DeleteNestedBucket(witnessTypeBucketKey) - }) -} diff --git a/lnd/channeldb/witness_cache_test.go b/lnd/channeldb/witness_cache_test.go deleted file mode 100644 index 7826fadb..00000000 --- a/lnd/channeldb/witness_cache_test.go +++ /dev/null @@ -1,239 +0,0 @@ -package channeldb - -import ( - "crypto/sha256" - "testing" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/lntypes" -) - -// TestWitnessCacheSha256Retrieval tests that we're able to add and lookup new -// sha256 preimages to the witness cache. -func TestWitnessCacheSha256Retrieval(t *testing.T) { - t.Parallel() - - cdb, cleanUp, err := MakeTestDB() - if err != nil { - t.Fatalf("unable to make test database: %v", err) - } - defer cleanUp() - - wCache := cdb.NewWitnessCache() - - // We'll be attempting to add then lookup two simple sha256 preimages - // within this test. 
- preimage1 := lntypes.Preimage(rev) - preimage2 := lntypes.Preimage(key) - - preimages := []lntypes.Preimage{preimage1, preimage2} - hashes := []lntypes.Hash{preimage1.Hash(), preimage2.Hash()} - - // First, we'll attempt to add the preimages to the database. - err = wCache.AddSha256Witnesses(preimages...) - if err != nil { - t.Fatalf("unable to add witness: %v", err) - } - - // With the preimages stored, we'll now attempt to look them up. - for i, hash := range hashes { - preimage := preimages[i] - - // We should get back the *exact* same preimage as we originally - // stored. - dbPreimage, err := wCache.LookupSha256Witness(hash) - if err != nil { - t.Fatalf("unable to look up witness: %v", err) - } - - if preimage != dbPreimage { - t.Fatalf("witnesses don't match: expected %x, got %x", - preimage[:], dbPreimage[:]) - } - } -} - -// TestWitnessCacheSha256Deletion tests that we're able to delete a single -// sha256 preimage, and also a class of witnesses from the cache. -func TestWitnessCacheSha256Deletion(t *testing.T) { - t.Parallel() - - cdb, cleanUp, err := MakeTestDB() - if err != nil { - t.Fatalf("unable to make test database: %v", err) - } - defer cleanUp() - - wCache := cdb.NewWitnessCache() - - // We'll start by adding two preimages to the cache. - preimage1 := lntypes.Preimage(key) - hash1 := preimage1.Hash() - - preimage2 := lntypes.Preimage(rev) - hash2 := preimage2.Hash() - - if err := wCache.AddSha256Witnesses(preimage1); err != nil { - t.Fatalf("unable to add witness: %v", err) - } - - if err := wCache.AddSha256Witnesses(preimage2); err != nil { - t.Fatalf("unable to add witness: %v", err) - } - - // We'll now delete the first preimage. If we attempt to look it up, we - // should get ErrNoWitnesses. 
- err = wCache.DeleteSha256Witness(hash1) - if err != nil { - t.Fatalf("unable to delete witness: %v", err) - } - _, err = wCache.LookupSha256Witness(hash1) - if !ErrNoWitnesses.Is(err) { - t.Fatalf("expected ErrNoWitnesses instead got: %v", err) - } - - // Next, we'll attempt to delete the entire witness class itself. When - // we try to lookup the second preimage, we should again get - // ErrNoWitnesses. - if err := wCache.DeleteWitnessClass(Sha256HashWitness); err != nil { - t.Fatalf("unable to delete witness class: %v", err) - } - _, err = wCache.LookupSha256Witness(hash2) - if !ErrNoWitnesses.Is(err) { - t.Fatalf("expected ErrNoWitnesses instead got: %v", err) - } -} - -// TestWitnessCacheUnknownWitness tests that we get an error if we attempt to -// query/add/delete an unknown witness. -func TestWitnessCacheUnknownWitness(t *testing.T) { - t.Parallel() - - cdb, cleanUp, err := MakeTestDB() - if err != nil { - t.Fatalf("unable to make test database: %v", err) - } - defer cleanUp() - - wCache := cdb.NewWitnessCache() - - // We'll attempt to add a new, undefined witness type to the database. - // We should get an error. - err = wCache.legacyAddWitnesses(234, key[:]) - if !ErrUnknownWitnessType.Is(err) { - t.Fatalf("expected ErrUnknownWitnessType, got %v", err) - } -} - -// TestAddSha256Witnesses tests that insertion using AddSha256Witnesses behaves -// identically to the insertion via the generalized interface. -func TestAddSha256Witnesses(t *testing.T) { - cdb, cleanUp, err := MakeTestDB() - if err != nil { - t.Fatalf("unable to make test database: %v", err) - } - defer cleanUp() - - wCache := cdb.NewWitnessCache() - - // We'll start by adding a witnesses to the cache using the generic - // AddWitnesses method. 
- witness1 := rev[:] - preimage1 := lntypes.Preimage(rev) - hash1 := preimage1.Hash() - - witness2 := key[:] - preimage2 := lntypes.Preimage(key) - hash2 := preimage2.Hash() - - var ( - witnesses = [][]byte{witness1, witness2} - preimages = []lntypes.Preimage{preimage1, preimage2} - hashes = []lntypes.Hash{hash1, hash2} - ) - - err = wCache.legacyAddWitnesses(Sha256HashWitness, witnesses...) - if err != nil { - t.Fatalf("unable to add witness: %v", err) - } - - for i, hash := range hashes { - preimage := preimages[i] - - dbPreimage, err := wCache.LookupSha256Witness(hash) - if err != nil { - t.Fatalf("unable to lookup witness: %v", err) - } - - // Assert that the retrieved witness matches the original. - if dbPreimage != preimage { - t.Fatalf("retrieved witness mismatch, want: %x, "+ - "got: %x", preimage, dbPreimage) - } - - // We'll now delete the witness, as we'll be reinserting it - // using the specialized AddSha256Witnesses method. - err = wCache.DeleteSha256Witness(hash) - if err != nil { - t.Fatalf("unable to delete witness: %v", err) - } - } - - // Now, add the same witnesses using the type-safe interface for - // lntypes.Preimages.. - err = wCache.AddSha256Witnesses(preimages...) - if err != nil { - t.Fatalf("unable to add sha256 preimage: %v", err) - } - - // Finally, iterate over the keys and assert that the returned witnesses - // match the original witnesses. This asserts that the specialized - // insertion method behaves identically to the generalized interface. - for i, hash := range hashes { - preimage := preimages[i] - - dbPreimage, err := wCache.LookupSha256Witness(hash) - if err != nil { - t.Fatalf("unable to lookup witness: %v", err) - } - - // Assert that the retrieved witness matches the original. - if dbPreimage != preimage { - t.Fatalf("retrieved witness mismatch, want: %x, "+ - "got: %x", preimage, dbPreimage) - } - } -} - -// legacyAddWitnesses adds a batch of new witnesses of wType to the witness -// cache. 
The type of the witness will be used to map each witness to the key -// that will be used to look it up. All witnesses should be of the same -// WitnessType. -// -// NOTE: Previously this method exposed a generic interface for adding -// witnesses, which has since been deprecated in favor of a strongly typed -// interface for each witness class. We keep this method around to assert the -// correctness of specialized witness adding methods. -func (w *WitnessCache) legacyAddWitnesses(wType WitnessType, - witnesses ...[]byte) er.R { - - // Optimistically compute the witness keys before attempting to start - // the db transaction. - entries := make([]witnessEntry, 0, len(witnesses)) - for _, witness := range witnesses { - // Map each witness to its key by applying the appropriate - // transformation for the given witness type. - switch wType { - case Sha256HashWitness: - key := sha256.Sum256(witness) - entries = append(entries, witnessEntry{ - key: key[:], - witness: witness, - }) - default: - return ErrUnknownWitnessType.Default() - } - } - - return w.addWitnessEntries(wType, entries) -} diff --git a/lnd/channelnotifier/channelnotifier.go b/lnd/channelnotifier/channelnotifier.go deleted file mode 100644 index dd19601a..00000000 --- a/lnd/channelnotifier/channelnotifier.go +++ /dev/null @@ -1,183 +0,0 @@ -package channelnotifier - -import ( - "sync" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/subscribe" - "github.com/pkt-cash/pktd/pktlog/log" - "github.com/pkt-cash/pktd/wire" -) - -// ChannelNotifier is a subsystem which all active, inactive, and closed channel -// events pipe through. It takes subscriptions for its events, and whenever -// it receives a new event it notifies its subscribers over the proper channel. 
-type ChannelNotifier struct { - started sync.Once - stopped sync.Once - - ntfnServer *subscribe.Server - - chanDB *channeldb.DB -} - -// PendingOpenChannelEvent represents a new event where a new channel has -// entered a pending open state. -type PendingOpenChannelEvent struct { - // ChannelPoint is the channel outpoint for the new channel. - ChannelPoint *wire.OutPoint - - // PendingChannel is the channel configuration for the newly created - // channel. This might not have been persisted to the channel DB yet - // because we are still waiting for the final message from the remote - // peer. - PendingChannel *channeldb.OpenChannel -} - -// OpenChannelEvent represents a new event where a channel goes from pending -// open to open. -type OpenChannelEvent struct { - // Channel is the channel that has become open. - Channel *channeldb.OpenChannel -} - -// ActiveLinkEvent represents a new event where the link becomes active in the -// switch. This happens before the ActiveChannelEvent. -type ActiveLinkEvent struct { - // ChannelPoint is the channel point for the newly active channel. - ChannelPoint *wire.OutPoint -} - -// ActiveChannelEvent represents a new event where a channel becomes active. -type ActiveChannelEvent struct { - // ChannelPoint is the channelpoint for the newly active channel. - ChannelPoint *wire.OutPoint -} - -// InactiveChannelEvent represents a new event where a channel becomes inactive. -type InactiveChannelEvent struct { - // ChannelPoint is the channelpoint for the newly inactive channel. - ChannelPoint *wire.OutPoint -} - -// ClosedChannelEvent represents a new event where a channel becomes closed. -type ClosedChannelEvent struct { - // CloseSummary is the summary of the channel close that has occurred. - CloseSummary *channeldb.ChannelCloseSummary -} - -// New creates a new channel notifier. The ChannelNotifier gets channel -// events from peers and from the chain arbitrator, and dispatches them to -// its clients. 
-func New(chanDB *channeldb.DB) *ChannelNotifier { - return &ChannelNotifier{ - ntfnServer: subscribe.NewServer(), - chanDB: chanDB, - } -} - -// Start starts the ChannelNotifier and all goroutines it needs to carry out its task. -func (c *ChannelNotifier) Start() er.R { - var err er.R - c.started.Do(func() { - log.Trace("ChannelNotifier starting") - err = c.ntfnServer.Start() - }) - return err -} - -// Stop signals the notifier for a graceful shutdown. -func (c *ChannelNotifier) Stop() { - c.stopped.Do(func() { - c.ntfnServer.Stop() - }) -} - -// SubscribeChannelEvents returns a subscribe.Client that will receive updates -// any time the Server is made aware of a new event. The subscription provides -// channel events from the point of subscription onwards. -// -// TODO(carlaKC): update to allow subscriptions to specify a block height from -// which we would like to subscribe to events. -func (c *ChannelNotifier) SubscribeChannelEvents() (*subscribe.Client, er.R) { - return c.ntfnServer.Subscribe() -} - -// NotifyPendingOpenChannelEvent notifies the channelEventNotifier goroutine -// that a new channel is pending. The pending channel is passed as a parameter -// instead of read from the database because it might not yet have been -// persisted to the DB because we still wait for the final message from the -// remote peer. -func (c *ChannelNotifier) NotifyPendingOpenChannelEvent(chanPoint wire.OutPoint, - pendingChan *channeldb.OpenChannel) { - - event := PendingOpenChannelEvent{ - ChannelPoint: &chanPoint, - PendingChannel: pendingChan, - } - - if err := c.ntfnServer.SendUpdate(event); err != nil { - log.Warnf("Unable to send pending open channel update: %v", err) - } -} - -// NotifyOpenChannelEvent notifies the channelEventNotifier goroutine that a -// channel has gone from pending open to open. -func (c *ChannelNotifier) NotifyOpenChannelEvent(chanPoint wire.OutPoint) { - - // Fetch the relevant channel from the database. 
- channel, err := c.chanDB.FetchChannel(chanPoint) - if err != nil { - log.Warnf("Unable to fetch open channel from the db: %v", err) - } - - // Send the open event to all channel event subscribers. - event := OpenChannelEvent{Channel: channel} - if err := c.ntfnServer.SendUpdate(event); err != nil { - log.Warnf("Unable to send open channel update: %v", err) - } -} - -// NotifyClosedChannelEvent notifies the channelEventNotifier goroutine that a -// channel has closed. -func (c *ChannelNotifier) NotifyClosedChannelEvent(chanPoint wire.OutPoint) { - // Fetch the relevant closed channel from the database. - closeSummary, err := c.chanDB.FetchClosedChannel(&chanPoint) - if err != nil { - log.Warnf("Unable to fetch closed channel summary from the db: %v", err) - } - - // Send the closed event to all channel event subscribers. - event := ClosedChannelEvent{CloseSummary: closeSummary} - if err := c.ntfnServer.SendUpdate(event); err != nil { - log.Warnf("Unable to send closed channel update: %v", err) - } -} - -// NotifyActiveLinkEvent notifies the channelEventNotifier goroutine that a -// link has been added to the switch. -func (c *ChannelNotifier) NotifyActiveLinkEvent(chanPoint wire.OutPoint) { - event := ActiveLinkEvent{ChannelPoint: &chanPoint} - if err := c.ntfnServer.SendUpdate(event); err != nil { - log.Warnf("Unable to send active link update: %v", err) - } -} - -// NotifyActiveChannelEvent notifies the channelEventNotifier goroutine that a -// channel is active. -func (c *ChannelNotifier) NotifyActiveChannelEvent(chanPoint wire.OutPoint) { - event := ActiveChannelEvent{ChannelPoint: &chanPoint} - if err := c.ntfnServer.SendUpdate(event); err != nil { - log.Warnf("Unable to send active channel update: %v", err) - } -} - -// NotifyInactiveChannelEvent notifies the channelEventNotifier goroutine that a -// channel is inactive. 
-func (c *ChannelNotifier) NotifyInactiveChannelEvent(chanPoint wire.OutPoint) { - event := InactiveChannelEvent{ChannelPoint: &chanPoint} - if err := c.ntfnServer.SendUpdate(event); err != nil { - log.Warnf("Unable to send inactive channel update: %v", err) - } -} diff --git a/lnd/chanrestore.go b/lnd/chanrestore.go deleted file mode 100644 index 9d95856f..00000000 --- a/lnd/chanrestore.go +++ /dev/null @@ -1,298 +0,0 @@ -package lnd - -import ( - "math" - "net" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/chaincfg" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/lnd/chanbackup" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/contractcourt" - "github.com/pkt-cash/pktd/lnd/keychain" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/shachain" - "github.com/pkt-cash/pktd/pktlog/log" -) - -const ( - // mainnetSCBLaunchBlock is the approximate block height of the bitcoin - // mainnet chain of the date when SCBs first were released in lnd - // (v0.6.0-beta). The block date is 4/15/2019, 10:54 PM UTC. - mainnetSCBLaunchBlock = 571800 - - // testnetSCBLaunchBlock is the approximate block height of the bitcoin - // testnet3 chain of the date when SCBs first were released in lnd - // (v0.6.0-beta). The block date is 4/16/2019, 08:04 AM UTC. - testnetSCBLaunchBlock = 1489300 -) - -// chanDBRestorer is an implementation of the chanbackup.ChannelRestorer -// interface that is able to properly map a Single backup, into a -// channeldb.ChannelShell which is required to fully restore a channel. We also -// need the secret key chain in order obtain the prior shachain root so we can -// verify the DLP protocol as initiated by the remote node. 
-type chanDBRestorer struct { - db *channeldb.DB - - secretKeys keychain.SecretKeyRing - - chainArb *contractcourt.ChainArbitrator -} - -// openChannelShell maps the static channel back up into an open channel -// "shell". We say shell as this doesn't include all the information required -// to continue to use the channel, only the minimal amount of information to -// insert this shell channel back into the database. -func (c *chanDBRestorer) openChannelShell(backup chanbackup.Single) ( - *channeldb.ChannelShell, er.R) { - - // First, we'll also need to obtain the private key for the shachain - // root from the encoded public key. - // - // TODO(roasbeef): now adds req for hardware signers to impl - // shachain... - privKey, err := c.secretKeys.DerivePrivKey(backup.ShaChainRootDesc) - if err != nil { - return nil, er.Errorf("unable to derive shachain root key: %v", err) - } - revRoot, err := chainhash.NewHash(privKey.Serialize()) - if err != nil { - return nil, err - } - shaChainProducer := shachain.NewRevocationProducer(*revRoot) - - // Each of the keys in our local channel config only have their - // locators populate, so we'll re-derive the raw key now as we'll need - // it in order to carry out the DLP protocol. 
- backup.LocalChanCfg.MultiSigKey, err = c.secretKeys.DeriveKey( - backup.LocalChanCfg.MultiSigKey.KeyLocator, - ) - if err != nil { - return nil, er.Errorf("unable to derive multi sig key: %v", err) - } - backup.LocalChanCfg.RevocationBasePoint, err = c.secretKeys.DeriveKey( - backup.LocalChanCfg.RevocationBasePoint.KeyLocator, - ) - if err != nil { - return nil, er.Errorf("unable to derive revocation key: %v", err) - } - backup.LocalChanCfg.PaymentBasePoint, err = c.secretKeys.DeriveKey( - backup.LocalChanCfg.PaymentBasePoint.KeyLocator, - ) - if err != nil { - return nil, er.Errorf("unable to derive payment key: %v", err) - } - backup.LocalChanCfg.DelayBasePoint, err = c.secretKeys.DeriveKey( - backup.LocalChanCfg.DelayBasePoint.KeyLocator, - ) - if err != nil { - return nil, er.Errorf("unable to derive delay key: %v", err) - } - backup.LocalChanCfg.HtlcBasePoint, err = c.secretKeys.DeriveKey( - backup.LocalChanCfg.HtlcBasePoint.KeyLocator, - ) - if err != nil { - return nil, er.Errorf("unable to derive htlc key: %v", err) - } - - var chanType channeldb.ChannelType - switch backup.Version { - - case chanbackup.DefaultSingleVersion: - chanType = channeldb.SingleFunderBit - - case chanbackup.TweaklessCommitVersion: - chanType = channeldb.SingleFunderTweaklessBit - - case chanbackup.AnchorsCommitVersion: - chanType = channeldb.AnchorOutputsBit - chanType |= channeldb.SingleFunderTweaklessBit - - default: - return nil, er.Errorf("unknown Single version: %v", err) - } - - log.Infof("SCB Recovery: created channel shell for ChannelPoint(%v), "+ - "chan_type=%v", backup.FundingOutpoint, chanType) - - chanShell := channeldb.ChannelShell{ - NodeAddrs: backup.Addresses, - Chan: &channeldb.OpenChannel{ - ChanType: chanType, - ChainHash: backup.ChainHash, - IsInitiator: backup.IsInitiator, - Capacity: backup.Capacity, - FundingOutpoint: backup.FundingOutpoint, - ShortChannelID: backup.ShortChannelID, - IdentityPub: backup.RemoteNodePub, - IsPending: false, - LocalChanCfg: 
backup.LocalChanCfg, - RemoteChanCfg: backup.RemoteChanCfg, - RemoteCurrentRevocation: backup.RemoteNodePub, - RevocationStore: shachain.NewRevocationStore(), - RevocationProducer: shaChainProducer, - }, - } - - return &chanShell, nil -} - -// RestoreChansFromSingles attempts to map the set of single channel backups to -// channel shells that will be stored persistently. Once these shells have been -// stored on disk, we'll be able to connect to the channel peer an execute the -// data loss recovery protocol. -// -// NOTE: Part of the chanbackup.ChannelRestorer interface. -func (c *chanDBRestorer) RestoreChansFromSingles(backups ...chanbackup.Single) er.R { - channelShells := make([]*channeldb.ChannelShell, 0, len(backups)) - firstChanHeight := uint32(math.MaxUint32) - for _, backup := range backups { - chanShell, err := c.openChannelShell(backup) - if err != nil { - return err - } - - // Find the block height of the earliest channel in this backup. - chanHeight := chanShell.Chan.ShortChanID().BlockHeight - if chanHeight != 0 && chanHeight < firstChanHeight { - firstChanHeight = chanHeight - } - - channelShells = append(channelShells, chanShell) - } - - // In case there were only unconfirmed channels, we will have to scan - // the chain beginning from the launch date of SCBs. - if firstChanHeight == math.MaxUint32 { - chainHash := channelShells[0].Chan.ChainHash - switch { - case chainHash.IsEqual(chaincfg.MainNetParams.GenesisHash): - firstChanHeight = mainnetSCBLaunchBlock - - case chainHash.IsEqual(chaincfg.TestNet3Params.GenesisHash): - firstChanHeight = testnetSCBLaunchBlock - - default: - // Worst case: We have no height hint and start at - // block 1. Should only happen for SCBs in regtest, - // simnet and litecoin. - firstChanHeight = 1 - } - } - - // If there were channels in the backup that were not confirmed at the - // time of the backup creation, they won't have a block height in the - // ShortChanID which would lead to an error in the chain watcher. 
- // We want to at least set the funding broadcast height that the chain - // watcher can use instead. We have two possible fallback values for - // the broadcast height that we are going to try here. - for _, chanShell := range channelShells { - channel := chanShell.Chan - - switch { - // Fallback case 1: It is extremely unlikely at this point that - // a channel we are trying to restore has a coinbase funding TX. - // Therefore we can be quite certain that if the TxIndex is - // zero, it was an unconfirmed channel where we used the - // BlockHeight to encode the funding TX broadcast height. To not - // end up with an invalid short channel ID that looks valid, we - // restore the "original" unconfirmed one here. - case channel.ShortChannelID.TxIndex == 0: - broadcastHeight := channel.ShortChannelID.BlockHeight - channel.FundingBroadcastHeight = broadcastHeight - channel.ShortChannelID.BlockHeight = 0 - - // Fallback case 2: This is an unconfirmed channel from an old - // backup file where we didn't have any workaround in place. - // Best we can do here is set the funding broadcast height to a - // reasonable value that we determined earlier. - case channel.ShortChanID().BlockHeight == 0: - channel.FundingBroadcastHeight = firstChanHeight - } - } - - log.Infof("Inserting %v SCB channel shells into DB", - len(channelShells)) - - // Now that we have all the backups mapped into a series of Singles, - // we'll insert them all into the database. - if err := c.db.RestoreChannelShells(channelShells...); err != nil { - return err - } - - log.Infof("Informing chain watchers of new restored channels") - - // Finally, we'll need to inform the chain arbitrator of these new - // channels so we'll properly watch for their ultimate closure on chain - // and sweep them via the DLP. 
- for _, restoredChannel := range channelShells { - err := c.chainArb.WatchNewChannel(restoredChannel.Chan) - if err != nil { - return err - } - } - - return nil -} - -// A compile-time constraint to ensure chanDBRestorer implements -// chanbackup.ChannelRestorer. -var _ chanbackup.ChannelRestorer = (*chanDBRestorer)(nil) - -// ConnectPeer attempts to connect to the target node at the set of available -// addresses. Once this method returns with a non-nil error, the connector -// should attempt to persistently connect to the target peer in the background -// as a persistent attempt. -// -// NOTE: Part of the chanbackup.PeerConnector interface. -func (s *server) ConnectPeer(nodePub *btcec.PublicKey, addrs []net.Addr) er.R { - // Before we connect to the remote peer, we'll remove any connections - // to ensure the new connection is created after this new link/channel - // is known. - if err := s.DisconnectPeer(nodePub); err != nil { - log.Infof("Peer(%v) is already connected, proceeding "+ - "with chan restore", nodePub.SerializeCompressed()) - } - - // For each of the known addresses, we'll attempt to launch a - // persistent connection to the (pub, addr) pair. In the event that any - // of them connect, all the other stale requests will be canceled. - for _, addr := range addrs { - netAddr := &lnwire.NetAddress{ - IdentityKey: nodePub, - Address: addr, - } - - log.Infof("Attempting to connect to %v for SCB restore "+ - "DLP", netAddr) - - // Attempt to connect to the peer using this full address. If - // we're unable to connect to them, then we'll try the next - // address in place of it. - err := s.ConnectToPeer(netAddr, true, s.cfg.ConnectionTimeout) - - // If we're already connected to this peer, then we don't - // consider this an error, so we'll exit here. - errr := er.Wrapped(err) - if _, ok := errr.(*errPeerAlreadyConnected); ok { - return nil - - } else if err != nil { - // Otherwise, something else happened, so we'll try the - // next address. 
- log.Errorf("unable to connect to %v to "+ - "complete SCB restore: %v", netAddr, err) - continue - } - - // If we connected no problem, then we can exit early as our - // job here is done. - return nil - } - - return er.Errorf("unable to connect to peer %x for SCB restore", - nodePub.SerializeCompressed()) -} diff --git a/lnd/clock/default_clock.go b/lnd/clock/default_clock.go deleted file mode 100644 index 3a4f8df3..00000000 --- a/lnd/clock/default_clock.go +++ /dev/null @@ -1,24 +0,0 @@ -package clock - -import ( - "time" -) - -// DefaultClock implements Clock interface by simply calling the appropriate -// time functions. -type DefaultClock struct{} - -// NewDefaultClock constructs a new DefaultClock. -func NewDefaultClock() Clock { - return &DefaultClock{} -} - -// Now simply returns time.Now(). -func (DefaultClock) Now() time.Time { - return time.Now() -} - -// TickAfter simply wraps time.After(). -func (DefaultClock) TickAfter(duration time.Duration) <-chan time.Time { - return time.After(duration) -} diff --git a/lnd/clock/interface.go b/lnd/clock/interface.go deleted file mode 100644 index 0450410e..00000000 --- a/lnd/clock/interface.go +++ /dev/null @@ -1,16 +0,0 @@ -package clock - -import ( - "time" -) - -// Clock is an interface that provides a time functions for LND packages. -// This is useful during testing when a concrete time reference is needed. -type Clock interface { - // Now returns the current local time (as defined by the Clock). - Now() time.Time - - // TickAfter returns a channel that will receive a tick after the specified - // duration has passed. - TickAfter(duration time.Duration) <-chan time.Time -} diff --git a/lnd/clock/test_clock.go b/lnd/clock/test_clock.go deleted file mode 100644 index 85e33d4f..00000000 --- a/lnd/clock/test_clock.go +++ /dev/null @@ -1,96 +0,0 @@ -package clock - -import ( - "sync" - "time" -) - -// TestClock can be used in tests to mock time. 
-type TestClock struct { - currentTime time.Time - timeChanMap map[time.Time][]chan time.Time - timeLock sync.Mutex - tickSignal chan time.Duration -} - -// NewTestClock returns a new test clock. -func NewTestClock(startTime time.Time) *TestClock { - return &TestClock{ - currentTime: startTime, - timeChanMap: make(map[time.Time][]chan time.Time), - } -} - -// NewTestClockWithTickSignal will create a new test clock with an added -// channel which will be used to signal when a new ticker is registered. -// This is useful when creating a ticker on a separate goroutine and we'd -// like to wait for that to happen before advancing the test case. -func NewTestClockWithTickSignal(startTime time.Time, - tickSignal chan time.Duration) *TestClock { - - testClock := NewTestClock(startTime) - testClock.tickSignal = tickSignal - - return testClock -} - -// Now returns the current (test) time. -func (c *TestClock) Now() time.Time { - c.timeLock.Lock() - defer c.timeLock.Unlock() - - return c.currentTime -} - -// TickAfter returns a channel that will receive a tick after the specified -// duration has passed passed by the user set test time. -func (c *TestClock) TickAfter(duration time.Duration) <-chan time.Time { - c.timeLock.Lock() - defer func() { - c.timeLock.Unlock() - - // Signal that the ticker has been added. - if c.tickSignal != nil { - c.tickSignal <- duration - } - }() - - triggerTime := c.currentTime.Add(duration) - ch := make(chan time.Time, 1) - - // If already expired, tick immediately. - if !triggerTime.After(c.currentTime) { - ch <- c.currentTime - return ch - } - - // Otherwise store the channel until the trigger time is there. - chans := c.timeChanMap[triggerTime] - chans = append(chans, ch) - c.timeChanMap[triggerTime] = chans - - return ch -} - -// SetTime sets the (test) time and triggers tick channels when they expire. 
-func (c *TestClock) SetTime(now time.Time) { - c.timeLock.Lock() - defer c.timeLock.Unlock() - - c.currentTime = now - remainingChans := make(map[time.Time][]chan time.Time) - for triggerTime, chans := range c.timeChanMap { - // If the trigger time is still in the future, keep this channel - // in the channel map for later. - if triggerTime.After(now) { - remainingChans[triggerTime] = chans - continue - } - - for _, c := range chans { - c <- now - } - } - - c.timeChanMap = remainingChans -} diff --git a/lnd/clock/test_clock_test.go b/lnd/clock/test_clock_test.go deleted file mode 100644 index 36ad3aea..00000000 --- a/lnd/clock/test_clock_test.go +++ /dev/null @@ -1,91 +0,0 @@ -package clock - -import ( - "testing" - "time" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/stretchr/testify/assert" -) - -var ( - testTime = time.Date(2009, time.January, 3, 12, 0, 0, 0, time.UTC) -) - -func TestNow(t *testing.T) { - c := NewTestClock(testTime) - now := c.Now() - assert.Equal(t, testTime, now) - - now = now.Add(time.Hour) - c.SetTime(now) - assert.Equal(t, now, c.Now()) -} - -func TestTickAfter(t *testing.T) { - c := NewTestClock(testTime) - - // Should be ticking immediately. - ticker0 := c.TickAfter(0) - - // Both should be ticking after SetTime - ticker1 := c.TickAfter(time.Hour) - ticker2 := c.TickAfter(time.Hour) - - // We don't expect this one to tick. 
- ticker3 := c.TickAfter(2 * time.Hour) - - tickOrTimeOut := func(ticker <-chan time.Time, expectTick bool) { - tick := false - select { - case <-ticker: - tick = true - - case <-time.After(time.Millisecond): - } - - assert.Equal(t, expectTick, tick) - } - - tickOrTimeOut(ticker0, true) - tickOrTimeOut(ticker1, false) - tickOrTimeOut(ticker2, false) - tickOrTimeOut(ticker3, false) - - c.SetTime(c.Now().Add(time.Hour)) - - tickOrTimeOut(ticker1, true) - tickOrTimeOut(ticker2, true) - tickOrTimeOut(ticker3, false) -} - -// TestTickSignal tests that TickAfter signals registration allowing -// safe time advancement. -func TestTickSignal(t *testing.T) { - const interval = time.Second - - ch := make(chan time.Duration) - c := NewTestClockWithTickSignal(testTime, ch) - err := make(chan er.R, 1) - - go func() { - select { - // TickAfter will signal registration but will not - // tick, unless we read the signal and set the time. - case <-c.TickAfter(interval): - err <- nil - - // Signal timeout if tick didn't happen. - case <-time.After(time.Second): - err <- er.Errorf("timeout") - } - }() - - tick := <-ch - // Expect that the interval is correctly passed over the channel. - assert.Equal(t, interval, tick) - - // Once the ticker is registered, set the time to make it fire. - c.SetTime(testTime.Add(time.Second)) - assert.NoError(t, er.Native(<-err)) -} diff --git a/lnd/cmd/lncli/arg_parse.go b/lnd/cmd/lncli/arg_parse.go deleted file mode 100644 index 17ca459c..00000000 --- a/lnd/cmd/lncli/arg_parse.go +++ /dev/null @@ -1,43 +0,0 @@ -package main - -import ( - "regexp" - "strconv" - "time" - - "github.com/pkt-cash/pktd/btcutil/er" -) - -// reTimeRange matches systemd.time-like short negative timeranges, e.g. "-200s". -var reTimeRange = regexp.MustCompile(`^-\d{1,18}[s|m|h|d|w|M|y]$`) - -// secondsPer allows translating s(seconds), m(minutes), h(ours), d(ays), -// w(eeks), M(onths) and y(ears) into corresponding seconds. 
-var secondsPer = map[string]int64{ - "s": 1, - "m": 60, - "h": 3600, - "d": 86400, - "w": 604800, - "M": 2630016, // 30.44 days - "y": 31557600, // 365.25 days -} - -// parseTime parses UNIX timestamps or short timeranges inspired by sytemd (when starting with "-"), -// e.g. "-1M" for one month (30.44 days) ago. -func parseTime(s string, base time.Time) (uint64, er.R) { - if reTimeRange.MatchString(s) { - last := len(s) - 1 - - d, errr := strconv.ParseInt(s[1:last], 10, 64) - if errr != nil { - return uint64(0), er.E(errr) - } - - mul := secondsPer[string(s[last])] - return uint64(base.Unix() - d*mul), nil - } - - i, e := strconv.ParseUint(s, 10, 64) - return i, er.E(e) -} diff --git a/lnd/cmd/lncli/arg_parse_test.go b/lnd/cmd/lncli/arg_parse_test.go deleted file mode 100644 index 9de6f895..00000000 --- a/lnd/cmd/lncli/arg_parse_test.go +++ /dev/null @@ -1,88 +0,0 @@ -package main - -import ( - "testing" - "time" -) - -var now = time.Date(2017, 11, 10, 7, 8, 9, 1234, time.UTC) - -var partTimeTests = []struct { - in string - expected uint64 - errExpected bool -}{ - { - "12345", - uint64(12345), - false, - }, - { - "-0s", - uint64(now.Unix()), - false, - }, - { - "-1s", - uint64(time.Date(2017, 11, 10, 7, 8, 8, 1234, time.UTC).Unix()), - false, - }, - { - "-2h", - uint64(time.Date(2017, 11, 10, 5, 8, 9, 1234, time.UTC).Unix()), - false, - }, - { - "-3d", - uint64(time.Date(2017, 11, 7, 7, 8, 9, 1234, time.UTC).Unix()), - false, - }, - { - "-4w", - uint64(time.Date(2017, 10, 13, 7, 8, 9, 1234, time.UTC).Unix()), - false, - }, - { - "-5M", - uint64(now.Unix() - 30.44*5*24*60*60), - false, - }, - { - "-6y", - uint64(now.Unix() - 365.25*6*24*60*60), - false, - }, - { - "-999999999999999999s", - uint64(now.Unix() - 999999999999999999), - false, - }, - { - "-9999999999999999991s", - 0, - true, - }, - { - "-7z", - 0, - true, - }, -} - -// Test that parsing absolute and relative times works. 
-func TestParseTime(t *testing.T) { - for _, test := range partTimeTests { - actual, err := parseTime(test.in, now) - if test.errExpected == (err == nil) { - t.Fatalf("unexpected error for %s:\n%v\n", test.in, err) - } - if actual != test.expected { - t.Fatalf( - "for %s actual and expected do not match:\n%d\n%d\n", - test.in, - actual, - test.expected, - ) - } - } -} diff --git a/lnd/cmd/lncli/autopilotrpc_active.go b/lnd/cmd/lncli/autopilotrpc_active.go deleted file mode 100644 index 498ae7ea..00000000 --- a/lnd/cmd/lncli/autopilotrpc_active.go +++ /dev/null @@ -1,163 +0,0 @@ -// +build autopilotrpc - -package main - -import ( - "context" - - "github.com/pkt-cash/pktd/lnd/lnrpc/autopilotrpc" - "github.com/urfave/cli" -) - -func getAutopilotClient(ctx *cli.Context) (autopilotrpc.AutopilotClient, func()) { - conn := getClientConn(ctx, false) - - cleanUp := func() { - conn.Close() - } - - return autopilotrpc.NewAutopilotClient(conn), cleanUp -} - -var getStatusCommand = cli.Command{ - Name: "status", - Usage: "Get the active status of autopilot.", - Description: "", - Action: actionDecorator(getStatus), -} - -func getStatus(ctx *cli.Context) er.R { - ctxb := context.Background() - client, cleanUp := getAutopilotClient(ctx) - defer cleanUp() - - req := &autopilotrpc.StatusRequest{} - - resp, err := client.Status(ctxb, req) - if err != nil { - return err - } - - printRespJSON(resp) - return nil -} - -var enableCommand = cli.Command{ - Name: "enable", - Usage: "Enable the autopilot.", - Description: "", - Action: actionDecorator(enable), -} - -var disableCommand = cli.Command{ - Name: "disable", - Usage: "Disable the active autopilot.", - Description: "", - Action: actionDecorator(disable), -} - -func enable(ctx *cli.Context) er.R { - ctxb := context.Background() - client, cleanUp := getAutopilotClient(ctx) - defer cleanUp() - - // We will enable the autopilot. 
- req := &autopilotrpc.ModifyStatusRequest{ - Enable: true, - } - - resp, err := client.ModifyStatus(ctxb, req) - if err != nil { - return err - } - - printRespJSON(resp) - return nil -} - -func disable(ctx *cli.Context) er.R { - ctxb := context.Background() - client, cleanUp := getAutopilotClient(ctx) - defer cleanUp() - - // We will disable the autopilot. - req := &autopilotrpc.ModifyStatusRequest{ - Enable: false, - } - - resp, err := client.ModifyStatus(ctxb, req) - if err != nil { - return err - } - - printRespJSON(resp) - return nil -} - -var queryScoresCommand = cli.Command{ - Name: "query", - Usage: "Query the autopilot heuristics for nodes' scores.", - ArgsUsage: "[flags] ...", - Description: "", - Action: actionDecorator(queryScores), - Flags: []cli.Flag{ - cli.BoolFlag{ - Name: "ignorelocalstate, i", - Usage: "Ignore local channel state when calculating " + - "scores.", - }, - }, -} - -func queryScores(ctx *cli.Context) er.R { - ctxb := context.Background() - client, cleanUp := getAutopilotClient(ctx) - defer cleanUp() - - args := ctx.Args() - var pubs []string - - // Keep reading pubkeys as long as there are arguments. -loop: - for { - switch { - case args.Present(): - pubs = append(pubs, args.First()) - args = args.Tail() - default: - break loop - } - } - - req := &autopilotrpc.QueryScoresRequest{ - Pubkeys: pubs, - IgnoreLocalState: ctx.Bool("ignorelocalstate"), - } - - resp, err := client.QueryScores(ctxb, req) - if err != nil { - return err - } - - printRespJSON(resp) - return nil -} - -// autopilotCommands will return the set of commands to enable for autopilotrpc -// builds. 
-func autopilotCommands() []cli.Command { - return []cli.Command{ - { - Name: "autopilot", - Category: "Autopilot", - Usage: "Interact with a running autopilot.", - Description: "", - Subcommands: []cli.Command{ - getStatusCommand, - enableCommand, - disableCommand, - queryScoresCommand, - }, - }, - } -} diff --git a/lnd/cmd/lncli/autopilotrpc_default.go b/lnd/cmd/lncli/autopilotrpc_default.go deleted file mode 100644 index 49061254..00000000 --- a/lnd/cmd/lncli/autopilotrpc_default.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build !autopilotrpc - -package main - -import "github.com/urfave/cli" - -// autopilotCommands will return nil for non-autopilotrpc builds. -func autopilotCommands() []cli.Command { - return nil -} diff --git a/lnd/cmd/lncli/cmd_build_route.go b/lnd/cmd/lncli/cmd_build_route.go deleted file mode 100644 index 62c4288a..00000000 --- a/lnd/cmd/lncli/cmd_build_route.go +++ /dev/null @@ -1,91 +0,0 @@ -package main - -import ( - "context" - "strings" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/chainreg" - "github.com/pkt-cash/pktd/lnd/lnrpc/routerrpc" - "github.com/pkt-cash/pktd/lnd/routing/route" - "github.com/urfave/cli" -) - -var buildRouteCommand = cli.Command{ - Name: "buildroute", - Category: "Payments", - Usage: "Build a route from a list of hop pubkeys.", - Action: actionDecorator(buildRoute), - Flags: []cli.Flag{ - cli.Int64Flag{ - Name: "amt", - Usage: "the amount to send expressed in satoshis. 
If" + - "not set, the minimum routable amount is used", - }, - cli.Int64Flag{ - Name: "final_cltv_delta", - Usage: "number of blocks the last hop has to reveal " + - "the preimage", - Value: chainreg.DefaultBitcoinTimeLockDelta, - }, - cli.StringFlag{ - Name: "hops", - Usage: "comma separated hex pubkeys", - }, - cli.Uint64Flag{ - Name: "outgoing_chan_id", - Usage: "short channel id of the outgoing channel to " + - "use for the first hop of the payment", - Value: 0, - }, - }, -} - -func buildRoute(ctx *cli.Context) er.R { - conn := getClientConn(ctx, false) - defer conn.Close() - - client := routerrpc.NewRouterClient(conn) - - if !ctx.IsSet("hops") { - return er.New("hops required") - } - - // Build list of hop addresses for the rpc. - hops := strings.Split(ctx.String("hops"), ",") - rpcHops := make([][]byte, 0, len(hops)) - for _, k := range hops { - pubkey, err := route.NewVertexFromStr(k) - if err != nil { - return er.Errorf("error parsing %v: %v", k, err) - } - rpcHops = append(rpcHops, pubkey[:]) - } - - var amtMsat int64 - hasAmt := ctx.IsSet("amt") - if hasAmt { - amtMsat = ctx.Int64("amt") * 1000 - if amtMsat == 0 { - return er.Errorf("non-zero amount required") - } - } - - // Call BuildRoute rpc. 
- req := &routerrpc.BuildRouteRequest{ - AmtMsat: amtMsat, - FinalCltvDelta: int32(ctx.Int64("final_cltv_delta")), - HopPubkeys: rpcHops, - OutgoingChanId: ctx.Uint64("outgoing_chan_id"), - } - - rpcCtx := context.Background() - route, err := client.BuildRoute(rpcCtx, req) - if err != nil { - return er.E(err) - } - - printRespJSON(route) - - return nil -} diff --git a/lnd/cmd/lncli/cmd_invoice.go b/lnd/cmd/lncli/cmd_invoice.go deleted file mode 100644 index d5f37e41..00000000 --- a/lnd/cmd/lncli/cmd_invoice.go +++ /dev/null @@ -1,289 +0,0 @@ -package main - -import ( - "context" - "strconv" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/lnd/lnrpc" - "github.com/urfave/cli" -) - -var addInvoiceCommand = cli.Command{ - Name: "addinvoice", - Category: "Invoices", - Usage: "Add a new invoice.", - Description: ` - Add a new invoice, expressing intent for a future payment. - - Invoices without an amount can be created by not supplying any - parameters or providing an amount of 0. These invoices allow the payee - to specify the amount of satoshis they wish to send.`, - ArgsUsage: "value preimage", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "memo", - Usage: "a description of the payment to attach along " + - "with the invoice (default=\"\")", - }, - cli.StringFlag{ - Name: "preimage", - Usage: "the hex-encoded preimage (32 byte) which will " + - "allow settling an incoming HTLC payable to this " + - "preimage. If not set, a random preimage will be " + - "created.", - }, - cli.Int64Flag{ - Name: "amt", - Usage: "the amt of satoshis in this invoice", - }, - cli.StringFlag{ - Name: "description_hash", - Usage: "SHA-256 hash of the description of the payment. " + - "Used if the purpose of payment cannot naturally " + - "fit within the memo. 
If provided this will be " + - "used instead of the description(memo) field in " + - "the encoded invoice.", - }, - cli.StringFlag{ - Name: "fallback_addr", - Usage: "fallback on-chain address that can be used in " + - "case the lightning payment fails", - }, - cli.Int64Flag{ - Name: "expiry", - Usage: "the invoice's expiry time in seconds. If not " + - "specified an expiry of 3600 seconds (1 hour) " + - "is implied.", - }, - cli.BoolTFlag{ - Name: "private", - Usage: "encode routing hints in the invoice with " + - "private channels in order to assist the " + - "payer in reaching you", - }, - }, - Action: actionDecorator(addInvoice), -} - -func addInvoice(ctx *cli.Context) er.R { - var ( - preimage []byte - descHash []byte - amt int64 - err er.R - errr error - ) - - client, cleanUp := getClient(ctx) - defer cleanUp() - - args := ctx.Args() - - switch { - case ctx.IsSet("amt"): - amt = ctx.Int64("amt") - case args.Present(): - amt, errr = strconv.ParseInt(args.First(), 10, 64) - args = args.Tail() - if errr != nil { - return er.Errorf("unable to decode amt argument: %v", errr) - } - } - - switch { - case ctx.IsSet("preimage"): - preimage, err = util.DecodeHex(ctx.String("preimage")) - case args.Present(): - preimage, err = util.DecodeHex(args.First()) - } - - if err != nil { - return er.Errorf("unable to parse preimage: %v", err) - } - - descHash, err = util.DecodeHex(ctx.String("description_hash")) - if err != nil { - return er.Errorf("unable to parse description_hash: %v", err) - } - - invoice := &lnrpc.Invoice{ - Memo: ctx.String("memo"), - RPreimage: preimage, - Value: amt, - DescriptionHash: descHash, - FallbackAddr: ctx.String("fallback_addr"), - Expiry: ctx.Int64("expiry"), - Private: ctx.Bool("private"), - } - - resp, errr := client.AddInvoice(context.Background(), invoice) - if errr != nil { - return er.E(errr) - } - - printRespJSON(resp) - - return nil -} - -var lookupInvoiceCommand = cli.Command{ - Name: "lookupinvoice", - Category: "Invoices", - Usage: 
"Lookup an existing invoice by its payment hash.", - ArgsUsage: "rhash", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "rhash", - Usage: "the 32 byte payment hash of the invoice to query for, the hash " + - "should be a hex-encoded string", - }, - }, - Action: actionDecorator(lookupInvoice), -} - -func lookupInvoice(ctx *cli.Context) er.R { - client, cleanUp := getClient(ctx) - defer cleanUp() - - var ( - rHash []byte - err er.R - ) - - switch { - case ctx.IsSet("rhash"): - rHash, err = util.DecodeHex(ctx.String("rhash")) - case ctx.Args().Present(): - rHash, err = util.DecodeHex(ctx.Args().First()) - default: - return er.Errorf("rhash argument missing") - } - - if err != nil { - return er.Errorf("unable to decode rhash argument: %v", err) - } - - req := &lnrpc.PaymentHash{ - RHash: rHash, - } - - invoice, errr := client.LookupInvoice(context.Background(), req) - if errr != nil { - return er.E(errr) - } - - printRespJSON(invoice) - - return nil -} - -var listInvoicesCommand = cli.Command{ - Name: "listinvoices", - Category: "Invoices", - Usage: "List all invoices currently stored within the database. Any " + - "active debug invoices are ignored.", - Description: ` - This command enables the retrieval of all invoices currently stored - within the database. It has full support for paginationed responses, - allowing users to query for specific invoices through their add_index. - This can be done by using either the first_index_offset or - last_index_offset fields included in the response as the index_offset of - the next request. Backward pagination is enabled by default to receive - current invoices first. If you wish to paginate forwards, set the - paginate-forwards flag. If none of the parameters are specified, then - the last 100 invoices will be returned. - - For example: if you have 200 invoices, "lncli listinvoices" will return - the last 100 created. 
If you wish to retrieve the previous 100, the - first_offset_index of the response can be used as the index_offset of - the next listinvoices request.`, - Flags: []cli.Flag{ - cli.BoolFlag{ - Name: "pending_only", - Usage: "toggles if all invoices should be returned, " + - "or only those that are currently unsettled", - }, - cli.Uint64Flag{ - Name: "index_offset", - Usage: "the index of an invoice that will be used as " + - "either the start or end of a query to " + - "determine which invoices should be returned " + - "in the response", - }, - cli.Uint64Flag{ - Name: "max_invoices", - Usage: "the max number of invoices to return", - }, - cli.BoolFlag{ - Name: "paginate-forwards", - Usage: "if set, invoices succeeding the " + - "index_offset will be returned", - }, - }, - Action: actionDecorator(listInvoices), -} - -func listInvoices(ctx *cli.Context) er.R { - client, cleanUp := getClient(ctx) - defer cleanUp() - - req := &lnrpc.ListInvoiceRequest{ - PendingOnly: ctx.Bool("pending_only"), - IndexOffset: ctx.Uint64("index_offset"), - NumMaxInvoices: ctx.Uint64("max_invoices"), - Reversed: !ctx.Bool("paginate-forwards"), - } - - invoices, errr := client.ListInvoices(context.Background(), req) - if errr != nil { - return er.E(errr) - } - - printRespJSON(invoices) - - return nil -} - -var decodePayReqCommand = cli.Command{ - Name: "decodepayreq", - Category: "Invoices", - Usage: "Decode a payment request.", - Description: "Decode the passed payment request revealing the destination, payment hash and value of the payment request", - ArgsUsage: "pay_req", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "pay_req", - Usage: "the bech32 encoded payment request", - }, - }, - Action: actionDecorator(decodePayReq), -} - -func decodePayReq(ctx *cli.Context) er.R { - ctxb := context.Background() - client, cleanUp := getClient(ctx) - defer cleanUp() - - var payreq string - - switch { - case ctx.IsSet("pay_req"): - payreq = ctx.String("pay_req") - case ctx.Args().Present(): - payreq 
= ctx.Args().First() - default: - return er.Errorf("pay_req argument missing") - } - - resp, errr := client.DecodePayReq(ctxb, &lnrpc.PayReqString{ - PayReq: payreq, - }) - if errr != nil { - return er.E(errr) - } - - printRespJSON(resp) - return nil -} diff --git a/lnd/cmd/lncli/cmd_macaroon.go b/lnd/cmd/lncli/cmd_macaroon.go deleted file mode 100644 index adcddaf0..00000000 --- a/lnd/cmd/lncli/cmd_macaroon.go +++ /dev/null @@ -1,416 +0,0 @@ -package main - -import ( - "bytes" - "context" - "encoding/hex" - "fmt" - "io/ioutil" - "net" - "strconv" - "strings" - - "github.com/golang/protobuf/proto" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/lnd/lncfg" - "github.com/pkt-cash/pktd/lnd/lnrpc" - "github.com/pkt-cash/pktd/lnd/macaroons" - "github.com/urfave/cli" - "gopkg.in/macaroon-bakery.v2/bakery" - "gopkg.in/macaroon.v2" -) - -var bakeMacaroonCommand = cli.Command{ - Name: "bakemacaroon", - Category: "Macaroons", - Usage: "Bakes a new macaroon with the provided list of permissions " + - "and restrictions.", - ArgsUsage: "[--save_to=] [--timeout=] [--ip_address=] permissions...", - Description: ` - Bake a new macaroon that grants the provided permissions and - optionally adds restrictions (timeout, IP address) to it. - - The new macaroon can either be shown on command line in hex serialized - format or it can be saved directly to a file using the --save_to - argument. - - A permission is a tuple of an entity and an action, separated by a - colon. Multiple operations can be added as arguments, for example: - - lncli bakemacaroon info:read invoices:write foo:bar - - For even more fine-grained permission control, it is also possible to - specify single RPC method URIs that are allowed to be accessed by a - macaroon. 
This can be achieved by specifying "uri:" pairs, - for example: - - lncli bakemacaroon uri:/lnrpc.Lightning/GetInfo uri:/verrpc.Versioner/GetVersion - - The macaroon created by this command would only be allowed to use the - "lncli getinfo" and "lncli version" commands. - - To get a list of all available URIs and permissions, use the - "lncli listpermissions" command. - `, - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "save_to", - Usage: "save the created macaroon to this file " + - "using the default binary format", - }, - cli.Uint64Flag{ - Name: "timeout", - Usage: "the number of seconds the macaroon will be " + - "valid before it times out", - }, - cli.StringFlag{ - Name: "ip_address", - Usage: "the IP address the macaroon will be bound to", - }, - cli.Uint64Flag{ - Name: "root_key_id", - Usage: "the numerical root key ID used to create the macaroon", - }, - }, - Action: actionDecorator(bakeMacaroon), -} - -func bakeMacaroon(ctx *cli.Context) er.R { - client, cleanUp := getClient(ctx) - defer cleanUp() - - // Show command help if no arguments. - if ctx.NArg() == 0 { - return er.E(cli.ShowCommandHelp(ctx, "bakemacaroon")) - } - args := ctx.Args() - - var ( - savePath string - timeout int64 - ipAddress net.IP - rootKeyID uint64 - parsedPermissions []*lnrpc.MacaroonPermission - err er.R - ) - - if ctx.String("save_to") != "" { - savePath = lncfg.CleanAndExpandPath(ctx.String("save_to")) - } - - if ctx.IsSet("timeout") { - timeout = ctx.Int64("timeout") - if timeout <= 0 { - return er.Errorf("timeout must be greater than 0") - } - } - - if ctx.IsSet("ip_address") { - ipAddress = net.ParseIP(ctx.String("ip_address")) - if ipAddress == nil { - return er.Errorf("unable to parse ip_address: %s", - ctx.String("ip_address")) - } - } - - if ctx.IsSet("root_key_id") { - rootKeyID = ctx.Uint64("root_key_id") - } - - // A command line argument can't be an empty string. So we'll check each - // entry if it's a valid entity:action tuple. 
The content itself is - // validated server side. We just make sure we can parse it correctly. - for _, permission := range args { - tuple := strings.Split(permission, ":") - if len(tuple) != 2 { - return er.Errorf("unable to parse "+ - "permission tuple: %s", permission) - } - entity, action := tuple[0], tuple[1] - if entity == "" { - return er.Errorf("invalid permission [%s]. entity "+ - "cannot be empty", permission) - } - if action == "" { - return er.Errorf("invalid permission [%s]. action "+ - "cannot be empty", permission) - } - - // No we can assume that we have a formally valid entity:action - // tuple. The rest of the validation happens server side. - parsedPermissions = append( - parsedPermissions, &lnrpc.MacaroonPermission{ - Entity: entity, - Action: action, - }, - ) - } - - // Now we have gathered all the input we need and can do the actual - // RPC call. - req := &lnrpc.BakeMacaroonRequest{ - Permissions: parsedPermissions, - RootKeyId: rootKeyID, - } - resp, errr := client.BakeMacaroon(context.Background(), req) - if errr != nil { - return er.E(errr) - } - - // Now we should have gotten a valid macaroon. Unmarshal it so we can - // add first-party caveats (if necessary) to it. - macBytes, err := util.DecodeHex(resp.Macaroon) - if err != nil { - return err - } - unmarshalMac := &macaroon.Macaroon{} - if errr := unmarshalMac.UnmarshalBinary(macBytes); errr != nil { - return er.E(errr) - } - - // Now apply the desired constraints to the macaroon. This will always - // create a new macaroon object, even if no constraints are added. 
- macConstraints := make([]macaroons.Constraint, 0) - if timeout > 0 { - macConstraints = append( - macConstraints, macaroons.TimeoutConstraint(timeout), - ) - } - if ipAddress != nil { - macConstraints = append( - macConstraints, - macaroons.IPLockConstraint(ipAddress.String()), - ) - } - constrainedMac, err := macaroons.AddConstraints( - unmarshalMac, macConstraints..., - ) - if err != nil { - return err - } - macBytes, errr = constrainedMac.MarshalBinary() - if errr != nil { - return er.E(errr) - } - - // Now we can output the result. We either write it binary serialized to - // a file or write to the standard output using hex encoding. - switch { - case savePath != "": - err := ioutil.WriteFile(savePath, macBytes, 0644) - if err != nil { - return er.E(err) - } - fmt.Printf("Macaroon saved to %s\n", savePath) - - default: - fmt.Printf("%s\n", hex.EncodeToString(macBytes)) - } - - return nil -} - -var listMacaroonIDsCommand = cli.Command{ - Name: "listmacaroonids", - Category: "Macaroons", - Usage: "List all macaroons root key IDs in use.", - Action: actionDecorator(listMacaroonIDs), -} - -func listMacaroonIDs(ctx *cli.Context) er.R { - client, cleanUp := getClient(ctx) - defer cleanUp() - - req := &lnrpc.ListMacaroonIDsRequest{} - resp, errr := client.ListMacaroonIDs(context.Background(), req) - if errr != nil { - return er.E(errr) - } - - printRespJSON(resp) - return nil -} - -var deleteMacaroonIDCommand = cli.Command{ - Name: "deletemacaroonid", - Category: "Macaroons", - Usage: "Delete a specific macaroon ID.", - ArgsUsage: "root_key_id", - Description: ` - Remove a macaroon ID using the specified root key ID. For example: - - lncli deletemacaroonid 1 - - WARNING - When the ID is deleted, all macaroons created from that root key will - be invalidated. - - Note that the default root key ID 0 cannot be deleted. 
- `, - Action: actionDecorator(deleteMacaroonID), -} - -func deleteMacaroonID(ctx *cli.Context) er.R { - client, cleanUp := getClient(ctx) - defer cleanUp() - - // Validate args length. Only one argument is allowed. - if ctx.NArg() != 1 { - return er.E(cli.ShowCommandHelp(ctx, "deletemacaroonid")) - } - - rootKeyIDString := ctx.Args().First() - - // Convert string into uint64. - rootKeyID, errr := strconv.ParseUint(rootKeyIDString, 10, 64) - if errr != nil { - return er.Errorf("root key ID must be a positive integer") - } - - // Check that the value is not equal to DefaultRootKeyID. Note that the - // server also validates the root key ID when removing it. However, we check - // it here too so that we can give users a nice warning. - if bytes.Equal([]byte(rootKeyIDString), macaroons.DefaultRootKeyID) { - return er.Errorf("deleting the default root key ID 0 is not allowed") - } - - // Make the actual RPC call. - req := &lnrpc.DeleteMacaroonIDRequest{ - RootKeyId: rootKeyID, - } - resp, errr := client.DeleteMacaroonID(context.Background(), req) - if errr != nil { - return er.E(errr) - } - - printRespJSON(resp) - return nil -} - -var listPermissionsCommand = cli.Command{ - Name: "listpermissions", - Category: "Macaroons", - Usage: "Lists all RPC method URIs and the macaroon permissions they " + - "require to be invoked.", - Action: actionDecorator(listPermissions), -} - -func listPermissions(ctx *cli.Context) er.R { - client, cleanUp := getClient(ctx) - defer cleanUp() - - request := &lnrpc.ListPermissionsRequest{} - response, errr := client.ListPermissions(context.Background(), request) - if errr != nil { - return er.E(errr) - } - - printRespJSON(response) - - return nil -} - -type macaroonContent struct { - Version uint16 `json:"version"` - Location string `json:"location"` - RootKeyID string `json:"root_key_id"` - Permissions []string `json:"permissions"` - Caveats []string `json:"caveats"` -} - -var printMacaroonCommand = cli.Command{ - Name: "printmacaroon", - 
Category: "Macaroons", - Usage: "Print the content of a macaroon in a human readable format.", - ArgsUsage: "[macaroon_content_hex]", - Description: ` - Decode a macaroon and show its content in a more human readable format. - The macaroon can either be passed as a hex encoded positional parameter - or loaded from a file. - `, - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "macaroon_file", - Usage: "load the macaroon from a file instead of the " + - "command line directly", - }, - }, - Action: actionDecorator(printMacaroon), -} - -func printMacaroon(ctx *cli.Context) er.R { - // Show command help if no arguments or flags are set. - if ctx.NArg() == 0 && ctx.NumFlags() == 0 { - return er.E(cli.ShowCommandHelp(ctx, "printmacaroon")) - } - - var ( - macBytes []byte - err er.R - args = ctx.Args() - ) - switch { - case ctx.IsSet("macaroon_file"): - macPath := lncfg.CleanAndExpandPath(ctx.String("macaroon_file")) - - // Load the specified macaroon file. - var errr error - macBytes, errr = ioutil.ReadFile(macPath) - if errr != nil { - return er.Errorf("unable to read macaroon path %v: %v", - macPath, errr) - } - - case args.Present(): - macBytes, err = util.DecodeHex(args.First()) - if err != nil { - return er.Errorf("unable to hex decode macaroon: %v", - err) - } - - default: - return er.Errorf("macaroon parameter missing") - } - - // Decode the macaroon and its protobuf encoded internal identifier. - mac := &macaroon.Macaroon{} - if err := mac.UnmarshalBinary(macBytes); err != nil { - return er.Errorf("unable to decode macaroon: %v", err) - } - rawID := mac.Id() - if rawID[0] != byte(bakery.LatestVersion) { - return er.Errorf("invalid macaroon version: %x", rawID) - } - decodedID := &lnrpc.MacaroonId{} - idProto := rawID[1:] - errr := proto.Unmarshal(idProto, decodedID) - if errr != nil { - return er.Errorf("unable to decode macaroon version: %v", errr) - } - - // Prepare everything to be printed in a more human readable format. 
- content := &macaroonContent{ - Version: uint16(mac.Version()), - Location: mac.Location(), - RootKeyID: string(decodedID.StorageId), - Permissions: nil, - Caveats: nil, - } - - for _, caveat := range mac.Caveats() { - content.Caveats = append(content.Caveats, string(caveat.Id)) - } - for _, op := range decodedID.Ops { - for _, action := range op.Actions { - permission := fmt.Sprintf("%s:%s", op.Entity, action) - content.Permissions = append( - content.Permissions, permission, - ) - } - } - - printJSON(content) - - return nil -} diff --git a/lnd/cmd/lncli/cmd_open_channel.go b/lnd/cmd/lncli/cmd_open_channel.go deleted file mode 100644 index 215036ef..00000000 --- a/lnd/cmd/lncli/cmd_open_channel.go +++ /dev/null @@ -1,731 +0,0 @@ -package main - -import ( - "bytes" - "context" - "crypto/rand" - "encoding/base64" - "encoding/hex" - "fmt" - "io" - "strconv" - "strings" - - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/lnd/lnrpc" - "github.com/pkt-cash/pktd/lnd/lnwallet/chanfunding" - "github.com/pkt-cash/pktd/lnd/signal" - "github.com/pkt-cash/pktd/wire" - "github.com/urfave/cli" -) - -const ( - userMsgFund = `PSBT funding initiated with peer %x. -Please create a PSBT that sends %v (%d satoshi) to the funding address %s. - -Note: The whole process should be completed within 10 minutes, otherwise there -is a risk of the remote node timing out and canceling the funding process. - -Example with bitcoind: - bitcoin-cli walletcreatefundedpsbt [] '[{"%s":%.8f}]' - -If you are using a wallet that can fund a PSBT directly (currently not possible -with bitcoind), you can use this PSBT that contains the same address and amount: -%s - -!!! WARNING !!! -DO NOT PUBLISH the finished transaction by yourself or with another tool. -lnd MUST publish it in the proper funding flow order OR THE FUNDS CAN BE LOST! 
- -Paste the funded PSBT here to continue the funding flow. -Base64 encoded PSBT: ` - - userMsgSign = ` -PSBT verified by lnd, please continue the funding flow by signing the PSBT by -all required parties/devices. Once the transaction is fully signed, paste it -again here either in base64 PSBT or hex encoded raw wire TX format. - -Signed base64 encoded PSBT or hex encoded raw wire TX: ` -) - -// TODO(roasbeef): change default number of confirmations -var openChannelCommand = cli.Command{ - Name: "openchannel", - Category: "Channels", - Usage: "Open a channel to a node or an existing peer.", - Description: ` - Attempt to open a new channel to an existing peer with the key node-key - optionally blocking until the channel is 'open'. - - One can also connect to a node before opening a new channel to it by - setting its host:port via the --connect argument. For this to work, - the node_key must be provided, rather than the peer_id. This is optional. - - The channel will be initialized with local-amt satoshis local and push-amt - satoshis for the remote node. Note that specifying push-amt means you give that - amount to the remote node as part of the channel opening. Once the channel is open, - a channelPoint (txid:vout) of the funding output is returned. - - If the remote peer supports the option upfront shutdown feature bit (query - listpeers to see their supported feature bits), an address to enforce - payout of funds on cooperative close can optionally be provided. Note that - if you set this value, you will not be able to cooperatively close out to - another address. - - One can manually set the fee to be used for the funding transaction via either - the --conf_target or --sat_per_byte arguments. 
This is optional.`, - ArgsUsage: "node-key local-amt push-amt", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "node_key", - Usage: "the identity public key of the target node/peer " + - "serialized in compressed format", - }, - cli.StringFlag{ - Name: "connect", - Usage: "(optional) the host:port of the target node", - }, - cli.IntFlag{ - Name: "local_amt", - Usage: "the number of satoshis the wallet should commit to the channel", - }, - cli.IntFlag{ - Name: "push_amt", - Usage: "the number of satoshis to give the remote side " + - "as part of the initial commitment state, " + - "this is equivalent to first opening a " + - "channel and sending the remote party funds, " + - "but done all in one step", - }, - cli.BoolFlag{ - Name: "block", - Usage: "block and wait until the channel is fully open", - }, - cli.Int64Flag{ - Name: "conf_target", - Usage: "(optional) the number of blocks that the " + - "transaction *should* confirm in, will be " + - "used for fee estimation", - }, - cli.Int64Flag{ - Name: "sat_per_byte", - Usage: "(optional) a manual fee expressed in " + - "sat/byte that should be used when crafting " + - "the transaction", - }, - cli.BoolFlag{ - Name: "private", - Usage: "make the channel private, such that it won't " + - "be announced to the greater network, and " + - "nodes other than the two channel endpoints " + - "must be explicitly told about it to be able " + - "to route through it", - }, - cli.Int64Flag{ - Name: "min_htlc_msat", - Usage: "(optional) the minimum value we will require " + - "for incoming HTLCs on the channel", - }, - cli.Uint64Flag{ - Name: "remote_csv_delay", - Usage: "(optional) the number of blocks we will require " + - "our channel counterparty to wait before accessing " + - "its funds in case of unilateral close. 
If this is " + - "not set, we will scale the value according to the " + - "channel size", - }, - cli.Uint64Flag{ - Name: "max_local_csv", - Usage: "(optional) the maximum number of blocks that " + - "we will allow the remote peer to require we " + - "wait before accessing our funds in the case " + - "of a unilateral close.", - }, - cli.Uint64Flag{ - Name: "min_confs", - Usage: "(optional) the minimum number of confirmations " + - "each one of your outputs used for the funding " + - "transaction must satisfy", - Value: defaultUtxoMinConf, - }, - cli.StringFlag{ - Name: "close_address", - Usage: "(optional) an address to enforce payout of our " + - "funds to on cooperative close. Note that if this " + - "value is set on channel open, you will *not* be " + - "able to cooperatively close to a different address.", - }, - cli.BoolFlag{ - Name: "psbt", - Usage: "start an interactive mode that initiates " + - "funding through a partially signed bitcoin " + - "transaction (PSBT), allowing the channel " + - "funds to be added and signed from a hardware " + - "or other offline device.", - }, - cli.StringFlag{ - Name: "base_psbt", - Usage: "when using the interactive PSBT mode to open " + - "a new channel, use this base64 encoded PSBT " + - "as a base and add the new channel output to " + - "it instead of creating a new, empty one.", - }, - cli.BoolFlag{ - Name: "no_publish", - Usage: "when using the interactive PSBT mode to open " + - "multiple channels in a batch, this flag " + - "instructs lnd to not publish the full batch " + - "transaction just yet. 
For safety reasons " + - "this flag should be set for each of the " + - "batch's transactions except the very last", - }, - cli.Uint64Flag{ - Name: "remote_max_value_in_flight_msat", - Usage: "(optional) the maximum value in msat that " + - "can be pending within the channel at any given time", - }, - }, - Action: actionDecorator(openChannel), -} - -func openChannel(ctx *cli.Context) er.R { - // TODO(roasbeef): add deadline to context - ctxb := context.Background() - client, cleanUp := getClient(ctx) - defer cleanUp() - - args := ctx.Args() - var errr error - - // Show command help if no arguments provided - if ctx.NArg() == 0 && ctx.NumFlags() == 0 { - _ = cli.ShowCommandHelp(ctx, "openchannel") - return nil - } - - minConfs := int32(ctx.Uint64("min_confs")) - req := &lnrpc.OpenChannelRequest{ - TargetConf: int32(ctx.Int64("conf_target")), - SatPerByte: ctx.Int64("sat_per_byte"), - MinHtlcMsat: ctx.Int64("min_htlc_msat"), - RemoteCsvDelay: uint32(ctx.Uint64("remote_csv_delay")), - MinConfs: minConfs, - SpendUnconfirmed: minConfs == 0, - CloseAddress: ctx.String("close_address"), - RemoteMaxValueInFlightMsat: ctx.Uint64("remote_max_value_in_flight_msat"), - MaxLocalCsv: uint32(ctx.Uint64("max_local_csv")), - } - - switch { - case ctx.IsSet("node_key"): - nodePubHex, err := util.DecodeHex(ctx.String("node_key")) - if err != nil { - return er.Errorf("unable to decode node public key: %v", err) - } - req.NodePubkey = nodePubHex - - case args.Present(): - nodePubHex, err := util.DecodeHex(args.First()) - if err != nil { - return er.Errorf("unable to decode node public key: %v", err) - } - args = args.Tail() - req.NodePubkey = nodePubHex - default: - return er.Errorf("node id argument missing") - } - - // As soon as we can confirm that the node's node_key was set, rather - // than the peer_id, we can check if the host:port was also set to - // connect to it before opening the channel. 
- if req.NodePubkey != nil && ctx.IsSet("connect") { - addr := &lnrpc.LightningAddress{ - Pubkey: hex.EncodeToString(req.NodePubkey), - Host: ctx.String("connect"), - } - - req := &lnrpc.ConnectPeerRequest{ - Addr: addr, - Perm: false, - } - - // Check if connecting to the node was successful. - // We discard the peer id returned as it is not needed. - _, err := client.ConnectPeer(ctxb, req) - if err != nil && - !strings.Contains(err.Error(), "already connected") { - return er.E(err) - } - } - - switch { - case ctx.IsSet("local_amt"): - req.LocalFundingAmount = int64(ctx.Int("local_amt")) - case args.Present(): - req.LocalFundingAmount, errr = strconv.ParseInt(args.First(), 10, 64) - if errr != nil { - return er.Errorf("unable to decode local amt: %v", errr) - } - args = args.Tail() - default: - return er.Errorf("local amt argument missing") - } - - if ctx.IsSet("push_amt") { - req.PushSat = int64(ctx.Int("push_amt")) - } else if args.Present() { - req.PushSat, errr = strconv.ParseInt(args.First(), 10, 64) - if errr != nil { - return er.Errorf("unable to decode push amt: %v", errr) - } - } - - req.Private = ctx.Bool("private") - - // PSBT funding is a more involved, interactive process that is too - // large to also fit into this already long function. 
- if ctx.Bool("psbt") { - return openChannelPsbt(ctx, client, req) - } - if !ctx.Bool("psbt") && ctx.Bool("no_publish") { - return er.Errorf("the --no_publish flag can only be used in " + - "combination with the --psbt flag") - } - - stream, errr := client.OpenChannel(ctxb, req) - if errr != nil { - return er.E(errr) - } - - for { - resp, err := stream.Recv() - if err == io.EOF { - return nil - } else if err != nil { - return er.E(err) - } - - switch update := resp.Update.(type) { - case *lnrpc.OpenStatusUpdate_ChanPending: - err := printChanPending(update) - if err != nil { - return err - } - - if !ctx.Bool("block") { - return nil - } - - case *lnrpc.OpenStatusUpdate_ChanOpen: - return printChanOpen(update) - } - } -} - -// openChannelPsbt starts an interactive channel open protocol that uses a -// partially signed bitcoin transaction (PSBT) to fund the channel output. The -// protocol involves several steps between the RPC server and the CLI client: -// -// RPC server CLI client -// | | -// | |<------open channel (stream)-----| -// | |-------ready for funding----->| | -// | |<------PSBT verify------------| | -// | |-------ready for signing----->| | -// | |<------PSBT finalize----------| | -// | |-------channel pending------->| | -// | |-------channel open------------->| -// | | -func openChannelPsbt(ctx *cli.Context, client lnrpc.LightningClient, - req *lnrpc.OpenChannelRequest) er.R { - - var ( - pendingChanID [32]byte - shimPending = true - basePsbtBytes []byte - quit = make(chan struct{}) - srvMsg = make(chan *lnrpc.OpenStatusUpdate, 1) - srvErr = make(chan er.R, 1) - ctxc, cancel = context.WithCancel(context.Background()) - ) - defer cancel() - - // Make sure the user didn't supply any command line flags that are - // incompatible with PSBT funding. - err := checkPsbtFlags(req) - if err != nil { - return err - } - - // If the user supplied a base PSBT, only make sure it's valid base64. - // The RPC server will make sure it's also a valid PSBT. 
- basePsbt := ctx.String("base_psbt") - if basePsbt != "" { - var err error - basePsbtBytes, err = base64.StdEncoding.DecodeString(basePsbt) - if err != nil { - return er.Errorf("error parsing base PSBT: %v", err) - } - } - - // Generate a new, random pending channel ID that we'll use as the main - // identifier when sending update messages to the RPC server. - if _, err := rand.Read(pendingChanID[:]); err != nil { - return er.Errorf("unable to generate random chan ID: %v", err) - } - fmt.Printf("Starting PSBT funding flow with pending channel ID %x.\n", - pendingChanID) - - // maybeCancelShim is a helper function that cancels the funding shim - // with the RPC server in case we end up aborting early. - maybeCancelShim := func() { - // If the user canceled while there was still a shim registered - // with the wallet, release the resources now. - if shimPending { - fmt.Printf("Canceling PSBT funding flow for pending "+ - "channel ID %x.\n", pendingChanID) - cancelMsg := &lnrpc.FundingTransitionMsg{ - Trigger: &lnrpc.FundingTransitionMsg_ShimCancel{ - ShimCancel: &lnrpc.FundingShimCancel{ - PendingChanId: pendingChanID[:], - }, - }, - } - err := sendFundingState(ctxc, ctx, cancelMsg) - if err != nil { - fmt.Printf("Error canceling shim: %v\n", err) - } - shimPending = false - } - - // Abort the stream connection to the server. - cancel() - } - defer maybeCancelShim() - - // Create the PSBT funding shim that will tell the funding manager we - // want to use a PSBT. - req.FundingShim = &lnrpc.FundingShim{ - Shim: &lnrpc.FundingShim_PsbtShim{ - PsbtShim: &lnrpc.PsbtShim{ - PendingChanId: pendingChanID[:], - BasePsbt: basePsbtBytes, - NoPublish: ctx.Bool("no_publish"), - }, - }, - } - - // Start the interactive process by opening the stream connection to the - // daemon. If the user cancels by pressing we need to cancel - // the shim. To not just kill the process on interrupt, we need to - // explicitly capture the signal. 
- stream, errr := client.OpenChannel(ctxc, req) - if errr != nil { - return er.Errorf("opening stream to server failed: %v", errr) - } - - if err := signal.Intercept(); err != nil { - return err - } - - // We also need to spawn a goroutine that reads from the server. This - // will copy the messages to the channel as long as they come in or add - // exactly one error to the error stream and then bail out. - go func() { - for { - // Recv blocks until a message or error arrives. - resp, err := stream.Recv() - if err == io.EOF { - srvErr <- er.Errorf("lnd shutting down: %v", - err) - return - } else if err != nil { - srvErr <- er.Errorf("got error from server: "+ - "%v", err) - return - } - - // Don't block on sending in case of shutting down. - select { - case srvMsg <- resp: - case <-quit: - return - } - } - }() - - // Spawn another goroutine that only handles abort from user or errors - // from the server. Both will trigger an attempt to cancel the shim with - // the server. - go func() { - select { - case <-signal.ShutdownChannel(): - fmt.Printf("\nInterrupt signal received.\n") - close(quit) - - case err := <-srvErr: - fmt.Printf("\nError received: %v\n", err) - - // If the remote peer canceled on us, the reservation - // has already been deleted. We don't need to try to - // remove it again, this would just produce another - // error. - if chanfunding.ErrRemoteCanceled.Is(err) { - shimPending = false - } - close(quit) - - case <-quit: - } - }() - - // Our main event loop where we wait for triggers - for { - var srvResponse *lnrpc.OpenStatusUpdate - select { - case srvResponse = <-srvMsg: - case <-quit: - return nil - } - - switch update := srvResponse.Update.(type) { - case *lnrpc.OpenStatusUpdate_PsbtFund: - // First tell the user how to create the PSBT with the - // address and amount we now know. 
- amt := btcutil.Amount(update.PsbtFund.FundingAmount) - addr := update.PsbtFund.FundingAddress - fmt.Printf( - userMsgFund, req.NodePubkey, amt, amt, addr, - addr, amt.ToBTC(), - base64.StdEncoding.EncodeToString( - update.PsbtFund.Psbt, - ), - ) - - // Read the user's response and send it to the server to - // verify everything's correct before anything is - // signed. - psbtBase64, err := readLine(quit) - if er.Wrapped(err) == io.EOF { - return nil - } - if err != nil { - return er.Errorf("reading from console "+ - "failed: %v", err) - } - fundedPsbt, errr := base64.StdEncoding.DecodeString( - strings.TrimSpace(psbtBase64), - ) - if errr != nil { - return er.Errorf("base64 decode failed: %v", - errr) - } - verifyMsg := &lnrpc.FundingTransitionMsg{ - Trigger: &lnrpc.FundingTransitionMsg_PsbtVerify{ - PsbtVerify: &lnrpc.FundingPsbtVerify{ - FundedPsbt: fundedPsbt, - PendingChanId: pendingChanID[:], - }, - }, - } - err = sendFundingState(ctxc, ctx, verifyMsg) - if err != nil { - return er.Errorf("verifying PSBT by lnd "+ - "failed: %v", err) - } - - // Now that we know the PSBT looks good, we can let it - // be signed by the user. - fmt.Print(userMsgSign) - - // Read the signed PSBT and send it to lnd. - finalTxStr, err := readLine(quit) - if er.Wrapped(err) == io.EOF { - return nil - } - if err != nil { - return er.Errorf("reading from console "+ - "failed: %v", err) - } - finalizeMsg, err := finalizeMsgFromString( - finalTxStr, pendingChanID[:], - ) - if err != nil { - return err - } - transitionMsg := &lnrpc.FundingTransitionMsg{ - Trigger: finalizeMsg, - } - err = sendFundingState(ctxc, ctx, transitionMsg) - if err != nil { - return er.Errorf("finalizing PSBT funding "+ - "flow failed: %v", err) - } - - case *lnrpc.OpenStatusUpdate_ChanPending: - // As soon as the channel is pending, there is no more - // shim that needs to be canceled. If the user - // interrupts now, we don't need to clean up anything. 
- shimPending = false - - err := printChanPending(update) - if err != nil { - return err - } - - if !ctx.Bool("block") { - return nil - } - - case *lnrpc.OpenStatusUpdate_ChanOpen: - return printChanOpen(update) - } - } -} - -// printChanOpen prints the channel point of the channel open message. -func printChanOpen(update *lnrpc.OpenStatusUpdate_ChanOpen) er.R { - channelPoint := update.ChanOpen.ChannelPoint - - // A channel point's funding txid can be get/set as a - // byte slice or a string. In the case it is a string, - // decode it. - var txidHash []byte - switch channelPoint.GetFundingTxid().(type) { - case *lnrpc.ChannelPoint_FundingTxidBytes: - txidHash = channelPoint.GetFundingTxidBytes() - case *lnrpc.ChannelPoint_FundingTxidStr: - s := channelPoint.GetFundingTxidStr() - h, err := chainhash.NewHashFromStr(s) - if err != nil { - return err - } - - txidHash = h[:] - } - - txid, err := chainhash.NewHash(txidHash) - if err != nil { - return err - } - - index := channelPoint.OutputIndex - printJSON(struct { - ChannelPoint string `json:"channel_point"` - }{ - ChannelPoint: fmt.Sprintf("%v:%v", txid, index), - }) - return nil -} - -// printChanPending prints the funding transaction ID of the channel pending -// message. -func printChanPending(update *lnrpc.OpenStatusUpdate_ChanPending) er.R { - txid, err := chainhash.NewHash(update.ChanPending.Txid) - if err != nil { - return err - } - - printJSON(struct { - FundingTxid string `json:"funding_txid"` - }{ - FundingTxid: txid.String(), - }) - return nil -} - -// readLine reads a line from standard in but does not block in case of a -// system interrupt like syscall.SIGINT (Ctrl+C). -func readLine(quit chan struct{}) (string, er.R) { - msg := make(chan string, 1) - - // In a normal console, reading from stdin won't signal EOF when the - // user presses Ctrl+C. That's why we need to put this in a separate - // goroutine so it doesn't block. 
- go func() { - for { - var str string - _, _ = fmt.Scan(&str) - msg <- str - return - } - }() - for { - select { - case <-quit: - return "", er.E(io.EOF) - - case str := <-msg: - return str, nil - } - } -} - -// checkPsbtFlags make sure a request to open a channel doesn't set any -// parameters that are incompatible with the PSBT funding flow. -func checkPsbtFlags(req *lnrpc.OpenChannelRequest) er.R { - if req.MinConfs != defaultUtxoMinConf || req.SpendUnconfirmed { - return er.Errorf("specifying minimum confirmations for PSBT " + - "funding is not supported") - } - if req.TargetConf != 0 || req.SatPerByte != 0 { - return er.Errorf("setting fee estimation parameters not " + - "supported for PSBT funding") - } - return nil -} - -// sendFundingState sends a single funding state step message by using a new -// client connection. This is necessary if the whole funding flow takes longer -// than the default macaroon timeout, then we cannot use a single client -// connection. -func sendFundingState(cancelCtx context.Context, cliCtx *cli.Context, - msg *lnrpc.FundingTransitionMsg) er.R { - - client, cleanUp := getClient(cliCtx) - defer cleanUp() - - _, errr := client.FundingStateStep(cancelCtx, msg) - return er.E(errr) -} - -// finalizeMsgFromString creates the final message for the PsbtFinalize step -// from either a hex encoded raw wire transaction or a base64 encoded PSBT -// packet. -func finalizeMsgFromString(tx string, - pendingChanID []byte) (*lnrpc.FundingTransitionMsg_PsbtFinalize, er.R) { - - rawTx, err := util.DecodeHex(strings.TrimSpace(tx)) - if err == nil { - // Hex decoding succeeded so we assume we have a raw wire format - // transaction. Let's submit that instead of a PSBT packet. 
- tx := &wire.MsgTx{} - err := tx.Deserialize(bytes.NewReader(rawTx)) - if err != nil { - return nil, er.Errorf("deserializing as raw wire "+ - "transaction failed: %v", err) - } - return &lnrpc.FundingTransitionMsg_PsbtFinalize{ - PsbtFinalize: &lnrpc.FundingPsbtFinalize{ - FinalRawTx: rawTx, - PendingChanId: pendingChanID, - }, - }, nil - } - - // If the string isn't a hex encoded transaction, we assume it must be - // a base64 encoded PSBT packet. - psbtBytes, errr := base64.StdEncoding.DecodeString(strings.TrimSpace(tx)) - if errr != nil { - return nil, er.Errorf("base64 decode failed: %v", errr) - } - return &lnrpc.FundingTransitionMsg_PsbtFinalize{ - PsbtFinalize: &lnrpc.FundingPsbtFinalize{ - SignedPsbt: psbtBytes, - PendingChanId: pendingChanID, - }, - }, nil -} diff --git a/lnd/cmd/lncli/cmd_pay.go b/lnd/cmd/lncli/cmd_pay.go deleted file mode 100644 index fe87c20d..00000000 --- a/lnd/cmd/lncli/cmd_pay.go +++ /dev/null @@ -1,887 +0,0 @@ -package main - -import ( - "bytes" - "context" - "crypto/rand" - "fmt" - "io/ioutil" - "os" - "runtime" - "strconv" - "strings" - "time" - - "github.com/jedib0t/go-pretty/table" - "github.com/jedib0t/go-pretty/text" - "github.com/lightninglabs/protobuf-hex-display/jsonpb" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/lnd/lnrpc" - "github.com/pkt-cash/pktd/lnd/lnrpc/routerrpc" - "github.com/pkt-cash/pktd/lnd/lntypes" - "github.com/pkt-cash/pktd/lnd/record" - "github.com/pkt-cash/pktd/lnd/routing/route" - "github.com/urfave/cli" -) - -const ( - // paymentTimeout is the default timeout for the payment loop in lnd. - // No new attempts will be started after the timeout. 
- paymentTimeout = time.Second * 60 -) - -var ( - cltvLimitFlag = cli.UintFlag{ - Name: "cltv_limit", - Usage: "the maximum time lock that may be used for " + - "this payment", - } - - lastHopFlag = cli.StringFlag{ - Name: "last_hop", - Usage: "pubkey of the last hop (penultimate node in the path) " + - "to route through for this payment", - } - - dataFlag = cli.StringFlag{ - Name: "data", - Usage: "attach custom data to the payment. The required " + - "format is: =,=" + - ",.. For example: --data 3438382=0a21ff. " + - "Custom record ids start from 65536.", - } - - inflightUpdatesFlag = cli.BoolFlag{ - Name: "inflight_updates", - Usage: "if set, intermediate payment state updates will be " + - "displayed. Only valid in combination with --json.", - } - - maxPartsFlag = cli.UintFlag{ - Name: "max_parts", - Usage: "the maximum number of partial payments that may be " + - "used", - Value: 1, - } - - jsonFlag = cli.BoolFlag{ - Name: "json", - Usage: "if set, payment updates are printed as json " + - "messages. Set by default on Windows because table " + - "formatting is unsupported.", - } -) - -// paymentFlags returns common flags for sendpayment and payinvoice. 
-func paymentFlags() []cli.Flag { - return []cli.Flag{ - cli.StringFlag{ - Name: "pay_req", - Usage: "a zpay32 encoded payment request to fulfill", - }, - cli.Int64Flag{ - Name: "fee_limit", - Usage: "maximum fee allowed in satoshis when " + - "sending the payment", - }, - cli.Int64Flag{ - Name: "fee_limit_percent", - Usage: "percentage of the payment's amount used as " + - "the maximum fee allowed when sending the " + - "payment", - }, - cli.DurationFlag{ - Name: "timeout", - Usage: "the maximum amount of time we should spend " + - "trying to fulfill the payment, failing " + - "after the timeout has elapsed", - Value: paymentTimeout, - }, - cltvLimitFlag, - lastHopFlag, - cli.Uint64Flag{ - Name: "outgoing_chan_id", - Usage: "short channel id of the outgoing channel to " + - "use for the first hop of the payment", - Value: 0, - }, - cli.BoolFlag{ - Name: "force, f", - Usage: "will skip payment request confirmation", - }, - cli.BoolFlag{ - Name: "allow_self_payment", - Usage: "allow sending a circular payment to self", - }, - dataFlag, inflightUpdatesFlag, maxPartsFlag, jsonFlag, - } -} - -var sendPaymentCommand = cli.Command{ - Name: "sendpayment", - Category: "Payments", - Usage: "Send a payment over lightning.", - Description: ` - Send a payment over Lightning. One can either specify the full - parameters of the payment, or just use a payment request which encodes - all the payment details. - - If payment isn't manually specified, then only a payment request needs - to be passed using the --pay_req argument. 
- - If the payment *is* manually specified, then all four alternative - arguments need to be specified in order to complete the payment: - * --dest=N - * --amt=A - * --final_cltv_delta=T - * --payment_hash=H - `, - ArgsUsage: "dest amt payment_hash final_cltv_delta | --pay_req=[payment request]", - Flags: append(paymentFlags(), - cli.StringFlag{ - Name: "dest, d", - Usage: "the compressed identity pubkey of the " + - "payment recipient", - }, - cli.Int64Flag{ - Name: "amt, a", - Usage: "number of satoshis to send", - }, - cli.StringFlag{ - Name: "payment_hash, r", - Usage: "the hash to use within the payment's HTLC", - }, - cli.Int64Flag{ - Name: "final_cltv_delta", - Usage: "the number of blocks the last hop has to reveal the preimage", - }, - cli.BoolFlag{ - Name: "keysend", - Usage: "will generate a pre-image and encode it in the sphinx packet, a dest must be set [experimental]", - }, - ), - Action: sendPayment, -} - -// retrieveFeeLimit retrieves the fee limit based on the different fee limit -// flags passed. It always returns a value and doesn't rely on lnd applying a -// default. -func retrieveFeeLimit(ctx *cli.Context, amt int64) (int64, er.R) { - switch { - - case ctx.IsSet("fee_limit") && ctx.IsSet("fee_limit_percent"): - return 0, er.Errorf("either fee_limit or fee_limit_percent " + - "can be set, but not both") - - case ctx.IsSet("fee_limit"): - return ctx.Int64("fee_limit"), nil - - case ctx.IsSet("fee_limit_percent"): - // Round up the fee limit to prevent hitting zero on small - // amounts. - feeLimitRoundedUp := - (amt*ctx.Int64("fee_limit_percent") + 99) / 100 - - return feeLimitRoundedUp, nil - } - - // If no fee limit is set, use the payment amount as a limit (100%). 
- return amt, nil -} - -func confirmPayReq(resp *lnrpc.PayReq, amt, feeLimit int64) er.R { - fmt.Printf("Payment hash: %v\n", resp.GetPaymentHash()) - fmt.Printf("Description: %v\n", resp.GetDescription()) - fmt.Printf("Amount (in satoshis): %v\n", amt) - fmt.Printf("Fee limit (in satoshis): %v\n", feeLimit) - fmt.Printf("Destination: %v\n", resp.GetDestination()) - - confirm := promptForConfirmation("Confirm payment (yes/no): ") - if !confirm { - return er.Errorf("payment not confirmed") - } - - return nil -} - -func sendPayment(ctx *cli.Context) er.R { - // Show command help if no arguments provided - if ctx.NArg() == 0 && ctx.NumFlags() == 0 { - _ = cli.ShowCommandHelp(ctx, "sendpayment") - return nil - } - - // If a payment request was provided, we can exit early since all of the - // details of the payment are encoded within the request. - if ctx.IsSet("pay_req") { - req := &routerrpc.SendPaymentRequest{ - PaymentRequest: ctx.String("pay_req"), - Amt: ctx.Int64("amt"), - } - - return sendPaymentRequest(ctx, req) - } - - var ( - destNode []byte - amount int64 - err er.R - ) - - args := ctx.Args() - - switch { - case ctx.IsSet("dest"): - destNode, err = util.DecodeHex(ctx.String("dest")) - case args.Present(): - destNode, err = util.DecodeHex(args.First()) - args = args.Tail() - default: - return er.Errorf("destination txid argument missing") - } - if err != nil { - return err - } - - if len(destNode) != 33 { - return er.Errorf("dest node pubkey must be exactly 33 bytes, is "+ - "instead: %v", len(destNode)) - } - - if ctx.IsSet("amt") { - amount = ctx.Int64("amt") - } else if args.Present() { - var err error - amount, err = strconv.ParseInt(args.First(), 10, 64) - args = args.Tail() - if err != nil { - return er.Errorf("unable to decode payment amount: %v", err) - } - } - - req := &routerrpc.SendPaymentRequest{ - Dest: destNode, - Amt: amount, - DestCustomRecords: make(map[uint64][]byte), - } - - var rHash []byte - - if ctx.Bool("keysend") { - if 
ctx.IsSet("payment_hash") { - return er.New("cannot set payment hash when using " + - "keysend") - } - var preimage lntypes.Preimage - if _, err := rand.Read(preimage[:]); err != nil { - return er.E(err) - } - - // Set the preimage. If the user supplied a preimage with the - // data flag, the preimage that is set here will be overwritten - // later. - req.DestCustomRecords[record.KeySendType] = preimage[:] - - hash := preimage.Hash() - rHash = hash[:] - } else { - switch { - case ctx.IsSet("payment_hash"): - rHash, err = util.DecodeHex(ctx.String("payment_hash")) - case args.Present(): - rHash, err = util.DecodeHex(args.First()) - args = args.Tail() - default: - return er.Errorf("payment hash argument missing") - } - } - - if err != nil { - return err - } - if len(rHash) != 32 { - return er.Errorf("payment hash must be exactly 32 "+ - "bytes, is instead %v", len(rHash)) - } - req.PaymentHash = rHash - - switch { - case ctx.IsSet("final_cltv_delta"): - req.FinalCltvDelta = int32(ctx.Int64("final_cltv_delta")) - case args.Present(): - delta, err := strconv.ParseInt(args.First(), 10, 64) - if err != nil { - return er.E(err) - } - req.FinalCltvDelta = int32(delta) - } - - return sendPaymentRequest(ctx, req) -} - -func sendPaymentRequest(ctx *cli.Context, - req *routerrpc.SendPaymentRequest) er.R { - - conn := getClientConn(ctx, false) - defer conn.Close() - - client := lnrpc.NewLightningClient(conn) - routerClient := routerrpc.NewRouterClient(conn) - - outChan := ctx.Uint64("outgoing_chan_id") - if outChan != 0 { - req.OutgoingChanIds = []uint64{outChan} - } - if ctx.IsSet(lastHopFlag.Name) { - lastHop, err := route.NewVertexFromStr( - ctx.String(lastHopFlag.Name), - ) - if err != nil { - return err - } - req.LastHopPubkey = lastHop[:] - } - - req.CltvLimit = int32(ctx.Int(cltvLimitFlag.Name)) - - pmtTimeout := ctx.Duration("timeout") - if pmtTimeout <= 0 { - return er.New("payment timeout must be greater than zero") - } - req.TimeoutSeconds = 
int32(pmtTimeout.Seconds()) - - req.AllowSelfPayment = ctx.Bool("allow_self_payment") - - req.MaxParts = uint32(ctx.Uint(maxPartsFlag.Name)) - var err er.R - - // Parse custom data records. - data := ctx.String(dataFlag.Name) - if data != "" { - records := strings.Split(data, ",") - for _, r := range records { - kv := strings.Split(r, "=") - if len(kv) != 2 { - return er.New("invalid data format: " + - "multiple equal signs in record") - } - - recordID, errr := strconv.ParseUint(kv[0], 10, 64) - if errr != nil { - return er.Errorf("invalid data format: %v", - errr) - } - - hexValue, err := util.DecodeHex(kv[1]) - if err != nil { - return er.Errorf("invalid data format: %v", - err) - } - - req.DestCustomRecords[recordID] = hexValue - } - } - - var feeLimit int64 - if req.PaymentRequest != "" { - // Decode payment request to find out the amount. - decodeReq := &lnrpc.PayReqString{PayReq: req.PaymentRequest} - decodeResp, errr := client.DecodePayReq( - context.Background(), decodeReq, - ) - if errr != nil { - return er.E(errr) - } - - // If amount is present in the request, override the request - // amount. - amt := req.Amt - invoiceAmt := decodeResp.GetNumSatoshis() - if invoiceAmt != 0 { - amt = invoiceAmt - } - - // Calculate fee limit based on the determined amount. - feeLimit, err = retrieveFeeLimit(ctx, amt) - if err != nil { - return err - } - - // Ask for confirmation of amount and fee limit if payment is - // forced. - if !ctx.Bool("force") { - err := confirmPayReq(decodeResp, amt, feeLimit) - if err != nil { - return err - } - } - } else { - var err er.R - feeLimit, err = retrieveFeeLimit(ctx, req.Amt) - if err != nil { - return err - } - } - - req.FeeLimitSat = feeLimit - - // Always print in-flight updates for the table output. 
- printJSON := ctx.Bool(jsonFlag.Name) - req.NoInflightUpdates = !ctx.Bool(inflightUpdatesFlag.Name) && printJSON - - stream, errr := routerClient.SendPaymentV2(context.Background(), req) - if errr != nil { - return er.E(errr) - } - - finalState, err := printLivePayment( - stream, client, printJSON, - ) - if err != nil { - return err - } - - // If we get a payment error back, we pass an error up - // to main which eventually calls fatal() and returns - // with a non-zero exit code. - if finalState.Status != lnrpc.Payment_SUCCEEDED { - return er.New(finalState.Status.String()) - } - - return nil -} - -var trackPaymentCommand = cli.Command{ - Name: "trackpayment", - Category: "Payments", - Usage: "Track progress of an existing payment.", - Description: ` - Pick up monitoring the progression of a previously initiated payment - specified by the hash argument. - `, - ArgsUsage: "hash", - Action: actionDecorator(trackPayment), -} - -func trackPayment(ctx *cli.Context) er.R { - args := ctx.Args() - - conn := getClientConn(ctx, false) - defer conn.Close() - - routerClient := routerrpc.NewRouterClient(conn) - - if !args.Present() { - return er.Errorf("hash argument missing") - } - - hash, err := util.DecodeHex(args.First()) - if err != nil { - return err - } - - req := &routerrpc.TrackPaymentRequest{ - PaymentHash: hash, - } - - stream, errr := routerClient.TrackPaymentV2(context.Background(), req) - if errr != nil { - return er.E(errr) - } - - client := lnrpc.NewLightningClient(conn) - _, err = printLivePayment(stream, client, ctx.Bool(jsonFlag.Name)) - return err -} - -// printLivePayment receives payment updates from the given stream and either -// outputs them as json or as a more user-friendly formatted table. The table -// option uses terminal control codes to rewrite the output. This call -// terminates when the payment reaches a final state. 
-func printLivePayment(stream routerrpc.Router_TrackPaymentV2Client, - client lnrpc.LightningClient, json bool) (*lnrpc.Payment, er.R) { - - // Terminal escape codes aren't supported on Windows, fall back to json. - if !json && runtime.GOOS == "windows" { - json = true - } - - aliases := newAliasCache(client) - - first := true - var lastLineCount int - for { - payment, errr := stream.Recv() - if errr != nil { - return nil, er.E(errr) - } - - if json { - // Delimit json messages by newlines (inspired by - // grpc over rest chunking). - if first { - first = false - } else { - fmt.Println() - } - - // Write raw json to stdout. - printRespJSON(payment) - } else { - table := formatPayment(payment, aliases) - - // Clear all previously written lines and print the - // updated table. - clearLines(lastLineCount) - fmt.Print(table) - - // Store the number of lines written for the next update - // pass. - lastLineCount = 0 - for _, b := range table { - if b == '\n' { - lastLineCount++ - } - } - } - - // Terminate loop if payments state is final. - if payment.Status != lnrpc.Payment_IN_FLIGHT { - return payment, nil - } - } -} - -// aliasCache allows cached retrieval of node aliases. -type aliasCache struct { - cache map[string]string - client lnrpc.LightningClient -} - -func newAliasCache(client lnrpc.LightningClient) *aliasCache { - return &aliasCache{ - client: client, - cache: make(map[string]string), - } -} - -// get returns a node alias either from cache or freshly requested from lnd. -func (a *aliasCache) get(pubkey string) string { - alias, ok := a.cache[pubkey] - if ok { - return alias - } - - // Request node info. - resp, err := a.client.GetNodeInfo( - context.Background(), - &lnrpc.NodeInfoRequest{ - PubKey: pubkey, - }, - ) - if err != nil { - // If no info is available, use the - // pubkey as identifier. 
- alias = pubkey[:6] - } else { - alias = resp.Node.Alias - } - a.cache[pubkey] = alias - - return alias -} - -// formatMsat formats msat amounts as fractional sats. -func formatMsat(amt int64) string { - return strconv.FormatFloat(float64(amt)/1000.0, 'f', -1, 64) -} - -// formatPayment formats the payment state as an ascii table. -func formatPayment(payment *lnrpc.Payment, aliases *aliasCache) string { - t := table.NewWriter() - - // Build table header. - t.AppendHeader(table.Row{ - "HTLC_STATE", "ATTEMPT_TIME", "RESOLVE_TIME", "RECEIVER_AMT", - "FEE", "TIMELOCK", "CHAN_OUT", "ROUTE", - }) - t.SetColumnConfigs([]table.ColumnConfig{ - {Name: "ATTEMPT_TIME", Align: text.AlignRight}, - {Name: "RESOLVE_TIME", Align: text.AlignRight}, - {Name: "CHAN_OUT", Align: text.AlignLeft, - AlignHeader: text.AlignLeft}, - }) - - // Add all htlcs as rows. - createTime := time.Unix(0, payment.CreationTimeNs) - var totalPaid, totalFees int64 - for _, htlc := range payment.Htlcs { - formatTime := func(timeNs int64) string { - if timeNs == 0 { - return "-" - } - resolveTime := time.Unix(0, timeNs) - resolveTimeDiff := resolveTime.Sub(createTime) - resolveTimeMs := resolveTimeDiff / time.Millisecond - return fmt.Sprintf( - "%.3f", float64(resolveTimeMs)/1000.0, - ) - } - - attemptTime := formatTime(htlc.AttemptTimeNs) - resolveTime := formatTime(htlc.ResolveTimeNs) - - route := htlc.Route - lastHop := route.Hops[len(route.Hops)-1] - - hops := []string{} - for _, h := range route.Hops { - alias := aliases.get(h.PubKey) - hops = append(hops, alias) - } - - state := htlc.Status.String() - if htlc.Failure != nil { - state = fmt.Sprintf( - "%v @ %v", - htlc.Failure.Code, - htlc.Failure.FailureSourceIndex, - ) - } - - t.AppendRow([]interface{}{ - state, attemptTime, resolveTime, - formatMsat(lastHop.AmtToForwardMsat), - formatMsat(route.TotalFeesMsat), - route.TotalTimeLock, route.Hops[0].ChanId, - strings.Join(hops, "->")}, - ) - - if htlc.Status == lnrpc.HTLCAttempt_SUCCEEDED { - 
totalPaid += lastHop.AmtToForwardMsat - totalFees += route.TotalFeesMsat - } - } - - // Render table. - b := &bytes.Buffer{} - t.SetOutputMirror(b) - t.Render() - - // Add additional payment-level data. - fmt.Fprintf(b, "Amount + fee: %v + %v sat\n", - formatMsat(totalPaid), formatMsat(totalFees)) - fmt.Fprintf(b, "Payment hash: %v\n", payment.PaymentHash) - fmt.Fprintf(b, "Payment status: %v", payment.Status) - switch payment.Status { - case lnrpc.Payment_SUCCEEDED: - fmt.Fprintf(b, ", preimage: %v", payment.PaymentPreimage) - case lnrpc.Payment_FAILED: - fmt.Fprintf(b, ", reason: %v", payment.FailureReason) - } - fmt.Fprintf(b, "\n") - - return b.String() -} - -var payInvoiceCommand = cli.Command{ - Name: "payinvoice", - Category: "Payments", - Usage: "Pay an invoice over lightning.", - ArgsUsage: "pay_req", - Flags: append(paymentFlags(), - cli.Int64Flag{ - Name: "amt", - Usage: "(optional) number of satoshis to fulfill the " + - "invoice", - }, - ), - Action: actionDecorator(payInvoice), -} - -func payInvoice(ctx *cli.Context) er.R { - args := ctx.Args() - - var payReq string - switch { - case ctx.IsSet("pay_req"): - payReq = ctx.String("pay_req") - case args.Present(): - payReq = args.First() - default: - return er.Errorf("pay_req argument missing") - } - - req := &routerrpc.SendPaymentRequest{ - PaymentRequest: payReq, - Amt: ctx.Int64("amt"), - DestCustomRecords: make(map[uint64][]byte), - } - - return sendPaymentRequest(ctx, req) -} - -var sendToRouteCommand = cli.Command{ - Name: "sendtoroute", - Category: "Payments", - Usage: "Send a payment over a predefined route.", - Description: ` - Send a payment over Lightning using a specific route. One must specify - the route to attempt and the payment hash. This command can even - be chained with the response to queryroutes or buildroute. This command - can be used to implement channel rebalancing by crafting a self-route, - or even atomic swaps using a self-route that crosses multiple chains. 
- - There are three ways to specify a route: - * using the --routes parameter to manually specify a JSON encoded - route in the format of the return value of queryroutes or - buildroute: - (lncli sendtoroute --payment_hash= --routes=) - - * passing the route as a positional argument: - (lncli sendtoroute --payment_hash=pay_hash ) - - * or reading in the route from stdin, which can allow chaining the - response from queryroutes or buildroute, or even read in a file - with a pre-computed route: - (lncli queryroutes --args.. | lncli sendtoroute --payment_hash= - - - notice the '-' at the end, which signals that lncli should read - the route in from stdin - `, - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "payment_hash, pay_hash", - Usage: "the hash to use within the payment's HTLC", - }, - cli.StringFlag{ - Name: "routes, r", - Usage: "a json array string in the format of the response " + - "of queryroutes that denotes which routes to use", - }, - }, - Action: sendToRoute, -} - -func sendToRoute(ctx *cli.Context) er.R { - // Show command help if no arguments provided. - if ctx.NArg() == 0 && ctx.NumFlags() == 0 { - _ = cli.ShowCommandHelp(ctx, "sendtoroute") - return nil - } - - args := ctx.Args() - - var ( - rHash []byte - err er.R - ) - switch { - case ctx.IsSet("payment_hash"): - rHash, err = util.DecodeHex(ctx.String("payment_hash")) - case args.Present(): - rHash, err = util.DecodeHex(args.First()) - - args = args.Tail() - default: - return er.Errorf("payment hash argument missing") - } - - if err != nil { - return err - } - - if len(rHash) != 32 { - return er.Errorf("payment hash must be exactly 32 "+ - "bytes, is instead %d", len(rHash)) - } - - var jsonRoutes string - switch { - // The user is specifying the routes explicitly via the key word - // argument. - case ctx.IsSet("routes"): - jsonRoutes = ctx.String("routes") - - // The user is specifying the routes as a positional argument. 
- case args.Present() && args.First() != "-": - jsonRoutes = args.First() - - // The user is signalling that we should read stdin in order to parse - // the set of target routes. - case args.Present() && args.First() == "-": - b, err := ioutil.ReadAll(os.Stdin) - if err != nil { - return er.E(err) - } - if len(b) == 0 { - return er.Errorf("queryroutes output is empty") - } - - jsonRoutes = string(b) - } - - // Try to parse the provided json both in the legacy QueryRoutes format - // that contains a list of routes and the single route BuildRoute - // format. - var route *lnrpc.Route - routes := &lnrpc.QueryRoutesResponse{} - errr := jsonpb.UnmarshalString(jsonRoutes, routes) - if errr == nil { - if len(routes.Routes) == 0 { - return er.Errorf("no routes provided") - } - - if len(routes.Routes) != 1 { - return er.Errorf("expected a single route, but got %v", - len(routes.Routes)) - } - - route = routes.Routes[0] - } else { - routes := &routerrpc.BuildRouteResponse{} - errr = jsonpb.UnmarshalString(jsonRoutes, routes) - if errr != nil { - return er.Errorf("unable to unmarshal json string "+ - "from incoming array of routes: %v", errr) - } - - route = routes.Route - } - - req := &routerrpc.SendToRouteRequest{ - PaymentHash: rHash, - Route: route, - } - - return sendToRouteRequest(ctx, req) -} - -func sendToRouteRequest(ctx *cli.Context, req *routerrpc.SendToRouteRequest) er.R { - conn := getClientConn(ctx, false) - defer conn.Close() - - client := routerrpc.NewRouterClient(conn) - - resp, errr := client.SendToRouteV2(context.Background(), req) - if errr != nil { - return er.E(errr) - } - - printRespJSON(resp) - - return nil -} - -// ESC is the ASCII code for escape character -const ESC = 27 - -// clearCode defines a terminal escape code to clear the currently line and move -// the cursor up. -var clearCode = fmt.Sprintf("%c[%dA%c[2K", ESC, 1, ESC) - -// clearLines erases the last count lines in the terminal window. 
-func clearLines(count int) { - _, _ = fmt.Print(strings.Repeat(clearCode, count)) -} diff --git a/lnd/cmd/lncli/cmd_profile.go b/lnd/cmd/lncli/cmd_profile.go deleted file mode 100644 index f550cb09..00000000 --- a/lnd/cmd/lncli/cmd_profile.go +++ /dev/null @@ -1,450 +0,0 @@ -package main - -import ( - "fmt" - "io/ioutil" - "os" - "path" - "strings" - - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/lncfg" - "github.com/urfave/cli" - "gopkg.in/macaroon.v2" -) - -var ( - // defaultLncliDir is the default directory to store the profile file - // in. This defaults to: - // C:\Users\\AppData\Local\Lncli\ on Windows - // ~/.lncli/ on Linux - // ~/Library/Application Support/Lncli/ on MacOS - defaultLncliDir = btcutil.AppDataDir("lncli", false) - - // defaultProfileFile is the full, absolute path of the profile file. - defaultProfileFile = path.Join(defaultLncliDir, "profiles.json") -) - -var profileSubCommand = cli.Command{ - Name: "profile", - Category: "Profiles", - Usage: "Create and manage lncli profiles", - Description: ` - Profiles for lncli are an easy and comfortable way to manage multiple - nodes from the command line by storing node specific parameters like RPC - host, network, TLS certificate path or macaroons in a named profile. - - To use a predefined profile, just use the '--profile=myprofile' (or - short version '-p=myprofile') with any lncli command. - - A default profile can also be defined, lncli will then always use the - connection/node parameters from that profile instead of the default - values. - - WARNING: Setting a default profile changes the default behavior of - lncli! To disable the use of the default profile for a single command, - set '--profile= '. 
- - The profiles are stored in a file called profiles.json in the user's - home directory, for example: - C:\Users\\AppData\Local\Lncli\profiles.json on Windows - ~/.lncli/profiles.json on Linux - ~/Library/Application Support/Lncli/profiles.json on MacOS - `, - Subcommands: []cli.Command{ - profileListCommand, - profileAddCommand, - profileRemoveCommand, - profileSetDefaultCommand, - profileUnsetDefaultCommand, - profileAddMacaroonCommand, - }, -} - -var profileListCommand = cli.Command{ - Name: "list", - Usage: "Lists all lncli profiles", - Action: profileList, -} - -func profileList(_ *cli.Context) er.R { - f, err := loadProfileFile(defaultProfileFile) - if err != nil { - return err - } - - printJSON(f) - return nil -} - -var profileAddCommand = cli.Command{ - Name: "add", - Usage: "Add a new profile", - ArgsUsage: "name", - Description: ` - Add a new named profile to the main profiles.json. All global options - (see 'lncli --help') passed into this command are stored in that named - profile. - `, - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "name", - Usage: "the name of the new profile", - }, - cli.BoolFlag{ - Name: "default", - Usage: "set the new profile to be the default profile", - }, - }, - Action: profileAdd, -} - -func profileAdd(ctx *cli.Context) er.R { - if ctx.NArg() == 0 && ctx.NumFlags() == 0 { - return er.E(cli.ShowCommandHelp(ctx, "add")) - } - - // Load the default profile file or create a new one if it doesn't exist - // yet. - f, err := loadProfileFile(defaultProfileFile) - switch { - case errNoProfileFile.Is(err): - f = &profileFile{} - _ = os.MkdirAll(path.Dir(defaultProfileFile), 0700) - - case err != nil: - return err - } - - // Create a profile struct from all the global options. - profile, err := profileFromContext(ctx, true, false) - if err != nil { - return er.Errorf("could not load global options: %v", err) - } - - // Finally, all that's left is to get the profile name from either - // positional argument or flag. 
- args := ctx.Args() - switch { - case ctx.IsSet("name"): - profile.Name = ctx.String("name") - case args.Present(): - profile.Name = args.First() - default: - return er.Errorf("name argument missing") - } - - // Is there already a profile with that name? - for _, p := range f.Profiles { - if p.Name == profile.Name { - return er.Errorf("a profile with the name %s already "+ - "exists", profile.Name) - } - } - - // Do we need to update the default entry to be this one? - if ctx.Bool("default") { - f.Default = profile.Name - } - - // All done, store the updated profile file. - f.Profiles = append(f.Profiles, profile) - if err = saveProfileFile(defaultProfileFile, f); err != nil { - return er.Errorf("error writing profile file %s: %v", - defaultProfileFile, err) - } - - fmt.Printf("Profile %s added to file %s.\n", profile.Name, - defaultProfileFile) - return nil -} - -var profileRemoveCommand = cli.Command{ - Name: "remove", - Usage: "Remove a profile", - ArgsUsage: "name", - Description: `Remove the specified profile from the profile file.`, - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "name", - Usage: "the name of the profile to delete", - }, - }, - Action: profileRemove, -} - -func profileRemove(ctx *cli.Context) er.R { - if ctx.NArg() == 0 && ctx.NumFlags() == 0 { - return er.E(cli.ShowCommandHelp(ctx, "remove")) - } - - // Load the default profile file. - f, err := loadProfileFile(defaultProfileFile) - if err != nil { - return er.Errorf("could not load profile file: %v", err) - } - - // Get the profile name from either positional argument or flag. - var ( - args = ctx.Args() - name string - found = false - ) - switch { - case ctx.IsSet("name"): - name = ctx.String("name") - case args.Present(): - name = args.First() - default: - return er.Errorf("name argument missing") - } - - // Create a copy of all profiles but don't include the one to delete. 
- newProfiles := make([]*profileEntry, 0, len(f.Profiles)-1) - for _, p := range f.Profiles { - // Skip the one we want to delete. - if p.Name == name { - found = true - - if p.Name == f.Default { - fmt.Println("Warning: removing default profile.") - } - continue - } - - // Keep all others. - newProfiles = append(newProfiles, p) - } - - // If what we were looking for didn't exist in the first place, there's - // no need for updating the file. - if !found { - return er.Errorf("profile with name %s not found in file", - name) - } - - // Great, everything updated, now let's save the file. - f.Profiles = newProfiles - return saveProfileFile(defaultProfileFile, f) -} - -var profileSetDefaultCommand = cli.Command{ - Name: "setdefault", - Usage: "Set the default profile", - ArgsUsage: "name", - Description: ` - Set a specified profile to be used as the default profile. - - WARNING: Setting a default profile changes the default behavior of - lncli! To disable the use of the default profile for a single command, - set '--profile= '. - `, - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "name", - Usage: "the name of the profile to set as default", - }, - }, - Action: profileSetDefault, -} - -func profileSetDefault(ctx *cli.Context) er.R { - if ctx.NArg() == 0 && ctx.NumFlags() == 0 { - return er.E(cli.ShowCommandHelp(ctx, "setdefault")) - } - - // Load the default profile file. - f, err := loadProfileFile(defaultProfileFile) - if err != nil { - return er.Errorf("could not load profile file: %v", err) - } - - // Get the profile name from either positional argument or flag. - var ( - args = ctx.Args() - name string - found = false - ) - switch { - case ctx.IsSet("name"): - name = ctx.String("name") - case args.Present(): - name = args.First() - default: - return er.Errorf("name argument missing") - } - - // Make sure the new default profile actually exists. 
- for _, p := range f.Profiles { - if p.Name == name { - found = true - f.Default = p.Name - - break - } - } - - // If the default profile doesn't exist, there's no need for updating - // the file. - if !found { - return er.Errorf("profile with name %s not found in file", - name) - } - - // Great, everything updated, now let's save the file. - return saveProfileFile(defaultProfileFile, f) -} - -var profileUnsetDefaultCommand = cli.Command{ - Name: "unsetdefault", - Usage: "Unsets the default profile", - Description: ` - Disables the use of a default profile and restores lncli to its original - behavior. - `, - Action: profileUnsetDefault, -} - -func profileUnsetDefault(_ *cli.Context) er.R { - // Load the default profile file. - f, err := loadProfileFile(defaultProfileFile) - if err != nil { - return er.Errorf("could not load profile file: %v", err) - } - - // Save the file with the flag disabled. - f.Default = "" - return saveProfileFile(defaultProfileFile, f) -} - -var profileAddMacaroonCommand = cli.Command{ - Name: "addmacaroon", - Usage: "Add a macaroon to a profile's macaroon jar", - ArgsUsage: "macaroon-name", - Description: ` - Add an additional macaroon specified by the global option --macaroonpath - to an existing profile's macaroon jar. - - If no profile is selected, the macaroon is added to the default profile - (if one exists). To add a macaroon to a specific profile, use the global - --profile=myprofile option. - - If multiple macaroons exist in a profile's macaroon jar, the one to use - can be specified with the global option --macfromjar=xyz. 
- `, - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "name", - Usage: "the name of the macaroon", - }, - cli.BoolFlag{ - Name: "default", - Usage: "set the new macaroon to be the default " + - "macaroon in the jar", - }, - }, - Action: profileAddMacaroon, -} - -func profileAddMacaroon(ctx *cli.Context) er.R { - if ctx.NArg() == 0 && ctx.NumFlags() == 0 { - return er.E(cli.ShowCommandHelp(ctx, "addmacaroon")) - } - - // Load the default profile file or create a new one if it doesn't exist - // yet. - f, err := loadProfileFile(defaultProfileFile) - if err != nil { - return er.Errorf("could not load profile file: %v", err) - } - - // Finally, all that's left is to get the profile name from either - // positional argument or flag. - var ( - args = ctx.Args() - profileName string - macName string - ) - switch { - case ctx.IsSet("name"): - macName = ctx.String("name") - case args.Present(): - macName = args.First() - default: - return er.Errorf("name argument missing") - } - - // Make sure the user actually set a macaroon path to use. - if !ctx.GlobalIsSet("macaroonpath") { - return er.Errorf("macaroonpath global option missing") - } - - // Find out which profile we should add the macaroon. The global flag - // takes precedence over the default profile. - if f.Default != "" { - profileName = f.Default - } - if ctx.GlobalIsSet("profile") { - profileName = ctx.GlobalString("profile") - } - if len(strings.TrimSpace(profileName)) == 0 { - return er.Errorf("no profile specified and no default " + - "profile exists") - } - - // Is there a profile with that name? - var selectedProfile *profileEntry - for _, p := range f.Profiles { - if p.Name == profileName { - selectedProfile = p - break - } - } - if selectedProfile == nil { - return er.Errorf("profile with name %s not found", profileName) - } - - // Does a macaroon with that name already exist? 
- for _, m := range selectedProfile.Macaroons.Jar { - if m.Name == macName { - return er.Errorf("a macaroon with the name %s "+ - "already exists", macName) - } - } - - // Do we need to update the default entry to be this one? - if ctx.Bool("default") { - selectedProfile.Macaroons.Default = macName - } - - // Now load and possibly encrypt the macaroon file. - macPath := lncfg.CleanAndExpandPath(ctx.GlobalString("macaroonpath")) - macBytes, errr := ioutil.ReadFile(macPath) - if errr != nil { - return er.Errorf("unable to read macaroon path: %v", errr) - } - mac := &macaroon.Macaroon{} - if errr = mac.UnmarshalBinary(macBytes); errr != nil { - return er.Errorf("unable to decode macaroon: %v", errr) - } - macEntry := &macaroonEntry{ - Name: macName, - } - if err = macEntry.storeMacaroon(mac, nil); err != nil { - return er.Errorf("unable to store macaroon: %v", err) - } - - // All done, store the updated profile file. - selectedProfile.Macaroons.Jar = append( - selectedProfile.Macaroons.Jar, macEntry, - ) - if err = saveProfileFile(defaultProfileFile, f); err != nil { - return er.Errorf("error writing profile file %s: %v", - defaultProfileFile, err) - } - - fmt.Printf("Macaroon %s added to profile %s in file %s.\n", macName, - selectedProfile.Name, defaultProfileFile) - return nil -} diff --git a/lnd/cmd/lncli/cmd_query_mission_control.go b/lnd/cmd/lncli/cmd_query_mission_control.go deleted file mode 100644 index 822abcf1..00000000 --- a/lnd/cmd/lncli/cmd_query_mission_control.go +++ /dev/null @@ -1,35 +0,0 @@ -package main - -import ( - "context" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/lnrpc/routerrpc" - - "github.com/urfave/cli" -) - -var queryMissionControlCommand = cli.Command{ - Name: "querymc", - Category: "Payments", - Usage: "Query the internal mission control state.", - Action: actionDecorator(queryMissionControl), -} - -func queryMissionControl(ctx *cli.Context) er.R { - conn := getClientConn(ctx, false) - defer conn.Close() 
- - client := routerrpc.NewRouterClient(conn) - - req := &routerrpc.QueryMissionControlRequest{} - rpcCtx := context.Background() - snapshot, errr := client.QueryMissionControl(rpcCtx, req) - if errr != nil { - return er.E(errr) - } - - printRespJSON(snapshot) - - return nil -} diff --git a/lnd/cmd/lncli/cmd_query_probability.go b/lnd/cmd/lncli/cmd_query_probability.go deleted file mode 100644 index 37db1359..00000000 --- a/lnd/cmd/lncli/cmd_query_probability.go +++ /dev/null @@ -1,68 +0,0 @@ -package main - -import ( - "context" - "strconv" - - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/lnrpc/routerrpc" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/routing/route" - "github.com/urfave/cli" -) - -var queryProbCommand = cli.Command{ - Name: "queryprob", - Category: "Payments", - Usage: "Estimate a success probability.", - ArgsUsage: "from-node to-node amt", - Action: actionDecorator(queryProb), -} - -func queryProb(ctx *cli.Context) er.R { - args := ctx.Args() - - if len(args) != 3 { - return er.E(cli.ShowCommandHelp(ctx, "queryprob")) - } - - fromNode, err := route.NewVertexFromStr(args.Get(0)) - if err != nil { - return er.Errorf("invalid from node key: %v", err) - } - - toNode, err := route.NewVertexFromStr(args.Get(1)) - if err != nil { - return er.Errorf("invalid to node key: %v", err) - } - - amtSat, errr := strconv.ParseUint(args.Get(2), 10, 64) - if errr != nil { - return er.Errorf("invalid amt: %v", errr) - } - - amtMsat := lnwire.NewMSatFromSatoshis( - btcutil.Amount(amtSat), - ) - - conn := getClientConn(ctx, false) - defer conn.Close() - - client := routerrpc.NewRouterClient(conn) - - req := &routerrpc.QueryProbabilityRequest{ - FromNode: fromNode[:], - ToNode: toNode[:], - AmtMsat: int64(amtMsat), - } - rpcCtx := context.Background() - response, errr := client.QueryProbability(rpcCtx, req) - if errr != nil { - return er.E(errr) - } - - printRespJSON(response) - - 
return nil -} diff --git a/lnd/cmd/lncli/cmd_reset_mission_control.go b/lnd/cmd/lncli/cmd_reset_mission_control.go deleted file mode 100644 index 95b02d77..00000000 --- a/lnd/cmd/lncli/cmd_reset_mission_control.go +++ /dev/null @@ -1,29 +0,0 @@ -package main - -import ( - "context" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/lnrpc/routerrpc" - - "github.com/urfave/cli" -) - -var resetMissionControlCommand = cli.Command{ - Name: "resetmc", - Category: "Payments", - Usage: "Reset internal mission control state.", - Action: actionDecorator(resetMissionControl), -} - -func resetMissionControl(ctx *cli.Context) er.R { - conn := getClientConn(ctx, false) - defer conn.Close() - - client := routerrpc.NewRouterClient(conn) - - req := &routerrpc.ResetMissionControlRequest{} - rpcCtx := context.Background() - _, errr := client.ResetMissionControl(rpcCtx, req) - return er.E(errr) -} diff --git a/lnd/cmd/lncli/cmd_version.go b/lnd/cmd/lncli/cmd_version.go deleted file mode 100644 index 399676fc..00000000 --- a/lnd/cmd/lncli/cmd_version.go +++ /dev/null @@ -1,51 +0,0 @@ -package main - -import ( - "context" - "fmt" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/lnrpc/lnclipb" - "github.com/pkt-cash/pktd/lnd/lnrpc/verrpc" - "github.com/pkt-cash/pktd/pktconfig/version" - "github.com/urfave/cli" -) - -var versionCommand = cli.Command{ - Name: "version", - Usage: "Display lncli and lnd version info.", - Description: ` - Returns version information about both lncli and lnd. If lncli is unable - to connect to lnd, the command fails but still prints the lncli version. 
- `, - Action: actionDecorator(v), -} - -func v(ctx *cli.Context) er.R { - conn := getClientConn(ctx, false) - defer conn.Close() - - versions := &lnclipb.VersionResponse{ - Lncli: &verrpc.Version{ - Version: version.Version(), - AppMajor: uint32(version.AppMajorVersion()), - AppMinor: uint32(version.AppMinorVersion()), - AppPatch: uint32(version.AppPatchVersion()), - AppPreRelease: fmt.Sprintf("%v", version.IsPrerelease()), - }, - } - - client := verrpc.NewVersionerClient(conn) - - ctxb := context.Background() - lndVersion, err := client.GetVersion(ctxb, &verrpc.VersionRequest{}) - if err != nil { - printRespJSON(versions) - return er.Errorf("unable fetch version from lnd: %v", err) - } - versions.Lnd = lndVersion - - printRespJSON(versions) - - return nil -} diff --git a/lnd/cmd/lncli/cmd_walletunlocker.go b/lnd/cmd/lncli/cmd_walletunlocker.go deleted file mode 100644 index 850ac8a1..00000000 --- a/lnd/cmd/lncli/cmd_walletunlocker.go +++ /dev/null @@ -1,663 +0,0 @@ -package main - -import ( - "bufio" - "bytes" - "context" - "encoding/hex" - "fmt" - "io/ioutil" - "os" - "strconv" - "strings" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/lncfg" - "github.com/pkt-cash/pktd/lnd/lnrpc" - "github.com/pkt-cash/pktd/lnd/walletunlocker" - "github.com/pkt-cash/pktd/pktwallet/wallet/seedwords" - "github.com/urfave/cli" -) - -var ( - statelessInitFlag = cli.BoolFlag{ - Name: "stateless_init", - Usage: "do not create any macaroon files in the file " + - "system of the daemon", - } - saveToFlag = cli.StringFlag{ - Name: "save_to", - Usage: "save returned admin macaroon to this file", - } -) - -var createCommand = cli.Command{ - Name: "create", - Category: "Startup", - Usage: "Initialize a wallet when starting lnd for the first time.", - Description: ` - The create command is used to initialize an lnd wallet from scratch for - the very first time. 
This is interactive command with one required - argument (the password), and one optional argument (the mnemonic - passphrase). - - The first argument (the password) is required and MUST be greater than - 8 characters. This will be used to encrypt the wallet within lnd. This - MUST be remembered as it will be required to fully start up the daemon. - - The second argument is an optional 24-word mnemonic derived from BIP - 39. If provided, then the internal wallet will use the seed derived - from this mnemonic to generate all keys. - - This command returns a 24-word seed in the scenario that NO mnemonic - was provided by the user. This should be written down as it can be used - to potentially recover all on-chain funds, and most off-chain funds as - well. - - If the --stateless_init flag is set, no macaroon files are created by - the daemon. Instead, the binary serialized admin macaroon is returned - in the answer. This answer MUST be stored somewhere, otherwise all - access to the RPC server will be lost and the wallet must be recreated - to re-gain access. - If the --save_to parameter is set, the macaroon is saved to this file, - otherwise it is printed to standard out. - - Finally, it's also possible to use this command and a set of static - channel backups to trigger a recover attempt for the provided Static - Channel Backups. Only one of the three parameters will be accepted. See - the restorechanbackup command for further details w.r.t the format - accepted. 
- `, - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "single_backup", - Usage: "a hex encoded single channel backup obtained " + - "from exportchanbackup", - }, - cli.StringFlag{ - Name: "multi_backup", - Usage: "a hex encoded multi-channel backup obtained " + - "from exportchanbackup", - }, - cli.StringFlag{ - Name: "multi_file", - Usage: "the path to a multi-channel back up file", - }, - statelessInitFlag, - saveToFlag, - }, - Action: actionDecorator(create), -} - -// monowidthColumns takes a set of words, and the number of desired columns, -// and returns a new set of words that have had white space appended to the -// word in order to create a mono-width column. -func monowidthColumns(words []string, ncols int) []string { - // Determine max size of words in each column. - colWidths := make([]int, ncols) - for i, word := range words { - col := i % ncols - curWidth := colWidths[col] - if len(word) > curWidth { - colWidths[col] = len(word) - } - } - - // Append whitespace to each word to make columns mono-width. - finalWords := make([]string, len(words)) - for i, word := range words { - col := i % ncols - width := colWidths[col] - - diff := width - len(word) - finalWords[i] = word + strings.Repeat(" ", diff) - } - - return finalWords -} - -func create(ctx *cli.Context) er.R { - ctxb := context.Background() - client, cleanUp := getWalletUnlockerClient(ctx) - defer cleanUp() - - var ( - chanBackups *lnrpc.ChanBackupSnapshot - - // We use var restoreSCB to track if we will be including an SCB - // recovery in the init wallet request. - restoreSCB = false - ) - - backups, err := parseChanBackups(ctx) - - // We'll check to see if the user provided any static channel backups (SCB), - // if so, we will warn the user that SCB recovery closes all open channels - // and ask them to confirm their intention. - // If the user agrees, we'll add the SCB recovery onto the final init wallet - // request. 
- switch { - // parseChanBackups returns an errMissingBackup error (which we ignore) if - // the user did not request a SCB recovery. - case errMissingChanBackup.Is(err): - - // Passed an invalid channel backup file. - case err != nil: - return er.Errorf("unable to parse chan backups: %v", err) - - // We have an SCB recovery option with a valid backup file. - default: - - warningLoop: - for { - - fmt.Println() - fmt.Printf("WARNING: You are attempting to restore from a " + - "static channel backup (SCB) file.\nThis action will CLOSE " + - "all currently open channels, and you will pay on-chain fees." + - "\n\nAre you sure you want to recover funds from a" + - " static channel backup? (Enter y/n): ") - - reader := bufio.NewReader(os.Stdin) - answer, err := reader.ReadString('\n') - if err != nil { - return er.E(err) - } - - answer = strings.TrimSpace(answer) - answer = strings.ToLower(answer) - - switch answer { - case "y": - restoreSCB = true - break warningLoop - case "n": - fmt.Println("Aborting SCB recovery") - return nil - } - } - } - - // Proceed with SCB recovery. - if restoreSCB { - fmt.Println("Static Channel Backup (SCB) recovery selected!") - if backups != nil { - switch { - case backups.GetChanBackups() != nil: - singleBackup := backups.GetChanBackups() - chanBackups = &lnrpc.ChanBackupSnapshot{ - SingleChanBackups: singleBackup, - } - - case backups.GetMultiChanBackup() != nil: - multiBackup := backups.GetMultiChanBackup() - chanBackups = &lnrpc.ChanBackupSnapshot{ - MultiChanBackup: &lnrpc.MultiChanBackup{ - MultiChanBackup: multiBackup, - }, - } - } - } - } - - // Should the daemon be initialized stateless? Then we expect an answer - // with the admin macaroon later. Because the --save_to is related to - // stateless init, it doesn't make sense to be set on its own. 
- statelessInit := ctx.Bool(statelessInitFlag.Name) - if !statelessInit && ctx.IsSet(saveToFlag.Name) { - return er.Errorf("cannot set save_to parameter without " + - "stateless_init") - } - - walletPassword, err := capturePassword( - "Input wallet password: ", false, walletunlocker.ValidatePassword, - ) - if err != nil { - return err - } - - // Next, we'll see if the user has 24-word mnemonic they want to use to - // derive a seed within the wallet. - var ( - hasMnemonic bool - ) - -mnemonicCheck: - for { - fmt.Println() - fmt.Printf("Do you have an existing Pktwallet seed " + - "you want to use? (Enter y/n): ") - - reader := bufio.NewReader(os.Stdin) - answer, errr := reader.ReadString('\n') - if errr != nil { - return er.E(errr) - } - - fmt.Println() - - answer = strings.TrimSpace(answer) - answer = strings.ToLower(answer) - - switch answer { - case "y": - hasMnemonic = true - break mnemonicCheck - case "n": - hasMnemonic = false - break mnemonicCheck - } - } - - // If the user *does* have an existing seed they want to use, then - // we'll read that in directly from the terminal. - var ( - cipherSeedMnemonic []string - aezeedPass []byte - recoveryWindow int32 - ) - if hasMnemonic { - fmt.Printf("Input your 15-word Pktwallet seed separated by spaces: ") - reader := bufio.NewReader(os.Stdin) - mnemonic, errr := reader.ReadString('\n') - if errr != nil { - return er.E(errr) - } - - // We'll trim off extra spaces, and ensure the mnemonic is all - // lower case, then populate our request. 
- mnemonic = strings.TrimSpace(mnemonic) - mnemonic = strings.ToLower(mnemonic) - - cipherSeedMnemonic = strings.Split(mnemonic, " ") - - fmt.Println() - - if len(cipherSeedMnemonic) != 15 { - return er.Errorf("wrong cipher seed mnemonic "+ - "length: got %v words, expecting %v words", - len(cipherSeedMnemonic), 15) - } - - seedEnc, err := seedwords.SeedFromWords(mnemonic) - if err != nil { - return err - } - if seedEnc.NeedsPassphrase() { - aezeedPass, err = readPassword("This seed is encrypted " + - "with a passphrase please enter it now: ") - } - - /// This should be automatic - // for { - // fmt.Println() - // fmt.Printf("Input an optional address look-ahead "+ - // "used to scan for used keys (default %d): ", - // defaultRecoveryWindow) - - // reader := bufio.NewReader(os.Stdin) - // answer, errr := reader.ReadString('\n') - // if errr != nil { - // return er.E(errr) - // } - - // fmt.Println() - - // answer = strings.TrimSpace(answer) - - // if len(answer) == 0 { - // recoveryWindow = defaultRecoveryWindow - // break - // } - - // lookAhead, err := strconv.Atoi(answer) - // if err != nil { - // fmt.Printf("Unable to parse recovery "+ - // "window: %v\n", err) - // continue - // } - - // recoveryWindow = int32(lookAhead) - // break - // } - } else { - // Otherwise, if the user doesn't have a mnemonic that they - // want to use, we'll generate a fresh one with the GenSeed - // command. 
- fmt.Println("Your cipher seed can optionally be encrypted.") - - instruction := "Input your passphrase if you wish to encrypt it " + - "(or press enter to proceed without a cipher seed " + - "passphrase): " - aezeedPass, err = capturePassword( - instruction, true, func(_ []byte) er.R { return nil }, - ) - if err != nil { - return err - } - - fmt.Println() - fmt.Println("Generating fresh cipher seed...") - fmt.Println() - - genSeedReq := &lnrpc.GenSeedRequest{ - AezeedPassphrase: aezeedPass, - } - seedResp, err := client.GenSeed(ctxb, genSeedReq) - if err != nil { - return er.Errorf("unable to generate seed: %v", err) - } - - cipherSeedMnemonic = seedResp.CipherSeedMnemonic - } - - // Before we initialize the wallet, we'll display the cipher seed to - // the user so they can write it down. - - fmt.Println("!!!YOU MUST WRITE DOWN THIS SEED AND YOUR PASSWORD TO BE ABLE TO " + - "RESTORE THE WALLET!!!") - fmt.Println() - - fmt.Println("---------------BEGIN LND CIPHER SEED---------------") - - fmt.Printf("%v\n", strings.Join(cipherSeedMnemonic, " ")) - - fmt.Println("---------------END LND CIPHER SEED-----------------") - - fmt.Println("\n!!!YOU MUST WRITE DOWN THIS SEED AND YOUR PASSWORD TO BE ABLE TO " + - "RESTORE THE WALLET!!!") - - // With either the user's prior cipher seed, or a newly generated one, - // we'll go ahead and initialize the wallet. 
- req := &lnrpc.InitWalletRequest{ - WalletPassword: walletPassword, - CipherSeedMnemonic: cipherSeedMnemonic, - AezeedPassphrase: aezeedPass, - RecoveryWindow: recoveryWindow, - ChannelBackups: chanBackups, - StatelessInit: statelessInit, - } - response, errr := client.InitWallet(ctxb, req) - if errr != nil { - return er.E(errr) - } - - fmt.Println("\npld successfully initialized!") - - if statelessInit { - return storeOrPrintAdminMac(ctx, response.AdminMacaroon) - } - - return nil -} - -// capturePassword returns a password value that has been entered twice by the -// user, to ensure that the user knows what password they have entered. The user -// will be prompted to retry until the passwords match. If the optional param is -// true, the function may return an empty byte array if the user opts against -// using a password. -func capturePassword(instruction string, optional bool, - validate func([]byte) er.R) ([]byte, er.R) { - - for { - password, err := readPassword(instruction) - if err != nil { - return nil, err - } - - // Do not require users to repeat password if - // it is optional and they are not using one. - if len(password) == 0 && optional { - return nil, nil - } - - // If the password provided is not valid, restart - // password capture process from the beginning. - if err := validate(password); err != nil { - fmt.Println(err.String()) - fmt.Println() - continue - } - - passwordConfirmed, err := readPassword("Confirm password: ") - if err != nil { - return nil, err - } - - if bytes.Equal(password, passwordConfirmed) { - return password, nil - } - - fmt.Println("Passwords don't match, please try again") - fmt.Println() - } -} - -var unlockCommand = cli.Command{ - Name: "unlock", - Category: "Startup", - Usage: "Unlock an encrypted wallet at startup.", - Description: ` - The unlock command is used to decrypt lnd's wallet state in order to - start up. This command MUST be run after booting up lnd before it's - able to carry out its duties. 
An exception is if a user is running with - --noseedbackup, then a default passphrase will be used. - - If the --stateless_init flag is set, no macaroon files are created by - the daemon. This should be set for every unlock if the daemon was - initially initialized stateless. Otherwise the daemon will create - unencrypted macaroon files which could leak information to the system - that the daemon runs on. - `, - Flags: []cli.Flag{ - cli.IntFlag{ - Name: "recovery_window", - Usage: "address lookahead to resume recovery rescan, " + - "value should be non-zero -- To recover all " + - "funds, this should be greater than the " + - "maximum number of consecutive, unused " + - "addresses ever generated by the wallet.", - }, - cli.BoolFlag{ - Name: "stdin", - Usage: "read password from standard input instead of " + - "prompting for it. THIS IS CONSIDERED TO " + - "BE DANGEROUS if the password is located in " + - "a file that can be read by another user. " + - "This flag should only be used in " + - "combination with some sort of password " + - "manager or secrets vault.", - }, - statelessInitFlag, - }, - Action: actionDecorator(unlock), -} - -func unlock(ctx *cli.Context) er.R { - ctxb := context.Background() - client, cleanUp := getWalletUnlockerClient(ctx) - defer cleanUp() - - var ( - pw []byte - errr error - err er.R - ) - switch { - // Read the password from standard in as if it were a file. This should - // only be used if the password is piped into lncli from some sort of - // password manager. If the user types the password instead, it will be - // echoed in the console. - case ctx.IsSet("stdin"): - reader := bufio.NewReader(os.Stdin) - pw, errr = reader.ReadBytes('\n') - - // Remove carriage return and newline characters. - pw = bytes.Trim(pw, "\r\n") - - // Read the password from a terminal by default. This requires the - // terminal to be a real tty and will fail if a string is piped into - // lncli. 
- default: - pw, err = readPassword("Input wallet password: ") - } - if err != nil { - return err - } - if errr != nil { - return er.E(errr) - } - - args := ctx.Args() - - // Parse the optional recovery window if it is specified. By default, - // the recovery window will be 0, indicating no lookahead should be - // used. - var recoveryWindow int32 - switch { - case ctx.IsSet("recovery_window"): - recoveryWindow = int32(ctx.Int64("recovery_window")) - case args.Present(): - window, errr := strconv.ParseInt(args.First(), 10, 64) - if errr != nil { - return er.E(errr) - } - recoveryWindow = int32(window) - } - - req := &lnrpc.UnlockWalletRequest{ - WalletPassword: pw, - RecoveryWindow: recoveryWindow, - StatelessInit: ctx.Bool(statelessInitFlag.Name), - } - _, errr = client.UnlockWallet(ctxb, req) - if errr != nil { - return er.E(errr) - } - - fmt.Println("\nlnd successfully unlocked!") - - // TODO(roasbeef): add ability to accept hex single and multi backups - - return nil -} - -var changePasswordCommand = cli.Command{ - Name: "changepassword", - Category: "Startup", - Usage: "Change an encrypted wallet's password at startup.", - Description: ` - The changepassword command is used to Change lnd's encrypted wallet's - password. It will automatically unlock the daemon if the password change - is successful. - - If one did not specify a password for their wallet (running lnd with - --noseedbackup), one must restart their daemon without - --noseedbackup and use this command. The "current password" field - should be left empty. - - If the daemon was originally initialized stateless, then the - --stateless_init flag needs to be set for the change password request - as well! Otherwise the daemon will generate unencrypted macaroon files - in its file system again and possibly leak sensitive information. - Changing the password will by default not change the macaroon root key - (just re-encrypt the macaroon database with the new password). 
So all - macaroons will still be valid. - If one wants to make sure that all previously created macaroons are - invalidated, a new macaroon root key can be generated by using the - --new_mac_root_key flag. - - After a successful password change with the --stateless_init flag set, - the current or new admin macaroon is returned binary serialized in the - answer. This answer MUST then be stored somewhere, otherwise - all access to the RPC server will be lost and the wallet must be re- - created to re-gain access. If the --save_to parameter is set, the - macaroon is saved to this file, otherwise it is printed to standard out. - `, - Flags: []cli.Flag{ - statelessInitFlag, - saveToFlag, - cli.BoolFlag{ - Name: "new_mac_root_key", - Usage: "rotate the macaroon root key resulting in " + - "all previously created macaroons to be " + - "invalidated", - }, - }, - Action: actionDecorator(changePassword), -} - -func changePassword(ctx *cli.Context) er.R { - ctxb := context.Background() - client, cleanUp := getWalletUnlockerClient(ctx) - defer cleanUp() - - currentPw, err := readPassword("Input current wallet password: ") - if err != nil { - return err - } - - newPw, err := readPassword("Input new wallet password: ") - if err != nil { - return err - } - - confirmPw, err := readPassword("Confirm new wallet password: ") - if err != nil { - return err - } - - if !bytes.Equal(newPw, confirmPw) { - return er.Errorf("passwords don't match") - } - - // Should the daemon be initialized stateless? Then we expect an answer - // with the admin macaroon later. Because the --save_to is related to - // stateless init, it doesn't make sense to be set on its own. 
- statelessInit := ctx.Bool(statelessInitFlag.Name) - if !statelessInit && ctx.IsSet(saveToFlag.Name) { - return er.Errorf("cannot set save_to parameter without " + - "stateless_init") - } - - req := &lnrpc.ChangePasswordRequest{ - CurrentPassword: currentPw, - NewPassword: newPw, - StatelessInit: statelessInit, - NewMacaroonRootKey: ctx.Bool("new_mac_root_key"), - } - - response, errr := client.ChangePassword(ctxb, req) - if errr != nil { - return er.E(errr) - } - - if statelessInit { - return storeOrPrintAdminMac(ctx, response.AdminMacaroon) - } - - return nil -} - -// storeOrPrintAdminMac either stores the admin macaroon to a file specified or -// prints it to standard out, depending on the user flags set. -func storeOrPrintAdminMac(ctx *cli.Context, adminMac []byte) er.R { - // The user specified the optional --save_to parameter. We'll save the - // macaroon to that file. - if ctx.IsSet("save_to") { - macSavePath := lncfg.CleanAndExpandPath(ctx.String("save_to")) - err := ioutil.WriteFile(macSavePath, adminMac, 0644) - if err != nil { - _ = os.Remove(macSavePath) - return er.E(err) - } - fmt.Printf("Admin macaroon saved to %s\n", macSavePath) - return nil - } - - // Otherwise we just print it. The user MUST store this macaroon - // somewhere so we either save it to a provided file path or just print - // it to standard output. 
- fmt.Printf("Admin macaroon: %s\n", hex.EncodeToString(adminMac)) - return nil -} diff --git a/lnd/cmd/lncli/commands.go b/lnd/cmd/lncli/commands.go deleted file mode 100644 index 2ccd8941..00000000 --- a/lnd/cmd/lncli/commands.go +++ /dev/null @@ -1,2844 +0,0 @@ -package main - -import ( - "bufio" - "bytes" - "context" - "fmt" - "io" - "io/ioutil" - "math" - "os" - "strconv" - "strings" - "sync" - "time" - - "github.com/lightninglabs/protobuf-hex-display/json" - "github.com/lightninglabs/protobuf-hex-display/jsonpb" - "github.com/lightninglabs/protobuf-hex-display/proto" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/lnd/lnrpc" - "github.com/pkt-cash/pktd/lnd/routing/route" - "github.com/pkt-cash/pktd/wire" - "github.com/urfave/cli" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// TODO(roasbeef): cli logic for supporting both positional and unix style -// arguments. - -// TODO(roasbeef): expose all fee conf targets - -const defaultRecoveryWindow int32 = 2500 - -const ( - defaultUtxoMinConf = 1 -) - -func printJSON(resp interface{}) { - b, err := json.Marshal(resp) - if err != nil { - fatal(er.E(err)) - } - - var out bytes.Buffer - json.Indent(&out, b, "", "\t") - out.WriteString("\n") - out.WriteTo(os.Stdout) -} - -func printRespJSON(resp proto.Message) { - jsonMarshaler := &jsonpb.Marshaler{ - EmitDefaults: true, - OrigName: true, - Indent: " ", - } - - jsonStr, err := jsonMarshaler.MarshalToString(resp) - if err != nil { - fmt.Println("unable to decode response: ", err) - return - } - - fmt.Println(jsonStr) -} - -// actionDecorator is used to add additional information and error handling -// to command actions. 
-func actionDecorator(f func(*cli.Context) er.R) func(*cli.Context) er.R { - return func(c *cli.Context) er.R { - if err := f(c); err != nil { - s, ok := status.FromError(er.Wrapped(err)) - - // If it's a command for the UnlockerService (like - // 'create' or 'unlock') but the wallet is already - // unlocked, then these methods aren't recognized any - // more because this service is shut down after - // successful unlock. That's why the code - // 'Unimplemented' means something different for these - // two commands. - if s.Code() == codes.Unimplemented && - (c.Command.Name == "create" || - c.Command.Name == "unlock") { - return er.Errorf("Wallet is already unlocked") - } - - // lnd might be active, but not possible to contact - // using RPC if the wallet is encrypted. If we get - // error code Unimplemented, it means that lnd is - // running, but the RPC server is not active yet (only - // WalletUnlocker server active) and most likely this - // is because of an encrypted wallet. - // exclude getinfo in order to work even when wallet is locked - if ok && s.Code() == codes.Unimplemented && c.Command.Name != "getinfo" { - return er.Errorf("Wallet is encrypted. " + - "Please unlock using 'lncli unlock', " + - "or set password using 'lncli create'" + - " if this is the first time starting " + - "lnd.") - } - return err - } - return nil - } -} - -var newAddressCommand = cli.Command{ - Name: "newaddress", - Category: "Wallet", - Usage: "Generates a new address.", - ArgsUsage: "address-type", - Description: ` - Generate a wallet new address. Address-types has to be one of: - - p2wkh: Pay to witness key hash - - np2wkh: Pay to nested witness key hash`, - Action: actionDecorator(newAddress), -} - -func newAddress(ctx *cli.Context) er.R { - client, cleanUp := getClient(ctx) - defer cleanUp() - - stringAddrType := ctx.Args().First() - - // Map the string encoded address type, to the concrete typed address - // type enum. An unrecognized address type will result in an error. 
- var addrType lnrpc.AddressType - switch stringAddrType { // TODO(roasbeef): make them ints on the cli? - case "p2wkh": - addrType = lnrpc.AddressType_WITNESS_PUBKEY_HASH - case "np2wkh": - addrType = lnrpc.AddressType_NESTED_PUBKEY_HASH - default: - return er.Errorf("invalid address type %v, support address type "+ - "are: p2wkh and np2wkh", stringAddrType) - } - - ctxb := context.Background() - addr, err := client.NewAddress(ctxb, &lnrpc.NewAddressRequest{ - Type: addrType, - }) - if err != nil { - return er.E(err) - } - - printRespJSON(addr) - return nil -} - -var estimateFeeCommand = cli.Command{ - Name: "estimatefee", - Category: "On-chain", - Usage: "Get fee estimates for sending bitcoin on-chain to multiple addresses.", - ArgsUsage: "send-json-string [--conf_target=N]", - Description: ` - Get fee estimates for sending a transaction paying the specified amount(s) to the passed address(es). - - The send-json-string' param decodes addresses and the amount to send respectively in the following format: - - '{"ExampleAddr": NumCoinsInSatoshis, "SecondAddr": NumCoins}' - `, - Flags: []cli.Flag{ - cli.Int64Flag{ - Name: "conf_target", - Usage: "(optional) the number of blocks that the transaction *should* " + - "confirm in", - }, - }, - Action: actionDecorator(estimateFees), -} - -func estimateFees(ctx *cli.Context) er.R { - var amountToAddr map[string]int64 - - jsonMap := ctx.Args().First() - if err := json.Unmarshal([]byte(jsonMap), &amountToAddr); err != nil { - return er.E(err) - } - - ctxb := context.Background() - client, cleanUp := getClient(ctx) - defer cleanUp() - - resp, err := client.EstimateFee(ctxb, &lnrpc.EstimateFeeRequest{ - AddrToAmount: amountToAddr, - TargetConf: int32(ctx.Int64("conf_target")), - }) - if err != nil { - return er.E(err) - } - - printRespJSON(resp) - return nil -} - -var txLabelFlag = cli.StringFlag{ - Name: "label", - Usage: "(optional) a label for the transaction", -} - -var sendCoinsCommand = cli.Command{ - Name: "sendcoins", - 
Category: "On-chain", - Usage: "Send bitcoin on-chain to an address.", - ArgsUsage: "addr amt", - Description: ` - Send amt coins in satoshis to the base58 or bech32 encoded bitcoin address addr. - - Fees used when sending the transaction can be specified via the --conf_target, or - --sat_per_byte optional flags. - - Positional arguments and flags can be used interchangeably but not at the same time! - `, - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "addr", - Usage: "the base58 or bech32 encoded bitcoin address to send coins " + - "to on-chain", - }, - cli.BoolFlag{ - Name: "sweepall", - Usage: "if set, then the amount field will be ignored, " + - "and the wallet will attempt to sweep all " + - "outputs within the wallet to the target " + - "address", - }, - cli.Int64Flag{ - Name: "amt", - Usage: "the number of bitcoin denominated in satoshis to send", - }, - cli.Int64Flag{ - Name: "conf_target", - Usage: "(optional) the number of blocks that the " + - "transaction *should* confirm in, will be " + - "used for fee estimation", - }, - cli.Int64Flag{ - Name: "sat_per_byte", - Usage: "(optional) a manual fee expressed in " + - "sat/byte that should be used when crafting " + - "the transaction", - }, - cli.Uint64Flag{ - Name: "min_confs", - Usage: "(optional) the minimum number of confirmations " + - "each one of your outputs used for the transaction " + - "must satisfy", - Value: defaultUtxoMinConf, - }, - txLabelFlag, - }, - Action: actionDecorator(sendCoins), -} - -func sendCoins(ctx *cli.Context) er.R { - var ( - addr string - amt int64 - err error - ) - args := ctx.Args() - - if ctx.NArg() == 0 && ctx.NumFlags() == 0 { - cli.ShowCommandHelp(ctx, "sendcoins") - return nil - } - - if ctx.IsSet("conf_target") && ctx.IsSet("sat_per_byte") { - return er.Errorf("either conf_target or sat_per_byte should be " + - "set, but not both") - } - - switch { - case ctx.IsSet("addr"): - addr = ctx.String("addr") - case args.Present(): - addr = args.First() - args = args.Tail() - 
default: - return er.Errorf("Address argument missing") - } - - switch { - case ctx.IsSet("amt"): - amt = ctx.Int64("amt") - case args.Present(): - amt, err = strconv.ParseInt(args.First(), 10, 64) - case !ctx.Bool("sweepall"): - return er.Errorf("Amount argument missing") - } - if err != nil { - return er.Errorf("unable to decode amount: %v", err) - } - - if amt != 0 && ctx.Bool("sweepall") { - return er.Errorf("amount cannot be set if attempting to " + - "sweep all coins out of the wallet") - } - - ctxb := context.Background() - client, cleanUp := getClient(ctx) - defer cleanUp() - - minConfs := int32(ctx.Uint64("min_confs")) - req := &lnrpc.SendCoinsRequest{ - Addr: addr, - Amount: amt, - TargetConf: int32(ctx.Int64("conf_target")), - SatPerByte: ctx.Int64("sat_per_byte"), - SendAll: ctx.Bool("sweepall"), - Label: ctx.String(txLabelFlag.Name), - MinConfs: minConfs, - SpendUnconfirmed: minConfs == 0, - } - txid, err := client.SendCoins(ctxb, req) - if err != nil { - return er.E(err) - } - - printRespJSON(txid) - return nil -} - -var listUnspentCommand = cli.Command{ - Name: "listunspent", - Category: "On-chain", - Usage: "List utxos available for spending.", - ArgsUsage: "[min-confs [max-confs]] [--unconfirmed_only]", - Description: ` - For each spendable utxo currently in the wallet, with at least min_confs - confirmations, and at most max_confs confirmations, lists the txid, - index, amount, address, address type, scriptPubkey and number of - confirmations. Use --min_confs=0 to include unconfirmed coins. To list - all coins with at least min_confs confirmations, omit the second - argument or flag '--max_confs'. To list all confirmed and unconfirmed - coins, no arguments are required. To see only unconfirmed coins, use - '--unconfirmed_only' with '--min_confs' and '--max_confs' set to zero or - not present. 
- `, - Flags: []cli.Flag{ - cli.Int64Flag{ - Name: "min_confs", - Usage: "the minimum number of confirmations for a utxo", - }, - cli.Int64Flag{ - Name: "max_confs", - Usage: "the maximum number of confirmations for a utxo", - }, - cli.BoolFlag{ - Name: "unconfirmed_only", - Usage: "when min_confs and max_confs are zero, " + - "setting false implicitly overrides max_confs " + - "to be MaxInt32, otherwise max_confs remains " + - "zero. An error is returned if the value is " + - "true and both min_confs and max_confs are " + - "non-zero. (default: false)", - }, - }, - Action: actionDecorator(listUnspent), -} - -func listUnspent(ctx *cli.Context) er.R { - var ( - minConfirms int64 - maxConfirms int64 - err error - ) - args := ctx.Args() - - if ctx.IsSet("max_confs") && !ctx.IsSet("min_confs") { - return er.Errorf("max_confs cannot be set without " + - "min_confs being set") - } - - switch { - case ctx.IsSet("min_confs"): - minConfirms = ctx.Int64("min_confs") - case args.Present(): - minConfirms, err = strconv.ParseInt(args.First(), 10, 64) - if err != nil { - cli.ShowCommandHelp(ctx, "listunspent") - return nil - } - args = args.Tail() - } - - switch { - case ctx.IsSet("max_confs"): - maxConfirms = ctx.Int64("max_confs") - case args.Present(): - maxConfirms, err = strconv.ParseInt(args.First(), 10, 64) - if err != nil { - cli.ShowCommandHelp(ctx, "listunspent") - return nil - } - args = args.Tail() - } - - unconfirmedOnly := ctx.Bool("unconfirmed_only") - - // Force minConfirms and maxConfirms to be zero if unconfirmedOnly is - // true. - if unconfirmedOnly && (minConfirms != 0 || maxConfirms != 0) { - cli.ShowCommandHelp(ctx, "listunspent") - return nil - } - - // When unconfirmedOnly is inactive, we will override maxConfirms to be - // a MaxInt32 to return all confirmed and unconfirmed utxos. 
- if maxConfirms == 0 && !unconfirmedOnly { - maxConfirms = math.MaxInt32 - } - - ctxb := context.Background() - client, cleanUp := getClient(ctx) - defer cleanUp() - - req := &lnrpc.ListUnspentRequest{ - MinConfs: int32(minConfirms), - MaxConfs: int32(maxConfirms), - } - resp, err := client.ListUnspent(ctxb, req) - if err != nil { - return er.E(err) - } - - // Parse the response into the final json object that will be printed - // to stdout. At the moment, this filters out the raw txid bytes from - // each utxo's outpoint and only prints the txid string. - var listUnspentResp = struct { - Utxos []*Utxo `json:"utxos"` - }{ - Utxos: make([]*Utxo, 0, len(resp.Utxos)), - } - for _, protoUtxo := range resp.Utxos { - utxo := NewUtxoFromProto(protoUtxo) - listUnspentResp.Utxos = append(listUnspentResp.Utxos, utxo) - } - - printJSON(listUnspentResp) - - return nil -} - -var sendManyCommand = cli.Command{ - Name: "sendmany", - Category: "On-chain", - Usage: "Send bitcoin on-chain to multiple addresses.", - ArgsUsage: "send-json-string [--conf_target=N] [--sat_per_byte=P]", - Description: ` - Create and broadcast a transaction paying the specified amount(s) to the passed address(es). 
- - The send-json-string' param decodes addresses and the amount to send - respectively in the following format: - - '{"ExampleAddr": NumCoinsInSatoshis, "SecondAddr": NumCoins}' - `, - Flags: []cli.Flag{ - cli.Int64Flag{ - Name: "conf_target", - Usage: "(optional) the number of blocks that the transaction *should* " + - "confirm in, will be used for fee estimation", - }, - cli.Int64Flag{ - Name: "sat_per_byte", - Usage: "(optional) a manual fee expressed in sat/byte that should be " + - "used when crafting the transaction", - }, - cli.Uint64Flag{ - Name: "min_confs", - Usage: "(optional) the minimum number of confirmations " + - "each one of your outputs used for the transaction " + - "must satisfy", - Value: defaultUtxoMinConf, - }, - txLabelFlag, - }, - Action: actionDecorator(sendMany), -} - -func sendMany(ctx *cli.Context) er.R { - var amountToAddr map[string]int64 - - jsonMap := ctx.Args().First() - if err := json.Unmarshal([]byte(jsonMap), &amountToAddr); err != nil { - return er.E(err) - } - - if ctx.IsSet("conf_target") && ctx.IsSet("sat_per_byte") { - return er.Errorf("either conf_target or sat_per_byte should be " + - "set, but not both") - } - - ctxb := context.Background() - client, cleanUp := getClient(ctx) - defer cleanUp() - - minConfs := int32(ctx.Uint64("min_confs")) - txid, err := client.SendMany(ctxb, &lnrpc.SendManyRequest{ - AddrToAmount: amountToAddr, - TargetConf: int32(ctx.Int64("conf_target")), - SatPerByte: ctx.Int64("sat_per_byte"), - Label: ctx.String(txLabelFlag.Name), - MinConfs: minConfs, - SpendUnconfirmed: minConfs == 0, - }) - if err != nil { - return er.E(err) - } - - printRespJSON(txid) - return nil -} - -var connectCommand = cli.Command{ - Name: "connect", - Category: "Peers", - Usage: "Connect to a remote lnd peer.", - ArgsUsage: "@host", - Description: ` - Connect to a peer using its and host. - - A custom timeout on the connection is supported. 
For instance, to timeout - the connection request in 30 seconds, use the following: - - lncli connect @host --timeout 30s - `, - Flags: []cli.Flag{ - cli.BoolFlag{ - Name: "perm", - Usage: "If set, the daemon will attempt to persistently " + - "connect to the target peer.\n" + - " If not, the call will be synchronous.", - }, - cli.DurationFlag{ - Name: "timeout", - Usage: "The connection timeout value for current request. " + - "Valid uints are {ms, s, m, h}.\n" + - "If not set, the global connection " + - "timeout value (default to 120s) is used.", - }, - }, - Action: actionDecorator(connectPeer), -} - -func connectPeer(ctx *cli.Context) er.R { - ctxb := context.Background() - client, cleanUp := getClient(ctx) - defer cleanUp() - - targetAddress := ctx.Args().First() - splitAddr := strings.Split(targetAddress, "@") - if len(splitAddr) != 2 { - return er.Errorf("target address expected in format: " + - "pubkey@host:port") - } - - addr := &lnrpc.LightningAddress{ - Pubkey: splitAddr[0], - Host: splitAddr[1], - } - req := &lnrpc.ConnectPeerRequest{ - Addr: addr, - Perm: ctx.Bool("perm"), - Timeout: uint64(ctx.Duration("timeout").Seconds()), - } - - lnid, err := client.ConnectPeer(ctxb, req) - if err != nil { - return er.E(err) - } - - printRespJSON(lnid) - return nil -} - -var disconnectCommand = cli.Command{ - Name: "disconnect", - Category: "Peers", - Usage: "Disconnect a remote lnd peer identified by public key.", - ArgsUsage: "", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "node_key", - Usage: "The hex-encoded compressed public key of the peer " + - "to disconnect from", - }, - }, - Action: actionDecorator(disconnectPeer), -} - -func disconnectPeer(ctx *cli.Context) er.R { - ctxb := context.Background() - client, cleanUp := getClient(ctx) - defer cleanUp() - - var pubKey string - switch { - case ctx.IsSet("node_key"): - pubKey = ctx.String("node_key") - case ctx.Args().Present(): - pubKey = ctx.Args().First() - default: - return er.Errorf("must specify target 
public key") - } - - req := &lnrpc.DisconnectPeerRequest{ - PubKey: pubKey, - } - - lnid, err := client.DisconnectPeer(ctxb, req) - if err != nil { - return er.E(err) - } - - printRespJSON(lnid) - return nil -} - -// TODO(roasbeef): also allow short relative channel ID. - -var closeChannelCommand = cli.Command{ - Name: "closechannel", - Category: "Channels", - Usage: "Close an existing channel.", - Description: ` - Close an existing channel. The channel can be closed either cooperatively, - or unilaterally (--force). - - A unilateral channel closure means that the latest commitment - transaction will be broadcast to the network. As a result, any settled - funds will be time locked for a few blocks before they can be spent. - - In the case of a cooperative closure, one can manually set the fee to - be used for the closing transaction via either the --conf_target or - --sat_per_byte arguments. This will be the starting value used during - fee negotiation. This is optional. - - In the case of a cooperative closure, one can manually set the address - to deliver funds to upon closure. This is optional, and may only be used - if an upfront shutdown address has not already been set. If neither are - set the funds will be delivered to a new wallet address. - - To view which funding_txids/output_indexes can be used for a channel close, - see the channel_point values within the listchannels command output. 
- The format for a channel_point is 'funding_txid:output_index'.`, - ArgsUsage: "funding_txid [output_index]", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "funding_txid", - Usage: "the txid of the channel's funding transaction", - }, - cli.IntFlag{ - Name: "output_index", - Usage: "the output index for the funding output of the funding " + - "transaction", - }, - cli.BoolFlag{ - Name: "force", - Usage: "attempt an uncooperative closure", - }, - cli.BoolFlag{ - Name: "block", - Usage: "block until the channel is closed", - }, - cli.Int64Flag{ - Name: "conf_target", - Usage: "(optional) the number of blocks that the " + - "transaction *should* confirm in, will be " + - "used for fee estimation", - }, - cli.Int64Flag{ - Name: "sat_per_byte", - Usage: "(optional) a manual fee expressed in " + - "sat/byte that should be used when crafting " + - "the transaction", - }, - cli.StringFlag{ - Name: "delivery_addr", - Usage: "(optional) an address to deliver funds " + - "upon cooperative channel closing, may only " + - "be used if an upfront shutdown address is not " + - "already set", - }, - }, - Action: actionDecorator(closeChannel), -} - -func closeChannel(ctx *cli.Context) er.R { - client, cleanUp := getClient(ctx) - defer cleanUp() - - // Show command help if no arguments and flags were provided. - if ctx.NArg() == 0 && ctx.NumFlags() == 0 { - cli.ShowCommandHelp(ctx, "closechannel") - return nil - } - - channelPoint, err := parseChannelPoint(ctx) - if err != nil { - return err - } - - // TODO(roasbeef): implement time deadline within server - req := &lnrpc.CloseChannelRequest{ - ChannelPoint: channelPoint, - Force: ctx.Bool("force"), - TargetConf: int32(ctx.Int64("conf_target")), - SatPerByte: ctx.Int64("sat_per_byte"), - DeliveryAddress: ctx.String("delivery_addr"), - } - - // After parsing the request, we'll spin up a goroutine that will - // retrieve the closing transaction ID when attempting to close the - // channel. 
We do this to because `executeChannelClose` can block, so we - // would like to present the closing transaction ID to the user as soon - // as it is broadcasted. - var wg sync.WaitGroup - txidChan := make(chan string, 1) - - wg.Add(1) - go func() { - defer wg.Done() - - printJSON(struct { - ClosingTxid string `json:"closing_txid"` - }{ - ClosingTxid: <-txidChan, - }) - }() - - err = executeChannelClose(client, req, txidChan, ctx.Bool("block")) - if err != nil { - return err - } - - // In the case that the user did not provide the `block` flag, then we - // need to wait for the goroutine to be done to prevent it from being - // destroyed when exiting before printing the closing transaction ID. - wg.Wait() - - return nil -} - -// executeChannelClose attempts to close the channel from a request. The closing -// transaction ID is sent through `txidChan` as soon as it is broadcasted to the -// network. The block boolean is used to determine if we should block until the -// closing transaction receives all of its required confirmations. -func executeChannelClose(client lnrpc.LightningClient, req *lnrpc.CloseChannelRequest, - txidChan chan<- string, block bool) er.R { - - stream, err := client.CloseChannel(context.Background(), req) - if err != nil { - return er.E(err) - } - - for { - resp, err := stream.Recv() - if err == io.EOF { - return nil - } else if err != nil { - return er.E(err) - } - - switch update := resp.Update.(type) { - case *lnrpc.CloseStatusUpdate_ClosePending: - closingHash := update.ClosePending.Txid - txid, err := chainhash.NewHash(closingHash) - if err != nil { - return err - } - - txidChan <- txid.String() - - if !block { - return nil - } - case *lnrpc.CloseStatusUpdate_ChanClose: - return nil - } - } -} - -var closeAllChannelsCommand = cli.Command{ - Name: "closeallchannels", - Category: "Channels", - Usage: "Close all existing channels.", - Description: ` - Close all existing channels. 
- - Channels will be closed either cooperatively or unilaterally, depending - on whether the channel is active or not. If the channel is inactive, any - settled funds within it will be time locked for a few blocks before they - can be spent. - - One can request to close inactive channels only by using the - --inactive_only flag. - - By default, one is prompted for confirmation every time an inactive - channel is requested to be closed. To avoid this, one can set the - --force flag, which will only prompt for confirmation once for all - inactive channels and proceed to close them. - - In the case of cooperative closures, one can manually set the fee to - be used for the closing transactions via either the --conf_target or - --sat_per_byte arguments. This will be the starting value used during - fee negotiation. This is optional.`, - Flags: []cli.Flag{ - cli.BoolFlag{ - Name: "inactive_only", - Usage: "close inactive channels only", - }, - cli.BoolFlag{ - Name: "force", - Usage: "ask for confirmation once before attempting " + - "to close existing channels", - }, - cli.Int64Flag{ - Name: "conf_target", - Usage: "(optional) the number of blocks that the " + - "closing transactions *should* confirm in, will be " + - "used for fee estimation", - }, - cli.Int64Flag{ - Name: "sat_per_byte", - Usage: "(optional) a manual fee expressed in " + - "sat/byte that should be used when crafting " + - "the closing transactions", - }, - }, - Action: actionDecorator(closeAllChannels), -} - -func closeAllChannels(ctx *cli.Context) er.R { - client, cleanUp := getClient(ctx) - defer cleanUp() - - listReq := &lnrpc.ListChannelsRequest{} - openChannels, err := client.ListChannels(context.Background(), listReq) - if err != nil { - return er.Errorf("unable to fetch open channels: %v", err) - } - - if len(openChannels.Channels) == 0 { - return er.New("no open channels to close") - } - - var channelsToClose []*lnrpc.Channel - - switch { - case ctx.Bool("force") && ctx.Bool("inactive_only"): - 
msg := "Unilaterally close all inactive channels? The funds " + - "within these channels will be locked for some blocks " + - "(CSV delay) before they can be spent. (yes/no): " - - confirmed := promptForConfirmation(msg) - - // We can safely exit if the user did not confirm. - if !confirmed { - return nil - } - - // Go through the list of open channels and only add inactive - // channels to the closing list. - for _, channel := range openChannels.Channels { - if !channel.GetActive() { - channelsToClose = append( - channelsToClose, channel, - ) - } - } - case ctx.Bool("force"): - msg := "Close all active and inactive channels? Inactive " + - "channels will be closed unilaterally, so funds " + - "within them will be locked for a few blocks (CSV " + - "delay) before they can be spent. (yes/no): " - - confirmed := promptForConfirmation(msg) - - // We can safely exit if the user did not confirm. - if !confirmed { - return nil - } - - channelsToClose = openChannels.Channels - default: - // Go through the list of open channels and determine which - // should be added to the closing list. - for _, channel := range openChannels.Channels { - // If the channel is inactive, we'll attempt to - // unilaterally close the channel, so we should prompt - // the user for confirmation beforehand. - if !channel.GetActive() { - msg := fmt.Sprintf("Unilaterally close channel "+ - "with node %s and channel point %s? "+ - "The closing transaction will need %d "+ - "confirmations before the funds can be "+ - "spent. (yes/no): ", channel.RemotePubkey, - channel.ChannelPoint, channel.LocalConstraints.CsvDelay) - - confirmed := promptForConfirmation(msg) - - if confirmed { - channelsToClose = append( - channelsToClose, channel, - ) - } - } else if !ctx.Bool("inactive_only") { - // Otherwise, we'll only add active channels if - // we were not requested to close inactive - // channels only. 
- channelsToClose = append( - channelsToClose, channel, - ) - } - } - } - - // result defines the result of closing a channel. The closing - // transaction ID is populated if a channel is successfully closed. - // Otherwise, the error that prevented closing the channel is populated. - type result struct { - RemotePubKey string `json:"remote_pub_key"` - ChannelPoint string `json:"channel_point"` - ClosingTxid string `json:"closing_txid"` - FailErr string `json:"error"` - } - - // Launch each channel closure in a goroutine in order to execute them - // in parallel. Once they're all executed, we will print the results as - // they come. - resultChan := make(chan result, len(channelsToClose)) - for _, channel := range channelsToClose { - go func(channel *lnrpc.Channel) { - res := result{} - res.RemotePubKey = channel.RemotePubkey - res.ChannelPoint = channel.ChannelPoint - defer func() { - resultChan <- res - }() - - // Parse the channel point in order to create the close - // channel request. 
- s := strings.Split(res.ChannelPoint, ":") - if len(s) != 2 { - res.FailErr = "expected channel point with " + - "format txid:index" - return - } - index, errr := strconv.ParseUint(s[1], 10, 32) - if errr != nil { - res.FailErr = fmt.Sprintf("unable to parse "+ - "channel point output index: %v", errr) - return - } - - req := &lnrpc.CloseChannelRequest{ - ChannelPoint: &lnrpc.ChannelPoint{ - FundingTxid: &lnrpc.ChannelPoint_FundingTxidStr{ - FundingTxidStr: s[0], - }, - OutputIndex: uint32(index), - }, - Force: !channel.GetActive(), - TargetConf: int32(ctx.Int64("conf_target")), - SatPerByte: ctx.Int64("sat_per_byte"), - } - - txidChan := make(chan string, 1) - err := executeChannelClose(client, req, txidChan, false) - if err != nil { - res.FailErr = fmt.Sprintf("unable to close "+ - "channel: %v", err) - return - } - - res.ClosingTxid = <-txidChan - }(channel) - } - - for range channelsToClose { - res := <-resultChan - printJSON(res) - } - - return nil -} - -// promptForConfirmation continuously prompts the user for the message until -// receiving a response of "yes" or "no" and returns their answer as a bool. -func promptForConfirmation(msg string) bool { - reader := bufio.NewReader(os.Stdin) - - for { - fmt.Print(msg) - - answer, err := reader.ReadString('\n') - if err != nil { - return false - } - - answer = strings.ToLower(strings.TrimSpace(answer)) - - switch { - case answer == "yes": - return true - case answer == "no": - return false - default: - continue - } - } -} - -var abandonChannelCommand = cli.Command{ - Name: "abandonchannel", - Category: "Channels", - Usage: "Abandons an existing channel.", - Description: ` - Removes all channel state from the database except for a close - summary. This method can be used to get rid of permanently unusable - channels due to bugs fixed in newer versions of lnd. - - Only available when lnd is built in debug mode. 
- - To view which funding_txids/output_indexes can be used for this command, - see the channel_point values within the listchannels command output. - The format for a channel_point is 'funding_txid:output_index'.`, - ArgsUsage: "funding_txid [output_index]", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "funding_txid", - Usage: "the txid of the channel's funding transaction", - }, - cli.IntFlag{ - Name: "output_index", - Usage: "the output index for the funding output of the funding " + - "transaction", - }, - }, - Action: actionDecorator(abandonChannel), -} - -func abandonChannel(ctx *cli.Context) er.R { - ctxb := context.Background() - - client, cleanUp := getClient(ctx) - defer cleanUp() - - // Show command help if no arguments and flags were provided. - if ctx.NArg() == 0 && ctx.NumFlags() == 0 { - cli.ShowCommandHelp(ctx, "abandonchannel") - return nil - } - - channelPoint, err := parseChannelPoint(ctx) - if err != nil { - return err - } - - req := &lnrpc.AbandonChannelRequest{ - ChannelPoint: channelPoint, - } - - resp, errr := client.AbandonChannel(ctxb, req) - if errr != nil { - return er.E(errr) - } - - printRespJSON(resp) - return nil -} - -// parseChannelPoint parses a funding txid and output index from the command -// line. Both named options as well as unnamed parameters are supported. 
-func parseChannelPoint(ctx *cli.Context) (*lnrpc.ChannelPoint, er.R) { - channelPoint := &lnrpc.ChannelPoint{} - - args := ctx.Args() - - switch { - case ctx.IsSet("funding_txid"): - channelPoint.FundingTxid = &lnrpc.ChannelPoint_FundingTxidStr{ - FundingTxidStr: ctx.String("funding_txid"), - } - case args.Present(): - channelPoint.FundingTxid = &lnrpc.ChannelPoint_FundingTxidStr{ - FundingTxidStr: args.First(), - } - args = args.Tail() - default: - return nil, er.Errorf("funding txid argument missing") - } - - switch { - case ctx.IsSet("output_index"): - channelPoint.OutputIndex = uint32(ctx.Int("output_index")) - case args.Present(): - index, err := strconv.ParseUint(args.First(), 10, 32) - if err != nil { - return nil, er.Errorf("unable to decode output index: %v", err) - } - channelPoint.OutputIndex = uint32(index) - default: - channelPoint.OutputIndex = 0 - } - - return channelPoint, nil -} - -var listPeersCommand = cli.Command{ - Name: "listpeers", - Category: "Peers", - Usage: "List all active, currently connected peers.", - Flags: []cli.Flag{ - cli.BoolFlag{ - Name: "list_errors", - Usage: "list a full set of most recent errors for the peer", - }, - }, - Action: actionDecorator(listPeers), -} - -func listPeers(ctx *cli.Context) er.R { - ctxb := context.Background() - client, cleanUp := getClient(ctx) - defer cleanUp() - - // By default, we display a single error on the cli. If the user - // specifically requests a full error set, then we will provide it. 
- req := &lnrpc.ListPeersRequest{ - LatestError: !ctx.IsSet("list_errors"), - } - resp, err := client.ListPeers(ctxb, req) - if err != nil { - return er.E(err) - } - - printRespJSON(resp) - return nil -} - -var walletBalanceCommand = cli.Command{ - Name: "walletbalance", - Category: "Wallet", - Usage: "Compute and display the wallet's current balance.", - Action: actionDecorator(walletBalance), -} - -func walletBalance(ctx *cli.Context) er.R { - ctxb := context.Background() - client, cleanUp := getClient(ctx) - defer cleanUp() - - req := &lnrpc.WalletBalanceRequest{} - resp, err := client.WalletBalance(ctxb, req) - if err != nil { - return er.E(err) - } - - printRespJSON(resp) - return nil -} - -var getAddressBalancesCommand = cli.Command{ - Name: "getaddressbalances", - Category: "Wallet", - Description: ` - Get the balance for each active address, including: - * The amount which is immediately spendable - * The amount which is unconfirmed (specify minconf to decide how many confirms are needed) - * The amount which is a mining reward that has not yet matured`, - Flags: []cli.Flag{ - cli.IntFlag{ - Name: "minconf", - Usage: "The minimum required confirms for a transaction to be considered confirmed", - }, - cli.BoolFlag{ - Name: "show_zero_balance", - Usage: "Show addresses which are active in the wallet but have no known coins", - }, - }, - Usage: "Compute and display balances for each address in the wallet.", - Action: actionDecorator(getAddressBalances), -} - -func getAddressBalances(ctx *cli.Context) er.R { - ctxb := context.Background() - client, cleanUp := getClient(ctx) - defer cleanUp() - - minconf := 1 - if ctx.IsSet("minconf") { - minconf = ctx.Int("minconf") - } - - req := &lnrpc.GetAddressBalancesRequest{ - Minconf: int32(minconf), - Showzerobalance: ctx.IsSet("show_zero_balance"), - } - resp, err := client.GetAddressBalances(ctxb, req) - if err != nil { - return er.E(err) - } - - printRespJSON(resp) - return nil -} - -var channelBalanceCommand = 
cli.Command{ - Name: "channelbalance", - Category: "Channels", - Usage: "Returns the sum of the total available channel balance across " + - "all open channels.", - Action: actionDecorator(channelBalance), -} - -func channelBalance(ctx *cli.Context) er.R { - ctxb := context.Background() - client, cleanUp := getClient(ctx) - defer cleanUp() - - req := &lnrpc.ChannelBalanceRequest{} - resp, err := client.ChannelBalance(ctxb, req) - if err != nil { - return er.E(err) - } - - printRespJSON(resp) - return nil -} - -var getInfoCommand = cli.Command{ - Name: "getinfo", - Usage: "Returns basic information related to the active daemon.", - Action: actionDecorator(getInfo), -} - -func getInfo(ctx *cli.Context) er.R { - ctxb := context.Background() - client, cleanUp := getClient(ctx) - defer cleanUp() - inforeq := &lnrpc.GetInfoRequest{} - inforesp, infoerr := client.GetInfo(ctxb, inforeq) - if infoerr != nil { - inforesp = nil - } - // call getinfo2 from metaservice hat will return some info even when wallet is locked - metaclient, cleanUpMeta := getMetaServiceClient(ctx) - defer cleanUpMeta() - info2req := &lnrpc.GetInfo2Request{ - InfoResponse: inforesp, - } - info2resp, info2err := metaclient.GetInfo2(ctxb, info2req) - if info2err != nil { - return er.E(info2err) - } - - printRespJSON(info2resp) - return nil -} - -var getRecoveryInfoCommand = cli.Command{ - Name: "getrecoveryinfo", - Usage: "Display information about an ongoing recovery attempt.", - Action: actionDecorator(getRecoveryInfo), -} - -func getRecoveryInfo(ctx *cli.Context) er.R { - ctxb := context.Background() - client, cleanUp := getClient(ctx) - defer cleanUp() - - req := &lnrpc.GetRecoveryInfoRequest{} - resp, err := client.GetRecoveryInfo(ctxb, req) - if err != nil { - return er.E(err) - } - - printRespJSON(resp) - return nil -} - -var pendingChannelsCommand = cli.Command{ - Name: "pendingchannels", - Category: "Channels", - Usage: "Display information pertaining to pending channels.", - Action: 
actionDecorator(pendingChannels), -} - -func pendingChannels(ctx *cli.Context) er.R { - ctxb := context.Background() - client, cleanUp := getClient(ctx) - defer cleanUp() - - req := &lnrpc.PendingChannelsRequest{} - resp, err := client.PendingChannels(ctxb, req) - if err != nil { - return er.E(err) - } - - printRespJSON(resp) - - return nil -} - -var listChannelsCommand = cli.Command{ - Name: "listchannels", - Category: "Channels", - Usage: "List all open channels.", - Flags: []cli.Flag{ - cli.BoolFlag{ - Name: "active_only", - Usage: "only list channels which are currently active", - }, - cli.BoolFlag{ - Name: "inactive_only", - Usage: "only list channels which are currently inactive", - }, - cli.BoolFlag{ - Name: "public_only", - Usage: "only list channels which are currently public", - }, - cli.BoolFlag{ - Name: "private_only", - Usage: "only list channels which are currently private", - }, - cli.StringFlag{ - Name: "peer", - Usage: "(optional) only display channels with a " + - "particular peer, accepts 66-byte, " + - "hex-encoded pubkeys", - }, - }, - Action: actionDecorator(listChannels), -} - -func listChannels(ctx *cli.Context) er.R { - ctxb := context.Background() - client, cleanUp := getClient(ctx) - defer cleanUp() - - peer := ctx.String("peer") - - // If the user requested channels with a particular key, parse the - // provided pubkey. 
- var peerKey []byte - if len(peer) > 0 { - pk, err := route.NewVertexFromStr(peer) - if err != nil { - return er.Errorf("invalid --peer pubkey: %v", err) - } - - peerKey = pk[:] - } - - req := &lnrpc.ListChannelsRequest{ - ActiveOnly: ctx.Bool("active_only"), - InactiveOnly: ctx.Bool("inactive_only"), - PublicOnly: ctx.Bool("public_only"), - PrivateOnly: ctx.Bool("private_only"), - Peer: peerKey, - } - - resp, err := client.ListChannels(ctxb, req) - if err != nil { - return er.E(err) - } - - printRespJSON(resp) - - return nil -} - -var closedChannelsCommand = cli.Command{ - Name: "closedchannels", - Category: "Channels", - Usage: "List all closed channels.", - Flags: []cli.Flag{ - cli.BoolFlag{ - Name: "cooperative", - Usage: "list channels that were closed cooperatively", - }, - cli.BoolFlag{ - Name: "local_force", - Usage: "list channels that were force-closed " + - "by the local node", - }, - cli.BoolFlag{ - Name: "remote_force", - Usage: "list channels that were force-closed " + - "by the remote node", - }, - cli.BoolFlag{ - Name: "breach", - Usage: "list channels for which the remote node " + - "attempted to broadcast a prior " + - "revoked channel state", - }, - cli.BoolFlag{ - Name: "funding_canceled", - Usage: "list channels that were never fully opened", - }, - cli.BoolFlag{ - Name: "abandoned", - Usage: "list channels that were abandoned by " + - "the local node", - }, - }, - Action: actionDecorator(closedChannels), -} - -func closedChannels(ctx *cli.Context) er.R { - ctxb := context.Background() - client, cleanUp := getClient(ctx) - defer cleanUp() - - req := &lnrpc.ClosedChannelsRequest{ - Cooperative: ctx.Bool("cooperative"), - LocalForce: ctx.Bool("local_force"), - RemoteForce: ctx.Bool("remote_force"), - Breach: ctx.Bool("breach"), - FundingCanceled: ctx.Bool("funding_canceled"), - Abandoned: ctx.Bool("abandoned"), - } - - resp, err := client.ClosedChannels(ctxb, req) - if err != nil { - return er.E(err) - } - - printRespJSON(resp) - - return nil -} 
- -var describeGraphCommand = cli.Command{ - Name: "describegraph", - Category: "Graph", - Description: "Prints a human readable version of the known channel " + - "graph from the PoV of the node", - Usage: "Describe the network graph.", - Flags: []cli.Flag{ - cli.BoolFlag{ - Name: "include_unannounced", - Usage: "If set, unannounced channels will be included in the " + - "graph. Unannounced channels are both private channels, and " + - "public channels that are not yet announced to the network.", - }, - }, - Action: actionDecorator(describeGraph), -} - -func describeGraph(ctx *cli.Context) er.R { - client, cleanUp := getClient(ctx) - defer cleanUp() - - req := &lnrpc.ChannelGraphRequest{ - IncludeUnannounced: ctx.Bool("include_unannounced"), - } - - graph, err := client.DescribeGraph(context.Background(), req) - if err != nil { - return er.E(err) - } - - printRespJSON(graph) - return nil -} - -var getNodeMetricsCommand = cli.Command{ - Name: "getnodemetrics", - Category: "Graph", - Description: "Prints out node metrics calculated from the current graph", - Usage: "Get node metrics.", - Action: actionDecorator(getNodeMetrics), -} - -func getNodeMetrics(ctx *cli.Context) er.R { - client, cleanUp := getClient(ctx) - defer cleanUp() - - req := &lnrpc.NodeMetricsRequest{ - Types: []lnrpc.NodeMetricType{lnrpc.NodeMetricType_BETWEENNESS_CENTRALITY}, - } - - nodeMetrics, err := client.GetNodeMetrics(context.Background(), req) - if err != nil { - return er.E(err) - } - - printRespJSON(nodeMetrics) - return nil -} - -var listPaymentsCommand = cli.Command{ - Name: "listpayments", - Category: "Payments", - Usage: "List all outgoing payments.", - Description: "This command enables the retrieval of payments stored " + - "in the database. Pagination is supported by the usage of " + - "index_offset in combination with the paginate_forwards flag. " + - "Reversed pagination is enabled by default to receive " + - "current payments first. 
Pagination can be resumed by using " + - "the returned last_index_offset (for forwards order), or " + - "first_index_offset (for reversed order) as the offset_index. ", - Flags: []cli.Flag{ - cli.BoolFlag{ - Name: "include_incomplete", - Usage: "if set to true, payments still in flight (or " + - "failed) will be returned as well, keeping" + - "indices for payments the same as without " + - "the flag", - }, - cli.UintFlag{ - Name: "index_offset", - Usage: "The index of a payment that will be used as " + - "either the start (in forwards mode) or end " + - "(in reverse mode) of a query to determine " + - "which payments should be returned in the " + - "response, where the index_offset is " + - "excluded. If index_offset is set to zero in " + - "reversed mode, the query will end with the " + - "last payment made.", - }, - cli.UintFlag{ - Name: "max_payments", - Usage: "the max number of payments to return, by " + - "default, all completed payments are returned", - }, - cli.BoolFlag{ - Name: "paginate_forwards", - Usage: "if set, payments succeeding the " + - "index_offset will be returned, allowing " + - "forwards pagination", - }, - }, - Action: actionDecorator(listPayments), -} - -func listPayments(ctx *cli.Context) er.R { - client, cleanUp := getClient(ctx) - defer cleanUp() - - req := &lnrpc.ListPaymentsRequest{ - IncludeIncomplete: ctx.Bool("include_incomplete"), - IndexOffset: uint64(ctx.Uint("index_offset")), - MaxPayments: uint64(ctx.Uint("max_payments")), - Reversed: !ctx.Bool("paginate_forwards"), - } - - payments, err := client.ListPayments(context.Background(), req) - if err != nil { - return er.E(err) - } - - printRespJSON(payments) - return nil -} - -var getChanInfoCommand = cli.Command{ - Name: "getchaninfo", - Category: "Graph", - Usage: "Get the state of a channel.", - Description: "Prints out the latest authenticated state for a " + - "particular channel", - ArgsUsage: "chan_id", - Flags: []cli.Flag{ - cli.Int64Flag{ - Name: "chan_id", - Usage: "the 
8-byte compact channel ID to query for", - }, - }, - Action: actionDecorator(getChanInfo), -} - -func getChanInfo(ctx *cli.Context) er.R { - ctxb := context.Background() - client, cleanUp := getClient(ctx) - defer cleanUp() - - var ( - chanID int64 - err error - ) - - switch { - case ctx.IsSet("chan_id"): - chanID = ctx.Int64("chan_id") - case ctx.Args().Present(): - chanID, err = strconv.ParseInt(ctx.Args().First(), 10, 64) - if err != nil { - return er.Errorf("error parsing chan_id: %s", err) - } - default: - return er.Errorf("chan_id argument missing") - } - - req := &lnrpc.ChanInfoRequest{ - ChanId: uint64(chanID), - } - - chanInfo, err := client.GetChanInfo(ctxb, req) - if err != nil { - return er.E(err) - } - - printRespJSON(chanInfo) - return nil -} - -var getNodeInfoCommand = cli.Command{ - Name: "getnodeinfo", - Category: "Graph", - Usage: "Get information on a specific node.", - Description: "Prints out the latest authenticated node state for an " + - "advertised node", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "pub_key", - Usage: "the 33-byte hex-encoded compressed public of the target " + - "node", - }, - cli.BoolFlag{ - Name: "include_channels", - Usage: "if true, will return all known channels " + - "associated with the node", - }, - }, - Action: actionDecorator(getNodeInfo), -} - -func getNodeInfo(ctx *cli.Context) er.R { - ctxb := context.Background() - client, cleanUp := getClient(ctx) - defer cleanUp() - - args := ctx.Args() - - var pubKey string - switch { - case ctx.IsSet("pub_key"): - pubKey = ctx.String("pub_key") - case args.Present(): - pubKey = args.First() - default: - return er.Errorf("pub_key argument missing") - } - - req := &lnrpc.NodeInfoRequest{ - PubKey: pubKey, - IncludeChannels: ctx.Bool("include_channels"), - } - - nodeInfo, err := client.GetNodeInfo(ctxb, req) - if err != nil { - return er.E(err) - } - - printRespJSON(nodeInfo) - return nil -} - -var queryRoutesCommand = cli.Command{ - Name: "queryroutes", - Category: 
"Payments", - Usage: "Query a route to a destination.", - Description: "Queries the channel router for a potential path to the destination that has sufficient flow for the amount including fees", - ArgsUsage: "dest amt", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "dest", - Usage: "the 33-byte hex-encoded public key for the payment " + - "destination", - }, - cli.Int64Flag{ - Name: "amt", - Usage: "the amount to send expressed in satoshis", - }, - cli.Int64Flag{ - Name: "fee_limit", - Usage: "maximum fee allowed in satoshis when sending " + - "the payment", - }, - cli.Int64Flag{ - Name: "fee_limit_percent", - Usage: "percentage of the payment's amount used as the " + - "maximum fee allowed when sending the payment", - }, - cli.Int64Flag{ - Name: "final_cltv_delta", - Usage: "(optional) number of blocks the last hop has to reveal " + - "the preimage", - }, - cli.BoolFlag{ - Name: "use_mc", - Usage: "use mission control probabilities", - }, - cli.Uint64Flag{ - Name: "outgoing_chanid", - Usage: "(optional) the channel id of the channel " + - "that must be taken to the first hop", - }, - cltvLimitFlag, - }, - Action: actionDecorator(queryRoutes), -} - -func queryRoutes(ctx *cli.Context) er.R { - ctxb := context.Background() - client, cleanUp := getClient(ctx) - defer cleanUp() - - var ( - dest string - amt int64 - err er.R - errr error - ) - - args := ctx.Args() - - switch { - case ctx.IsSet("dest"): - dest = ctx.String("dest") - case args.Present(): - dest = args.First() - args = args.Tail() - default: - return er.Errorf("dest argument missing") - } - - switch { - case ctx.IsSet("amt"): - amt = ctx.Int64("amt") - case args.Present(): - amt, errr = strconv.ParseInt(args.First(), 10, 64) - if errr != nil { - return er.Errorf("unable to decode amt argument: %v", errr) - } - default: - return er.Errorf("amt argument missing") - } - - feeLimit, err := retrieveFeeLimitLegacy(ctx) - if err != nil { - return err - } - - req := &lnrpc.QueryRoutesRequest{ - PubKey: dest, - 
Amt: amt, - FeeLimit: feeLimit, - FinalCltvDelta: int32(ctx.Int("final_cltv_delta")), - UseMissionControl: ctx.Bool("use_mc"), - CltvLimit: uint32(ctx.Uint64(cltvLimitFlag.Name)), - OutgoingChanId: ctx.Uint64("outgoing_chanid"), - } - - route, errr := client.QueryRoutes(ctxb, req) - if errr != nil { - return er.E(errr) - } - - printRespJSON(route) - return nil -} - -// retrieveFeeLimitLegacy retrieves the fee limit based on the different fee -// limit flags passed. This function will eventually disappear in favor of -// retrieveFeeLimit and the new payment rpc. -func retrieveFeeLimitLegacy(ctx *cli.Context) (*lnrpc.FeeLimit, er.R) { - switch { - case ctx.IsSet("fee_limit") && ctx.IsSet("fee_limit_percent"): - return nil, er.Errorf("either fee_limit or fee_limit_percent " + - "can be set, but not both") - case ctx.IsSet("fee_limit"): - return &lnrpc.FeeLimit{ - Limit: &lnrpc.FeeLimit_Fixed{ - Fixed: ctx.Int64("fee_limit"), - }, - }, nil - case ctx.IsSet("fee_limit_percent"): - feeLimitPercent := ctx.Int64("fee_limit_percent") - if feeLimitPercent < 0 { - return nil, er.New("negative fee limit percentage " + - "provided") - } - return &lnrpc.FeeLimit{ - Limit: &lnrpc.FeeLimit_Percent{ - Percent: feeLimitPercent, - }, - }, nil - } - - // Since the fee limit flags aren't required, we don't return an error - // if they're not set. 
- return nil, nil -} - -var getNetworkInfoCommand = cli.Command{ - Name: "getnetworkinfo", - Category: "Channels", - Usage: "Get statistical information about the current " + - "state of the network.", - Description: "Returns a set of statistics pertaining to the known " + - "channel graph", - Action: actionDecorator(getNetworkInfo), -} - -func getNetworkInfo(ctx *cli.Context) er.R { - ctxb := context.Background() - client, cleanUp := getClient(ctx) - defer cleanUp() - - req := &lnrpc.NetworkInfoRequest{} - - netInfo, err := client.GetNetworkInfo(ctxb, req) - if err != nil { - return er.E(err) - } - - printRespJSON(netInfo) - return nil -} - -var debugLevelCommand = cli.Command{ - Name: "debuglevel", - Usage: "Set the debug level.", - Description: `Logging level for all subsystems {trace, debug, info, warn, error, critical, off} - You may also specify =,=,... to set the log level for individual subsystems - - Use show to list available subsystems`, - Flags: []cli.Flag{ - cli.BoolFlag{ - Name: "show", - Usage: "if true, then the list of available sub-systems will be printed out", - }, - cli.StringFlag{ - Name: "level", - Usage: "the level specification to target either a coarse logging level, or granular set of specific sub-systems with logging levels for each", - }, - }, - Action: actionDecorator(debugLevel), -} - -func debugLevel(ctx *cli.Context) er.R { - ctxb := context.Background() - client, cleanUp := getClient(ctx) - defer cleanUp() - req := &lnrpc.DebugLevelRequest{ - Show: ctx.Bool("show"), - LevelSpec: ctx.String("level"), - } - - resp, err := client.DebugLevel(ctxb, req) - if err != nil { - return er.E(err) - } - - printRespJSON(resp) - return nil -} - -var listChainTxnsCommand = cli.Command{ - Name: "listchaintxns", - Category: "On-chain", - Usage: "List transactions from the wallet.", - Flags: []cli.Flag{ - cli.Int64Flag{ - Name: "start_height", - Usage: "the block height from which to list " + - "transactions, inclusive", - }, - cli.Int64Flag{ - Name: 
"end_height", - Usage: "the block height until which to list " + - "transactions, inclusive, to get transactions " + - "until the chain tip, including unconfirmed, " + - "set this value to -1", - }, - }, - Description: ` - List all transactions an address of the wallet was involved in. - - This call will return a list of wallet related transactions that paid - to an address our wallet controls, or spent utxos that we held. The - start_height and end_height flags can be used to specify an inclusive - block range over which to query for transactions. If the end_height is - less than the start_height, transactions will be queried in reverse. - To get all transactions until the chain tip, including unconfirmed - transactions (identifiable with BlockHeight=0), set end_height to -1. - By default, this call will get all transactions our wallet was involved - in, including unconfirmed transactions. -`, - Action: actionDecorator(listChainTxns), -} - -func listChainTxns(ctx *cli.Context) er.R { - ctxb := context.Background() - client, cleanUp := getClient(ctx) - defer cleanUp() - - req := &lnrpc.GetTransactionsRequest{} - - if ctx.IsSet("start_height") { - req.StartHeight = int32(ctx.Int64("start_height")) - } - if ctx.IsSet("end_height") { - req.EndHeight = int32(ctx.Int64("end_height")) - } - - resp, err := client.GetTransactions(ctxb, req) - if err != nil { - return er.E(err) - } - - printRespJSON(resp) - return nil -} - -var stopCommand = cli.Command{ - Name: "stop", - Usage: "Stop and shutdown the daemon.", - Description: ` - Gracefully stop all daemon subsystems before stopping the daemon itself. 
- This is equivalent to stopping it using CTRL-C.`, - Action: actionDecorator(stopDaemon), -} - -func stopDaemon(ctx *cli.Context) er.R { - ctxb := context.Background() - client, cleanUp := getClient(ctx) - defer cleanUp() - - _, err := client.StopDaemon(ctxb, &lnrpc.StopRequest{}) - if err != nil { - return er.E(err) - } - - return nil -} - -var signMessageCommand = cli.Command{ - Name: "signmessage", - Category: "Wallet", - Usage: "Sign a message with the node's private key.", - ArgsUsage: "msg", - Description: ` - Sign msg with the resident node's private key. - Returns the signature as a zbase32 string. - - Positional arguments and flags can be used interchangeably but not at the same time!`, - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "msg", - Usage: "the message to sign", - }, - }, - Action: actionDecorator(signMessage), -} - -func signMessage(ctx *cli.Context) er.R { - ctxb := context.Background() - client, cleanUp := getClient(ctx) - defer cleanUp() - - var msg []byte - - switch { - case ctx.IsSet("msg"): - msg = []byte(ctx.String("msg")) - case ctx.Args().Present(): - msg = []byte(ctx.Args().First()) - default: - return er.Errorf("msg argument missing") - } - - resp, err := client.SignMessage(ctxb, &lnrpc.SignMessageRequest{Msg: msg}) - if err != nil { - return er.E(err) - } - - printRespJSON(resp) - return nil -} - -var verifyMessageCommand = cli.Command{ - Name: "verifymessage", - Category: "Wallet", - Usage: "Verify a message signed with the signature.", - ArgsUsage: "msg signature", - Description: ` - Verify that the message was signed with a properly-formed signature - The signature must be zbase32 encoded and signed with the private key of - an active node in the resident node's channel database. 
- - Positional arguments and flags can be used interchangeably but not at the same time!`, - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "msg", - Usage: "the message to verify", - }, - cli.StringFlag{ - Name: "sig", - Usage: "the zbase32 encoded signature of the message", - }, - }, - Action: actionDecorator(verifyMessage), -} - -func verifyMessage(ctx *cli.Context) er.R { - ctxb := context.Background() - client, cleanUp := getClient(ctx) - defer cleanUp() - - var ( - msg []byte - sig string - ) - - args := ctx.Args() - - switch { - case ctx.IsSet("msg"): - msg = []byte(ctx.String("msg")) - case args.Present(): - msg = []byte(ctx.Args().First()) - args = args.Tail() - default: - return er.Errorf("msg argument missing") - } - - switch { - case ctx.IsSet("sig"): - sig = ctx.String("sig") - case args.Present(): - sig = args.First() - default: - return er.Errorf("signature argument missing") - } - - req := &lnrpc.VerifyMessageRequest{Msg: msg, Signature: sig} - resp, err := client.VerifyMessage(ctxb, req) - if err != nil { - return er.E(err) - } - - printRespJSON(resp) - return nil -} - -var feeReportCommand = cli.Command{ - Name: "feereport", - Category: "Channels", - Usage: "Display the current fee policies of all active channels.", - Description: ` - Returns the current fee policies of all active channels. 
- Fee policies can be updated using the updatechanpolicy command.`, - Action: actionDecorator(feeReport), -} - -func feeReport(ctx *cli.Context) er.R { - ctxb := context.Background() - client, cleanUp := getClient(ctx) - defer cleanUp() - - req := &lnrpc.FeeReportRequest{} - resp, err := client.FeeReport(ctxb, req) - if err != nil { - return er.E(err) - } - - printRespJSON(resp) - return nil -} - -var updateChannelPolicyCommand = cli.Command{ - Name: "updatechanpolicy", - Category: "Channels", - Usage: "Update the channel policy for all channels, or a single " + - "channel.", - ArgsUsage: "base_fee_msat fee_rate time_lock_delta " + - "[--max_htlc_msat=N] [channel_point]", - Description: ` - Updates the channel policy for all channels, or just a particular channel - identified by its channel point. The update will be committed, and - broadcast to the rest of the network within the next batch. - Channel points are encoded as: funding_txid:output_index`, - Flags: []cli.Flag{ - cli.Int64Flag{ - Name: "base_fee_msat", - Usage: "the base fee in milli-satoshis that will " + - "be charged for each forwarded HTLC, regardless " + - "of payment size", - }, - cli.StringFlag{ - Name: "fee_rate", - Usage: "the fee rate that will be charged " + - "proportionally based on the value of each " + - "forwarded HTLC, the lowest possible rate is 0 " + - "with a granularity of 0.000001 (millionths)", - }, - cli.Int64Flag{ - Name: "time_lock_delta", - Usage: "the CLTV delta that will be applied to all " + - "forwarded HTLCs", - }, - cli.Uint64Flag{ - Name: "min_htlc_msat", - Usage: "if set, the min HTLC size that will be applied " + - "to all forwarded HTLCs. If unset, the min HTLC " + - "is left unchanged.", - }, - cli.Uint64Flag{ - Name: "max_htlc_msat", - Usage: "if set, the max HTLC size that will be applied " + - "to all forwarded HTLCs. 
If unset, the max HTLC " + - "is left unchanged.", - }, - cli.StringFlag{ - Name: "chan_point", - Usage: "The channel whose fee policy should be " + - "updated, if nil the policies for all channels " + - "will be updated. Takes the form of: txid:output_index", - }, - }, - Action: actionDecorator(updateChannelPolicy), -} - -func parseChanPoint(s string) (*lnrpc.ChannelPoint, er.R) { - split := strings.Split(s, ":") - if len(split) != 2 { - return nil, er.Errorf("expecting chan_point to be in format of: " + - "txid:index") - } - - index, errr := strconv.ParseInt(split[1], 10, 32) - if errr != nil { - return nil, er.Errorf("unable to decode output index: %v", errr) - } - - txid, err := chainhash.NewHashFromStr(split[0]) - if err != nil { - return nil, er.Errorf("unable to parse hex string: %v", err) - } - - return &lnrpc.ChannelPoint{ - FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{ - FundingTxidBytes: txid[:], - }, - OutputIndex: uint32(index), - }, nil -} - -func updateChannelPolicy(ctx *cli.Context) er.R { - ctxb := context.Background() - client, cleanUp := getClient(ctx) - defer cleanUp() - - var ( - baseFee int64 - feeRate float64 - timeLockDelta int64 - err er.R - errr error - ) - args := ctx.Args() - - switch { - case ctx.IsSet("base_fee_msat"): - baseFee = ctx.Int64("base_fee_msat") - case args.Present(): - baseFee, errr = strconv.ParseInt(args.First(), 10, 64) - if errr != nil { - return er.Errorf("unable to decode base_fee_msat: %v", errr) - } - args = args.Tail() - default: - return er.Errorf("base_fee_msat argument missing") - } - - switch { - case ctx.IsSet("fee_rate"): - feeRate = ctx.Float64("fee_rate") - case args.Present(): - feeRate, errr = strconv.ParseFloat(args.First(), 64) - if errr != nil { - return er.Errorf("unable to decode fee_rate: %v", errr) - } - - args = args.Tail() - default: - return er.Errorf("fee_rate argument missing") - } - - switch { - case ctx.IsSet("time_lock_delta"): - timeLockDelta = ctx.Int64("time_lock_delta") - case 
args.Present(): - timeLockDelta, errr = strconv.ParseInt(args.First(), 10, 64) - if errr != nil { - return er.Errorf("unable to decode time_lock_delta: %v", - errr) - } - - args = args.Tail() - default: - return er.Errorf("time_lock_delta argument missing") - } - - var ( - chanPoint *lnrpc.ChannelPoint - chanPointStr string - ) - - switch { - case ctx.IsSet("chan_point"): - chanPointStr = ctx.String("chan_point") - case args.Present(): - chanPointStr = args.First() - } - - if chanPointStr != "" { - chanPoint, err = parseChanPoint(chanPointStr) - if err != nil { - return er.Errorf("unable to parse chan point: %v", err) - } - } - - req := &lnrpc.PolicyUpdateRequest{ - BaseFeeMsat: baseFee, - FeeRate: feeRate, - TimeLockDelta: uint32(timeLockDelta), - MaxHtlcMsat: ctx.Uint64("max_htlc_msat"), - } - - if ctx.IsSet("min_htlc_msat") { - req.MinHtlcMsat = ctx.Uint64("min_htlc_msat") - req.MinHtlcMsatSpecified = true - } - - if chanPoint != nil { - req.Scope = &lnrpc.PolicyUpdateRequest_ChanPoint{ - ChanPoint: chanPoint, - } - } else { - req.Scope = &lnrpc.PolicyUpdateRequest_Global{ - Global: true, - } - } - - resp, errr := client.UpdateChannelPolicy(ctxb, req) - if errr != nil { - return er.E(errr) - } - - printRespJSON(resp) - return nil -} - -var forwardingHistoryCommand = cli.Command{ - Name: "fwdinghistory", - Category: "Payments", - Usage: "Query the history of all forwarded HTLCs.", - ArgsUsage: "start_time [end_time] [index_offset] [max_events]", - Description: ` - Query the HTLC switch's internal forwarding log for all completed - payment circuits (HTLCs) over a particular time range (--start_time and - --end_time). The start and end times are meant to be expressed in - seconds since the Unix epoch. - Alternatively negative time ranges can be used, e.g. "-3d". Supports - s(seconds), m(minutes), h(ours), d(ays), w(eeks), M(onths), y(ears). - Month equals 30.44 days, year equals 365.25 days. - If --start_time isn't provided, then 24 hours ago is used. 
If - --end_time isn't provided, then the current time is used. - - The max number of events returned is 50k. The default number is 100, - callers can use the --max_events param to modify this value. - - Finally, callers can skip a series of events using the --index_offset - parameter. Each response will contain the offset index of the last - entry. Using this callers can manually paginate within a time slice. - `, - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "start_time", - Usage: "the starting time for the query " + - `as unix timestamp or relative e.g. "-1w"`, - }, - cli.StringFlag{ - Name: "end_time", - Usage: "the end time for the query " + - `as unix timestamp or relative e.g. "-1w"`, - }, - cli.Int64Flag{ - Name: "index_offset", - Usage: "the number of events to skip", - }, - cli.Int64Flag{ - Name: "max_events", - Usage: "the max number of events to return", - }, - }, - Action: actionDecorator(forwardingHistory), -} - -func forwardingHistory(ctx *cli.Context) er.R { - ctxb := context.Background() - client, cleanUp := getClient(ctx) - defer cleanUp() - - var ( - startTime, endTime uint64 - indexOffset, maxEvents uint32 - err er.R - ) - args := ctx.Args() - now := time.Now() - - switch { - case ctx.IsSet("start_time"): - startTime, err = parseTime(ctx.String("start_time"), now) - case args.Present(): - startTime, err = parseTime(args.First(), now) - args = args.Tail() - default: - now := time.Now() - startTime = uint64(now.Add(-time.Hour * 24).Unix()) - } - if err != nil { - return er.Errorf("unable to decode start_time: %v", err) - } - - switch { - case ctx.IsSet("end_time"): - endTime, err = parseTime(ctx.String("end_time"), now) - case args.Present(): - endTime, err = parseTime(args.First(), now) - args = args.Tail() - default: - endTime = uint64(now.Unix()) - } - if err != nil { - return er.Errorf("unable to decode end_time: %v", err) - } - - switch { - case ctx.IsSet("index_offset"): - indexOffset = uint32(ctx.Int64("index_offset")) - case 
args.Present(): - i, err := strconv.ParseInt(args.First(), 10, 64) - if err != nil { - return er.Errorf("unable to decode index_offset: %v", err) - } - indexOffset = uint32(i) - args = args.Tail() - } - - switch { - case ctx.IsSet("max_events"): - maxEvents = uint32(ctx.Int64("max_events")) - case args.Present(): - m, err := strconv.ParseInt(args.First(), 10, 64) - if err != nil { - return er.Errorf("unable to decode max_events: %v", err) - } - maxEvents = uint32(m) - args = args.Tail() - } - - req := &lnrpc.ForwardingHistoryRequest{ - StartTime: startTime, - EndTime: endTime, - IndexOffset: indexOffset, - NumMaxEvents: maxEvents, - } - resp, errr := client.ForwardingHistory(ctxb, req) - if errr != nil { - return er.E(errr) - } - - printRespJSON(resp) - return nil -} - -var exportChanBackupCommand = cli.Command{ - Name: "exportchanbackup", - Category: "Channels", - Usage: "Obtain a static channel back up for a selected channels, " + - "or all known channels", - ArgsUsage: "[chan_point] [--all] [--output_file]", - Description: ` - This command allows a user to export a Static Channel Backup (SCB) for - a selected channel. SCB's are encrypted backups of a channel's initial - state that are encrypted with a key derived from the seed of a user. In - the case of partial or complete data loss, the SCB will allow the user - to reclaim settled funds in the channel at its final state. The - exported channel backups can be restored at a later time using the - restorechanbackup command. - - This command will return one of two types of channel backups depending - on the set of passed arguments: - - * If a target channel point is specified, then a single channel - backup containing only the information for that channel will be - returned. - - * If the --all flag is passed, then a multi-channel backup will be - returned. A multi backup is a single encrypted blob (displayed in - hex encoding) that contains several channels in a single cipher - text. 
- - Both of the backup types can be restored using the restorechanbackup - command. - `, - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "chan_point", - Usage: "the target channel to obtain an SCB for", - }, - cli.BoolFlag{ - Name: "all", - Usage: "if specified, then a multi backup of all " + - "active channels will be returned", - }, - cli.StringFlag{ - Name: "output_file", - Usage: ` - if specified, then rather than printing a JSON output - of the static channel backup, a serialized version of - the backup (either Single or Multi) will be written to - the target file, this is the same format used by lnd in - its channels.backup file `, - }, - }, - Action: actionDecorator(exportChanBackup), -} - -func exportChanBackup(ctx *cli.Context) er.R { - ctxb := context.Background() - client, cleanUp := getClient(ctx) - defer cleanUp() - - // Show command help if no arguments provided - if ctx.NArg() == 0 && ctx.NumFlags() == 0 { - cli.ShowCommandHelp(ctx, "exportchanbackup") - return nil - } - - var ( - err error - chanPointStr string - ) - args := ctx.Args() - - switch { - case ctx.IsSet("chan_point"): - chanPointStr = ctx.String("chan_point") - - case args.Present(): - chanPointStr = args.First() - - case !ctx.IsSet("all"): - return er.Errorf("must specify chan_point if --all isn't set") - } - - if chanPointStr != "" { - chanPointRPC, err := parseChanPoint(chanPointStr) - if err != nil { - return err - } - - chanBackup, errr := client.ExportChannelBackup( - ctxb, &lnrpc.ExportChannelBackupRequest{ - ChanPoint: chanPointRPC, - }, - ) - if errr != nil { - return er.E(errr) - } - - txid, err := chainhash.NewHash( - chanPointRPC.GetFundingTxidBytes(), - ) - if err != nil { - return err - } - - chanPoint := wire.OutPoint{ - Hash: *txid, - Index: chanPointRPC.OutputIndex, - } - - printJSON(struct { - ChanPoint string `json:"chan_point"` - ChanBackup []byte `json:"chan_backup"` - }{ - ChanPoint: chanPoint.String(), - ChanBackup: chanBackup.ChanBackup, - }) - return nil - } - - 
if !ctx.IsSet("all") { - return er.Errorf("if a channel isn't specified, -all must be") - } - - chanBackup, err := client.ExportAllChannelBackups( - ctxb, &lnrpc.ChanBackupExportRequest{}, - ) - if err != nil { - return er.E(err) - } - - if ctx.IsSet("output_file") { - return er.E(ioutil.WriteFile( - ctx.String("output_file"), - chanBackup.MultiChanBackup.MultiChanBackup, - 0666, - )) - } - - // TODO(roasbeef): support for export | restore ? - - var chanPoints []string - for _, chanPoint := range chanBackup.MultiChanBackup.ChanPoints { - txid, err := chainhash.NewHash(chanPoint.GetFundingTxidBytes()) - if err != nil { - return err - } - - chanPoints = append(chanPoints, wire.OutPoint{ - Hash: *txid, - Index: chanPoint.OutputIndex, - }.String()) - } - - printRespJSON(chanBackup) - - return nil -} - -var verifyChanBackupCommand = cli.Command{ - Name: "verifychanbackup", - Category: "Channels", - Usage: "Verify an existing channel backup", - ArgsUsage: "[--single_backup] [--multi_backup] [--multi_file]", - Description: ` - This command allows a user to verify an existing Single or Multi channel - backup for integrity. This is useful when a user has a backup, but is - unsure as to if it's valid or for the target node. - - The command will accept backups in one of three forms: - - * A single channel packed SCB, which can be obtained from - exportchanbackup. This should be passed in hex encoded format. - - * A packed multi-channel SCB, which couples several individual - static channel backups in single blob. - - * A file path which points to a packed multi-channel backup within a - file, using the same format that lnd does in its channels.backup - file. 
- `, - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "single_backup", - Usage: "a hex encoded single channel backup obtained " + - "from exportchanbackup", - }, - cli.StringFlag{ - Name: "multi_backup", - Usage: "a hex encoded multi-channel backup obtained " + - "from exportchanbackup", - }, - cli.StringFlag{ - Name: "multi_file", - Usage: "the path to a multi-channel back up file", - }, - }, - Action: actionDecorator(verifyChanBackup), -} - -func verifyChanBackup(ctx *cli.Context) er.R { - ctxb := context.Background() - client, cleanUp := getClient(ctx) - defer cleanUp() - - // Show command help if no arguments provided - if ctx.NArg() == 0 && ctx.NumFlags() == 0 { - cli.ShowCommandHelp(ctx, "verifychanbackup") - return nil - } - - backups, err := parseChanBackups(ctx) - if err != nil { - return err - } - - verifyReq := lnrpc.ChanBackupSnapshot{} - - if backups.GetChanBackups() != nil { - verifyReq.SingleChanBackups = backups.GetChanBackups() - } - if backups.GetMultiChanBackup() != nil { - verifyReq.MultiChanBackup = &lnrpc.MultiChanBackup{ - MultiChanBackup: backups.GetMultiChanBackup(), - } - } - - resp, errr := client.VerifyChanBackup(ctxb, &verifyReq) - if errr != nil { - return er.E(errr) - } - - printRespJSON(resp) - return nil -} - -var restoreChanBackupCommand = cli.Command{ - Name: "restorechanbackup", - Category: "Channels", - Usage: "Restore an existing single or multi-channel static channel " + - "backup", - ArgsUsage: "[--single_backup] [--multi_backup] [--multi_file=", - Description: ` - Allows a user to restore a Static Channel Backup (SCB) that was - obtained either via the exportchanbackup command, or from lnd's - automatically manged channels.backup file. This command should be used - if a user is attempting to restore a channel due to data loss on a - running node restored with the same seed as the node that created the - channel. If successful, this command will allows the user to recover - the settled funds stored in the recovered channels. 
- - The command will accept backups in one of three forms: - - * A single channel packed SCB, which can be obtained from - exportchanbackup. This should be passed in hex encoded format. - - * A packed multi-channel SCB, which couples several individual - static channel backups in single blob. - - * A file path which points to a packed multi-channel backup within a - file, using the same format that lnd does in its channels.backup - file. - `, - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "single_backup", - Usage: "a hex encoded single channel backup obtained " + - "from exportchanbackup", - }, - cli.StringFlag{ - Name: "multi_backup", - Usage: "a hex encoded multi-channel backup obtained " + - "from exportchanbackup", - }, - cli.StringFlag{ - Name: "multi_file", - Usage: "the path to a multi-channel back up file", - }, - }, - Action: actionDecorator(restoreChanBackup), -} - -// errMissingChanBackup is an error returned when we attempt to parse a channel -// backup from a CLI command and it is missing. 
-var errMissingChanBackup = er.GenericErrorType.CodeWithDetail("errMissingChanBackup", - "missing channel backup") - -func parseChanBackups(ctx *cli.Context) (*lnrpc.RestoreChanBackupRequest, er.R) { - switch { - case ctx.IsSet("single_backup"): - packedBackup, err := util.DecodeHex( - ctx.String("single_backup"), - ) - if err != nil { - return nil, er.Errorf("unable to decode single packed "+ - "backup: %v", err) - } - - return &lnrpc.RestoreChanBackupRequest{ - Backup: &lnrpc.RestoreChanBackupRequest_ChanBackups{ - ChanBackups: &lnrpc.ChannelBackups{ - ChanBackups: []*lnrpc.ChannelBackup{ - { - ChanBackup: packedBackup, - }, - }, - }, - }, - }, nil - - case ctx.IsSet("multi_backup"): - packedMulti, err := util.DecodeHex( - ctx.String("multi_backup"), - ) - if err != nil { - return nil, er.Errorf("unable to decode multi packed "+ - "backup: %v", err) - } - - return &lnrpc.RestoreChanBackupRequest{ - Backup: &lnrpc.RestoreChanBackupRequest_MultiChanBackup{ - MultiChanBackup: packedMulti, - }, - }, nil - - case ctx.IsSet("multi_file"): - packedMulti, err := ioutil.ReadFile(ctx.String("multi_file")) - if err != nil { - return nil, er.Errorf("unable to decode multi packed "+ - "backup: %v", err) - } - - return &lnrpc.RestoreChanBackupRequest{ - Backup: &lnrpc.RestoreChanBackupRequest_MultiChanBackup{ - MultiChanBackup: packedMulti, - }, - }, nil - - default: - return nil, errMissingChanBackup.Default() - } -} - -func restoreChanBackup(ctx *cli.Context) er.R { - ctxb := context.Background() - client, cleanUp := getClient(ctx) - defer cleanUp() - - // Show command help if no arguments provided - if ctx.NArg() == 0 && ctx.NumFlags() == 0 { - cli.ShowCommandHelp(ctx, "restorechanbackup") - return nil - } - - var req lnrpc.RestoreChanBackupRequest - - backups, err := parseChanBackups(ctx) - if err != nil { - return err - } - - req.Backup = backups.Backup - - _, errr := client.RestoreChannelBackups(ctxb, &req) - if errr != nil { - return er.Errorf("unable to restore chan 
backups: %v", errr) - } - - return nil -} - -var resyncCommand = cli.Command{ - Name: "resync", - Category: "Wallet", - Usage: "Scan over the chain to find any transactions which may not have been recorded in the wallet's database", - ArgsUsage: "", - Description: `Scan over the chain to find any transactions which may not have been recorded in the wallet's database`, - Flags: []cli.Flag{ - cli.Int64Flag{ - Name: "fromHeight", - Usage: "Start re-syncing to the chain from specified height, default or -1 will use the height of the chain when the wallet was created", - }, - cli.Int64Flag{ - Name: "toHeight", - Usage: "Stop resyncing when this height is reached, default or -1 will use the tip of the chain", - }, - cli.StringFlag{ - Name: "addresses", - Usage: "If specified, the wallet will ONLY scan the chain for these addresses, not others. If dropdb is specified then it will scan all addresses including these", - }, - cli.BoolFlag{ - Name: "dropDB", - Usage: "Clean most of the data out of the wallet transaction store, this is not a real resync, it just drops the wallet and then lets it begin working again", - }, - }, - Action: actionDecorator(resync), -} - -func resync(ctx *cli.Context) er.R { - ctxb := context.Background() - client, cleanUp := getClient(ctx) - defer cleanUp() - fh := int32(-1) - if ctx.IsSet("fromHeight") { - fh = int32(ctx.Int64("fromHeight")) - } - th := int32(-1) - if ctx.IsSet("toHeight") { - th = int32(ctx.Int64("toHeight")) - } - var a []string - if ctx.IsSet("addresses") { - a = ctx.StringSlice("addresses") - } - drop := false - if ctx.IsSet("dropDB") { - drop = ctx.Bool("dropDB") - } - req := &lnrpc.ReSyncChainRequest{ - FromHeight: fh, - ToHeight: th, - Addresses: a, - DropDb: drop, - } - - resp, err := client.ReSync(ctxb, req) - if err != nil { - return er.E(err) - } - - printRespJSON(resp) - return nil -} - -var stopresyncCommand = cli.Command{ - Name: "stopresync", - Category: "Wallet", - Usage: "Stop a re-synchronization job before it's 
completion", - ArgsUsage: "", - Description: `Stop a re-synchronization job before it's completion`, - Action: actionDecorator(stopresync), -} - -func stopresync(ctx *cli.Context) er.R { - ctxb := context.Background() - client, cleanUp := getClient(ctx) - defer cleanUp() - - var req lnrpc.StopReSyncRequest - - resp, err := client.StopReSync(ctxb, &req) - if err != nil { - return er.E(err) - } - printRespJSON(resp) - return nil -} diff --git a/lnd/cmd/lncli/invoicesrpc_active.go b/lnd/cmd/lncli/invoicesrpc_active.go deleted file mode 100644 index ac51f570..00000000 --- a/lnd/cmd/lncli/invoicesrpc_active.go +++ /dev/null @@ -1,260 +0,0 @@ -// +build invoicesrpc - -package main - -import ( - "context" - "encoding/hex" - "fmt" - - "strconv" - - "github.com/pkt-cash/pktd/lnd/lnrpc/invoicesrpc" - "github.com/urfave/cli" -) - -// invoicesCommands will return nil for non-invoicesrpc builds. -func invoicesCommands() []cli.Command { - return []cli.Command{ - cancelInvoiceCommand, - addHoldInvoiceCommand, - settleInvoiceCommand, - } -} - -func getInvoicesClient(ctx *cli.Context) (invoicesrpc.InvoicesClient, func()) { - conn := getClientConn(ctx, false) - - cleanUp := func() { - conn.Close() - } - - return invoicesrpc.NewInvoicesClient(conn), cleanUp -} - -var settleInvoiceCommand = cli.Command{ - Name: "settleinvoice", - Category: "Invoices", - Usage: "Reveal a preimage and use it to settle the corresponding invoice.", - Description: ` - Todo.`, - ArgsUsage: "preimage", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "preimage", - Usage: "the hex-encoded preimage (32 byte) which will " + - "allow settling an incoming HTLC payable to this " + - "preimage.", - }, - }, - Action: actionDecorator(settleInvoice), -} - -func settleInvoice(ctx *cli.Context) er.R { - var ( - preimage []byte - err error - ) - - client, cleanUp := getInvoicesClient(ctx) - defer cleanUp() - - args := ctx.Args() - - switch { - case ctx.IsSet("preimage"): - preimage, err = 
util.DecodeHex(ctx.String("preimage")) - case args.Present(): - preimage, err = util.DecodeHex(args.First()) - } - - if err != nil { - return er.Errorf("unable to parse preimage: %v", err) - } - - invoice := &invoicesrpc.SettleInvoiceMsg{ - Preimage: preimage, - } - - resp, err := client.SettleInvoice(context.Background(), invoice) - if err != nil { - return err - } - - printRespJSON(resp) - - return nil -} - -var cancelInvoiceCommand = cli.Command{ - Name: "cancelinvoice", - Category: "Invoices", - Usage: "Cancels a (hold) invoice", - Description: ` - Todo.`, - ArgsUsage: "paymenthash", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "paymenthash", - Usage: "the hex-encoded payment hash (32 byte) for which the " + - "corresponding invoice will be canceled.", - }, - }, - Action: actionDecorator(cancelInvoice), -} - -func cancelInvoice(ctx *cli.Context) er.R { - var ( - paymentHash []byte - err error - ) - - client, cleanUp := getInvoicesClient(ctx) - defer cleanUp() - - args := ctx.Args() - - switch { - case ctx.IsSet("paymenthash"): - paymentHash, err = util.DecodeHex(ctx.String("paymenthash")) - case args.Present(): - paymentHash, err = util.DecodeHex(args.First()) - } - - if err != nil { - return er.Errorf("unable to parse preimage: %v", err) - } - - invoice := &invoicesrpc.CancelInvoiceMsg{ - PaymentHash: paymentHash, - } - - resp, err := client.CancelInvoice(context.Background(), invoice) - if err != nil { - return err - } - - printRespJSON(resp) - - return nil -} - -var addHoldInvoiceCommand = cli.Command{ - Name: "addholdinvoice", - Category: "Invoices", - Usage: "Add a new hold invoice.", - Description: ` - Add a new invoice, expressing intent for a future payment. - - Invoices without an amount can be created by not supplying any - parameters or providing an amount of 0. 
These invoices allow the payee - to specify the amount of satoshis they wish to send.`, - ArgsUsage: "hash [amt]", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "memo", - Usage: "a description of the payment to attach along " + - "with the invoice (default=\"\")", - }, - cli.Int64Flag{ - Name: "amt", - Usage: "the amt of satoshis in this invoice", - }, - cli.Int64Flag{ - Name: "amt_msat", - Usage: "the amt of millisatoshis in this invoice", - }, - cli.StringFlag{ - Name: "description_hash", - Usage: "SHA-256 hash of the description of the payment. " + - "Used if the purpose of payment cannot naturally " + - "fit within the memo. If provided this will be " + - "used instead of the description(memo) field in " + - "the encoded invoice.", - }, - cli.StringFlag{ - Name: "fallback_addr", - Usage: "fallback on-chain address that can be used in " + - "case the lightning payment fails", - }, - cli.Int64Flag{ - Name: "expiry", - Usage: "the invoice's expiry time in seconds. If not " + - "specified, an expiry of 3600 seconds (1 hour) " + - "is implied.", - }, - cli.BoolTFlag{ - Name: "private", - Usage: "encode routing hints in the invoice with " + - "private channels in order to assist the " + - "payer in reaching you", - }, - }, - Action: actionDecorator(addHoldInvoice), -} - -func addHoldInvoice(ctx *cli.Context) er.R { - var ( - descHash []byte - err error - ) - - client, cleanUp := getInvoicesClient(ctx) - defer cleanUp() - - args := ctx.Args() - if ctx.NArg() == 0 { - cli.ShowCommandHelp(ctx, "addholdinvoice") - return nil - } - - hash, err := util.DecodeHex(args.First()) - if err != nil { - return er.Errorf("unable to parse hash: %v", err) - } - - args = args.Tail() - - amt := ctx.Int64("amt") - amtMsat := ctx.Int64("amt_msat") - - if !ctx.IsSet("amt") && !ctx.IsSet("amt_msat") && args.Present() { - amt, err = strconv.ParseInt(args.First(), 10, 64) - if err != nil { - return er.Errorf("unable to decode amt argument: %v", err) - } - } - - if err != nil { - return 
er.Errorf("unable to parse preimage: %v", err) - } - - descHash, err = util.DecodeHex(ctx.String("description_hash")) - if err != nil { - return er.Errorf("unable to parse description_hash: %v", err) - } - - invoice := &invoicesrpc.AddHoldInvoiceRequest{ - Memo: ctx.String("memo"), - Hash: hash, - Value: amt, - ValueMsat: amtMsat, - DescriptionHash: descHash, - FallbackAddr: ctx.String("fallback_addr"), - Expiry: ctx.Int64("expiry"), - Private: ctx.Bool("private"), - } - - resp, err := client.AddHoldInvoice(context.Background(), invoice) - if err != nil { - return err - } - - printJSON(struct { - PayReq string `json:"pay_req"` - }{ - PayReq: resp.PaymentRequest, - }) - - return nil -} diff --git a/lnd/cmd/lncli/invoicesrpc_default.go b/lnd/cmd/lncli/invoicesrpc_default.go deleted file mode 100644 index 570dfa69..00000000 --- a/lnd/cmd/lncli/invoicesrpc_default.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build !invoicesrpc - -package main - -import "github.com/urfave/cli" - -// invoicesCommands will return nil for non-invoicesrpc builds. -func invoicesCommands() []cli.Command { - return nil -} diff --git a/lnd/cmd/lncli/macaroon_jar.go b/lnd/cmd/lncli/macaroon_jar.go deleted file mode 100644 index 2f04da25..00000000 --- a/lnd/cmd/lncli/macaroon_jar.go +++ /dev/null @@ -1,164 +0,0 @@ -package main - -import ( - "encoding/base64" - "encoding/hex" - "fmt" - "strings" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/pktwallet/snacl" - "gopkg.in/macaroon.v2" -) - -const ( - encryptionPrefix = "snacl:" -) - -// getPasswordFn is a function that asks the user to type a password after -// presenting it the given prompt. -type getPasswordFn func(prompt string) ([]byte, er.R) - -// macaroonJar is a struct that represents all macaroons of a profile. 
-type macaroonJar struct { - Default string `json:"default,omitempty"` - Timeout int64 `json:"timeout,omitempty"` - IP string `json:"ip,omitempty"` - Jar []*macaroonEntry `json:"jar"` -} - -// macaroonEntry is a struct that represents a single macaroon. Its content can -// either be cleartext (hex encoded) or encrypted (snacl secretbox). -type macaroonEntry struct { - Name string `json:"name"` - Data string `json:"data"` -} - -// loadMacaroon returns the fully usable macaroon instance from the entry. This -// detects whether the macaroon needs to be decrypted and does so if necessary. -// An encrypted macaroon that needs to be decrypted will prompt for the user's -// password by calling the provided password callback. Normally that should -// result in the user being prompted for the password in the terminal. -func (e *macaroonEntry) loadMacaroon( - pwCallback getPasswordFn) (*macaroon.Macaroon, er.R) { - - if len(strings.TrimSpace(e.Data)) == 0 { - return nil, er.Errorf("macaroon data is empty") - } - - var ( - macBytes []byte - err er.R - ) - - // Either decrypt or simply decode the macaroon data. - if strings.HasPrefix(e.Data, encryptionPrefix) { - parts := strings.Split(e.Data, ":") - if len(parts) != 3 { - return nil, er.Errorf("invalid encrypted macaroon " + - "format, expected 'snacl::" + - "'") - } - - pw, err := pwCallback("Enter macaroon encryption password: ") - if err != nil { - return nil, er.Errorf("could not read password from "+ - "terminal: %v", err) - } - - macBytes, err = decryptMacaroon(parts[1], parts[2], pw) - if err != nil { - return nil, er.Errorf("unable to decrypt macaroon: %v", - err) - } - } else { - macBytes, err = util.DecodeHex(e.Data) - if err != nil { - return nil, er.Errorf("unable to hex decode "+ - "macaroon: %v", err) - } - } - - // Parse the macaroon data into its native struct. 
- mac := &macaroon.Macaroon{} - if err := mac.UnmarshalBinary(macBytes); err != nil { - return nil, er.Errorf("unable to decode macaroon: %v", err) - } - return mac, nil -} - -// storeMacaroon stores a native macaroon instance to the entry. If a non-nil -// password is provided, then the macaroon is encrypted with that password. If -// not, the macaroon is stored as plain text. -func (e *macaroonEntry) storeMacaroon(mac *macaroon.Macaroon, pw []byte) er.R { - // First of all, make sure we can serialize the macaroon. - macBytes, errr := mac.MarshalBinary() - if errr != nil { - return er.Errorf("unable to marshal macaroon: %v", errr) - } - - if len(pw) == 0 { - e.Data = hex.EncodeToString(macBytes) - return nil - } - - // The user did set a password. Let's derive an encryption key from it. - key, err := snacl.NewSecretKey( - &pw, snacl.DefaultN, snacl.DefaultR, snacl.DefaultP, - ) - if err != nil { - return er.Errorf("unable to create encryption key: %v", err) - } - - // Encrypt the macaroon data with the derived key and store it in the - // human readable format snacl::. - encryptedMac, err := key.Encrypt(macBytes) - if err != nil { - return er.Errorf("unable to encrypt macaroon: %v", err) - } - - keyB64 := base64.StdEncoding.EncodeToString(key.Marshal()) - dataB64 := base64.StdEncoding.EncodeToString(encryptedMac) - e.Data = fmt.Sprintf("%s%s:%s", encryptionPrefix, keyB64, dataB64) - - return nil -} - -// decryptMacaroon decrypts the cipher text macaroon by using the serialized -// encryption key and the password. -func decryptMacaroon(keyB64, dataB64 string, pw []byte) ([]byte, er.R) { - // Base64 decode both the marshalled encryption key and macaroon data. 
- keyData, errr := base64.StdEncoding.DecodeString(keyB64) - if errr != nil { - return nil, er.Errorf("could not base64 decode encryption "+ - "key: %v", errr) - } - encryptedMac, errr := base64.StdEncoding.DecodeString(dataB64) - if errr != nil { - return nil, er.Errorf("could not base64 decode macaroon "+ - "data: %v", errr) - } - - // Unmarshal the encryption key and ask the user for the password. - key := &snacl.SecretKey{} - err := key.Unmarshal(keyData) - if err != nil { - return nil, er.Errorf("could not unmarshal encryption key: %v", - err) - } - - // Derive the final encryption key and then decrypt the macaroon with - // it. - err = key.DeriveKey(&pw) - if err != nil { - return nil, er.Errorf("could not derive encryption key, "+ - "possibly due to incorrect password: %v", err) - } - macBytes, err := key.Decrypt(encryptedMac) - if err != nil { - return nil, er.Errorf("could not decrypt macaroon data: %v", - err) - } - return macBytes, nil -} diff --git a/lnd/cmd/lncli/macaroon_jar_test.go b/lnd/cmd/lncli/macaroon_jar_test.go deleted file mode 100644 index 0bc840d5..00000000 --- a/lnd/cmd/lncli/macaroon_jar_test.go +++ /dev/null @@ -1,103 +0,0 @@ -package main - -import ( - "testing" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/stretchr/testify/require" - "gopkg.in/macaroon.v2" -) - -var ( - dummyMacStr = "0201047465737402067788991234560000062052d26ed139ea5af8" + - "3e675500c4ccb2471f62191b745bab820f129e5588a255d2" - dummyMac, _ = util.DecodeHex(dummyMacStr) - encryptedEntry = &macaroonEntry{ - Name: "encryptedMac", - Data: "snacl:exX8xbUOb6Gih88ybL2jZGo+DBDPU2tYKkvo0eVVmbDGDoFP" + - "zlv5xvqNK5eml0LKLcB8LdZRw43qXK1W2OLs/gBAAAAAAAAACAAA" + - "AAAAAAABAAAAAAAAAA==:C8TN/aDOvSLiBCX+IdoPTx+UUWhVdGj" + - "NQvbcaWp+KXQWqPfpRZpjJQ6B2PDx5mJxImcezJGPx8ShAqMdxWe" + - "l2precU+1cOjk7HQFkYuu943eJ00s6JerAY+ssg==", - } - plaintextEntry = &macaroonEntry{ - Name: "plaintextMac", - Data: dummyMacStr, - } - - testPassword = 
[]byte("S3curePazzw0rd") - pwCallback = func(string) ([]byte, er.R) { - return testPassword, nil - } - noPwCallback = func(string) ([]byte, er.R) { - return nil, nil - } -) - -// TestMacaroonJarEncrypted tests that a macaroon can be stored and retrieved -// safely by encrypting/decrypting it with a password. -func TestMacaroonJarEncrypted(t *testing.T) { - // Create a new macaroon entry from the dummy macaroon and encrypt it - // with the test password. - newEntry := &macaroonEntry{ - Name: "encryptedMac", - } - err := newEntry.storeMacaroon(toMacaroon(t, dummyMac), testPassword) - util.RequireNoErr(t, err) - - // Now decrypt it again and make sure we get the same content back. - mac, err := newEntry.loadMacaroon(pwCallback) - util.RequireNoErr(t, err) - macBytes, errr := mac.MarshalBinary() - util.RequireNoErr(t, er.E(errr)) - require.Equal(t, dummyMac, macBytes) - - // The encrypted data of the entry we just created shouldn't be the - // same as our test entry because of the salt snacl uses. - require.NotEqual(t, encryptedEntry.Data, newEntry.Data) - - // Decrypt the hard coded test entry and make sure the decrypted content - // matches our created entry. - mac, err = encryptedEntry.loadMacaroon(pwCallback) - util.RequireNoErr(t, err) - macBytes, errr = mac.MarshalBinary() - util.RequireNoErr(t, er.E(errr)) - require.Equal(t, dummyMac, macBytes) -} - -// TestMacaroonJarPlaintext tests that a macaroon can be stored and retrieved -// as plaintext as well. -func TestMacaroonJarPlaintext(t *testing.T) { - // Create a new macaroon entry from the dummy macaroon and encrypt it - // with the test password. - newEntry := &macaroonEntry{ - Name: "plaintextMac", - } - err := newEntry.storeMacaroon(toMacaroon(t, dummyMac), nil) - util.RequireNoErr(t, err) - - // Now decrypt it again and make sure we get the same content back. 
- mac, err := newEntry.loadMacaroon(noPwCallback) - util.RequireNoErr(t, err) - macBytes, errr := mac.MarshalBinary() - util.RequireNoErr(t, er.E(errr)) - require.Equal(t, dummyMac, macBytes) - require.Equal(t, plaintextEntry.Data, newEntry.Data) - - // Load the hard coded plaintext test entry and make sure the loaded - // content matches our created entry. - mac, err = plaintextEntry.loadMacaroon(noPwCallback) - util.RequireNoErr(t, err) - macBytes, errr = mac.MarshalBinary() - util.RequireNoErr(t, er.E(errr)) - require.Equal(t, dummyMac, macBytes) -} - -func toMacaroon(t *testing.T, macData []byte) *macaroon.Macaroon { - mac := &macaroon.Macaroon{} - errr := mac.UnmarshalBinary(macData) - util.RequireNoErr(t, er.E(errr)) - - return mac -} diff --git a/lnd/cmd/lncli/main.go b/lnd/cmd/lncli/main.go deleted file mode 100644 index 62b783e0..00000000 --- a/lnd/cmd/lncli/main.go +++ /dev/null @@ -1,402 +0,0 @@ -// Copyright (c) 2013-2017 The btcsuite developers -// Copyright (c) 2015-2016 The Decred developers -// Copyright (C) 2015-2017 The Lightning Network Developers - -package main - -import ( - "crypto/tls" - "fmt" - "os" - "path/filepath" - "strings" - "syscall" - - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/lncfg" - "github.com/pkt-cash/pktd/lnd/lnrpc" - "github.com/pkt-cash/pktd/lnd/macaroons" - "github.com/pkt-cash/pktd/pktconfig/version" - "github.com/urfave/cli" - - "golang.org/x/crypto/ssh/terminal" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" -) - -const ( - defaultDataDir = "data" - defaultChainSubDir = "chain" - defaultTLSCertFilename = "tls.cert" - defaultMacaroonFilename = "admin.macaroon" - defaultRPCPort = "10009" - defaultRPCHostPort = "localhost:" + defaultRPCPort -) - -var ( - defaultLndDir = btcutil.AppDataDir("lnd", false) - defaultTLSCertPath = filepath.Join(defaultLndDir, defaultTLSCertFilename) - - // maxMsgRecvSize is the largest message our client will 
receive. We - // set this to 200MiB atm. - maxMsgRecvSize = grpc.MaxCallRecvMsgSize(1 * 1024 * 1024 * 200) -) - -func fatal(err er.R) { - fmt.Fprintf(os.Stderr, "[lncli] %v\n", err) - os.Exit(1) -} - -func getWalletUnlockerClient(ctx *cli.Context) (lnrpc.WalletUnlockerClient, func()) { - conn := getClientConn(ctx, true) - - cleanUp := func() { - conn.Close() - } - - return lnrpc.NewWalletUnlockerClient(conn), cleanUp -} - -func getMetaServiceClient(ctx *cli.Context) (lnrpc.MetaServiceClient, func()) { - conn := getClientConn(ctx, true) - - cleanUp := func() { - conn.Close() - } - - return lnrpc.NewMetaServiceClient(conn), cleanUp -} - - -func getClient(ctx *cli.Context) (lnrpc.LightningClient, func()) { - conn := getClientConn(ctx, false) - - cleanUp := func() { - conn.Close() - } - - return lnrpc.NewLightningClient(conn), cleanUp -} - -func getClientConn(ctx *cli.Context, skipMacaroons bool) *grpc.ClientConn { - // First, we'll get the selected stored profile or an ephemeral one - // created from the global options in the CLI context. - profile, err := getGlobalOptions(ctx, skipMacaroons) - if err != nil { - fatal(er.Errorf("could not load global options: %v", err)) - } - - // Load the specified TLS certificate. - certPool, err := profile.cert() - if err != nil { - fatal(er.Errorf("could not create cert pool: %v", err)) - } - - var opts []grpc.DialOption - if ctx.GlobalBool("notls") { - opts = append(opts, grpc.WithInsecure()) - } else { - // Build transport credentials from the certificate pool. If there is no - // certificate pool, we expect the server to use a non-self-signed - // certificate such as a certificate obtained from Let's Encrypt. - var creds credentials.TransportCredentials - if certPool != nil { - creds = credentials.NewClientTLSFromCert(certPool, "") - } else { - // Fallback to the system pool. Using an empty tls config is an - // alternative to x509.SystemCertPool(). That call is not - // supported on Windows. 
- creds = credentials.NewTLS(&tls.Config{}) - } - - // Create a dial options array. - opts = append(opts, grpc.WithTransportCredentials(creds)) - } - - // Only process macaroon credentials if --no-macaroons isn't set and - // if we're not skipping macaroon processing. - if !profile.NoMacaroons && !skipMacaroons { - // Find out which macaroon to load. - macName := profile.Macaroons.Default - if ctx.GlobalIsSet("macfromjar") { - macName = ctx.GlobalString("macfromjar") - } - var macEntry *macaroonEntry - for _, entry := range profile.Macaroons.Jar { - if entry.Name == macName { - macEntry = entry - break - } - } - if macEntry == nil { - fatal(er.Errorf("macaroon with name '%s' not found "+ - "in profile", macName)) - } - - // Get and possibly decrypt the specified macaroon. - // - // TODO(guggero): Make it possible to cache the password so we - // don't need to ask for it every time. - mac, err := macEntry.loadMacaroon(readPassword) - if err != nil { - fatal(er.Errorf("could not load macaroon: %v", err)) - } - - macConstraints := []macaroons.Constraint{ - // We add a time-based constraint to prevent replay of the - // macaroon. It's good for 60 seconds by default to make up for - // any discrepancy between client and server clocks, but leaking - // the macaroon before it becomes invalid makes it possible for - // an attacker to reuse the macaroon. In addition, the validity - // time of the macaroon is extended by the time the server clock - // is behind the client clock, or shortened by the time the - // server clock is ahead of the client clock (or invalid - // altogether if, in the latter case, this time is more than 60 - // seconds). - // TODO(aakselrod): add better anti-replay protection. - macaroons.TimeoutConstraint(profile.Macaroons.Timeout), - - // Lock macaroon down to a specific IP address. - macaroons.IPLockConstraint(profile.Macaroons.IP), - - // ... Add more constraints if needed. - } - - // Apply constraints to the macaroon. 
- constrainedMac, err := macaroons.AddConstraints( - mac, macConstraints..., - ) - if err != nil { - fatal(err) - } - - // Now we append the macaroon credentials to the dial options. - cred := macaroons.NewMacaroonCredential(constrainedMac) - opts = append(opts, grpc.WithPerRPCCredentials(cred)) - } - - // We need to use a custom dialer so we can also connect to unix sockets - // and not just TCP addresses. - genericDialer := lncfg.ClientAddressDialer(defaultRPCPort) - opts = append(opts, grpc.WithContextDialer(genericDialer)) - opts = append(opts, grpc.WithDefaultCallOptions(maxMsgRecvSize)) - - conn, errr := grpc.Dial(profile.RPCServer, opts...) - if errr != nil { - fatal(er.Errorf("unable to connect to RPC server: %v", errr)) - } - - return conn -} - -// extractPathArgs parses the TLS certificate and macaroon paths from the -// command. -func extractPathArgs(ctx *cli.Context) (string, string, er.R) { - // We'll start off by parsing the active chain and network. These are - // needed to determine the correct path to the macaroon when not - // specified. - chain := strings.ToLower(ctx.GlobalString("chain")) - switch chain { - case "bitcoin", "litecoin", "pkt": - default: - return "", "", er.Errorf("unknown chain: %v", chain) - } - - network := strings.ToLower(ctx.GlobalString("network")) - switch network { - case "mainnet", "testnet", "regtest", "simnet": - default: - return "", "", er.Errorf("unknown network: %v", network) - } - - // We'll now fetch the lnddir so we can make a decision on how to - // properly read the macaroons (if needed) and also the cert. This will - // either be the default, or will have been overwritten by the end - // user. - lndDir := lncfg.CleanAndExpandPath(ctx.GlobalString("lnddir")) - - // If the macaroon path as been manually provided, then we'll only - // target the specified file. 
- var macPath string - if ctx.GlobalString("macaroonpath") != "" { - macPath = lncfg.CleanAndExpandPath(ctx.GlobalString("macaroonpath")) - } else { - // Otherwise, we'll go into the path: - // lnddir/data/chain// in order to fetch the - // macaroon that we need. - macPath = filepath.Join( - lndDir, defaultDataDir, defaultChainSubDir, chain, - network, defaultMacaroonFilename, - ) - } - - tlsCertPath := lncfg.CleanAndExpandPath(ctx.GlobalString("tlscertpath")) - - // If a custom lnd directory was set, we'll also check if custom paths - // for the TLS cert and macaroon file were set as well. If not, we'll - // override their paths so they can be found within the custom lnd - // directory set. This allows us to set a custom lnd directory, along - // with custom paths to the TLS cert and macaroon file. - if lndDir != defaultLndDir { - tlsCertPath = filepath.Join(lndDir, defaultTLSCertFilename) - } - - return tlsCertPath, macPath, nil -} - -func main() { - app := cli.NewApp() - app.Name = "lncli" - app.Version = version.Version() - app.Usage = "control plane for your Lightning Network Daemon (lnd)" - app.Flags = []cli.Flag{ - cli.StringFlag{ - Name: "rpcserver", - Value: defaultRPCHostPort, - Usage: "The host:port of LN daemon.", - }, - cli.StringFlag{ - Name: "lnddir", - Value: defaultLndDir, - Usage: "The path to lnd's base directory.", - }, - cli.BoolFlag{ - Name: "notls", - Usage: "Disable TLS, needed if --notls is passed to pld.", - }, - cli.StringFlag{ - Name: "tlscertpath", - Value: defaultTLSCertPath, - Usage: "The path to lnd's TLS certificate.", - }, - cli.StringFlag{ - Name: "chain, c", - Usage: "The chain lnd is running on, e.g. pkt.", - Value: "pkt", - }, - cli.StringFlag{ - Name: "network, n", - Usage: "The network lnd is running on, e.g. 
mainnet, " + - "testnet, etc.", - Value: "mainnet", - }, - cli.BoolFlag{ - Name: "no-macaroons", - Usage: "Disable macaroon authentication.", - }, - cli.StringFlag{ - Name: "macaroonpath", - Usage: "The path to macaroon file.", - }, - cli.Int64Flag{ - Name: "macaroontimeout", - Value: 60, - Usage: "Anti-replay macaroon validity time in seconds.", - }, - cli.StringFlag{ - Name: "macaroonip", - Usage: "If set, lock macaroon to specific IP address.", - }, - cli.StringFlag{ - Name: "profile, p", - Usage: "Instead of reading settings from command " + - "line parameters or using the default " + - "profile, use a specific profile. If " + - "a default profile is set, this flag can be " + - "set to an empty string to disable reading " + - "values from the profiles file.", - }, - cli.StringFlag{ - Name: "macfromjar", - Usage: "Use this macaroon from the profile's " + - "macaroon jar instead of the default one. " + - "Can only be used if profiles are defined.", - }, - } - app.Commands = []cli.Command{ - createCommand, - unlockCommand, - changePasswordCommand, - newAddressCommand, - estimateFeeCommand, - sendManyCommand, - sendCoinsCommand, - listUnspentCommand, - connectCommand, - disconnectCommand, - openChannelCommand, - closeChannelCommand, - closeAllChannelsCommand, - abandonChannelCommand, - listPeersCommand, - walletBalanceCommand, - getAddressBalancesCommand, - channelBalanceCommand, - getInfoCommand, - getRecoveryInfoCommand, - pendingChannelsCommand, - sendPaymentCommand, - payInvoiceCommand, - sendToRouteCommand, - addInvoiceCommand, - lookupInvoiceCommand, - listInvoicesCommand, - listChannelsCommand, - closedChannelsCommand, - listPaymentsCommand, - describeGraphCommand, - getNodeMetricsCommand, - getChanInfoCommand, - getNodeInfoCommand, - queryRoutesCommand, - getNetworkInfoCommand, - debugLevelCommand, - decodePayReqCommand, - listChainTxnsCommand, - stopCommand, - signMessageCommand, - verifyMessageCommand, - feeReportCommand, - updateChannelPolicyCommand, - 
forwardingHistoryCommand, - exportChanBackupCommand, - verifyChanBackupCommand, - restoreChanBackupCommand, - bakeMacaroonCommand, - listMacaroonIDsCommand, - deleteMacaroonIDCommand, - listPermissionsCommand, - printMacaroonCommand, - trackPaymentCommand, - versionCommand, - profileSubCommand, - resyncCommand, - stopresyncCommand, - } - - // Add any extra commands determined by build flags. - app.Commands = append(app.Commands, autopilotCommands()...) - app.Commands = append(app.Commands, invoicesCommands()...) - app.Commands = append(app.Commands, routerCommands()...) - app.Commands = append(app.Commands, walletCommands()...) - app.Commands = append(app.Commands, watchtowerCommands()...) - app.Commands = append(app.Commands, wtclientCommands()...) - - if err := app.Run(os.Args); err != nil { - fatal(er.E(err)) - } -} - -// readPassword reads a password from the terminal. This requires there to be an -// actual TTY so passing in a password from stdin won't work. -func readPassword(text string) ([]byte, er.R) { - fmt.Print(text) - - // The variable syscall.Stdin is of a different type in the Windows API - // that's why we need the explicit cast. And of course the linter - // doesn't like it either. 
- pw, err := terminal.ReadPassword(int(syscall.Stdin)) // nolint:unconvert - fmt.Println() - return pw, er.E(err) -} diff --git a/lnd/cmd/lncli/profile.go b/lnd/cmd/lncli/profile.go deleted file mode 100644 index 2846bff9..00000000 --- a/lnd/cmd/lncli/profile.go +++ /dev/null @@ -1,258 +0,0 @@ -package main - -import ( - "bytes" - "crypto/x509" - "encoding/json" - "io/ioutil" - "path" - "strings" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/lncfg" - "github.com/pkt-cash/pktd/lnd/lnrpc" - "github.com/pkt-cash/pktd/lnd/walletunlocker" - "github.com/urfave/cli" - "gopkg.in/macaroon.v2" -) - -var ( - errNoProfileFile = er.GenericErrorType.CodeWithDetail("errNoProfileFile", - "no profile file found") -) - -// profileEntry is a struct that represents all settings for one specific -// profile. -type profileEntry struct { - Name string `json:"name"` - RPCServer string `json:"rpcserver"` - LndDir string `json:"lnddir"` - Chain string `json:"chain"` - Network string `json:"network"` - NoMacaroons bool `json:"no-macaroons,omitempty"` - TLSCert string `json:"tlscert"` - Macaroons *macaroonJar `json:"macaroons"` -} - -// cert returns the profile's TLS certificate as a x509 certificate pool. -func (e *profileEntry) cert() (*x509.CertPool, er.R) { - if e.TLSCert == "" { - return nil, nil - } - - cp := x509.NewCertPool() - if !cp.AppendCertsFromPEM([]byte(e.TLSCert)) { - return nil, er.Errorf("credentials: failed to append " + - "certificate") - } - return cp, nil -} - -// getGlobalOptions returns the global connection options. If a profile file -// exists, these global options might be read from a predefined profile. If no -// profile exists, the global options from the command line are returned as an -// ephemeral profile entry. -func getGlobalOptions(ctx *cli.Context, skipMacaroons bool) (*profileEntry, er.R) { - - var profileName string - - // Try to load the default profile file and depending on its existence - // what profile to use. 
- f, err := loadProfileFile(defaultProfileFile) - switch { - // The legacy case where no profile file exists and the user also didn't - // request to use one. We only consider the global options here. - case errNoProfileFile.Is(err) && !ctx.GlobalIsSet("profile"): - return profileFromContext(ctx, false, skipMacaroons) - - // The file doesn't exist but the user specified an explicit profile. - case errNoProfileFile.Is(err) && ctx.GlobalIsSet("profile"): - return nil, er.Errorf("profile file %s does not exist", - defaultProfileFile) - - // There is a file but we couldn't read/parse it. - case err != nil: - return nil, er.Errorf("could not read profile file %s: "+ - "%v", defaultProfileFile, err) - - // The user explicitly disabled the use of profiles for this command by - // setting the flag to an empty string. We fall back to the default/old - // behavior. - case ctx.GlobalIsSet("profile") && ctx.GlobalString("profile") == "": - return profileFromContext(ctx, false, skipMacaroons) - - // There is a file, but no default profile is specified. The user also - // didn't specify a profile to use so we fall back to the default/old - // behavior. - case !ctx.GlobalIsSet("profile") && len(f.Default) == 0: - return profileFromContext(ctx, false, skipMacaroons) - - // The user didn't specify a profile but there is a default one defined. - case !ctx.GlobalIsSet("profile") && len(f.Default) > 0: - profileName = f.Default - - // The user specified a specific profile to use. - case ctx.GlobalIsSet("profile"): - profileName = ctx.GlobalString("profile") - } - - // If we got to here, we do have a profile file and know the name of the - // profile to use. Now we just need to make sure it does exist. 
- for _, prof := range f.Profiles { - if prof.Name == profileName { - return prof, nil - } - } - - return nil, er.Errorf("profile '%s' not found in file %s", profileName, - defaultProfileFile) -} - -// profileFromContext creates an ephemeral profile entry from the global options -// set in the CLI context. -func profileFromContext(ctx *cli.Context, store, skipMacaroons bool) ( - *profileEntry, er.R) { - - // Parse the paths of the cert and macaroon. This will validate the - // chain and network value as well. - tlsCertPath, macPath, err := extractPathArgs(ctx) - if err != nil { - return nil, err - } - - // Load the certificate file now, if specified. We store it as plain PEM - // directly. - var tlsCert []byte - if lnrpc.FileExists(tlsCertPath) { - var err error - tlsCert, err = ioutil.ReadFile(tlsCertPath) - if err != nil { - return nil, er.Errorf("could not load TLS cert file "+ - "%s: %v", tlsCertPath, err) - } - } - - entry := &profileEntry{ - RPCServer: ctx.GlobalString("rpcserver"), - LndDir: lncfg.CleanAndExpandPath(ctx.GlobalString("lnddir")), - Chain: ctx.GlobalString("chain"), - Network: ctx.GlobalString("network"), - NoMacaroons: ctx.GlobalBool("no-macaroons"), - TLSCert: string(tlsCert), - } - - // If we aren't using macaroons in general (flag --no-macaroons) or - // don't need macaroons for this command (wallet unlocker), we can now - // return already. - if skipMacaroons || ctx.GlobalBool("no-macaroons") { - return entry, nil - } - - // Now load and possibly encrypt the macaroon file. - macBytes, errr := ioutil.ReadFile(macPath) - if errr != nil { - return nil, er.Errorf("unable to read macaroon path (check "+ - "the network setting!): %v", errr) - } - mac := &macaroon.Macaroon{} - if errr = mac.UnmarshalBinary(macBytes); errr != nil { - return nil, er.Errorf("unable to decode macaroon: %v", errr) - } - - var pw []byte - if store { - // Read a password from the terminal. If it's empty, we won't - // encrypt the macaroon and store it plaintext. 
- pw, err = capturePassword( - "Enter password to encrypt macaroon with or leave "+ - "blank to store in plaintext: ", true, - walletunlocker.ValidatePassword, - ) - if err != nil { - return nil, er.Errorf("unable to get encryption "+ - "password: %v", err) - } - } - macEntry := &macaroonEntry{} - if err = macEntry.storeMacaroon(mac, pw); err != nil { - return nil, er.Errorf("unable to store macaroon: %v", err) - } - - // We determine the name of the macaroon from the file itself but cut - // off the ".macaroon" at the end. - macEntry.Name = path.Base(macPath) - if path.Ext(macEntry.Name) == "macaroon" { - macEntry.Name = strings.TrimSuffix(macEntry.Name, ".macaroon") - } - - // Now that we have the macaroon jar as well, let's return the entry - // with all the values populated. - entry.Macaroons = &macaroonJar{ - Default: macEntry.Name, - Timeout: ctx.GlobalInt64("macaroontimeout"), - IP: ctx.GlobalString("macaroonip"), - Jar: []*macaroonEntry{macEntry}, - } - - return entry, nil -} - -// loadProfileFile tries to load the file specified and JSON deserialize it into -// the profile file struct. -func loadProfileFile(file string) (*profileFile, er.R) { - if !lnrpc.FileExists(file) { - return nil, errNoProfileFile.Default() - } - - content, errr := ioutil.ReadFile(file) - if errr != nil { - return nil, er.Errorf("could not load profile file %s: %v", - file, errr) - } - f := &profileFile{} - err := f.unmarshalJSON(content) - if err != nil { - return nil, er.Errorf("could not unmarshal profile file %s: "+ - "%v", file, err) - } - return f, nil -} - -// saveProfileFile stores the given profile file struct in the specified file, -// overwriting it if it already existed. 
-func saveProfileFile(file string, f *profileFile) er.R { - content, err := f.marshalJSON() - if err != nil { - return er.Errorf("could not marshal profile: %v", err) - } - return er.E(ioutil.WriteFile(file, content, 0644)) -} - -// profileFile is a struct that represents the whole content of a profile file. -type profileFile struct { - Default string `json:"default,omitempty"` - Profiles []*profileEntry `json:"profiles"` -} - -// unmarshalJSON tries to parse the given JSON and unmarshal it into the -// receiving instance. -func (f *profileFile) unmarshalJSON(content []byte) er.R { - return er.E(json.Unmarshal(content, f)) -} - -// marshalJSON serializes the receiving instance to formatted/indented JSON. -func (f *profileFile) marshalJSON() ([]byte, er.R) { - b, err := json.Marshal(f) - if err != nil { - return nil, er.Errorf("error JSON marshalling profile: %v", - err) - } - - var out bytes.Buffer - err = json.Indent(&out, b, "", " ") - if err != nil { - return nil, er.Errorf("error indenting profile JSON: %v", err) - } - out.WriteString("\n") - return out.Bytes(), nil -} diff --git a/lnd/cmd/lncli/routerrpc.go b/lnd/cmd/lncli/routerrpc.go deleted file mode 100644 index 819f66d2..00000000 --- a/lnd/cmd/lncli/routerrpc.go +++ /dev/null @@ -1,13 +0,0 @@ -package main - -import "github.com/urfave/cli" - -// routerCommands returns a list of routerrpc commands. -func routerCommands() []cli.Command { - return []cli.Command{ - queryMissionControlCommand, - queryProbCommand, - resetMissionControlCommand, - buildRouteCommand, - } -} diff --git a/lnd/cmd/lncli/types.go b/lnd/cmd/lncli/types.go deleted file mode 100644 index 58ccd7fb..00000000 --- a/lnd/cmd/lncli/types.go +++ /dev/null @@ -1,68 +0,0 @@ -package main - -import ( - "encoding/hex" - "fmt" - "strconv" - "strings" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/lnd/lnrpc" -) - -// OutPoint displays an outpoint string in the form ":". 
-type OutPoint string - -// NewOutPointFromProto formats the lnrpc.OutPoint into an OutPoint for display. -func NewOutPointFromProto(op *lnrpc.OutPoint) OutPoint { - var hash chainhash.Hash - copy(hash[:], op.TxidBytes) - return OutPoint(fmt.Sprintf("%v:%d", hash, op.OutputIndex)) -} - -// NewProtoOutPoint parses an OutPoint into its corresponding lnrpc.OutPoint -// type. -func NewProtoOutPoint(op string) (*lnrpc.OutPoint, er.R) { - parts := strings.Split(op, ":") - if len(parts) != 2 { - return nil, er.New("outpoint should be of the form txid:index") - } - txid := parts[0] - if hex.DecodedLen(len(txid)) != chainhash.HashSize { - return nil, er.Errorf("invalid hex-encoded txid %v", txid) - } - outputIndex, err := strconv.Atoi(parts[1]) - if err != nil { - return nil, er.Errorf("invalid output index: %v", err) - } - return &lnrpc.OutPoint{ - TxidStr: txid, - OutputIndex: uint32(outputIndex), - }, nil -} - -// Utxo displays information about an unspent output, including its address, -// amount, pkscript, and confirmations. -type Utxo struct { - Type lnrpc.AddressType `json:"address_type"` - Address string `json:"address"` - AmountSat int64 `json:"amount_sat"` - PkScript string `json:"pk_script"` - OutPoint OutPoint `json:"outpoint"` - Confirmations int64 `json:"confirmations"` -} - -// NewUtxoFromProto creates a display Utxo from the Utxo proto. This filters out -// the raw txid bytes from the provided outpoint, which will otherwise be -// printed in base64. 
-func NewUtxoFromProto(utxo *lnrpc.Utxo) *Utxo { - return &Utxo{ - Type: utxo.AddressType, - Address: utxo.Address, - AmountSat: utxo.AmountSat, - PkScript: utxo.PkScript, - OutPoint: NewOutPointFromProto(utxo.Outpoint), - Confirmations: utxo.Confirmations, - } -} diff --git a/lnd/cmd/lncli/walletrpc_active.go b/lnd/cmd/lncli/walletrpc_active.go deleted file mode 100644 index 365cfaaf..00000000 --- a/lnd/cmd/lncli/walletrpc_active.go +++ /dev/null @@ -1,759 +0,0 @@ -// +build walletrpc - -package main - -import ( - "context" - "encoding/base64" - "encoding/hex" - "encoding/json" - "fmt" - "sort" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/lnd/lnrpc" - "github.com/pkt-cash/pktd/lnd/lnrpc/walletrpc" - "github.com/urfave/cli" -) - -var ( - // psbtCommand is a wallet subcommand that is responsible for PSBT - // operations. - psbtCommand = cli.Command{ - Name: "psbt", - Usage: "Interact with partially signed bitcoin transactions " + - "(PSBTs).", - Subcommands: []cli.Command{ - fundPsbtCommand, - finalizePsbtCommand, - }, - } -) - -// walletCommands will return the set of commands to enable for walletrpc -// builds. 
-func walletCommands() []cli.Command { - return []cli.Command{ - { - Name: "wallet", - Category: "Wallet", - Usage: "Interact with the wallet.", - Description: "", - Subcommands: []cli.Command{ - pendingSweepsCommand, - bumpFeeCommand, - bumpCloseFeeCommand, - listSweepsCommand, - labelTxCommand, - releaseOutputCommand, - psbtCommand, - }, - }, - } -} - -func getWalletClient(ctx *cli.Context) (walletrpc.WalletKitClient, func()) { - conn := getClientConn(ctx, false) - cleanUp := func() { - conn.Close() - } - return walletrpc.NewWalletKitClient(conn), cleanUp -} - -var pendingSweepsCommand = cli.Command{ - Name: "pendingsweeps", - Usage: "List all outputs that are pending to be swept within lnd.", - ArgsUsage: "", - Description: ` - List all on-chain outputs that lnd is currently attempting to sweep - within its central batching engine. Outputs with similar fee rates are - batched together in order to sweep them within a single transaction. - `, - Flags: []cli.Flag{}, - Action: actionDecorator(pendingSweeps), -} - -func pendingSweeps(ctx *cli.Context) er.R { - ctxb := context.Background() - client, cleanUp := getWalletClient(ctx) - defer cleanUp() - - req := &walletrpc.PendingSweepsRequest{} - resp, err := client.PendingSweeps(ctxb, req) - if err != nil { - return err - } - - // Sort them in ascending fee rate order for display purposes. 
- sort.Slice(resp.PendingSweeps, func(i, j int) bool { - return resp.PendingSweeps[i].SatPerByte < - resp.PendingSweeps[j].SatPerByte - }) - - var pendingSweepsResp = struct { - PendingSweeps []*PendingSweep `json:"pending_sweeps"` - }{ - PendingSweeps: make([]*PendingSweep, 0, len(resp.PendingSweeps)), - } - - for _, protoPendingSweep := range resp.PendingSweeps { - pendingSweep := NewPendingSweepFromProto(protoPendingSweep) - pendingSweepsResp.PendingSweeps = append( - pendingSweepsResp.PendingSweeps, pendingSweep, - ) - } - - printJSON(pendingSweepsResp) - - return nil -} - -var bumpFeeCommand = cli.Command{ - Name: "bumpfee", - Usage: "Bumps the fee of an arbitrary input/transaction.", - ArgsUsage: "outpoint", - Description: ` - This command takes a different approach than bitcoind's bumpfee command. - lnd has a central batching engine in which inputs with similar fee rates - are batched together to save on transaction fees. Due to this, we cannot - rely on bumping the fee on a specific transaction, since transactions - can change at any point with the addition of new inputs. The list of - inputs that currently exist within lnd's central batching engine can be - retrieved through lncli pendingsweeps. - - When bumping the fee of an input that currently exists within lnd's - central batching engine, a higher fee transaction will be created that - replaces the lower fee transaction through the Replace-By-Fee (RBF) - policy. - - This command also serves useful when wanting to perform a - Child-Pays-For-Parent (CPFP), where the child transaction pays for its - parent's fee. This can be done by specifying an outpoint within the low - fee transaction that is under the control of the wallet. - - A fee preference must be provided, either through the conf_target or - sat_per_byte parameters. - - Note that this command currently doesn't perform any validation checks - on the fee preference being provided. 
For now, the responsibility of - ensuring that the new fee preference is sufficient is delegated to the - user. - - The force flag enables sweeping of inputs that are negatively yielding. - Normally it does not make sense to lose money on sweeping, unless a - parent transaction needs to get confirmed and there is only a small - output available to attach the child transaction to. - `, - Flags: []cli.Flag{ - cli.Uint64Flag{ - Name: "conf_target", - Usage: "the number of blocks that the output should " + - "be swept on-chain within", - }, - cli.Uint64Flag{ - Name: "sat_per_byte", - Usage: "a manual fee expressed in sat/byte that " + - "should be used when sweeping the output", - }, - cli.BoolFlag{ - Name: "force", - Usage: "sweep even if the yield is negative", - }, - }, - Action: actionDecorator(bumpFee), -} - -func bumpFee(ctx *cli.Context) er.R { - // Display the command's help message if we do not have the expected - // number of arguments/flags. - if ctx.NArg() != 1 { - return er.E(cli.ShowCommandHelp(ctx, "bumpfee")) - } - - // Validate and parse the relevant arguments/flags. - protoOutPoint, err := NewProtoOutPoint(ctx.Args().Get(0)) - if err != nil { - return err - } - - client, cleanUp := getWalletClient(ctx) - defer cleanUp() - - resp, err := client.BumpFee(context.Background(), &walletrpc.BumpFeeRequest{ - Outpoint: protoOutPoint, - TargetConf: uint32(ctx.Uint64("conf_target")), - SatPerByte: uint32(ctx.Uint64("sat_per_byte")), - Force: ctx.Bool("force"), - }) - if err != nil { - return err - } - - printRespJSON(resp) - - return nil -} - -var bumpCloseFeeCommand = cli.Command{ - Name: "bumpclosefee", - Usage: "Bumps the fee of a channel closing transaction.", - ArgsUsage: "channel_point", - Description: ` - This command allows the fee of a channel closing transaction to be - increased by using the child-pays-for-parent mechanism. 
It will instruct - the sweeper to sweep the anchor outputs of transactions in the set - of valid commitments for the specified channel at the requested fee - rate or confirmation target. - `, - Flags: []cli.Flag{ - cli.Uint64Flag{ - Name: "conf_target", - Usage: "the number of blocks that the output should " + - "be swept on-chain within", - }, - cli.Uint64Flag{ - Name: "sat_per_byte", - Usage: "a manual fee expressed in sat/byte that " + - "should be used when sweeping the output", - }, - }, - Action: actionDecorator(bumpCloseFee), -} - -func bumpCloseFee(ctx *cli.Context) er.R { - // Display the command's help message if we do not have the expected - // number of arguments/flags. - if ctx.NArg() != 1 { - return er.E(cli.ShowCommandHelp(ctx, "bumpclosefee")) - } - - // Validate the channel point. - channelPoint := ctx.Args().Get(0) - _, err := NewProtoOutPoint(channelPoint) - if err != nil { - return err - } - - // Fetch all waiting close channels. - client, cleanUp := getClient(ctx) - defer cleanUp() - - // Fetch waiting close channel commitments. - commitments, err := getWaitingCloseCommitments(client, channelPoint) - if err != nil { - return err - } - - // Retrieve pending sweeps. - walletClient, cleanUp := getWalletClient(ctx) - defer cleanUp() - - ctxb := context.Background() - sweeps, err := walletClient.PendingSweeps( - ctxb, &walletrpc.PendingSweepsRequest{}, - ) - if err != nil { - return err - } - - // Match pending sweeps with commitments of the channel for which a bump - // is requested and bump their fees. - commitSet := map[string]struct{}{ - commitments.LocalTxid: {}, - commitments.RemoteTxid: {}, - } - if commitments.RemotePendingTxid != "" { - commitSet[commitments.RemotePendingTxid] = struct{}{} - } - - for _, sweep := range sweeps.PendingSweeps { - // Only bump anchor sweeps. - if sweep.WitnessType != walletrpc.WitnessType_COMMITMENT_ANCHOR { - continue - } - - // Skip unrelated sweeps. 
- sweepTxID, err := chainhash.NewHash(sweep.Outpoint.TxidBytes) - if err != nil { - return err - } - if _, match := commitSet[sweepTxID.String()]; !match { - continue - } - - // Bump fee of the anchor sweep. - fmt.Printf("Bumping fee of %v:%v\n", - sweepTxID, sweep.Outpoint.OutputIndex) - - _, err = walletClient.BumpFee(ctxb, &walletrpc.BumpFeeRequest{ - Outpoint: sweep.Outpoint, - TargetConf: uint32(ctx.Uint64("conf_target")), - SatPerByte: uint32(ctx.Uint64("sat_per_byte")), - Force: true, - }) - if err != nil { - return err - } - } - - return nil -} - -func getWaitingCloseCommitments(client lnrpc.LightningClient, - channelPoint string) (*lnrpc.PendingChannelsResponse_Commitments, - error) { - - ctxb := context.Background() - - req := &lnrpc.PendingChannelsRequest{} - resp, err := client.PendingChannels(ctxb, req) - if err != nil { - return nil, err - } - - // Lookup the channel commit tx hashes. - for _, channel := range resp.WaitingCloseChannels { - if channel.Channel.ChannelPoint == channelPoint { - return channel.Commitments, nil - } - } - - return nil, er.New("channel not found") -} - -var listSweepsCommand = cli.Command{ - Name: "listsweeps", - Usage: "Lists all sweeps that have been published by our node.", - Flags: []cli.Flag{ - cli.BoolFlag{ - Name: "verbose", - Usage: "lookup full transaction", - }, - }, - Description: ` - Get a list of the hex-encoded transaction ids of every sweep that our - node has published. Note that these sweeps may not be confirmed on chain - yet, as we store them on transaction broadcast, not confirmation. - - If the verbose flag is set, the full set of transactions will be - returned, otherwise only the sweep transaction ids will be returned. 
- `, - Action: actionDecorator(listSweeps), -} - -func listSweeps(ctx *cli.Context) er.R { - client, cleanUp := getWalletClient(ctx) - defer cleanUp() - - resp, err := client.ListSweeps( - context.Background(), &walletrpc.ListSweepsRequest{ - Verbose: ctx.IsSet("verbose"), - }, - ) - if err != nil { - return err - } - - printJSON(resp) - - return nil -} - -var labelTxCommand = cli.Command{ - Name: "labeltx", - Usage: "adds a label to a transaction", - ArgsUsage: "txid label", - Description: ` - Add a label to a transaction. If the transaction already has a label, - this call will fail unless the overwrite option is set. The label is - limited to 500 characters. Note that multi word labels must be contained - in quotation marks (""). - `, - Flags: []cli.Flag{ - cli.BoolFlag{ - Name: "overwrite", - Usage: "set to overwrite existing labels", - }, - }, - Action: actionDecorator(labelTransaction), -} - -func labelTransaction(ctx *cli.Context) er.R { - // Display the command's help message if we do not have the expected - // number of arguments/flags. - if ctx.NArg() != 2 { - return er.E(cli.ShowCommandHelp(ctx, "labeltx")) - } - - // Get the transaction id and check that it is a valid hash. - txid := ctx.Args().Get(0) - hash, err := chainhash.NewHashFromStr(txid) - if err != nil { - return err - } - - label := ctx.Args().Get(1) - - walletClient, cleanUp := getWalletClient(ctx) - defer cleanUp() - - ctxb := context.Background() - _, err = walletClient.LabelTransaction( - ctxb, &walletrpc.LabelTransactionRequest{ - Txid: hash[:], - Label: label, - Overwrite: ctx.Bool("overwrite"), - }, - ) - if err != nil { - return err - } - - fmt.Printf("Transaction: %v labelled with: %v\n", txid, label) - - return nil -} - -// utxoLease contains JSON annotations for a lease on an unspent output. 
-type utxoLease struct { - ID string `json:"id"` - OutPoint OutPoint `json:"outpoint"` - Expiration uint64 `json:"expiration"` -} - -// fundPsbtResponse is a struct that contains JSON annotations for nice result -// serialization. -type fundPsbtResponse struct { - Psbt string `json:"psbt"` - ChangeOutputIndex int32 `json:"change_output_index"` - Locks []*utxoLease `json:"locks"` -} - -var fundPsbtCommand = cli.Command{ - Name: "fund", - Usage: "Fund a Partially Signed Bitcoin Transaction (PSBT).", - ArgsUsage: "[--template_psbt=T | [--outputs=O [--inputs=I]]] " + - "[--conf_target=C | --sat_per_vbyte=S]", - Description: ` - The fund command creates a fully populated PSBT that contains enough - inputs to fund the outputs specified in either the PSBT or the - --outputs flag. - - If there are no inputs specified in the template (or --inputs flag), - coin selection is performed automatically. If inputs are specified, the - wallet assumes that full coin selection happened externally and it will - not add any additional inputs to the PSBT. If the specified inputs - aren't enough to fund the outputs with the given fee rate, an error is - returned. - - After either selecting or verifying the inputs, all input UTXOs are - locked with an internal app ID. 
- - The 'outputs' flag decodes addresses and the amount to send respectively - in the following JSON format: - - --outputs='{"ExampleAddr": NumCoinsInSatoshis, "SecondAddr": Sats}' - - The optional 'inputs' flag decodes a JSON list of UTXO outpoints as - returned by the listunspent command for example: - - --inputs='[":",":",...]' - `, - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "template_psbt", - Usage: "the outputs to fund and optional inputs to " + - "spend provided in the base64 PSBT format", - }, - cli.StringFlag{ - Name: "outputs", - Usage: "a JSON compatible map of destination " + - "addresses to amounts to send, must not " + - "include a change address as that will be " + - "added automatically by the wallet", - }, - cli.StringFlag{ - Name: "inputs", - Usage: "an optional JSON compatible list of UTXO " + - "outpoints to use as the PSBT's inputs", - }, - cli.Uint64Flag{ - Name: "conf_target", - Usage: "the number of blocks that the transaction " + - "should be confirmed on-chain within", - Value: 6, - }, - cli.Uint64Flag{ - Name: "sat_per_vbyte", - Usage: "a manual fee expressed in sat/vbyte that " + - "should be used when creating the transaction", - }, - }, - Action: actionDecorator(fundPsbt), -} - -func fundPsbt(ctx *cli.Context) er.R { - // Display the command's help message if there aren't any flags - // specified. - if ctx.NumFlags() == 0 { - return er.E(cli.ShowCommandHelp(ctx, "fund")) - } - - req := &walletrpc.FundPsbtRequest{} - - // Parse template flags. - switch { - // The PSBT flag is mutally exclusive with the outputs/inputs flags. - case ctx.IsSet("template_psbt") && - (ctx.IsSet("inputs") || ctx.IsSet("outputs")): - - return er.Errorf("cannot set template_psbt and inputs/" + - "outputs flags at the same time") - - // Use a pre-existing PSBT as the transaction template. 
- case len(ctx.String("template_psbt")) > 0: - psbtBase64 := ctx.String("template_psbt") - psbtBytes, err := base64.StdEncoding.DecodeString(psbtBase64) - if err != nil { - return err - } - - req.Template = &walletrpc.FundPsbtRequest_Psbt{ - Psbt: psbtBytes, - } - - // The user manually specified outputs and optional inputs in JSON - // format. - case len(ctx.String("outputs")) > 0: - var ( - tpl = &walletrpc.TxTemplate{} - amountToAddr map[string]uint64 - ) - - // Parse the address to amount map as JSON now. At least one - // entry must be present. - jsonMap := []byte(ctx.String("outputs")) - if err := json.Unmarshal(jsonMap, &amountToAddr); err != nil { - return er.Errorf("error parsing outputs JSON: %v", - err) - } - if len(amountToAddr) == 0 { - return er.Errorf("at least one output must be " + - "specified") - } - tpl.Outputs = amountToAddr - - // Inputs are optional. - if len(ctx.String("inputs")) > 0 { - var inputs []string - - jsonList := []byte(ctx.String("inputs")) - if err := json.Unmarshal(jsonList, &inputs); err != nil { - return er.Errorf("error parsing inputs JSON: "+ - "%v", err) - } - - for idx, input := range inputs { - op, err := NewProtoOutPoint(input) - if err != nil { - return er.Errorf("error parsing "+ - "UTXO outpoint %d: %v", idx, - err) - } - tpl.Inputs = append(tpl.Inputs, op) - } - } - - req.Template = &walletrpc.FundPsbtRequest_Raw{ - Raw: tpl, - } - - default: - return er.Errorf("must specify either template_psbt or " + - "outputs flag") - } - - // Parse fee flags. 
- switch { - case ctx.IsSet("conf_target") && ctx.IsSet("sat_per_vbyte"): - return er.Errorf("cannot set conf_target and sat_per_vbyte " + - "at the same time") - - case ctx.Uint64("conf_target") > 0: - req.Fees = &walletrpc.FundPsbtRequest_TargetConf{ - TargetConf: uint32(ctx.Uint64("conf_target")), - } - - case ctx.Uint64("sat_per_vbyte") > 0: - req.Fees = &walletrpc.FundPsbtRequest_SatPerVbyte{ - SatPerVbyte: uint32(ctx.Uint64("sat_per_vbyte")), - } - } - - walletClient, cleanUp := getWalletClient(ctx) - defer cleanUp() - - response, err := walletClient.FundPsbt(context.Background(), req) - if err != nil { - return err - } - - jsonLocks := make([]*utxoLease, len(response.LockedUtxos)) - for idx, lock := range response.LockedUtxos { - jsonLocks[idx] = &utxoLease{ - ID: hex.EncodeToString(lock.Id), - OutPoint: NewOutPointFromProto(lock.Outpoint), - Expiration: lock.Expiration, - } - } - - printJSON(&fundPsbtResponse{ - Psbt: base64.StdEncoding.EncodeToString( - response.FundedPsbt, - ), - ChangeOutputIndex: response.ChangeOutputIndex, - Locks: jsonLocks, - }) - - return nil -} - -// finalizePsbtResponse is a struct that contains JSON annotations for nice -// result serialization. -type finalizePsbtResponse struct { - Psbt string `json:"psbt"` - FinalTx string `json:"final_tx"` -} - -var finalizePsbtCommand = cli.Command{ - Name: "finalize", - Usage: "Finalize a Partially Signed Bitcoin Transaction (PSBT).", - ArgsUsage: "funded_psbt", - Description: ` - The finalize command expects a partial transaction with all inputs - and outputs fully declared and tries to sign all inputs that belong to - the wallet. Lnd must be the last signer of the transaction. That means, - if there are any unsigned non-witness inputs or inputs without UTXO - information attached or inputs without witness data that do not belong - to lnd's wallet, this method will fail. If no error is returned, the - PSBT is ready to be extracted and the final TX within to be broadcast. 
- - This method does NOT publish the transaction after it's been finalized - successfully. - `, - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "funded_psbt", - Usage: "the base64 encoded PSBT to finalize", - }, - }, - Action: actionDecorator(finalizePsbt), -} - -func finalizePsbt(ctx *cli.Context) er.R { - // Display the command's help message if we do not have the expected - // number of arguments/flags. - if ctx.NArg() != 1 && ctx.NumFlags() != 1 { - return er.E(cli.ShowCommandHelp(ctx, "finalize")) - } - - var ( - args = ctx.Args() - psbtBase64 string - ) - switch { - case ctx.IsSet("funded_psbt"): - psbtBase64 = ctx.String("funded_psbt") - case args.Present(): - psbtBase64 = args.First() - default: - return er.Errorf("funded_psbt argument missing") - } - - psbtBytes, err := base64.StdEncoding.DecodeString(psbtBase64) - if err != nil { - return err - } - req := &walletrpc.FinalizePsbtRequest{ - FundedPsbt: psbtBytes, - } - - walletClient, cleanUp := getWalletClient(ctx) - defer cleanUp() - - response, err := walletClient.FinalizePsbt(context.Background(), req) - if err != nil { - return err - } - - printJSON(&finalizePsbtResponse{ - Psbt: base64.StdEncoding.EncodeToString(response.SignedPsbt), - FinalTx: hex.EncodeToString(response.RawFinalTx), - }) - - return nil -} - -var releaseOutputCommand = cli.Command{ - Name: "releaseoutput", - Usage: "Release an output previously locked by lnd.", - ArgsUsage: "outpoint", - Description: ` - The releaseoutput command unlocks an output, allowing it to be available - for coin selection if it remains unspent. - - The internal lnd app lock ID is used when releasing the output. - Therefore only UTXOs locked by the fundpsbt command can currently be - released with this command. 
- `, - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "outpoint", - Usage: "the output to unlock", - }, - }, - Action: actionDecorator(releaseOutput), -} - -func releaseOutput(ctx *cli.Context) er.R { - // Display the command's help message if we do not have the expected - // number of arguments/flags. - if ctx.NArg() != 1 && ctx.NumFlags() != 1 { - return er.E(cli.ShowCommandHelp(ctx, "releaseoutput")) - } - - var ( - args = ctx.Args() - outpointStr string - ) - switch { - case ctx.IsSet("outpoint"): - outpointStr = ctx.String("outpoint") - case args.Present(): - outpointStr = args.First() - default: - return er.Errorf("outpoint argument missing") - } - - outpoint, err := NewProtoOutPoint(outpointStr) - if err != nil { - return er.Errorf("error parsing outpoint: %v", err) - } - req := &walletrpc.ReleaseOutputRequest{ - Outpoint: outpoint, - Id: walletrpc.LndInternalLockID[:], - } - - walletClient, cleanUp := getWalletClient(ctx) - defer cleanUp() - - response, err := walletClient.ReleaseOutput(context.Background(), req) - if err != nil { - return err - } - - printRespJSON(response) - - return nil -} diff --git a/lnd/cmd/lncli/walletrpc_default.go b/lnd/cmd/lncli/walletrpc_default.go deleted file mode 100644 index f919a993..00000000 --- a/lnd/cmd/lncli/walletrpc_default.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build !walletrpc - -package main - -import "github.com/urfave/cli" - -// walletCommands will return nil for non-walletrpc builds. -func walletCommands() []cli.Command { - return nil -} diff --git a/lnd/cmd/lncli/walletrpc_types.go b/lnd/cmd/lncli/walletrpc_types.go deleted file mode 100644 index fec3cedf..00000000 --- a/lnd/cmd/lncli/walletrpc_types.go +++ /dev/null @@ -1,33 +0,0 @@ -package main - -import "github.com/pkt-cash/pktd/lnd/lnrpc/walletrpc" - -// PendingSweep is a CLI-friendly type of the walletrpc.PendingSweep proto. We -// use this to show more useful string versions of byte slices and enums. 
-type PendingSweep struct { - OutPoint OutPoint `json:"outpoint"` - WitnessType string `json:"witness_type"` - AmountSat uint32 `json:"amount_sat"` - SatPerByte uint32 `json:"sat_per_byte"` - BroadcastAttempts uint32 `json:"broadcast_attempts"` - NextBroadcastHeight uint32 `json:"next_broadcast_height"` - RequestedSatPerByte uint32 `json:"requested_sat_per_byte"` - RequestedConfTarget uint32 `json:"requested_conf_target"` - Force bool `json:"force"` -} - -// NewPendingSweepFromProto converts the walletrpc.PendingSweep proto type into -// its corresponding CLI-friendly type. -func NewPendingSweepFromProto(pendingSweep *walletrpc.PendingSweep) *PendingSweep { - return &PendingSweep{ - OutPoint: NewOutPointFromProto(pendingSweep.Outpoint), - WitnessType: pendingSweep.WitnessType.String(), - AmountSat: pendingSweep.AmountSat, - SatPerByte: pendingSweep.SatPerByte, - BroadcastAttempts: pendingSweep.BroadcastAttempts, - NextBroadcastHeight: pendingSweep.NextBroadcastHeight, - RequestedSatPerByte: pendingSweep.RequestedSatPerByte, - RequestedConfTarget: pendingSweep.RequestedConfTarget, - Force: pendingSweep.Force, - } -} diff --git a/lnd/cmd/lncli/watchtower_active.go b/lnd/cmd/lncli/watchtower_active.go deleted file mode 100644 index 72217239..00000000 --- a/lnd/cmd/lncli/watchtower_active.go +++ /dev/null @@ -1,57 +0,0 @@ -// +build watchtowerrpc - -package main - -import ( - "context" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/lnrpc/watchtowerrpc" - "github.com/urfave/cli" -) - -func watchtowerCommands() []cli.Command { - return []cli.Command{ - { - Name: "tower", - Usage: "Interact with the watchtower.", - Category: "Watchtower", - Subcommands: []cli.Command{ - towerInfoCommand, - }, - }, - } -} - -func getWatchtowerClient(ctx *cli.Context) (watchtowerrpc.WatchtowerClient, func()) { - conn := getClientConn(ctx, false) - cleanup := func() { - conn.Close() - } - return watchtowerrpc.NewWatchtowerClient(conn), cleanup -} - -var 
towerInfoCommand = cli.Command{ - Name: "info", - Usage: "Returns basic information related to the active watchtower.", - Action: actionDecorator(towerInfo), -} - -func towerInfo(ctx *cli.Context) er.R { - if ctx.NArg() != 0 || ctx.NumFlags() > 0 { - return er.E(cli.ShowCommandHelp(ctx, "info")) - } - - client, cleanup := getWatchtowerClient(ctx) - defer cleanup() - - req := &watchtowerrpc.GetInfoRequest{} - resp, err := client.GetInfo(context.Background(), req) - if err != nil { - return err - } - - printRespJSON(resp) - - return nil -} diff --git a/lnd/cmd/lncli/watchtower_default.go b/lnd/cmd/lncli/watchtower_default.go deleted file mode 100644 index 41d887a9..00000000 --- a/lnd/cmd/lncli/watchtower_default.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build !watchtowerrpc - -package main - -import "github.com/urfave/cli" - -// watchtowerCommands will return nil for non-watchtowerrpc builds. -func watchtowerCommands() []cli.Command { - return nil -} diff --git a/lnd/cmd/lncli/wtclient.go b/lnd/cmd/lncli/wtclient.go deleted file mode 100644 index 6aadcfe8..00000000 --- a/lnd/cmd/lncli/wtclient.go +++ /dev/null @@ -1,272 +0,0 @@ -package main - -import ( - "context" - "strings" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/lnd/lnrpc/wtclientrpc" - "github.com/urfave/cli" -) - -// wtclientCommands will return nil for non-wtclientrpc builds. -func wtclientCommands() []cli.Command { - return []cli.Command{ - { - Name: "wtclient", - Usage: "Interact with the watchtower client.", - Category: "Watchtower", - Subcommands: []cli.Command{ - addTowerCommand, - removeTowerCommand, - listTowersCommand, - getTowerCommand, - statsCommand, - policyCommand, - }, - }, - } -} - -// getWtclient initializes a connection to the watchtower client RPC in order to -// interact with it. 
-func getWtclient(ctx *cli.Context) (wtclientrpc.WatchtowerClientClient, func()) { - conn := getClientConn(ctx, false) - cleanUp := func() { - conn.Close() - } - return wtclientrpc.NewWatchtowerClientClient(conn), cleanUp -} - -var addTowerCommand = cli.Command{ - Name: "add", - Usage: "Register a watchtower to use for future sessions/backups.", - Description: "If the watchtower has already been registered, then " + - "this command serves as a way of updating the watchtower " + - "with new addresses it is reachable over.", - ArgsUsage: "pubkey@address", - Action: actionDecorator(addTower), -} - -func addTower(ctx *cli.Context) er.R { - // Display the command's help message if the number of arguments/flags - // is not what we expect. - if ctx.NArg() != 1 || ctx.NumFlags() > 0 { - return er.E(cli.ShowCommandHelp(ctx, "add")) - } - - parts := strings.Split(ctx.Args().First(), "@") - if len(parts) != 2 { - return er.New("expected tower of format pubkey@address") - } - pubKey, err := util.DecodeHex(parts[0]) - if err != nil { - return er.Errorf("invalid public key: %v", err) - } - address := parts[1] - - client, cleanUp := getWtclient(ctx) - defer cleanUp() - - req := &wtclientrpc.AddTowerRequest{ - Pubkey: pubKey, - Address: address, - } - resp, errr := client.AddTower(context.Background(), req) - if errr != nil { - return er.E(errr) - } - - printRespJSON(resp) - return nil -} - -var removeTowerCommand = cli.Command{ - Name: "remove", - Usage: "Remove a watchtower to prevent its use for future " + - "sessions/backups.", - Description: "An optional address can be provided to remove, " + - "indicating that the watchtower is no longer reachable at " + - "this address. 
If an address isn't provided, then the " + - "watchtower will no longer be used for future sessions/backups.", - ArgsUsage: "pubkey | pubkey@address", - Action: actionDecorator(removeTower), -} - -func removeTower(ctx *cli.Context) er.R { - // Display the command's help message if the number of arguments/flags - // is not what we expect. - if ctx.NArg() != 1 || ctx.NumFlags() > 0 { - return er.E(cli.ShowCommandHelp(ctx, "remove")) - } - - // The command can have only one argument, but it can be interpreted in - // either of the following formats: - // - // pubkey or pubkey@address - // - // The hex-encoded public key of the watchtower is always required, - // while the second is an optional address we'll remove from the - // watchtower's database record. - parts := strings.Split(ctx.Args().First(), "@") - if len(parts) > 2 { - return er.New("expected tower of format pubkey@address") - } - pubKey, err := util.DecodeHex(parts[0]) - if err != nil { - return er.Errorf("invalid public key: %v", err) - } - var address string - if len(parts) == 2 { - address = parts[1] - } - - client, cleanUp := getWtclient(ctx) - defer cleanUp() - - req := &wtclientrpc.RemoveTowerRequest{ - Pubkey: pubKey, - Address: address, - } - resp, errr := client.RemoveTower(context.Background(), req) - if errr != nil { - return er.E(errr) - } - - printRespJSON(resp) - return nil -} - -var listTowersCommand = cli.Command{ - Name: "towers", - Usage: "Display information about all registered watchtowers.", - Flags: []cli.Flag{ - cli.BoolFlag{ - Name: "include_sessions", - Usage: "include sessions with the watchtower in the " + - "response", - }, - }, - Action: actionDecorator(listTowers), -} - -func listTowers(ctx *cli.Context) er.R { - // Display the command's help message if the number of arguments/flags - // is not what we expect. 
- if ctx.NArg() > 0 || ctx.NumFlags() > 1 { - return er.E(cli.ShowCommandHelp(ctx, "towers")) - } - - client, cleanUp := getWtclient(ctx) - defer cleanUp() - - req := &wtclientrpc.ListTowersRequest{ - IncludeSessions: ctx.Bool("include_sessions"), - } - resp, errr := client.ListTowers(context.Background(), req) - if errr != nil { - return er.E(errr) - } - - printRespJSON(resp) - - return nil -} - -var getTowerCommand = cli.Command{ - Name: "tower", - Usage: "Display information about a specific registered watchtower.", - ArgsUsage: "pubkey", - Flags: []cli.Flag{ - cli.BoolFlag{ - Name: "include_sessions", - Usage: "include sessions with the watchtower in the " + - "response", - }, - }, - Action: actionDecorator(getTower), -} - -func getTower(ctx *cli.Context) er.R { - // Display the command's help message if the number of arguments/flags - // is not what we expect. - if ctx.NArg() != 1 || ctx.NumFlags() > 1 { - return er.E(cli.ShowCommandHelp(ctx, "tower")) - } - - // The command only has one argument, which we expect to be the - // hex-encoded public key of the watchtower we'll display information - // about. - pubKey, err := util.DecodeHex(ctx.Args().Get(0)) - if err != nil { - return er.Errorf("invalid public key: %v", err) - } - - client, cleanUp := getWtclient(ctx) - defer cleanUp() - - req := &wtclientrpc.GetTowerInfoRequest{ - Pubkey: pubKey, - IncludeSessions: ctx.Bool("include_sessions"), - } - resp, errr := client.GetTowerInfo(context.Background(), req) - if errr != nil { - return er.E(errr) - } - - printRespJSON(resp) - return nil -} - -var statsCommand = cli.Command{ - Name: "stats", - Usage: "Display the session stats of the watchtower client.", - Action: actionDecorator(stats), -} - -func stats(ctx *cli.Context) er.R { - // Display the command's help message if the number of arguments/flags - // is not what we expect. 
- if ctx.NArg() > 0 || ctx.NumFlags() > 0 { - return er.E(cli.ShowCommandHelp(ctx, "stats")) - } - - client, cleanUp := getWtclient(ctx) - defer cleanUp() - - req := &wtclientrpc.StatsRequest{} - resp, errr := client.Stats(context.Background(), req) - if errr != nil { - return er.E(errr) - } - - printRespJSON(resp) - return nil -} - -var policyCommand = cli.Command{ - Name: "policy", - Usage: "Display the active watchtower client policy configuration.", - Action: actionDecorator(policy), -} - -func policy(ctx *cli.Context) er.R { - // Display the command's help message if the number of arguments/flags - // is not what we expect. - if ctx.NArg() > 0 || ctx.NumFlags() > 0 { - return er.E(cli.ShowCommandHelp(ctx, "policy")) - } - - client, cleanUp := getWtclient(ctx) - defer cleanUp() - - req := &wtclientrpc.PolicyRequest{} - resp, errr := client.Policy(context.Background(), req) - if errr != nil { - return er.E(errr) - } - - printRespJSON(resp) - return nil -} diff --git a/lnd/cmd/lnd/main.go b/lnd/cmd/lnd/main.go deleted file mode 100644 index 5e9bea96..00000000 --- a/lnd/cmd/lnd/main.go +++ /dev/null @@ -1,43 +0,0 @@ -package main - -import ( - "fmt" - "os" - - "github.com/jessevdk/go-flags" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd" - "github.com/pkt-cash/pktd/lnd/signal" -) - -func main() { - // Load the configuration, and parse any command line options. This - // function will also set up logging properly. - loadedConfig, err := lnd.LoadConfig() - if err != nil { - errr := er.Wrapped(err) - if e, ok := errr.(*flags.Error); !ok || e.Type != flags.ErrHelp { - // Print error if not due to help request. - _, _ = fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } - - // Help was requested, exit normally. - os.Exit(0) - } - - // Hook interceptor for os signals. 
- if err := signal.Intercept(); err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } - - // Call the "real" main in a nested manner so the defers will properly - // be executed in the case of a graceful shutdown. - if err := lnd.Main( - loadedConfig, lnd.ListenerCfg{}, signal.ShutdownChannel(), - ); err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } -} diff --git a/lnd/config.go b/lnd/config.go deleted file mode 100644 index 9d4a7cad..00000000 --- a/lnd/config.go +++ /dev/null @@ -1,1638 +0,0 @@ -// Copyright (c) 2013-2017 The btcsuite developers -// Copyright (c) 2015-2016 The Decred developers -// Copyright (C) 2015-2020 The Lightning Network Developers - -package lnd - -import ( - "fmt" - "io/ioutil" - "net" - "os" - "os/user" - "path" - "path/filepath" - "regexp" - "strconv" - "strings" - "time" - - flags "github.com/jessevdk/go-flags" - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/chaincfg/globalcfg" - "github.com/pkt-cash/pktd/lnd/autopilot" - "github.com/pkt-cash/pktd/lnd/chainreg" - "github.com/pkt-cash/pktd/lnd/chanbackup" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/discovery" - "github.com/pkt-cash/pktd/lnd/htlcswitch" - "github.com/pkt-cash/pktd/lnd/htlcswitch/hodl" - "github.com/pkt-cash/pktd/lnd/input" - "github.com/pkt-cash/pktd/lnd/lncfg" - "github.com/pkt-cash/pktd/lnd/lnrpc/routerrpc" - "github.com/pkt-cash/pktd/lnd/lnrpc/signrpc" - "github.com/pkt-cash/pktd/lnd/routing" - "github.com/pkt-cash/pktd/lnd/tor" - "github.com/pkt-cash/pktd/neutrino" - "github.com/pkt-cash/pktd/pktconfig/version" - "github.com/pkt-cash/pktd/pktlog/log" -) - -const ( - defaultDataDirname = "data" - defaultChainSubDirname = "chain" - defaultGraphSubDirname = "graph" - defaultTowerSubDirname = "watchtower" - defaultTLSCertFilename = "tls.cert" - defaultTLSKeyFilename = "tls.key" - defaultAdminMacFilename = "admin.macaroon" - defaultReadMacFilename = 
"readonly.macaroon" - defaultInvoiceMacFilename = "invoice.macaroon" - defaultLogLevel = "info" - defaultLogDirname = "logs" - defaultLogFilename = "lnd.log" - defaultRPCPort = 10009 - defaultRESTPort = 8080 - defaultPeerPort = 9735 - defaultRPCHost = "localhost" - - defaultNoSeedBackup = false - defaultPaymentsExpirationGracePeriod = time.Duration(0) - defaultTrickleDelay = 90 * 1000 - defaultChanStatusSampleInterval = time.Minute - defaultChanEnableTimeout = 19 * time.Minute - defaultChanDisableTimeout = 20 * time.Minute - defaultHeightHintCacheQueryDisable = false - defaultMaxLogFiles = 3 - defaultMaxLogFileSize = 10 - defaultMinBackoff = time.Second - defaultMaxBackoff = time.Hour - defaultLetsEncryptDirname = "letsencrypt" - defaultLetsEncryptListen = ":80" - - defaultTorSOCKSPort = 9050 - defaultTorDNSHost = "soa.nodes.lightning.directory" - defaultTorDNSPort = 53 - defaultTorControlPort = 9051 - defaultTorV2PrivateKeyFilename = "v2_onion_private_key" - defaultTorV3PrivateKeyFilename = "v3_onion_private_key" - - // minTimeLockDelta is the minimum timelock we require for incoming - // HTLCs on our channels. - minTimeLockDelta = routing.MinCLTVDelta - - // defaultAcceptorTimeout is the time after which an RPCAcceptor will time - // out and return false if it hasn't yet received a response. - defaultAcceptorTimeout = 15 * time.Second - - defaultAlias = "" - defaultColor = "#3399FF" - - // defaultHostSampleInterval is the default amount of time that the - // HostAnnouncer will wait between DNS resolutions to check if the - // backing IP of a host has changed. - defaultHostSampleInterval = time.Minute * 5 - - defaultChainInterval = time.Minute - defaultChainTimeout = time.Second * 10 - defaultChainBackoff = time.Second * 30 - defaultChainAttempts = 3 - - // Set defaults for a health check which ensures that we have space - // available on disk. 
Although this check is off by default so that we - // avoid breaking any existing setups (particularly on mobile), we still - // set the other default values so that the health check can be easily - // enabled with sane defaults. - defaultRequiredDisk = 0.1 - defaultDiskInterval = time.Hour * 12 - defaultDiskTimeout = time.Second * 5 - defaultDiskBackoff = time.Minute - defaultDiskAttempts = 0 - - // defaultRemoteMaxHtlcs specifies the default limit for maximum - // concurrent HTLCs the remote party may add to commitment transactions. - // This value can be overridden with --default-remote-max-htlcs. - defaultRemoteMaxHtlcs = 483 - - // defaultMaxLocalCSVDelay is the maximum delay we accept on our - // commitment output. - // TODO(halseth): find a more scientific choice of value. - defaultMaxLocalCSVDelay = 10000 -) - -var ( - // DefaultLndDir is the default directory where lnd tries to find its - // configuration file and store its data. This is a directory in the - // user's application data, for example: - // C:\Users\\AppData\Local\Lnd on Windows - // ~/.lnd on Linux - // ~/Library/Application Support/Lnd on MacOS - DefaultLndDir = btcutil.AppDataDir("lnd", false) - - // DefaultConfigFile is the default full path of lnd's configuration - // file. 
- DefaultConfigFile = filepath.Join(DefaultLndDir, lncfg.DefaultConfigFilename) - - defaultDataDir = filepath.Join(DefaultLndDir, defaultDataDirname) - defaultLogDir = filepath.Join(DefaultLndDir, defaultLogDirname) - - defaultTowerDir = filepath.Join(defaultDataDir, defaultTowerSubDirname) - - defaultTLSCertPath = filepath.Join(DefaultLndDir, defaultTLSCertFilename) - defaultTLSKeyPath = filepath.Join(DefaultLndDir, defaultTLSKeyFilename) - defaultLetsEncryptDir = filepath.Join(DefaultLndDir, defaultLetsEncryptDirname) - - defaultBtcdDir = btcutil.AppDataDir("btcd", false) - defaultBtcdRPCCertFile = filepath.Join(defaultBtcdDir, "rpc.cert") - - defaultLtcdDir = btcutil.AppDataDir("ltcd", false) - defaultLtcdRPCCertFile = filepath.Join(defaultLtcdDir, "rpc.cert") - - defaultBitcoindDir = btcutil.AppDataDir("bitcoin", false) - defaultLitecoindDir = btcutil.AppDataDir("litecoin", false) - - defaultTorSOCKS = net.JoinHostPort("localhost", strconv.Itoa(defaultTorSOCKSPort)) - defaultTorDNS = net.JoinHostPort(defaultTorDNSHost, strconv.Itoa(defaultTorDNSPort)) - defaultTorControl = net.JoinHostPort("localhost", strconv.Itoa(defaultTorControlPort)) - - // bitcoindEsimateModes defines all the legal values for bitcoind's - // estimatesmartfee RPC call. - defaultBitcoindEstimateMode = "CONSERVATIVE" - bitcoindEstimateModes = [2]string{"ECONOMICAL", defaultBitcoindEstimateMode} - - defaultSphinxDbName = "sphinxreplay.db" -) - -// Config defines the configuration options for lnd. -// -// See LoadConfig for further details regarding the configuration -// loading+parsing process. 
-type Config struct { - ShowVersion bool `short:"V" long:"version" description:"Display version information and exit"` - - LndDir string `long:"lnddir" description:"The base directory that contains lnd's data, logs, configuration file, etc."` - ConfigFile string `short:"C" long:"configfile" description:"Path to configuration file"` - DataDir string `short:"b" long:"datadir" description:"The directory to store lnd's data within"` - SyncFreelist bool `long:"sync-freelist" description:"Whether the databases used within lnd should sync their freelist to disk. This is disabled by default resulting in improved memory performance during operation, but with an increase in startup time."` - - NoTLS bool `long:"notls" description:"Disable TLS on RPC and REST"` - TLSCertPath string `long:"tlscertpath" description:"Path to write the TLS certificate for lnd's RPC and REST services"` - TLSKeyPath string `long:"tlskeypath" description:"Path to write the TLS private key for lnd's RPC and REST services"` - TLSExtraIPs []string `long:"tlsextraip" description:"Adds an extra ip to the generated certificate"` - TLSExtraDomains []string `long:"tlsextradomain" description:"Adds an extra domain to the generated certificate"` - TLSAutoRefresh bool `long:"tlsautorefresh" description:"Re-generate TLS certificate and key if the IPs or domains are changed"` - TLSDisableAutofill bool `long:"tlsdisableautofill" description:"Do not include the interface IPs or the system hostname in TLS certificate, use first --tlsextradomain as Common Name instead, if set"` - - NoMacaroons bool `long:"no-macaroons" description:"Disable macaroon authentication, can only be used if server is not listening on a public interface."` - AdminMacPath string `long:"adminmacaroonpath" description:"Path to write the admin macaroon for lnd's RPC and REST services if it doesn't exist"` - ReadMacPath string `long:"readonlymacaroonpath" description:"Path to write the read-only macaroon for lnd's RPC and REST services if it 
doesn't exist"` - InvoiceMacPath string `long:"invoicemacaroonpath" description:"Path to the invoice-only macaroon for lnd's RPC and REST services if it doesn't exist"` - LogDir string `long:"logdir" description:"Directory to log output."` - MaxLogFiles int `long:"maxlogfiles" description:"Maximum logfiles to keep (0 for no rotation)"` - MaxLogFileSize int `long:"maxlogfilesize" description:"Maximum logfile size in MB"` - AcceptorTimeout time.Duration `long:"acceptortimeout" description:"Time after which an RPCAcceptor will time out and return false if it hasn't yet received a response"` - - LetsEncryptDir string `long:"letsencryptdir" description:"The directory to store Let's Encrypt certificates within"` - LetsEncryptListen string `long:"letsencryptlisten" description:"The IP:port on which lnd will listen for Let's Encrypt challenges. Let's Encrypt will always try to contact on port 80. Often non-root processes are not allowed to bind to ports lower than 1024. This configuration option allows a different port to be used, but must be used in combination with port forwarding from port 80. This configuration can also be used to specify another IP address to listen on, for example an IPv6 address."` - LetsEncryptDomain string `long:"letsencryptdomain" description:"Request a Let's Encrypt certificate for this domain. Note that the certicate is only requested and stored when the first rpc connection comes in."` - - // We'll parse these 'raw' string arguments into real net.Addrs in the - // loadConfig function. We need to expose the 'raw' strings so the - // command line library can access them. - // Only the parsed net.Addrs should be used! 
- RawRPCListeners []string `long:"rpclisten" description:"Add an interface/port/socket to listen for RPC connections"` - RawRESTListeners []string `long:"restlisten" description:"Add an interface/port/socket to listen for REST connections"` - RawListeners []string `long:"listen" description:"Add an interface/port to listen for peer connections"` - RawExternalIPs []string `long:"externalip" description:"Add an ip:port to the list of local addresses we claim to listen on to peers. If a port is not specified, the default (9735) will be used regardless of other parameters"` - ExternalHosts []string `long:"externalhosts" description:"A set of hosts that should be periodically resolved to announce IPs for"` - RPCListeners []net.Addr - RESTListeners []net.Addr - RestCORS []string `long:"restcors" description:"Add an ip:port/hostname to allow cross origin access from. To allow all origins, set as \"*\"."` - Listeners []net.Addr - ExternalIPs []net.Addr - DisableListen bool `long:"nolisten" description:"Disable listening for incoming peer connections"` - DisableRest bool `long:"norest" description:"Disable REST API"` - DisableRestTLS bool `long:"no-rest-tls" description:"Disable TLS for REST connections"` - NAT bool `long:"nat" description:"Toggle NAT traversal support (using either UPnP or NAT-PMP) to automatically advertise your external IP address to the network -- NOTE this does not support devices behind multiple NATs"` - MinBackoff time.Duration `long:"minbackoff" description:"Shortest backoff when reconnecting to persistent peers. Valid time units are {s, m, h}."` - MaxBackoff time.Duration `long:"maxbackoff" description:"Longest backoff when reconnecting to persistent peers. Valid time units are {s, m, h}."` - ConnectionTimeout time.Duration `long:"connectiontimeout" description:"The timeout value for network connections. 
Valid time units are {ms, s, m, h}."` - - DebugLevel string `short:"d" long:"debuglevel" description:"Logging level for all subsystems {trace, debug, info, warn, error, critical} -- You may also specify ,=,=,... to set the log level for individual subsystems -- Use show to list available subsystems"` - - CPUProfile string `long:"cpuprofile" description:"Write CPU profile to the specified file"` - - Profile string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65535"` - - UnsafeDisconnect bool `long:"unsafe-disconnect" description:"DEPRECATED: Allows the rpcserver to intentionally disconnect from peers with open channels. THIS FLAG WILL BE REMOVED IN 0.10.0"` - UnsafeReplay bool `long:"unsafe-replay" description:"Causes a link to replay the adds on its commitment txn after starting up, this enables testing of the sphinx replay logic."` - MaxPendingChannels int `long:"maxpendingchannels" description:"The maximum number of incoming pending channels permitted per peer."` - BackupFilePath string `long:"backupfilepath" description:"The target location of the channel backup file"` - - FeeURL string `long:"feeurl" description:"Optional URL for external fee estimation. 
If no URL is specified, the method for fee estimation will depend on the chosen backend and network."` - - Bitcoin *lncfg.Chain `group:"Bitcoin" namespace:"bitcoin"` - BtcdMode *lncfg.Btcd `group:"btcd" namespace:"btcd"` - BitcoindMode *lncfg.Bitcoind `group:"bitcoind" namespace:"bitcoind"` - NeutrinoMode *lncfg.Neutrino `group:"neutrino" namespace:"neutrino"` - - Litecoin *lncfg.Chain `group:"Litecoin" namespace:"litecoin"` - LtcdMode *lncfg.Btcd `group:"ltcd" namespace:"ltcd"` - LitecoindMode *lncfg.Bitcoind `group:"litecoind" namespace:"litecoind"` - - Pkt *lncfg.Chain `group:"PKT" namespace:"pkt"` - - Autopilot *lncfg.AutoPilot `group:"Autopilot" namespace:"autopilot"` - - Tor *lncfg.Tor `group:"Tor" namespace:"tor"` - - SubRPCServers *subRPCServerConfigs `group:"subrpc"` - - Hodl *hodl.Config `group:"hodl" namespace:"hodl"` - - NoNetBootstrap bool `long:"nobootstrap" description:"If true, then automatic network bootstrapping will not be attempted."` - - NoSeedBackup bool `long:"noseedbackup" description:"If true, NO SEED WILL BE EXPOSED -- EVER, AND THE WALLET WILL BE ENCRYPTED USING THE DEFAULT PASSPHRASE. THIS FLAG IS ONLY FOR TESTING AND SHOULD NEVER BE USED ON MAINNET."` - - ResetWalletTransactions bool `long:"reset-wallet-transactions" description:"Removes all transaction history from the on-chain wallet on startup, forcing a full chain rescan starting at the wallet's birthday. Implements the same functionality as btcwallet's dropwtxmgr command. 
Should be set to false after successful execution to avoid rescanning on every restart of lnd."` - - PaymentsExpirationGracePeriod time.Duration `long:"payments-expiration-grace-period" description:"A period to wait before force closing channels with outgoing htlcs that have timed-out and are a result of this node initiated payments."` - TrickleDelay int `long:"trickledelay" description:"Time in milliseconds between each release of announcements to the network"` - ChanEnableTimeout time.Duration `long:"chan-enable-timeout" description:"The duration that a peer connection must be stable before attempting to send a channel update to reenable or cancel a pending disables of the peer's channels on the network."` - ChanDisableTimeout time.Duration `long:"chan-disable-timeout" description:"The duration that must elapse after first detecting that an already active channel is actually inactive and sending channel update disabling it to the network. The pending disable can be canceled if the peer reconnects and becomes stable for chan-enable-timeout before the disable update is sent."` - ChanStatusSampleInterval time.Duration `long:"chan-status-sample-interval" description:"The polling interval between attempts to detect if an active channel has become inactive due to its peer going offline."` - HeightHintCacheQueryDisable bool `long:"height-hint-cache-query-disable" description:"Disable queries from the height-hint cache to try to recover channels stuck in the pending close state. Disabling height hint queries may cause longer chain rescans, resulting in a performance hit. Unset this after channels are unstuck so you can get better performance again."` - Alias string `long:"alias" description:"The node alias. Used as a moniker by peers and intelligence services"` - Color string `long:"color" description:"The color of the node in hex format (i.e. '#3399FF'). 
Used to customize node appearance in intelligence services"` - MinChanSize int64 `long:"minchansize" description:"The smallest channel size (in satoshis) that we should accept. Incoming channels smaller than this will be rejected"` - MaxChanSize int64 `long:"maxchansize" description:"The largest channel size (in satoshis) that we should accept. Incoming channels larger than this will be rejected"` - - DefaultRemoteMaxHtlcs uint16 `long:"default-remote-max-htlcs" description:"The default max_htlc applied when opening or accepting channels. This value limits the number of concurrent HTLCs that the remote party can add to the commitment. The maximum possible value is 483."` - - NumGraphSyncPeers int `long:"numgraphsyncpeers" description:"The number of peers that we should receive new graph updates from. This option can be tuned to save bandwidth for light clients or routing nodes."` - HistoricalSyncInterval time.Duration `long:"historicalsyncinterval" description:"The polling interval between historical graph sync attempts. Each historical graph sync attempt ensures we reconcile with the remote peer's graph from the genesis block."` - - IgnoreHistoricalGossipFilters bool `long:"ignore-historical-gossip-filters" description:"If true, will not reply with historical data that matches the range specified by a remote peer's gossip_timestamp_filter. Doing so will result in lower memory and bandwidth requirements."` - - RejectPush bool `long:"rejectpush" description:"If true, lnd will not accept channel opening requests with non-zero push amounts. This should prevent accidental pushes to merchant nodes."` - - RejectHTLC bool `long:"rejecthtlc" description:"If true, lnd will not forward any HTLCs that are meant as onward payments. 
This option will still allow lnd to send HTLCs and receive HTLCs but lnd won't be used as a hop."` - - StaggerInitialReconnect bool `long:"stagger-initial-reconnect" description:"If true, will apply a randomized staggering between 0s and 30s when reconnecting to persistent peers on startup. The first 10 reconnections will be attempted instantly, regardless of the flag's value"` - - MaxOutgoingCltvExpiry uint32 `long:"max-cltv-expiry" description:"The maximum number of blocks funds could be locked up for when forwarding payments."` - - MaxChannelFeeAllocation float64 `long:"max-channel-fee-allocation" description:"The maximum percentage of total funds that can be allocated to a channel's commitment fee. This only applies for the initiator of the channel. Valid values are within [0.1, 1]."` - - DryRunMigration bool `long:"dry-run-migration" description:"If true, lnd will abort committing a migration if it would otherwise have been successful. This leaves the database unmodified, and still compatible with the previously active version of lnd."` - - net tor.Net - - EnableUpfrontShutdown bool `long:"enable-upfront-shutdown" description:"If true, option upfront shutdown script will be enabled. If peers that we open channels with support this feature, we will automatically set the script to which cooperative closes should be paid out to on channel open. This offers the partial protection of a channel peer disconnecting from us if cooperative close is attempted with a different script."` - - AcceptKeySend bool `long:"accept-keysend" description:"If true, spontaneous payments through keysend will be accepted. [experimental]"` - - KeysendHoldTime time.Duration `long:"keysend-hold-time" description:"If non-zero, keysend payments are accepted but not immediately settled. If the payment isn't settled manually after the specified time, it is canceled automatically. 
[experimental]"` - - GcCanceledInvoicesOnStartup bool `long:"gc-canceled-invoices-on-startup" description:"If true, we'll attempt to garbage collect canceled invoices upon start."` - - GcCanceledInvoicesOnTheFly bool `long:"gc-canceled-invoices-on-the-fly" description:"If true, we'll delete newly canceled invoices on the fly."` - - Routing *lncfg.Routing `group:"routing" namespace:"routing"` - - Workers *lncfg.Workers `group:"workers" namespace:"workers"` - - Caches *lncfg.Caches `group:"caches" namespace:"caches"` - - Prometheus lncfg.Prometheus `group:"prometheus" namespace:"prometheus"` - - WtClient *lncfg.WtClient `group:"wtclient" namespace:"wtclient"` - - Watchtower *lncfg.Watchtower `group:"watchtower" namespace:"watchtower"` - - ProtocolOptions *lncfg.ProtocolOptions `group:"protocol" namespace:"protocol"` - - AllowCircularRoute bool `long:"allow-circular-route" description:"If true, our node will allow htlc forwards that arrive and depart on the same channel."` - - HealthChecks *lncfg.HealthCheckConfig `group:"healthcheck" namespace:"healthcheck"` - - DB *lncfg.DB `group:"db" namespace:"db"` - - // registeredChains keeps track of all chains that have been registered - // with the daemon. - registeredChains *chainreg.ChainRegistry - - // networkDir is the path to the directory of the currently active - // network. This path will hold the files related to each different - // network. - networkDir string - - // ActiveNetParams contains parameters of the target chain. - ActiveNetParams chainreg.BitcoinNetParams -} - -// DefaultConfig returns all default values for the Config struct. 
-func DefaultConfig() Config { - return Config{ - LndDir: DefaultLndDir, - ConfigFile: DefaultConfigFile, - DataDir: defaultDataDir, - DebugLevel: defaultLogLevel, - NoTLS: false, - TLSCertPath: defaultTLSCertPath, - TLSKeyPath: defaultTLSKeyPath, - LetsEncryptDir: defaultLetsEncryptDir, - LetsEncryptListen: defaultLetsEncryptListen, - LogDir: defaultLogDir, - MaxLogFiles: defaultMaxLogFiles, - MaxLogFileSize: defaultMaxLogFileSize, - AcceptorTimeout: defaultAcceptorTimeout, - Bitcoin: &lncfg.Chain{ - MinHTLCIn: chainreg.DefaultBitcoinMinHTLCInMSat, - MinHTLCOut: chainreg.DefaultBitcoinMinHTLCOutMSat, - BaseFee: chainreg.DefaultBitcoinBaseFeeMSat, - FeeRate: chainreg.DefaultBitcoinFeeRate, - TimeLockDelta: chainreg.DefaultBitcoinTimeLockDelta, - MaxLocalDelay: defaultMaxLocalCSVDelay, - Node: "btcd", - }, - BtcdMode: &lncfg.Btcd{ - Dir: defaultBtcdDir, - RPCHost: defaultRPCHost, - RPCCert: defaultBtcdRPCCertFile, - }, - BitcoindMode: &lncfg.Bitcoind{ - Dir: defaultBitcoindDir, - RPCHost: defaultRPCHost, - EstimateMode: defaultBitcoindEstimateMode, - }, - Litecoin: &lncfg.Chain{ - MinHTLCIn: chainreg.DefaultLitecoinMinHTLCInMSat, - MinHTLCOut: chainreg.DefaultLitecoinMinHTLCOutMSat, - BaseFee: chainreg.DefaultLitecoinBaseFeeMSat, - FeeRate: chainreg.DefaultLitecoinFeeRate, - TimeLockDelta: chainreg.DefaultLitecoinTimeLockDelta, - MaxLocalDelay: defaultMaxLocalCSVDelay, - Node: "ltcd", - }, - LtcdMode: &lncfg.Btcd{ - Dir: defaultLtcdDir, - RPCHost: defaultRPCHost, - RPCCert: defaultLtcdRPCCertFile, - }, - LitecoindMode: &lncfg.Bitcoind{ - Dir: defaultLitecoindDir, - RPCHost: defaultRPCHost, - EstimateMode: defaultBitcoindEstimateMode, - }, - Pkt: &lncfg.Chain{ - MinHTLCIn: chainreg.DefaultPktMinHTLCInMSat, - MinHTLCOut: chainreg.DefaultPktMinHTLCOutMSat, - BaseFee: chainreg.DefaultPktBaseFeeMSat, - FeeRate: chainreg.DefaultPktFeeRate, - TimeLockDelta: chainreg.DefaultPktTimeLockDelta, - MaxLocalDelay: defaultMaxLocalCSVDelay, - Node: "neutrino", - }, - NeutrinoMode: 
&lncfg.Neutrino{ - UserAgentName: neutrino.UserAgentName, - UserAgentVersion: neutrino.UserAgentVersion, - }, - UnsafeDisconnect: true, - MaxPendingChannels: lncfg.DefaultMaxPendingChannels, - NoSeedBackup: defaultNoSeedBackup, - MinBackoff: defaultMinBackoff, - MaxBackoff: defaultMaxBackoff, - ConnectionTimeout: tor.DefaultConnTimeout, - SubRPCServers: &subRPCServerConfigs{ - SignRPC: &signrpc.Config{}, - RouterRPC: routerrpc.DefaultConfig(), - }, - Autopilot: &lncfg.AutoPilot{ - MaxChannels: 5, - Allocation: 0.6, - MinChannelSize: int64(minChanFundingSize), - MaxChannelSize: int64(MaxFundingAmount), - MinConfs: 1, - ConfTarget: autopilot.DefaultConfTarget, - Heuristic: map[string]float64{ - "top_centrality": 1.0, - }, - }, - PaymentsExpirationGracePeriod: defaultPaymentsExpirationGracePeriod, - TrickleDelay: defaultTrickleDelay, - ChanStatusSampleInterval: defaultChanStatusSampleInterval, - ChanEnableTimeout: defaultChanEnableTimeout, - ChanDisableTimeout: defaultChanDisableTimeout, - HeightHintCacheQueryDisable: defaultHeightHintCacheQueryDisable, - Alias: defaultAlias, - Color: defaultColor, - MinChanSize: int64(minChanFundingSize), - MaxChanSize: int64(0), - DefaultRemoteMaxHtlcs: defaultRemoteMaxHtlcs, - NumGraphSyncPeers: defaultMinPeers, - HistoricalSyncInterval: discovery.DefaultHistoricalSyncInterval, - Tor: &lncfg.Tor{ - SOCKS: defaultTorSOCKS, - DNS: defaultTorDNS, - Control: defaultTorControl, - }, - net: &tor.ClearNet{}, - Workers: &lncfg.Workers{ - Read: lncfg.DefaultReadWorkers, - Write: lncfg.DefaultWriteWorkers, - Sig: lncfg.DefaultSigWorkers, - }, - Caches: &lncfg.Caches{ - RejectCacheSize: channeldb.DefaultRejectCacheSize, - ChannelCacheSize: channeldb.DefaultChannelCacheSize, - }, - Prometheus: lncfg.DefaultPrometheus(), - Watchtower: &lncfg.Watchtower{ - TowerDir: defaultTowerDir, - }, - HealthChecks: &lncfg.HealthCheckConfig{ - ChainCheck: &lncfg.CheckConfig{ - Interval: defaultChainInterval, - Timeout: defaultChainTimeout, - Attempts: 
defaultChainAttempts, - Backoff: defaultChainBackoff, - }, - DiskCheck: &lncfg.DiskCheckConfig{ - RequiredRemaining: defaultRequiredDisk, - CheckConfig: &lncfg.CheckConfig{ - Interval: defaultDiskInterval, - Attempts: defaultDiskAttempts, - Timeout: defaultDiskTimeout, - Backoff: defaultDiskBackoff, - }, - }, - }, - MaxOutgoingCltvExpiry: htlcswitch.DefaultMaxOutgoingCltvExpiry, - MaxChannelFeeAllocation: htlcswitch.DefaultMaxLinkFeeAllocation, - DB: lncfg.DefaultDB(), - registeredChains: chainreg.NewChainRegistry(), - ActiveNetParams: chainreg.BitcoinTestNetParams, - } -} - -// LoadConfig initializes and parses the config using a config file and command -// line options. -// -// The configuration proceeds as follows: -// 1) Start with a default config with sane settings -// 2) Pre-parse the command line to check for an alternative config file -// 3) Load configuration file overwriting defaults with any specified options -// 4) Parse CLI options and overwrite/add any specified options -func LoadConfig() (*Config, er.R) { - // Pre-parse the command line options to pick up an alternative config - // file. - preCfg := DefaultConfig() - if _, err := flags.Parse(&preCfg); err != nil { - return nil, er.E(err) - } - - // Show the version and exit if the version flag was specified. - appName := filepath.Base(os.Args[0]) - appName = strings.TrimSuffix(appName, filepath.Ext(appName)) - usageMessage := fmt.Sprintf("Use %s -h to show usage", appName) - if preCfg.ShowVersion { - fmt.Println(appName, "version", version.Version()) - os.Exit(0) - } - - // If the config file path has not been modified by the user, then we'll - // use the default config file path. However, if the user has modified - // their lnddir, then we should assume they intend to use the config - // file within it. 
- configFileDir := CleanAndExpandPath(preCfg.LndDir) - configFilePath := CleanAndExpandPath(preCfg.ConfigFile) - if configFileDir != DefaultLndDir { - if configFilePath == DefaultConfigFile { - configFilePath = filepath.Join( - configFileDir, lncfg.DefaultConfigFilename, - ) - } - } - - // Next, load any additional configuration options from the file. - var configFileError error - cfg := preCfg - if err := flags.IniParse(configFilePath, &cfg); err != nil { - // If it's a parsing related error, then we'll return - // immediately, otherwise we can proceed as possibly the config - // file doesn't exist which is OK. - if _, ok := err.(*flags.IniError); ok { - return nil, er.E(err) - } - - configFileError = err - } - - // Finally, parse the remaining command line options again to ensure - // they take precedence. - if _, err := flags.Parse(&cfg); err != nil { - return nil, er.E(err) - } - - // Make sure everything we just loaded makes sense. - cleanCfg, err := ValidateConfig(cfg, usageMessage) - if err != nil { - return nil, err - } - - // Warn about missing config file only after all other configuration is - // done. This prevents the warning on help messages and invalid - // options. Note this should go directly before the return. - if configFileError != nil { - log.Warnf("%v", configFileError) - } - - return cleanCfg, nil -} - -// ValidateConfig check the given configuration to be sane. This makes sure no -// illegal values or combination of values are set. All file system paths are -// normalized. The cleaned up config is returned on success. -func ValidateConfig(cfg Config, usageMessage string) (*Config, er.R) { - // If the provided lnd directory is not the default, we'll modify the - // path to all of the files and directories that will live within it. 
- lndDir := CleanAndExpandPath(cfg.LndDir) - if lndDir != DefaultLndDir { - cfg.DataDir = filepath.Join(lndDir, defaultDataDirname) - cfg.LetsEncryptDir = filepath.Join( - lndDir, defaultLetsEncryptDirname, - ) - cfg.TLSCertPath = filepath.Join(lndDir, defaultTLSCertFilename) - cfg.TLSKeyPath = filepath.Join(lndDir, defaultTLSKeyFilename) - cfg.LogDir = filepath.Join(lndDir, defaultLogDirname) - - // If the watchtower's directory is set to the default, i.e. the - // user has not requested a different location, we'll move the - // location to be relative to the specified lnd directory. - if cfg.Watchtower.TowerDir == defaultTowerDir { - cfg.Watchtower.TowerDir = - filepath.Join(cfg.DataDir, defaultTowerSubDirname) - } - } - - funcName := "loadConfig" - makeDirectory := func(dir string) er.R { - errr := os.MkdirAll(dir, 0700) - if errr != nil { - // Show a nicer error message if it's because a symlink - // is linked to a directory that does not exist - // (probably because it's not mounted). - var err er.R - if e, ok := errr.(*os.PathError); ok && os.IsExist(errr) { - link, lerr := os.Readlink(e.Path) - if lerr == nil { - str := "is symlink %s -> %s mounted?" - err = er.Errorf(str, e.Path, link) - } - } else { - err = er.E(errr) - } - - str := "%s: Failed to create lnd directory: %v" - err = er.Errorf(str, funcName, err) - _, _ = fmt.Fprintln(os.Stderr, err) - return err - } - - return nil - } - - // As soon as we're done parsing configuration options, ensure all paths - // to directories and files are cleaned and expanded before attempting - // to use them later on. 
- cfg.DataDir = CleanAndExpandPath(cfg.DataDir) - cfg.TLSCertPath = CleanAndExpandPath(cfg.TLSCertPath) - cfg.TLSKeyPath = CleanAndExpandPath(cfg.TLSKeyPath) - cfg.LetsEncryptDir = CleanAndExpandPath(cfg.LetsEncryptDir) - cfg.AdminMacPath = CleanAndExpandPath(cfg.AdminMacPath) - cfg.ReadMacPath = CleanAndExpandPath(cfg.ReadMacPath) - cfg.InvoiceMacPath = CleanAndExpandPath(cfg.InvoiceMacPath) - cfg.LogDir = CleanAndExpandPath(cfg.LogDir) - cfg.BtcdMode.Dir = CleanAndExpandPath(cfg.BtcdMode.Dir) - cfg.LtcdMode.Dir = CleanAndExpandPath(cfg.LtcdMode.Dir) - cfg.BitcoindMode.Dir = CleanAndExpandPath(cfg.BitcoindMode.Dir) - cfg.LitecoindMode.Dir = CleanAndExpandPath(cfg.LitecoindMode.Dir) - cfg.Tor.PrivateKeyPath = CleanAndExpandPath(cfg.Tor.PrivateKeyPath) - cfg.Tor.WatchtowerKeyPath = CleanAndExpandPath(cfg.Tor.WatchtowerKeyPath) - cfg.Watchtower.TowerDir = CleanAndExpandPath(cfg.Watchtower.TowerDir) - - // Create the lnd directory and all other sub directories if they don't - // already exist. This makes sure that directory trees are also created - // for files that point to outside of the lnddir. - dirs := []string{ - lndDir, cfg.DataDir, - cfg.LetsEncryptDir, cfg.Watchtower.TowerDir, - filepath.Dir(cfg.TLSCertPath), filepath.Dir(cfg.TLSKeyPath), - filepath.Dir(cfg.AdminMacPath), filepath.Dir(cfg.ReadMacPath), - filepath.Dir(cfg.InvoiceMacPath), - filepath.Dir(cfg.Tor.PrivateKeyPath), - filepath.Dir(cfg.Tor.WatchtowerKeyPath), - } - for _, dir := range dirs { - if err := makeDirectory(dir); err != nil { - return nil, err - } - } - - // Ensure that the user didn't attempt to specify negative values for - // any of the autopilot params. 
- if cfg.Autopilot.MaxChannels < 0 { - str := "%s: autopilot.maxchannels must be non-negative" - err := er.Errorf(str, funcName) - _, _ = fmt.Fprintln(os.Stderr, err) - return nil, err - } - if cfg.Autopilot.Allocation < 0 { - str := "%s: autopilot.allocation must be non-negative" - err := er.Errorf(str, funcName) - _, _ = fmt.Fprintln(os.Stderr, err) - return nil, err - } - if cfg.Autopilot.MinChannelSize < 0 { - str := "%s: autopilot.minchansize must be non-negative" - err := er.Errorf(str, funcName) - _, _ = fmt.Fprintln(os.Stderr, err) - return nil, err - } - if cfg.Autopilot.MaxChannelSize < 0 { - str := "%s: autopilot.maxchansize must be non-negative" - err := er.Errorf(str, funcName) - _, _ = fmt.Fprintln(os.Stderr, err) - return nil, err - } - if cfg.Autopilot.MinConfs < 0 { - str := "%s: autopilot.minconfs must be non-negative" - err := er.Errorf(str, funcName) - _, _ = fmt.Fprintln(os.Stderr, err) - return nil, err - } - if cfg.Autopilot.ConfTarget < 1 { - str := "%s: autopilot.conftarget must be positive" - err := er.Errorf(str, funcName) - _, _ = fmt.Fprintln(os.Stderr, err) - return nil, err - } - - // Ensure that the specified values for the min and max channel size - // are within the bounds of the normal chan size constraints. - if cfg.Autopilot.MinChannelSize < int64(minChanFundingSize) { - cfg.Autopilot.MinChannelSize = int64(minChanFundingSize) - } - if cfg.Autopilot.MaxChannelSize > int64(MaxFundingAmount) { - cfg.Autopilot.MaxChannelSize = int64(MaxFundingAmount) - } - - if _, err := validateAtplCfg(cfg.Autopilot); err != nil { - return nil, err - } - - // Ensure that --maxchansize is properly handled when set by user. - // For non-Wumbo channels this limit remains 16777215 satoshis by default - // as specified in BOLT-02. For wumbo channels this limit is 1,000,000,000. - // satoshis (10 BTC). Always enforce --maxchansize explicitly set by user. - // If unset (marked by 0 value), then enforce proper default. 
- if cfg.MaxChanSize == 0 { - if cfg.ProtocolOptions.Wumbo() { - cfg.MaxChanSize = int64(MaxBtcFundingAmountWumbo) - } else { - cfg.MaxChanSize = int64(MaxBtcFundingAmount) - } - } - - // Ensure that the user specified values for the min and max channel - // size make sense. - if cfg.MaxChanSize < cfg.MinChanSize { - return nil, er.Errorf("invalid channel size parameters: "+ - "max channel size %v, must be no less than min chan size %v", - cfg.MaxChanSize, cfg.MinChanSize, - ) - } - - // Don't allow superflous --maxchansize greater than - // BOLT 02 soft-limit for non-wumbo channel - if !cfg.ProtocolOptions.Wumbo() && cfg.MaxChanSize > int64(MaxFundingAmount) { - return nil, er.Errorf("invalid channel size parameters: "+ - "maximum channel size %v is greater than maximum non-wumbo"+ - " channel size %v", - cfg.MaxChanSize, MaxFundingAmount, - ) - } - - // Ensure a valid max channel fee allocation was set. - if cfg.MaxChannelFeeAllocation <= 0 || cfg.MaxChannelFeeAllocation > 1 { - return nil, er.Errorf("invalid max channel fee allocation: "+ - "%v, must be within (0, 1]", - cfg.MaxChannelFeeAllocation) - } - - // Validate the Tor config parameters. - socks, err := lncfg.ParseAddressString( - cfg.Tor.SOCKS, strconv.Itoa(defaultTorSOCKSPort), - cfg.net.ResolveTCPAddr, - ) - if err != nil { - return nil, err - } - cfg.Tor.SOCKS = socks.String() - - // We'll only attempt to normalize and resolve the DNS host if it hasn't - // changed, as it doesn't need to be done for the default. 
- if cfg.Tor.DNS != defaultTorDNS { - dns, err := lncfg.ParseAddressString( - cfg.Tor.DNS, strconv.Itoa(defaultTorDNSPort), - cfg.net.ResolveTCPAddr, - ) - if err != nil { - return nil, err - } - cfg.Tor.DNS = dns.String() - } - - control, err := lncfg.ParseAddressString( - cfg.Tor.Control, strconv.Itoa(defaultTorControlPort), - cfg.net.ResolveTCPAddr, - ) - if err != nil { - return nil, err - } - cfg.Tor.Control = control.String() - - // Ensure that tor socks host:port is not equal to tor control - // host:port. This would lead to lnd not starting up properly. - if cfg.Tor.SOCKS == cfg.Tor.Control { - str := "%s: tor.socks and tor.control can not use " + - "the same host:port" - return nil, er.Errorf(str, funcName) - } - - switch { - case cfg.Tor.V2 && cfg.Tor.V3: - return nil, er.New("either tor.v2 or tor.v3 can be set, " + - "but not both") - case cfg.DisableListen && (cfg.Tor.V2 || cfg.Tor.V3): - return nil, er.New("listening must be enabled when " + - "enabling inbound connections over Tor") - } - - if cfg.Tor.PrivateKeyPath == "" { - switch { - case cfg.Tor.V2: - cfg.Tor.PrivateKeyPath = filepath.Join( - lndDir, defaultTorV2PrivateKeyFilename, - ) - case cfg.Tor.V3: - cfg.Tor.PrivateKeyPath = filepath.Join( - lndDir, defaultTorV3PrivateKeyFilename, - ) - } - } - - if cfg.Tor.WatchtowerKeyPath == "" { - switch { - case cfg.Tor.V2: - cfg.Tor.WatchtowerKeyPath = filepath.Join( - cfg.Watchtower.TowerDir, defaultTorV2PrivateKeyFilename, - ) - case cfg.Tor.V3: - cfg.Tor.WatchtowerKeyPath = filepath.Join( - cfg.Watchtower.TowerDir, defaultTorV3PrivateKeyFilename, - ) - } - } - - // Set up the network-related functions that will be used throughout - // the daemon. We use the standard Go "net" package functions by - // default. If we should be proxying all traffic through Tor, then - // we'll use the Tor proxy specific functions in order to avoid leaking - // our real information. 
- if cfg.Tor.Active { - cfg.net = &tor.ProxyNet{ - SOCKS: cfg.Tor.SOCKS, - DNS: cfg.Tor.DNS, - StreamIsolation: cfg.Tor.StreamIsolation, - } - } - - if cfg.DisableListen && cfg.NAT { - return nil, er.New("NAT traversal cannot be used when " + - "listening is disabled") - } - if cfg.NAT && len(cfg.ExternalHosts) != 0 { - return nil, er.New("NAT support and externalhosts are " + - "mutually exclusive, only one should be selected") - } - - if !cfg.Bitcoin.Active && !cfg.Litecoin.Active && !cfg.Pkt.Active { - // Default to PKT - cfg.Pkt.Active = true - } - - // Determine the active chain configuration and its parameters. - switch { - // At this moment, multiple active chains are not supported. - case cfg.Litecoin.Active && cfg.Bitcoin.Active: - str := "%s: Currently both Bitcoin and Litecoin cannot be " + - "active together" - return nil, er.Errorf(str, funcName) - - // Either Bitcoin must be active, or Litecoin must be active. - // Otherwise, we don't know which chain we're on. - case !cfg.Bitcoin.Active && !cfg.Litecoin.Active && !cfg.Pkt.Active: - return nil, er.Errorf("%s: either bitcoin.active or "+ - "litecoin.active must be set to 1 (true)", funcName) - - case cfg.Pkt.Active: - cfg.ActiveNetParams = chainreg.PktMainNetParams - // Calling it /pkt/mainnet makes life easier - cfg.ActiveNetParams.Name = "mainnet" - cfg.Pkt.ChainDir = filepath.Join(cfg.DataDir, - defaultChainSubDirname, - chainreg.PktChain.String()) - - // Finally we'll register the litecoin chain as our current - // primary chain. - cfg.registeredChains.RegisterPrimaryChain(chainreg.PktChain) - MaxFundingAmount = maxPktFundingAmount - - case cfg.Litecoin.Active: - err := cfg.Litecoin.Validate(minTimeLockDelta, minLtcRemoteDelay) - if err != nil { - return nil, err - } - - // Multiple networks can't be selected simultaneously. Count - // number of network flags passed; assign active network params - // while we're at it. 
- numNets := 0 - var ltcParams chainreg.LitecoinNetParams - if cfg.Litecoin.MainNet { - numNets++ - ltcParams = chainreg.LitecoinMainNetParams - } - if cfg.Litecoin.TestNet3 { - numNets++ - ltcParams = chainreg.LitecoinTestNetParams - } - if cfg.Litecoin.RegTest { - numNets++ - ltcParams = chainreg.LitecoinRegTestNetParams - } - if cfg.Litecoin.SimNet { - numNets++ - ltcParams = chainreg.LitecoinSimNetParams - } - - if numNets > 1 { - str := "%s: The mainnet, testnet, and simnet params " + - "can't be used together -- choose one of the " + - "three" - err := er.Errorf(str, funcName) - return nil, err - } - - // The target network must be provided, otherwise, we won't - // know how to initialize the daemon. - if numNets == 0 { - str := "%s: either --litecoin.mainnet, or " + - "litecoin.testnet must be specified" - err := er.Errorf(str, funcName) - return nil, err - } - - // The litecoin chain is the current active chain. However - // throughout the codebase we required chaincfg.Params. So as a - // temporary hack, we'll mutate the default net params for - // bitcoin with the litecoin specific information. 
- chainreg.ApplyLitecoinParams(&cfg.ActiveNetParams, &ltcParams) - switch cfg.Litecoin.Node { - case "ltcd": - err := parseRPCParams(cfg.Litecoin, cfg.LtcdMode, - chainreg.LitecoinChain, funcName, cfg.ActiveNetParams) - if err != nil { - err := er.Errorf("unable to load RPC "+ - "credentials for ltcd: %v", err) - return nil, err - } - case "litecoind": - if cfg.Litecoin.SimNet { - return nil, er.Errorf("%s: litecoind does not "+ - "support simnet", funcName) - } - err := parseRPCParams(cfg.Litecoin, cfg.LitecoindMode, - chainreg.LitecoinChain, funcName, cfg.ActiveNetParams) - if err != nil { - err := er.Errorf("unable to load RPC "+ - "credentials for litecoind: %v", err) - return nil, err - } - default: - str := "%s: only ltcd and litecoind mode supported for " + - "litecoin at this time" - return nil, er.Errorf(str, funcName) - } - - cfg.Litecoin.ChainDir = filepath.Join(cfg.DataDir, - defaultChainSubDirname, - chainreg.LitecoinChain.String()) - - // Finally we'll register the litecoin chain as our current - // primary chain. - cfg.registeredChains.RegisterPrimaryChain(chainreg.LitecoinChain) - MaxFundingAmount = maxLtcFundingAmount - - case cfg.Bitcoin.Active: - // Multiple networks can't be selected simultaneously. Count - // number of network flags passed; assign active network params - // while we're at it. 
- numNets := 0 - if cfg.Bitcoin.MainNet { - numNets++ - cfg.ActiveNetParams = chainreg.BitcoinMainNetParams - } - if cfg.Bitcoin.TestNet3 { - numNets++ - cfg.ActiveNetParams = chainreg.BitcoinTestNetParams - } - if cfg.Bitcoin.RegTest { - numNets++ - cfg.ActiveNetParams = chainreg.BitcoinRegTestNetParams - } - if cfg.Bitcoin.SimNet { - numNets++ - cfg.ActiveNetParams = chainreg.BitcoinSimNetParams - } - if numNets > 1 { - str := "%s: The mainnet, testnet, regtest, and " + - "simnet params can't be used together -- " + - "choose one of the four" - err := er.Errorf(str, funcName) - return nil, err - } - - // The target network must be provided, otherwise, we won't - // know how to initialize the daemon. - if numNets == 0 { - str := "%s: either --bitcoin.mainnet, or " + - "bitcoin.testnet, bitcoin.simnet, or bitcoin.regtest " + - "must be specified" - err := er.Errorf(str, funcName) - return nil, err - } - - err := cfg.Bitcoin.Validate(minTimeLockDelta, minBtcRemoteDelay) - if err != nil { - return nil, err - } - - switch cfg.Bitcoin.Node { - case "btcd": - err := parseRPCParams( - cfg.Bitcoin, cfg.BtcdMode, chainreg.BitcoinChain, funcName, - cfg.ActiveNetParams, - ) - if err != nil { - err := er.Errorf("unable to load RPC "+ - "credentials for btcd: %v", err) - return nil, err - } - case "bitcoind": - if cfg.Bitcoin.SimNet { - return nil, er.Errorf("%s: bitcoind does not "+ - "support simnet", funcName) - } - - err := parseRPCParams( - cfg.Bitcoin, cfg.BitcoindMode, chainreg.BitcoinChain, funcName, - cfg.ActiveNetParams, - ) - if err != nil { - err := er.Errorf("unable to load RPC "+ - "credentials for bitcoind: %v", err) - return nil, err - } - case "neutrino": - // No need to get RPC parameters. 
- - default: - str := "%s: only btcd, bitcoind, and neutrino mode " + - "supported for bitcoin at this time" - return nil, er.Errorf(str, funcName) - } - - cfg.Bitcoin.ChainDir = filepath.Join(cfg.DataDir, - defaultChainSubDirname, - chainreg.BitcoinChain.String()) - - // Finally we'll register the bitcoin chain as our current - // primary chain. - cfg.registeredChains.RegisterPrimaryChain(chainreg.BitcoinChain) - } - globalcfg.SelectConfig(cfg.ActiveNetParams.GlobalConf) - - // Ensure that the user didn't attempt to specify negative values for - // any of the autopilot params. - if cfg.Autopilot.MaxChannels < 0 { - str := "%s: autopilot.maxchannels must be non-negative" - err := er.Errorf(str, funcName) - _, _ = fmt.Fprintln(os.Stderr, err) - return nil, err - } - if cfg.Autopilot.Allocation < 0 { - str := "%s: autopilot.allocation must be non-negative" - err := er.Errorf(str, funcName) - _, _ = fmt.Fprintln(os.Stderr, err) - return nil, err - } - if cfg.Autopilot.MinChannelSize < 0 { - str := "%s: autopilot.minchansize must be non-negative" - err := er.Errorf(str, funcName) - _, _ = fmt.Fprintln(os.Stderr, err) - return nil, err - } - if cfg.Autopilot.MaxChannelSize < 0 { - str := "%s: autopilot.maxchansize must be non-negative" - err := er.Errorf(str, funcName) - _, _ = fmt.Fprintln(os.Stderr, err) - return nil, err - } - - // Ensure that the specified values for the min and max channel size - // don't are within the bounds of the normal chan size constraints. - if cfg.Autopilot.MinChannelSize < int64(minChanFundingSize) { - cfg.Autopilot.MinChannelSize = int64(minChanFundingSize) - } - if cfg.Autopilot.MaxChannelSize > int64(MaxFundingAmount) { - cfg.Autopilot.MaxChannelSize = int64(MaxFundingAmount) - } - - // Validate profile port number. 
- if cfg.Profile != "" { - profilePort, err := strconv.Atoi(cfg.Profile) - if err != nil || profilePort < 1024 || profilePort > 65535 { - str := "%s: The profile port must be between 1024 and 65535" - err := er.Errorf(str, funcName) - _, _ = fmt.Fprintln(os.Stderr, err) - _, _ = fmt.Fprintln(os.Stderr, usageMessage) - return nil, err - } - } - - // We'll now construct the network directory which will be where we - // store all the data specific to this chain/network. - cfg.networkDir = filepath.Join( - cfg.DataDir, defaultChainSubDirname, - cfg.registeredChains.PrimaryChain().String(), - lncfg.NormalizeNetwork(cfg.ActiveNetParams.Name), - ) - - // If a custom macaroon directory wasn't specified and the data - // directory has changed from the default path, then we'll also update - // the path for the macaroons to be generated. - if cfg.AdminMacPath == "" { - cfg.AdminMacPath = filepath.Join( - cfg.networkDir, defaultAdminMacFilename, - ) - } - if cfg.ReadMacPath == "" { - cfg.ReadMacPath = filepath.Join( - cfg.networkDir, defaultReadMacFilename, - ) - } - if cfg.InvoiceMacPath == "" { - cfg.InvoiceMacPath = filepath.Join( - cfg.networkDir, defaultInvoiceMacFilename, - ) - } - - // Similarly, if a custom back up file path wasn't specified, then - // we'll update the file location to match our set network directory. - if cfg.BackupFilePath == "" { - cfg.BackupFilePath = filepath.Join( - cfg.networkDir, chanbackup.DefaultBackupFileName, - ) - } - - // Append the network type to the log directory so it is "namespaced" - // per network in the same fashion as the data directory. - cfg.LogDir = filepath.Join(cfg.LogDir, - cfg.registeredChains.PrimaryChain().String(), - lncfg.NormalizeNetwork(cfg.ActiveNetParams.Name)) - - // Parse, validate, and set debug log level(s). 
- err = log.SetLogLevels(cfg.DebugLevel) - if err != nil { - err = er.Errorf("%s: %v", funcName, err.String()) - _, _ = fmt.Fprintln(os.Stderr, err) - _, _ = fmt.Fprintln(os.Stderr, usageMessage) - return nil, err - } - - // At least one RPCListener is required. So listen on localhost per - // default. - if len(cfg.RawRPCListeners) == 0 { - addr := fmt.Sprintf("localhost:%d", defaultRPCPort) - cfg.RawRPCListeners = append(cfg.RawRPCListeners, addr) - } - - // Listen on localhost if no REST listeners were specified. - if len(cfg.RawRESTListeners) == 0 { - addr := fmt.Sprintf("localhost:%d", defaultRESTPort) - cfg.RawRESTListeners = append(cfg.RawRESTListeners, addr) - } - - // Listen on the default interface/port if no listeners were specified. - // An empty address string means default interface/address, which on - // most unix systems is the same as 0.0.0.0. If Tor is active, we - // default to only listening on localhost for hidden service - // connections. - if len(cfg.RawListeners) == 0 { - addr := fmt.Sprintf(":%d", defaultPeerPort) - if cfg.Tor.Active { - addr = fmt.Sprintf("localhost:%d", defaultPeerPort) - } - cfg.RawListeners = append(cfg.RawListeners, addr) - } - - // Add default port to all RPC listener addresses if needed and remove - // duplicate addresses. - cfg.RPCListeners, err = lncfg.NormalizeAddresses( - cfg.RawRPCListeners, strconv.Itoa(defaultRPCPort), - cfg.net.ResolveTCPAddr, - ) - if err != nil { - return nil, err - } - - // Add default port to all REST listener addresses if needed and remove - // duplicate addresses. - cfg.RESTListeners, err = lncfg.NormalizeAddresses( - cfg.RawRESTListeners, strconv.Itoa(defaultRESTPort), - cfg.net.ResolveTCPAddr, - ) - if err != nil { - return nil, err - } - - // For each of the RPC listeners (REST+gRPC), we'll ensure that users - // have specified a safe combo for authentication. If not, we'll bail - // out with an error. 
Since we don't allow disabling TLS for gRPC - // connections we pass in tlsActive=true. - err = lncfg.EnforceSafeAuthentication( - cfg.RPCListeners, !cfg.NoMacaroons, true, - ) - if err != nil { - return nil, err - } - - if cfg.DisableRest { - log.Infof("REST API is disabled!") - cfg.RESTListeners = nil - } else { - err = lncfg.EnforceSafeAuthentication( - cfg.RESTListeners, !cfg.NoMacaroons, !cfg.DisableRestTLS, - ) - if err != nil { - return nil, err - } - } - - // Remove the listening addresses specified if listening is disabled. - if cfg.DisableListen { - log.Infof("Listening on the p2p interface is disabled!") - cfg.Listeners = nil - cfg.ExternalIPs = nil - } else { - - // Add default port to all listener addresses if needed and remove - // duplicate addresses. - cfg.Listeners, err = lncfg.NormalizeAddresses( - cfg.RawListeners, strconv.Itoa(defaultPeerPort), - cfg.net.ResolveTCPAddr, - ) - if err != nil { - return nil, err - } - - // Add default port to all external IP addresses if needed and remove - // duplicate addresses. - cfg.ExternalIPs, err = lncfg.NormalizeAddresses( - cfg.RawExternalIPs, strconv.Itoa(defaultPeerPort), - cfg.net.ResolveTCPAddr, - ) - if err != nil { - return nil, err - } - - // For the p2p port it makes no sense to listen to an Unix socket. - // Also, we would need to refactor the brontide listener to support - // that. - for _, p2pListener := range cfg.Listeners { - if lncfg.IsUnix(p2pListener) { - err := er.Errorf("unix socket addresses cannot be "+ - "used for the p2p connection listener: %s", - p2pListener) - return nil, err - } - } - } - - // Ensure that the specified minimum backoff is below or equal to the - // maximum backoff. - if cfg.MinBackoff > cfg.MaxBackoff { - return nil, er.Errorf("maxbackoff must be greater than " + - "minbackoff") - } - - // Newer versions of lnd added a new sub-config for bolt-specific - // parameters. However we want to also allow existing users to use the - // value on the top-level config. 
If the outer config value is set, - // then we'll use that directly. - if cfg.SyncFreelist { - cfg.DB.Bolt.SyncFreelist = cfg.SyncFreelist - } - - // Ensure that the user hasn't chosen a remote-max-htlc value greater - // than the protocol maximum. - maxRemoteHtlcs := uint16(input.MaxHTLCNumber / 2) - if cfg.DefaultRemoteMaxHtlcs > maxRemoteHtlcs { - return nil, er.Errorf("default-remote-max-htlcs (%v) must be "+ - "less than %v", cfg.DefaultRemoteMaxHtlcs, - maxRemoteHtlcs) - } - - // Validate the subconfigs for workers, caches, and the tower client. - err = lncfg.Validate( - cfg.Workers, - cfg.Caches, - cfg.WtClient, - cfg.DB, - cfg.HealthChecks, - ) - if err != nil { - return nil, err - } - - // Finally, ensure that the user's color is correctly formatted, - // otherwise the server will not be able to start after the unlocking - // the wallet. - _, err = parseHexColor(cfg.Color) - if err != nil { - return nil, er.Errorf("unable to parse node color: %v", err) - } - - // All good, return the sanitized result. - return &cfg, err -} - -// localDatabaseDir returns the default directory where the -// local bolt db files are stored. -func (c *Config) localDatabaseDir() string { - return filepath.Join(c.DataDir, - defaultGraphSubDirname, - lncfg.NormalizeNetwork(c.ActiveNetParams.Name)) -} - -func (c *Config) networkName() string { - return lncfg.NormalizeNetwork(c.ActiveNetParams.Name) -} - -// CleanAndExpandPath expands environment variables and leading ~ in the -// passed path, cleans the result, and returns it. -// This function is taken from https://github.com/btcsuite/btcd -func CleanAndExpandPath(path string) string { - if path == "" { - return "" - } - - // Expand initial ~ to OS specific home directory. 
- if strings.HasPrefix(path, "~") { - var homeDir string - u, err := user.Current() - if err == nil { - homeDir = u.HomeDir - } else { - homeDir = os.Getenv("HOME") - } - - path = strings.Replace(path, "~", homeDir, 1) - } - - // NOTE: The os.ExpandEnv doesn't work with Windows-style %VARIABLE%, - // but the variables can still be expanded via POSIX-style $VARIABLE. - return filepath.Clean(os.ExpandEnv(path)) -} - -func parseRPCParams(cConfig *lncfg.Chain, nodeConfig interface{}, - net chainreg.ChainCode, funcName string, - netParams chainreg.BitcoinNetParams) er.R { // nolint:unparam - - // First, we'll check our node config to make sure the RPC parameters - // were set correctly. We'll also determine the path to the conf file - // depending on the backend node. - var daemonName, confDir, confFile string - switch conf := nodeConfig.(type) { - case *lncfg.Btcd: - // If both RPCUser and RPCPass are set, we assume those - // credentials are good to use. - if conf.RPCUser != "" && conf.RPCPass != "" { - return nil - } - - // Get the daemon name for displaying proper errors. - switch net { - case chainreg.BitcoinChain: - daemonName = "btcd" - confDir = conf.Dir - confFile = "btcd" - case chainreg.LitecoinChain: - daemonName = "ltcd" - confDir = conf.Dir - confFile = "ltcd" - } - - // If only ONE of RPCUser or RPCPass is set, we assume the - // user did that unintentionally. - if conf.RPCUser != "" || conf.RPCPass != "" { - return er.Errorf("please set both or neither of "+ - "%[1]v.rpcuser, %[1]v.rpcpass", daemonName) - } - - case *lncfg.Bitcoind: - // Ensure that if the ZMQ options are set, that they are not - // equal. - if conf.ZMQPubRawBlock != "" && conf.ZMQPubRawTx != "" { - err := checkZMQOptions( - conf.ZMQPubRawBlock, conf.ZMQPubRawTx, - ) - if err != nil { - return err - } - } - - // Ensure that if the estimate mode is set, that it is a legal - // value. 
- if conf.EstimateMode != "" { - err := checkEstimateMode(conf.EstimateMode) - if err != nil { - return err - } - } - - // If all of RPCUser, RPCPass, ZMQBlockHost, and ZMQTxHost are - // set, we assume those parameters are good to use. - if conf.RPCUser != "" && conf.RPCPass != "" && - conf.ZMQPubRawBlock != "" && conf.ZMQPubRawTx != "" { - return nil - } - - // Get the daemon name for displaying proper errors. - switch net { - case chainreg.BitcoinChain: - daemonName = "bitcoind" - confDir = conf.Dir - confFile = "bitcoin" - case chainreg.LitecoinChain: - daemonName = "litecoind" - confDir = conf.Dir - confFile = "litecoin" - } - - // If not all of the parameters are set, we'll assume the user - // did this unintentionally. - if conf.RPCUser != "" || conf.RPCPass != "" || - conf.ZMQPubRawBlock != "" || conf.ZMQPubRawTx != "" { - - return er.Errorf("please set all or none of "+ - "%[1]v.rpcuser, %[1]v.rpcpass, "+ - "%[1]v.zmqpubrawblock, %[1]v.zmqpubrawtx", - daemonName) - } - } - - // If we're in simnet mode, then the running btcd instance won't read - // the RPC credentials from the configuration. So if lnd wasn't - // specified the parameters, then we won't be able to start. 
- if cConfig.SimNet { - str := "%v: rpcuser and rpcpass must be set to your btcd " + - "node's RPC parameters for simnet mode" - return er.Errorf(str, funcName) - } - - fmt.Println("Attempting automatic RPC configuration to " + daemonName) - - confFile = filepath.Join(confDir, fmt.Sprintf("%v.conf", confFile)) - switch cConfig.Node { - case "btcd", "ltcd": - nConf := nodeConfig.(*lncfg.Btcd) - rpcUser, rpcPass, err := extractBtcdRPCParams(confFile) - if err != nil { - return er.Errorf("unable to extract RPC credentials:"+ - " %v, cannot start w/o RPC connection", - err) - } - nConf.RPCUser, nConf.RPCPass = rpcUser, rpcPass - case "bitcoind", "litecoind": - nConf := nodeConfig.(*lncfg.Bitcoind) - rpcUser, rpcPass, zmqBlockHost, zmqTxHost, err := - extractBitcoindRPCParams(netParams.Params.Name, confFile) - if err != nil { - return er.Errorf("unable to extract RPC credentials:"+ - " %v, cannot start w/o RPC connection", - err) - } - nConf.RPCUser, nConf.RPCPass = rpcUser, rpcPass - nConf.ZMQPubRawBlock, nConf.ZMQPubRawTx = zmqBlockHost, zmqTxHost - } - - fmt.Printf("Automatically obtained %v's RPC credentials\n", daemonName) - return nil -} - -// extractBtcdRPCParams attempts to extract the RPC credentials for an existing -// btcd instance. The passed path is expected to be the location of btcd's -// application data directory on the target system. -func extractBtcdRPCParams(btcdConfigPath string) (string, string, er.R) { - // First, we'll open up the btcd configuration file found at the target - // destination. - btcdConfigFile, errr := os.Open(btcdConfigPath) - if errr != nil { - return "", "", er.E(errr) - } - defer func() { _ = btcdConfigFile.Close() }() - - // With the file open extract the contents of the configuration file so - // we can attempt to locate the RPC credentials. - configContents, errr := ioutil.ReadAll(btcdConfigFile) - if errr != nil { - return "", "", er.E(errr) - } - - // Attempt to locate the RPC user using a regular expression. 
If we - // don't have a match for our regular expression then we'll exit with - // an error. - rpcUserRegexp, errr := regexp.Compile(`(?m)^\s*rpcuser\s*=\s*([^\s]+)`) - if errr != nil { - return "", "", er.E(errr) - } - userSubmatches := rpcUserRegexp.FindSubmatch(configContents) - if userSubmatches == nil { - return "", "", er.Errorf("unable to find rpcuser in config") - } - - // Similarly, we'll use another regular expression to find the set - // rpcpass (if any). If we can't find the pass, then we'll exit with an - // error. - rpcPassRegexp, errr := regexp.Compile(`(?m)^\s*rpcpass\s*=\s*([^\s]+)`) - if errr != nil { - return "", "", er.E(errr) - } - passSubmatches := rpcPassRegexp.FindSubmatch(configContents) - if passSubmatches == nil { - return "", "", er.Errorf("unable to find rpcuser in config") - } - - return string(userSubmatches[1]), string(passSubmatches[1]), nil -} - -// extractBitcoindRPCParams attempts to extract the RPC credentials for an -// existing bitcoind node instance. The passed path is expected to be the -// location of bitcoind's bitcoin.conf on the target system. The routine looks -// for a cookie first, optionally following the datadir configuration option in -// the bitcoin.conf. If it doesn't find one, it looks for rpcuser/rpcpassword. -func extractBitcoindRPCParams(networkName string, - bitcoindConfigPath string) (string, string, string, string, er.R) { - - // First, we'll open up the bitcoind configuration file found at the - // target destination. - bitcoindConfigFile, errr := os.Open(bitcoindConfigPath) - if errr != nil { - return "", "", "", "", er.E(errr) - } - defer func() { _ = bitcoindConfigFile.Close() }() - - // With the file open extract the contents of the configuration file so - // we can attempt to locate the RPC credentials. 
- configContents, errr := ioutil.ReadAll(bitcoindConfigFile) - if errr != nil { - return "", "", "", "", er.E(errr) - } - - // First, we'll look for the ZMQ hosts providing raw block and raw - // transaction notifications. - zmqBlockHostRE, errr := regexp.Compile( - `(?m)^\s*zmqpubrawblock\s*=\s*([^\s]+)`, - ) - if errr != nil { - return "", "", "", "", er.E(errr) - } - zmqBlockHostSubmatches := zmqBlockHostRE.FindSubmatch(configContents) - if len(zmqBlockHostSubmatches) < 2 { - return "", "", "", "", er.Errorf("unable to find " + - "zmqpubrawblock in config") - } - zmqTxHostRE, errr := regexp.Compile(`(?m)^\s*zmqpubrawtx\s*=\s*([^\s]+)`) - if errr != nil { - return "", "", "", "", er.E(errr) - } - zmqTxHostSubmatches := zmqTxHostRE.FindSubmatch(configContents) - if len(zmqTxHostSubmatches) < 2 { - return "", "", "", "", er.New("unable to find zmqpubrawtx " + - "in config") - } - zmqBlockHost := string(zmqBlockHostSubmatches[1]) - zmqTxHost := string(zmqTxHostSubmatches[1]) - if err := checkZMQOptions(zmqBlockHost, zmqTxHost); err != nil { - return "", "", "", "", err - } - - // Next, we'll try to find an auth cookie. We need to detect the chain - // by seeing if one is specified in the configuration file. 
- dataDir := path.Dir(bitcoindConfigPath) - dataDirRE, errr := regexp.Compile(`(?m)^\s*datadir\s*=\s*([^\s]+)`) - if errr != nil { - return "", "", "", "", er.E(errr) - } - dataDirSubmatches := dataDirRE.FindSubmatch(configContents) - if dataDirSubmatches != nil { - dataDir = string(dataDirSubmatches[1]) - } - - chainDir := "/" - switch networkName { - case "testnet3": - chainDir = "/testnet3/" - case "testnet4": - chainDir = "/testnet4/" - case "regtest": - chainDir = "/regtest/" - } - - cookie, err := ioutil.ReadFile(dataDir + chainDir + ".cookie") - if err == nil { - splitCookie := strings.Split(string(cookie), ":") - if len(splitCookie) == 2 { - return splitCookie[0], splitCookie[1], zmqBlockHost, - zmqTxHost, nil - } - } - - // We didn't find a cookie, so we attempt to locate the RPC user using - // a regular expression. If we don't have a match for our regular - // expression then we'll exit with an error. - rpcUserRegexp, errr := regexp.Compile(`(?m)^\s*rpcuser\s*=\s*([^\s]+)`) - if errr != nil { - return "", "", "", "", er.E(errr) - } - userSubmatches := rpcUserRegexp.FindSubmatch(configContents) - if userSubmatches == nil { - return "", "", "", "", er.Errorf("unable to find rpcuser in " + - "config") - } - - // Similarly, we'll use another regular expression to find the set - // rpcpass (if any). If we can't find the pass, then we'll exit with an - // error. - rpcPassRegexp, errr := regexp.Compile(`(?m)^\s*rpcpassword\s*=\s*([^\s]+)`) - if errr != nil { - return "", "", "", "", er.E(errr) - } - passSubmatches := rpcPassRegexp.FindSubmatch(configContents) - if passSubmatches == nil { - return "", "", "", "", er.Errorf("unable to find rpcpassword " + - "in config") - } - - return string(userSubmatches[1]), string(passSubmatches[1]), - zmqBlockHost, zmqTxHost, nil -} - -// checkZMQOptions ensures that the provided addresses to use as the hosts for -// ZMQ rawblock and rawtx notifications are different. 
-func checkZMQOptions(zmqBlockHost, zmqTxHost string) er.R { - if zmqBlockHost == zmqTxHost { - return er.New("zmqpubrawblock and zmqpubrawtx must be set " + - "to different addresses") - } - - return nil -} - -// checkEstimateMode ensures that the provided estimate mode is legal. -func checkEstimateMode(estimateMode string) er.R { - for _, mode := range bitcoindEstimateModes { - if estimateMode == mode { - return nil - } - } - - return er.Errorf("estimatemode must be one of the following: %v", - bitcoindEstimateModes[:]) -} diff --git a/lnd/contractcourt/anchor_resolver.go b/lnd/contractcourt/anchor_resolver.go deleted file mode 100644 index f1c78f67..00000000 --- a/lnd/contractcourt/anchor_resolver.go +++ /dev/null @@ -1,209 +0,0 @@ -package contractcourt - -import ( - "io" - "sync" - - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/input" - "github.com/pkt-cash/pktd/lnd/sweep" - "github.com/pkt-cash/pktd/pktlog/log" - "github.com/pkt-cash/pktd/wire" -) - -// anchorResolver is a resolver that will attempt to sweep our anchor output. -type anchorResolver struct { - // anchorSignDescriptor contains the information that is required to - // sweep the anchor. - anchorSignDescriptor input.SignDescriptor - - // anchor is the outpoint on the commitment transaction. - anchor wire.OutPoint - - // resolved reflects if the contract has been fully resolved or not. - resolved bool - - // broadcastHeight is the height that the original contract was - // broadcast to the main-chain at. We'll use this value to bound any - // historical queries to the chain for spends/confirmations. - broadcastHeight uint32 - - // chanPoint is the channel point of the original contract. - chanPoint wire.OutPoint - - // currentReport stores the current state of the resolver for reporting - // over the rpc interface. 
- currentReport ContractReport - - // reportLock prevents concurrent access to the resolver report. - reportLock sync.Mutex - - contractResolverKit -} - -// newAnchorResolver instantiates a new anchor resolver. -func newAnchorResolver(anchorSignDescriptor input.SignDescriptor, - anchor wire.OutPoint, broadcastHeight uint32, - chanPoint wire.OutPoint, resCfg ResolverConfig) *anchorResolver { - - amt := btcutil.Amount(anchorSignDescriptor.Output.Value) - - report := ContractReport{ - Outpoint: anchor, - Type: ReportOutputAnchor, - Amount: amt, - LimboBalance: amt, - RecoveredBalance: 0, - } - - r := &anchorResolver{ - contractResolverKit: *newContractResolverKit(resCfg), - anchorSignDescriptor: anchorSignDescriptor, - anchor: anchor, - broadcastHeight: broadcastHeight, - chanPoint: chanPoint, - currentReport: report, - } - - return r -} - -// ResolverKey returns an identifier which should be globally unique for this -// particular resolver within the chain the original contract resides within. -func (c *anchorResolver) ResolverKey() []byte { - // The anchor resolver is stateless and doesn't need a database key. - return nil -} - -// Resolve offers the anchor output to the sweeper and waits for it to be swept. -func (c *anchorResolver) Resolve() (ContractResolver, er.R) { - // Attempt to update the sweep parameters to the post-confirmation - // situation. We don't want to force sweep anymore, because the anchor - // lost its special purpose to get the commitment confirmed. It is just - // an output that we want to sweep only if it is economical to do so. - // - // An exclusive group is not necessary anymore, because we know that - // this is the only anchor that can be swept. - // - // We also clear the parent tx information for cpfp, because the - // commitment tx is confirmed. - // - // After a restart or when the remote force closes, the sweeper is not - // yet aware of the anchor. In that case, it will be added as new input - // to the sweeper. 
- relayFeeRate := c.Sweeper.RelayFeePerKW() - - anchorInput := input.MakeBaseInput( - &c.anchor, - input.CommitmentAnchor, - &c.anchorSignDescriptor, - c.broadcastHeight, - nil, - ) - - resultChan, err := c.Sweeper.SweepInput( - &anchorInput, - sweep.Params{ - Fee: sweep.FeePreference{ - FeeRate: relayFeeRate, - }, - }, - ) - if err != nil { - return nil, err - } - - var ( - outcome channeldb.ResolverOutcome - spendTx *chainhash.Hash - ) - - select { - case sweepRes := <-resultChan: - switch { - - // Anchor was swept successfully. - case sweepRes.Err == nil: - sweepTxID := sweepRes.Tx.TxHash() - - spendTx = &sweepTxID - outcome = channeldb.ResolverOutcomeClaimed - - // Anchor was swept by someone else. This is possible after the - // 16 block csv lock. - case sweep.ErrRemoteSpend.Is(sweepRes.Err): - log.Warnf("our anchor spent by someone else") - outcome = channeldb.ResolverOutcomeUnclaimed - - // The sweeper gave up on sweeping the anchor. This happens - // after the maximum number of sweep attempts has been reached. - // See sweep.DefaultMaxSweepAttempts. Sweep attempts are - // interspaced with random delays picked from a range that - // increases exponentially. - // - // We consider the anchor as being lost. - case sweep.ErrTooManyAttempts.Is(sweepRes.Err): - log.Warnf("anchor sweep abandoned") - outcome = channeldb.ResolverOutcomeUnclaimed - - // An unexpected error occurred. - default: - log.Errorf("unable to sweep anchor: %v", sweepRes.Err) - - return nil, sweepRes.Err - } - - case <-c.quit: - return nil, errResolverShuttingDown.Default() - } - - // Update report to reflect that funds are no longer in limbo. 
- c.reportLock.Lock() - if outcome == channeldb.ResolverOutcomeClaimed { - c.currentReport.RecoveredBalance = c.currentReport.LimboBalance - } - c.currentReport.LimboBalance = 0 - report := c.currentReport.resolverReport( - spendTx, channeldb.ResolverTypeAnchor, outcome, - ) - c.reportLock.Unlock() - - c.resolved = true - return nil, c.PutResolverReport(nil, report) -} - -// Stop signals the resolver to cancel any current resolution processes, and -// suspend. -// -// NOTE: Part of the ContractResolver interface. -func (c *anchorResolver) Stop() { - close(c.quit) -} - -// IsResolved returns true if the stored state in the resolve is fully -// resolved. In this case the target output can be forgotten. -// -// NOTE: Part of the ContractResolver interface. -func (c *anchorResolver) IsResolved() bool { - return c.resolved -} - -// report returns a report on the resolution state of the contract. -func (c *anchorResolver) report() *ContractReport { - c.reportLock.Lock() - defer c.reportLock.Unlock() - - reportCopy := c.currentReport - return &reportCopy -} - -func (c *anchorResolver) Encode(w io.Writer) er.R { - return er.New("serialization not supported") -} - -// A compile time assertion to ensure anchorResolver meets the -// ContractResolver interface. 
-var _ ContractResolver = (*anchorResolver)(nil) diff --git a/lnd/contractcourt/briefcase.go b/lnd/contractcourt/briefcase.go deleted file mode 100644 index 7ead441f..00000000 --- a/lnd/contractcourt/briefcase.go +++ /dev/null @@ -1,1254 +0,0 @@ -package contractcourt - -import ( - "bytes" - "io" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/lnd/input" - "github.com/pkt-cash/pktd/lnd/lnwallet" - "github.com/pkt-cash/pktd/wire" -) - -// ContractResolutions is a wrapper struct around the two forms of resolutions -// we may need to carry out once a contract is closing: resolving the -// commitment output, and resolving any incoming+outgoing HTLC's still present -// in the commitment. -type ContractResolutions struct { - // CommitHash is the txid of the commitment transaction. - CommitHash chainhash.Hash - - // CommitResolution contains all data required to fully resolve a - // commitment output. - CommitResolution *lnwallet.CommitOutputResolution - - // HtlcResolutions contains all data required to fully resolve any - // incoming+outgoing HTLC's present within the commitment transaction. - HtlcResolutions lnwallet.HtlcResolutions - - // AnchorResolution contains the data required to sweep the anchor - // output. If the channel type doesn't include anchors, the value of - // this field will be nil. - AnchorResolution *lnwallet.AnchorResolution -} - -// IsEmpty returns true if the set of resolutions is "empty". A resolution is -// empty if: our commitment output has been trimmed, and we don't have any -// incoming or outgoing HTLC's active. 
-func (c *ContractResolutions) IsEmpty() bool { - return c.CommitResolution == nil && - len(c.HtlcResolutions.IncomingHTLCs) == 0 && - len(c.HtlcResolutions.OutgoingHTLCs) == 0 && - c.AnchorResolution == nil -} - -// ArbitratorLog is the primary source of persistent storage for the -// ChannelArbitrator. The log stores the current state of the -// ChannelArbitrator's internal state machine, any items that are required to -// properly make a state transition, and any unresolved contracts. -type ArbitratorLog interface { - // TODO(roasbeef): document on interface the errors expected to be - // returned - - // CurrentState returns the current state of the ChannelArbitrator. It - // takes an optional database transaction, which will be used if it is - // non-nil, otherwise the lookup will be done in its own transaction. - CurrentState(tx kvdb.RTx) (ArbitratorState, er.R) - - // CommitState persists, the current state of the chain attendant. - CommitState(ArbitratorState) er.R - - // InsertUnresolvedContracts inserts a set of unresolved contracts into - // the log. The log will then persistently store each contract until - // they've been swapped out, or resolved. It takes a set of report which - // should be written to disk if as well if it is non-nil. - InsertUnresolvedContracts(reports []*channeldb.ResolverReport, - resolvers ...ContractResolver) er.R - - // FetchUnresolvedContracts returns all unresolved contracts that have - // been previously written to the log. - FetchUnresolvedContracts() ([]ContractResolver, er.R) - - // SwapContract performs an atomic swap of the old contract for the new - // contract. This method is used when after a contract has been fully - // resolved, it produces another contract that needs to be resolved. - SwapContract(old ContractResolver, new ContractResolver) er.R - - // ResolveContract marks a contract as fully resolved. Once a contract - // has been fully resolved, it is deleted from persistent storage. 
- ResolveContract(ContractResolver) er.R - - // LogContractResolutions stores a complete contract resolution for the - // contract under watch. This method will be called once the - // ChannelArbitrator either force closes a channel, or detects that the - // remote party has broadcast their commitment on chain. - LogContractResolutions(*ContractResolutions) er.R - - // FetchContractResolutions fetches the set of previously stored - // contract resolutions from persistent storage. - FetchContractResolutions() (*ContractResolutions, er.R) - - // InsertConfirmedCommitSet stores the known set of active HTLCs at the - // time channel closure. We'll use this to reconstruct our set of chain - // actions anew based on the confirmed and pending commitment state. - InsertConfirmedCommitSet(c *CommitSet) er.R - - // FetchConfirmedCommitSet fetches the known confirmed active HTLC set - // from the database. It takes an optional database transaction, which - // will be used if it is non-nil, otherwise the lookup will be done in - // its own transaction. - FetchConfirmedCommitSet(tx kvdb.RTx) (*CommitSet, er.R) - - // FetchChainActions attempts to fetch the set of previously stored - // chain actions. We'll use this upon restart to properly advance our - // state machine forward. - // - // NOTE: This method only exists in order to be able to serve nodes had - // channels in the process of closing before the CommitSet struct was - // introduced. - FetchChainActions() (ChainActionMap, er.R) - - // WipeHistory is to be called ONLY once *all* contracts have been - // fully resolved, and the channel closure if finalized. This method - // will delete all on-disk state within the persistent log. - WipeHistory() er.R -} - -// ArbitratorState is an enum that details the current state of the -// ChannelArbitrator's state machine. -type ArbitratorState uint8 - -const ( - // StateDefault is the default state. In this state, no major actions - // need to be executed. 
- StateDefault ArbitratorState = 0 - - // StateBroadcastCommit is a state that indicates that the attendant - // has decided to broadcast the commitment transaction, but hasn't done - // so yet. - StateBroadcastCommit ArbitratorState = 1 - - // StateCommitmentBroadcasted is a state that indicates that the - // attendant has broadcasted the commitment transaction, and is now - // waiting for it to confirm. - StateCommitmentBroadcasted ArbitratorState = 6 - - // StateContractClosed is a state that indicates the contract has - // already been "closed", meaning the commitment is confirmed on chain. - // At this point, we can now examine our active contracts, in order to - // create the proper resolver for each one. - StateContractClosed ArbitratorState = 2 - - // StateWaitingFullResolution is a state that indicates that the - // commitment transaction has been confirmed, and the attendant is now - // waiting for all unresolved contracts to be fully resolved. - StateWaitingFullResolution ArbitratorState = 3 - - // StateFullyResolved is the final state of the attendant. In this - // state, all related contracts have been resolved, and the attendant - // can now be garbage collected. - StateFullyResolved ArbitratorState = 4 - - // StateError is the only error state of the resolver. If we enter this - // state, then we cannot proceed with manual intervention as a state - // transition failed. - StateError ArbitratorState = 5 -) - -// String returns a human readable string describing the ArbitratorState. 
-func (a ArbitratorState) String() string { - switch a { - case StateDefault: - return "StateDefault" - - case StateBroadcastCommit: - return "StateBroadcastCommit" - - case StateCommitmentBroadcasted: - return "StateCommitmentBroadcasted" - - case StateContractClosed: - return "StateContractClosed" - - case StateWaitingFullResolution: - return "StateWaitingFullResolution" - - case StateFullyResolved: - return "StateFullyResolved" - - case StateError: - return "StateError" - - default: - return "unknown state" - } -} - -// resolverType is an enum that enumerates the various types of resolvers. When -// writing resolvers to disk, we prepend this to the raw bytes stored. This -// allows us to properly decode the resolver into the proper type. -type resolverType uint8 - -const ( - // resolverTimeout is the type of a resolver that's tasked with - // resolving an outgoing HTLC that is very close to timing out. - resolverTimeout resolverType = 0 - - // resolverSuccess is the type of a resolver that's tasked with - // resolving an incoming HTLC that we already know the preimage of. - resolverSuccess resolverType = 1 - - // resolverOutgoingContest is the type of a resolver that's tasked with - // resolving an outgoing HTLC that hasn't yet timed out. - resolverOutgoingContest resolverType = 2 - - // resolverIncomingContest is the type of a resolver that's tasked with - // resolving an incoming HTLC that we don't yet know the preimage to. - resolverIncomingContest resolverType = 3 - - // resolverUnilateralSweep is the type of resolver that's tasked with - // sweeping out direct commitment output form the remote party's - // commitment transaction. - resolverUnilateralSweep resolverType = 4 -) - -// resolverIDLen is the size of the resolver ID key. This is 36 bytes as we get -// 32 bytes from the hash of the prev tx, and 4 bytes for the output index. -const resolverIDLen = 36 - -// resolverID is a key that uniquely identifies a resolver within a particular -// chain. 
For this value we use the full outpoint of the resolver. -type resolverID [resolverIDLen]byte - -// newResolverID returns a resolverID given the outpoint of a contract. -func newResolverID(op wire.OutPoint) resolverID { - var r resolverID - - copy(r[:], op.Hash[:]) - - endian.PutUint32(r[32:], op.Index) - - return r -} - -// logScope is a key that we use to scope the storage of a ChannelArbitrator -// within the global log. We use this key to create a unique bucket within the -// database and ensure that we don't have any key collisions. The log's scope -// is define as: chainHash || chanPoint, where chanPoint is the chan point of -// the original channel. -type logScope [32 + 36]byte - -// newLogScope creates a new logScope key from the passed chainhash and -// chanPoint. -func newLogScope(chain chainhash.Hash, op wire.OutPoint) (*logScope, er.R) { - var l logScope - b := bytes.NewBuffer(l[0:0]) - - if _, err := b.Write(chain[:]); err != nil { - return nil, er.E(err) - } - if _, err := b.Write(op.Hash[:]); err != nil { - return nil, er.E(err) - } - - if err := util.WriteBin(b, endian, op.Index); err != nil { - return nil, err - } - - return &l, nil -} - -var ( - // stateKey is the key that we use to store the current state of the - // arbitrator. - stateKey = []byte("state") - - // contractsBucketKey is the bucket within the logScope that will store - // all the active unresolved contracts. - contractsBucketKey = []byte("contractkey") - - // resolutionsKey is the key under the logScope that we'll use to store - // the full set of resolutions for a channel. - resolutionsKey = []byte("resolutions") - - // anchorResolutionKey is the key under the logScope that we'll use to - // store the anchor resolution, if any. - anchorResolutionKey = []byte("anchor-resolution") - - // actionsBucketKey is the key under the logScope that we'll use to - // store all chain actions once they're determined. 
- actionsBucketKey = []byte("chain-actions") - - // commitSetKey is the primary key under the logScope that we'll use to - // store the confirmed active HTLC sets once we learn that a channel - // has closed out on chain. - commitSetKey = []byte("commit-set") -) - -var ( - // errScopeBucketNoExist is returned when we can't find the proper - // bucket for an arbitrator's scope. - errScopeBucketNoExist = Err.CodeWithDetail("errScopeBucketNoExist", "scope bucket not found") - - // errNoContracts is returned when no contracts are found within the - // log. - errNoContracts = Err.CodeWithDetail("errNoContracts", "no stored contracts") - - // errNoResolutions is returned when the log doesn't contain any active - // chain resolutions. - errNoResolutions = Err.CodeWithDetail("errNoResolutions", "no contract resolutions exist") - - // errNoActions is retuned when the log doesn't contain any stored - // chain actions. - errNoActions = Err.CodeWithDetail("errNoActions", "no chain actions exist") - - // errNoCommitSet is return when the log doesn't contained a CommitSet. - // This can happen if the channel hasn't closed yet, or a client is - // running an older version that didn't yet write this state. - errNoCommitSet = Err.CodeWithDetail("errNoCommitSet", "no commit set exists") -) - -// boltArbitratorLog is an implementation of the ArbitratorLog interface backed -// by a bolt DB instance. -type boltArbitratorLog struct { - db kvdb.Backend - - cfg ChannelArbitratorConfig - - scopeKey logScope -} - -// newBoltArbitratorLog returns a new instance of the boltArbitratorLog given -// an arbitrator config, and the items needed to create its log scope. 
-func newBoltArbitratorLog(db kvdb.Backend, cfg ChannelArbitratorConfig, - chainHash chainhash.Hash, chanPoint wire.OutPoint) (*boltArbitratorLog, er.R) { - - scope, err := newLogScope(chainHash, chanPoint) - if err != nil { - return nil, err - } - - return &boltArbitratorLog{ - db: db, - cfg: cfg, - scopeKey: *scope, - }, nil -} - -// A compile time check to ensure boltArbitratorLog meets the ArbitratorLog -// interface. -var _ ArbitratorLog = (*boltArbitratorLog)(nil) - -func fetchContractReadBucket(tx kvdb.RTx, scopeKey []byte) (kvdb.RBucket, er.R) { - scopeBucket := tx.ReadBucket(scopeKey) - if scopeBucket == nil { - return nil, errScopeBucketNoExist.Default() - } - - contractBucket := scopeBucket.NestedReadBucket(contractsBucketKey) - if contractBucket == nil { - return nil, errNoContracts.Default() - } - - return contractBucket, nil -} - -func fetchContractWriteBucket(tx kvdb.RwTx, scopeKey []byte) (kvdb.RwBucket, er.R) { - scopeBucket, err := tx.CreateTopLevelBucket(scopeKey) - if err != nil { - return nil, err - } - - contractBucket, err := scopeBucket.CreateBucketIfNotExists( - contractsBucketKey, - ) - if err != nil { - return nil, err - } - - return contractBucket, nil -} - -// writeResolver is a helper method that writes a contract resolver and stores -// it it within the passed contractBucket using its unique resolutionsKey key. -func (b *boltArbitratorLog) writeResolver(contractBucket kvdb.RwBucket, - res ContractResolver) er.R { - - // Only persist resolvers that are stateful. Stateless resolvers don't - // expose a resolver key. - resKey := res.ResolverKey() - if resKey == nil { - return nil - } - - // First, we'll write to the buffer the type of this resolver. Using - // this byte, we can later properly deserialize the resolver properly. 
- var ( - buf bytes.Buffer - rType resolverType - ) - switch res.(type) { - case *htlcTimeoutResolver: - rType = resolverTimeout - case *htlcSuccessResolver: - rType = resolverSuccess - case *htlcOutgoingContestResolver: - rType = resolverOutgoingContest - case *htlcIncomingContestResolver: - rType = resolverIncomingContest - case *commitSweepResolver: - rType = resolverUnilateralSweep - } - if _, err := buf.Write([]byte{byte(rType)}); err != nil { - return er.E(err) - } - - // With the type of the resolver written, we can then write out the raw - // bytes of the resolver itself. - if err := res.Encode(&buf); err != nil { - return err - } - - return contractBucket.Put(resKey, buf.Bytes()) -} - -// CurrentState returns the current state of the ChannelArbitrator. It takes an -// optional database transaction, which will be used if it is non-nil, otherwise -// the lookup will be done in its own transaction. -// -// NOTE: Part of the ContractResolver interface. -func (b *boltArbitratorLog) CurrentState(tx kvdb.RTx) (ArbitratorState, er.R) { - var ( - s ArbitratorState - err er.R - ) - - if tx != nil { - s, err = b.currentState(tx) - } else { - err = kvdb.View(b.db, func(tx kvdb.RTx) er.R { - s, err = b.currentState(tx) - return err - }, func() { - s = 0 - }) - } - - if err != nil && !errScopeBucketNoExist.Is(err) { - return s, err - } - - return s, nil -} - -func (b *boltArbitratorLog) currentState(tx kvdb.RTx) (ArbitratorState, er.R) { - scopeBucket := tx.ReadBucket(b.scopeKey[:]) - if scopeBucket == nil { - return 0, errScopeBucketNoExist.Default() - } - - stateBytes := scopeBucket.Get(stateKey) - if stateBytes == nil { - return 0, nil - } - - return ArbitratorState(stateBytes[0]), nil -} - -// CommitState persists, the current state of the chain attendant. -// -// NOTE: Part of the ContractResolver interface. 
-func (b *boltArbitratorLog) CommitState(s ArbitratorState) er.R { - return kvdb.Batch(b.db, func(tx kvdb.RwTx) er.R { - scopeBucket, err := tx.CreateTopLevelBucket(b.scopeKey[:]) - if err != nil { - return err - } - - return scopeBucket.Put(stateKey[:], []byte{uint8(s)}) - }) -} - -// FetchUnresolvedContracts returns all unresolved contracts that have been -// previously written to the log. -// -// NOTE: Part of the ContractResolver interface. -func (b *boltArbitratorLog) FetchUnresolvedContracts() ([]ContractResolver, er.R) { - resolverCfg := ResolverConfig{ - ChannelArbitratorConfig: b.cfg, - Checkpoint: b.checkpointContract, - } - var contracts []ContractResolver - err := kvdb.View(b.db, func(tx kvdb.RTx) er.R { - contractBucket, err := fetchContractReadBucket(tx, b.scopeKey[:]) - if err != nil { - return err - } - - return contractBucket.ForEach(func(resKey, resBytes []byte) er.R { - if len(resKey) != resolverIDLen { - return nil - } - - var res ContractResolver - - // We'll snip off the first byte of the raw resolver - // bytes in order to extract what type of resolver - // we're about to encode. - resType := resolverType(resBytes[0]) - - // Then we'll create a reader using the remaining - // bytes. 
- resReader := bytes.NewReader(resBytes[1:]) - - switch resType { - case resolverTimeout: - res, err = newTimeoutResolverFromReader( - resReader, resolverCfg, - ) - - case resolverSuccess: - res, err = newSuccessResolverFromReader( - resReader, resolverCfg, - ) - - case resolverOutgoingContest: - res, err = newOutgoingContestResolverFromReader( - resReader, resolverCfg, - ) - - case resolverIncomingContest: - res, err = newIncomingContestResolverFromReader( - resReader, resolverCfg, - ) - - case resolverUnilateralSweep: - res, err = newCommitSweepResolverFromReader( - resReader, resolverCfg, - ) - - default: - return er.Errorf("unknown resolver type: %v", resType) - } - - if err != nil { - return err - } - - contracts = append(contracts, res) - return nil - }) - }, func() { - contracts = nil - }) - if err != nil && !errScopeBucketNoExist.Is(err) && !errNoContracts.Is(err) { - return nil, err - } - - return contracts, nil -} - -// InsertUnresolvedContracts inserts a set of unresolved contracts into the -// log. The log will then persistently store each contract until they've been -// swapped out, or resolved. -// -// NOTE: Part of the ContractResolver interface. -func (b *boltArbitratorLog) InsertUnresolvedContracts(reports []*channeldb.ResolverReport, - resolvers ...ContractResolver) er.R { - - return kvdb.Batch(b.db, func(tx kvdb.RwTx) er.R { - contractBucket, err := fetchContractWriteBucket(tx, b.scopeKey[:]) - if err != nil { - return err - } - - for _, resolver := range resolvers { - err = b.writeResolver(contractBucket, resolver) - if err != nil { - return err - } - } - - // Persist any reports that are present. - for _, report := range reports { - err := b.cfg.PutResolverReport(tx, report) - if err != nil { - return err - } - } - - return nil - }) -} - -// SwapContract performs an atomic swap of the old contract for the new -// contract. 
This method is used when after a contract has been fully resolved, -// it produces another contract that needs to be resolved. -// -// NOTE: Part of the ContractResolver interface. -func (b *boltArbitratorLog) SwapContract(oldContract, newContract ContractResolver) er.R { - return kvdb.Batch(b.db, func(tx kvdb.RwTx) er.R { - contractBucket, err := fetchContractWriteBucket(tx, b.scopeKey[:]) - if err != nil { - return err - } - - oldContractkey := oldContract.ResolverKey() - if err := contractBucket.Delete(oldContractkey); err != nil { - return err - } - - return b.writeResolver(contractBucket, newContract) - }) -} - -// ResolveContract marks a contract as fully resolved. Once a contract has been -// fully resolved, it is deleted from persistent storage. -// -// NOTE: Part of the ContractResolver interface. -func (b *boltArbitratorLog) ResolveContract(res ContractResolver) er.R { - return kvdb.Batch(b.db, func(tx kvdb.RwTx) er.R { - contractBucket, err := fetchContractWriteBucket(tx, b.scopeKey[:]) - if err != nil { - return err - } - - resKey := res.ResolverKey() - return contractBucket.Delete(resKey) - }) -} - -// LogContractResolutions stores a set of chain actions which are derived from -// our set of active contracts, and the on-chain state. We'll write this et of -// cations when: we decide to go on-chain to resolve a contract, or we detect -// that the remote party has gone on-chain. -// -// NOTE: Part of the ContractResolver interface. -func (b *boltArbitratorLog) LogContractResolutions(c *ContractResolutions) er.R { - return kvdb.Batch(b.db, func(tx kvdb.RwTx) er.R { - scopeBucket, err := tx.CreateTopLevelBucket(b.scopeKey[:]) - if err != nil { - return err - } - - var b bytes.Buffer - - if _, err := b.Write(c.CommitHash[:]); err != nil { - return er.E(err) - } - - // First, we'll write out the commit output's resolution. 
- if c.CommitResolution == nil { - if err := util.WriteBin(&b, endian, false); err != nil { - return err - } - } else { - if err := util.WriteBin(&b, endian, true); err != nil { - return err - } - errr := encodeCommitResolution(&b, c.CommitResolution) - if errr != nil { - return errr - } - } - - // With the output for the commitment transaction written, we - // can now write out the resolutions for the incoming and - // outgoing HTLC's. - numIncoming := uint32(len(c.HtlcResolutions.IncomingHTLCs)) - if err := util.WriteBin(&b, endian, numIncoming); err != nil { - return err - } - for _, htlc := range c.HtlcResolutions.IncomingHTLCs { - err := encodeIncomingResolution(&b, &htlc) - if err != nil { - return err - } - } - numOutgoing := uint32(len(c.HtlcResolutions.OutgoingHTLCs)) - if err := util.WriteBin(&b, endian, numOutgoing); err != nil { - return err - } - for _, htlc := range c.HtlcResolutions.OutgoingHTLCs { - err := encodeOutgoingResolution(&b, &htlc) - if err != nil { - return err - } - } - - err = scopeBucket.Put(resolutionsKey, b.Bytes()) - if err != nil { - return err - } - - // Write out the anchor resolution if present. - if c.AnchorResolution != nil { - var b bytes.Buffer - err := encodeAnchorResolution(&b, c.AnchorResolution) - if err != nil { - return err - } - - err = scopeBucket.Put(anchorResolutionKey, b.Bytes()) - if err != nil { - return err - } - } - - return nil - }) -} - -// FetchContractResolutions fetches the set of previously stored contract -// resolutions from persistent storage. -// -// NOTE: Part of the ContractResolver interface. 
-func (b *boltArbitratorLog) FetchContractResolutions() (*ContractResolutions, er.R) { - var c *ContractResolutions - err := kvdb.View(b.db, func(tx kvdb.RTx) er.R { - scopeBucket := tx.ReadBucket(b.scopeKey[:]) - if scopeBucket == nil { - return errScopeBucketNoExist.Default() - } - - resolutionBytes := scopeBucket.Get(resolutionsKey) - if resolutionBytes == nil { - return errNoResolutions.Default() - } - - resReader := bytes.NewReader(resolutionBytes) - - _, err := util.ReadFull(resReader, c.CommitHash[:]) - if err != nil { - return err - } - - // First, we'll attempt to read out the commit resolution (if - // it exists). - var haveCommitRes bool - err = util.ReadBin(resReader, endian, &haveCommitRes) - if err != nil { - return err - } - if haveCommitRes { - c.CommitResolution = &lnwallet.CommitOutputResolution{} - err = decodeCommitResolution( - resReader, c.CommitResolution, - ) - if err != nil { - return err - } - } - - var ( - numIncoming uint32 - numOutgoing uint32 - ) - - // Next, we'll read out the incoming and outgoing HTLC - // resolutions. 
- err = util.ReadBin(resReader, endian, &numIncoming) - if err != nil { - return err - } - c.HtlcResolutions.IncomingHTLCs = make([]lnwallet.IncomingHtlcResolution, numIncoming) - for i := uint32(0); i < numIncoming; i++ { - err := decodeIncomingResolution( - resReader, &c.HtlcResolutions.IncomingHTLCs[i], - ) - if err != nil { - return err - } - } - - err = util.ReadBin(resReader, endian, &numOutgoing) - if err != nil { - return err - } - c.HtlcResolutions.OutgoingHTLCs = make([]lnwallet.OutgoingHtlcResolution, numOutgoing) - for i := uint32(0); i < numOutgoing; i++ { - err := decodeOutgoingResolution( - resReader, &c.HtlcResolutions.OutgoingHTLCs[i], - ) - if err != nil { - return err - } - } - - anchorResBytes := scopeBucket.Get(anchorResolutionKey) - if anchorResBytes != nil { - c.AnchorResolution = &lnwallet.AnchorResolution{} - resReader := bytes.NewReader(anchorResBytes) - err := decodeAnchorResolution( - resReader, c.AnchorResolution, - ) - if err != nil { - return err - } - } - - return nil - }, func() { - c = &ContractResolutions{} - }) - if err != nil { - return nil, err - } - - return c, err -} - -// FetchChainActions attempts to fetch the set of previously stored chain -// actions. We'll use this upon restart to properly advance our state machine -// forward. -// -// NOTE: Part of the ContractResolver interface. 
-func (b *boltArbitratorLog) FetchChainActions() (ChainActionMap, er.R) { - var actionsMap ChainActionMap - - err := kvdb.View(b.db, func(tx kvdb.RTx) er.R { - scopeBucket := tx.ReadBucket(b.scopeKey[:]) - if scopeBucket == nil { - return errScopeBucketNoExist.Default() - } - - actionsBucket := scopeBucket.NestedReadBucket(actionsBucketKey) - if actionsBucket == nil { - return errNoActions.Default() - } - - return actionsBucket.ForEach(func(action, htlcBytes []byte) er.R { - if htlcBytes == nil { - return nil - } - - chainAction := ChainAction(action[0]) - - htlcReader := bytes.NewReader(htlcBytes) - htlcs, err := channeldb.DeserializeHtlcs(htlcReader) - if err != nil { - return err - } - - actionsMap[chainAction] = htlcs - - return nil - }) - }, func() { - actionsMap = make(ChainActionMap) - }) - if err != nil { - return nil, err - } - - return actionsMap, nil -} - -// InsertConfirmedCommitSet stores the known set of active HTLCs at the time -// channel closure. We'll use this to reconstruct our set of chain actions anew -// based on the confirmed and pending commitment state. -// -// NOTE: Part of the ContractResolver interface. -func (b *boltArbitratorLog) InsertConfirmedCommitSet(c *CommitSet) er.R { - return kvdb.Batch(b.db, func(tx kvdb.RwTx) er.R { - scopeBucket, err := tx.CreateTopLevelBucket(b.scopeKey[:]) - if err != nil { - return err - } - - var b bytes.Buffer - if err := encodeCommitSet(&b, c); err != nil { - return err - } - - return scopeBucket.Put(commitSetKey, b.Bytes()) - }) -} - -// FetchConfirmedCommitSet fetches the known confirmed active HTLC set from the -// database. It takes an optional database transaction, which will be used if it -// is non-nil, otherwise the lookup will be done in its own transaction. -// -// NOTE: Part of the ContractResolver interface. 
-func (b *boltArbitratorLog) FetchConfirmedCommitSet(tx kvdb.RTx) (*CommitSet, er.R) { - if tx != nil { - return b.fetchConfirmedCommitSet(tx) - } - - var c *CommitSet - err := kvdb.View(b.db, func(tx kvdb.RTx) er.R { - var err er.R - c, err = b.fetchConfirmedCommitSet(tx) - return err - }, func() { - c = nil - }) - if err != nil { - return nil, err - } - - return c, nil -} - -func (b *boltArbitratorLog) fetchConfirmedCommitSet(tx kvdb.RTx) (*CommitSet, er.R) { - - scopeBucket := tx.ReadBucket(b.scopeKey[:]) - if scopeBucket == nil { - return nil, errScopeBucketNoExist.Default() - } - - commitSetBytes := scopeBucket.Get(commitSetKey) - if commitSetBytes == nil { - return nil, errNoCommitSet.Default() - } - - return decodeCommitSet(bytes.NewReader(commitSetBytes)) -} - -// WipeHistory is to be called ONLY once *all* contracts have been fully -// resolved, and the channel closure if finalized. This method will delete all -// on-disk state within the persistent log. -// -// NOTE: Part of the ContractResolver interface. -func (b *boltArbitratorLog) WipeHistory() er.R { - return kvdb.Update(b.db, func(tx kvdb.RwTx) er.R { - scopeBucket, err := tx.CreateTopLevelBucket(b.scopeKey[:]) - if err != nil { - return err - } - - // Once we have the main top-level bucket, we'll delete the key - // that stores the state of the arbitrator. - if err := scopeBucket.Delete(stateKey[:]); err != nil { - return err - } - - // Next, we'll delete any lingering contract state within the - // contracts bucket by removing the bucket itself. - err = scopeBucket.DeleteNestedBucket(contractsBucketKey) - if err != nil && !kvdb.ErrBucketNotFound.Is(err) { - return err - } - - // Next, we'll delete storage of any lingering contract - // resolutions. - if err := scopeBucket.Delete(resolutionsKey); err != nil { - return err - } - - // We'll delete any chain actions that are still stored by - // removing the enclosing bucket. 
- err = scopeBucket.DeleteNestedBucket(actionsBucketKey) - if err != nil && !kvdb.ErrBucketNotFound.Is(err) { - return err - } - - // Finally, we'll delete the enclosing bucket itself. - return tx.DeleteTopLevelBucket(b.scopeKey[:]) - }, func() {}) -} - -// checkpointContract is a private method that will be fed into -// ContractResolver instances to checkpoint their state once they reach -// milestones during contract resolution. If the report provided is non-nil, -// it should also be recorded. -func (b *boltArbitratorLog) checkpointContract(c ContractResolver, - reports ...*channeldb.ResolverReport) er.R { - - return kvdb.Update(b.db, func(tx kvdb.RwTx) er.R { - contractBucket, err := fetchContractWriteBucket(tx, b.scopeKey[:]) - if err != nil { - return err - } - - if err := b.writeResolver(contractBucket, c); err != nil { - return err - } - - for _, report := range reports { - if err := b.cfg.PutResolverReport(tx, report); err != nil { - return err - } - } - - return nil - }, func() {}) -} - -func encodeIncomingResolution(w io.Writer, i *lnwallet.IncomingHtlcResolution) er.R { - if _, err := util.Write(w, i.Preimage[:]); err != nil { - return err - } - - if i.SignedSuccessTx == nil { - if err := util.WriteBin(w, endian, false); err != nil { - return err - } - } else { - if err := util.WriteBin(w, endian, true); err != nil { - return err - } - - if err := i.SignedSuccessTx.Serialize(w); err != nil { - return err - } - } - - if err := util.WriteBin(w, endian, i.CsvDelay); err != nil { - return err - } - if _, err := util.Write(w, i.ClaimOutpoint.Hash[:]); err != nil { - return err - } - if err := util.WriteBin(w, endian, i.ClaimOutpoint.Index); err != nil { - return err - } - err := input.WriteSignDescriptor(w, &i.SweepSignDesc) - if err != nil { - return err - } - - return nil -} - -func decodeIncomingResolution(r io.Reader, h *lnwallet.IncomingHtlcResolution) er.R { - if _, err := util.ReadFull(r, h.Preimage[:]); err != nil { - return err - } - - var txPresent 
bool - if err := util.ReadBin(r, endian, &txPresent); err != nil { - return err - } - if txPresent { - h.SignedSuccessTx = &wire.MsgTx{} - if err := h.SignedSuccessTx.Deserialize(r); err != nil { - return err - } - } - - err := util.ReadBin(r, endian, &h.CsvDelay) - if err != nil { - return err - } - _, err = util.ReadFull(r, h.ClaimOutpoint.Hash[:]) - if err != nil { - return err - } - err = util.ReadBin(r, endian, &h.ClaimOutpoint.Index) - if err != nil { - return err - } - - return input.ReadSignDescriptor(r, &h.SweepSignDesc) -} - -func encodeOutgoingResolution(w io.Writer, o *lnwallet.OutgoingHtlcResolution) er.R { - if err := util.WriteBin(w, endian, o.Expiry); err != nil { - return err - } - - if o.SignedTimeoutTx == nil { - if err := util.WriteBin(w, endian, false); err != nil { - return err - } - } else { - if err := util.WriteBin(w, endian, true); err != nil { - return err - } - - if err := o.SignedTimeoutTx.Serialize(w); err != nil { - return err - } - } - - if err := util.WriteBin(w, endian, o.CsvDelay); err != nil { - return err - } - if _, err := util.Write(w, o.ClaimOutpoint.Hash[:]); err != nil { - return err - } - if err := util.WriteBin(w, endian, o.ClaimOutpoint.Index); err != nil { - return err - } - - return input.WriteSignDescriptor(w, &o.SweepSignDesc) -} - -func decodeOutgoingResolution(r io.Reader, o *lnwallet.OutgoingHtlcResolution) er.R { - err := util.ReadBin(r, endian, &o.Expiry) - if err != nil { - return err - } - - var txPresent bool - if err := util.ReadBin(r, endian, &txPresent); err != nil { - return err - } - if txPresent { - o.SignedTimeoutTx = &wire.MsgTx{} - if err := o.SignedTimeoutTx.Deserialize(r); err != nil { - return err - } - } - - err = util.ReadBin(r, endian, &o.CsvDelay) - if err != nil { - return err - } - _, err = util.ReadFull(r, o.ClaimOutpoint.Hash[:]) - if err != nil { - return err - } - err = util.ReadBin(r, endian, &o.ClaimOutpoint.Index) - if err != nil { - return err - } - - return 
input.ReadSignDescriptor(r, &o.SweepSignDesc) -} - -func encodeCommitResolution(w io.Writer, - c *lnwallet.CommitOutputResolution) er.R { - - if _, err := util.Write(w, c.SelfOutPoint.Hash[:]); err != nil { - return err - } - err := util.WriteBin(w, endian, c.SelfOutPoint.Index) - if err != nil { - return err - } - - err = input.WriteSignDescriptor(w, &c.SelfOutputSignDesc) - if err != nil { - return err - } - - return util.WriteBin(w, endian, c.MaturityDelay) -} - -func decodeCommitResolution(r io.Reader, - c *lnwallet.CommitOutputResolution) er.R { - - _, err := util.ReadFull(r, c.SelfOutPoint.Hash[:]) - if err != nil { - return err - } - err = util.ReadBin(r, endian, &c.SelfOutPoint.Index) - if err != nil { - return err - } - - err = input.ReadSignDescriptor(r, &c.SelfOutputSignDesc) - if err != nil { - return err - } - - return util.ReadBin(r, endian, &c.MaturityDelay) -} - -func encodeAnchorResolution(w io.Writer, - a *lnwallet.AnchorResolution) er.R { - - if _, err := util.Write(w, a.CommitAnchor.Hash[:]); err != nil { - return err - } - err := util.WriteBin(w, endian, a.CommitAnchor.Index) - if err != nil { - return err - } - - return input.WriteSignDescriptor(w, &a.AnchorSignDescriptor) -} - -func decodeAnchorResolution(r io.Reader, - a *lnwallet.AnchorResolution) er.R { - - _, err := util.ReadFull(r, a.CommitAnchor.Hash[:]) - if err != nil { - return err - } - err = util.ReadBin(r, endian, &a.CommitAnchor.Index) - if err != nil { - return err - } - - return input.ReadSignDescriptor(r, &a.AnchorSignDescriptor) -} - -func encodeHtlcSetKey(w io.Writer, h *HtlcSetKey) er.R { - err := util.WriteBin(w, endian, h.IsRemote) - if err != nil { - return err - } - return util.WriteBin(w, endian, h.IsPending) -} - -func encodeCommitSet(w io.Writer, c *CommitSet) er.R { - if err := encodeHtlcSetKey(w, c.ConfCommitKey); err != nil { - return err - } - - numSets := uint8(len(c.HtlcSets)) - if err := util.WriteBin(w, endian, numSets); err != nil { - return err - } - - for 
htlcSetKey, htlcs := range c.HtlcSets { - htlcSetKey := htlcSetKey - if err := encodeHtlcSetKey(w, &htlcSetKey); err != nil { - return err - } - - if err := channeldb.SerializeHtlcs(w, htlcs...); err != nil { - return err - } - } - - return nil -} - -func decodeHtlcSetKey(r io.Reader, h *HtlcSetKey) er.R { - err := util.ReadBin(r, endian, &h.IsRemote) - if err != nil { - return err - } - - return util.ReadBin(r, endian, &h.IsPending) -} - -func decodeCommitSet(r io.Reader) (*CommitSet, er.R) { - c := &CommitSet{ - ConfCommitKey: &HtlcSetKey{}, - HtlcSets: make(map[HtlcSetKey][]channeldb.HTLC), - } - - if err := decodeHtlcSetKey(r, c.ConfCommitKey); err != nil { - return nil, err - } - - var numSets uint8 - if err := util.ReadBin(r, endian, &numSets); err != nil { - return nil, err - } - - for i := uint8(0); i < numSets; i++ { - var htlcSetKey HtlcSetKey - if err := decodeHtlcSetKey(r, &htlcSetKey); err != nil { - return nil, err - } - - htlcs, err := channeldb.DeserializeHtlcs(r) - if err != nil { - return nil, err - } - - c.HtlcSets[htlcSetKey] = htlcs - } - - return c, nil -} diff --git a/lnd/contractcourt/briefcase_test.go b/lnd/contractcourt/briefcase_test.go deleted file mode 100644 index 8c4e125e..00000000 --- a/lnd/contractcourt/briefcase_test.go +++ /dev/null @@ -1,774 +0,0 @@ -package contractcourt - -import ( - "crypto/rand" - "io/ioutil" - "os" - "reflect" - "testing" - "time" - - prand "math/rand" - - "github.com/davecgh/go-spew/spew" - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/lnd/input" - "github.com/pkt-cash/pktd/lnd/lnwallet" - "github.com/pkt-cash/pktd/txscript/params" - "github.com/pkt-cash/pktd/wire" -) - -var ( - testChainHash = [chainhash.HashSize]byte{ - 0x51, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda, - 0x48, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 
0x17, - 0x2d, 0xe7, 0x93, 0xe4, - } - - testChanPoint1 = wire.OutPoint{ - Hash: chainhash.Hash{ - 0x51, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda, - 0x48, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17, - 0x2d, 0xe7, 0x93, 0xe4, - }, - Index: 1, - } - - testChanPoint2 = wire.OutPoint{ - Hash: chainhash.Hash{ - 0x48, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17, - 0x51, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda, - 0x2d, 0xe7, 0x93, 0xe4, - }, - Index: 2, - } - - testChanPoint3 = wire.OutPoint{ - Hash: chainhash.Hash{ - 0x48, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17, - 0x51, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda, - 0x2d, 0xe7, 0x93, 0xe4, - }, - Index: 3, - } - - testPreimage = [32]byte{ - 0x52, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda, - 0x48, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17, - 0x2d, 0xe7, 0x93, 0xe4, - } - - key1 = []byte{ - 0x04, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a, - 0x01, 0x6b, 0x49, 0x84, 0x0f, 0x8c, 0x53, 0xbc, 0x1e, - 0xb6, 0x8a, 0x38, 0x2e, 0x97, 0xb1, 0x48, 0x2e, 0xca, - 0xd7, 0xb1, 0x48, 0xa6, 0x90, 0x9a, 0x5c, 0xb2, 0xe0, - 0xea, 0xdd, 0xfb, 0x84, 0xcc, 0xf9, 0x74, 0x44, 0x64, - 0xf8, 0x2e, 0x16, 0x0b, 0xfa, 0x9b, 0x8b, 0x64, 0xf9, - 0xd4, 0xc0, 0x3f, 0x99, 0x9b, 0x86, 0x43, 0xf6, 0x56, - 0xb4, 0x12, 0xa3, - } - - testSignDesc = input.SignDescriptor{ - SingleTweak: []byte{ - 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, - 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, - 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, - 0x02, 0x02, 0x02, 0x02, 0x02, - }, - WitnessScript: []byte{ - 0x00, 0x14, 0xee, 0x91, 0x41, 0x7e, 0x85, 0x6c, 0xde, - 0x10, 0xa2, 0x91, 0x1e, 0xdc, 0xbd, 0xbd, 0x69, 0xe2, - 0xef, 0xb5, 0x71, 0x48, - }, - Output: &wire.TxOut{ - Value: 5000000000, - PkScript: []byte{ - 0x41, // OP_DATA_65 - 0x04, 0xd6, 0x4b, 0xdf, 0xd0, 0x9e, 0xb1, 0xc5, - 0xfe, 0x29, 0x5a, 0xbd, 0xeb, 0x1d, 0xca, 0x42, - 0x81, 0xbe, 0x98, 0x8e, 0x2d, 0xa0, 0xb6, 0xc1, - 0xc6, 0xa5, 0x9d, 0xc2, 0x26, 0xc2, 0x86, 0x24, - 0xe1, 0x81, 0x75, 0xe8, 0x51, 
0xc9, 0x6b, 0x97, - 0x3d, 0x81, 0xb0, 0x1c, 0xc3, 0x1f, 0x04, 0x78, - 0x34, 0xbc, 0x06, 0xd6, 0xd6, 0xed, 0xf6, 0x20, - 0xd1, 0x84, 0x24, 0x1a, 0x6a, 0xed, 0x8b, 0x63, - 0xa6, // 65-byte signature - 0xac, // OP_CHECKSIG - }, - }, - HashType: params.SigHashAll, - } -) - -func makeTestDB() (kvdb.Backend, func(), er.R) { - // First, create a temporary directory to be used for the duration of - // this test. - tempDirName, errr := ioutil.TempDir("", "arblog") - if errr != nil { - return nil, nil, er.E(errr) - } - - db, err := kvdb.Create(kvdb.BoltBackendName, tempDirName+"/test.db", true) - if err != nil { - return nil, nil, err - } - - cleanUp := func() { - db.Close() - os.RemoveAll(tempDirName) - } - - return db, cleanUp, nil -} - -func newTestBoltArbLog(chainhash chainhash.Hash, - op wire.OutPoint) (ArbitratorLog, func(), er.R) { - - testDB, cleanUp, err := makeTestDB() - if err != nil { - return nil, nil, err - } - - testArbCfg := ChannelArbitratorConfig{ - PutResolverReport: func(_ kvdb.RwTx, - _ *channeldb.ResolverReport) er.R { - return nil - }, - } - testLog, err := newBoltArbitratorLog(testDB, testArbCfg, chainhash, op) - if err != nil { - return nil, nil, err - } - - return testLog, cleanUp, err -} - -func randOutPoint() wire.OutPoint { - var op wire.OutPoint - rand.Read(op.Hash[:]) - op.Index = prand.Uint32() - - return op -} - -func assertResolversEqual(t *testing.T, originalResolver ContractResolver, - diskResolver ContractResolver) { - - assertTimeoutResEqual := func(ogRes, diskRes *htlcTimeoutResolver) { - if !reflect.DeepEqual(ogRes.htlcResolution, diskRes.htlcResolution) { - t.Fatalf("resolution mismatch: expected %#v, got %v#", - ogRes.htlcResolution, diskRes.htlcResolution) - } - if ogRes.outputIncubating != diskRes.outputIncubating { - t.Fatalf("expected %v, got %v", - ogRes.outputIncubating, diskRes.outputIncubating) - } - if ogRes.resolved != diskRes.resolved { - t.Fatalf("expected %v, got %v", ogRes.resolved, - diskRes.resolved) - } - if 
ogRes.broadcastHeight != diskRes.broadcastHeight { - t.Fatalf("expected %v, got %v", - ogRes.broadcastHeight, diskRes.broadcastHeight) - } - if ogRes.htlc.HtlcIndex != diskRes.htlc.HtlcIndex { - t.Fatalf("expected %v, got %v", ogRes.htlc.HtlcIndex, - diskRes.htlc.HtlcIndex) - } - } - - assertSuccessResEqual := func(ogRes, diskRes *htlcSuccessResolver) { - if !reflect.DeepEqual(ogRes.htlcResolution, diskRes.htlcResolution) { - t.Fatalf("resolution mismatch: expected %#v, got %v#", - ogRes.htlcResolution, diskRes.htlcResolution) - } - if ogRes.outputIncubating != diskRes.outputIncubating { - t.Fatalf("expected %v, got %v", - ogRes.outputIncubating, diskRes.outputIncubating) - } - if ogRes.resolved != diskRes.resolved { - t.Fatalf("expected %v, got %v", ogRes.resolved, - diskRes.resolved) - } - if ogRes.broadcastHeight != diskRes.broadcastHeight { - t.Fatalf("expected %v, got %v", - ogRes.broadcastHeight, diskRes.broadcastHeight) - } - if ogRes.htlc.RHash != diskRes.htlc.RHash { - t.Fatalf("expected %v, got %v", ogRes.htlc.RHash, - diskRes.htlc.RHash) - } - } - - switch ogRes := originalResolver.(type) { - case *htlcTimeoutResolver: - diskRes := diskResolver.(*htlcTimeoutResolver) - assertTimeoutResEqual(ogRes, diskRes) - - case *htlcSuccessResolver: - diskRes := diskResolver.(*htlcSuccessResolver) - assertSuccessResEqual(ogRes, diskRes) - - case *htlcOutgoingContestResolver: - diskRes := diskResolver.(*htlcOutgoingContestResolver) - assertTimeoutResEqual( - &ogRes.htlcTimeoutResolver, &diskRes.htlcTimeoutResolver, - ) - - case *htlcIncomingContestResolver: - diskRes := diskResolver.(*htlcIncomingContestResolver) - assertSuccessResEqual( - &ogRes.htlcSuccessResolver, &diskRes.htlcSuccessResolver, - ) - - if ogRes.htlcExpiry != diskRes.htlcExpiry { - t.Fatalf("expected %v, got %v", ogRes.htlcExpiry, - diskRes.htlcExpiry) - } - - case *commitSweepResolver: - diskRes := diskResolver.(*commitSweepResolver) - if !reflect.DeepEqual(ogRes.commitResolution, 
diskRes.commitResolution) { - t.Fatalf("resolution mismatch: expected %v, got %v", - ogRes.commitResolution, diskRes.commitResolution) - } - if ogRes.resolved != diskRes.resolved { - t.Fatalf("expected %v, got %v", ogRes.resolved, - diskRes.resolved) - } - if ogRes.broadcastHeight != diskRes.broadcastHeight { - t.Fatalf("expected %v, got %v", - ogRes.broadcastHeight, diskRes.broadcastHeight) - } - if ogRes.chanPoint != diskRes.chanPoint { - t.Fatalf("expected %v, got %v", ogRes.chanPoint, - diskRes.chanPoint) - } - } -} - -// TestContractInsertionRetrieval tests that were able to insert a set of -// unresolved contracts into the log, and retrieve the same set properly. -func TestContractInsertionRetrieval(t *testing.T) { - t.Parallel() - - // First, we'll create a test instance of the ArbitratorLog - // implementation backed by boltdb. - testLog, cleanUp, err := newTestBoltArbLog( - testChainHash, testChanPoint1, - ) - if err != nil { - t.Fatalf("unable to create test log: %v", err) - } - defer cleanUp() - - // The log created, we'll create a series of resolvers, each properly - // implementing the ContractResolver interface. 
- timeoutResolver := htlcTimeoutResolver{ - htlcResolution: lnwallet.OutgoingHtlcResolution{ - Expiry: 99, - SignedTimeoutTx: nil, - CsvDelay: 99, - ClaimOutpoint: randOutPoint(), - SweepSignDesc: testSignDesc, - }, - outputIncubating: true, - resolved: true, - broadcastHeight: 102, - htlc: channeldb.HTLC{ - HtlcIndex: 12, - }, - } - successResolver := htlcSuccessResolver{ - htlcResolution: lnwallet.IncomingHtlcResolution{ - Preimage: testPreimage, - SignedSuccessTx: nil, - CsvDelay: 900, - ClaimOutpoint: randOutPoint(), - SweepSignDesc: testSignDesc, - }, - outputIncubating: true, - resolved: true, - broadcastHeight: 109, - htlc: channeldb.HTLC{ - RHash: testPreimage, - }, - sweepTx: nil, - } - resolvers := []ContractResolver{ - &timeoutResolver, - &successResolver, - &commitSweepResolver{ - commitResolution: lnwallet.CommitOutputResolution{ - SelfOutPoint: testChanPoint2, - SelfOutputSignDesc: testSignDesc, - MaturityDelay: 99, - }, - resolved: false, - broadcastHeight: 109, - chanPoint: testChanPoint1, - }, - } - - // All resolvers require a unique ResolverKey() output. To achieve this - // for the composite resolvers, we'll mutate the underlying resolver - // with a new outpoint. - contestTimeout := timeoutResolver - contestTimeout.htlcResolution.ClaimOutpoint = randOutPoint() - resolvers = append(resolvers, &htlcOutgoingContestResolver{ - htlcTimeoutResolver: contestTimeout, - }) - contestSuccess := successResolver - contestSuccess.htlcResolution.ClaimOutpoint = randOutPoint() - resolvers = append(resolvers, &htlcIncomingContestResolver{ - htlcExpiry: 100, - htlcSuccessResolver: contestSuccess, - }) - - // For quick lookup during the test, we'll create this map which allow - // us to lookup a resolver according to its unique resolver key. 
- resolverMap := make(map[string]ContractResolver) - resolverMap[string(timeoutResolver.ResolverKey())] = resolvers[0] - resolverMap[string(successResolver.ResolverKey())] = resolvers[1] - resolverMap[string(resolvers[2].ResolverKey())] = resolvers[2] - resolverMap[string(resolvers[3].ResolverKey())] = resolvers[3] - resolverMap[string(resolvers[4].ResolverKey())] = resolvers[4] - - // Now, we'll insert the resolver into the log, we do not need to apply - // any closures, so we will pass in nil. - err = testLog.InsertUnresolvedContracts(nil, resolvers...) - if err != nil { - t.Fatalf("unable to insert resolvers: %v", err) - } - - // With the resolvers inserted, we'll now attempt to retrieve them from - // the database, so we can compare them to the versions we created - // above. - diskResolvers, err := testLog.FetchUnresolvedContracts() - if err != nil { - t.Fatalf("unable to retrieve resolvers: %v", err) - } - - if len(diskResolvers) != len(resolvers) { - t.Fatalf("expected %v got resolvers, instead got %v: %#v", - len(resolvers), len(diskResolvers), - diskResolvers) - } - - // Now we'll run through each of the resolvers, and ensure that it maps - // to a resolver perfectly that we inserted previously. - for _, diskResolver := range diskResolvers { - resKey := string(diskResolver.ResolverKey()) - originalResolver, ok := resolverMap[resKey] - if !ok { - t.Fatalf("unable to find resolver match for %T: %v", - diskResolver, resKey) - } - - assertResolversEqual(t, originalResolver, diskResolver) - } - - // We'll now delete the state, then attempt to retrieve the set of - // resolvers, no resolvers should be found. 
- if err := testLog.WipeHistory(); err != nil { - t.Fatalf("unable to wipe log: %v", err) - } - diskResolvers, err = testLog.FetchUnresolvedContracts() - if err != nil { - t.Fatalf("unable to fetch unresolved contracts: %v", err) - } - if len(diskResolvers) != 0 { - t.Fatalf("no resolvers should be found, instead %v were", - len(diskResolvers)) - } -} - -// TestContractResolution tests that once we mark a contract as resolved, it's -// properly removed from the database. -func TestContractResolution(t *testing.T) { - t.Parallel() - - // First, we'll create a test instance of the ArbitratorLog - // implementation backed by boltdb. - testLog, cleanUp, err := newTestBoltArbLog( - testChainHash, testChanPoint1, - ) - if err != nil { - t.Fatalf("unable to create test log: %v", err) - } - defer cleanUp() - - // We'll now create a timeout resolver that we'll be using for the - // duration of this test. - timeoutResolver := &htlcTimeoutResolver{ - htlcResolution: lnwallet.OutgoingHtlcResolution{ - Expiry: 991, - SignedTimeoutTx: nil, - CsvDelay: 992, - ClaimOutpoint: randOutPoint(), - SweepSignDesc: testSignDesc, - }, - outputIncubating: true, - resolved: true, - broadcastHeight: 192, - htlc: channeldb.HTLC{ - HtlcIndex: 9912, - }, - } - - // First, we'll insert the resolver into the database and ensure that - // we get the same resolver out the other side. We do not need to apply - // any closures. - err = testLog.InsertUnresolvedContracts(nil, timeoutResolver) - if err != nil { - t.Fatalf("unable to insert contract into db: %v", err) - } - dbContracts, err := testLog.FetchUnresolvedContracts() - if err != nil { - t.Fatalf("unable to fetch contracts from db: %v", err) - } - assertResolversEqual(t, timeoutResolver, dbContracts[0]) - - // Now, we'll mark the contract as resolved within the database. 
- if err := testLog.ResolveContract(timeoutResolver); err != nil { - t.Fatalf("unable to resolve contract: %v", err) - } - - // At this point, no contracts should exist within the log. - dbContracts, err = testLog.FetchUnresolvedContracts() - if err != nil { - t.Fatalf("unable to fetch contracts from db: %v", err) - } - if len(dbContracts) != 0 { - t.Fatalf("no contract should be from in the db, instead %v "+ - "were", len(dbContracts)) - } -} - -// TestContractSwapping ensures that callers are able to atomically swap to -// distinct contracts for one another. -func TestContractSwapping(t *testing.T) { - t.Parallel() - - // First, we'll create a test instance of the ArbitratorLog - // implementation backed by boltdb. - testLog, cleanUp, err := newTestBoltArbLog( - testChainHash, testChanPoint1, - ) - if err != nil { - t.Fatalf("unable to create test log: %v", err) - } - defer cleanUp() - - // We'll create two resolvers, a regular timeout resolver, and the - // contest resolver that eventually turns into the timeout resolver. - timeoutResolver := htlcTimeoutResolver{ - htlcResolution: lnwallet.OutgoingHtlcResolution{ - Expiry: 99, - SignedTimeoutTx: nil, - CsvDelay: 99, - ClaimOutpoint: randOutPoint(), - SweepSignDesc: testSignDesc, - }, - outputIncubating: true, - resolved: true, - broadcastHeight: 102, - htlc: channeldb.HTLC{ - HtlcIndex: 12, - }, - } - contestResolver := &htlcOutgoingContestResolver{ - htlcTimeoutResolver: timeoutResolver, - } - - // We'll first insert the contest resolver into the log with no - // additional updates. - err = testLog.InsertUnresolvedContracts(nil, contestResolver) - if err != nil { - t.Fatalf("unable to insert contract into db: %v", err) - } - - // With the resolver inserted, we'll now attempt to atomically swap it - // for its underlying timeout resolver. 
- err = testLog.SwapContract(contestResolver, &timeoutResolver) - if err != nil { - t.Fatalf("unable to swap contracts: %v", err) - } - - // At this point, there should now only be a single contract in the - // database. - dbContracts, err := testLog.FetchUnresolvedContracts() - if err != nil { - t.Fatalf("unable to fetch contracts from db: %v", err) - } - if len(dbContracts) != 1 { - t.Fatalf("one contract should be from in the db, instead %v "+ - "were", len(dbContracts)) - } - - // That single contract should be the underlying timeout resolver. - assertResolversEqual(t, &timeoutResolver, dbContracts[0]) -} - -// TestContractResolutionsStorage tests that we're able to properly store and -// retrieve contract resolutions written to disk. -func TestContractResolutionsStorage(t *testing.T) { - t.Parallel() - - // First, we'll create a test instance of the ArbitratorLog - // implementation backed by boltdb. - testLog, cleanUp, err := newTestBoltArbLog( - testChainHash, testChanPoint1, - ) - if err != nil { - t.Fatalf("unable to create test log: %v", err) - } - defer cleanUp() - - // With the test log created, we'll now craft a contact resolution that - // will be using for the duration of this test. 
- res := ContractResolutions{ - CommitHash: testChainHash, - CommitResolution: &lnwallet.CommitOutputResolution{ - SelfOutPoint: testChanPoint2, - SelfOutputSignDesc: testSignDesc, - MaturityDelay: 101, - }, - HtlcResolutions: lnwallet.HtlcResolutions{ - IncomingHTLCs: []lnwallet.IncomingHtlcResolution{ - { - Preimage: testPreimage, - SignedSuccessTx: nil, - CsvDelay: 900, - ClaimOutpoint: randOutPoint(), - SweepSignDesc: testSignDesc, - }, - }, - OutgoingHTLCs: []lnwallet.OutgoingHtlcResolution{ - { - Expiry: 103, - SignedTimeoutTx: nil, - CsvDelay: 923923, - ClaimOutpoint: randOutPoint(), - SweepSignDesc: testSignDesc, - }, - }, - }, - AnchorResolution: &lnwallet.AnchorResolution{ - CommitAnchor: testChanPoint3, - AnchorSignDescriptor: testSignDesc, - }, - } - - // First make sure that fetching unlogged contract resolutions will - // fail. - _, err = testLog.FetchContractResolutions() - if err == nil { - t.Fatalf("expected reading unlogged resolution from db to fail") - } - - // Insert the resolution into the database, then immediately retrieve - // them so we can compare equality against the original version. - if err := testLog.LogContractResolutions(&res); err != nil { - t.Fatalf("unable to insert resolutions into db: %v", err) - } - diskRes, err := testLog.FetchContractResolutions() - if err != nil { - t.Fatalf("unable to read resolution from db: %v", err) - } - - if !reflect.DeepEqual(&res, diskRes) { - t.Fatalf("resolution mismatch: expected %#v\n, got %#v", - &res, diskRes) - } - - // We'll now delete the state, then attempt to retrieve the set of - // resolvers, no resolutions should be found. - if err := testLog.WipeHistory(); err != nil { - t.Fatalf("unable to wipe log: %v", err) - } - _, err = testLog.FetchContractResolutions() - if !errScopeBucketNoExist.Is(err) { - t.Fatalf("unexpected error: %v", err) - } -} - -// TestStateMutation tests that we're able to properly mutate the state of the -// log, then retrieve that same mutated state from disk. 
-func TestStateMutation(t *testing.T) { - t.Parallel() - - testLog, cleanUp, err := newTestBoltArbLog( - testChainHash, testChanPoint1, - ) - if err != nil { - t.Fatalf("unable to create test log: %v", err) - } - defer cleanUp() - - // The default state of an arbitrator should be StateDefault. - arbState, err := testLog.CurrentState(nil) - if err != nil { - t.Fatalf("unable to read arb state: %v", err) - } - if arbState != StateDefault { - t.Fatalf("state mismatch: expected %v, got %v", StateDefault, - arbState) - } - - // We should now be able to mutate the state to an arbitrary one of our - // choosing, then read that same state back from disk. - if err := testLog.CommitState(StateFullyResolved); err != nil { - t.Fatalf("unable to write state: %v", err) - } - arbState, err = testLog.CurrentState(nil) - if err != nil { - t.Fatalf("unable to read arb state: %v", err) - } - if arbState != StateFullyResolved { - t.Fatalf("state mismatch: expected %v, got %v", StateFullyResolved, - arbState) - } - - // Next, we'll wipe our state and ensure that if we try to query for - // the current state, we get the proper error. - err = testLog.WipeHistory() - if err != nil { - t.Fatalf("unable to wipe history: %v", err) - } - - // If we try to query for the state again, we should get the default - // state again. - arbState, err = testLog.CurrentState(nil) - if err != nil { - t.Fatalf("unable to query current state: %v", err) - } - if arbState != StateDefault { - t.Fatalf("state mismatch: expected %v, got %v", StateDefault, - arbState) - } -} - -// TestScopeIsolation tests the two distinct ArbitratorLog instances with two -// distinct scopes, don't over write the state of one another. -func TestScopeIsolation(t *testing.T) { - t.Parallel() - - // We'll create two distinct test logs. Each log will have a unique - // scope key, and therefore should be isolated from the other on disk. 
- testLog1, cleanUp1, err := newTestBoltArbLog( - testChainHash, testChanPoint1, - ) - if err != nil { - t.Fatalf("unable to create test log: %v", err) - } - defer cleanUp1() - - testLog2, cleanUp2, err := newTestBoltArbLog( - testChainHash, testChanPoint2, - ) - if err != nil { - t.Fatalf("unable to create test log: %v", err) - } - defer cleanUp2() - - // We'll now update the current state of both the logs to a unique - // state. - if err := testLog1.CommitState(StateWaitingFullResolution); err != nil { - t.Fatalf("unable to write state: %v", err) - } - if err := testLog2.CommitState(StateContractClosed); err != nil { - t.Fatalf("unable to write state: %v", err) - } - - // Querying each log, the states should be the prior one we set, and be - // disjoint. - log1State, err := testLog1.CurrentState(nil) - if err != nil { - t.Fatalf("unable to read arb state: %v", err) - } - log2State, err := testLog2.CurrentState(nil) - if err != nil { - t.Fatalf("unable to read arb state: %v", err) - } - - if log1State == log2State { - t.Fatalf("log states are the same: %v", log1State) - } - - if log1State != StateWaitingFullResolution { - t.Fatalf("state mismatch: expected %v, got %v", - StateWaitingFullResolution, log1State) - } - if log2State != StateContractClosed { - t.Fatalf("state mismatch: expected %v, got %v", - StateContractClosed, log2State) - } -} - -// TestCommitSetStorage tests that we're able to properly read/write active -// commitment sets. 
-func TestCommitSetStorage(t *testing.T) { - t.Parallel() - - testLog, cleanUp, err := newTestBoltArbLog( - testChainHash, testChanPoint1, - ) - if err != nil { - t.Fatalf("unable to create test log: %v", err) - } - defer cleanUp() - - activeHTLCs := []channeldb.HTLC{ - { - Amt: 1000, - OnionBlob: make([]byte, 0), - Signature: make([]byte, 0), - }, - } - - confTypes := []HtlcSetKey{ - LocalHtlcSet, RemoteHtlcSet, RemotePendingHtlcSet, - } - for _, pendingRemote := range []bool{true, false} { - for _, confType := range confTypes { - commitSet := &CommitSet{ - ConfCommitKey: &confType, - HtlcSets: make(map[HtlcSetKey][]channeldb.HTLC), - } - commitSet.HtlcSets[LocalHtlcSet] = activeHTLCs - commitSet.HtlcSets[RemoteHtlcSet] = activeHTLCs - - if pendingRemote { - commitSet.HtlcSets[RemotePendingHtlcSet] = activeHTLCs - } - - err := testLog.InsertConfirmedCommitSet(commitSet) - if err != nil { - t.Fatalf("unable to write commit set: %v", err) - } - - diskCommitSet, err := testLog.FetchConfirmedCommitSet(nil) - if err != nil { - t.Fatalf("unable to read commit set: %v", err) - } - - if !reflect.DeepEqual(commitSet, diskCommitSet) { - t.Fatalf("commit set mismatch: expected %v, got %v", - spew.Sdump(commitSet), spew.Sdump(diskCommitSet)) - } - } - } - -} - -func init() { - testSignDesc.KeyDesc.PubKey, _ = btcec.ParsePubKey(key1, btcec.S256()) - - prand.Seed(time.Now().Unix()) -} diff --git a/lnd/contractcourt/chain_arbitrator.go b/lnd/contractcourt/chain_arbitrator.go deleted file mode 100644 index be4948b7..00000000 --- a/lnd/contractcourt/chain_arbitrator.go +++ /dev/null @@ -1,1136 +0,0 @@ -package contractcourt - -import ( - "sync" - "sync/atomic" - "time" - - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/lnd/chainntnfs" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/lnd/clock" - 
"github.com/pkt-cash/pktd/lnd/input" - "github.com/pkt-cash/pktd/lnd/labels" - "github.com/pkt-cash/pktd/lnd/lnwallet" - "github.com/pkt-cash/pktd/lnd/lnwallet/chainfee" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/pktlog/log" - "github.com/pkt-cash/pktd/pktwallet/walletdb" - "github.com/pkt-cash/pktd/wire" -) - -var Err = er.NewErrorType("lnd.contractcourt") - -// ErrChainArbExiting signals that the chain arbitrator is shutting down. -var ErrChainArbExiting = Err.CodeWithDetail("ErrChainArbExiting", "ChainArbitrator exiting") - -// ResolutionMsg is a message sent by resolvers to outside sub-systems once an -// outgoing contract has been fully resolved. For multi-hop contracts, if we -// resolve the outgoing contract, we'll also need to ensure that the incoming -// contract is resolved as well. We package the items required to resolve the -// incoming contracts within this message. -type ResolutionMsg struct { - // SourceChan identifies the channel that this message is being sent - // from. This is the channel's short channel ID. - SourceChan lnwire.ShortChannelID - - // HtlcIndex is the index of the contract within the original - // commitment trace. - HtlcIndex uint64 - - // Failure will be non-nil if the incoming contract should be canceled - // all together. This can happen if the outgoing contract was dust, if - // if the outgoing HTLC timed out. - Failure lnwire.FailureMessage - - // PreImage will be non-nil if the incoming contract can successfully - // be redeemed. This can happen if we learn of the preimage from the - // outgoing HTLC on-chain. - PreImage *[32]byte -} - -// ChainArbitratorConfig is a configuration struct that contains all the -// function closures and interface that required to arbitrate on-chain -// contracts for a particular chain. -type ChainArbitratorConfig struct { - // ChainHash is the chain that this arbitrator is to operate within. 
- ChainHash chainhash.Hash - - // IncomingBroadcastDelta is the delta that we'll use to decide when to - // broadcast our commitment transaction if we have incoming htlcs. This - // value should be set based on our current fee estimation of the - // commitment transaction. We use this to determine when we should - // broadcast instead of just the HTLC timeout, as we want to ensure - // that the commitment transaction is already confirmed, by the time the - // HTLC expires. Otherwise we may end up not settling the htlc on-chain - // because the other party managed to time it out. - IncomingBroadcastDelta uint32 - - // OutgoingBroadcastDelta is the delta that we'll use to decide when to - // broadcast our commitment transaction if there are active outgoing - // htlcs. This value can be lower than the incoming broadcast delta. - OutgoingBroadcastDelta uint32 - - // NewSweepAddr is a function that returns a new address under control - // by the wallet. We'll use this to sweep any no-delay outputs as a - // result of unilateral channel closes. - // - // NOTE: This SHOULD return a p2wkh script. - NewSweepAddr func() ([]byte, er.R) - - // PublishTx reliably broadcasts a transaction to the network. Once - // this function exits without an error, then they transaction MUST - // continually be rebroadcast if needed. - PublishTx func(*wire.MsgTx, string) er.R - - // DeliverResolutionMsg is a function that will append an outgoing - // message to the "out box" for a ChannelLink. This is used to cancel - // backwards any HTLC's that are either dust, we're timing out, or - // settling on-chain to the incoming link. - DeliverResolutionMsg func(...ResolutionMsg) er.R - - // MarkLinkInactive is a function closure that the ChainArbitrator will - // use to mark that active HTLC's shouldn't be attempted to be routed - // over a particular channel. This function will be called in that a - // ChannelArbitrator decides that it needs to go to chain in order to - // resolve contracts. 
- // - // TODO(roasbeef): rename, routing based - MarkLinkInactive func(wire.OutPoint) er.R - - // ContractBreach is a function closure that the ChainArbitrator will - // use to notify the breachArbiter about a contract breach. It should - // only return a non-nil error when the breachArbiter has preserved the - // necessary breach info for this channel point, and it is safe to mark - // the channel as pending close in the database. - ContractBreach func(wire.OutPoint, *lnwallet.BreachRetribution) er.R - - // IsOurAddress is a function that returns true if the passed address - // is known to the underlying wallet. Otherwise, false should be - // returned. - IsOurAddress func(btcutil.Address) bool - - // IncubateOutput sends either an incoming HTLC, an outgoing HTLC, or - // both to the utxo nursery. Once this function returns, the nursery - // should have safely persisted the outputs to disk, and should start - // the process of incubation. This is used when a resolver wishes to - // pass off the output to the nursery as we're only waiting on an - // absolute/relative item block. - IncubateOutputs func(wire.OutPoint, *lnwallet.OutgoingHtlcResolution, - *lnwallet.IncomingHtlcResolution, uint32) er.R - - // PreimageDB is a global store of all known pre-images. We'll use this - // to decide if we should broadcast a commitment transaction to claim - // an HTLC on-chain. - PreimageDB WitnessBeacon - - // Notifier is an instance of a chain notifier we'll use to watch for - // certain on-chain events. - Notifier chainntnfs.ChainNotifier - - // Signer is a signer backed by the active lnd node. This should be - // capable of producing a signature as specified by a valid - // SignDescriptor. - Signer input.Signer - - // FeeEstimator will be used to return fee estimates. - FeeEstimator chainfee.Estimator - - // ChainIO allows us to query the state of the current main chain. 
- ChainIO lnwallet.BlockChainIO - - // DisableChannel disables a channel, resulting in it not being able to - // forward payments. - DisableChannel func(wire.OutPoint) er.R - - // Sweeper allows resolvers to sweep their final outputs. - Sweeper UtxoSweeper - - // Registry is the invoice database that is used by resolvers to lookup - // preimages and settle invoices. - Registry Registry - - // NotifyClosedChannel is a function closure that the ChainArbitrator - // will use to notify the ChannelNotifier about a newly closed channel. - NotifyClosedChannel func(wire.OutPoint) - - // OnionProcessor is used to decode onion payloads for on-chain - // resolution. - OnionProcessor OnionProcessor - - // PaymentsExpirationGracePeriod indicates a time window we let the - // other node to cancel an outgoing htlc that our node has initiated and - // has timed out. - PaymentsExpirationGracePeriod time.Duration - - // IsForwardedHTLC checks for a given htlc, identified by channel id and - // htlcIndex, if it is a forwarded one. - IsForwardedHTLC func(chanID lnwire.ShortChannelID, htlcIndex uint64) bool - - // Clock is the clock implementation that ChannelArbitrator uses. - // It is useful for testing. - Clock clock.Clock -} - -// ChainArbitrator is a sub-system that oversees the on-chain resolution of all -// active, and channel that are in the "pending close" state. Within the -// contractcourt package, the ChainArbitrator manages a set of active -// ContractArbitrators. Each ContractArbitrators is responsible for watching -// the chain for any activity that affects the state of the channel, and also -// for monitoring each contract in order to determine if any on-chain activity is -// required. Outside sub-systems interact with the ChainArbitrator in order to -// forcibly exit a contract, update the set of live signals for each contract, -// and to receive reports on the state of contract resolution. -type ChainArbitrator struct { - started int32 // To be used atomically. 
- stopped int32 // To be used atomically. - - sync.Mutex - - // activeChannels is a map of all the active contracts that are still - // open, and not fully resolved. - activeChannels map[wire.OutPoint]*ChannelArbitrator - - // activeWatchers is a map of all the active chainWatchers for channels - // that are still considered open. - activeWatchers map[wire.OutPoint]*chainWatcher - - // cfg is the config struct for the arbitrator that contains all - // methods and interface it needs to operate. - cfg ChainArbitratorConfig - - // chanSource will be used by the ChainArbitrator to fetch all the - // active channels that it must still watch over. - chanSource *channeldb.DB - - quit chan struct{} - - wg sync.WaitGroup -} - -// NewChainArbitrator returns a new instance of the ChainArbitrator using the -// passed config struct, and backing persistent database. -func NewChainArbitrator(cfg ChainArbitratorConfig, - db *channeldb.DB) *ChainArbitrator { - - return &ChainArbitrator{ - cfg: cfg, - activeChannels: make(map[wire.OutPoint]*ChannelArbitrator), - activeWatchers: make(map[wire.OutPoint]*chainWatcher), - chanSource: db, - quit: make(chan struct{}), - } -} - -// arbChannel is a wrapper around an open channel that channel arbitrators -// interact with. -type arbChannel struct { - // channel is the in-memory channel state. - channel *channeldb.OpenChannel - - // c references the chain arbitrator and is used by arbChannel - // internally. - c *ChainArbitrator -} - -// NewAnchorResolutions returns the anchor resolutions for currently valid -// commitment transactions. -// -// NOTE: Part of the ArbChannel interface. -func (a *arbChannel) NewAnchorResolutions() ([]*lnwallet.AnchorResolution, - er.R) { - - // Get a fresh copy of the database state to base the anchor resolutions - // on. Unfortunately the channel instance that we have here isn't the - // same instance that is used by the link. 
- chanPoint := a.channel.FundingOutpoint - - channel, err := a.c.chanSource.FetchChannel(chanPoint) - if err != nil { - return nil, err - } - - chanMachine, err := lnwallet.NewLightningChannel( - a.c.cfg.Signer, channel, nil, - ) - if err != nil { - return nil, err - } - - return chanMachine.NewAnchorResolutions() -} - -// ForceCloseChan should force close the contract that this attendant is -// watching over. We'll use this when we decide that we need to go to chain. It -// should in addition tell the switch to remove the corresponding link, such -// that we won't accept any new updates. The returned summary contains all items -// needed to eventually resolve all outputs on chain. -// -// NOTE: Part of the ArbChannel interface. -func (a *arbChannel) ForceCloseChan() (*lnwallet.LocalForceCloseSummary, er.R) { - // First, we mark the channel as borked, this ensure - // that no new state transitions can happen, and also - // that the link won't be loaded into the switch. - if err := a.channel.MarkBorked(); err != nil { - return nil, err - } - - // With the channel marked as borked, we'll now remove - // the link from the switch if its there. If the link - // is active, then this method will block until it - // exits. - chanPoint := a.channel.FundingOutpoint - - if err := a.c.cfg.MarkLinkInactive(chanPoint); err != nil { - log.Errorf("unable to mark link inactive: %v", err) - } - - // Now that we know the link can't mutate the channel - // state, we'll read the channel from disk the target - // channel according to its channel point. - channel, err := a.c.chanSource.FetchChannel(chanPoint) - if err != nil { - return nil, err - } - - // Finally, we'll force close the channel completing - // the force close workflow. 
- chanMachine, err := lnwallet.NewLightningChannel( - a.c.cfg.Signer, channel, nil, - ) - if err != nil { - return nil, err - } - return chanMachine.ForceClose() -} - -// newActiveChannelArbitrator creates a new instance of an active channel -// arbitrator given the state of the target channel. -func newActiveChannelArbitrator(channel *channeldb.OpenChannel, - c *ChainArbitrator, chanEvents *ChainEventSubscription) (*ChannelArbitrator, er.R) { - - log.Tracef("Creating ChannelArbitrator for ChannelPoint(%v)", - channel.FundingOutpoint) - - // TODO(roasbeef): fetch best height (or pass in) so can ensure block - // epoch delivers all the notifications to - - chanPoint := channel.FundingOutpoint - - // Next we'll create the matching configuration struct that contains - // all interfaces and methods the arbitrator needs to do its job. - arbCfg := ChannelArbitratorConfig{ - ChanPoint: chanPoint, - Channel: c.getArbChannel(channel), - ShortChanID: channel.ShortChanID(), - - MarkCommitmentBroadcasted: channel.MarkCommitmentBroadcasted, - MarkChannelClosed: func(summary *channeldb.ChannelCloseSummary, - statuses ...channeldb.ChannelStatus) er.R { - - err := channel.CloseChannel(summary, statuses...) - if err != nil { - return err - } - c.cfg.NotifyClosedChannel(summary.ChanPoint) - return nil - }, - IsPendingClose: false, - ChainArbitratorConfig: c.cfg, - ChainEvents: chanEvents, - PutResolverReport: func(tx kvdb.RwTx, - report *channeldb.ResolverReport) er.R { - - return c.chanSource.PutResolverReport( - tx, c.cfg.ChainHash, &channel.FundingOutpoint, - report, - ) - }, - } - - // The final component needed is an arbitrator log that the arbitrator - // will use to keep track of its internal state using a backed - // persistent log. - // - // TODO(roasbeef); abstraction leak... 
- // * rework: adaptor method to set log scope w/ factory func - chanLog, err := newBoltArbitratorLog( - c.chanSource.Backend, arbCfg, c.cfg.ChainHash, chanPoint, - ) - if err != nil { - return nil, err - } - - arbCfg.MarkChannelResolved = func() er.R { - return c.ResolveContract(chanPoint) - } - - // Finally, we'll need to construct a series of htlc Sets based on all - // currently known valid commitments. - htlcSets := make(map[HtlcSetKey]htlcSet) - htlcSets[LocalHtlcSet] = newHtlcSet(channel.LocalCommitment.Htlcs) - htlcSets[RemoteHtlcSet] = newHtlcSet(channel.RemoteCommitment.Htlcs) - - pendingRemoteCommitment, err := channel.RemoteCommitChainTip() - if err != nil && !channeldb.ErrNoPendingCommit.Is(err) { - return nil, err - } - if pendingRemoteCommitment != nil { - htlcSets[RemotePendingHtlcSet] = newHtlcSet( - pendingRemoteCommitment.Commitment.Htlcs, - ) - } - - return NewChannelArbitrator( - arbCfg, htlcSets, chanLog, - ), nil -} - -// getArbChannel returns an open channel wrapper for use by channel arbitrators. -func (c *ChainArbitrator) getArbChannel( - channel *channeldb.OpenChannel) *arbChannel { - - return &arbChannel{ - channel: channel, - c: c, - } -} - -// ResolveContract marks a contract as fully resolved within the database. -// This is only to be done once all contracts which were live on the channel -// before hitting the chain have been resolved. -func (c *ChainArbitrator) ResolveContract(chanPoint wire.OutPoint) er.R { - - log.Infof("Marking ChannelPoint(%v) fully resolved", chanPoint) - - // First, we'll we'll mark the channel as fully closed from the PoV of - // the channel source. - err := c.chanSource.MarkChanFullyClosed(&chanPoint) - if err != nil { - log.Errorf("ChainArbitrator: unable to mark ChannelPoint(%v) "+ - "fully closed: %v", chanPoint, err) - return err - } - - // Now that the channel has been marked as fully closed, we'll stop - // both the channel arbitrator and chain watcher for this channel if - // they're still active. 
- var arbLog ArbitratorLog - c.Lock() - chainArb := c.activeChannels[chanPoint] - delete(c.activeChannels, chanPoint) - - chainWatcher := c.activeWatchers[chanPoint] - delete(c.activeWatchers, chanPoint) - c.Unlock() - - if chainArb != nil { - arbLog = chainArb.log - - if err := chainArb.Stop(); err != nil { - log.Warnf("unable to stop ChannelArbitrator(%v): %v", - chanPoint, err) - } - } - if chainWatcher != nil { - if err := chainWatcher.Stop(); err != nil { - log.Warnf("unable to stop ChainWatcher(%v): %v", - chanPoint, err) - } - } - - // Once this has been marked as resolved, we'll wipe the log that the - // channel arbitrator was using to store its persistent state. We do - // this after marking the channel resolved, as otherwise, the - // arbitrator would be re-created, and think it was starting from the - // default state. - if arbLog != nil { - if err := arbLog.WipeHistory(); err != nil { - return err - } - } - - return nil -} - -// Start launches all goroutines that the ChainArbitrator needs to operate. -func (c *ChainArbitrator) Start() er.R { - if !atomic.CompareAndSwapInt32(&c.started, 0, 1) { - return nil - } - - log.Tracef("Starting ChainArbitrator") - - // First, we'll fetch all the channels that are still open, in order to - // collect them within our set of active contracts. - openChannels, err := c.chanSource.FetchAllChannels() - if err != nil { - return err - } - - if len(openChannels) > 0 { - log.Infof("Creating ChannelArbitrators for %v active channels", - len(openChannels)) - } - - // For each open channel, we'll configure then launch a corresponding - // ChannelArbitrator. - for _, channel := range openChannels { - chanPoint := channel.FundingOutpoint - channel := channel - - // First, we'll create an active chainWatcher for this channel - // to ensure that we detect any relevant on chain events. 
- chainWatcher, err := newChainWatcher( - chainWatcherConfig{ - chanState: channel, - notifier: c.cfg.Notifier, - signer: c.cfg.Signer, - isOurAddr: c.cfg.IsOurAddress, - contractBreach: func(retInfo *lnwallet.BreachRetribution) er.R { - return c.cfg.ContractBreach(chanPoint, retInfo) - }, - extractStateNumHint: lnwallet.GetStateNumHint, - }, - ) - if err != nil { - return err - } - - c.activeWatchers[chanPoint] = chainWatcher - channelArb, err := newActiveChannelArbitrator( - channel, c, chainWatcher.SubscribeChannelEvents(), - ) - if err != nil { - return err - } - - c.activeChannels[chanPoint] = channelArb - - // Republish any closing transactions for this channel. - err = c.publishClosingTxs(channel) - if err != nil { - return err - } - } - - // In addition to the channels that we know to be open, we'll also - // launch arbitrators to finishing resolving any channels that are in - // the pending close state. - closingChannels, err := c.chanSource.FetchClosedChannels(true) - if err != nil { - return err - } - - if len(closingChannels) > 0 { - log.Infof("Creating ChannelArbitrators for %v closing channels", - len(closingChannels)) - } - - // Next, for each channel is the closing state, we'll launch a - // corresponding more restricted resolver, as we don't have to watch - // the chain any longer, only resolve the contracts on the confirmed - // commitment. - for _, closeChanInfo := range closingChannels { - // We can leave off the CloseContract and ForceCloseChan - // methods as the channel is already closed at this point. 
- chanPoint := closeChanInfo.ChanPoint - arbCfg := ChannelArbitratorConfig{ - ChanPoint: chanPoint, - ShortChanID: closeChanInfo.ShortChanID, - ChainArbitratorConfig: c.cfg, - ChainEvents: &ChainEventSubscription{}, - IsPendingClose: true, - ClosingHeight: closeChanInfo.CloseHeight, - CloseType: closeChanInfo.CloseType, - PutResolverReport: func(tx kvdb.RwTx, - report *channeldb.ResolverReport) er.R { - - return c.chanSource.PutResolverReport( - tx, c.cfg.ChainHash, &chanPoint, report, - ) - }, - } - chanLog, err := newBoltArbitratorLog( - c.chanSource.Backend, arbCfg, c.cfg.ChainHash, chanPoint, - ) - if err != nil { - return err - } - arbCfg.MarkChannelResolved = func() er.R { - return c.ResolveContract(chanPoint) - } - - // We can also leave off the set of HTLC's here as since the - // channel is already in the process of being full resolved, no - // new HTLC's will be added. - c.activeChannels[chanPoint] = NewChannelArbitrator( - arbCfg, nil, chanLog, - ) - } - - // Now, we'll start all chain watchers in parallel to shorten start up - // duration. In neutrino mode, this allows spend registrations to take - // advantage of batch spend reporting, instead of doing a single rescan - // per chain watcher. - // - // NOTE: After this point, we Stop the chain arb to ensure that any - // lingering goroutines are cleaned up before exiting. - watcherErrs := make(chan er.R, len(c.activeWatchers)) - var wg sync.WaitGroup - for _, watcher := range c.activeWatchers { - wg.Add(1) - go func(w *chainWatcher) { - defer wg.Done() - select { - case watcherErrs <- w.Start(): - case <-c.quit: - watcherErrs <- ErrChainArbExiting.Default() - } - }(watcher) - } - - // Once all chain watchers have been started, seal the err chan to - // signal the end of the err stream. - go func() { - wg.Wait() - close(watcherErrs) - }() - - // stopAndLog is a helper function which shuts down the chain arb and - // logs errors if they occur. 
- stopAndLog := func() { - if err := c.Stop(); err != nil { - log.Errorf("ChainArbitrator could not shutdown: %v", err) - } - } - - // Handle all errors returned from spawning our chain watchers. If any - // of them failed, we will stop the chain arb to shutdown any active - // goroutines. - for err := range watcherErrs { - if err != nil { - stopAndLog() - return err - } - } - - // Before we start all of our arbitrators, we do a preliminary state - // lookup so that we can combine all of these lookups in a single db - // transaction. - var startStates map[wire.OutPoint]*chanArbStartState - - err = kvdb.View(c.chanSource, func(tx walletdb.ReadTx) er.R { - for _, arbitrator := range c.activeChannels { - startState, err := arbitrator.getStartState(tx) - if err != nil { - return err - } - - startStates[arbitrator.cfg.ChanPoint] = startState - } - - return nil - }, func() { - startStates = make( - map[wire.OutPoint]*chanArbStartState, - len(c.activeChannels), - ) - }) - if err != nil { - stopAndLog() - return err - } - - // Launch all the goroutines for each arbitrator so they can carry out - // their duties. - for _, arbitrator := range c.activeChannels { - startState, ok := startStates[arbitrator.cfg.ChanPoint] - if !ok { - stopAndLog() - return er.Errorf("arbitrator: %v has no start state", - arbitrator.cfg.ChanPoint) - } - - if err := arbitrator.Start(startState); err != nil { - stopAndLog() - return err - } - } - - // Subscribe to a single stream of block epoch notifications that we - // will dispatch to all active arbitrators. - blockEpoch, err := c.cfg.Notifier.RegisterBlockEpochNtfn(nil) - if err != nil { - return err - } - - // Start our goroutine which will dispatch blocks to each arbitrator. - c.wg.Add(1) - go func() { - defer c.wg.Done() - c.dispatchBlocks(blockEpoch) - }() - - // TODO(roasbeef): eventually move all breach watching here - - return nil -} - -// blockRecipient contains the information we need to dispatch a block to a -// channel arbitrator. 
-type blockRecipient struct { - // chanPoint is the funding outpoint of the channel. - chanPoint wire.OutPoint - - // blocks is the channel that new block heights are sent into. This - // channel should be sufficiently buffered as to not block the sender. - blocks chan<- int32 - - // quit is closed if the receiving entity is shutting down. - quit chan struct{} -} - -// dispatchBlocks consumes a block epoch notification stream and dispatches -// blocks to each of the chain arb's active channel arbitrators. This function -// must be run in a goroutine. -func (c *ChainArbitrator) dispatchBlocks( - blockEpoch *chainntnfs.BlockEpochEvent) { - - // getRecipients is a helper function which acquires the chain arb - // lock and returns a set of block recipients which can be used to - // dispatch blocks. - getRecipients := func() []blockRecipient { - c.Lock() - blocks := make([]blockRecipient, 0, len(c.activeChannels)) - for _, channel := range c.activeChannels { - blocks = append(blocks, blockRecipient{ - chanPoint: channel.cfg.ChanPoint, - blocks: channel.blocks, - quit: channel.quit, - }) - } - c.Unlock() - - return blocks - } - - // On exit, cancel our blocks subscription and close each block channel - // so that the arbitrators know they will no longer be receiving blocks. - defer func() { - blockEpoch.Cancel() - - recipients := getRecipients() - for _, recipient := range recipients { - close(recipient.blocks) - } - }() - - // Consume block epochs until we receive the instruction to shutdown. - for { - select { - // Consume block epochs, exiting if our subscription is - // terminated. - case block, ok := <-blockEpoch.Epochs: - if !ok { - log.Trace("dispatchBlocks block epoch " + - "cancelled") - return - } - - // Get the set of currently active channels block - // subscription channels and dispatch the block to - // each. - for _, recipient := range getRecipients() { - select { - // Deliver the block to the arbitrator. 
- case recipient.blocks <- block.Height: - - // If the recipient is shutting down, exit - // without delivering the block. This may be - // the case when two blocks are mined in quick - // succession, and the arbitrator resolves - // after the first block, and does not need to - // consume the second block. - case <-recipient.quit: - log.Debugf("channel: %v exit without "+ - "receiving block: %v", - recipient.chanPoint, - block.Height) - - // If the chain arb is shutting down, we don't - // need to deliver any more blocks (everything - // will be shutting down). - case <-c.quit: - return - } - } - - // Exit if the chain arbitrator is shutting down. - case <-c.quit: - return - } - } -} - -// publishClosingTxs will load any stored cooperative or unilater closing -// transactions and republish them. This helps ensure propagation of the -// transactions in the event that prior publications failed. -func (c *ChainArbitrator) publishClosingTxs( - channel *channeldb.OpenChannel) er.R { - - // If the channel has had its unilateral close broadcasted already, - // republish it in case it didn't propagate. - if channel.HasChanStatus(channeldb.ChanStatusCommitBroadcasted) { - err := c.rebroadcast( - channel, channeldb.ChanStatusCommitBroadcasted, - ) - if err != nil { - return err - } - } - - // If the channel has had its cooperative close broadcasted - // already, republish it in case it didn't propagate. - if channel.HasChanStatus(channeldb.ChanStatusCoopBroadcasted) { - err := c.rebroadcast( - channel, channeldb.ChanStatusCoopBroadcasted, - ) - if err != nil { - return err - } - } - - return nil -} - -// rebroadcast is a helper method which will republish the unilateral or -// cooperative close transaction or a channel in a particular state. -// -// NOTE: There is no risk to caling this method if the channel isn't in either -// CommimentBroadcasted or CoopBroadcasted, but the logs will be misleading. 
-func (c *ChainArbitrator) rebroadcast(channel *channeldb.OpenChannel, - state channeldb.ChannelStatus) er.R { - - chanPoint := channel.FundingOutpoint - - var ( - closeTx *wire.MsgTx - kind string - err er.R - ) - switch state { - case channeldb.ChanStatusCommitBroadcasted: - kind = "force" - closeTx, err = channel.BroadcastedCommitment() - - case channeldb.ChanStatusCoopBroadcasted: - kind = "coop" - closeTx, err = channel.BroadcastedCooperative() - - default: - return er.Errorf("unknown closing state: %v", state) - } - - switch { - - // This can happen for channels that had their closing tx published - // before we started storing it to disk. - case channeldb.ErrNoCloseTx.Is(err): - log.Warnf("Channel %v is in state %v, but no %s closing tx "+ - "to re-publish...", chanPoint, state, kind) - return nil - - case err != nil: - return err - } - - log.Infof("Re-publishing %s close tx(%v) for channel %v", - kind, closeTx.TxHash(), chanPoint) - - label := labels.MakeLabel( - labels.LabelTypeChannelClose, &channel.ShortChannelID, - ) - err = c.cfg.PublishTx(closeTx, label) - if err != nil && !lnwallet.ErrDoubleSpend.Is(err) { - log.Warnf("Unable to broadcast %s close tx(%v): %v", - kind, closeTx.TxHash(), err) - } - - return nil -} - -// Stop signals the ChainArbitrator to trigger a graceful shutdown. Any active -// channel arbitrators will be signalled to exit, and this method will block -// until they've all exited. -func (c *ChainArbitrator) Stop() er.R { - if !atomic.CompareAndSwapInt32(&c.stopped, 0, 1) { - return nil - } - - log.Infof("Stopping ChainArbitrator") - - close(c.quit) - - var ( - activeWatchers = make(map[wire.OutPoint]*chainWatcher) - activeChannels = make(map[wire.OutPoint]*ChannelArbitrator) - ) - - // Copy the current set of active watchers and arbitrators to shutdown. - // We don't want to hold the lock when shutting down each watcher or - // arbitrator individually, as they may need to acquire this mutex. 
- c.Lock() - for chanPoint, watcher := range c.activeWatchers { - activeWatchers[chanPoint] = watcher - } - for chanPoint, arbitrator := range c.activeChannels { - activeChannels[chanPoint] = arbitrator - } - c.Unlock() - - for chanPoint, watcher := range activeWatchers { - log.Tracef("Attempting to stop ChainWatcher(%v)", - chanPoint) - - if err := watcher.Stop(); err != nil { - log.Errorf("unable to stop watcher for "+ - "ChannelPoint(%v): %v", chanPoint, err) - } - } - for chanPoint, arbitrator := range activeChannels { - log.Tracef("Attempting to stop ChannelArbitrator(%v)", - chanPoint) - - if err := arbitrator.Stop(); err != nil { - log.Errorf("unable to stop arbitrator for "+ - "ChannelPoint(%v): %v", chanPoint, err) - } - } - - c.wg.Wait() - - return nil -} - -// ContractUpdate is a message packages the latest set of active HTLCs on a -// commitment, and also identifies which commitment received a new set of -// HTLCs. -type ContractUpdate struct { - // HtlcKey identifies which commitment the HTLCs below are present on. - HtlcKey HtlcSetKey - - // Htlcs are the of active HTLCs on the commitment identified by the - // above HtlcKey. - Htlcs []channeldb.HTLC -} - -// ContractSignals wraps the two signals that affect the state of a channel -// being watched by an arbitrator. The two signals we care about are: the -// channel has a new set of HTLC's, and the remote party has just broadcast -// their version of the commitment transaction. -type ContractSignals struct { - // HtlcUpdates is a channel that the link will use to update the - // designated channel arbitrator when the set of HTLCs on any valid - // commitment changes. - HtlcUpdates chan *ContractUpdate - - // ShortChanID is the up to date short channel ID for a contract. This - // can change either if when the contract was added it didn't yet have - // a stable identifier, or in the case of a reorg. 
- ShortChanID lnwire.ShortChannelID -} - -// UpdateContractSignals sends a set of active, up to date contract signals to -// the ChannelArbitrator which is has been assigned to the channel infield by -// the passed channel point. -func (c *ChainArbitrator) UpdateContractSignals(chanPoint wire.OutPoint, - signals *ContractSignals) er.R { - - log.Infof("Attempting to update ContractSignals for ChannelPoint(%v)", - chanPoint) - - c.Lock() - arbitrator, ok := c.activeChannels[chanPoint] - c.Unlock() - if !ok { - return er.Errorf("unable to find arbitrator") - } - - arbitrator.UpdateContractSignals(signals) - - return nil -} - -// GetChannelArbitrator safely returns the channel arbitrator for a given -// channel outpoint. -func (c *ChainArbitrator) GetChannelArbitrator(chanPoint wire.OutPoint) ( - *ChannelArbitrator, er.R) { - - c.Lock() - arbitrator, ok := c.activeChannels[chanPoint] - c.Unlock() - if !ok { - return nil, er.Errorf("unable to find arbitrator") - } - - return arbitrator, nil -} - -// forceCloseReq is a request sent from an outside sub-system to the arbitrator -// that watches a particular channel to broadcast the commitment transaction, -// and enter the resolution phase of the channel. -type forceCloseReq struct { - // errResp is a channel that will be sent upon either in the case of - // force close success (nil error), or in the case on an error. - // - // NOTE; This channel MUST be buffered. - errResp chan er.R - - // closeTx is a channel that carries the transaction which ultimately - // closed out the channel. - closeTx chan *wire.MsgTx -} - -// ForceCloseContract attempts to force close the channel infield by the passed -// channel point. A force close will immediately terminate the contract, -// causing it to enter the resolution phase. If the force close was successful, -// then the force close transaction itself will be returned. -// -// TODO(roasbeef): just return the summary itself? 
-func (c *ChainArbitrator) ForceCloseContract(chanPoint wire.OutPoint) (*wire.MsgTx, er.R) { - c.Lock() - arbitrator, ok := c.activeChannels[chanPoint] - c.Unlock() - if !ok { - return nil, er.Errorf("unable to find arbitrator") - } - - log.Infof("Attempting to force close ChannelPoint(%v)", chanPoint) - - // Before closing, we'll attempt to send a disable update for the - // channel. We do so before closing the channel as otherwise the current - // edge policy won't be retrievable from the graph. - if err := c.cfg.DisableChannel(chanPoint); err != nil { - log.Warnf("Unable to disable channel %v on "+ - "close: %v", chanPoint, err) - } - - errChan := make(chan er.R, 1) - respChan := make(chan *wire.MsgTx, 1) - - // With the channel found, and the request crafted, we'll send over a - // force close request to the arbitrator that watches this channel. - select { - case arbitrator.forceCloseReqs <- &forceCloseReq{ - errResp: errChan, - closeTx: respChan, - }: - case <-c.quit: - return nil, ErrChainArbExiting.Default() - } - - // We'll await two responses: the error response, and the transaction - // that closed out the channel. - select { - case err := <-errChan: - if err != nil { - return nil, err - } - case <-c.quit: - return nil, ErrChainArbExiting.Default() - } - - var closeTx *wire.MsgTx - select { - case closeTx = <-respChan: - case <-c.quit: - return nil, ErrChainArbExiting.Default() - } - - return closeTx, nil -} - -// WatchNewChannel sends the ChainArbitrator a message to create a -// ChannelArbitrator tasked with watching over a new channel. Once a new -// channel has finished its final funding flow, it should be registered with -// the ChainArbitrator so we can properly react to any on-chain events. 
-func (c *ChainArbitrator) WatchNewChannel(newChan *channeldb.OpenChannel) er.R { - c.Lock() - defer c.Unlock() - - log.Infof("Creating new ChannelArbitrator for ChannelPoint(%v)", - newChan.FundingOutpoint) - - // If we're already watching this channel, then we'll ignore this - // request. - chanPoint := newChan.FundingOutpoint - if _, ok := c.activeChannels[chanPoint]; ok { - return nil - } - - // First, also create an active chainWatcher for this channel to ensure - // that we detect any relevant on chain events. - chainWatcher, err := newChainWatcher( - chainWatcherConfig{ - chanState: newChan, - notifier: c.cfg.Notifier, - signer: c.cfg.Signer, - isOurAddr: c.cfg.IsOurAddress, - contractBreach: func(retInfo *lnwallet.BreachRetribution) er.R { - return c.cfg.ContractBreach(chanPoint, retInfo) - }, - extractStateNumHint: lnwallet.GetStateNumHint, - }, - ) - if err != nil { - return err - } - - c.activeWatchers[newChan.FundingOutpoint] = chainWatcher - - // We'll also create a new channel arbitrator instance using this new - // channel, and our internal state. - channelArb, err := newActiveChannelArbitrator( - newChan, c, chainWatcher.SubscribeChannelEvents(), - ) - if err != nil { - return err - } - - // With the arbitrator created, we'll add it to our set of active - // arbitrators, then launch it. - c.activeChannels[chanPoint] = channelArb - - if err := channelArb.Start(nil); err != nil { - return err - } - - return chainWatcher.Start() -} - -// SubscribeChannelEvents returns a new active subscription for the set of -// possible on-chain events for a particular channel. The struct can be used by -// callers to be notified whenever an event that changes the state of the -// channel on-chain occurs. -func (c *ChainArbitrator) SubscribeChannelEvents( - chanPoint wire.OutPoint) (*ChainEventSubscription, er.R) { - - // First, we'll attempt to look up the active watcher for this channel. - // If we can't find it, then we'll return an error back to the caller. 
- watcher, ok := c.activeWatchers[chanPoint] - if !ok { - return nil, er.Errorf("unable to find watcher for: %v", - chanPoint) - } - - // With the watcher located, we'll request for it to create a new chain - // event subscription client. - return watcher.SubscribeChannelEvents(), nil -} - -// TODO(roasbeef): arbitration reports -// * types: contested, waiting for success conf, etc diff --git a/lnd/contractcourt/chain_arbitrator_test.go b/lnd/contractcourt/chain_arbitrator_test.go deleted file mode 100644 index 67cceec5..00000000 --- a/lnd/contractcourt/chain_arbitrator_test.go +++ /dev/null @@ -1,247 +0,0 @@ -package contractcourt - -import ( - "io/ioutil" - "net" - "os" - "testing" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/lnd/chainntnfs" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/clock" - "github.com/pkt-cash/pktd/lnd/lntest/mock" - "github.com/pkt-cash/pktd/lnd/lnwallet" - "github.com/pkt-cash/pktd/wire" -) - -// TestChainArbitratorRepulishCloses tests that the chain arbitrator will -// republish closing transactions for channels marked CommitementBroadcast or -// CoopBroadcast in the database at startup. -func TestChainArbitratorRepublishCloses(t *testing.T) { - t.Parallel() - - tempPath, errr := ioutil.TempDir("", "testdb") - if errr != nil { - t.Fatal(errr) - } - defer os.RemoveAll(tempPath) - - db, err := channeldb.Open(tempPath) - if err != nil { - t.Fatal(err) - } - defer db.Close() - - // Create 10 test channels and sync them to the database. - const numChans = 10 - var channels []*channeldb.OpenChannel - for i := 0; i < numChans; i++ { - lChannel, _, cleanup, err := lnwallet.CreateTestChannels( - channeldb.SingleFunderTweaklessBit, - ) - if err != nil { - t.Fatal(err) - } - defer cleanup() - - channel := lChannel.State() - - // We manually set the db here to make sure all channels are - // synced to the same db. 
- channel.Db = db - - addr := &net.TCPAddr{ - IP: net.ParseIP("127.0.0.1"), - Port: 18556, - } - if err := channel.SyncPending(addr, 101); err != nil { - t.Fatal(err) - } - - channels = append(channels, channel) - } - - // Mark half of the channels as commitment broadcasted. - for i := 0; i < numChans/2; i++ { - closeTx := channels[i].FundingTxn.Copy() - closeTx.TxIn[0].PreviousOutPoint = channels[i].FundingOutpoint - err := channels[i].MarkCommitmentBroadcasted(closeTx, true) - if err != nil { - t.Fatal(err) - } - - err = channels[i].MarkCoopBroadcasted(closeTx, true) - if err != nil { - t.Fatal(err) - } - } - - // We keep track of the transactions published by the ChainArbitrator - // at startup. - published := make(map[chainhash.Hash]int) - - chainArbCfg := ChainArbitratorConfig{ - ChainIO: &mock.ChainIO{}, - Notifier: &mock.ChainNotifier{ - SpendChan: make(chan *chainntnfs.SpendDetail), - EpochChan: make(chan *chainntnfs.BlockEpoch), - ConfChan: make(chan *chainntnfs.TxConfirmation), - }, - PublishTx: func(tx *wire.MsgTx, _ string) er.R { - published[tx.TxHash()]++ - return nil - }, - Clock: clock.NewDefaultClock(), - } - chainArb := NewChainArbitrator( - chainArbCfg, db, - ) - - if err := chainArb.Start(); err != nil { - t.Fatal(err) - } - defer func() { - if err := chainArb.Stop(); err != nil { - t.Fatal(err) - } - }() - - // Half of the channels should have had their closing tx re-published. - if len(published) != numChans/2 { - t.Fatalf("expected %d re-published transactions, got %d", - numChans/2, len(published)) - } - - // And make sure the published transactions are correct, and unique. - for i := 0; i < numChans/2; i++ { - closeTx := channels[i].FundingTxn.Copy() - closeTx.TxIn[0].PreviousOutPoint = channels[i].FundingOutpoint - - count, ok := published[closeTx.TxHash()] - if !ok { - t.Fatalf("closing tx not re-published") - } - - // We expect one coop close and one force close. 
- if count != 2 { - t.Fatalf("expected 2 closing txns, only got %d", count) - } - - delete(published, closeTx.TxHash()) - } - - if len(published) != 0 { - t.Fatalf("unexpected tx published") - } -} - -// TestResolveContract tests that if we have an active channel being watched by -// the chain arb, then a call to ResolveContract will mark the channel as fully -// closed in the database, and also clean up all arbitrator state. -func TestResolveContract(t *testing.T) { - t.Parallel() - - // To start with, we'll create a new temp DB for the duration of this - // test. - tempPath, errr := ioutil.TempDir("", "testdb") - if errr != nil { - t.Fatalf("unable to make temp dir: %v", errr) - } - defer os.RemoveAll(tempPath) - db, err := channeldb.Open(tempPath) - if err != nil { - t.Fatalf("unable to open db: %v", err) - } - defer db.Close() - - // With the DB created, we'll make a new channel, and mark it as - // pending open within the database. - newChannel, _, cleanup, err := lnwallet.CreateTestChannels( - channeldb.SingleFunderTweaklessBit, - ) - if err != nil { - t.Fatalf("unable to make new test channel: %v", err) - } - defer cleanup() - channel := newChannel.State() - channel.Db = db - addr := &net.TCPAddr{ - IP: net.ParseIP("127.0.0.1"), - Port: 18556, - } - if err := channel.SyncPending(addr, 101); err != nil { - t.Fatalf("unable to write channel to db: %v", err) - } - - // With the channel inserted into the database, we'll now create a new - // chain arbitrator that should pick up these new channels and launch - // resolver for them. 
- chainArbCfg := ChainArbitratorConfig{ - ChainIO: &mock.ChainIO{}, - Notifier: &mock.ChainNotifier{ - SpendChan: make(chan *chainntnfs.SpendDetail), - EpochChan: make(chan *chainntnfs.BlockEpoch), - ConfChan: make(chan *chainntnfs.TxConfirmation), - }, - PublishTx: func(tx *wire.MsgTx, _ string) er.R { - return nil - }, - Clock: clock.NewDefaultClock(), - } - chainArb := NewChainArbitrator( - chainArbCfg, db, - ) - if err := chainArb.Start(); err != nil { - t.Fatal(err) - } - defer func() { - if err := chainArb.Stop(); err != nil { - t.Fatal(err) - } - }() - - channelArb := chainArb.activeChannels[channel.FundingOutpoint] - - // While the resolver are active, we'll now remove the channel from the - // database (mark is as closed). - err = db.AbandonChannel(&channel.FundingOutpoint, 4) - if err != nil { - t.Fatalf("unable to remove channel: %v", err) - } - - // With the channel removed, we'll now manually call ResolveContract. - // This stimulates needing to remove a channel from the chain arb due - // to any possible external consistency issues. - err = chainArb.ResolveContract(channel.FundingOutpoint) - if err != nil { - t.Fatalf("unable to resolve contract: %v", err) - } - - // The shouldn't be an active chain watcher or channel arb for this - // channel. - if len(chainArb.activeChannels) != 0 { - t.Fatalf("expected zero active channels, instead have %v", - len(chainArb.activeChannels)) - } - if len(chainArb.activeWatchers) != 0 { - t.Fatalf("expected zero active watchers, instead have %v", - len(chainArb.activeWatchers)) - } - - // At this point, the channel's arbitrator log should also be empty as - // well. - _, err = channelArb.log.FetchContractResolutions() - if !errScopeBucketNoExist.Is(err) { - t.Fatalf("channel arb log state should have been "+ - "removed: %v", err) - } - - // If we attempt to call this method again, then we should get a nil - // error, as there is no more state to be cleaned up. 
- err = chainArb.ResolveContract(channel.FundingOutpoint) - if err != nil { - t.Fatalf("second resolve call shouldn't fail: %v", err) - } -} diff --git a/lnd/contractcourt/chain_watcher.go b/lnd/contractcourt/chain_watcher.go deleted file mode 100644 index 88f33b2f..00000000 --- a/lnd/contractcourt/chain_watcher.go +++ /dev/null @@ -1,1104 +0,0 @@ -package contractcourt - -import ( - "bytes" - "sync" - "sync/atomic" - "time" - - "github.com/davecgh/go-spew/spew" - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/chaincfg" - "github.com/pkt-cash/pktd/lnd/chainntnfs" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/input" - "github.com/pkt-cash/pktd/lnd/lnwallet" - "github.com/pkt-cash/pktd/lnd/shachain" - "github.com/pkt-cash/pktd/pktlog/log" - "github.com/pkt-cash/pktd/txscript" - "github.com/pkt-cash/pktd/wire" - "github.com/pkt-cash/pktd/wire/constants" -) - -const ( - // minCommitPointPollTimeout is the minimum time we'll wait before - // polling the database for a channel's commitpoint. - minCommitPointPollTimeout = 1 * time.Second - - // maxCommitPointPollTimeout is the maximum time we'll wait before - // polling the database for a channel's commitpoint. - maxCommitPointPollTimeout = 10 * time.Minute -) - -// LocalUnilateralCloseInfo encapsulates all the information we need to act on -// a local force close that gets confirmed. -type LocalUnilateralCloseInfo struct { - *chainntnfs.SpendDetail - *lnwallet.LocalForceCloseSummary - *channeldb.ChannelCloseSummary - - // CommitSet is the set of known valid commitments at the time the - // remote party's commitment hit the chain. - CommitSet CommitSet -} - -// CooperativeCloseInfo encapsulates all the information we need to act on a -// cooperative close that gets confirmed. 
-type CooperativeCloseInfo struct { - *channeldb.ChannelCloseSummary -} - -// RemoteUnilateralCloseInfo wraps the normal UnilateralCloseSummary to couple -// the CommitSet at the time of channel closure. -type RemoteUnilateralCloseInfo struct { - *lnwallet.UnilateralCloseSummary - - // CommitSet is the set of known valid commitments at the time the - // remote party's commitment hit the chain. - CommitSet CommitSet -} - -// CommitSet is a collection of the set of known valid commitments at a given -// instant. If ConfCommitKey is set, then the commitment identified by the -// HtlcSetKey has hit the chain. This struct will be used to examine all live -// HTLCs to determine if any additional actions need to be made based on the -// remote party's commitments. -type CommitSet struct { - // ConfCommitKey if non-nil, identifies the commitment that was - // confirmed in the chain. - ConfCommitKey *HtlcSetKey - - // HtlcSets stores the set of all known active HTLC for each active - // commitment at the time of channel closure. - HtlcSets map[HtlcSetKey][]channeldb.HTLC -} - -// IsEmpty returns true if there are no HTLCs at all within all commitments -// that are a part of this commitment diff. -func (c *CommitSet) IsEmpty() bool { - if c == nil { - return true - } - - for _, htlcs := range c.HtlcSets { - if len(htlcs) != 0 { - return false - } - } - - return true -} - -// toActiveHTLCSets returns the set of all active HTLCs across all commitment -// transactions. -func (c *CommitSet) toActiveHTLCSets() map[HtlcSetKey]htlcSet { - htlcSets := make(map[HtlcSetKey]htlcSet) - - for htlcSetKey, htlcs := range c.HtlcSets { - htlcSets[htlcSetKey] = newHtlcSet(htlcs) - } - - return htlcSets -} - -// ChainEventSubscription is a struct that houses a subscription to be notified -// for any on-chain events related to a channel. There are three types of -// possible on-chain events: a cooperative channel closure, a unilateral -// channel closure, and a channel breach. 
The fourth type: a force close is -// locally initiated, so we don't provide any event stream for said event. -type ChainEventSubscription struct { - // ChanPoint is that channel that chain events will be dispatched for. - ChanPoint wire.OutPoint - - // RemoteUnilateralClosure is a channel that will be sent upon in the - // event that the remote party's commitment transaction is confirmed. - RemoteUnilateralClosure chan *RemoteUnilateralCloseInfo - - // LocalUnilateralClosure is a channel that will be sent upon in the - // event that our commitment transaction is confirmed. - LocalUnilateralClosure chan *LocalUnilateralCloseInfo - - // CooperativeClosure is a signal that will be sent upon once a - // cooperative channel closure has been detected confirmed. - CooperativeClosure chan *CooperativeCloseInfo - - // ContractBreach is a channel that will be sent upon if we detect a - // contract breach. The struct sent across the channel contains all the - // material required to bring the cheating channel peer to justice. - ContractBreach chan *lnwallet.BreachRetribution - - // Cancel cancels the subscription to the event stream for a particular - // channel. This method should be called once the caller no longer needs to - // be notified of any on-chain events for a particular channel. - Cancel func() -} - -// chainWatcherConfig encapsulates all the necessary functions and interfaces -// needed to watch and act on on-chain events for a particular channel. -type chainWatcherConfig struct { - // chanState is a snapshot of the persistent state of the channel that - // we're watching. In the event of an on-chain event, we'll query the - // database to ensure that we act using the most up to date state. - chanState *channeldb.OpenChannel - - // notifier is a reference to the channel notifier that we'll use to be - // notified of output spends and when transactions are confirmed. 
- notifier chainntnfs.ChainNotifier - - // signer is the main signer instances that will be responsible for - // signing any HTLC and commitment transaction generated by the state - // machine. - signer input.Signer - - // contractBreach is a method that will be called by the watcher if it - // detects that a contract breach transaction has been confirmed. Only - // when this method returns with a non-nil error it will be safe to mark - // the channel as pending close in the database. - contractBreach func(*lnwallet.BreachRetribution) er.R - - // isOurAddr is a function that returns true if the passed address is - // known to us. - isOurAddr func(btcutil.Address) bool - - // extractStateNumHint extracts the encoded state hint using the passed - // obfuscater. This is used by the chain watcher to identify which - // state was broadcast and confirmed on-chain. - extractStateNumHint func(*wire.MsgTx, [lnwallet.StateHintSize]byte) uint64 -} - -// chainWatcher is a system that's assigned to every active channel. The duty -// of this system is to watch the chain for spends of the channels chan point. -// If a spend is detected then with chain watcher will notify all subscribers -// that the channel has been closed, and also give them the materials necessary -// to sweep the funds of the channel on chain eventually. -type chainWatcher struct { - started int32 // To be used atomically. - stopped int32 // To be used atomically. - - quit chan struct{} - wg sync.WaitGroup - - cfg chainWatcherConfig - - // stateHintObfuscator is a 48-bit state hint that's used to obfuscate - // the current state number on the commitment transactions. - stateHintObfuscator [lnwallet.StateHintSize]byte - - // All the fields below are protected by this mutex. - sync.Mutex - - // clientID is an ephemeral counter used to keep track of each - // individual client subscription. 
- clientID uint64 - - // clientSubscriptions is a map that keeps track of all the active - // client subscriptions for events related to this channel. - clientSubscriptions map[uint64]*ChainEventSubscription -} - -// newChainWatcher returns a new instance of a chainWatcher for a channel given -// the chan point to watch, and also a notifier instance that will allow us to -// detect on chain events. -func newChainWatcher(cfg chainWatcherConfig) (*chainWatcher, er.R) { - // In order to be able to detect the nature of a potential channel - // closure we'll need to reconstruct the state hint bytes used to - // obfuscate the commitment state number encoded in the lock time and - // sequence fields. - var stateHint [lnwallet.StateHintSize]byte - chanState := cfg.chanState - if chanState.IsInitiator { - stateHint = lnwallet.DeriveStateHintObfuscator( - chanState.LocalChanCfg.PaymentBasePoint.PubKey, - chanState.RemoteChanCfg.PaymentBasePoint.PubKey, - ) - } else { - stateHint = lnwallet.DeriveStateHintObfuscator( - chanState.RemoteChanCfg.PaymentBasePoint.PubKey, - chanState.LocalChanCfg.PaymentBasePoint.PubKey, - ) - } - - return &chainWatcher{ - cfg: cfg, - stateHintObfuscator: stateHint, - quit: make(chan struct{}), - clientSubscriptions: make(map[uint64]*ChainEventSubscription), - }, nil -} - -// Start starts all goroutines that the chainWatcher needs to perform its -// duties. -func (c *chainWatcher) Start() er.R { - if !atomic.CompareAndSwapInt32(&c.started, 0, 1) { - return nil - } - - chanState := c.cfg.chanState - log.Debugf("Starting chain watcher for ChannelPoint(%v)", - chanState.FundingOutpoint) - - // First, we'll register for a notification to be dispatched if the - // funding output is spent. - fundingOut := &chanState.FundingOutpoint - - // As a height hint, we'll try to use the opening height, but if the - // channel isn't yet open, then we'll use the height it was broadcast - // at. 
- heightHint := c.cfg.chanState.ShortChanID().BlockHeight - if heightHint == 0 { - heightHint = chanState.FundingBroadcastHeight - } - - localKey := chanState.LocalChanCfg.MultiSigKey.PubKey.SerializeCompressed() - remoteKey := chanState.RemoteChanCfg.MultiSigKey.PubKey.SerializeCompressed() - multiSigScript, err := input.GenMultiSigScript( - localKey, remoteKey, - ) - if err != nil { - return err - } - pkScript, err := input.WitnessScriptHash(multiSigScript) - if err != nil { - return err - } - - spendNtfn, err := c.cfg.notifier.RegisterSpendNtfn( - fundingOut, pkScript, heightHint, - ) - if err != nil { - return err - } - - // With the spend notification obtained, we'll now dispatch the - // closeObserver which will properly react to any changes. - c.wg.Add(1) - go c.closeObserver(spendNtfn) - - return nil -} - -// Stop signals the close observer to gracefully exit. -func (c *chainWatcher) Stop() er.R { - if !atomic.CompareAndSwapInt32(&c.stopped, 0, 1) { - return nil - } - - close(c.quit) - - c.wg.Wait() - - return nil -} - -// SubscribeChannelEvents returns an active subscription to the set of channel -// events for the channel watched by this chain watcher. Once clients no longer -// require the subscription, they should call the Cancel() method to allow the -// watcher to regain those committed resources. 
-func (c *chainWatcher) SubscribeChannelEvents() *ChainEventSubscription { - - c.Lock() - clientID := c.clientID - c.clientID++ - c.Unlock() - - log.Debugf("New ChainEventSubscription(id=%v) for ChannelPoint(%v)", - clientID, c.cfg.chanState.FundingOutpoint) - - sub := &ChainEventSubscription{ - ChanPoint: c.cfg.chanState.FundingOutpoint, - RemoteUnilateralClosure: make(chan *RemoteUnilateralCloseInfo, 1), - LocalUnilateralClosure: make(chan *LocalUnilateralCloseInfo, 1), - CooperativeClosure: make(chan *CooperativeCloseInfo, 1), - ContractBreach: make(chan *lnwallet.BreachRetribution, 1), - Cancel: func() { - c.Lock() - delete(c.clientSubscriptions, clientID) - c.Unlock() - }, - } - - c.Lock() - c.clientSubscriptions[clientID] = sub - c.Unlock() - - return sub -} - -// isOurCommitment returns true if the passed commitSpend is a spend of the -// funding transaction using our commitment transaction (a local force close). -// In order to do this in a state agnostic manner, we'll make our decisions -// based off of only the set of outputs included. -func isOurCommitment(localChanCfg, remoteChanCfg channeldb.ChannelConfig, - commitSpend *chainntnfs.SpendDetail, broadcastStateNum uint64, - revocationProducer shachain.Producer, - chanType channeldb.ChannelType) (bool, er.R) { - - // First, we'll re-derive our commitment point for this state since - // this is what we use to randomize each of the keys for this state. - commitSecret, err := revocationProducer.AtIndex(broadcastStateNum) - if err != nil { - return false, err - } - commitPoint := input.ComputeCommitmentPoint(commitSecret[:]) - - // Now that we have the commit point, we'll derive the tweaked local - // and remote keys for this state. We use our point as only we can - // revoke our own commitment. 
- commitKeyRing := lnwallet.DeriveCommitmentKeys( - commitPoint, true, chanType, &localChanCfg, &remoteChanCfg, - ) - - // With the keys derived, we'll construct the remote script that'll be - // present if they have a non-dust balance on the commitment. - remoteScript, _, err := lnwallet.CommitScriptToRemote( - chanType, commitKeyRing.ToRemoteKey, - ) - if err != nil { - return false, err - } - - // Next, we'll derive our script that includes the revocation base for - // the remote party allowing them to claim this output before the CSV - // delay if we breach. - localScript, err := input.CommitScriptToSelf( - uint32(localChanCfg.CsvDelay), commitKeyRing.ToLocalKey, - commitKeyRing.RevocationKey, - ) - if err != nil { - return false, err - } - localPkScript, err := input.WitnessScriptHash(localScript) - if err != nil { - return false, err - } - - // With all our scripts assembled, we'll examine the outputs of the - // commitment transaction to determine if this is a local force close - // or not. - for _, output := range commitSpend.SpendingTx.TxOut { - pkScript := output.PkScript - - switch { - case bytes.Equal(localPkScript, pkScript): - return true, nil - - case bytes.Equal(remoteScript.PkScript, pkScript): - return true, nil - } - } - - // If neither of these scripts are present, then it isn't a local force - // close. - return false, nil -} - -// chainSet includes all the information we need to dispatch a channel close -// event to any subscribers. -type chainSet struct { - // remoteStateNum is the commitment number of the lowest valid - // commitment the remote party holds from our PoV. This value is used - // to determine if the remote party is playing a state that's behind, - // in line, or ahead of the latest state we know for it. - remoteStateNum uint64 - - // commitSet includes information pertaining to the set of active HTLCs - // on each commitment. - commitSet CommitSet - - // remoteCommit is the current commitment of the remote party. 
- remoteCommit channeldb.ChannelCommitment - - // localCommit is our current commitment. - localCommit channeldb.ChannelCommitment - - // remotePendingCommit points to the dangling commitment of the remote - // party, if it exists. If there's no dangling commitment, then this - // pointer will be nil. - remotePendingCommit *channeldb.ChannelCommitment -} - -// newChainSet creates a new chainSet given the current up to date channel -// state. -func newChainSet(chanState *channeldb.OpenChannel) (*chainSet, er.R) { - // First, we'll grab the current unrevoked commitments for ourselves - // and the remote party. - localCommit, remoteCommit, err := chanState.LatestCommitments() - if err != nil { - return nil, er.Errorf("unable to fetch channel state for "+ - "chan_point=%v", chanState.FundingOutpoint) - } - - log.Debugf("ChannelPoint(%v): local_commit_type=%v, local_commit=%v", - chanState.FundingOutpoint, chanState.ChanType, - spew.Sdump(localCommit)) - log.Debugf("ChannelPoint(%v): remote_commit_type=%v, remote_commit=%v", - chanState.FundingOutpoint, chanState.ChanType, - spew.Sdump(remoteCommit)) - - // Fetch the current known commit height for the remote party, and - // their pending commitment chain tip if it exists. - remoteStateNum := remoteCommit.CommitHeight - remoteChainTip, err := chanState.RemoteCommitChainTip() - if err != nil && !channeldb.ErrNoPendingCommit.Is(err) { - return nil, er.Errorf("unable to obtain chain tip for "+ - "ChannelPoint(%v): %v", - chanState.FundingOutpoint, err) - } - - // Now that we have all the possible valid commitments, we'll make the - // CommitSet the ChannelArbitrator will need in order to carry out its - // duty. 
- commitSet := CommitSet{ - HtlcSets: map[HtlcSetKey][]channeldb.HTLC{ - LocalHtlcSet: localCommit.Htlcs, - RemoteHtlcSet: remoteCommit.Htlcs, - }, - } - - var remotePendingCommit *channeldb.ChannelCommitment - if remoteChainTip != nil { - remotePendingCommit = &remoteChainTip.Commitment - log.Debugf("ChannelPoint(%v): remote_pending_commit_type=%v, "+ - "remote_pending_commit=%v", chanState.FundingOutpoint, - chanState.ChanType, - spew.Sdump(remoteChainTip.Commitment)) - - htlcs := remoteChainTip.Commitment.Htlcs - commitSet.HtlcSets[RemotePendingHtlcSet] = htlcs - } - - // We'll now retrieve the latest state of the revocation store so we - // can populate the revocation information within the channel state - // object that we have. - // - // TODO(roasbeef): mutation is bad mkay - _, err = chanState.RemoteRevocationStore() - if err != nil { - return nil, er.Errorf("unable to fetch revocation state for "+ - "chan_point=%v", chanState.FundingOutpoint) - } - - return &chainSet{ - remoteStateNum: remoteStateNum, - commitSet: commitSet, - localCommit: *localCommit, - remoteCommit: *remoteCommit, - remotePendingCommit: remotePendingCommit, - }, nil -} - -// closeObserver is a dedicated goroutine that will watch for any closes of the -// channel that it's watching on chain. In the event of an on-chain event, the -// close observer will assembled the proper materials required to claim the -// funds of the channel on-chain (if required), then dispatch these as -// notifications to all subscribers. -func (c *chainWatcher) closeObserver(spendNtfn *chainntnfs.SpendEvent) { - defer c.wg.Done() - - log.Infof("Close observer for ChannelPoint(%v) active", - c.cfg.chanState.FundingOutpoint) - - select { - // We've detected a spend of the channel onchain! Depending on the type - // of spend, we'll act accordingly, so we'll examine the spending - // transaction to determine what we should do. 
- // - // TODO(Roasbeef): need to be able to ensure this only triggers - // on confirmation, to ensure if multiple txns are broadcast, we - // act on the one that's timestamped - case commitSpend, ok := <-spendNtfn.Spend: - // If the channel was closed, then this means that the notifier - // exited, so we will as well. - if !ok { - return - } - - // Otherwise, the remote party might have broadcast a prior - // revoked state...!!! - commitTxBroadcast := commitSpend.SpendingTx - - // First, we'll construct the chainset which includes all the - // data we need to dispatch an event to our subscribers about - // this possible channel close event. - chainSet, err := newChainSet(c.cfg.chanState) - if err != nil { - log.Errorf("unable to create commit set: %v", err) - return - } - - // Decode the state hint encoded within the commitment - // transaction to determine if this is a revoked state or not. - obfuscator := c.stateHintObfuscator - broadcastStateNum := c.cfg.extractStateNumHint( - commitTxBroadcast, obfuscator, - ) - - // Based on the output scripts within this commitment, we'll - // determine if this is our commitment transaction or not (a - // self force close). - isOurCommit, err := isOurCommitment( - c.cfg.chanState.LocalChanCfg, - c.cfg.chanState.RemoteChanCfg, commitSpend, - broadcastStateNum, c.cfg.chanState.RevocationProducer, - c.cfg.chanState.ChanType, - ) - if err != nil { - log.Errorf("unable to determine self commit for "+ - "chan_point=%v: %v", - c.cfg.chanState.FundingOutpoint, err) - return - } - - // If this is our commitment transaction, then we can exit here - // as we don't have any further processing we need to do (we - // can't cheat ourselves :p). 
- if isOurCommit { - chainSet.commitSet.ConfCommitKey = &LocalHtlcSet - - if err := c.dispatchLocalForceClose( - commitSpend, chainSet.localCommit, - chainSet.commitSet, - ); err != nil { - log.Errorf("unable to handle local"+ - "close for chan_point=%v: %v", - c.cfg.chanState.FundingOutpoint, err) - } - return - } - - // Next, we'll check to see if this is a cooperative channel - // closure or not. This is characterized by having an input - // sequence number that's finalized. This won't happen with - // regular commitment transactions due to the state hint - // encoding scheme. - if commitTxBroadcast.TxIn[0].Sequence == constants.MaxTxInSequenceNum { - // TODO(roasbeef): rare but possible, need itest case - // for - err := c.dispatchCooperativeClose(commitSpend) - if err != nil { - log.Errorf("unable to handle co op close: %v", err) - } - return - } - - log.Warnf("Unprompted commitment broadcast for "+ - "ChannelPoint(%v) ", c.cfg.chanState.FundingOutpoint) - - // If this channel has been recovered, then we'll modify our - // behavior as it isn't possible for us to close out the - // channel off-chain ourselves. It can only be the remote party - // force closing, or a cooperative closure we signed off on - // before losing data getting confirmed in the chain. - isRecoveredChan := c.cfg.chanState.HasChanStatus( - channeldb.ChanStatusRestored, - ) - - switch { - // If state number spending transaction matches the current - // latest state, then they've initiated a unilateral close. So - // we'll trigger the unilateral close signal so subscribers can - // clean up the state as necessary. 
- case broadcastStateNum == chainSet.remoteStateNum && - !isRecoveredChan: - - log.Infof("Remote party broadcast base set, "+ - "commit_num=%v", chainSet.remoteStateNum) - - chainSet.commitSet.ConfCommitKey = &RemoteHtlcSet - err := c.dispatchRemoteForceClose( - commitSpend, chainSet.remoteCommit, - chainSet.commitSet, - c.cfg.chanState.RemoteCurrentRevocation, - ) - if err != nil { - log.Errorf("unable to handle remote "+ - "close for chan_point=%v: %v", - c.cfg.chanState.FundingOutpoint, err) - } - - // We'll also handle the case of the remote party broadcasting - // their commitment transaction which is one height above ours. - // This case can arise when we initiate a state transition, but - // the remote party has a fail crash _after_ accepting the new - // state, but _before_ sending their signature to us. - case broadcastStateNum == chainSet.remoteStateNum+1 && - chainSet.remotePendingCommit != nil && !isRecoveredChan: - - log.Infof("Remote party broadcast pending set, "+ - "commit_num=%v", chainSet.remoteStateNum+1) - - chainSet.commitSet.ConfCommitKey = &RemotePendingHtlcSet - err := c.dispatchRemoteForceClose( - commitSpend, *chainSet.remotePendingCommit, - chainSet.commitSet, - c.cfg.chanState.RemoteNextRevocation, - ) - if err != nil { - log.Errorf("unable to handle remote "+ - "close for chan_point=%v: %v", - c.cfg.chanState.FundingOutpoint, err) - } - - // If the remote party has broadcasted a state beyond our best - // known state for them, and they don't have a pending - // commitment (we write them to disk before sending out), then - // this means that we've lost data. In this case, we'll enter - // the DLP protocol. Otherwise, if we've recovered our channel - // state from scratch, then we don't know what the precise - // current state is, so we assume either the remote party - // forced closed or we've been breached. In the latter case, - // our tower will take care of us. 
- case broadcastStateNum > chainSet.remoteStateNum || isRecoveredChan: - log.Warnf("Remote node broadcast state #%v, "+ - "which is more than 1 beyond best known "+ - "state #%v!!! Attempting recovery...", - broadcastStateNum, chainSet.remoteStateNum) - - // If this isn't a tweakless commitment, then we'll - // need to wait for the remote party's latest unrevoked - // commitment point to be presented to us as we need - // this to sweep. Otherwise, we can dispatch the remote - // close and sweep immediately using a fake commitPoint - // as it isn't actually needed for recovery anymore. - commitPoint := c.cfg.chanState.RemoteCurrentRevocation - tweaklessCommit := c.cfg.chanState.ChanType.IsTweakless() - if !tweaklessCommit { - commitPoint = c.waitForCommitmentPoint() - if commitPoint == nil { - return - } - - log.Infof("Recovered commit point(%x) for "+ - "channel(%v)! Now attempting to use it to "+ - "sweep our funds...", - commitPoint.SerializeCompressed(), - c.cfg.chanState.FundingOutpoint) - - } else { - log.Infof("ChannelPoint(%v) is tweakless, "+ - "moving to sweep directly on chain", - c.cfg.chanState.FundingOutpoint) - } - - // Since we don't have the commitment stored for this - // state, we'll just pass an empty commitment within - // the commitment set. Note that this means we won't be - // able to recover any HTLC funds. - // - // TODO(halseth): can we try to recover some HTLCs? - chainSet.commitSet.ConfCommitKey = &RemoteHtlcSet - err = c.dispatchRemoteForceClose( - commitSpend, channeldb.ChannelCommitment{}, - chainSet.commitSet, commitPoint, - ) - if err != nil { - log.Errorf("unable to handle remote "+ - "close for chan_point=%v: %v", - c.cfg.chanState.FundingOutpoint, err) - } - - // If the state number broadcast is lower than the remote - // node's current un-revoked height, then THEY'RE ATTEMPTING TO - // VIOLATE THE CONTRACT LAID OUT WITHIN THE PAYMENT CHANNEL. 
- // Therefore we close the signal indicating a revoked broadcast - // to allow subscribers to swiftly dispatch justice!!! - case broadcastStateNum < chainSet.remoteStateNum: - err := c.dispatchContractBreach( - commitSpend, &chainSet.remoteCommit, - broadcastStateNum, - ) - if err != nil { - log.Errorf("unable to handle channel "+ - "breach for chan_point=%v: %v", - c.cfg.chanState.FundingOutpoint, err) - } - } - - // Now that a spend has been detected, we've done our job, so - // we'll exit immediately. - return - - // The chainWatcher has been signalled to exit, so we'll do so now. - case <-c.quit: - return - } -} - -// toSelfAmount takes a transaction and returns the sum of all outputs that pay -// to a script that the wallet controls. If no outputs pay to us, then we -// return zero. This is possible as our output may have been trimmed due to -// being dust. -func (c *chainWatcher) toSelfAmount(tx *wire.MsgTx) btcutil.Amount { - var selfAmt btcutil.Amount - for _, txOut := range tx.TxOut { - _, addrs, _, err := txscript.ExtractPkScriptAddrs( - // Doesn't matter what net we actually pass in. - txOut.PkScript, &chaincfg.TestNet3Params, - ) - if err != nil { - continue - } - - for _, addr := range addrs { - if c.cfg.isOurAddr(addr) { - selfAmt += btcutil.Amount(txOut.Value) - } - } - } - - return selfAmt -} - -// dispatchCooperativeClose processed a detect cooperative channel closure. -// We'll use the spending transaction to locate our output within the -// transaction, then clean up the database state. We'll also dispatch a -// notification to all subscribers that the channel has been closed in this -// manner. -func (c *chainWatcher) dispatchCooperativeClose(commitSpend *chainntnfs.SpendDetail) er.R { - broadcastTx := commitSpend.SpendingTx - - log.Infof("Cooperative closure for ChannelPoint(%v): %v", - c.cfg.chanState.FundingOutpoint, spew.Sdump(broadcastTx)) - - // If the input *is* final, then we'll check to see which output is - // ours. 
- localAmt := c.toSelfAmount(broadcastTx) - - // Once this is known, we'll mark the state as fully closed in the - // database. We can do this as a cooperatively closed channel has all - // its outputs resolved after only one confirmation. - closeSummary := &channeldb.ChannelCloseSummary{ - ChanPoint: c.cfg.chanState.FundingOutpoint, - ChainHash: c.cfg.chanState.ChainHash, - ClosingTXID: *commitSpend.SpenderTxHash, - RemotePub: c.cfg.chanState.IdentityPub, - Capacity: c.cfg.chanState.Capacity, - CloseHeight: uint32(commitSpend.SpendingHeight), - SettledBalance: localAmt, - CloseType: channeldb.CooperativeClose, - ShortChanID: c.cfg.chanState.ShortChanID(), - IsPending: true, - RemoteCurrentRevocation: c.cfg.chanState.RemoteCurrentRevocation, - RemoteNextRevocation: c.cfg.chanState.RemoteNextRevocation, - LocalChanConfig: c.cfg.chanState.LocalChanCfg, - } - - // Attempt to add a channel sync message to the close summary. - chanSync, err := c.cfg.chanState.ChanSyncMsg() - if err != nil { - log.Errorf("ChannelPoint(%v): unable to create channel sync "+ - "message: %v", c.cfg.chanState.FundingOutpoint, err) - } else { - closeSummary.LastChanSyncMsg = chanSync - } - - // Create a summary of all the information needed to handle the - // cooperative closure. - closeInfo := &CooperativeCloseInfo{ - ChannelCloseSummary: closeSummary, - } - - // With the event processed, we'll now notify all subscribers of the - // event. - c.Lock() - for _, sub := range c.clientSubscriptions { - select { - case sub.CooperativeClosure <- closeInfo: - case <-c.quit: - c.Unlock() - return er.Errorf("exiting") - } - } - c.Unlock() - - return nil -} - -// dispatchLocalForceClose processes a unilateral close by us being confirmed. 
-func (c *chainWatcher) dispatchLocalForceClose( - commitSpend *chainntnfs.SpendDetail, - localCommit channeldb.ChannelCommitment, commitSet CommitSet) er.R { - - log.Infof("Local unilateral close of ChannelPoint(%v) "+ - "detected", c.cfg.chanState.FundingOutpoint) - - forceClose, err := lnwallet.NewLocalForceCloseSummary( - c.cfg.chanState, c.cfg.signer, - commitSpend.SpendingTx, localCommit, - ) - if err != nil { - return err - } - - // As we've detected that the channel has been closed, immediately - // creating a close summary for future usage by related sub-systems. - chanSnapshot := forceClose.ChanSnapshot - closeSummary := &channeldb.ChannelCloseSummary{ - ChanPoint: chanSnapshot.ChannelPoint, - ChainHash: chanSnapshot.ChainHash, - ClosingTXID: forceClose.CloseTx.TxHash(), - RemotePub: &chanSnapshot.RemoteIdentity, - Capacity: chanSnapshot.Capacity, - CloseType: channeldb.LocalForceClose, - IsPending: true, - ShortChanID: c.cfg.chanState.ShortChanID(), - CloseHeight: uint32(commitSpend.SpendingHeight), - RemoteCurrentRevocation: c.cfg.chanState.RemoteCurrentRevocation, - RemoteNextRevocation: c.cfg.chanState.RemoteNextRevocation, - LocalChanConfig: c.cfg.chanState.LocalChanCfg, - } - - // If our commitment output isn't dust or we have active HTLC's on the - // commitment transaction, then we'll populate the balances on the - // close channel summary. - if forceClose.CommitResolution != nil { - closeSummary.SettledBalance = chanSnapshot.LocalBalance.ToSatoshis() - closeSummary.TimeLockedBalance = chanSnapshot.LocalBalance.ToSatoshis() - } - for _, htlc := range forceClose.HtlcResolutions.OutgoingHTLCs { - htlcValue := btcutil.Amount(htlc.SweepSignDesc.Output.Value) - closeSummary.TimeLockedBalance += htlcValue - } - - // Attempt to add a channel sync message to the close summary. 
- chanSync, err := c.cfg.chanState.ChanSyncMsg() - if err != nil { - log.Errorf("ChannelPoint(%v): unable to create channel sync "+ - "message: %v", c.cfg.chanState.FundingOutpoint, err) - } else { - closeSummary.LastChanSyncMsg = chanSync - } - - // With the event processed, we'll now notify all subscribers of the - // event. - closeInfo := &LocalUnilateralCloseInfo{ - SpendDetail: commitSpend, - LocalForceCloseSummary: forceClose, - ChannelCloseSummary: closeSummary, - CommitSet: commitSet, - } - c.Lock() - for _, sub := range c.clientSubscriptions { - select { - case sub.LocalUnilateralClosure <- closeInfo: - case <-c.quit: - c.Unlock() - return er.Errorf("exiting") - } - } - c.Unlock() - - return nil -} - -// dispatchRemoteForceClose processes a detected unilateral channel closure by -// the remote party. This function will prepare a UnilateralCloseSummary which -// will then be sent to any subscribers allowing them to resolve all our funds -// in the channel on chain. Once this close summary is prepared, all registered -// subscribers will receive a notification of this event. The commitPoint -// argument should be set to the per_commitment_point corresponding to the -// spending commitment. -// -// NOTE: The remoteCommit argument should be set to the stored commitment for -// this particular state. If we don't have the commitment stored (should only -// happen in case we have lost state) it should be set to an empty struct, in -// which case we will attempt to sweep the non-HTLC output using the passed -// commitPoint. 
-func (c *chainWatcher) dispatchRemoteForceClose( - commitSpend *chainntnfs.SpendDetail, - remoteCommit channeldb.ChannelCommitment, - commitSet CommitSet, commitPoint *btcec.PublicKey) er.R { - - log.Infof("Unilateral close of ChannelPoint(%v) "+ - "detected", c.cfg.chanState.FundingOutpoint) - - // First, we'll create a closure summary that contains all the - // materials required to let each subscriber sweep the funds in the - // channel on-chain. - uniClose, err := lnwallet.NewUnilateralCloseSummary( - c.cfg.chanState, c.cfg.signer, commitSpend, - remoteCommit, commitPoint, - ) - if err != nil { - return err - } - - // With the event processed, we'll now notify all subscribers of the - // event. - c.Lock() - for _, sub := range c.clientSubscriptions { - select { - case sub.RemoteUnilateralClosure <- &RemoteUnilateralCloseInfo{ - UnilateralCloseSummary: uniClose, - CommitSet: commitSet, - }: - case <-c.quit: - c.Unlock() - return er.Errorf("exiting") - } - } - c.Unlock() - - return nil -} - -// dispatchContractBreach processes a detected contract breached by the remote -// party. This method is to be called once we detect that the remote party has -// broadcast a prior revoked commitment state. This method well prepare all the -// materials required to bring the cheater to justice, then notify all -// registered subscribers of this event. -func (c *chainWatcher) dispatchContractBreach(spendEvent *chainntnfs.SpendDetail, - remoteCommit *channeldb.ChannelCommitment, - broadcastStateNum uint64) er.R { - - log.Warnf("Remote peer has breached the channel contract for "+ - "ChannelPoint(%v). 
Revoked state #%v was broadcast!!!", - c.cfg.chanState.FundingOutpoint, broadcastStateNum) - - if err := c.cfg.chanState.MarkBorked(); err != nil { - return er.Errorf("unable to mark channel as borked: %v", err) - } - - spendHeight := uint32(spendEvent.SpendingHeight) - - // Create a new reach retribution struct which contains all the data - // needed to swiftly bring the cheating peer to justice. - // - // TODO(roasbeef): move to same package - retribution, err := lnwallet.NewBreachRetribution( - c.cfg.chanState, broadcastStateNum, spendHeight, - ) - if err != nil { - return er.Errorf("unable to create breach retribution: %v", err) - } - - // Nil the curve before printing. - if retribution.RemoteOutputSignDesc != nil && - retribution.RemoteOutputSignDesc.DoubleTweak != nil { - retribution.RemoteOutputSignDesc.DoubleTweak.Curve = nil - } - if retribution.RemoteOutputSignDesc != nil && - retribution.RemoteOutputSignDesc.KeyDesc.PubKey != nil { - retribution.RemoteOutputSignDesc.KeyDesc.PubKey.Curve = nil - } - if retribution.LocalOutputSignDesc != nil && - retribution.LocalOutputSignDesc.DoubleTweak != nil { - retribution.LocalOutputSignDesc.DoubleTweak.Curve = nil - } - if retribution.LocalOutputSignDesc != nil && - retribution.LocalOutputSignDesc.KeyDesc.PubKey != nil { - retribution.LocalOutputSignDesc.KeyDesc.PubKey.Curve = nil - } - - log.Debugf("Punishment breach retribution created: %v", - log.C(func() string { - retribution.KeyRing.CommitPoint.Curve = nil - retribution.KeyRing.LocalHtlcKey = nil - retribution.KeyRing.RemoteHtlcKey = nil - retribution.KeyRing.ToLocalKey = nil - retribution.KeyRing.ToRemoteKey = nil - retribution.KeyRing.RevocationKey = nil - return spew.Sdump(retribution) - })) - - // Hand the retribution info over to the breach arbiter. 
- if err := c.cfg.contractBreach(retribution); err != nil { - log.Errorf("unable to hand breached contract off to "+ - "breachArbiter: %v", err) - return err - } - - // With the event processed, we'll now notify all subscribers of the - // event. - c.Lock() - for _, sub := range c.clientSubscriptions { - select { - case sub.ContractBreach <- retribution: - case <-c.quit: - c.Unlock() - return er.Errorf("quitting") - } - } - c.Unlock() - - // At this point, we've successfully received an ack for the breach - // close. We now construct and persist the close summary, marking the - // channel as pending force closed. - // - // TODO(roasbeef): instead mark we got all the monies? - // TODO(halseth): move responsibility to breach arbiter? - settledBalance := remoteCommit.LocalBalance.ToSatoshis() - closeSummary := channeldb.ChannelCloseSummary{ - ChanPoint: c.cfg.chanState.FundingOutpoint, - ChainHash: c.cfg.chanState.ChainHash, - ClosingTXID: *spendEvent.SpenderTxHash, - CloseHeight: spendHeight, - RemotePub: c.cfg.chanState.IdentityPub, - Capacity: c.cfg.chanState.Capacity, - SettledBalance: settledBalance, - CloseType: channeldb.BreachClose, - IsPending: true, - ShortChanID: c.cfg.chanState.ShortChanID(), - RemoteCurrentRevocation: c.cfg.chanState.RemoteCurrentRevocation, - RemoteNextRevocation: c.cfg.chanState.RemoteNextRevocation, - LocalChanConfig: c.cfg.chanState.LocalChanCfg, - } - - // Attempt to add a channel sync message to the close summary. 
- chanSync, err := c.cfg.chanState.ChanSyncMsg() - if err != nil { - log.Errorf("ChannelPoint(%v): unable to create channel sync "+ - "message: %v", c.cfg.chanState.FundingOutpoint, err) - } else { - closeSummary.LastChanSyncMsg = chanSync - } - - if err := c.cfg.chanState.CloseChannel( - &closeSummary, channeldb.ChanStatusRemoteCloseInitiator, - ); err != nil { - return err - } - - log.Infof("Breached channel=%v marked pending-closed", - c.cfg.chanState.FundingOutpoint) - - return nil -} - -// waitForCommitmentPoint waits for the commitment point to be inserted into -// the local database. We'll use this method in the DLP case, to wait for the -// remote party to send us their point, as we can't proceed until we have that. -func (c *chainWatcher) waitForCommitmentPoint() *btcec.PublicKey { - // If we are lucky, the remote peer sent us the correct commitment - // point during channel sync, such that we can sweep our funds. If we - // cannot find the commit point, there's not much we can do other than - // wait for us to retrieve it. We will attempt to retrieve it from the - // peer each time we connect to it. - // - // TODO(halseth): actively initiate re-connection to the peer? - backoff := minCommitPointPollTimeout - for { - commitPoint, err := c.cfg.chanState.DataLossCommitPoint() - if err == nil { - return commitPoint - } - - log.Errorf("Unable to retrieve commitment point for "+ - "channel(%v) with lost state: %v. Retrying in %v.", - c.cfg.chanState.FundingOutpoint, err, backoff) - - select { - // Wait before retrying, with an exponential backoff. 
- case <-time.After(backoff): - backoff = 2 * backoff - if backoff > maxCommitPointPollTimeout { - backoff = maxCommitPointPollTimeout - } - - case <-c.quit: - return nil - } - } -} diff --git a/lnd/contractcourt/chain_watcher_test.go b/lnd/contractcourt/chain_watcher_test.go deleted file mode 100644 index fd305e75..00000000 --- a/lnd/contractcourt/chain_watcher_test.go +++ /dev/null @@ -1,551 +0,0 @@ -package contractcourt - -import ( - "bytes" - "crypto/sha256" - "fmt" - "testing" - "time" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/chainntnfs" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/input" - "github.com/pkt-cash/pktd/lnd/lntest/mock" - "github.com/pkt-cash/pktd/lnd/lnwallet" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/wire" -) - -// TestChainWatcherRemoteUnilateralClose tests that the chain watcher is able -// to properly detect a normal unilateral close by the remote node using their -// lowest commitment. -func TestChainWatcherRemoteUnilateralClose(t *testing.T) { - t.Parallel() - - // First, we'll create two channels which already have established a - // commitment contract between themselves. - aliceChannel, bobChannel, cleanUp, err := lnwallet.CreateTestChannels( - channeldb.SingleFunderTweaklessBit, - ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } - defer cleanUp() - - // With the channels created, we'll now create a chain watcher instance - // which will be watching for any closes of Alice's channel. 
- aliceNotifier := &mock.ChainNotifier{ - SpendChan: make(chan *chainntnfs.SpendDetail), - EpochChan: make(chan *chainntnfs.BlockEpoch), - ConfChan: make(chan *chainntnfs.TxConfirmation), - } - aliceChainWatcher, err := newChainWatcher(chainWatcherConfig{ - chanState: aliceChannel.State(), - notifier: aliceNotifier, - signer: aliceChannel.Signer, - extractStateNumHint: lnwallet.GetStateNumHint, - }) - if err != nil { - t.Fatalf("unable to create chain watcher: %v", err) - } - err = aliceChainWatcher.Start() - if err != nil { - t.Fatalf("unable to start chain watcher: %v", err) - } - defer aliceChainWatcher.Stop() - - // We'll request a new channel event subscription from Alice's chain - // watcher. - chanEvents := aliceChainWatcher.SubscribeChannelEvents() - - // If we simulate an immediate broadcast of the current commitment by - // Bob, then the chain watcher should detect this case. - bobCommit := bobChannel.State().LocalCommitment.CommitTx - bobTxHash := bobCommit.TxHash() - bobSpend := &chainntnfs.SpendDetail{ - SpenderTxHash: &bobTxHash, - SpendingTx: bobCommit, - } - aliceNotifier.SpendChan <- bobSpend - - // We should get a new spend event over the remote unilateral close - // event channel. - var uniClose *RemoteUnilateralCloseInfo - select { - case uniClose = <-chanEvents.RemoteUnilateralClosure: - case <-time.After(time.Second * 15): - t.Fatalf("didn't receive unilateral close event") - } - - // The unilateral close should have properly located Alice's output in - // the commitment transaction. 
- if uniClose.CommitResolution == nil { - t.Fatalf("unable to find alice's commit resolution") - } -} - -func addFakeHTLC(t *testing.T, htlcAmount lnwire.MilliSatoshi, id uint64, - aliceChannel, bobChannel *lnwallet.LightningChannel) { - - preimage := bytes.Repeat([]byte{byte(id)}, 32) - paymentHash := sha256.Sum256(preimage) - var returnPreimage [32]byte - copy(returnPreimage[:], preimage) - htlc := &lnwire.UpdateAddHTLC{ - ID: uint64(id), - PaymentHash: paymentHash, - Amount: htlcAmount, - Expiry: uint32(5), - } - - if _, err := aliceChannel.AddHTLC(htlc, nil); err != nil { - t.Fatalf("alice unable to add htlc: %v", err) - } - if _, err := bobChannel.ReceiveHTLC(htlc); err != nil { - t.Fatalf("bob unable to recv add htlc: %v", err) - } -} - -// TestChainWatcherRemoteUnilateralClosePendingCommit tests that the chain -// watcher is able to properly detect a unilateral close wherein the remote -// node broadcasts their newly received commitment, without first revoking the -// old one. -func TestChainWatcherRemoteUnilateralClosePendingCommit(t *testing.T) { - t.Parallel() - - // First, we'll create two channels which already have established a - // commitment contract between themselves. - aliceChannel, bobChannel, cleanUp, err := lnwallet.CreateTestChannels( - channeldb.SingleFunderTweaklessBit, - ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } - defer cleanUp() - - // With the channels created, we'll now create a chain watcher instance - // which will be watching for any closes of Alice's channel. 
- aliceNotifier := &mock.ChainNotifier{ - SpendChan: make(chan *chainntnfs.SpendDetail), - EpochChan: make(chan *chainntnfs.BlockEpoch), - ConfChan: make(chan *chainntnfs.TxConfirmation), - } - aliceChainWatcher, err := newChainWatcher(chainWatcherConfig{ - chanState: aliceChannel.State(), - notifier: aliceNotifier, - signer: aliceChannel.Signer, - extractStateNumHint: lnwallet.GetStateNumHint, - }) - if err != nil { - t.Fatalf("unable to create chain watcher: %v", err) - } - if err := aliceChainWatcher.Start(); err != nil { - t.Fatalf("unable to start chain watcher: %v", err) - } - defer aliceChainWatcher.Stop() - - // We'll request a new channel event subscription from Alice's chain - // watcher. - chanEvents := aliceChainWatcher.SubscribeChannelEvents() - - // Next, we'll create a fake HTLC just so we can advance Alice's - // channel state to a new pending commitment on her remote commit chain - // for Bob. - htlcAmount := lnwire.NewMSatFromSatoshis(20000) - addFakeHTLC(t, htlcAmount, 0, aliceChannel, bobChannel) - - // With the HTLC added, we'll now manually initiate a state transition - // from Alice to Bob. - _, _, _, err = aliceChannel.SignNextCommitment() - if err != nil { - t.Fatal(err) - } - - // At this point, we'll now Bob broadcasting this new pending unrevoked - // commitment. - bobPendingCommit, err := aliceChannel.State().RemoteCommitChainTip() - if err != nil { - t.Fatal(err) - } - - // We'll craft a fake spend notification with Bob's actual commitment. - // The chain watcher should be able to detect that this is a pending - // commit broadcast based on the state hints in the commitment. - bobCommit := bobPendingCommit.Commitment.CommitTx - bobTxHash := bobCommit.TxHash() - bobSpend := &chainntnfs.SpendDetail{ - SpenderTxHash: &bobTxHash, - SpendingTx: bobCommit, - } - aliceNotifier.SpendChan <- bobSpend - - // We should get a new spend event over the remote unilateral close - // event channel. 
- var uniClose *RemoteUnilateralCloseInfo - select { - case uniClose = <-chanEvents.RemoteUnilateralClosure: - case <-time.After(time.Second * 15): - t.Fatalf("didn't receive unilateral close event") - } - - // The unilateral close should have properly located Alice's output in - // the commitment transaction. - if uniClose.CommitResolution == nil { - t.Fatalf("unable to find alice's commit resolution") - } -} - -// dlpTestCase is a special struct that we'll use to generate randomized test -// cases for the main TestChainWatcherDataLossProtect test. This struct has a -// special Generate method that will generate a random state number, and a -// broadcast state number which is greater than that state number. -type dlpTestCase struct { - BroadcastStateNum uint8 - NumUpdates uint8 -} - -func executeStateTransitions(t *testing.T, htlcAmount lnwire.MilliSatoshi, - aliceChannel, bobChannel *lnwallet.LightningChannel, - numUpdates uint8) er.R { - - for i := 0; i < int(numUpdates); i++ { - addFakeHTLC( - t, htlcAmount, uint64(i), aliceChannel, bobChannel, - ) - - err := lnwallet.ForceStateTransition(aliceChannel, bobChannel) - if err != nil { - return err - } - } - - return nil -} - -// TestChainWatcherDataLossProtect tests that if we've lost data (and are -// behind the remote node), then we'll properly detect this case and dispatch a -// remote force close using the obtained data loss commitment point. -func TestChainWatcherDataLossProtect(t *testing.T) { - t.Parallel() - - // dlpScenario is our primary quick check testing function for this - // test as whole. It ensures that if the remote party broadcasts a - // commitment that is beyond our best known commitment for them, and - // they don't have a pending commitment (one we sent but which hasn't - // been revoked), then we'll properly detect this case, and execute the - // DLP protocol on our end. 
- // - // broadcastStateNum is the number that we'll trick Alice into thinking - // was broadcast, while numUpdates is the actual number of updates - // we'll execute. Both of these will be random 8-bit values generated - // by testing/quick. - dlpScenario := func(t *testing.T, testCase dlpTestCase) bool { - // First, we'll create two channels which already have - // established a commitment contract between themselves. - aliceChannel, bobChannel, cleanUp, err := lnwallet.CreateTestChannels( - channeldb.SingleFunderBit, - ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } - defer cleanUp() - - // With the channels created, we'll now create a chain watcher - // instance which will be watching for any closes of Alice's - // channel. - aliceNotifier := &mock.ChainNotifier{ - SpendChan: make(chan *chainntnfs.SpendDetail), - EpochChan: make(chan *chainntnfs.BlockEpoch), - ConfChan: make(chan *chainntnfs.TxConfirmation), - } - aliceChainWatcher, err := newChainWatcher(chainWatcherConfig{ - chanState: aliceChannel.State(), - notifier: aliceNotifier, - signer: aliceChannel.Signer, - extractStateNumHint: func(*wire.MsgTx, - [lnwallet.StateHintSize]byte) uint64 { - - // We'll return the "fake" broadcast commitment - // number so we can simulate broadcast of an - // arbitrary state. - return uint64(testCase.BroadcastStateNum) - }, - }) - if err != nil { - t.Fatalf("unable to create chain watcher: %v", err) - } - if err := aliceChainWatcher.Start(); err != nil { - t.Fatalf("unable to start chain watcher: %v", err) - } - defer aliceChainWatcher.Stop() - - // Based on the number of random updates for this state, make a - // new HTLC to add to the commitment, and then lock in a state - // transition. 
- const htlcAmt = 1000 - err = executeStateTransitions( - t, htlcAmt, aliceChannel, bobChannel, testCase.NumUpdates, - ) - if err != nil { - t.Errorf("unable to trigger state "+ - "transition: %v", err) - return false - } - - // We'll request a new channel event subscription from Alice's - // chain watcher so we can be notified of our fake close below. - chanEvents := aliceChainWatcher.SubscribeChannelEvents() - - // Otherwise, we'll feed in this new state number as a response - // to the query, and insert the expected DLP commit point. - dlpPoint := aliceChannel.State().RemoteCurrentRevocation - err = aliceChannel.State().MarkDataLoss(dlpPoint) - if err != nil { - t.Errorf("unable to insert dlp point: %v", err) - return false - } - - // Now we'll trigger the channel close event to trigger the - // scenario. - bobCommit := bobChannel.State().LocalCommitment.CommitTx - bobTxHash := bobCommit.TxHash() - bobSpend := &chainntnfs.SpendDetail{ - SpenderTxHash: &bobTxHash, - SpendingTx: bobCommit, - } - aliceNotifier.SpendChan <- bobSpend - - // We should get a new uni close resolution that indicates we - // processed the DLP scenario. - var uniClose *RemoteUnilateralCloseInfo - select { - case uniClose = <-chanEvents.RemoteUnilateralClosure: - // If we processed this as a DLP case, then the remote - // party's commitment should be blank, as we don't have - // this up to date state. - blankCommit := channeldb.ChannelCommitment{} - if uniClose.RemoteCommit.FeePerKw != blankCommit.FeePerKw { - t.Errorf("DLP path not executed") - return false - } - - // The resolution should have also read the DLP point - // we stored above, and used that to derive their sweep - // key for this output. 
- sweepTweak := input.SingleTweakBytes( - dlpPoint, - aliceChannel.State().LocalChanCfg.PaymentBasePoint.PubKey, - ) - commitResolution := uniClose.CommitResolution - resolutionTweak := commitResolution.SelfOutputSignDesc.SingleTweak - if !bytes.Equal(sweepTweak, resolutionTweak) { - t.Errorf("sweep key mismatch: expected %x got %x", - sweepTweak, resolutionTweak) - return false - } - - return true - - case <-time.After(time.Second * 5): - t.Errorf("didn't receive unilateral close event") - return false - } - } - - testCases := []dlpTestCase{ - // For our first scenario, we'll ensure that if we're on state 1, - // and the remote party broadcasts state 2 and we don't have a - // pending commit for them, then we'll properly detect this as a - // DLP scenario. - { - BroadcastStateNum: 2, - NumUpdates: 1, - }, - - // We've completed a single update, but the remote party broadcasts - // a state that's 5 states byeond our best known state. We've lost - // data, but only partially, so we should enter a DLP secnario. - { - BroadcastStateNum: 6, - NumUpdates: 1, - }, - - // Similar to the case above, but we've done more than one - // update. - { - BroadcastStateNum: 6, - NumUpdates: 3, - }, - - // We've done zero updates, but our channel peer broadcasts a - // state beyond our knowledge. - { - BroadcastStateNum: 10, - NumUpdates: 0, - }, - } - for _, testCase := range testCases { - testName := fmt.Sprintf("num_updates=%v,broadcast_state_num=%v", - testCase.NumUpdates, testCase.BroadcastStateNum) - - testCase := testCase - t.Run(testName, func(t *testing.T) { - t.Parallel() - - if !dlpScenario(t, testCase) { - t.Fatalf("test %v failed", testName) - } - }) - } -} - -// TestChainWatcherLocalForceCloseDetect tests we're able to always detect our -// commitment output based on only the outputs present on the transaction. 
-func TestChainWatcherLocalForceCloseDetect(t *testing.T) { - t.Parallel() - - // localForceCloseScenario is the primary test we'll use to execute our - // table driven tests. We'll assert that for any number of state - // updates, and if the commitment transaction has our output or not, - // we're able to properly detect a local force close. - localForceCloseScenario := func(t *testing.T, numUpdates uint8, - remoteOutputOnly, localOutputOnly bool) bool { - - // First, we'll create two channels which already have - // established a commitment contract between themselves. - aliceChannel, bobChannel, cleanUp, err := lnwallet.CreateTestChannels( - channeldb.SingleFunderBit, - ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } - defer cleanUp() - - // With the channels created, we'll now create a chain watcher - // instance which will be watching for any closes of Alice's - // channel. - aliceNotifier := &mock.ChainNotifier{ - SpendChan: make(chan *chainntnfs.SpendDetail), - EpochChan: make(chan *chainntnfs.BlockEpoch), - ConfChan: make(chan *chainntnfs.TxConfirmation), - } - aliceChainWatcher, err := newChainWatcher(chainWatcherConfig{ - chanState: aliceChannel.State(), - notifier: aliceNotifier, - signer: aliceChannel.Signer, - extractStateNumHint: lnwallet.GetStateNumHint, - }) - if err != nil { - t.Fatalf("unable to create chain watcher: %v", err) - } - if err := aliceChainWatcher.Start(); err != nil { - t.Fatalf("unable to start chain watcher: %v", err) - } - defer aliceChainWatcher.Stop() - - // We'll execute a number of state transitions based on the - // randomly selected number from testing/quick. We do this to - // get more coverage of various state hint encodings beyond 0 - // and 1. 
- const htlcAmt = 1000 - err = executeStateTransitions( - t, htlcAmt, aliceChannel, bobChannel, numUpdates, - ) - if err != nil { - t.Errorf("unable to trigger state "+ - "transition: %v", err) - return false - } - - // We'll request a new channel event subscription from Alice's - // chain watcher so we can be notified of our fake close below. - chanEvents := aliceChainWatcher.SubscribeChannelEvents() - - // Next, we'll obtain Alice's commitment transaction and - // trigger a force close. This should cause her to detect a - // local force close, and dispatch a local close event. - aliceCommit := aliceChannel.State().LocalCommitment.CommitTx - - // Since this is Alice's commitment, her output is always first - // since she's the one creating the HTLCs (lower balance). In - // order to simulate the commitment only having the remote - // party's output, we'll remove Alice's output. - if remoteOutputOnly { - aliceCommit.TxOut = aliceCommit.TxOut[1:] - } - if localOutputOnly { - aliceCommit.TxOut = aliceCommit.TxOut[:1] - } - - aliceTxHash := aliceCommit.TxHash() - aliceSpend := &chainntnfs.SpendDetail{ - SpenderTxHash: &aliceTxHash, - SpendingTx: aliceCommit, - } - aliceNotifier.SpendChan <- aliceSpend - - // We should get a local force close event from Alice as she - // should be able to detect the close based on the commitment - // outputs. - select { - case <-chanEvents.LocalUnilateralClosure: - return true - - case <-time.After(time.Second * 5): - t.Errorf("didn't get local for close for state #%v", - numUpdates) - return false - } - } - - // For our test cases, we'll ensure that we test having a remote output - // present and absent with non or some number of updates in the channel. 
- testCases := []struct { - numUpdates uint8 - remoteOutputOnly bool - localOutputOnly bool - }{ - { - numUpdates: 0, - remoteOutputOnly: true, - }, - { - numUpdates: 0, - remoteOutputOnly: false, - }, - { - numUpdates: 0, - localOutputOnly: true, - }, - { - numUpdates: 20, - remoteOutputOnly: false, - }, - { - numUpdates: 20, - remoteOutputOnly: true, - }, - { - numUpdates: 20, - localOutputOnly: true, - }, - } - for _, testCase := range testCases { - testName := fmt.Sprintf( - "num_updates=%v,remote_output=%v,local_output=%v", - testCase.numUpdates, testCase.remoteOutputOnly, - testCase.localOutputOnly, - ) - - testCase := testCase - t.Run(testName, func(t *testing.T) { - t.Parallel() - - localForceCloseScenario( - t, testCase.numUpdates, testCase.remoteOutputOnly, - testCase.localOutputOnly, - ) - }) - } -} diff --git a/lnd/contractcourt/channel_arbitrator.go b/lnd/contractcourt/channel_arbitrator.go deleted file mode 100644 index e951c1a6..00000000 --- a/lnd/contractcourt/channel_arbitrator.go +++ /dev/null @@ -1,2471 +0,0 @@ -package contractcourt - -import ( - "bytes" - "sync" - "sync/atomic" - "time" - - "github.com/davecgh/go-spew/spew" - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/lnd/input" - "github.com/pkt-cash/pktd/lnd/labels" - "github.com/pkt-cash/pktd/lnd/lntypes" - "github.com/pkt-cash/pktd/lnd/lnwallet" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/sweep" - "github.com/pkt-cash/pktd/pktlog/log" - "github.com/pkt-cash/pktd/wire" -) - -var ( - // errAlreadyForceClosed is an error returned when we attempt to force - // close a channel that's already in the process of doing so. 
- errAlreadyForceClosed = Err.CodeWithDetail("errAlreadyForceClosed", - "channel is already in the process of being force closed") -) - -const ( - // anchorSweepConfTarget is the conf target used when sweeping - // commitment anchors. - anchorSweepConfTarget = 6 - - // arbitratorBlockBufferSize is the size of the buffer we give to each - // channel arbitrator. - arbitratorBlockBufferSize = 20 -) - -// WitnessSubscription represents an intent to be notified once new witnesses -// are discovered by various active contract resolvers. A contract resolver may -// use this to be notified of when it can satisfy an incoming contract after we -// discover the witness for an outgoing contract. -type WitnessSubscription struct { - // WitnessUpdates is a channel that newly discovered witnesses will be - // sent over. - // - // TODO(roasbeef): couple with WitnessType? - WitnessUpdates <-chan lntypes.Preimage - - // CancelSubscription is a function closure that should be used by a - // client to cancel the subscription once they are no longer interested - // in receiving new updates. - CancelSubscription func() -} - -// WitnessBeacon is a global beacon of witnesses. Contract resolvers will use -// this interface to lookup witnesses (preimages typically) of contracts -// they're trying to resolve, add new preimages they resolve, and finally -// receive new updates each new time a preimage is discovered. -// -// TODO(roasbeef): need to delete the pre-images once we've used them -// and have been sufficiently confirmed? -type WitnessBeacon interface { - // SubscribeUpdates returns a channel that will be sent upon *each* time - // a new preimage is discovered. - SubscribeUpdates() *WitnessSubscription - - // LookupPreImage attempts to lookup a preimage in the global cache. - // True is returned for the second argument if the preimage is found. 
- LookupPreimage(payhash lntypes.Hash) (lntypes.Preimage, bool) - - // AddPreimages adds a batch of newly discovered preimages to the global - // cache, and also signals any subscribers of the newly discovered - // witness. - AddPreimages(preimages ...lntypes.Preimage) er.R -} - -// ArbChannel is an abstraction that allows the channel arbitrator to interact -// with an open channel. -type ArbChannel interface { - // ForceCloseChan should force close the contract that this attendant - // is watching over. We'll use this when we decide that we need to go - // to chain. It should in addition tell the switch to remove the - // corresponding link, such that we won't accept any new updates. The - // returned summary contains all items needed to eventually resolve all - // outputs on chain. - ForceCloseChan() (*lnwallet.LocalForceCloseSummary, er.R) - - // NewAnchorResolutions returns the anchor resolutions for currently - // valid commitment transactions. - NewAnchorResolutions() ([]*lnwallet.AnchorResolution, er.R) -} - -// ChannelArbitratorConfig contains all the functionality that the -// ChannelArbitrator needs in order to properly arbitrate any contract dispute -// on chain. -type ChannelArbitratorConfig struct { - // ChanPoint is the channel point that uniquely identifies this - // channel. - ChanPoint wire.OutPoint - - // Channel is the full channel data structure. For legacy channels, this - // field may not always be set after a restart. - Channel ArbChannel - - // ShortChanID describes the exact location of the channel within the - // chain. We'll use this to address any messages that we need to send - // to the switch during contract resolution. - ShortChanID lnwire.ShortChannelID - - // ChainEvents is an active subscription to the chain watcher for this - // channel to be notified of any on-chain activity related to this - // channel. 
- ChainEvents *ChainEventSubscription - - // MarkCommitmentBroadcasted should mark the channel as the commitment - // being broadcast, and we are waiting for the commitment to confirm. - MarkCommitmentBroadcasted func(*wire.MsgTx, bool) er.R - - // MarkChannelClosed marks the channel closed in the database, with the - // passed close summary. After this method successfully returns we can - // no longer expect to receive chain events for this channel, and must - // be able to recover from a failure without getting the close event - // again. It takes an optional channel status which will update the - // channel status in the record that we keep of historical channels. - MarkChannelClosed func(*channeldb.ChannelCloseSummary, - ...channeldb.ChannelStatus) er.R - - // IsPendingClose is a boolean indicating whether the channel is marked - // as pending close in the database. - IsPendingClose bool - - // ClosingHeight is the height at which the channel was closed. Note - // that this value is only valid if IsPendingClose is true. - ClosingHeight uint32 - - // CloseType is the type of the close event in case IsPendingClose is - // true. Otherwise this value is unset. - CloseType channeldb.ClosureType - - // MarkChannelResolved is a function closure that serves to mark a - // channel as "fully resolved". A channel itself can be considered - // fully resolved once all active contracts have individually been - // fully resolved. - // - // TODO(roasbeef): need RPC's to combine for pendingchannels RPC - MarkChannelResolved func() er.R - - // PutResolverReport records a resolver report for the channel. If the - // transaction provided is nil, the function should write the report - // in a new transaction. - PutResolverReport func(tx kvdb.RwTx, - report *channeldb.ResolverReport) er.R - - ChainArbitratorConfig -} - -// ReportOutputType describes the type of output that is being reported -// on. 
-type ReportOutputType uint8 - -const ( - // ReportOutputIncomingHtlc is an incoming hash time locked contract on - // the commitment tx. - ReportOutputIncomingHtlc ReportOutputType = iota - - // ReportOutputOutgoingHtlc is an outgoing hash time locked contract on - // the commitment tx. - ReportOutputOutgoingHtlc - - // ReportOutputUnencumbered is an uncontested output on the commitment - // transaction paying to us directly. - ReportOutputUnencumbered - - // ReportOutputAnchor is an anchor output on the commitment tx. - ReportOutputAnchor -) - -// ContractReport provides a summary of a commitment tx output. -type ContractReport struct { - // Outpoint is the final output that will be swept back to the wallet. - Outpoint wire.OutPoint - - // Type indicates the type of the reported output. - Type ReportOutputType - - // Amount is the final value that will be swept in back to the wallet. - Amount btcutil.Amount - - // MaturityHeight is the absolute block height that this output will - // mature at. - MaturityHeight uint32 - - // Stage indicates whether the htlc is in the CLTV-timeout stage (1) or - // the CSV-delay stage (2). A stage 1 htlc's maturity height will be set - // to its expiry height, while a stage 2 htlc's maturity height will be - // set to its confirmation height plus the maturity requirement. - Stage uint32 - - // LimboBalance is the total number of frozen coins within this - // contract. - LimboBalance btcutil.Amount - - // RecoveredBalance is the total value that has been successfully swept - // back to the user's wallet. - RecoveredBalance btcutil.Amount -} - -// resolverReport creates a resolve report using some of the information in the -// contract report. 
-func (c *ContractReport) resolverReport(spendTx *chainhash.Hash, - resolverType channeldb.ResolverType, - outcome channeldb.ResolverOutcome) *channeldb.ResolverReport { - - return &channeldb.ResolverReport{ - OutPoint: c.Outpoint, - Amount: c.Amount, - ResolverType: resolverType, - ResolverOutcome: outcome, - SpendTxID: spendTx, - } -} - -// htlcSet represents the set of active HTLCs on a given commitment -// transaction. -type htlcSet struct { - // incomingHTLCs is a map of all incoming HTLCs on the target - // commitment transaction. We may potentially go onchain to claim the - // funds sent to us within this set. - incomingHTLCs map[uint64]channeldb.HTLC - - // outgoingHTLCs is a map of all outgoing HTLCs on the target - // commitment transaction. We may potentially go onchain to reclaim the - // funds that are currently in limbo. - outgoingHTLCs map[uint64]channeldb.HTLC -} - -// newHtlcSet constructs a new HTLC set from a slice of HTLC's. -func newHtlcSet(htlcs []channeldb.HTLC) htlcSet { - outHTLCs := make(map[uint64]channeldb.HTLC) - inHTLCs := make(map[uint64]channeldb.HTLC) - for _, htlc := range htlcs { - if htlc.Incoming { - inHTLCs[htlc.HtlcIndex] = htlc - continue - } - - outHTLCs[htlc.HtlcIndex] = htlc - } - - return htlcSet{ - incomingHTLCs: inHTLCs, - outgoingHTLCs: outHTLCs, - } -} - -// HtlcSetKey is a two-tuple that uniquely identifies a set of HTLCs on a -// commitment transaction. -type HtlcSetKey struct { - // IsRemote denotes if the HTLCs are on the remote commitment - // transaction. - IsRemote bool - - // IsPending denotes if the commitment transaction that HTLCS are on - // are pending (the higher of two unrevoked commitments). - IsPending bool -} - -var ( - // LocalHtlcSet is the HtlcSetKey used for local commitments. - LocalHtlcSet = HtlcSetKey{IsRemote: false, IsPending: false} - - // RemoteHtlcSet is the HtlcSetKey used for remote commitments. 
- RemoteHtlcSet = HtlcSetKey{IsRemote: true, IsPending: false} - - // RemotePendingHtlcSet is the HtlcSetKey used for dangling remote - // commitment transactions. - RemotePendingHtlcSet = HtlcSetKey{IsRemote: true, IsPending: true} -) - -// String returns a human readable string describing the target HtlcSetKey. -func (h HtlcSetKey) String() string { - switch h { - case LocalHtlcSet: - return "LocalHtlcSet" - case RemoteHtlcSet: - return "RemoteHtlcSet" - case RemotePendingHtlcSet: - return "RemotePendingHtlcSet" - default: - return "unknown HtlcSetKey" - } -} - -// ChannelArbitrator is the on-chain arbitrator for a particular channel. The -// struct will keep in sync with the current set of HTLCs on the commitment -// transaction. The job of the attendant is to go on-chain to either settle or -// cancel an HTLC as necessary iff: an HTLC times out, or we known the -// pre-image to an HTLC, but it wasn't settled by the link off-chain. The -// ChannelArbitrator will factor in an expected confirmation delta when -// broadcasting to ensure that we avoid any possibility of race conditions, and -// sweep the output(s) without contest. -type ChannelArbitrator struct { - started int32 // To be used atomically. - stopped int32 // To be used atomically. - - // startTimestamp is the time when this ChannelArbitrator was started. - startTimestamp time.Time - - // log is a persistent log that the attendant will use to checkpoint - // its next action, and the state of any unresolved contracts. - log ArbitratorLog - - // activeHTLCs is the set of active incoming/outgoing HTLC's on all - // currently valid commitment transactions. - activeHTLCs map[HtlcSetKey]htlcSet - - // cfg contains all the functionality that the ChannelArbitrator requires - // to do its duty. - cfg ChannelArbitratorConfig - - // blocks is a channel that the arbitrator will receive new blocks on. - // This channel should be buffered by so that it does not block the - // sender. 
- blocks chan int32 - - // signalUpdates is a channel that any new live signals for the channel - // we're watching over will be sent. - signalUpdates chan *signalUpdateMsg - - // htlcUpdates is a channel that is sent upon with new updates from the - // active channel. Each time a new commitment state is accepted, the - // set of HTLC's on the new state should be sent across this channel. - htlcUpdates <-chan *ContractUpdate - - // activeResolvers is a slice of any active resolvers. This is used to - // be able to signal them for shutdown in the case that we shutdown. - activeResolvers []ContractResolver - - // activeResolversLock prevents simultaneous read and write to the - // resolvers slice. - activeResolversLock sync.RWMutex - - // resolutionSignal is a channel that will be sent upon by contract - // resolvers once their contract has been fully resolved. With each - // send, we'll check to see if the contract is fully resolved. - resolutionSignal chan struct{} - - // forceCloseReqs is a channel that requests to forcibly close the - // contract will be sent over. - forceCloseReqs chan *forceCloseReq - - // state is the current state of the arbitrator. This state is examined - // upon start up to decide which actions to take. - state ArbitratorState - - wg sync.WaitGroup - quit chan struct{} -} - -// NewChannelArbitrator returns a new instance of a ChannelArbitrator backed by -// the passed config struct. 
-func NewChannelArbitrator(cfg ChannelArbitratorConfig, - htlcSets map[HtlcSetKey]htlcSet, log ArbitratorLog) *ChannelArbitrator { - - return &ChannelArbitrator{ - log: log, - blocks: make(chan int32, arbitratorBlockBufferSize), - signalUpdates: make(chan *signalUpdateMsg), - htlcUpdates: make(<-chan *ContractUpdate), - resolutionSignal: make(chan struct{}), - forceCloseReqs: make(chan *forceCloseReq), - activeHTLCs: htlcSets, - cfg: cfg, - quit: make(chan struct{}), - } -} - -// chanArbStartState contains the information from disk that we need to start -// up a channel arbitrator. -type chanArbStartState struct { - currentState ArbitratorState - commitSet *CommitSet -} - -// getStartState retrieves the information from disk that our channel arbitrator -// requires to start. -func (c *ChannelArbitrator) getStartState(tx kvdb.RTx) (*chanArbStartState, - er.R) { - - // First, we'll read our last state from disk, so our internal state - // machine can act accordingly. - state, err := c.log.CurrentState(tx) - if err != nil { - return nil, err - } - - // Next we'll fetch our confirmed commitment set. This will only exist - // if the channel has been closed out on chain for modern nodes. For - // older nodes, this won't be found at all, and will rely on the - // existing written chain actions. Additionally, if this channel hasn't - // logged any actions in the log, then this field won't be present. - commitSet, err := c.log.FetchConfirmedCommitSet(tx) - if err != nil && !errNoCommitSet.Is(err) && !errScopeBucketNoExist.Is(err) { - return nil, err - } - - return &chanArbStartState{ - currentState: state, - commitSet: commitSet, - }, nil -} - -// Start starts all the goroutines that the ChannelArbitrator needs to operate. -// If takes a start state, which will be looked up on disk if it is not -// provided. 
-func (c *ChannelArbitrator) Start(state *chanArbStartState) er.R { - if !atomic.CompareAndSwapInt32(&c.started, 0, 1) { - return nil - } - c.startTimestamp = c.cfg.Clock.Now() - - // If the state passed in is nil, we look it up now. - if state == nil { - var err er.R - state, err = c.getStartState(nil) - if err != nil { - return err - } - } - - log.Debugf("Starting ChannelArbitrator(%v), htlc_set=%v", - c.cfg.ChanPoint, log.C(func() string { - return spew.Sdump(c.activeHTLCs) - }), - ) - - // Set our state from our starting state. - c.state = state.currentState - - _, bestHeight, err := c.cfg.ChainIO.GetBestBlock() - if err != nil { - return err - } - - // If the channel has been marked pending close in the database, and we - // haven't transitioned the state machine to StateContractClosed (or a - // succeeding state), then a state transition most likely failed. We'll - // try to recover from this by manually advancing the state by setting - // the corresponding close trigger. - trigger := chainTrigger - triggerHeight := uint32(bestHeight) - if c.cfg.IsPendingClose { - switch c.state { - case StateDefault: - fallthrough - case StateBroadcastCommit: - fallthrough - case StateCommitmentBroadcasted: - switch c.cfg.CloseType { - - case channeldb.CooperativeClose: - trigger = coopCloseTrigger - - case channeldb.BreachClose: - trigger = breachCloseTrigger - - case channeldb.LocalForceClose: - trigger = localCloseTrigger - - case channeldb.RemoteForceClose: - trigger = remoteCloseTrigger - } - - log.Warnf("ChannelArbitrator(%v): detected stalled "+ - "state=%v for closed channel", - c.cfg.ChanPoint, c.state) - } - - triggerHeight = c.cfg.ClosingHeight - } - - log.Infof("ChannelArbitrator(%v): starting state=%v, trigger=%v, "+ - "triggerHeight=%v", c.cfg.ChanPoint, c.state, trigger, - triggerHeight) - - // We'll now attempt to advance our state forward based on the current - // on-chain state, and our set of active contracts. 
- startingState := c.state - nextState, _, err := c.advanceState( - triggerHeight, trigger, state.commitSet, - ) - if err != nil { - switch { - - // If we detect that we tried to fetch resolutions, but failed, - // this channel was marked closed in the database before - // resolutions successfully written. In this case there is not - // much we can do, so we don't return the error. - case errScopeBucketNoExist.Is(err): - fallthrough - case errNoResolutions.Is(err): - log.Warnf("ChannelArbitrator(%v): detected closed"+ - "channel with no contract resolutions written.", - c.cfg.ChanPoint) - - default: - return err - } - } - - // If we start and ended at the awaiting full resolution state, then - // we'll relaunch our set of unresolved contracts. - if startingState == StateWaitingFullResolution && - nextState == StateWaitingFullResolution { - - // In order to relaunch the resolvers, we'll need to fetch the - // set of HTLCs that were present in the commitment transaction - // at the time it was confirmed. commitSet.ConfCommitKey can't - // be nil at this point since we're in - // StateWaitingFullResolution. We can only be in - // StateWaitingFullResolution after we've transitioned from - // StateContractClosed which can only be triggered by the local - // or remote close trigger. This trigger is only fired when we - // receive a chain event from the chain watcher than the - // commitment has been confirmed on chain, and before we - // advance our state step, we call InsertConfirmedCommitSet. - err := c.relaunchResolvers(state.commitSet, triggerHeight) - if err != nil { - return err - } - } - - c.wg.Add(1) - go c.channelAttendant(bestHeight) - return nil -} - -// relauchResolvers relaunches the set of resolvers for unresolved contracts in -// order to provide them with information that's not immediately available upon -// starting the ChannelArbitrator. 
This information should ideally be stored in -// the database, so this only serves as a intermediate work-around to prevent a -// migration. -func (c *ChannelArbitrator) relaunchResolvers(commitSet *CommitSet, - heightHint uint32) er.R { - - // We'll now query our log to see if there are any active unresolved - // contracts. If this is the case, then we'll relaunch all contract - // resolvers. - unresolvedContracts, err := c.log.FetchUnresolvedContracts() - if err != nil { - return err - } - - // Retrieve the commitment tx hash from the log. - contractResolutions, err := c.log.FetchContractResolutions() - if err != nil { - log.Errorf("unable to fetch contract resolutions: %v", - err) - return err - } - commitHash := contractResolutions.CommitHash - - // In prior versions of lnd, the information needed to supplement the - // resolvers (in most cases, the full amount of the HTLC) was found in - // the chain action map, which is now deprecated. As a result, if the - // commitSet is nil (an older node with unresolved HTLCs at time of - // upgrade), then we'll use the chain action information in place. The - // chain actions may exclude some information, but we cannot recover it - // for these older nodes at the moment. - var confirmedHTLCs []channeldb.HTLC - if commitSet != nil { - confirmedHTLCs = commitSet.HtlcSets[*commitSet.ConfCommitKey] - } else { - chainActions, err := c.log.FetchChainActions() - if err != nil { - log.Errorf("unable to fetch chain actions: %v", err) - return err - } - for _, htlcs := range chainActions { - confirmedHTLCs = append(confirmedHTLCs, htlcs...) - } - } - - // Reconstruct the htlc outpoints and data from the chain action log. - // The purpose of the constructed htlc map is to supplement to - // resolvers restored from database with extra data. Ideally this data - // is stored as part of the resolver in the log. This is a workaround - // to prevent a db migration. 
We use all available htlc sets here in - // order to ensure we have complete coverage. - htlcMap := make(map[wire.OutPoint]*channeldb.HTLC) - for _, htlc := range confirmedHTLCs { - htlc := htlc - outpoint := wire.OutPoint{ - Hash: commitHash, - Index: uint32(htlc.OutputIndex), - } - htlcMap[outpoint] = &htlc - } - - log.Infof("ChannelArbitrator(%v): relaunching %v contract "+ - "resolvers", c.cfg.ChanPoint, len(unresolvedContracts)) - - for _, resolver := range unresolvedContracts { - htlcResolver, ok := resolver.(htlcContractResolver) - if !ok { - continue - } - - htlcPoint := htlcResolver.HtlcPoint() - htlc, ok := htlcMap[htlcPoint] - if !ok { - return er.Errorf( - "htlc resolver %T unavailable", resolver, - ) - } - - htlcResolver.Supplement(*htlc) - } - - // The anchor resolver is stateless and can always be re-instantiated. - if contractResolutions.AnchorResolution != nil { - anchorResolver := newAnchorResolver( - contractResolutions.AnchorResolution.AnchorSignDescriptor, - contractResolutions.AnchorResolution.CommitAnchor, - heightHint, c.cfg.ChanPoint, - ResolverConfig{ - ChannelArbitratorConfig: c.cfg, - }, - ) - unresolvedContracts = append(unresolvedContracts, anchorResolver) - } - - c.launchResolvers(unresolvedContracts) - - return nil -} - -// Report returns htlc reports for the active resolvers. -func (c *ChannelArbitrator) Report() []*ContractReport { - c.activeResolversLock.RLock() - defer c.activeResolversLock.RUnlock() - - var reports []*ContractReport - for _, resolver := range c.activeResolvers { - r, ok := resolver.(reportingContractResolver) - if !ok { - continue - } - - report := r.report() - if report == nil { - continue - } - - reports = append(reports, report) - } - - return reports -} - -// Stop signals the ChannelArbitrator for a graceful shutdown. 
-func (c *ChannelArbitrator) Stop() er.R { - if !atomic.CompareAndSwapInt32(&c.stopped, 0, 1) { - return nil - } - - log.Debugf("Stopping ChannelArbitrator(%v)", c.cfg.ChanPoint) - - if c.cfg.ChainEvents.Cancel != nil { - go c.cfg.ChainEvents.Cancel() - } - - c.activeResolversLock.RLock() - for _, activeResolver := range c.activeResolvers { - activeResolver.Stop() - } - c.activeResolversLock.RUnlock() - - close(c.quit) - c.wg.Wait() - - return nil -} - -// transitionTrigger is an enum that denotes exactly *why* a state transition -// was initiated. This is useful as depending on the initial trigger, we may -// skip certain states as those actions are expected to have already taken -// place as a result of the external trigger. -type transitionTrigger uint8 - -const ( - // chainTrigger is a transition trigger that has been attempted due to - // changing on-chain conditions such as a block which times out HTLC's - // being attached. - chainTrigger transitionTrigger = iota - - // userTrigger is a transition trigger driven by user action. Examples - // of such a trigger include a user requesting a force closure of the - // channel. - userTrigger - - // remoteCloseTrigger is a transition trigger driven by the remote - // peer's commitment being confirmed. - remoteCloseTrigger - - // localCloseTrigger is a transition trigger driven by our commitment - // being confirmed. - localCloseTrigger - - // coopCloseTrigger is a transition trigger driven by a cooperative - // close transaction being confirmed. - coopCloseTrigger - - // breachCloseTrigger is a transition trigger driven by a remote breach - // being confirmed. In this case the channel arbitrator won't have to - // do anything, so we'll just clean up and exit gracefully. - breachCloseTrigger -) - -// String returns a human readable string describing the passed -// transitionTrigger. 
-func (t transitionTrigger) String() string { - switch t { - case chainTrigger: - return "chainTrigger" - - case remoteCloseTrigger: - return "remoteCloseTrigger" - - case userTrigger: - return "userTrigger" - - case localCloseTrigger: - return "localCloseTrigger" - - case coopCloseTrigger: - return "coopCloseTrigger" - - case breachCloseTrigger: - return "breachCloseTrigger" - - default: - return "unknown trigger" - } -} - -// stateStep is a help method that examines our internal state, and attempts -// the appropriate state transition if necessary. The next state we transition -// to is returned, Additionally, if the next transition results in a commitment -// broadcast, the commitment transaction itself is returned. -func (c *ChannelArbitrator) stateStep( - triggerHeight uint32, trigger transitionTrigger, - confCommitSet *CommitSet) (ArbitratorState, *wire.MsgTx, er.R) { - - var ( - nextState ArbitratorState - closeTx *wire.MsgTx - ) - switch c.state { - - // If we're in the default state, then we'll check our set of actions - // to see if while we were down, conditions have changed. - case StateDefault: - log.Debugf("ChannelArbitrator(%v): new block (height=%v) "+ - "examining active HTLC's", c.cfg.ChanPoint, - triggerHeight) - - // As a new block has been connected to the end of the main - // chain, we'll check to see if we need to make any on-chain - // claims on behalf of the channel contract that we're - // arbitrating for. If a commitment has confirmed, then we'll - // use the set snapshot from the chain, otherwise we'll use our - // current set. - var htlcs map[HtlcSetKey]htlcSet - if confCommitSet != nil { - htlcs = confCommitSet.toActiveHTLCSets() - } else { - htlcs = c.activeHTLCs - } - chainActions, err := c.checkLocalChainActions( - triggerHeight, trigger, htlcs, false, - ) - if err != nil { - return StateDefault, nil, err - } - - // If there are no actions to be made, then we'll remain in the - // default state. 
If this isn't a self initiated event (we're - // checking due to a chain update), then we'll exit now. - if len(chainActions) == 0 && trigger == chainTrigger { - log.Tracef("ChannelArbitrator(%v): no actions for "+ - "chain trigger, terminating", c.cfg.ChanPoint) - - return StateDefault, closeTx, nil - } - - // Otherwise, we'll log that we checked the HTLC actions as the - // commitment transaction has already been broadcast. - log.Tracef("ChannelArbitrator(%v): logging chain_actions=%v", - c.cfg.ChanPoint, - log.C(func() string { - return spew.Sdump(chainActions) - })) - - // Depending on the type of trigger, we'll either "tunnel" - // through to a farther state, or just proceed linearly to the - // next state. - switch trigger { - - // If this is a chain trigger, then we'll go straight to the - // next state, as we still need to broadcast the commitment - // transaction. - case chainTrigger: - fallthrough - case userTrigger: - nextState = StateBroadcastCommit - - // If the trigger is a cooperative close being confirmed, then - // we can go straight to StateFullyResolved, as there won't be - // any contracts to resolve. The same is true in the case of a - // breach. - case coopCloseTrigger, breachCloseTrigger: - nextState = StateFullyResolved - - // Otherwise, if this state advance was triggered by a - // commitment being confirmed on chain, then we'll jump - // straight to the state where the contract has already been - // closed, and we will inspect the set of unresolved contracts. - case localCloseTrigger: - log.Errorf("ChannelArbitrator(%v): unexpected local "+ - "commitment confirmed while in StateDefault", - c.cfg.ChanPoint) - fallthrough - case remoteCloseTrigger: - nextState = StateContractClosed - } - - // If we're in this state, then we've decided to broadcast the - // commitment transaction. We enter this state either due to an outside - // sub-system, or because an on-chain action has been triggered. 
- case StateBroadcastCommit: - // Under normal operation, we can only enter - // StateBroadcastCommit via a user or chain trigger. On restart, - // this state may be reexecuted after closing the channel, but - // failing to commit to StateContractClosed or - // StateFullyResolved. In that case, one of the four close - // triggers will be presented, signifying that we should skip - // rebroadcasting, and go straight to resolving the on-chain - // contract or marking the channel resolved. - switch trigger { - case localCloseTrigger, remoteCloseTrigger: - log.Infof("ChannelArbitrator(%v): detected %s "+ - "close after closing channel, fast-forwarding "+ - "to %s to resolve contract", - c.cfg.ChanPoint, trigger, StateContractClosed) - return StateContractClosed, closeTx, nil - - case coopCloseTrigger, breachCloseTrigger: - log.Infof("ChannelArbitrator(%v): detected %s "+ - "close after closing channel, fast-forwarding "+ - "to %s to resolve contract", - c.cfg.ChanPoint, trigger, StateFullyResolved) - return StateFullyResolved, closeTx, nil - } - - log.Infof("ChannelArbitrator(%v): force closing "+ - "chan", c.cfg.ChanPoint) - - // Now that we have all the actions decided for the set of - // HTLC's, we'll broadcast the commitment transaction, and - // signal the link to exit. - - // We'll tell the switch that it should remove the link for - // this channel, in addition to fetching the force close - // summary needed to close this channel on chain. - closeSummary, err := c.cfg.Channel.ForceCloseChan() - if err != nil { - log.Errorf("ChannelArbitrator(%v): unable to "+ - "force close: %v", c.cfg.ChanPoint, err) - return StateError, closeTx, err - } - closeTx = closeSummary.CloseTx - - // Before publishing the transaction, we store it to the - // database, such that we can re-publish later in case it - // didn't propagate. We initiated the force close, so we - // mark broadcast with local initiator set to true. 
- err = c.cfg.MarkCommitmentBroadcasted(closeTx, true) - if err != nil { - log.Errorf("ChannelArbitrator(%v): unable to "+ - "mark commitment broadcasted: %v", - c.cfg.ChanPoint, err) - return StateError, closeTx, err - } - - // With the close transaction in hand, broadcast the - // transaction to the network, thereby entering the post - // channel resolution state. - log.Infof("Broadcasting force close transaction %v, "+ - "ChannelPoint(%v): %v", closeTx.TxHash(), - c.cfg.ChanPoint, - log.C(func() string { - return spew.Sdump(closeTx) - })) - - // At this point, we'll now broadcast the commitment - // transaction itself. - label := labels.MakeLabel( - labels.LabelTypeChannelClose, &c.cfg.ShortChanID, - ) - - if err := c.cfg.PublishTx(closeTx, label); err != nil { - log.Errorf("ChannelArbitrator(%v): unable to broadcast "+ - "close tx: %v", c.cfg.ChanPoint, err) - if !lnwallet.ErrDoubleSpend.Is(err) { - return StateError, closeTx, err - } - } - - // We go to the StateCommitmentBroadcasted state, where we'll - // be waiting for the commitment to be confirmed. - nextState = StateCommitmentBroadcasted - - // In this state we have broadcasted our own commitment, and will need - // to wait for a commitment (not necessarily the one we broadcasted!) - // to be confirmed. - case StateCommitmentBroadcasted: - switch trigger { - - // We are waiting for a commitment to be confirmed. - case chainTrigger, userTrigger: - // The commitment transaction has been broadcast, but it - // doesn't necessarily need to be the commitment - // transaction version that is going to be confirmed. To - // be sure that any of those versions can be anchored - // down, we now submit all anchor resolutions to the - // sweeper. The sweeper will keep trying to sweep all of - // them. - // - // Note that the sweeper is idempotent. If we ever - // happen to end up at this point in the code again, no - // harm is done by re-offering the anchors to the - // sweeper. 
- anchors, err := c.cfg.Channel.NewAnchorResolutions() - if err != nil { - return StateError, closeTx, err - } - - err = c.sweepAnchors(anchors, triggerHeight) - if err != nil { - return StateError, closeTx, err - } - - nextState = StateCommitmentBroadcasted - - // If this state advance was triggered by any of the - // commitments being confirmed, then we'll jump to the state - // where the contract has been closed. - case localCloseTrigger, remoteCloseTrigger: - nextState = StateContractClosed - - // If a coop close or breach was confirmed, jump straight to - // the fully resolved state. - case coopCloseTrigger, breachCloseTrigger: - nextState = StateFullyResolved - } - - log.Infof("ChannelArbitrator(%v): trigger %v moving from "+ - "state %v to %v", c.cfg.ChanPoint, trigger, c.state, - nextState) - - // If we're in this state, then the contract has been fully closed to - // outside sub-systems, so we'll process the prior set of on-chain - // contract actions and launch a set of resolvers. - case StateContractClosed: - // First, we'll fetch our chain actions, and both sets of - // resolutions so we can process them. - contractResolutions, err := c.log.FetchContractResolutions() - if err != nil { - log.Errorf("unable to fetch contract resolutions: %v", - err) - return StateError, closeTx, err - } - - // If the resolution is empty, and we have no HTLCs at all to - // tend to, then we're done here. We don't need to launch any - // resolvers, and can go straight to our final state. - if contractResolutions.IsEmpty() && confCommitSet.IsEmpty() { - log.Infof("ChannelArbitrator(%v): contract "+ - "resolutions empty, marking channel as fully resolved!", - c.cfg.ChanPoint) - nextState = StateFullyResolved - break - } - - // Now that we know we'll need to act, we'll process the htlc - // actions, wen create the structures we need to resolve all - // outstanding contracts. 
- htlcResolvers, pktsToSend, err := c.prepContractResolutions( - contractResolutions, triggerHeight, trigger, - confCommitSet, - ) - if err != nil { - log.Errorf("ChannelArbitrator(%v): unable to "+ - "resolve contracts: %v", c.cfg.ChanPoint, err) - return StateError, closeTx, err - } - - log.Debugf("ChannelArbitrator(%v): sending resolution message=%v", - c.cfg.ChanPoint, - log.C(func() string { - return spew.Sdump(pktsToSend) - })) - - // With the commitment broadcast, we'll then send over all - // messages we can send immediately. - if len(pktsToSend) != 0 { - err := c.cfg.DeliverResolutionMsg(pktsToSend...) - if err != nil { - // TODO(roasbeef): make sure packet sends are - // idempotent - log.Errorf("unable to send pkts: %v", err) - return StateError, closeTx, err - } - } - - log.Debugf("ChannelArbitrator(%v): inserting %v contract "+ - "resolvers", c.cfg.ChanPoint, len(htlcResolvers)) - - err = c.log.InsertUnresolvedContracts(nil, htlcResolvers...) - if err != nil { - return StateError, closeTx, err - } - - // Finally, we'll launch all the required contract resolvers. - // Once they're all resolved, we're no longer needed. - c.launchResolvers(htlcResolvers) - - nextState = StateWaitingFullResolution - - // This is our terminal state. We'll keep returning this state until - // all contracts are fully resolved. - case StateWaitingFullResolution: - log.Infof("ChannelArbitrator(%v): still awaiting contract "+ - "resolution", c.cfg.ChanPoint) - - numUnresolved, err := c.log.FetchUnresolvedContracts() - if err != nil { - return StateError, closeTx, err - } - - // If we still have unresolved contracts, then we'll stay alive - // to oversee their resolution. - if len(numUnresolved) != 0 { - nextState = StateWaitingFullResolution - break - } - - nextState = StateFullyResolved - - // If we start as fully resolved, then we'll end as fully resolved. 
- case StateFullyResolved: - // To ensure that the state of the contract in persistent - // storage is properly reflected, we'll mark the contract as - // fully resolved now. - nextState = StateFullyResolved - - log.Infof("ChannelPoint(%v) has been fully resolved "+ - "on-chain at height=%v", c.cfg.ChanPoint, triggerHeight) - - if err := c.cfg.MarkChannelResolved(); err != nil { - log.Errorf("unable to mark channel resolved: %v", err) - return StateError, closeTx, err - } - } - - log.Tracef("ChannelArbitrator(%v): next_state=%v", c.cfg.ChanPoint, - nextState) - - return nextState, closeTx, nil -} - -// sweepAnchors offers all given anchor resolutions to the sweeper. It requests -// sweeping at the minimum fee rate. This fee rate can be upped manually by the -// user via the BumpFee rpc. -func (c *ChannelArbitrator) sweepAnchors(anchors []*lnwallet.AnchorResolution, - heightHint uint32) er.R { - - // Use the chan id as the exclusive group. This prevents any of the - // anchors from being batched together. - exclusiveGroup := c.cfg.ShortChanID.ToUint64() - - for _, anchor := range anchors { - log.Debugf("ChannelArbitrator(%v): pre-confirmation sweep of "+ - "anchor of tx %v", c.cfg.ChanPoint, anchor.CommitAnchor) - - // Prepare anchor output for sweeping. - anchorInput := input.MakeBaseInput( - &anchor.CommitAnchor, - input.CommitmentAnchor, - &anchor.AnchorSignDescriptor, - heightHint, - &input.TxInfo{ - Fee: anchor.CommitFee, - Weight: anchor.CommitWeight, - }, - ) - - // Sweep anchor output with a confirmation target fee - // preference. Because this is a cpfp-operation, the anchor will - // only be attempted to sweep when the current fee estimate for - // the confirmation target exceeds the commit fee rate. - // - // Also signal that this is a force sweep, so that the anchor - // will be swept even if it isn't economical purely based on the - // anchor value. 
- _, err := c.cfg.Sweeper.SweepInput( - &anchorInput, - sweep.Params{ - Fee: sweep.FeePreference{ - ConfTarget: anchorSweepConfTarget, - }, - Force: true, - ExclusiveGroup: &exclusiveGroup, - }, - ) - if err != nil { - return err - } - } - - return nil -} - -// launchResolvers updates the activeResolvers list and starts the resolvers. -func (c *ChannelArbitrator) launchResolvers(resolvers []ContractResolver) { - c.activeResolversLock.Lock() - defer c.activeResolversLock.Unlock() - - c.activeResolvers = resolvers - for _, contract := range resolvers { - c.wg.Add(1) - go c.resolveContract(contract) - } -} - -// advanceState is the main driver of our state machine. This method is an -// iterative function which repeatedly attempts to advance the internal state -// of the channel arbitrator. The state will be advanced until we reach a -// redundant transition, meaning that the state transition is a noop. The final -// param is a callback that allows the caller to execute an arbitrary action -// after each state transition. -func (c *ChannelArbitrator) advanceState( - triggerHeight uint32, trigger transitionTrigger, - confCommitSet *CommitSet) (ArbitratorState, *wire.MsgTx, er.R) { - - var ( - priorState ArbitratorState - forceCloseTx *wire.MsgTx - ) - - // We'll continue to advance our state forward until the state we - // transition to is that same state that we started at. - for { - priorState = c.state - log.Tracef("ChannelArbitrator(%v): attempting state step with "+ - "trigger=%v from state=%v", c.cfg.ChanPoint, trigger, - priorState) - - nextState, closeTx, err := c.stateStep( - triggerHeight, trigger, confCommitSet, - ) - if err != nil { - log.Errorf("ChannelArbitrator(%v): unable to advance "+ - "state: %v", c.cfg.ChanPoint, err) - return priorState, nil, err - } - - if forceCloseTx == nil && closeTx != nil { - forceCloseTx = closeTx - } - - // Our termination transition is a noop transition. 
If we get - // our prior state back as the next state, then we'll - // terminate. - if nextState == priorState { - log.Tracef("ChannelArbitrator(%v): terminating at "+ - "state=%v", c.cfg.ChanPoint, nextState) - return nextState, forceCloseTx, nil - } - - // As the prior state was successfully executed, we can now - // commit the next state. This ensures that we will re-execute - // the prior state if anything fails. - if err := c.log.CommitState(nextState); err != nil { - log.Errorf("ChannelArbitrator(%v): unable to commit "+ - "next state(%v): %v", c.cfg.ChanPoint, - nextState, err) - return priorState, nil, err - } - c.state = nextState - } -} - -// ChainAction is an enum that encompasses all possible on-chain actions -// we'll take for a set of HTLC's. -type ChainAction uint8 - -const ( - // NoAction is the min chainAction type, indicating that no action - // needs to be taken for a given HTLC. - NoAction ChainAction = 0 - - // HtlcTimeoutAction indicates that the HTLC will timeout soon. As a - // result, we should get ready to sweep it on chain after the timeout. - HtlcTimeoutAction = 1 - - // HtlcClaimAction indicates that we should claim the HTLC on chain - // before its timeout period. - HtlcClaimAction = 2 - - // HtlcFailNowAction indicates that we should fail an outgoing HTLC - // immediately by cancelling it backwards as it has no corresponding - // output in our commitment transaction. - HtlcFailNowAction = 3 - - // HtlcOutgoingWatchAction indicates that we can't yet timeout this - // HTLC, but we had to go to chain on order to resolve an existing - // HTLC. In this case, we'll either: time it out once it expires, or - // will learn the pre-image if the remote party claims the output. In - // this case, well add the pre-image to our global store. - HtlcOutgoingWatchAction = 4 - - // HtlcIncomingWatchAction indicates that we don't yet have the - // pre-image to claim incoming HTLC, but we had to go to chain in order - // to resolve and existing HTLC. 
In this case, we'll either: let the - // other party time it out, or eventually learn of the pre-image, in - // which case we'll claim on chain. - HtlcIncomingWatchAction = 5 -) - -// String returns a human readable string describing a chain action. -func (c ChainAction) String() string { - switch c { - case NoAction: - return "NoAction" - - case HtlcTimeoutAction: - return "HtlcTimeoutAction" - - case HtlcClaimAction: - return "HtlcClaimAction" - - case HtlcFailNowAction: - return "HtlcFailNowAction" - - case HtlcOutgoingWatchAction: - return "HtlcOutgoingWatchAction" - - case HtlcIncomingWatchAction: - return "HtlcIncomingWatchAction" - - default: - return "" - } -} - -// ChainActionMap is a map of a chain action, to the set of HTLC's that need to -// be acted upon for a given action type. The channel -type ChainActionMap map[ChainAction][]channeldb.HTLC - -// Merge merges the passed chain actions with the target chain action map. -func (c ChainActionMap) Merge(actions ChainActionMap) { - for chainAction, htlcs := range actions { - c[chainAction] = append(c[chainAction], htlcs...) - } -} - -// shouldGoOnChain takes into account the absolute timeout of the HTLC, if the -// confirmation delta that we need is close, and returns a bool indicating if -// we should go on chain to claim. We do this rather than waiting up until the -// last minute as we want to ensure that when we *need* (HTLC is timed out) to -// sweep, the commitment is already confirmed. -func (c *ChannelArbitrator) shouldGoOnChain(htlc channeldb.HTLC, - broadcastDelta, currentHeight uint32) bool { - - // We'll calculate the broadcast cut off for this HTLC. This is the - // height that (based on our current fee estimation) we should - // broadcast in order to ensure the commitment transaction is confirmed - // before the HTLC fully expires. 
- broadcastCutOff := htlc.RefundTimeout - broadcastDelta - - log.Tracef("ChannelArbitrator(%v): examining outgoing contract: "+ - "expiry=%v, cutoff=%v, height=%v", c.cfg.ChanPoint, htlc.RefundTimeout, - broadcastCutOff, currentHeight) - - // TODO(roasbeef): take into account default HTLC delta, don't need to - // broadcast immediately - // * can then batch with SINGLE | ANYONECANPAY - - // We should on-chain for this HTLC, iff we're within out broadcast - // cutoff window. - if currentHeight < broadcastCutOff { - return false - } - - // In case of incoming htlc we should go to chain. - if htlc.Incoming { - return true - } - - // For htlcs that are result of our initiated payments we give some grace - // period before force closing the channel. During this time we expect - // both nodes to connect and give a chance to the other node to send its - // updates and cancel the htlc. - // This shouldn't add any security risk as there is no incoming htlc to - // fulfill at this case and the expectation is that when the channel is - // active the other node will send update_fail_htlc to remove the htlc - // without closing the channel. It is up to the user to force close the - // channel if the peer misbehaves and doesn't send the update_fail_htlc. - // It is useful when this node is most of the time not online and is - // likely to miss the time slot where the htlc may be cancelled. - isForwarded := c.cfg.IsForwardedHTLC(c.cfg.ShortChanID, htlc.HtlcIndex) - upTime := c.cfg.Clock.Now().Sub(c.startTimestamp) - return isForwarded || upTime > c.cfg.PaymentsExpirationGracePeriod -} - -// checkCommitChainActions is called for each new block connected to the end of -// the main chain. Given the new block height, this new method will examine all -// active HTLC's, and determine if we need to go on-chain to claim any of them. -// A map of action -> []htlc is returned, detailing what action (if any) should -// be performed for each HTLC. 
For timed out HTLC's, once the commitment has -// been sufficiently confirmed, the HTLC's should be canceled backwards. For -// redeemed HTLC's, we should send the pre-image back to the incoming link. -func (c *ChannelArbitrator) checkCommitChainActions(height uint32, - trigger transitionTrigger, htlcs htlcSet) (ChainActionMap, er.R) { - - // TODO(roasbeef): would need to lock channel? channel totem? - // * race condition if adding and we broadcast, etc - // * or would make each instance sync? - - log.Debugf("ChannelArbitrator(%v): checking commit chain actions at "+ - "height=%v, in_htlc_count=%v, out_htlc_count=%v", - c.cfg.ChanPoint, height, - len(htlcs.incomingHTLCs), len(htlcs.outgoingHTLCs)) - - actionMap := make(ChainActionMap) - - // First, we'll make an initial pass over the set of incoming and - // outgoing HTLC's to decide if we need to go on chain at all. - haveChainActions := false - for _, htlc := range htlcs.outgoingHTLCs { - // We'll need to go on-chain for an outgoing HTLC if it was - // never resolved downstream, and it's "close" to timing out. - toChain := c.shouldGoOnChain(htlc, c.cfg.OutgoingBroadcastDelta, - height, - ) - - if toChain { - log.Debugf("ChannelArbitrator(%v): go to chain for "+ - "outgoing htlc %x: timeout=%v, "+ - "blocks_until_expiry=%v, broadcast_delta=%v", - c.cfg.ChanPoint, htlc.RHash[:], - htlc.RefundTimeout, htlc.RefundTimeout-height, - c.cfg.OutgoingBroadcastDelta, - ) - } - - haveChainActions = haveChainActions || toChain - } - - for _, htlc := range htlcs.incomingHTLCs { - // We'll need to go on-chain to pull an incoming HTLC iff we - // know the pre-image and it's close to timing out. We need to - // ensure that we claim the funds that our rightfully ours - // on-chain. 
- preimageAvailable, err := c.isPreimageAvailable(htlc.RHash) - if err != nil { - return nil, err - } - - if !preimageAvailable { - continue - } - - toChain := c.shouldGoOnChain(htlc, c.cfg.IncomingBroadcastDelta, - height, - ) - - if toChain { - log.Debugf("ChannelArbitrator(%v): go to chain for "+ - "incoming htlc %x: timeout=%v, "+ - "blocks_until_expiry=%v, broadcast_delta=%v", - c.cfg.ChanPoint, htlc.RHash[:], - htlc.RefundTimeout, htlc.RefundTimeout-height, - c.cfg.IncomingBroadcastDelta, - ) - } - - haveChainActions = haveChainActions || toChain - } - - // If we don't have any actions to make, then we'll return an empty - // action map. We only do this if this was a chain trigger though, as - // if we're going to broadcast the commitment (or the remote party did) - // we're *forced* to act on each HTLC. - if !haveChainActions && trigger == chainTrigger { - log.Tracef("ChannelArbitrator(%v): no actions to take at "+ - "height=%v", c.cfg.ChanPoint, height) - return actionMap, nil - } - - // Now that we know we'll need to go on-chain, we'll examine all of our - // active outgoing HTLC's to see if we either need to: sweep them after - // a timeout (then cancel backwards), cancel them backwards - // immediately, or watch them as they're still active contracts. - for _, htlc := range htlcs.outgoingHTLCs { - switch { - // If the HTLC is dust, then we can cancel it backwards - // immediately as there's no matching contract to arbitrate - // on-chain. We know the HTLC is dust, if the OutputIndex - // negative. - case htlc.OutputIndex < 0: - log.Tracef("ChannelArbitrator(%v): immediately "+ - "failing dust htlc=%x", c.cfg.ChanPoint, - htlc.RHash[:]) - - actionMap[HtlcFailNowAction] = append( - actionMap[HtlcFailNowAction], htlc, - ) - - // If we don't need to immediately act on this HTLC, then we'll - // mark it still "live". After we broadcast, we'll monitor it - // until the HTLC times out to see if we can also redeem it - // on-chain. 
- case !c.shouldGoOnChain(htlc, c.cfg.OutgoingBroadcastDelta, - height, - ): - // TODO(roasbeef): also need to be able to query - // circuit map to see if HTLC hasn't been fully - // resolved - // - // * can't fail incoming until if outgoing not yet - // failed - - log.Tracef("ChannelArbitrator(%v): watching chain to "+ - "decide action for outgoing htlc=%x", - c.cfg.ChanPoint, htlc.RHash[:]) - - actionMap[HtlcOutgoingWatchAction] = append( - actionMap[HtlcOutgoingWatchAction], htlc, - ) - - // Otherwise, we'll update our actionMap to mark that we need - // to sweep this HTLC on-chain - default: - log.Tracef("ChannelArbitrator(%v): going on-chain to "+ - "timeout htlc=%x", c.cfg.ChanPoint, htlc.RHash[:]) - - actionMap[HtlcTimeoutAction] = append( - actionMap[HtlcTimeoutAction], htlc, - ) - } - } - - // Similarly, for each incoming HTLC, now that we need to go on-chain, - // we'll either: sweep it immediately if we know the pre-image, or - // observe the output on-chain if we don't In this last, case we'll - // either learn of it eventually from the outgoing HTLC, or the sender - // will timeout the HTLC. - for _, htlc := range htlcs.incomingHTLCs { - // If the HTLC is dust, there is no action to be taken. - if htlc.OutputIndex < 0 { - log.Debugf("ChannelArbitrator(%v): no resolution "+ - "needed for incoming dust htlc=%x", - c.cfg.ChanPoint, htlc.RHash[:]) - - continue - } - - log.Tracef("ChannelArbitrator(%v): watching chain to decide "+ - "action for incoming htlc=%x", c.cfg.ChanPoint, - htlc.RHash[:]) - - actionMap[HtlcIncomingWatchAction] = append( - actionMap[HtlcIncomingWatchAction], htlc, - ) - } - - return actionMap, nil -} - -// isPreimageAvailable returns whether the hash preimage is available in either -// the preimage cache or the invoice database. -func (c *ChannelArbitrator) isPreimageAvailable(hash lntypes.Hash) (bool, - er.R) { - - // Start by checking the preimage cache for preimages of - // forwarded HTLCs. 
- _, preimageAvailable := c.cfg.PreimageDB.LookupPreimage( - hash, - ) - if preimageAvailable { - return true, nil - } - - // Then check if we have an invoice that can be settled by this HTLC. - // - // TODO(joostjager): Check that there are still more blocks remaining - // than the invoice cltv delta. We don't want to go to chain only to - // have the incoming contest resolver decide that we don't want to - // settle this invoice. - invoice, err := c.cfg.Registry.LookupInvoice(hash) - switch { - case err == nil: - case channeldb.ErrInvoiceNotFound.Is(err), channeldb.ErrNoInvoicesCreated.Is(err): - return false, nil - default: - return false, err - } - - preimageAvailable = invoice.Terms.PaymentPreimage != nil - - return preimageAvailable, nil -} - -// checkLocalChainActions is similar to checkCommitChainActions, but it also -// examines the set of HTLCs on the remote party's commitment. This allows us -// to ensure we're able to satisfy the HTLC timeout constraints for incoming vs -// outgoing HTLCs. -func (c *ChannelArbitrator) checkLocalChainActions( - height uint32, trigger transitionTrigger, - activeHTLCs map[HtlcSetKey]htlcSet, - commitsConfirmed bool) (ChainActionMap, er.R) { - - // First, we'll check our local chain actions as normal. This will only - // examine HTLCs on our local commitment (timeout or settle). - localCommitActions, err := c.checkCommitChainActions( - height, trigger, activeHTLCs[LocalHtlcSet], - ) - if err != nil { - return nil, err - } - - // Next, we'll examine the remote commitment (and maybe a dangling one) - // to see if the set difference of our HTLCs is non-empty. If so, then - // we may need to cancel back some HTLCs if we decide go to chain. - remoteDanglingActions := c.checkRemoteDanglingActions( - height, activeHTLCs, commitsConfirmed, - ) - - // Finally, we'll merge the two set of chain actions. 
- localCommitActions.Merge(remoteDanglingActions) - - return localCommitActions, nil -} - -// checkRemoteDanglingActions examines the set of remote commitments for any -// HTLCs that are close to timing out. If we find any, then we'll return a set -// of chain actions for HTLCs that are on our commitment, but not theirs to -// cancel immediately. -func (c *ChannelArbitrator) checkRemoteDanglingActions( - height uint32, activeHTLCs map[HtlcSetKey]htlcSet, - commitsConfirmed bool) ChainActionMap { - - var ( - pendingRemoteHTLCs []channeldb.HTLC - localHTLCs = make(map[uint64]struct{}) - remoteHTLCs = make(map[uint64]channeldb.HTLC) - actionMap = make(ChainActionMap) - ) - - // First, we'll construct two sets of the outgoing HTLCs: those on our - // local commitment, and those that are on the remote commitment(s). - for htlcSetKey, htlcs := range activeHTLCs { - if htlcSetKey.IsRemote { - for _, htlc := range htlcs.outgoingHTLCs { - remoteHTLCs[htlc.HtlcIndex] = htlc - } - } else { - for _, htlc := range htlcs.outgoingHTLCs { - localHTLCs[htlc.HtlcIndex] = struct{}{} - } - } - } - - // With both sets constructed, we'll now compute the set difference of - // our two sets of HTLCs. This'll give us the HTLCs that exist on the - // remote commitment transaction, but not on ours. - for htlcIndex, htlc := range remoteHTLCs { - if _, ok := localHTLCs[htlcIndex]; ok { - continue - } - - pendingRemoteHTLCs = append(pendingRemoteHTLCs, htlc) - } - - // Finally, we'll examine all the pending remote HTLCs for those that - // have expired. If we find any, then we'll recommend that they be - // failed now so we can free up the incoming HTLC. - for _, htlc := range pendingRemoteHTLCs { - // We'll now check if we need to go to chain in order to cancel - // the incoming HTLC. - goToChain := c.shouldGoOnChain(htlc, c.cfg.OutgoingBroadcastDelta, - height, - ) - - // If we don't need to go to chain, and no commitments have - // been confirmed, then we can move on. 
Otherwise, if - // commitments have been confirmed, then we need to cancel back - // *all* of the pending remote HTLCS. - if !goToChain && !commitsConfirmed { - continue - } - - log.Tracef("ChannelArbitrator(%v): immediately failing "+ - "htlc=%x from remote commitment", - c.cfg.ChanPoint, htlc.RHash[:]) - - actionMap[HtlcFailNowAction] = append( - actionMap[HtlcFailNowAction], htlc, - ) - } - - return actionMap -} - -// checkRemoteChainActions examines the two possible remote commitment chains -// and returns the set of chain actions we need to carry out if the remote -// commitment (non pending) confirms. The pendingConf indicates if the pending -// remote commitment confirmed. This is similar to checkCommitChainActions, but -// we'll immediately fail any HTLCs on the pending remote commit, but not the -// remote commit (or the other way around). -func (c *ChannelArbitrator) checkRemoteChainActions( - height uint32, trigger transitionTrigger, - activeHTLCs map[HtlcSetKey]htlcSet, - pendingConf bool) (ChainActionMap, er.R) { - - // First, we'll examine all the normal chain actions on the remote - // commitment that confirmed. - confHTLCs := activeHTLCs[RemoteHtlcSet] - if pendingConf { - confHTLCs = activeHTLCs[RemotePendingHtlcSet] - } - remoteCommitActions, err := c.checkCommitChainActions( - height, trigger, confHTLCs, - ) - if err != nil { - return nil, err - } - - // With this actions computed, we'll now check the diff of the HTLCs on - // the commitments, and cancel back any that are on the pending but not - // the non-pending. - remoteDiffActions := c.checkRemoteDiffActions( - height, activeHTLCs, pendingConf, - ) - - // Finally, we'll merge all the chain actions and the final set of - // chain actions. - remoteCommitActions.Merge(remoteDiffActions) - return remoteCommitActions, nil -} - -// checkRemoteDiffActions checks the set difference of the HTLCs on the remote -// confirmed commit and remote dangling commit for HTLCS that we need to cancel -// back. 
If we find any HTLCs on the remote pending but not the remote, then -// we'll mark them to be failed immediately. -func (c *ChannelArbitrator) checkRemoteDiffActions(height uint32, - activeHTLCs map[HtlcSetKey]htlcSet, - pendingConf bool) ChainActionMap { - - // First, we'll partition the HTLCs into those that are present on the - // confirmed commitment, and those on the dangling commitment. - confHTLCs := activeHTLCs[RemoteHtlcSet] - danglingHTLCs := activeHTLCs[RemotePendingHtlcSet] - if pendingConf { - confHTLCs = activeHTLCs[RemotePendingHtlcSet] - danglingHTLCs = activeHTLCs[RemoteHtlcSet] - } - - // Next, we'll create a set of all the HTLCs confirmed commitment. - remoteHtlcs := make(map[uint64]struct{}) - for _, htlc := range confHTLCs.outgoingHTLCs { - remoteHtlcs[htlc.HtlcIndex] = struct{}{} - } - - // With the remote HTLCs assembled, we'll mark any HTLCs only on the - // remote dangling commitment to be failed asap. - actionMap := make(ChainActionMap) - for _, htlc := range danglingHTLCs.outgoingHTLCs { - if _, ok := remoteHtlcs[htlc.HtlcIndex]; ok { - continue - } - - actionMap[HtlcFailNowAction] = append( - actionMap[HtlcFailNowAction], htlc, - ) - - log.Tracef("ChannelArbitrator(%v): immediately failing "+ - "htlc=%x from remote commitment", - c.cfg.ChanPoint, htlc.RHash[:]) - } - - return actionMap -} - -// constructChainActions returns the set of actions that should be taken for -// confirmed HTLCs at the specified height. Our actions will depend on the set -// of HTLCs that were active across all channels at the time of channel -// closure. -func (c *ChannelArbitrator) constructChainActions(confCommitSet *CommitSet, - height uint32, trigger transitionTrigger) (ChainActionMap, er.R) { - - // If we've reached this point and have not confirmed commitment set, - // then this is an older node that had a pending close channel before - // the CommitSet was introduced. In this case, we'll just return the - // existing ChainActionMap they had on disk. 
- if confCommitSet == nil { - return c.log.FetchChainActions() - } - - // Otherwise we have the full commitment set written to disk, and can - // proceed as normal. - htlcSets := confCommitSet.toActiveHTLCSets() - switch *confCommitSet.ConfCommitKey { - - // If the local commitment transaction confirmed, then we'll examine - // that as well as their commitments to the set of chain actions. - case LocalHtlcSet: - return c.checkLocalChainActions( - height, trigger, htlcSets, true, - ) - - // If the remote commitment confirmed, then we'll grab all the chain - // actions for the remote commit, and check the pending commit for any - // HTLCS we need to handle immediately (dust). - case RemoteHtlcSet: - return c.checkRemoteChainActions( - height, trigger, htlcSets, false, - ) - - // Otherwise, the remote pending commitment confirmed, so we'll examine - // the HTLCs on that unrevoked dangling commitment. - case RemotePendingHtlcSet: - return c.checkRemoteChainActions( - height, trigger, htlcSets, true, - ) - } - - return nil, er.Errorf("unable to locate chain actions") -} - -// prepContractResolutions is called either int he case that we decide we need -// to go to chain, or the remote party goes to chain. Given a set of actions we -// need to take for each HTLC, this method will return a set of contract -// resolvers that will resolve the contracts on-chain if needed, and also a set -// of packets to send to the htlcswitch in order to ensure all incoming HTLC's -// are properly resolved. -func (c *ChannelArbitrator) prepContractResolutions( - contractResolutions *ContractResolutions, height uint32, - trigger transitionTrigger, - confCommitSet *CommitSet) ([]ContractResolver, []ResolutionMsg, er.R) { - - // First, we'll reconstruct a fresh set of chain actions as the set of - // actions we need to act on may differ based on if it was our - // commitment, or they're commitment that hit the chain. 
- htlcActions, err := c.constructChainActions( - confCommitSet, height, trigger, - ) - if err != nil { - return nil, nil, err - } - - // There may be a class of HTLC's which we can fail back immediately, - // for those we'll prepare a slice of packets to add to our outbox. Any - // packets we need to send, will be cancels. - var ( - msgsToSend []ResolutionMsg - ) - - incomingResolutions := contractResolutions.HtlcResolutions.IncomingHTLCs - outgoingResolutions := contractResolutions.HtlcResolutions.OutgoingHTLCs - - // We'll use these two maps to quickly look up an active HTLC with its - // matching HTLC resolution. - outResolutionMap := make(map[wire.OutPoint]lnwallet.OutgoingHtlcResolution) - inResolutionMap := make(map[wire.OutPoint]lnwallet.IncomingHtlcResolution) - for i := 0; i < len(incomingResolutions); i++ { - inRes := incomingResolutions[i] - inResolutionMap[inRes.HtlcPoint()] = inRes - } - for i := 0; i < len(outgoingResolutions); i++ { - outRes := outgoingResolutions[i] - outResolutionMap[outRes.HtlcPoint()] = outRes - } - - // We'll create the resolver kit that we'll be cloning for each - // resolver so they each can do their duty. - resolverCfg := ResolverConfig{ - ChannelArbitratorConfig: c.cfg, - Checkpoint: func(res ContractResolver, - reports ...*channeldb.ResolverReport) er.R { - - return c.log.InsertUnresolvedContracts(reports, res) - }, - } - - commitHash := contractResolutions.CommitHash - failureMsg := &lnwire.FailPermanentChannelFailure{} - - // For each HTLC, we'll either act immediately, meaning we'll instantly - // fail the HTLC, or we'll act only once the transaction has been - // confirmed, in which case we'll need an HTLC resolver. - var htlcResolvers []ContractResolver - for htlcAction, htlcs := range htlcActions { - switch htlcAction { - - // If we can fail an HTLC immediately (an outgoing HTLC with no - // contract), then we'll assemble an HTLC fail packet to send. 
- case HtlcFailNowAction: - for _, htlc := range htlcs { - failMsg := ResolutionMsg{ - SourceChan: c.cfg.ShortChanID, - HtlcIndex: htlc.HtlcIndex, - Failure: failureMsg, - } - - msgsToSend = append(msgsToSend, failMsg) - } - - // If we can claim this HTLC, we'll create an HTLC resolver to - // claim the HTLC (second-level or directly), then add the pre - case HtlcClaimAction: - for _, htlc := range htlcs { - htlc := htlc - - htlcOp := wire.OutPoint{ - Hash: commitHash, - Index: uint32(htlc.OutputIndex), - } - - resolution, ok := inResolutionMap[htlcOp] - if !ok { - // TODO(roasbeef): panic? - log.Errorf("ChannelArbitrator(%v) unable to find "+ - "incoming resolution: %v", - c.cfg.ChanPoint, htlcOp) - continue - } - - resolver := newSuccessResolver( - resolution, height, htlc, resolverCfg, - ) - htlcResolvers = append(htlcResolvers, resolver) - } - - // If we can timeout the HTLC directly, then we'll create the - // proper resolver to do so, who will then cancel the packet - // backwards. - case HtlcTimeoutAction: - for _, htlc := range htlcs { - htlc := htlc - - htlcOp := wire.OutPoint{ - Hash: commitHash, - Index: uint32(htlc.OutputIndex), - } - - resolution, ok := outResolutionMap[htlcOp] - if !ok { - log.Errorf("ChannelArbitrator(%v) unable to find "+ - "outgoing resolution: %v", c.cfg.ChanPoint, htlcOp) - continue - } - - resolver := newTimeoutResolver( - resolution, height, htlc, resolverCfg, - ) - htlcResolvers = append(htlcResolvers, resolver) - } - - // If this is an incoming HTLC, but we can't act yet, then - // we'll create an incoming resolver to redeem the HTLC if we - // learn of the pre-image, or let the remote party time out. - case HtlcIncomingWatchAction: - for _, htlc := range htlcs { - htlc := htlc - - htlcOp := wire.OutPoint{ - Hash: commitHash, - Index: uint32(htlc.OutputIndex), - } - - // TODO(roasbeef): need to handle incoming dust... - - // TODO(roasbeef): can't be negative!!! 
- resolution, ok := inResolutionMap[htlcOp] - if !ok { - log.Errorf("ChannelArbitrator(%v) unable to find "+ - "incoming resolution: %v", - c.cfg.ChanPoint, htlcOp) - continue - } - - resolver := newIncomingContestResolver( - resolution, height, htlc, - resolverCfg, - ) - htlcResolvers = append(htlcResolvers, resolver) - } - - // Finally, if this is an outgoing HTLC we've sent, then we'll - // launch a resolver to watch for the pre-image (and settle - // backwards), or just timeout. - case HtlcOutgoingWatchAction: - for _, htlc := range htlcs { - htlc := htlc - - htlcOp := wire.OutPoint{ - Hash: commitHash, - Index: uint32(htlc.OutputIndex), - } - - resolution, ok := outResolutionMap[htlcOp] - if !ok { - log.Errorf("ChannelArbitrator(%v) unable to find "+ - "outgoing resolution: %v", - c.cfg.ChanPoint, htlcOp) - continue - } - - resolver := newOutgoingContestResolver( - resolution, height, htlc, resolverCfg, - ) - htlcResolvers = append(htlcResolvers, resolver) - } - } - } - - // If this is was an unilateral closure, then we'll also create a - // resolver to sweep our commitment output (but only if it wasn't - // trimmed). - if contractResolutions.CommitResolution != nil { - resolver := newCommitSweepResolver( - *contractResolutions.CommitResolution, - height, c.cfg.ChanPoint, resolverCfg, - ) - htlcResolvers = append(htlcResolvers, resolver) - } - - // We instantiate an anchor resolver if the commitmentment tx has an - // anchor. - if contractResolutions.AnchorResolution != nil { - anchorResolver := newAnchorResolver( - contractResolutions.AnchorResolution.AnchorSignDescriptor, - contractResolutions.AnchorResolution.CommitAnchor, - height, c.cfg.ChanPoint, resolverCfg, - ) - htlcResolvers = append(htlcResolvers, anchorResolver) - } - - return htlcResolvers, msgsToSend, nil -} - -// replaceResolver replaces a in the list of active resolvers. If the resolver -// to be replaced is not found, it returns an error. 
-func (c *ChannelArbitrator) replaceResolver(oldResolver, - newResolver ContractResolver) er.R { - - c.activeResolversLock.Lock() - defer c.activeResolversLock.Unlock() - - oldKey := oldResolver.ResolverKey() - for i, r := range c.activeResolvers { - if bytes.Equal(r.ResolverKey(), oldKey) { - c.activeResolvers[i] = newResolver - return nil - } - } - - return er.New("resolver to be replaced not found") -} - -// resolveContract is a goroutine tasked with fully resolving an unresolved -// contract. Either the initial contract will be resolved after a single step, -// or the contract will itself create another contract to be resolved. In -// either case, one the contract has been fully resolved, we'll signal back to -// the main goroutine so it can properly keep track of the set of unresolved -// contracts. -// -// NOTE: This MUST be run as a goroutine. -func (c *ChannelArbitrator) resolveContract(currentContract ContractResolver) { - defer c.wg.Done() - - log.Debugf("ChannelArbitrator(%v): attempting to resolve %T", - c.cfg.ChanPoint, currentContract) - - // Until the contract is fully resolved, we'll continue to iteratively - // resolve the contract one step at a time. - for !currentContract.IsResolved() { - log.Debugf("ChannelArbitrator(%v): contract %T not yet resolved", - c.cfg.ChanPoint, currentContract) - - select { - - // If we've been signalled to quit, then we'll exit early. - case <-c.quit: - return - - default: - // Otherwise, we'll attempt to resolve the current - // contract. - nextContract, err := currentContract.Resolve() - if err != nil { - if errResolverShuttingDown.Is(err) { - return - } - - log.Errorf("ChannelArbitrator(%v): unable to "+ - "progress %T: %v", - c.cfg.ChanPoint, currentContract, err) - return - } - - switch { - // If this contract produced another, then this means - // the current contract was only able to be partially - // resolved in this step. 
So we'll do a contract swap - // within our logs: the new contract will take the - // place of the old one. - case nextContract != nil: - log.Debugf("ChannelArbitrator(%v): swapping "+ - "out contract %T for %T ", - c.cfg.ChanPoint, currentContract, - nextContract) - - // Swap contract in log. - err := c.log.SwapContract( - currentContract, nextContract, - ) - if err != nil { - log.Errorf("unable to add recurse "+ - "contract: %v", err) - } - - // Swap contract in resolvers list. This is to - // make sure that reports are queried from the - // new resolver. - err = c.replaceResolver( - currentContract, nextContract, - ) - if err != nil { - log.Errorf("unable to replace "+ - "contract: %v", err) - } - - // As this contract produced another, we'll - // re-assign, so we can continue our resolution - // loop. - currentContract = nextContract - - // If this contract is actually fully resolved, then - // we'll mark it as such within the database. - case currentContract.IsResolved(): - log.Debugf("ChannelArbitrator(%v): marking "+ - "contract %T fully resolved", - c.cfg.ChanPoint, currentContract) - - err := c.log.ResolveContract(currentContract) - if err != nil { - log.Errorf("unable to resolve contract: %v", - err) - } - - // Now that the contract has been resolved, - // well signal to the main goroutine. - select { - case c.resolutionSignal <- struct{}{}: - case <-c.quit: - return - } - } - - } - } -} - -// signalUpdateMsg is a struct that carries fresh signals to the -// ChannelArbitrator. We need to receive a message like this each time the -// channel becomes active, as it's internal state may change. -type signalUpdateMsg struct { - // newSignals is the set of new active signals to be sent to the - // arbitrator. - newSignals *ContractSignals - - // doneChan is a channel that will be closed on the arbitrator has - // attached the new signals. 
- doneChan chan struct{} -} - -// UpdateContractSignals updates the set of signals the ChannelArbitrator needs -// to receive from a channel in real-time in order to keep in sync with the -// latest state of the contract. -func (c *ChannelArbitrator) UpdateContractSignals(newSignals *ContractSignals) { - done := make(chan struct{}) - - select { - case c.signalUpdates <- &signalUpdateMsg{ - newSignals: newSignals, - doneChan: done, - }: - case <-c.quit: - } - - select { - case <-done: - case <-c.quit: - } -} - -// channelAttendant is the primary goroutine that acts at the judicial -// arbitrator between our channel state, the remote channel peer, and the -// blockchain (Our judge). This goroutine will ensure that we faithfully execute -// all clauses of our contract in the case that we need to go on-chain for a -// dispute. Currently, two such conditions warrant our intervention: when an -// outgoing HTLC is about to timeout, and when we know the pre-image for an -// incoming HTLC, but it hasn't yet been settled off-chain. In these cases, -// we'll: broadcast our commitment, cancel/settle any HTLC's backwards after -// sufficient confirmation, and finally send our set of outputs to the UTXO -// Nursery for incubation, and ultimate sweeping. -// -// NOTE: This MUST be run as a goroutine. -func (c *ChannelArbitrator) channelAttendant(bestHeight int32) { - - // TODO(roasbeef): tell top chain arb we're done - defer func() { - c.wg.Done() - }() - - for { - select { - - // A new block has arrived, we'll examine all the active HTLC's - // to see if any of them have expired, and also update our - // track of the best current height. - case blockHeight, ok := <-c.blocks: - if !ok { - return - } - bestHeight = blockHeight - - // If we're not in the default state, then we can - // ignore this signal as we're waiting for contract - // resolution. 
- if c.state != StateDefault { - continue - } - - // Now that a new block has arrived, we'll attempt to - // advance our state forward. - nextState, _, err := c.advanceState( - uint32(bestHeight), chainTrigger, nil, - ) - if err != nil { - log.Errorf("Unable to advance state: %v", err) - } - - // If as a result of this trigger, the contract is - // fully resolved, then well exit. - if nextState == StateFullyResolved { - return - } - - // A new signal update was just sent. This indicates that the - // channel under watch is now live, and may modify its internal - // state, so we'll get the most up to date signals to we can - // properly do our job. - case signalUpdate := <-c.signalUpdates: - log.Tracef("ChannelArbitrator(%v) got new signal "+ - "update!", c.cfg.ChanPoint) - - // First, we'll update our set of signals. - c.htlcUpdates = signalUpdate.newSignals.HtlcUpdates - c.cfg.ShortChanID = signalUpdate.newSignals.ShortChanID - - // Now that the signals have been updated, we'll now - // close the done channel to signal to the caller we've - // registered the new contracts. - close(signalUpdate.doneChan) - - // A new set of HTLC's has been added or removed from the - // commitment transaction. So we'll update our activeHTLCs map - // accordingly. - case htlcUpdate := <-c.htlcUpdates: - // We'll wipe out our old set of HTLC's for each - // htlcSetKey type included in this update in order to - // only monitor the HTLCs that are still active on this - // target commitment. - c.activeHTLCs[htlcUpdate.HtlcKey] = newHtlcSet( - htlcUpdate.Htlcs, - ) - - log.Tracef("ChannelArbitrator(%v): fresh set of htlcs=%v", - c.cfg.ChanPoint, - log.C(func() string { - return spew.Sdump(htlcUpdate) - }), - ) - - // We've cooperatively closed the channel, so we're no longer - // needed. We'll mark the channel as resolved and exit. 
- case closeInfo := <-c.cfg.ChainEvents.CooperativeClosure: - log.Infof("ChannelArbitrator(%v) marking channel "+ - "cooperatively closed", c.cfg.ChanPoint) - - err := c.cfg.MarkChannelClosed( - closeInfo.ChannelCloseSummary, - channeldb.ChanStatusCoopBroadcasted, - ) - if err != nil { - log.Errorf("Unable to mark channel closed: "+ - "%v", err) - return - } - - // We'll now advance our state machine until it reaches - // a terminal state, and the channel is marked resolved. - _, _, err = c.advanceState( - closeInfo.CloseHeight, coopCloseTrigger, nil, - ) - if err != nil { - log.Errorf("Unable to advance state: %v", err) - return - } - - // We have broadcasted our commitment, and it is now confirmed - // on-chain. - case closeInfo := <-c.cfg.ChainEvents.LocalUnilateralClosure: - log.Infof("ChannelArbitrator(%v): local on-chain "+ - "channel close", c.cfg.ChanPoint) - - if c.state != StateCommitmentBroadcasted { - log.Errorf("ChannelArbitrator(%v): unexpected "+ - "local on-chain channel close", - c.cfg.ChanPoint) - } - closeTx := closeInfo.CloseTx - - contractRes := &ContractResolutions{ - CommitHash: closeTx.TxHash(), - CommitResolution: closeInfo.CommitResolution, - HtlcResolutions: *closeInfo.HtlcResolutions, - AnchorResolution: closeInfo.AnchorResolution, - } - - // When processing a unilateral close event, we'll - // transition to the ContractClosed state. We'll log - // out the set of resolutions such that they are - // available to fetch in that state, we'll also write - // the commit set so we can reconstruct our chain - // actions on restart. - err := c.log.LogContractResolutions(contractRes) - if err != nil { - log.Errorf("Unable to write resolutions: %v", - err) - return - } - err = c.log.InsertConfirmedCommitSet( - &closeInfo.CommitSet, - ) - if err != nil { - log.Errorf("Unable to write commit set: %v", - err) - return - } - - // After the set of resolutions are successfully - // logged, we can safely close the channel. 
After this - // succeeds we won't be getting chain events anymore, - // so we must make sure we can recover on restart after - // it is marked closed. If the next state transition - // fails, we'll start up in the prior state again, and - // we won't be longer getting chain events. In this - // case we must manually re-trigger the state - // transition into StateContractClosed based on the - // close status of the channel. - err = c.cfg.MarkChannelClosed( - closeInfo.ChannelCloseSummary, - channeldb.ChanStatusLocalCloseInitiator, - ) - if err != nil { - log.Errorf("Unable to mark "+ - "channel closed: %v", err) - return - } - - // We'll now advance our state machine until it reaches - // a terminal state. - _, _, err = c.advanceState( - uint32(closeInfo.SpendingHeight), - localCloseTrigger, &closeInfo.CommitSet, - ) - if err != nil { - log.Errorf("Unable to advance state: %v", err) - } - - // The remote party has broadcast the commitment on-chain. - // We'll examine our state to determine if we need to act at - // all. - case uniClosure := <-c.cfg.ChainEvents.RemoteUnilateralClosure: - log.Infof("ChannelArbitrator(%v): remote party has "+ - "closed channel out on-chain", c.cfg.ChanPoint) - - // If we don't have a self output, and there are no - // active HTLC's, then we can immediately mark the - // contract as fully resolved and exit. - contractRes := &ContractResolutions{ - CommitHash: *uniClosure.SpenderTxHash, - CommitResolution: uniClosure.CommitResolution, - HtlcResolutions: *uniClosure.HtlcResolutions, - AnchorResolution: uniClosure.AnchorResolution, - } - - // When processing a unilateral close event, we'll - // transition to the ContractClosed state. We'll log - // out the set of resolutions such that they are - // available to fetch in that state, we'll also write - // the commit set so we can reconstruct our chain - // actions on restart. 
- err := c.log.LogContractResolutions(contractRes) - if err != nil { - log.Errorf("Unable to write resolutions: %v", - err) - return - } - err = c.log.InsertConfirmedCommitSet( - &uniClosure.CommitSet, - ) - if err != nil { - log.Errorf("Unable to write commit set: %v", - err) - return - } - - // After the set of resolutions are successfully - // logged, we can safely close the channel. After this - // succeeds we won't be getting chain events anymore, - // so we must make sure we can recover on restart after - // it is marked closed. If the next state transition - // fails, we'll start up in the prior state again, and - // we won't be longer getting chain events. In this - // case we must manually re-trigger the state - // transition into StateContractClosed based on the - // close status of the channel. - closeSummary := &uniClosure.ChannelCloseSummary - err = c.cfg.MarkChannelClosed( - closeSummary, - channeldb.ChanStatusRemoteCloseInitiator, - ) - if err != nil { - log.Errorf("Unable to mark channel closed: %v", - err) - return - } - - // We'll now advance our state machine until it reaches - // a terminal state. - _, _, err = c.advanceState( - uint32(uniClosure.SpendingHeight), - remoteCloseTrigger, &uniClosure.CommitSet, - ) - if err != nil { - log.Errorf("Unable to advance state: %v", err) - } - - // The remote has breached the channel. As this is handled by - // the ChainWatcher and BreachArbiter, we don't have to do - // anything in particular, so just advance our state and - // gracefully exit. - case <-c.cfg.ChainEvents.ContractBreach: - log.Infof("ChannelArbitrator(%v): remote party has "+ - "breached channel!", c.cfg.ChanPoint) - - // We'll advance our state machine until it reaches a - // terminal state. 
- _, _, err := c.advanceState( - uint32(bestHeight), breachCloseTrigger, nil, - ) - if err != nil { - log.Errorf("Unable to advance state: %v", err) - } - - // A new contract has just been resolved, we'll now check our - // log to see if all contracts have been resolved. If so, then - // we can exit as the contract is fully resolved. - case <-c.resolutionSignal: - log.Infof("ChannelArbitrator(%v): a contract has been "+ - "fully resolved!", c.cfg.ChanPoint) - - nextState, _, err := c.advanceState( - uint32(bestHeight), chainTrigger, nil, - ) - if err != nil { - log.Errorf("Unable to advance state: %v", err) - } - - // If we don't have anything further to do after - // advancing our state, then we'll exit. - if nextState == StateFullyResolved { - log.Infof("ChannelArbitrator(%v): all "+ - "contracts fully resolved, exiting", - c.cfg.ChanPoint) - - return - } - - // We've just received a request to forcibly close out the - // channel. We'll - case closeReq := <-c.forceCloseReqs: - if c.state != StateDefault { - select { - case closeReq.closeTx <- nil: - case <-c.quit: - } - - select { - case closeReq.errResp <- errAlreadyForceClosed.Default(): - case <-c.quit: - } - - continue - } - - nextState, closeTx, err := c.advanceState( - uint32(bestHeight), userTrigger, nil, - ) - if err != nil { - log.Errorf("Unable to advance state: %v", err) - } - - select { - case closeReq.closeTx <- closeTx: - case <-c.quit: - return - } - - select { - case closeReq.errResp <- err: - case <-c.quit: - return - } - - // If we don't have anything further to do after - // advancing our state, then we'll exit. 
- if nextState == StateFullyResolved { - log.Infof("ChannelArbitrator(%v): all "+ - "contracts resolved, exiting", - c.cfg.ChanPoint) - return - } - - case <-c.quit: - return - } - } -} diff --git a/lnd/contractcourt/channel_arbitrator_test.go b/lnd/contractcourt/channel_arbitrator_test.go deleted file mode 100644 index ecd00638..00000000 --- a/lnd/contractcourt/channel_arbitrator_test.go +++ /dev/null @@ -1,2307 +0,0 @@ -package contractcourt - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "reflect" - "sync" - "testing" - "time" - - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/chaincfg/globalcfg" - "github.com/pkt-cash/pktd/lnd/chainntnfs" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/lnd/clock" - "github.com/pkt-cash/pktd/lnd/input" - "github.com/pkt-cash/pktd/lnd/lntest/mock" - "github.com/pkt-cash/pktd/lnd/lnwallet" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/wire" -) - -const ( - defaultTimeout = time.Second * 5 - - // stateTimeout is the timeout we allow when waiting for state - // transitions. - stateTimeout = time.Second * 15 -) - -type mockArbitratorLog struct { - state ArbitratorState - newStates chan ArbitratorState - failLog bool - failFetch *er.ErrorCode - failCommit bool - failCommitState ArbitratorState - resolutions *ContractResolutions - resolvers map[ContractResolver]struct{} - - commitSet *CommitSet - - sync.Mutex -} - -// A compile time check to ensure mockArbitratorLog meets the ArbitratorLog -// interface. 
-var _ ArbitratorLog = (*mockArbitratorLog)(nil) - -func (b *mockArbitratorLog) CurrentState(kvdb.RTx) (ArbitratorState, er.R) { - return b.state, nil -} - -func (b *mockArbitratorLog) CommitState(s ArbitratorState) er.R { - if b.failCommit && s == b.failCommitState { - return er.Errorf("intentional commit error at state %v", - b.failCommitState) - } - b.state = s - b.newStates <- s - return nil -} - -func (b *mockArbitratorLog) FetchUnresolvedContracts() ([]ContractResolver, - er.R) { - - b.Lock() - v := make([]ContractResolver, len(b.resolvers)) - idx := 0 - for resolver := range b.resolvers { - v[idx] = resolver - idx++ - } - b.Unlock() - - return v, nil -} - -func (b *mockArbitratorLog) InsertUnresolvedContracts(_ []*channeldb.ResolverReport, - resolvers ...ContractResolver) er.R { - - b.Lock() - for _, resolver := range resolvers { - resKey := resolver.ResolverKey() - if resKey == nil { - continue - } - - b.resolvers[resolver] = struct{}{} - } - b.Unlock() - return nil -} - -func (b *mockArbitratorLog) SwapContract(oldContract, - newContract ContractResolver) er.R { - - b.Lock() - delete(b.resolvers, oldContract) - b.resolvers[newContract] = struct{}{} - b.Unlock() - - return nil -} - -func (b *mockArbitratorLog) ResolveContract(res ContractResolver) er.R { - b.Lock() - delete(b.resolvers, res) - b.Unlock() - - return nil -} - -func (b *mockArbitratorLog) LogContractResolutions(c *ContractResolutions) er.R { - if b.failLog { - return er.Errorf("intentional log failure") - } - b.resolutions = c - return nil -} - -func (b *mockArbitratorLog) FetchContractResolutions() (*ContractResolutions, er.R) { - if b.failFetch != nil { - return nil, b.failFetch.Default() - } - - return b.resolutions, nil -} - -func (b *mockArbitratorLog) FetchChainActions() (ChainActionMap, er.R) { - return nil, nil -} - -func (b *mockArbitratorLog) InsertConfirmedCommitSet(c *CommitSet) er.R { - b.commitSet = c - return nil -} - -func (b *mockArbitratorLog) 
FetchConfirmedCommitSet(kvdb.RTx) (*CommitSet, er.R) { - return b.commitSet, nil -} - -func (b *mockArbitratorLog) WipeHistory() er.R { - return nil -} - -// testArbLog is a wrapper around an existing (ideally fully concrete -// ArbitratorLog) that lets us intercept certain calls like transitioning to a -// new state. -type testArbLog struct { - ArbitratorLog - - newStates chan ArbitratorState -} - -func (t *testArbLog) CommitState(s ArbitratorState) er.R { - if err := t.ArbitratorLog.CommitState(s); err != nil { - return err - } - - t.newStates <- s - - return nil -} - -type mockChainIO struct{} - -var _ lnwallet.BlockChainIO = (*mockChainIO)(nil) - -func (*mockChainIO) GetBestBlock() (*chainhash.Hash, int32, er.R) { - return nil, 0, nil -} - -func (*mockChainIO) GetUtxo(op *wire.OutPoint, _ []byte, - heightHint uint32, _ <-chan struct{}) (*wire.TxOut, er.R) { - return nil, nil -} - -func (*mockChainIO) GetBlockHash(blockHeight int64) (*chainhash.Hash, er.R) { - return nil, nil -} - -func (*mockChainIO) GetBlock(blockHash *chainhash.Hash) (*wire.MsgBlock, er.R) { - return nil, nil -} - -type chanArbTestCtx struct { - t *testing.T - - chanArb *ChannelArbitrator - - cleanUp func() - - resolvedChan chan struct{} - - incubationRequests chan struct{} - - resolutions chan []ResolutionMsg - - log ArbitratorLog - - sweeper *mockSweeper -} - -func (c *chanArbTestCtx) CleanUp() { - if err := c.chanArb.Stop(); err != nil { - c.t.Fatalf("unable to stop chan arb: %v", err) - } - - if c.cleanUp != nil { - c.cleanUp() - } -} - -// AssertStateTransitions asserts that the state machine steps through the -// passed states in order. 
-func (c *chanArbTestCtx) AssertStateTransitions(expectedStates ...ArbitratorState) { - c.t.Helper() - - var newStatesChan chan ArbitratorState - switch log := c.log.(type) { - case *mockArbitratorLog: - newStatesChan = log.newStates - - case *testArbLog: - newStatesChan = log.newStates - - default: - c.t.Fatalf("unable to assert state transitions with %T", log) - } - - for _, exp := range expectedStates { - var state ArbitratorState - select { - case state = <-newStatesChan: - case <-time.After(defaultTimeout): - c.t.Fatalf("new state not received") - } - - if state != exp { - c.t.Fatalf("expected new state %v, got %v", exp, state) - } - } -} - -// AssertState checks that the ChannelArbitrator is in the state we expect it -// to be. -func (c *chanArbTestCtx) AssertState(expected ArbitratorState) { - if c.chanArb.state != expected { - c.t.Fatalf("expected state %v, was %v", expected, c.chanArb.state) - } -} - -// Restart simulates a clean restart of the channel arbitrator, forcing it to -// walk through it's recovery logic. If this function returns nil, then a -// restart was successful. Note that the restart process keeps the log in -// place, in order to simulate proper persistence of the log. The caller can -// optionally provide a restart closure which will be executed before the -// resolver is started again, but after it is created. -func (c *chanArbTestCtx) Restart(restartClosure func(*chanArbTestCtx)) (*chanArbTestCtx, er.R) { - if err := c.chanArb.Stop(); err != nil { - return nil, err - } - - newCtx, err := createTestChannelArbitrator(c.t, c.log) - if err != nil { - return nil, err - } - - if restartClosure != nil { - restartClosure(newCtx) - } - - if err := newCtx.chanArb.Start(nil); err != nil { - return nil, err - } - - return newCtx, nil -} - -// testChanArbOption applies custom settings to a channel arbitrator config for -// testing purposes. 
-type testChanArbOption func(cfg *ChannelArbitratorConfig) - -// remoteInitiatorOption sets the MarkChannelClosed function in the -// Channel Arbitrator's config. -func withMarkClosed(markClosed func(*channeldb.ChannelCloseSummary, - ...channeldb.ChannelStatus) er.R) testChanArbOption { - - return func(cfg *ChannelArbitratorConfig) { - cfg.MarkChannelClosed = markClosed - } -} - -// createTestChannelArbitrator returns a channel arbitrator test context which -// contains a channel arbitrator with default values. These values can be -// changed by providing options which overwrite the default config. -func createTestChannelArbitrator(t *testing.T, log ArbitratorLog, - opts ...testChanArbOption) (*chanArbTestCtx, er.R) { - - chanPoint := wire.OutPoint{} - shortChanID := lnwire.ShortChannelID{} - chanEvents := &ChainEventSubscription{ - RemoteUnilateralClosure: make(chan *RemoteUnilateralCloseInfo, 1), - LocalUnilateralClosure: make(chan *LocalUnilateralCloseInfo, 1), - CooperativeClosure: make(chan *CooperativeCloseInfo, 1), - ContractBreach: make(chan *lnwallet.BreachRetribution, 1), - } - - resolutionChan := make(chan []ResolutionMsg, 1) - incubateChan := make(chan struct{}) - - chainIO := &mockChainIO{} - mockSweeper := newMockSweeper() - chainArbCfg := ChainArbitratorConfig{ - ChainIO: chainIO, - PublishTx: func(*wire.MsgTx, string) er.R { - return nil - }, - DeliverResolutionMsg: func(msgs ...ResolutionMsg) er.R { - resolutionChan <- msgs - return nil - }, - OutgoingBroadcastDelta: 5, - IncomingBroadcastDelta: 5, - Notifier: &mock.ChainNotifier{ - EpochChan: make(chan *chainntnfs.BlockEpoch), - SpendChan: make(chan *chainntnfs.SpendDetail), - ConfChan: make(chan *chainntnfs.TxConfirmation), - }, - IncubateOutputs: func(wire.OutPoint, - *lnwallet.OutgoingHtlcResolution, - *lnwallet.IncomingHtlcResolution, uint32) er.R { - - incubateChan <- struct{}{} - return nil - }, - OnionProcessor: &mockOnionProcessor{}, - IsForwardedHTLC: func(chanID lnwire.ShortChannelID, - 
htlcIndex uint64) bool { - - return true - }, - Clock: clock.NewDefaultClock(), - Sweeper: mockSweeper, - } - - // We'll use the resolvedChan to synchronize on call to - // MarkChannelResolved. - resolvedChan := make(chan struct{}, 1) - - // Next we'll create the matching configuration struct that contains - // all interfaces and methods the arbitrator needs to do its job. - arbCfg := &ChannelArbitratorConfig{ - ChanPoint: chanPoint, - ShortChanID: shortChanID, - MarkChannelResolved: func() er.R { - resolvedChan <- struct{}{} - return nil - }, - Channel: &mockChannel{}, - MarkCommitmentBroadcasted: func(_ *wire.MsgTx, _ bool) er.R { - return nil - }, - MarkChannelClosed: func(*channeldb.ChannelCloseSummary, - ...channeldb.ChannelStatus) er.R { - return nil - }, - IsPendingClose: false, - ChainArbitratorConfig: chainArbCfg, - ChainEvents: chanEvents, - PutResolverReport: func(_ kvdb.RwTx, - _ *channeldb.ResolverReport) er.R { - - return nil - }, - } - - // Apply all custom options to the config struct. 
- for _, option := range opts { - option(arbCfg) - } - - var cleanUp func() - if log == nil { - dbDir, errr := ioutil.TempDir("", "chanArb") - if errr != nil { - return nil, er.E(errr) - } - dbPath := filepath.Join(dbDir, "testdb") - db, err := kvdb.Create(kvdb.BoltBackendName, dbPath, true) - if err != nil { - return nil, err - } - - backingLog, err := newBoltArbitratorLog( - db, *arbCfg, chainhash.Hash{}, chanPoint, - ) - if err != nil { - return nil, err - } - cleanUp = func() { - db.Close() - os.RemoveAll(dbDir) - } - - log = &testArbLog{ - ArbitratorLog: backingLog, - newStates: make(chan ArbitratorState), - } - } - - htlcSets := make(map[HtlcSetKey]htlcSet) - - chanArb := NewChannelArbitrator(*arbCfg, htlcSets, log) - - return &chanArbTestCtx{ - t: t, - chanArb: chanArb, - cleanUp: cleanUp, - resolvedChan: resolvedChan, - resolutions: resolutionChan, - log: log, - incubationRequests: incubateChan, - sweeper: mockSweeper, - }, nil -} - -// TestChannelArbitratorCooperativeClose tests that the ChannelArbitertor -// correctly marks the channel resolved in case a cooperative close is -// confirmed. -func TestChannelArbitratorCooperativeClose(t *testing.T) { - log := &mockArbitratorLog{ - state: StateDefault, - newStates: make(chan ArbitratorState, 5), - } - - chanArbCtx, err := createTestChannelArbitrator(t, log) - if err != nil { - t.Fatalf("unable to create ChannelArbitrator: %v", err) - } - - if err := chanArbCtx.chanArb.Start(nil); err != nil { - t.Fatalf("unable to start ChannelArbitrator: %v", err) - } - defer func() { - if err := chanArbCtx.chanArb.Stop(); err != nil { - t.Fatalf("unable to stop chan arb: %v", err) - } - }() - - // It should start out in the default state. - chanArbCtx.AssertState(StateDefault) - - // We set up a channel to detect when MarkChannelClosed is called. 
- closeInfos := make(chan *channeldb.ChannelCloseSummary) - chanArbCtx.chanArb.cfg.MarkChannelClosed = func( - closeInfo *channeldb.ChannelCloseSummary, - statuses ...channeldb.ChannelStatus) er.R { - - closeInfos <- closeInfo - return nil - } - - // Cooperative close should do trigger a MarkChannelClosed + - // MarkChannelResolved. - closeInfo := &CooperativeCloseInfo{ - &channeldb.ChannelCloseSummary{}, - } - chanArbCtx.chanArb.cfg.ChainEvents.CooperativeClosure <- closeInfo - - select { - case c := <-closeInfos: - if c.CloseType != channeldb.CooperativeClose { - t.Fatalf("expected cooperative close, got %v", c.CloseType) - } - case <-time.After(defaultTimeout): - t.Fatalf("timeout waiting for channel close") - } - - // It should mark the channel as resolved. - select { - case <-chanArbCtx.resolvedChan: - // Expected. - case <-time.After(defaultTimeout): - t.Fatalf("contract was not resolved") - } -} - -// TestChannelArbitratorRemoteForceClose checks that the ChannelArbitrator goes -// through the expected states if a remote force close is observed in the -// chain. -func TestChannelArbitratorRemoteForceClose(t *testing.T) { - log := &mockArbitratorLog{ - state: StateDefault, - newStates: make(chan ArbitratorState, 5), - } - - chanArbCtx, err := createTestChannelArbitrator(t, log) - if err != nil { - t.Fatalf("unable to create ChannelArbitrator: %v", err) - } - chanArb := chanArbCtx.chanArb - - if err := chanArb.Start(nil); err != nil { - t.Fatalf("unable to start ChannelArbitrator: %v", err) - } - defer chanArb.Stop() - - // It should start out in the default state. - chanArbCtx.AssertState(StateDefault) - - // Send a remote force close event. 
- commitSpend := &chainntnfs.SpendDetail{ - SpenderTxHash: &chainhash.Hash{}, - } - - uniClose := &lnwallet.UnilateralCloseSummary{ - SpendDetail: commitSpend, - HtlcResolutions: &lnwallet.HtlcResolutions{}, - } - chanArb.cfg.ChainEvents.RemoteUnilateralClosure <- &RemoteUnilateralCloseInfo{ - UnilateralCloseSummary: uniClose, - CommitSet: CommitSet{ - ConfCommitKey: &RemoteHtlcSet, - HtlcSets: make(map[HtlcSetKey][]channeldb.HTLC), - }, - } - - // It should transition StateDefault -> StateContractClosed -> - // StateFullyResolved. - chanArbCtx.AssertStateTransitions( - StateContractClosed, StateFullyResolved, - ) - - // It should also mark the channel as resolved. - select { - case <-chanArbCtx.resolvedChan: - // Expected. - case <-time.After(defaultTimeout): - t.Fatalf("contract was not resolved") - } -} - -// TestChannelArbitratorLocalForceClose tests that the ChannelArbitrator goes -// through the expected states in case we request it to force close the channel, -// and the local force close event is observed in chain. -func TestChannelArbitratorLocalForceClose(t *testing.T) { - log := &mockArbitratorLog{ - state: StateDefault, - newStates: make(chan ArbitratorState, 5), - } - - chanArbCtx, err := createTestChannelArbitrator(t, log) - if err != nil { - t.Fatalf("unable to create ChannelArbitrator: %v", err) - } - chanArb := chanArbCtx.chanArb - - if err := chanArb.Start(nil); err != nil { - t.Fatalf("unable to start ChannelArbitrator: %v", err) - } - defer chanArb.Stop() - - // It should start out in the default state. - chanArbCtx.AssertState(StateDefault) - - // We create a channel we can use to pause the ChannelArbitrator at the - // point where it broadcasts the close tx, and check its state. - stateChan := make(chan ArbitratorState) - chanArb.cfg.PublishTx = func(*wire.MsgTx, string) er.R { - // When the force close tx is being broadcasted, check that the - // state is correct at that point. 
- select { - case stateChan <- chanArb.state: - case <-chanArb.quit: - return er.Errorf("exiting") - } - return nil - } - - errChan := make(chan er.R, 1) - respChan := make(chan *wire.MsgTx, 1) - - // With the channel found, and the request crafted, we'll send over a - // force close request to the arbitrator that watches this channel. - chanArb.forceCloseReqs <- &forceCloseReq{ - errResp: errChan, - closeTx: respChan, - } - - // It should transition to StateBroadcastCommit. - chanArbCtx.AssertStateTransitions(StateBroadcastCommit) - - // When it is broadcasting the force close, its state should be - // StateBroadcastCommit. - select { - case state := <-stateChan: - if state != StateBroadcastCommit { - t.Fatalf("state during PublishTx was %v", state) - } - case <-time.After(stateTimeout): - t.Fatalf("did not get state update") - } - - // After broadcasting, transition should be to - // StateCommitmentBroadcasted. - chanArbCtx.AssertStateTransitions(StateCommitmentBroadcasted) - - select { - case <-respChan: - case <-time.After(defaultTimeout): - t.Fatalf("no response received") - } - - select { - case err := <-errChan: - if err != nil { - t.Fatalf("error force closing channel: %v", err) - } - case <-time.After(defaultTimeout): - t.Fatalf("no response received") - } - - // After broadcasting the close tx, it should be in state - // StateCommitmentBroadcasted. - chanArbCtx.AssertState(StateCommitmentBroadcasted) - - // Now notify about the local force close getting confirmed. - chanArb.cfg.ChainEvents.LocalUnilateralClosure <- &LocalUnilateralCloseInfo{ - SpendDetail: &chainntnfs.SpendDetail{}, - LocalForceCloseSummary: &lnwallet.LocalForceCloseSummary{ - CloseTx: &wire.MsgTx{}, - HtlcResolutions: &lnwallet.HtlcResolutions{}, - }, - ChannelCloseSummary: &channeldb.ChannelCloseSummary{}, - } - - // It should transition StateContractClosed -> StateFullyResolved. 
- chanArbCtx.AssertStateTransitions(StateContractClosed, StateFullyResolved) - - // It should also mark the channel as resolved. - select { - case <-chanArbCtx.resolvedChan: - // Expected. - case <-time.After(defaultTimeout): - t.Fatalf("contract was not resolved") - } -} - -// TestChannelArbitratorBreachClose tests that the ChannelArbitrator goes -// through the expected states in case we notice a breach in the chain, and -// gracefully exits. -func TestChannelArbitratorBreachClose(t *testing.T) { - log := &mockArbitratorLog{ - state: StateDefault, - newStates: make(chan ArbitratorState, 5), - } - - chanArbCtx, err := createTestChannelArbitrator(t, log) - if err != nil { - t.Fatalf("unable to create ChannelArbitrator: %v", err) - } - chanArb := chanArbCtx.chanArb - - if err := chanArb.Start(nil); err != nil { - t.Fatalf("unable to start ChannelArbitrator: %v", err) - } - defer func() { - if err := chanArb.Stop(); err != nil { - t.Fatal(err) - } - }() - - // It should start out in the default state. - chanArbCtx.AssertState(StateDefault) - - // Send a breach close event. - chanArb.cfg.ChainEvents.ContractBreach <- &lnwallet.BreachRetribution{} - - // It should transition StateDefault -> StateFullyResolved. - chanArbCtx.AssertStateTransitions( - StateFullyResolved, - ) - - // It should also mark the channel as resolved. - select { - case <-chanArbCtx.resolvedChan: - // Expected. - case <-time.After(defaultTimeout): - t.Fatalf("contract was not resolved") - } -} - -// TestChannelArbitratorLocalForceClosePendingHtlc tests that the -// ChannelArbitrator goes through the expected states in case we request it to -// force close a channel that still has an HTLC pending. -func TestChannelArbitratorLocalForceClosePendingHtlc(t *testing.T) { - // We create a new test context for this channel arb, notice that we - // pass in a nil ArbitratorLog which means that a default one backed by - // a real DB will be created. 
We need this for our test as we want to - // test proper restart recovery and resolver population. - chanArbCtx, err := createTestChannelArbitrator(t, nil) - if err != nil { - t.Fatalf("unable to create ChannelArbitrator: %v", err) - } - chanArb := chanArbCtx.chanArb - chanArb.cfg.PreimageDB = newMockWitnessBeacon() - chanArb.cfg.Registry = &mockRegistry{} - - if err := chanArb.Start(nil); err != nil { - t.Fatalf("unable to start ChannelArbitrator: %v", err) - } - defer chanArb.Stop() - - // Create htlcUpdates channel. - htlcUpdates := make(chan *ContractUpdate) - - signals := &ContractSignals{ - HtlcUpdates: htlcUpdates, - ShortChanID: lnwire.ShortChannelID{}, - } - chanArb.UpdateContractSignals(signals) - - // Add HTLC to channel arbitrator. - htlcAmt := 10000 - htlc := channeldb.HTLC{ - Incoming: false, - Amt: lnwire.MilliSatoshi(htlcAmt), - HtlcIndex: 99, - } - - outgoingDustHtlc := channeldb.HTLC{ - Incoming: false, - Amt: 100, - HtlcIndex: 100, - OutputIndex: -1, - } - - incomingDustHtlc := channeldb.HTLC{ - Incoming: true, - Amt: 105, - HtlcIndex: 101, - OutputIndex: -1, - } - - htlcSet := []channeldb.HTLC{ - htlc, outgoingDustHtlc, incomingDustHtlc, - } - - htlcUpdates <- &ContractUpdate{ - HtlcKey: LocalHtlcSet, - Htlcs: htlcSet, - } - - errChan := make(chan er.R, 1) - respChan := make(chan *wire.MsgTx, 1) - - // With the channel found, and the request crafted, we'll send over a - // force close request to the arbitrator that watches this channel. - chanArb.forceCloseReqs <- &forceCloseReq{ - errResp: errChan, - closeTx: respChan, - } - - // The force close request should trigger broadcast of the commitment - // transaction. 
- chanArbCtx.AssertStateTransitions( - StateBroadcastCommit, - StateCommitmentBroadcasted, - ) - select { - case <-respChan: - case <-time.After(defaultTimeout): - t.Fatalf("no response received") - } - - select { - case err := <-errChan: - if err != nil { - t.Fatalf("error force closing channel: %v", err) - } - case <-time.After(defaultTimeout): - t.Fatalf("no response received") - } - - // Now notify about the local force close getting confirmed. - closeTx := &wire.MsgTx{ - TxIn: []*wire.TxIn{ - { - PreviousOutPoint: wire.OutPoint{}, - Witness: [][]byte{ - {0x1}, - {0x2}, - }, - }, - }, - } - - htlcOp := wire.OutPoint{ - Hash: closeTx.TxHash(), - Index: 0, - } - - // Set up the outgoing resolution. Populate SignedTimeoutTx because our - // commitment transaction got confirmed. - outgoingRes := lnwallet.OutgoingHtlcResolution{ - Expiry: 10, - SweepSignDesc: input.SignDescriptor{ - Output: &wire.TxOut{}, - }, - SignedTimeoutTx: &wire.MsgTx{ - TxIn: []*wire.TxIn{ - { - PreviousOutPoint: htlcOp, - Witness: [][]byte{{}}, - }, - }, - TxOut: []*wire.TxOut{ - {}, - }, - }, - } - - chanArb.cfg.ChainEvents.LocalUnilateralClosure <- &LocalUnilateralCloseInfo{ - SpendDetail: &chainntnfs.SpendDetail{}, - LocalForceCloseSummary: &lnwallet.LocalForceCloseSummary{ - CloseTx: closeTx, - HtlcResolutions: &lnwallet.HtlcResolutions{ - OutgoingHTLCs: []lnwallet.OutgoingHtlcResolution{ - outgoingRes, - }, - }, - }, - ChannelCloseSummary: &channeldb.ChannelCloseSummary{}, - CommitSet: CommitSet{ - ConfCommitKey: &LocalHtlcSet, - HtlcSets: map[HtlcSetKey][]channeldb.HTLC{ - LocalHtlcSet: htlcSet, - }, - }, - } - - chanArbCtx.AssertStateTransitions( - StateContractClosed, - StateWaitingFullResolution, - ) - - // We expect an immediate resolution message for the outgoing dust htlc. - // It is not resolvable on-chain. 
- select { - case msgs := <-chanArbCtx.resolutions: - if len(msgs) != 1 { - t.Fatalf("expected 1 message, instead got %v", len(msgs)) - } - - if msgs[0].HtlcIndex != outgoingDustHtlc.HtlcIndex { - t.Fatalf("wrong htlc index: expected %v, got %v", - outgoingDustHtlc.HtlcIndex, msgs[0].HtlcIndex) - } - case <-time.After(defaultTimeout): - t.Fatalf("resolution msgs not sent") - } - - // We'll grab the old notifier here as our resolvers are still holding - // a reference to this instance, and a new one will be created when we - // restart the channel arb below. - oldNotifier := chanArb.cfg.Notifier.(*mock.ChainNotifier) - - // At this point, in order to simulate a restart, we'll re-create the - // channel arbitrator. We do this to ensure that all information - // required to properly resolve this HTLC are populated. - if err := chanArb.Stop(); err != nil { - t.Fatalf("unable to stop chan arb: %v", err) - } - - // We'll no re-create the resolver, notice that we use the existing - // arbLog so it carries over the same on-disk state. - chanArbCtxNew, err := chanArbCtx.Restart(nil) - if err != nil { - t.Fatalf("unable to create ChannelArbitrator: %v", err) - } - chanArb = chanArbCtxNew.chanArb - defer chanArbCtxNew.CleanUp() - - // Post restart, it should be the case that our resolver was properly - // supplemented, and we only have a single resolver in the final set. - if len(chanArb.activeResolvers) != 1 { - t.Fatalf("expected single resolver, instead got: %v", - len(chanArb.activeResolvers)) - } - - // We'll now examine the in-memory state of the active resolvers to - // ensure t hey were populated properly. - resolver := chanArb.activeResolvers[0] - outgoingResolver, ok := resolver.(*htlcOutgoingContestResolver) - if !ok { - t.Fatalf("expected outgoing contest resolver, got %vT", - resolver) - } - - // The resolver should have its htlc amt field populated as it. 
- if int64(outgoingResolver.htlc.Amt) != int64(htlcAmt) { - t.Fatalf("wrong htlc amount: expected %v, got %v,", - htlcAmt, int64(outgoingResolver.htlc.Amt)) - } - - // htlcOutgoingContestResolver is now active and waiting for the HTLC to - // expire. It should not yet have passed it on for incubation. - select { - case <-chanArbCtx.incubationRequests: - t.Fatalf("contract should not be incubated yet") - default: - } - - // Send a notification that the expiry height has been reached. - oldNotifier.EpochChan <- &chainntnfs.BlockEpoch{Height: 10} - - // htlcOutgoingContestResolver is now transforming into a - // htlcTimeoutResolver and should send the contract off for incubation. - select { - case <-chanArbCtx.incubationRequests: - case <-time.After(defaultTimeout): - t.Fatalf("no response received") - } - - // Notify resolver that the HTLC output of the commitment has been - // spent. - oldNotifier.SpendChan <- &chainntnfs.SpendDetail{SpendingTx: closeTx} - - // Finally, we should also receive a resolution message instructing the - // switch to cancel back the HTLC. - select { - case msgs := <-chanArbCtx.resolutions: - if len(msgs) != 1 { - t.Fatalf("expected 1 message, instead got %v", len(msgs)) - } - - if msgs[0].HtlcIndex != htlc.HtlcIndex { - t.Fatalf("wrong htlc index: expected %v, got %v", - htlc.HtlcIndex, msgs[0].HtlcIndex) - } - case <-time.After(defaultTimeout): - t.Fatalf("resolution msgs not sent") - } - - // As this is our own commitment transaction, the HTLC will go through - // to the second level. Channel arbitrator should still not be marked - // as resolved. - select { - case <-chanArbCtxNew.resolvedChan: - t.Fatalf("channel resolved prematurely") - default: - } - - // Notify resolver that the second level transaction is spent. - oldNotifier.SpendChan <- &chainntnfs.SpendDetail{SpendingTx: closeTx} - - // At this point channel should be marked as resolved. 
- chanArbCtxNew.AssertStateTransitions(StateFullyResolved) - select { - case <-chanArbCtxNew.resolvedChan: - case <-time.After(defaultTimeout): - t.Fatalf("contract was not resolved") - } -} - -// TestChannelArbitratorLocalForceCloseRemoteConfiremd tests that the -// ChannelArbitrator behaves as expected in the case where we request a local -// force close, but a remote commitment ends up being confirmed in chain. -func TestChannelArbitratorLocalForceCloseRemoteConfirmed(t *testing.T) { - log := &mockArbitratorLog{ - state: StateDefault, - newStates: make(chan ArbitratorState, 5), - } - - chanArbCtx, err := createTestChannelArbitrator(t, log) - if err != nil { - t.Fatalf("unable to create ChannelArbitrator: %v", err) - } - chanArb := chanArbCtx.chanArb - - if err := chanArb.Start(nil); err != nil { - t.Fatalf("unable to start ChannelArbitrator: %v", err) - } - defer chanArb.Stop() - - // It should start out in the default state. - chanArbCtx.AssertState(StateDefault) - - // Create a channel we can use to assert the state when it publishes - // the close tx. - stateChan := make(chan ArbitratorState) - chanArb.cfg.PublishTx = func(*wire.MsgTx, string) er.R { - // When the force close tx is being broadcasted, check that the - // state is correct at that point. - select { - case stateChan <- chanArb.state: - case <-chanArb.quit: - return er.Errorf("exiting") - } - return nil - } - - errChan := make(chan er.R, 1) - respChan := make(chan *wire.MsgTx, 1) - - // With the channel found, and the request crafted, we'll send over a - // force close request to the arbitrator that watches this channel. - chanArb.forceCloseReqs <- &forceCloseReq{ - errResp: errChan, - closeTx: respChan, - } - - // It should transition to StateBroadcastCommit. - chanArbCtx.AssertStateTransitions(StateBroadcastCommit) - - // We expect it to be in state StateBroadcastCommit when publishing - // the force close. 
- select { - case state := <-stateChan: - if state != StateBroadcastCommit { - t.Fatalf("state during PublishTx was %v", state) - } - case <-time.After(stateTimeout): - t.Fatalf("no state update received") - } - - // After broadcasting, transition should be to - // StateCommitmentBroadcasted. - chanArbCtx.AssertStateTransitions(StateCommitmentBroadcasted) - - // Wait for a response to the force close. - select { - case <-respChan: - case <-time.After(defaultTimeout): - t.Fatalf("no response received") - } - - select { - case err := <-errChan: - if err != nil { - t.Fatalf("error force closing channel: %v", err) - } - case <-time.After(defaultTimeout): - t.Fatalf("no response received") - } - - // The state should be StateCommitmentBroadcasted. - chanArbCtx.AssertState(StateCommitmentBroadcasted) - - // Now notify about the _REMOTE_ commitment getting confirmed. - commitSpend := &chainntnfs.SpendDetail{ - SpenderTxHash: &chainhash.Hash{}, - } - uniClose := &lnwallet.UnilateralCloseSummary{ - SpendDetail: commitSpend, - HtlcResolutions: &lnwallet.HtlcResolutions{}, - } - chanArb.cfg.ChainEvents.RemoteUnilateralClosure <- &RemoteUnilateralCloseInfo{ - UnilateralCloseSummary: uniClose, - } - - // It should transition StateContractClosed -> StateFullyResolved. - chanArbCtx.AssertStateTransitions(StateContractClosed, StateFullyResolved) - - // It should resolve. - select { - case <-chanArbCtx.resolvedChan: - // Expected. - case <-time.After(stateTimeout): - t.Fatalf("contract was not resolved") - } -} - -// TestChannelArbitratorLocalForceCloseDoubleSpend tests that the -// ChannelArbitrator behaves as expected in the case where we request a local -// force close, but we fail broadcasting our commitment because a remote -// commitment has already been published. 
-func TestChannelArbitratorLocalForceDoubleSpend(t *testing.T) { - log := &mockArbitratorLog{ - state: StateDefault, - newStates: make(chan ArbitratorState, 5), - } - - chanArbCtx, err := createTestChannelArbitrator(t, log) - if err != nil { - t.Fatalf("unable to create ChannelArbitrator: %v", err) - } - chanArb := chanArbCtx.chanArb - - if err := chanArb.Start(nil); err != nil { - t.Fatalf("unable to start ChannelArbitrator: %v", err) - } - defer chanArb.Stop() - - // It should start out in the default state. - chanArbCtx.AssertState(StateDefault) - - // Return ErrDoubleSpend when attempting to publish the tx. - stateChan := make(chan ArbitratorState) - chanArb.cfg.PublishTx = func(*wire.MsgTx, string) er.R { - // When the force close tx is being broadcasted, check that the - // state is correct at that point. - select { - case stateChan <- chanArb.state: - case <-chanArb.quit: - return er.Errorf("exiting") - } - return lnwallet.ErrDoubleSpend.Default() - } - - errChan := make(chan er.R, 1) - respChan := make(chan *wire.MsgTx, 1) - - // With the channel found, and the request crafted, we'll send over a - // force close request to the arbitrator that watches this channel. - chanArb.forceCloseReqs <- &forceCloseReq{ - errResp: errChan, - closeTx: respChan, - } - - // It should transition to StateBroadcastCommit. - chanArbCtx.AssertStateTransitions(StateBroadcastCommit) - - // We expect it to be in state StateBroadcastCommit when publishing - // the force close. - select { - case state := <-stateChan: - if state != StateBroadcastCommit { - t.Fatalf("state during PublishTx was %v", state) - } - case <-time.After(stateTimeout): - t.Fatalf("no state update received") - } - - // After broadcasting, transition should be to - // StateCommitmentBroadcasted. - chanArbCtx.AssertStateTransitions(StateCommitmentBroadcasted) - - // Wait for a response to the force close. 
- select { - case <-respChan: - case <-time.After(defaultTimeout): - t.Fatalf("no response received") - } - - select { - case err := <-errChan: - if err != nil { - t.Fatalf("error force closing channel: %v", err) - } - case <-time.After(defaultTimeout): - t.Fatalf("no response received") - } - - // The state should be StateCommitmentBroadcasted. - chanArbCtx.AssertState(StateCommitmentBroadcasted) - - // Now notify about the _REMOTE_ commitment getting confirmed. - commitSpend := &chainntnfs.SpendDetail{ - SpenderTxHash: &chainhash.Hash{}, - } - uniClose := &lnwallet.UnilateralCloseSummary{ - SpendDetail: commitSpend, - HtlcResolutions: &lnwallet.HtlcResolutions{}, - } - chanArb.cfg.ChainEvents.RemoteUnilateralClosure <- &RemoteUnilateralCloseInfo{ - UnilateralCloseSummary: uniClose, - } - - // It should transition StateContractClosed -> StateFullyResolved. - chanArbCtx.AssertStateTransitions(StateContractClosed, StateFullyResolved) - - // It should resolve. - select { - case <-chanArbCtx.resolvedChan: - // Expected. - case <-time.After(stateTimeout): - t.Fatalf("contract was not resolved") - } -} - -// TestChannelArbitratorPersistence tests that the ChannelArbitrator is able to -// keep advancing the state machine from various states after restart. -func TestChannelArbitratorPersistence(t *testing.T) { - // Start out with a log that will fail writing the set of resolutions. - log := &mockArbitratorLog{ - state: StateDefault, - newStates: make(chan ArbitratorState, 5), - failLog: true, - } - - chanArbCtx, err := createTestChannelArbitrator(t, log) - if err != nil { - t.Fatalf("unable to create ChannelArbitrator: %v", err) - } - - chanArb := chanArbCtx.chanArb - if err := chanArb.Start(nil); err != nil { - t.Fatalf("unable to start ChannelArbitrator: %v", err) - } - - // It should start in StateDefault. - chanArbCtx.AssertState(StateDefault) - - // Send a remote force close event. 
- commitSpend := &chainntnfs.SpendDetail{ - SpenderTxHash: &chainhash.Hash{}, - } - - uniClose := &lnwallet.UnilateralCloseSummary{ - SpendDetail: commitSpend, - HtlcResolutions: &lnwallet.HtlcResolutions{}, - } - chanArb.cfg.ChainEvents.RemoteUnilateralClosure <- &RemoteUnilateralCloseInfo{ - UnilateralCloseSummary: uniClose, - } - - // Since writing the resolutions fail, the arbitrator should not - // advance to the next state. - time.Sleep(100 * time.Millisecond) - if log.state != StateDefault { - t.Fatalf("expected to stay in StateDefault") - } - - // Restart the channel arb, this'll use the same long and prior - // context. - chanArbCtx, err = chanArbCtx.Restart(nil) - if err != nil { - t.Fatalf("unable to restart channel arb: %v", err) - } - chanArb = chanArbCtx.chanArb - - // Again, it should start up in the default state. - chanArbCtx.AssertState(StateDefault) - - // Now we make the log succeed writing the resolutions, but fail when - // attempting to close the channel. - log.failLog = false - chanArb.cfg.MarkChannelClosed = func(*channeldb.ChannelCloseSummary, - ...channeldb.ChannelStatus) er.R { - - return er.Errorf("intentional close error") - } - - // Send a new remote force close event. - chanArb.cfg.ChainEvents.RemoteUnilateralClosure <- &RemoteUnilateralCloseInfo{ - UnilateralCloseSummary: uniClose, - } - - // Since closing the channel failed, the arbitrator should stay in the - // default state. - time.Sleep(100 * time.Millisecond) - if log.state != StateDefault { - t.Fatalf("expected to stay in StateDefault") - } - - // Restart once again to simulate yet another restart. - chanArbCtx, err = chanArbCtx.Restart(nil) - if err != nil { - t.Fatalf("unable to restart channel arb: %v", err) - } - chanArb = chanArbCtx.chanArb - - // Starts out in StateDefault. - chanArbCtx.AssertState(StateDefault) - - // Now make fetching the resolutions fail. 
- log.failFetch = er.GenericErrorType.Code("intentional fetch failure") - chanArb.cfg.ChainEvents.RemoteUnilateralClosure <- &RemoteUnilateralCloseInfo{ - UnilateralCloseSummary: uniClose, - } - - // Since logging the resolutions and closing the channel now succeeds, - // it should advance to StateContractClosed. - chanArbCtx.AssertStateTransitions(StateContractClosed) - - // It should not advance further, however, as fetching resolutions - // failed. - time.Sleep(100 * time.Millisecond) - if log.state != StateContractClosed { - t.Fatalf("expected to stay in StateContractClosed") - } - chanArb.Stop() - - // Create a new arbitrator, and now make fetching resolutions succeed. - log.failFetch = nil - chanArbCtx, err = chanArbCtx.Restart(nil) - if err != nil { - t.Fatalf("unable to restart channel arb: %v", err) - } - defer chanArbCtx.CleanUp() - - // Finally it should advance to StateFullyResolved. - chanArbCtx.AssertStateTransitions(StateFullyResolved) - - // It should also mark the channel as resolved. - select { - case <-chanArbCtx.resolvedChan: - // Expected. - case <-time.After(defaultTimeout): - t.Fatalf("contract was not resolved") - } -} - -// TestChannelArbitratorForceCloseBreachedChannel tests that the channel -// arbitrator is able to handle a channel in the process of being force closed -// is breached by the remote node. In these cases we expect the -// ChannelArbitrator to gracefully exit, as the breach is handled by other -// subsystems. -func TestChannelArbitratorForceCloseBreachedChannel(t *testing.T) { - log := &mockArbitratorLog{ - state: StateDefault, - newStates: make(chan ArbitratorState, 5), - } - - chanArbCtx, err := createTestChannelArbitrator(t, log) - if err != nil { - t.Fatalf("unable to create ChannelArbitrator: %v", err) - } - - chanArb := chanArbCtx.chanArb - if err := chanArb.Start(nil); err != nil { - t.Fatalf("unable to start ChannelArbitrator: %v", err) - } - - // It should start in StateDefault. 
- chanArbCtx.AssertState(StateDefault) - - // We start by attempting a local force close. We'll return an - // unexpected publication error, causing the state machine to halt. - expErr := er.GenericErrorType.Code("intentional publication error") - stateChan := make(chan ArbitratorState) - chanArb.cfg.PublishTx = func(*wire.MsgTx, string) er.R { - // When the force close tx is being broadcasted, check that the - // state is correct at that point. - select { - case stateChan <- chanArb.state: - case <-chanArb.quit: - return er.Errorf("exiting") - } - return expErr.Default() - } - - errChan := make(chan er.R, 1) - respChan := make(chan *wire.MsgTx, 1) - - // With the channel found, and the request crafted, we'll send over a - // force close request to the arbitrator that watches this channel. - chanArb.forceCloseReqs <- &forceCloseReq{ - errResp: errChan, - closeTx: respChan, - } - - // It should transition to StateBroadcastCommit. - chanArbCtx.AssertStateTransitions(StateBroadcastCommit) - - // We expect it to be in state StateBroadcastCommit when attempting - // the force close. - select { - case state := <-stateChan: - if state != StateBroadcastCommit { - t.Fatalf("state during PublishTx was %v", state) - } - case <-time.After(stateTimeout): - t.Fatalf("no state update received") - } - - // Make sure we get the expected error. - select { - case err := <-errChan: - if !expErr.Is(err) { - t.Fatalf("unexpected error force closing channel: %v", - err) - } - case <-time.After(defaultTimeout): - t.Fatalf("no response received") - } - - // We mimic that the channel is breached while the channel arbitrator - // is down. This means that on restart it will be started with a - // pending close channel, of type BreachClose. 
- chanArbCtx, err = chanArbCtx.Restart(func(c *chanArbTestCtx) { - c.chanArb.cfg.IsPendingClose = true - c.chanArb.cfg.ClosingHeight = 100 - c.chanArb.cfg.CloseType = channeldb.BreachClose - }) - if err != nil { - t.Fatalf("unable to create ChannelArbitrator: %v", err) - } - defer chanArbCtx.CleanUp() - - // Finally it should advance to StateFullyResolved. - chanArbCtx.AssertStateTransitions(StateFullyResolved) - - // It should also mark the channel as resolved. - select { - case <-chanArbCtx.resolvedChan: - // Expected. - case <-time.After(defaultTimeout): - t.Fatalf("contract was not resolved") - } -} - -// TestChannelArbitratorCommitFailure tests that the channel arbitrator is able -// to recover from a failed CommitState call at restart. -func TestChannelArbitratorCommitFailure(t *testing.T) { - - testCases := []struct { - - // closeType is the type of channel close we want ot test. - closeType channeldb.ClosureType - - // sendEvent is a function that will send the event - // corresponding to this test's closeType to the passed - // ChannelArbitrator. - sendEvent func(chanArb *ChannelArbitrator) - - // expectedStates is the states we expect the state machine to - // go through after a restart and successful log commit. 
- expectedStates []ArbitratorState - }{ - { - closeType: channeldb.CooperativeClose, - sendEvent: func(chanArb *ChannelArbitrator) { - closeInfo := &CooperativeCloseInfo{ - &channeldb.ChannelCloseSummary{}, - } - chanArb.cfg.ChainEvents.CooperativeClosure <- closeInfo - }, - expectedStates: []ArbitratorState{StateFullyResolved}, - }, - { - closeType: channeldb.RemoteForceClose, - sendEvent: func(chanArb *ChannelArbitrator) { - commitSpend := &chainntnfs.SpendDetail{ - SpenderTxHash: &chainhash.Hash{}, - } - - uniClose := &lnwallet.UnilateralCloseSummary{ - SpendDetail: commitSpend, - HtlcResolutions: &lnwallet.HtlcResolutions{}, - } - chanArb.cfg.ChainEvents.RemoteUnilateralClosure <- &RemoteUnilateralCloseInfo{ - UnilateralCloseSummary: uniClose, - } - }, - expectedStates: []ArbitratorState{StateContractClosed, StateFullyResolved}, - }, - { - closeType: channeldb.LocalForceClose, - sendEvent: func(chanArb *ChannelArbitrator) { - chanArb.cfg.ChainEvents.LocalUnilateralClosure <- &LocalUnilateralCloseInfo{ - SpendDetail: &chainntnfs.SpendDetail{}, - LocalForceCloseSummary: &lnwallet.LocalForceCloseSummary{ - CloseTx: &wire.MsgTx{}, - HtlcResolutions: &lnwallet.HtlcResolutions{}, - }, - ChannelCloseSummary: &channeldb.ChannelCloseSummary{}, - } - }, - expectedStates: []ArbitratorState{StateContractClosed, StateFullyResolved}, - }, - } - - for _, test := range testCases { - test := test - - log := &mockArbitratorLog{ - state: StateDefault, - newStates: make(chan ArbitratorState, 5), - failCommit: true, - - // Set the log to fail on the first expected state - // after state machine progress for this test case. - failCommitState: test.expectedStates[0], - } - - chanArbCtx, err := createTestChannelArbitrator(t, log) - if err != nil { - t.Fatalf("unable to create ChannelArbitrator: %v", err) - } - - chanArb := chanArbCtx.chanArb - if err := chanArb.Start(nil); err != nil { - t.Fatalf("unable to start ChannelArbitrator: %v", err) - } - - // It should start in StateDefault. 
- chanArbCtx.AssertState(StateDefault) - - closed := make(chan struct{}) - chanArb.cfg.MarkChannelClosed = func( - *channeldb.ChannelCloseSummary, - ...channeldb.ChannelStatus) er.R { - close(closed) - return nil - } - - // Send the test event to trigger the state machine. - test.sendEvent(chanArb) - - select { - case <-closed: - case <-time.After(defaultTimeout): - t.Fatalf("channel was not marked closed") - } - - // Since the channel was marked closed in the database, but the - // commit to the next state failed, the state should still be - // StateDefault. - time.Sleep(100 * time.Millisecond) - if log.state != StateDefault { - t.Fatalf("expected to stay in StateDefault, instead "+ - "has %v", log.state) - } - chanArb.Stop() - - // Start the arbitrator again, with IsPendingClose reporting - // the channel closed in the database. - log.failCommit = false - chanArbCtx, err = chanArbCtx.Restart(func(c *chanArbTestCtx) { - c.chanArb.cfg.IsPendingClose = true - c.chanArb.cfg.ClosingHeight = 100 - c.chanArb.cfg.CloseType = test.closeType - }) - if err != nil { - t.Fatalf("unable to create ChannelArbitrator: %v", err) - } - - // Since the channel is marked closed in the database, it - // should advance to the expected states. - chanArbCtx.AssertStateTransitions(test.expectedStates...) - - // It should also mark the channel as resolved. - select { - case <-chanArbCtx.resolvedChan: - // Expected. - case <-time.After(defaultTimeout): - t.Fatalf("contract was not resolved") - } - } -} - -// TestChannelArbitratorEmptyResolutions makes sure that a channel that is -// pending close in the database, but haven't had any resolutions logged will -// not be marked resolved. This situation must be handled to avoid closing -// channels from earlier versions of the ChannelArbitrator, which didn't have a -// proper handoff from the ChainWatcher, and we could risk ending up in a state -// where the channel was closed in the DB, but the resolutions weren't properly -// written. 
-func TestChannelArbitratorEmptyResolutions(t *testing.T) { - // Start out with a log that will fail writing the set of resolutions. - log := &mockArbitratorLog{ - state: StateDefault, - newStates: make(chan ArbitratorState, 5), - failFetch: errNoResolutions, - } - - chanArbCtx, err := createTestChannelArbitrator(t, log) - if err != nil { - t.Fatalf("unable to create ChannelArbitrator: %v", err) - } - - chanArb := chanArbCtx.chanArb - chanArb.cfg.IsPendingClose = true - chanArb.cfg.ClosingHeight = 100 - chanArb.cfg.CloseType = channeldb.RemoteForceClose - - if err := chanArb.Start(nil); err != nil { - t.Fatalf("unable to start ChannelArbitrator: %v", err) - } - - // It should not advance its state beyond StateContractClosed, since - // fetching resolutions fails. - chanArbCtx.AssertStateTransitions(StateContractClosed) - - // It should not advance further, however, as fetching resolutions - // failed. - time.Sleep(100 * time.Millisecond) - if log.state != StateContractClosed { - t.Fatalf("expected to stay in StateContractClosed") - } - chanArb.Stop() -} - -// TestChannelArbitratorAlreadyForceClosed ensures that we cannot force close a -// channel that is already in the process of doing so. -func TestChannelArbitratorAlreadyForceClosed(t *testing.T) { - t.Parallel() - - // We'll create the arbitrator and its backing log to signal that it's - // already in the process of being force closed. - log := &mockArbitratorLog{ - state: StateCommitmentBroadcasted, - } - chanArbCtx, err := createTestChannelArbitrator(t, log) - if err != nil { - t.Fatalf("unable to create ChannelArbitrator: %v", err) - } - chanArb := chanArbCtx.chanArb - if err := chanArb.Start(nil); err != nil { - t.Fatalf("unable to start ChannelArbitrator: %v", err) - } - defer chanArb.Stop() - - // Then, we'll create a request to signal a force close request to the - // channel arbitrator. 
- errChan := make(chan er.R, 1) - respChan := make(chan *wire.MsgTx, 1) - - select { - case chanArb.forceCloseReqs <- &forceCloseReq{ - closeTx: respChan, - errResp: errChan, - }: - case <-chanArb.quit: - } - - // Finally, we should ensure that we are not able to do so by seeing - // the expected errAlreadyForceClosed error. - select { - case err = <-errChan: - if !errAlreadyForceClosed.Is(err) { - t.Fatalf("expected errAlreadyForceClosed, got %v", err) - } - case <-time.After(time.Second): - t.Fatal("expected to receive error response") - } -} - -// TestChannelArbitratorDanglingCommitForceClose tests that if there're HTLCs -// on the remote party's commitment, but not ours, and they're about to time -// out, then we'll go on chain so we can cancel back the HTLCs on the incoming -// commitment. -func TestChannelArbitratorDanglingCommitForceClose(t *testing.T) { - t.Parallel() - - type testCase struct { - htlcExpired bool - remotePendingHTLC bool - confCommit HtlcSetKey - } - var testCases []testCase - - testOptions := []bool{true, false} - confOptions := []HtlcSetKey{ - LocalHtlcSet, RemoteHtlcSet, RemotePendingHtlcSet, - } - for _, htlcExpired := range testOptions { - for _, remotePendingHTLC := range testOptions { - for _, commitConf := range confOptions { - switch { - // If the HTLC is on the remote commitment, and - // that one confirms, then there's no special - // behavior, we should play all the HTLCs on - // that remote commitment as normal. - case !remotePendingHTLC && commitConf == RemoteHtlcSet: - fallthrough - - // If the HTLC is on the remote pending, and - // that confirms, then we don't have any - // special actions. 
- case remotePendingHTLC && commitConf == RemotePendingHtlcSet: - continue - } - - testCases = append(testCases, testCase{ - htlcExpired: htlcExpired, - remotePendingHTLC: remotePendingHTLC, - confCommit: commitConf, - }) - } - } - } - - for _, testCase := range testCases { - testCase := testCase - testName := fmt.Sprintf("testCase: htlcExpired=%v,"+ - "remotePendingHTLC=%v,remotePendingCommitConf=%v", - testCase.htlcExpired, testCase.remotePendingHTLC, - testCase.confCommit) - - t.Run(testName, func(t *testing.T) { - t.Parallel() - - arbLog := &mockArbitratorLog{ - state: StateDefault, - newStates: make(chan ArbitratorState, 5), - resolvers: make(map[ContractResolver]struct{}), - } - - chanArbCtx, err := createTestChannelArbitrator( - t, arbLog, - ) - if err != nil { - t.Fatalf("unable to create ChannelArbitrator: %v", err) - } - chanArb := chanArbCtx.chanArb - if err := chanArb.Start(nil); err != nil { - t.Fatalf("unable to start ChannelArbitrator: %v", err) - } - defer chanArb.Stop() - - // Now that our channel arb has started, we'll set up - // its contract signals channel so we can send it - // various HTLC updates for this test. - htlcUpdates := make(chan *ContractUpdate) - signals := &ContractSignals{ - HtlcUpdates: htlcUpdates, - ShortChanID: lnwire.ShortChannelID{}, - } - chanArb.UpdateContractSignals(signals) - - htlcKey := RemoteHtlcSet - if testCase.remotePendingHTLC { - htlcKey = RemotePendingHtlcSet - } - - // Next, we'll send it a new HTLC that is set to expire - // in 10 blocks, this HTLC will only appear on the - // commitment transaction of the _remote_ party. - htlcIndex := uint64(99) - htlcExpiry := uint32(10) - danglingHTLC := channeldb.HTLC{ - Incoming: false, - Amt: 10000, - HtlcIndex: htlcIndex, - RefundTimeout: htlcExpiry, - } - htlcUpdates <- &ContractUpdate{ - HtlcKey: htlcKey, - Htlcs: []channeldb.HTLC{danglingHTLC}, - } - - // At this point, we now have a split commitment state - // from the PoV of the channel arb. 
There's now an HTLC - // that only exists on the commitment transaction of - // the remote party. - errChan := make(chan er.R, 1) - respChan := make(chan *wire.MsgTx, 1) - switch { - // If we want an HTLC expiration trigger, then We'll - // now mine a block (height 5), which is 5 blocks away - // (our grace delta) from the expiry of that HTLC. - case testCase.htlcExpired: - chanArbCtx.chanArb.blocks <- 5 - - // Otherwise, we'll just trigger a regular force close - // request. - case !testCase.htlcExpired: - chanArb.forceCloseReqs <- &forceCloseReq{ - errResp: errChan, - closeTx: respChan, - } - - } - - // At this point, the resolver should now have - // determined that it needs to go to chain in order to - // block off the redemption path so it can cancel the - // incoming HTLC. - chanArbCtx.AssertStateTransitions( - StateBroadcastCommit, - StateCommitmentBroadcasted, - ) - - // Next we'll craft a fake commitment transaction to - // send to signal that the channel has closed out on - // chain. - closeTx := &wire.MsgTx{ - TxIn: []*wire.TxIn{ - { - PreviousOutPoint: wire.OutPoint{}, - Witness: [][]byte{ - {0x9}, - }, - }, - }, - } - - // We'll now signal to the channel arb that the HTLC - // has fully closed on chain. Our local commit set - // shows now HTLC on our commitment, but one on the - // remote commitment. This should result in the HTLC - // being canalled back. Also note that there're no HTLC - // resolutions sent since we have none on our - // commitment transaction. 
- uniCloseInfo := &LocalUnilateralCloseInfo{ - SpendDetail: &chainntnfs.SpendDetail{}, - LocalForceCloseSummary: &lnwallet.LocalForceCloseSummary{ - CloseTx: closeTx, - HtlcResolutions: &lnwallet.HtlcResolutions{}, - }, - ChannelCloseSummary: &channeldb.ChannelCloseSummary{}, - CommitSet: CommitSet{ - ConfCommitKey: &testCase.confCommit, - HtlcSets: make(map[HtlcSetKey][]channeldb.HTLC), - }, - } - - // If the HTLC was meant to expire, then we'll mark the - // closing transaction at the proper expiry height - // since our comparison "need to timeout" comparison is - // based on the confirmation height. - if testCase.htlcExpired { - uniCloseInfo.SpendDetail.SpendingHeight = 5 - } - - // Depending on if we're testing the remote pending - // commitment or not, we'll populate either a fake - // dangling remote commitment, or a regular locked in - // one. - htlcs := []channeldb.HTLC{danglingHTLC} - if testCase.remotePendingHTLC { - uniCloseInfo.CommitSet.HtlcSets[RemotePendingHtlcSet] = htlcs - } else { - uniCloseInfo.CommitSet.HtlcSets[RemoteHtlcSet] = htlcs - } - - chanArb.cfg.ChainEvents.LocalUnilateralClosure <- uniCloseInfo - - // The channel arb should now transition to waiting - // until the HTLCs have been fully resolved. - chanArbCtx.AssertStateTransitions( - StateContractClosed, - StateWaitingFullResolution, - ) - - // Now that we've sent this signal, we should have that - // HTLC be canceled back immediately. - select { - case msgs := <-chanArbCtx.resolutions: - if len(msgs) != 1 { - t.Fatalf("expected 1 message, "+ - "instead got %v", len(msgs)) - } - - if msgs[0].HtlcIndex != htlcIndex { - t.Fatalf("wrong htlc index: expected %v, got %v", - htlcIndex, msgs[0].HtlcIndex) - } - case <-time.After(defaultTimeout): - t.Fatalf("resolution msgs not sent") - } - - // There's no contract to send a fully resolve message, - // so instead, we'll mine another block which'll cause - // it to re-examine its state and realize there're no - // more HTLCs. 
- chanArbCtx.chanArb.blocks <- 6 - chanArbCtx.AssertStateTransitions(StateFullyResolved) - }) - } -} - -// TestChannelArbitratorPendingExpiredHTLC tests that if we have pending htlc -// that is expired we will only go to chain if we are running at least the -// time defined in PaymentsExpirationGracePeriod. -// During this time the remote party is expected to send his updates and cancel -// The htlc. -func TestChannelArbitratorPendingExpiredHTLC(t *testing.T) { - t.Parallel() - - // We'll create the arbitrator and its backing log in a default state. - log := &mockArbitratorLog{ - state: StateDefault, - newStates: make(chan ArbitratorState, 5), - resolvers: make(map[ContractResolver]struct{}), - } - chanArbCtx, err := createTestChannelArbitrator(t, log) - if err != nil { - t.Fatalf("unable to create ChannelArbitrator: %v", err) - } - chanArb := chanArbCtx.chanArb - - // We'll inject a test clock implementation so we can control the uptime. - startTime := time.Date(2020, time.February, 3, 13, 0, 0, 0, time.UTC) - testClock := clock.NewTestClock(startTime) - chanArb.cfg.Clock = testClock - - // We also configure the grace period and the IsForwardedHTLC to identify - // the htlc as our initiated payment. - chanArb.cfg.PaymentsExpirationGracePeriod = time.Second * 15 - chanArb.cfg.IsForwardedHTLC = func(chanID lnwire.ShortChannelID, - htlcIndex uint64) bool { - - return false - } - - if err := chanArb.Start(nil); err != nil { - t.Fatalf("unable to start ChannelArbitrator: %v", err) - } - defer func() { - if err := chanArb.Stop(); err != nil { - t.Fatalf("unable to stop chan arb: %v", err) - } - }() - - // Now that our channel arb has started, we'll set up - // its contract signals channel so we can send it - // various HTLC updates for this test. 
- htlcUpdates := make(chan *ContractUpdate) - signals := &ContractSignals{ - HtlcUpdates: htlcUpdates, - ShortChanID: lnwire.ShortChannelID{}, - } - chanArb.UpdateContractSignals(signals) - - // Next, we'll send it a new HTLC that is set to expire - // in 10 blocks. - htlcIndex := uint64(99) - htlcExpiry := uint32(10) - pendingHTLC := channeldb.HTLC{ - Incoming: false, - Amt: 10000, - HtlcIndex: htlcIndex, - RefundTimeout: htlcExpiry, - } - htlcUpdates <- &ContractUpdate{ - HtlcKey: RemoteHtlcSet, - Htlcs: []channeldb.HTLC{pendingHTLC}, - } - - // We will advance the uptime to 10 seconds which should be still within - // the grace period and should not trigger going to chain. - testClock.SetTime(startTime.Add(time.Second * 10)) - chanArbCtx.chanArb.blocks <- 5 - chanArbCtx.AssertState(StateDefault) - - // We will advance the uptime to 16 seconds which should trigger going - // to chain. - testClock.SetTime(startTime.Add(time.Second * 16)) - chanArbCtx.chanArb.blocks <- 6 - chanArbCtx.AssertStateTransitions( - StateBroadcastCommit, - StateCommitmentBroadcasted, - ) -} - -// TestRemoteCloseInitiator tests the setting of close initiator statuses -// for remote force closes and breaches. -func TestRemoteCloseInitiator(t *testing.T) { - // getCloseSummary returns a unilateral close summary for the channel - // provided. 
- getCloseSummary := func(channel *channeldb.OpenChannel) *RemoteUnilateralCloseInfo { - return &RemoteUnilateralCloseInfo{ - UnilateralCloseSummary: &lnwallet.UnilateralCloseSummary{ - SpendDetail: &chainntnfs.SpendDetail{ - SpenderTxHash: &chainhash.Hash{}, - SpendingTx: &wire.MsgTx{ - TxIn: []*wire.TxIn{}, - TxOut: []*wire.TxOut{}, - }, - }, - ChannelCloseSummary: channeldb.ChannelCloseSummary{ - ChanPoint: channel.FundingOutpoint, - RemotePub: channel.IdentityPub, - SettledBalance: btcutil.Amount(500), - TimeLockedBalance: btcutil.Amount(10000), - IsPending: false, - }, - HtlcResolutions: &lnwallet.HtlcResolutions{}, - }, - } - } - - tests := []struct { - name string - - // notifyClose sends the appropriate chain event to indicate - // that the channel has closed. The event subscription channel - // is expected to be buffered, as is the default for test - // channel arbitrators. - notifyClose func(sub *ChainEventSubscription, - channel *channeldb.OpenChannel) - - // expectedStates is the set of states we expect the arbitrator - // to progress through. - expectedStates []ArbitratorState - }{ - { - name: "force close", - notifyClose: func(sub *ChainEventSubscription, - channel *channeldb.OpenChannel) { - - s := getCloseSummary(channel) - sub.RemoteUnilateralClosure <- s - }, - expectedStates: []ArbitratorState{ - StateContractClosed, StateFullyResolved, - }, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - // First, create alice's channel. - alice, _, cleanUp, err := lnwallet.CreateTestChannels( - channeldb.SingleFunderTweaklessBit, - ) - if err != nil { - t.Fatalf("unable to create test channels: %v", - err) - } - defer cleanUp() - - // Create a mock log which will not block the test's - // expected number of transitions transitions, and has - // no commit resolutions so that the channel will - // resolve immediately. 
- log := &mockArbitratorLog{ - state: StateDefault, - newStates: make(chan ArbitratorState, - len(test.expectedStates)), - resolutions: &ContractResolutions{ - CommitHash: chainhash.Hash{}, - CommitResolution: nil, - }, - } - - // Mock marking the channel as closed, we only care - // about setting of channel status. - mockMarkClosed := func(_ *channeldb.ChannelCloseSummary, - statuses ...channeldb.ChannelStatus) er.R { - for _, status := range statuses { - err := alice.State().ApplyChanStatus(status) - if err != nil { - return err - } - } - return nil - } - - chanArbCtx, err := createTestChannelArbitrator( - t, log, withMarkClosed(mockMarkClosed), - ) - if err != nil { - t.Fatalf("unable to create "+ - "ChannelArbitrator: %v", err) - } - chanArb := chanArbCtx.chanArb - - if err := chanArb.Start(nil); err != nil { - t.Fatalf("unable to start "+ - "ChannelArbitrator: %v", err) - } - defer func() { - if err := chanArb.Stop(); err != nil { - t.Fatal(err) - } - }() - - // It should start out in the default state. - chanArbCtx.AssertState(StateDefault) - - // Notify the close event. - test.notifyClose(chanArb.cfg.ChainEvents, alice.State()) - - // Check that the channel transitions as expected. - chanArbCtx.AssertStateTransitions( - test.expectedStates..., - ) - - // It should also mark the channel as resolved. - select { - case <-chanArbCtx.resolvedChan: - // Expected. - case <-time.After(defaultTimeout): - t.Fatalf("contract was not resolved") - } - - // Check that alice has the status we expect. - if !alice.State().HasChanStatus( - channeldb.ChanStatusRemoteCloseInitiator, - ) { - t.Fatalf("expected remote close initiator, "+ - "got: %v", alice.State().ChanStatus()) - } - }) - } -} - -// TestChannelArbitratorAnchors asserts that the commitment tx anchor is swept. 
-func TestChannelArbitratorAnchors(t *testing.T) { - log := &mockArbitratorLog{ - state: StateDefault, - newStates: make(chan ArbitratorState, 5), - } - - chanArbCtx, err := createTestChannelArbitrator(t, log) - if err != nil { - t.Fatalf("unable to create ChannelArbitrator: %v", err) - } - - // Replace our mocked put report function with one which will push - // reports into a channel for us to consume. We update this function - // because our resolver will be created from the existing chanArb cfg. - reports := make(chan *channeldb.ResolverReport) - chanArbCtx.chanArb.cfg.PutResolverReport = putResolverReportInChannel( - reports, - ) - - chanArb := chanArbCtx.chanArb - chanArb.cfg.PreimageDB = newMockWitnessBeacon() - chanArb.cfg.Registry = &mockRegistry{} - - // Setup two pre-confirmation anchor resolutions on the mock channel. - chanArb.cfg.Channel.(*mockChannel).anchorResolutions = - []*lnwallet.AnchorResolution{ - {}, {}, - } - - if err := chanArb.Start(nil); err != nil { - t.Fatalf("unable to start ChannelArbitrator: %v", err) - } - defer func() { - if err := chanArb.Stop(); err != nil { - t.Fatal(err) - } - }() - - // Create htlcUpdates channel. - htlcUpdates := make(chan *ContractUpdate) - - signals := &ContractSignals{ - HtlcUpdates: htlcUpdates, - ShortChanID: lnwire.ShortChannelID{}, - } - chanArb.UpdateContractSignals(signals) - - errChan := make(chan er.R, 1) - respChan := make(chan *wire.MsgTx, 1) - - // With the channel found, and the request crafted, we'll send over a - // force close request to the arbitrator that watches this channel. - chanArb.forceCloseReqs <- &forceCloseReq{ - errResp: errChan, - closeTx: respChan, - } - - // The force close request should trigger broadcast of the commitment - // transaction. - chanArbCtx.AssertStateTransitions( - StateBroadcastCommit, - StateCommitmentBroadcasted, - ) - - // With the commitment tx still unconfirmed, we expect sweep attempts - // for all three versions of the commitment transaction. 
- <-chanArbCtx.sweeper.sweptInputs - <-chanArbCtx.sweeper.sweptInputs - - select { - case <-respChan: - case <-time.After(5 * time.Second): - t.Fatalf("no response received") - } - - select { - case err := <-errChan: - if err != nil { - t.Fatalf("error force closing channel: %v", err) - } - case <-time.After(5 * time.Second): - t.Fatalf("no response received") - } - - // Now notify about the local force close getting confirmed. - closeTx := &wire.MsgTx{ - TxIn: []*wire.TxIn{ - { - PreviousOutPoint: wire.OutPoint{}, - Witness: [][]byte{ - {0x1}, - {0x2}, - }, - }, - }, - } - - anchorResolution := &lnwallet.AnchorResolution{ - AnchorSignDescriptor: input.SignDescriptor{ - Output: &wire.TxOut{ - Value: 1, - }, - }, - } - - chanArb.cfg.ChainEvents.LocalUnilateralClosure <- &LocalUnilateralCloseInfo{ - SpendDetail: &chainntnfs.SpendDetail{}, - LocalForceCloseSummary: &lnwallet.LocalForceCloseSummary{ - CloseTx: closeTx, - HtlcResolutions: &lnwallet.HtlcResolutions{}, - AnchorResolution: anchorResolution, - }, - ChannelCloseSummary: &channeldb.ChannelCloseSummary{}, - CommitSet: CommitSet{ - ConfCommitKey: &LocalHtlcSet, - HtlcSets: map[HtlcSetKey][]channeldb.HTLC{}, - }, - } - - chanArbCtx.AssertStateTransitions( - StateContractClosed, - StateWaitingFullResolution, - ) - - // We expect to only have the anchor resolver active. - if len(chanArb.activeResolvers) != 1 { - t.Fatalf("expected single resolver, instead got: %v", - len(chanArb.activeResolvers)) - } - - resolver := chanArb.activeResolvers[0] - _, ok := resolver.(*anchorResolver) - if !ok { - t.Fatalf("expected anchor resolver, got %T", resolver) - } - - // The anchor resolver is expected to re-offer the anchor input to the - // sweeper. - <-chanArbCtx.sweeper.sweptInputs - - // The mock sweeper immediately signals success for that input. This - // should transition the channel to the resolved state. 
- chanArbCtx.AssertStateTransitions(StateFullyResolved) - select { - case <-chanArbCtx.resolvedChan: - case <-time.After(5 * time.Second): - t.Fatalf("contract was not resolved") - } - - anchorAmt := btcutil.Amount( - anchorResolution.AnchorSignDescriptor.Output.Value, - ) - spendTx := chanArbCtx.sweeper.sweepTx.TxHash() - expectedReport := &channeldb.ResolverReport{ - OutPoint: anchorResolution.CommitAnchor, - Amount: anchorAmt, - ResolverType: channeldb.ResolverTypeAnchor, - ResolverOutcome: channeldb.ResolverOutcomeClaimed, - SpendTxID: &spendTx, - } - - assertResolverReport(t, reports, expectedReport) -} - -// putResolverReportInChannel returns a put report function which will pipe -// reports into the channel provided. -func putResolverReportInChannel(reports chan *channeldb.ResolverReport) func( - _ kvdb.RwTx, report *channeldb.ResolverReport) er.R { - - return func(_ kvdb.RwTx, report *channeldb.ResolverReport) er.R { - reports <- report - return nil - } -} - -// assertResolverReport checks that a set of reports only contains a single -// report, and that it is equal to the expected report passed in. 
-func assertResolverReport(t *testing.T, reports chan *channeldb.ResolverReport, - expected *channeldb.ResolverReport) { - - select { - case report := <-reports: - if !reflect.DeepEqual(report, expected) { - t.Fatalf("expected: %v, got: %v", expected, report) - } - - case <-time.After(defaultTimeout): - t.Fatalf("no reports present") - } -} - -type mockChannel struct { - anchorResolutions []*lnwallet.AnchorResolution -} - -func (m *mockChannel) NewAnchorResolutions() ([]*lnwallet.AnchorResolution, - er.R) { - - return m.anchorResolutions, nil -} - -func (m *mockChannel) ForceCloseChan() (*lnwallet.LocalForceCloseSummary, er.R) { - summary := &lnwallet.LocalForceCloseSummary{ - CloseTx: &wire.MsgTx{}, - HtlcResolutions: &lnwallet.HtlcResolutions{}, - } - return summary, nil -} - -func TestMain(m *testing.M) { - globalcfg.SelectConfig(globalcfg.BitcoinDefaults()) - os.Exit(m.Run()) -} diff --git a/lnd/contractcourt/commit_sweep_resolver.go b/lnd/contractcourt/commit_sweep_resolver.go deleted file mode 100644 index fe408819..00000000 --- a/lnd/contractcourt/commit_sweep_resolver.go +++ /dev/null @@ -1,412 +0,0 @@ -package contractcourt - -import ( - "io" - "sync" - - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/input" - "github.com/pkt-cash/pktd/lnd/lnwallet" - "github.com/pkt-cash/pktd/lnd/sweep" - "github.com/pkt-cash/pktd/pktlog/log" - "github.com/pkt-cash/pktd/txscript/opcode" - "github.com/pkt-cash/pktd/wire" -) - -const ( - // commitOutputConfTarget is the default confirmation target we'll use - // for sweeps of commit outputs that belong to us. 
- commitOutputConfTarget = 6 -) - -// commitSweepResolver is a resolver that will attempt to sweep the commitment -// output paying to us, in the case that the remote party broadcasts their -// version of the commitment transaction. We can sweep this output immediately, -// as it doesn't have a time-lock delay. -type commitSweepResolver struct { - // commitResolution contains all data required to successfully sweep - // this HTLC on-chain. - commitResolution lnwallet.CommitOutputResolution - - // resolved reflects if the contract has been fully resolved or not. - resolved bool - - // broadcastHeight is the height that the original contract was - // broadcast to the main-chain at. We'll use this value to bound any - // historical queries to the chain for spends/confirmations. - broadcastHeight uint32 - - // chanPoint is the channel point of the original contract. - chanPoint wire.OutPoint - - // currentReport stores the current state of the resolver for reporting - // over the rpc interface. - currentReport ContractReport - - // reportLock prevents concurrent access to the resolver report. - reportLock sync.Mutex - - contractResolverKit -} - -// newCommitSweepResolver instantiates a new direct commit output resolver. -func newCommitSweepResolver(res lnwallet.CommitOutputResolution, - broadcastHeight uint32, - chanPoint wire.OutPoint, resCfg ResolverConfig) *commitSweepResolver { - - r := &commitSweepResolver{ - contractResolverKit: *newContractResolverKit(resCfg), - commitResolution: res, - broadcastHeight: broadcastHeight, - chanPoint: chanPoint, - } - - r.initReport() - - return r -} - -// ResolverKey returns an identifier which should be globally unique for this -// particular resolver within the chain the original contract resides within. 
-func (c *commitSweepResolver) ResolverKey() []byte { - key := newResolverID(c.commitResolution.SelfOutPoint) - return key[:] -} - -// waitForHeight registers for block notifications and waits for the provided -// block height to be reached. -func (c *commitSweepResolver) waitForHeight(waitHeight uint32) er.R { - // Register for block epochs. After registration, the current height - // will be sent on the channel immediately. - blockEpochs, err := c.Notifier.RegisterBlockEpochNtfn(nil) - if err != nil { - return err - } - defer blockEpochs.Cancel() - - for { - select { - case newBlock, ok := <-blockEpochs.Epochs: - if !ok { - return errResolverShuttingDown.Default() - } - height := newBlock.Height - if height >= int32(waitHeight) { - return nil - } - - case <-c.quit: - return errResolverShuttingDown.Default() - } - } -} - -// getCommitTxConfHeight waits for confirmation of the commitment tx and returns -// the confirmation height. -func (c *commitSweepResolver) getCommitTxConfHeight() (uint32, er.R) { - txID := c.commitResolution.SelfOutPoint.Hash - signDesc := c.commitResolution.SelfOutputSignDesc - pkScript := signDesc.Output.PkScript - const confDepth = 1 - confChan, err := c.Notifier.RegisterConfirmationsNtfn( - &txID, pkScript, confDepth, c.broadcastHeight, - ) - if err != nil { - return 0, err - } - defer confChan.Cancel() - - select { - case txConfirmation, ok := <-confChan.Confirmed: - if !ok { - return 0, er.Errorf("cannot get confirmation "+ - "for commit tx %v", txID) - } - - return txConfirmation.BlockHeight, nil - - case <-c.quit: - return 0, errResolverShuttingDown.Default() - } -} - -// Resolve instructs the contract resolver to resolve the output on-chain. Once -// the output has been *fully* resolved, the function should return immediately -// with a nil ContractResolver value for the first return value. In the case -// that the contract requires further resolution, then another resolve is -// returned. 
-// -// NOTE: This function MUST be run as a goroutine. -func (c *commitSweepResolver) Resolve() (ContractResolver, er.R) { - // If we're already resolved, then we can exit early. - if c.resolved { - return nil, nil - } - - confHeight, err := c.getCommitTxConfHeight() - if err != nil { - return nil, err - } - - unlockHeight := confHeight + c.commitResolution.MaturityDelay - - log.Debugf("commit conf_height=%v, unlock_height=%v", - confHeight, unlockHeight) - - // Update report now that we learned the confirmation height. - c.reportLock.Lock() - c.currentReport.MaturityHeight = unlockHeight - c.reportLock.Unlock() - - // If there is a csv delay, we'll wait for that. - if c.commitResolution.MaturityDelay > 0 { - log.Debugf("waiting for csv lock to expire at height %v", - unlockHeight) - - // We only need to wait for the block before the block that - // unlocks the spend path. - err := c.waitForHeight(unlockHeight - 1) - if err != nil { - return nil, err - } - } - - // The output is on our local commitment if the script starts with - // OP_IF for the revocation clause. On the remote commitment it will - // either be a regular P2WKH or a simple sig spend with a CSV delay. - isLocalCommitTx := c.commitResolution.SelfOutputSignDesc.WitnessScript[0] == opcode.OP_IF - isDelayedOutput := c.commitResolution.MaturityDelay != 0 - - log.Debugf("isDelayedOutput=%v, isLocalCommitTx=%v", isDelayedOutput, - isLocalCommitTx) - - // There're three types of commitments, those that have tweaks - // for the remote key (us in this case), those that don't, and a third - // where there is no tweak and the output is delayed. On the local - // commitment our output will always be delayed. We'll rely on the - // presence of the commitment tweak to to discern which type of - // commitment this is. - var witnessType input.WitnessType - switch { - - // Delayed output to us on our local commitment. 
- case isLocalCommitTx: - witnessType = input.CommitmentTimeLock - - // A confirmed output to us on the remote commitment. - case isDelayedOutput: - witnessType = input.CommitmentToRemoteConfirmed - - // A non-delayed output on the remote commitment where the key is - // tweakless. - case c.commitResolution.SelfOutputSignDesc.SingleTweak == nil: - witnessType = input.CommitSpendNoDelayTweakless - - // A non-delayed output on the remote commitment where the key is - // tweaked. - default: - witnessType = input.CommitmentNoDelay - } - - log.Infof("Sweeping with witness type: %v", witnessType) - - // We'll craft an input with all the information required for - // the sweeper to create a fully valid sweeping transaction to - // recover these coins. - inp := input.NewCsvInput( - &c.commitResolution.SelfOutPoint, - witnessType, - &c.commitResolution.SelfOutputSignDesc, - c.broadcastHeight, - c.commitResolution.MaturityDelay, - ) - - // With our input constructed, we'll now offer it to the - // sweeper. - log.Infof("sweeping commit output") - - feePref := sweep.FeePreference{ConfTarget: commitOutputConfTarget} - resultChan, err := c.Sweeper.SweepInput(inp, sweep.Params{Fee: feePref}) - if err != nil { - log.Errorf("unable to sweep input: %v", err) - - return nil, err - } - - var sweepTxID chainhash.Hash - - // Sweeper is going to join this input with other inputs if - // possible and publish the sweep tx. When the sweep tx - // confirms, it signals us through the result channel with the - // outcome. Wait for this to happen. - outcome := channeldb.ResolverOutcomeClaimed - select { - case sweepResult := <-resultChan: - switch { - case sweep.ErrRemoteSpend.Is(sweepResult.Err): - // If the remote party was able to sweep this output - // it's likely what we sent was actually a revoked - // commitment. Report the error and continue to wrap up - // the contract. 
- log.Warnf("local commitment output was swept by "+ - "remote party via %v", sweepResult.Tx.TxHash()) - outcome = channeldb.ResolverOutcomeUnclaimed - case sweepResult.Err == nil: - // No errors, therefore continue processing. - log.Infof("local commitment output fully resolved by "+ - "sweep tx: %v", sweepResult.Tx.TxHash()) - default: - // Unknown errors. - log.Errorf("unable to sweep input: %v", - sweepResult.Err) - - return nil, sweepResult.Err - } - - sweepTxID = sweepResult.Tx.TxHash() - - case <-c.quit: - return nil, errResolverShuttingDown.Default() - } - - // Funds have been swept and balance is no longer in limbo. - c.reportLock.Lock() - if outcome == channeldb.ResolverOutcomeClaimed { - // We only record the balance as recovered if it actually came - // back to us. - c.currentReport.RecoveredBalance = c.currentReport.LimboBalance - } - c.currentReport.LimboBalance = 0 - c.reportLock.Unlock() - report := c.currentReport.resolverReport( - &sweepTxID, channeldb.ResolverTypeCommit, outcome, - ) - c.resolved = true - - // Checkpoint the resolver with a closure that will write the outcome - // of the resolver and its sweep transaction to disk. - return nil, c.Checkpoint(c, report) -} - -// Stop signals the resolver to cancel any current resolution processes, and -// suspend. -// -// NOTE: Part of the ContractResolver interface. -func (c *commitSweepResolver) Stop() { - close(c.quit) -} - -// IsResolved returns true if the stored state in the resolve is fully -// resolved. In this case the target output can be forgotten. -// -// NOTE: Part of the ContractResolver interface. -func (c *commitSweepResolver) IsResolved() bool { - return c.resolved -} - -// Encode writes an encoded version of the ContractResolver into the passed -// Writer. -// -// NOTE: Part of the ContractResolver interface. 
-func (c *commitSweepResolver) Encode(w io.Writer) er.R { - if err := encodeCommitResolution(w, &c.commitResolution); err != nil { - return err - } - - if err := util.WriteBin(w, endian, c.resolved); err != nil { - return err - } - if err := util.WriteBin(w, endian, c.broadcastHeight); err != nil { - return err - } - if _, err := util.Write(w, c.chanPoint.Hash[:]); err != nil { - return err - } - err := util.WriteBin(w, endian, c.chanPoint.Index) - if err != nil { - return err - } - - // Previously a sweep tx was serialized at this point. Refactoring - // removed this, but keep in mind that this data may still be present in - // the database. - - return nil -} - -// newCommitSweepResolverFromReader attempts to decode an encoded -// ContractResolver from the passed Reader instance, returning an active -// ContractResolver instance. -func newCommitSweepResolverFromReader(r io.Reader, resCfg ResolverConfig) ( - *commitSweepResolver, er.R) { - - c := &commitSweepResolver{ - contractResolverKit: *newContractResolverKit(resCfg), - } - - if err := decodeCommitResolution(r, &c.commitResolution); err != nil { - return nil, err - } - - if err := util.ReadBin(r, endian, &c.resolved); err != nil { - return nil, err - } - if err := util.ReadBin(r, endian, &c.broadcastHeight); err != nil { - return nil, err - } - _, err := util.ReadFull(r, c.chanPoint.Hash[:]) - if err != nil { - return nil, err - } - err = util.ReadBin(r, endian, &c.chanPoint.Index) - if err != nil { - return nil, err - } - - // Previously a sweep tx was deserialized at this point. Refactoring - // removed this, but keep in mind that this data may still be present in - // the database. - - c.initReport() - - return c, nil -} - -// report returns a report on the resolution state of the contract. 
-func (c *commitSweepResolver) report() *ContractReport { - c.reportLock.Lock() - defer c.reportLock.Unlock() - - copy := c.currentReport - return © -} - -// initReport initializes the pending channels report for this resolver. -func (c *commitSweepResolver) initReport() { - amt := btcutil.Amount( - c.commitResolution.SelfOutputSignDesc.Output.Value, - ) - - // Set the initial report. All fields are filled in, except for the - // maturity height which remains 0 until Resolve() is executed. - // - // TODO(joostjager): Resolvers only activate after the commit tx - // confirms. With more refactoring in channel arbitrator, it would be - // possible to make the confirmation height part of ResolverConfig and - // populate MaturityHeight here. - c.currentReport = ContractReport{ - Outpoint: c.commitResolution.SelfOutPoint, - Type: ReportOutputUnencumbered, - Amount: amt, - LimboBalance: amt, - RecoveredBalance: 0, - } -} - -// A compile time assertion to ensure commitSweepResolver meets the -// ContractResolver interface. 
-var _ reportingContractResolver = (*commitSweepResolver)(nil) diff --git a/lnd/contractcourt/commit_sweep_resolver_test.go b/lnd/contractcourt/commit_sweep_resolver_test.go deleted file mode 100644 index bb4a7071..00000000 --- a/lnd/contractcourt/commit_sweep_resolver_test.go +++ /dev/null @@ -1,372 +0,0 @@ -package contractcourt - -import ( - "testing" - "time" - - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/chainntnfs" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/lnd/input" - "github.com/pkt-cash/pktd/lnd/lntest/mock" - "github.com/pkt-cash/pktd/lnd/lnwallet" - "github.com/pkt-cash/pktd/lnd/lnwallet/chainfee" - "github.com/pkt-cash/pktd/lnd/sweep" - "github.com/pkt-cash/pktd/wire" -) - -type commitSweepResolverTestContext struct { - resolver *commitSweepResolver - notifier *mock.ChainNotifier - sweeper *mockSweeper - resolverResultChan chan resolveResult - t *testing.T -} - -func newCommitSweepResolverTestContext(t *testing.T, - resolution *lnwallet.CommitOutputResolution) *commitSweepResolverTestContext { - - notifier := &mock.ChainNotifier{ - EpochChan: make(chan *chainntnfs.BlockEpoch), - SpendChan: make(chan *chainntnfs.SpendDetail), - ConfChan: make(chan *chainntnfs.TxConfirmation), - } - - sweeper := newMockSweeper() - - checkPointChan := make(chan struct{}, 1) - - chainCfg := ChannelArbitratorConfig{ - ChainArbitratorConfig: ChainArbitratorConfig{ - Notifier: notifier, - Sweeper: sweeper, - }, - PutResolverReport: func(_ kvdb.RwTx, - _ *channeldb.ResolverReport) er.R { - - return nil - }, - } - - cfg := ResolverConfig{ - ChannelArbitratorConfig: chainCfg, - Checkpoint: func(_ ContractResolver, - _ ...*channeldb.ResolverReport) er.R { - - checkPointChan <- struct{}{} - return nil - }, - } - - resolver := newCommitSweepResolver( - *resolution, 0, wire.OutPoint{}, cfg, - ) - - return &commitSweepResolverTestContext{ - 
resolver: resolver, - notifier: notifier, - sweeper: sweeper, - t: t, - } -} - -func (i *commitSweepResolverTestContext) resolve() { - // Start resolver. - i.resolverResultChan = make(chan resolveResult, 1) - go func() { - nextResolver, err := i.resolver.Resolve() - i.resolverResultChan <- resolveResult{ - nextResolver: nextResolver, - err: err, - } - }() -} - -func (i *commitSweepResolverTestContext) notifyEpoch(height int32) { - i.notifier.EpochChan <- &chainntnfs.BlockEpoch{ - Height: height, - } -} - -func (i *commitSweepResolverTestContext) waitForResult() { - i.t.Helper() - - result := <-i.resolverResultChan - if result.err != nil { - i.t.Fatal(result.err) - } - - if result.nextResolver != nil { - i.t.Fatal("expected no next resolver") - } -} - -type mockSweeper struct { - sweptInputs chan input.Input - updatedInputs chan wire.OutPoint - sweepTx *wire.MsgTx - sweepErr *er.ErrorCode -} - -func newMockSweeper() *mockSweeper { - return &mockSweeper{ - sweptInputs: make(chan input.Input), - updatedInputs: make(chan wire.OutPoint), - sweepTx: &wire.MsgTx{}, - } -} - -func (s *mockSweeper) SweepInput(input input.Input, params sweep.Params) ( - chan sweep.Result, er.R) { - - s.sweptInputs <- input - - var e er.R - if s.sweepErr != nil { - e = s.sweepErr.Default() - } - - result := make(chan sweep.Result, 1) - result <- sweep.Result{ - Tx: s.sweepTx, - Err: e, - } - return result, nil -} - -func (s *mockSweeper) CreateSweepTx(inputs []input.Input, feePref sweep.FeePreference, - currentBlockHeight uint32) (*wire.MsgTx, er.R) { - - return nil, nil -} - -func (s *mockSweeper) RelayFeePerKW() chainfee.SatPerKWeight { - return 253 -} - -func (s *mockSweeper) UpdateParams(input wire.OutPoint, - params sweep.ParamsUpdate) (chan sweep.Result, er.R) { - - s.updatedInputs <- input - - result := make(chan sweep.Result, 1) - result <- sweep.Result{ - Tx: s.sweepTx, - } - return result, nil -} - -var _ UtxoSweeper = &mockSweeper{} - -// TestCommitSweepResolverNoDelay tests 
resolution of a direct commitment output -// unencumbered by a time lock. -func TestCommitSweepResolverNoDelay(t *testing.T) { - t.Parallel() - defer timeout(t)() - - res := lnwallet.CommitOutputResolution{ - SelfOutputSignDesc: input.SignDescriptor{ - Output: &wire.TxOut{ - Value: 100, - }, - WitnessScript: []byte{0}, - }, - } - - ctx := newCommitSweepResolverTestContext(t, &res) - - // Replace our checkpoint with one which will push reports into a - // channel for us to consume. We replace this function on the resolver - // itself because it is created by the test context. - reportChan := make(chan *channeldb.ResolverReport) - ctx.resolver.Checkpoint = func(_ ContractResolver, - reports ...*channeldb.ResolverReport) er.R { - - // Send all of our reports into the channel. - for _, report := range reports { - reportChan <- report - } - - return nil - } - - ctx.resolve() - - spendTx := &wire.MsgTx{} - spendHash := spendTx.TxHash() - ctx.notifier.ConfChan <- &chainntnfs.TxConfirmation{ - Tx: spendTx, - } - - // No csv delay, so the input should be swept immediately. - <-ctx.sweeper.sweptInputs - - amt := btcutil.Amount(res.SelfOutputSignDesc.Output.Value) - expectedReport := &channeldb.ResolverReport{ - OutPoint: wire.OutPoint{}, - Amount: amt, - ResolverType: channeldb.ResolverTypeCommit, - ResolverOutcome: channeldb.ResolverOutcomeClaimed, - SpendTxID: &spendHash, - } - - assertResolverReport(t, reportChan, expectedReport) - - ctx.waitForResult() -} - -// testCommitSweepResolverDelay tests resolution of a direct commitment output -// that is encumbered by a time lock. sweepErr indicates whether the local node -// fails to sweep the output. 
-func testCommitSweepResolverDelay(t *testing.T, sweepErr *er.ErrorCode) { - defer timeout(t)() - - const sweepProcessInterval = 100 * time.Millisecond - amt := int64(100) - outpoint := wire.OutPoint{ - Index: 5, - } - res := lnwallet.CommitOutputResolution{ - SelfOutputSignDesc: input.SignDescriptor{ - Output: &wire.TxOut{ - Value: amt, - }, - WitnessScript: []byte{0}, - }, - MaturityDelay: 3, - SelfOutPoint: outpoint, - } - - ctx := newCommitSweepResolverTestContext(t, &res) - - // Replace our checkpoint with one which will push reports into a - // channel for us to consume. We replace this function on the resolver - // itself because it is created by the test context. - reportChan := make(chan *channeldb.ResolverReport) - ctx.resolver.Checkpoint = func(_ ContractResolver, - reports ...*channeldb.ResolverReport) er.R { - - // Send all of our reports into the channel. - for _, report := range reports { - reportChan <- report - } - - return nil - } - - // Setup whether we expect the sweeper to receive a sweep error in this - // test case. - ctx.sweeper.sweepErr = sweepErr - - report := ctx.resolver.report() - expectedReport := ContractReport{ - Outpoint: outpoint, - Type: ReportOutputUnencumbered, - Amount: btcutil.Amount(amt), - LimboBalance: btcutil.Amount(amt), - } - if *report != expectedReport { - t.Fatalf("unexpected resolver report. want=%v got=%v", - expectedReport, report) - } - - ctx.resolve() - - ctx.notifier.ConfChan <- &chainntnfs.TxConfirmation{ - BlockHeight: testInitialBlockHeight - 1, - } - - // Allow resolver to process confirmation. - time.Sleep(sweepProcessInterval) - - // Expect report to be updated. - report = ctx.resolver.report() - if report.MaturityHeight != testInitialBlockHeight+2 { - t.Fatal("report maturity height incorrect") - } - - // Notify initial block height. The csv lock is still in effect, so we - // don't expect any sweep to happen yet. 
- ctx.notifyEpoch(testInitialBlockHeight) - - select { - case <-ctx.sweeper.sweptInputs: - t.Fatal("no sweep expected") - case <-time.After(sweepProcessInterval): - } - - // A new block arrives. The commit tx confirmed at height -1 and the csv - // is 3, so a spend will be valid in the first block after height +1. - ctx.notifyEpoch(testInitialBlockHeight + 1) - - <-ctx.sweeper.sweptInputs - - // Set the resolution report outcome based on whether our sweep - // succeeded. - outcome := channeldb.ResolverOutcomeClaimed - if sweepErr != nil { - outcome = channeldb.ResolverOutcomeUnclaimed - } - sweepTx := ctx.sweeper.sweepTx.TxHash() - - assertResolverReport(t, reportChan, &channeldb.ResolverReport{ - OutPoint: outpoint, - ResolverType: channeldb.ResolverTypeCommit, - ResolverOutcome: outcome, - Amount: btcutil.Amount(amt), - SpendTxID: &sweepTx, - }) - - ctx.waitForResult() - - // If this test case generates a sweep error, we don't expect to be - // able to recover anything. This might happen if the local commitment - // output was swept by a justice transaction by the remote party. - expectedRecoveredBalance := btcutil.Amount(amt) - if sweepErr != nil { - expectedRecoveredBalance = 0 - } - - report = ctx.resolver.report() - expectedReport = ContractReport{ - Outpoint: outpoint, - Type: ReportOutputUnencumbered, - Amount: btcutil.Amount(amt), - MaturityHeight: testInitialBlockHeight + 2, - RecoveredBalance: expectedRecoveredBalance, - } - if *report != expectedReport { - t.Fatalf("unexpected resolver report. want=%v got=%v", - expectedReport, report) - } - -} - -// TestCommitSweepResolverDelay tests resolution of a direct commitment output -// that is encumbered by a time lock. 
-func TestCommitSweepResolverDelay(t *testing.T) { - t.Parallel() - - testCases := []struct { - name string - sweepErr *er.ErrorCode - }{{ - name: "success", - sweepErr: nil, - }, { - name: "remote spend", - sweepErr: sweep.ErrRemoteSpend, - }} - - for _, tc := range testCases { - tc := tc - ok := t.Run(tc.name, func(t *testing.T) { - testCommitSweepResolverDelay(t, tc.sweepErr) - }) - if !ok { - break - } - } -} diff --git a/lnd/contractcourt/contract_resolvers.go b/lnd/contractcourt/contract_resolvers.go deleted file mode 100644 index 98ba2dae..00000000 --- a/lnd/contractcourt/contract_resolvers.go +++ /dev/null @@ -1,114 +0,0 @@ -package contractcourt - -import ( - "encoding/binary" - "io" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/wire" -) - -var ( - endian = binary.BigEndian -) - -const ( - // sweepConfTarget is the default number of blocks that we'll use as a - // confirmation target when sweeping. - sweepConfTarget = 6 -) - -// ContractResolver is an interface which packages a state machine which is -// able to carry out the necessary steps required to fully resolve a Bitcoin -// contract on-chain. Resolvers are fully encodable to ensure callers are able -// to persist them properly. A resolver may produce another resolver in the -// case that claiming an HTLC is a multi-stage process. In this case, we may -// partially resolve the contract, then persist, and set up for an additional -// resolution. -type ContractResolver interface { - // ResolverKey returns an identifier which should be globally unique - // for this particular resolver within the chain the original contract - // resides within. - ResolverKey() []byte - - // Resolve instructs the contract resolver to resolve the output - // on-chain. Once the output has been *fully* resolved, the function - // should return immediately with a nil ContractResolver value for the - // first return value. 
In the case that the contract requires further - // resolution, then another resolve is returned. - // - // NOTE: This function MUST be run as a goroutine. - Resolve() (ContractResolver, er.R) - - // IsResolved returns true if the stored state in the resolve is fully - // resolved. In this case the target output can be forgotten. - IsResolved() bool - - // Encode writes an encoded version of the ContractResolver into the - // passed Writer. - Encode(w io.Writer) er.R - - // Stop signals the resolver to cancel any current resolution - // processes, and suspend. - Stop() -} - -// htlcContractResolver is the required interface for htlc resolvers. -type htlcContractResolver interface { - ContractResolver - - // HtlcPoint returns the htlc's outpoint on the commitment tx. - HtlcPoint() wire.OutPoint - - // Supplement adds additional information to the resolver that is - // required before Resolve() is called. - Supplement(htlc channeldb.HTLC) -} - -// reportingContractResolver is a ContractResolver that also exposes a report on -// the resolution state of the contract. -type reportingContractResolver interface { - ContractResolver - - report() *ContractReport -} - -// ResolverConfig contains the externally supplied configuration items that are -// required by a ContractResolver implementation. -type ResolverConfig struct { - // ChannelArbitratorConfig contains all the interfaces and closures - // required for the resolver to interact with outside sub-systems. - ChannelArbitratorConfig - - // Checkpoint allows a resolver to check point its state. This function - // should write the state of the resolver to persistent storage, and - // return a non-nil error upon success. It takes a resolver report, - // which contains information about the outcome and should be written - // to disk if non-nil. 
- Checkpoint func(ContractResolver, ...*channeldb.ResolverReport) er.R -} - -// contractResolverKit is meant to be used as a mix-in struct to be embedded within a -// given ContractResolver implementation. It contains all the common items that -// a resolver requires to carry out its duties. -type contractResolverKit struct { - ResolverConfig - - quit chan struct{} -} - -// newContractResolverKit instantiates the mix-in struct. -func newContractResolverKit(cfg ResolverConfig) *contractResolverKit { - return &contractResolverKit{ - ResolverConfig: cfg, - quit: make(chan struct{}), - } -} - -var ( - // errResolverShuttingDown is returned when the resolver stops - // progressing because it received the quit signal. - errResolverShuttingDown = er.GenericErrorType.CodeWithDetail("errResolverShuttingDown", - "resolver shutting down") -) diff --git a/lnd/contractcourt/htlc_incoming_contest_resolver.go b/lnd/contractcourt/htlc_incoming_contest_resolver.go deleted file mode 100644 index c7ac105b..00000000 --- a/lnd/contractcourt/htlc_incoming_contest_resolver.go +++ /dev/null @@ -1,452 +0,0 @@ -package contractcourt - -import ( - "bytes" - "io" - - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/htlcswitch/hop" - "github.com/pkt-cash/pktd/lnd/invoices" - "github.com/pkt-cash/pktd/lnd/lntypes" - "github.com/pkt-cash/pktd/lnd/lnwallet" - "github.com/pkt-cash/pktd/pktlog/log" -) - -// htlcIncomingContestResolver is a ContractResolver that's able to resolve an -// incoming HTLC that is still contested. An HTLC is still contested, if at the -// time of commitment broadcast, we don't know of the preimage for it yet, and -// it hasn't expired. In this case, we can resolve the HTLC if we learn of the -// preimage, otherwise the remote party will sweep it after it expires. -// -// TODO(roasbeef): just embed the other resolver? 
-type htlcIncomingContestResolver struct { - // htlcExpiry is the absolute expiry of this incoming HTLC. We use this - // value to determine if we can exit early as if the HTLC times out, - // before we learn of the preimage then we can't claim it on chain - // successfully. - htlcExpiry uint32 - - // htlcSuccessResolver is the inner resolver that may be utilized if we - // learn of the preimage. - htlcSuccessResolver -} - -// newIncomingContestResolver instantiates a new incoming htlc contest resolver. -func newIncomingContestResolver( - res lnwallet.IncomingHtlcResolution, broadcastHeight uint32, - htlc channeldb.HTLC, resCfg ResolverConfig) *htlcIncomingContestResolver { - - success := newSuccessResolver( - res, broadcastHeight, htlc, resCfg, - ) - - return &htlcIncomingContestResolver{ - htlcExpiry: htlc.RefundTimeout, - htlcSuccessResolver: *success, - } -} - -// Resolve attempts to resolve this contract. As we don't yet know of the -// preimage for the contract, we'll wait for one of two things to happen: -// -// 1. We learn of the preimage! In this case, we can sweep the HTLC incoming -// and ensure that if this was a multi-hop HTLC we are made whole. In this -// case, an additional ContractResolver will be returned to finish the -// job. -// -// 2. The HTLC expires. If this happens, then the contract is fully resolved -// as we have no remaining actions left at our disposal. -// -// NOTE: Part of the ContractResolver interface. -func (h *htlcIncomingContestResolver) Resolve() (ContractResolver, er.R) { - // If we're already full resolved, then we don't have anything further - // to do. - if h.resolved { - return nil, nil - } - - // First try to parse the payload. If that fails, we can stop resolution - // now. 
- payload, err := h.decodePayload() - if err != nil { - log.Debugf("ChannelArbitrator(%v): cannot decode payload of "+ - "htlc %v", h.ChanPoint, h.HtlcPoint()) - - // If we've locked in an htlc with an invalid payload on our - // commitment tx, we don't need to resolve it. The other party - // will time it out and get their funds back. This situation can - // present itself when we crash before processRemoteAdds in the - // link has ran. - h.resolved = true - - // We write a report to disk that indicates we could not decode - // the htlc. - resReport := h.report().resolverReport( - nil, channeldb.ResolverTypeIncomingHtlc, - channeldb.ResolverOutcomeAbandoned, - ) - return nil, h.PutResolverReport(nil, resReport) - } - - // Register for block epochs. After registration, the current height - // will be sent on the channel immediately. - blockEpochs, err := h.Notifier.RegisterBlockEpochNtfn(nil) - if err != nil { - return nil, err - } - defer blockEpochs.Cancel() - - var currentHeight int32 - select { - case newBlock, ok := <-blockEpochs.Epochs: - if !ok { - return nil, errResolverShuttingDown.Default() - } - currentHeight = newBlock.Height - case <-h.quit: - return nil, errResolverShuttingDown.Default() - } - - // We'll first check if this HTLC has been timed out, if so, we can - // return now and mark ourselves as resolved. If we're past the point of - // expiry of the HTLC, then at this point the sender can sweep it, so - // we'll end our lifetime. Here we deliberately forego the chance that - // the sender doesn't sweep and we already have or will learn the - // preimage. Otherwise the resolver could potentially stay active - // indefinitely and the channel will never close properly. 
- if uint32(currentHeight) >= h.htlcExpiry { - // TODO(roasbeef): should also somehow check if outgoing is - // resolved or not - // * may need to hook into the circuit map - // * can't timeout before the outgoing has been - - log.Infof("%T(%v): HTLC has timed out (expiry=%v, height=%v), "+ - "abandoning", h, h.htlcResolution.ClaimOutpoint, - h.htlcExpiry, currentHeight) - h.resolved = true - - // Finally, get our report and checkpoint our resolver with a - // timeout outcome report. - report := h.report().resolverReport( - nil, channeldb.ResolverTypeIncomingHtlc, - channeldb.ResolverOutcomeTimeout, - ) - return nil, h.Checkpoint(h, report) - } - - // applyPreimage is a helper function that will populate our internal - // resolver with the preimage we learn of. This should be called once - // the preimage is revealed so the inner resolver can properly complete - // its duties. The error return value indicates whether the preimage - // was properly applied. - applyPreimage := func(preimage lntypes.Preimage) er.R { - // Sanity check to see if this preimage matches our htlc. At - // this point it should never happen that it does not match. - if !preimage.Matches(h.htlc.RHash) { - return er.New("preimage does not match hash") - } - - // Update htlcResolution with the matching preimage. - h.htlcResolution.Preimage = preimage - - log.Infof("%T(%v): extracted preimage=%v from beacon!", h, - h.htlcResolution.ClaimOutpoint, preimage) - - // If this is our commitment transaction, then we'll need to - // populate the witness for the second-level HTLC transaction. - if h.htlcResolution.SignedSuccessTx != nil { - // Within the witness for the success transaction, the - // preimage is the 4th element as it looks like: - // - // * - // - // We'll populate it within the witness, as since this - // was a "contest" resolver, we didn't yet know of the - // preimage. 
- h.htlcResolution.SignedSuccessTx.TxIn[0].Witness[3] = preimage[:] - } - - return nil - } - - // Define a closure to process htlc resolutions either directly or - // triggered by future notifications. - processHtlcResolution := func(e invoices.HtlcResolution) ( - ContractResolver, er.R) { - - // Take action based on the type of resolution we have - // received. - switch resolution := e.(type) { - - // If the htlc resolution was a settle, apply the - // preimage and return a success resolver. - case *invoices.HtlcSettleResolution: - err := applyPreimage(resolution.Preimage) - if err != nil { - return nil, err - } - - return &h.htlcSuccessResolver, nil - - // If the htlc was failed, mark the htlc as - // resolved. - case *invoices.HtlcFailResolution: - log.Infof("%T(%v): Exit hop HTLC canceled "+ - "(expiry=%v, height=%v), abandoning", h, - h.htlcResolution.ClaimOutpoint, - h.htlcExpiry, currentHeight) - - h.resolved = true - - // Checkpoint our resolver with an abandoned outcome - // because we take no further action on this htlc. - report := h.report().resolverReport( - nil, channeldb.ResolverTypeIncomingHtlc, - channeldb.ResolverOutcomeAbandoned, - ) - return nil, h.Checkpoint(h, report) - - // Error if the resolution type is unknown, we are only - // expecting settles and fails. - default: - return nil, er.Errorf("unknown resolution"+ - " type: %v", e) - } - } - - var ( - hodlChan chan interface{} - witnessUpdates <-chan lntypes.Preimage - ) - if payload.FwdInfo.NextHop == hop.Exit { - // Create a buffered hodl chan to prevent deadlock. - hodlChan = make(chan interface{}, 1) - - // Notify registry that we are potentially resolving as an exit - // hop on-chain. If this HTLC indeed pays to an existing - // invoice, the invoice registry will tell us what to do with - // the HTLC. This is identical to HTLC resolution in the link. 
- circuitKey := channeldb.CircuitKey{ - ChanID: h.ShortChanID, - HtlcID: h.htlc.HtlcIndex, - } - - resolution, err := h.Registry.NotifyExitHopHtlc( - h.htlc.RHash, h.htlc.Amt, h.htlcExpiry, currentHeight, - circuitKey, hodlChan, payload, - ) - if err != nil { - return nil, err - } - - defer h.Registry.HodlUnsubscribeAll(hodlChan) - - // Take action based on the resolution we received. If the htlc - // was settled, or a htlc for a known invoice failed we can - // resolve it directly. If the resolution is nil, the htlc was - // neither accepted nor failed, so we cannot take action yet. - switch res := resolution.(type) { - case *invoices.HtlcFailResolution: - // In the case where the htlc failed, but the invoice - // was known to the registry, we can directly resolve - // the htlc. - if res.Outcome != invoices.ResultInvoiceNotFound { - return processHtlcResolution(resolution) - } - - // If we settled the htlc, we can resolve it. - case *invoices.HtlcSettleResolution: - return processHtlcResolution(resolution) - - // If the resolution is nil, the htlc was neither settled nor - // failed so we cannot take action at present. - case nil: - - default: - return nil, er.Errorf("unknown htlc resolution type: %T", - resolution) - } - } else { - // If the HTLC hasn't expired yet, then we may still be able to - // claim it if we learn of the pre-image, so we'll subscribe to - // the preimage database to see if it turns up, or the HTLC - // times out. - // - // NOTE: This is done BEFORE opportunistically querying the db, - // to ensure the preimage can't be delivered between querying - // and registering for the preimage subscription. - preimageSubscription := h.PreimageDB.SubscribeUpdates() - defer preimageSubscription.CancelSubscription() - - // With the epochs and preimage subscriptions initialized, we'll - // query to see if we already know the preimage. 
- preimage, ok := h.PreimageDB.LookupPreimage(h.htlc.RHash) - if ok { - // If we do, then this means we can claim the HTLC! - // However, we don't know how to ourselves, so we'll - // return our inner resolver which has the knowledge to - // do so. - if err := applyPreimage(preimage); err != nil { - return nil, err - } - - return &h.htlcSuccessResolver, nil - } - - witnessUpdates = preimageSubscription.WitnessUpdates - } - - for { - select { - case preimage := <-witnessUpdates: - // We received a new preimage, but we need to ignore - // all except the preimage we are waiting for. - if !preimage.Matches(h.htlc.RHash) { - continue - } - - if err := applyPreimage(preimage); err != nil { - return nil, err - } - - // We've learned of the preimage and this information - // has been added to our inner resolver. We return it so - // it can continue contract resolution. - return &h.htlcSuccessResolver, nil - - case hodlItem := <-hodlChan: - htlcResolution := hodlItem.(invoices.HtlcResolution) - return processHtlcResolution(htlcResolution) - - case newBlock, ok := <-blockEpochs.Epochs: - if !ok { - return nil, errResolverShuttingDown.Default() - } - - // If this new height expires the HTLC, then this means - // we never found out the preimage, so we can mark - // resolved and exit. - newHeight := uint32(newBlock.Height) - if newHeight >= h.htlcExpiry { - log.Infof("%T(%v): HTLC has timed out "+ - "(expiry=%v, height=%v), abandoning", h, - h.htlcResolution.ClaimOutpoint, - h.htlcExpiry, currentHeight) - h.resolved = true - - report := h.report().resolverReport( - nil, - channeldb.ResolverTypeIncomingHtlc, - channeldb.ResolverOutcomeTimeout, - ) - return nil, h.Checkpoint(h, report) - } - - case <-h.quit: - return nil, errResolverShuttingDown.Default() - } - } -} - -// report returns a report on the resolution state of the contract. -func (h *htlcIncomingContestResolver) report() *ContractReport { - // No locking needed as these values are read-only. 
- - finalAmt := h.htlc.Amt.ToSatoshis() - if h.htlcResolution.SignedSuccessTx != nil { - finalAmt = btcutil.Amount( - h.htlcResolution.SignedSuccessTx.TxOut[0].Value, - ) - } - - return &ContractReport{ - Outpoint: h.htlcResolution.ClaimOutpoint, - Type: ReportOutputIncomingHtlc, - Amount: finalAmt, - MaturityHeight: h.htlcExpiry, - LimboBalance: finalAmt, - Stage: 1, - } -} - -// Stop signals the resolver to cancel any current resolution processes, and -// suspend. -// -// NOTE: Part of the ContractResolver interface. -func (h *htlcIncomingContestResolver) Stop() { - close(h.quit) -} - -// IsResolved returns true if the stored state in the resolve is fully -// resolved. In this case the target output can be forgotten. -// -// NOTE: Part of the ContractResolver interface. -func (h *htlcIncomingContestResolver) IsResolved() bool { - return h.resolved -} - -// Encode writes an encoded version of the ContractResolver into the passed -// Writer. -// -// NOTE: Part of the ContractResolver interface. -func (h *htlcIncomingContestResolver) Encode(w io.Writer) er.R { - // We'll first write out the one field unique to this resolver. - if err := util.WriteBin(w, endian, h.htlcExpiry); err != nil { - return err - } - - // Then we'll write out our internal resolver. - return h.htlcSuccessResolver.Encode(w) -} - -// newIncomingContestResolverFromReader attempts to decode an encoded ContractResolver -// from the passed Reader instance, returning an active ContractResolver -// instance. -func newIncomingContestResolverFromReader(r io.Reader, resCfg ResolverConfig) ( - *htlcIncomingContestResolver, er.R) { - - h := &htlcIncomingContestResolver{} - - // We'll first read the one field unique to this resolver. - if err := util.ReadBin(r, endian, &h.htlcExpiry); err != nil { - return nil, err - } - - // Then we'll decode our internal resolver. 
- successResolver, err := newSuccessResolverFromReader(r, resCfg) - if err != nil { - return nil, err - } - h.htlcSuccessResolver = *successResolver - - return h, nil -} - -// Supplement adds additional information to the resolver that is required -// before Resolve() is called. -// -// NOTE: Part of the htlcContractResolver interface. -func (h *htlcIncomingContestResolver) Supplement(htlc channeldb.HTLC) { - h.htlc = htlc -} - -// decodePayload (re)decodes the hop payload of a received htlc. -func (h *htlcIncomingContestResolver) decodePayload() (*hop.Payload, er.R) { - - onionReader := bytes.NewReader(h.htlc.OnionBlob) - iterator, err := h.OnionProcessor.ReconstructHopIterator( - onionReader, h.htlc.RHash[:], - ) - if err != nil { - return nil, err - } - - return iterator.HopPayload() -} - -// A compile time assertion to ensure htlcIncomingContestResolver meets the -// ContractResolver interface. -var _ htlcContractResolver = (*htlcIncomingContestResolver)(nil) diff --git a/lnd/contractcourt/htlc_incoming_resolver_test.go b/lnd/contractcourt/htlc_incoming_resolver_test.go deleted file mode 100644 index d3ce6ef6..00000000 --- a/lnd/contractcourt/htlc_incoming_resolver_test.go +++ /dev/null @@ -1,417 +0,0 @@ -package contractcourt - -import ( - "bytes" - "io" - "io/ioutil" - "testing" - - "github.com/pkt-cash/pktd/btcutil/er" - sphinx "github.com/pkt-cash/pktd/lightning-onion" - "github.com/pkt-cash/pktd/lnd/chainntnfs" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/lnd/htlcswitch/hop" - "github.com/pkt-cash/pktd/lnd/invoices" - "github.com/pkt-cash/pktd/lnd/lntest/mock" - "github.com/pkt-cash/pktd/lnd/lntypes" - "github.com/pkt-cash/pktd/lnd/lnwallet" - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -const ( - testInitialBlockHeight = 100 - testHtlcExpiry = 150 -) - -var ( - testResPreimage = lntypes.Preimage{1, 2, 3} - testResHash = testResPreimage.Hash() - testResCircuitKey = 
channeldb.CircuitKey{} - testOnionBlob = []byte{4, 5, 6} - testAcceptHeight int32 = 1234 - testHtlcAmount = 2300 -) - -// TestHtlcIncomingResolverFwdPreimageKnown tests resolution of a forwarded htlc -// for which the preimage is already known initially. -func TestHtlcIncomingResolverFwdPreimageKnown(t *testing.T) { - t.Parallel() - defer timeout(t)() - - ctx := newIncomingResolverTestContext(t, false) - ctx.witnessBeacon.lookupPreimage[testResHash] = testResPreimage - ctx.resolve() - ctx.waitForResult(true) -} - -// TestHtlcIncomingResolverFwdContestedSuccess tests resolution of a forwarded -// htlc for which the preimage becomes known after the resolver has been -// started. -func TestHtlcIncomingResolverFwdContestedSuccess(t *testing.T) { - t.Parallel() - defer timeout(t)() - - ctx := newIncomingResolverTestContext(t, false) - ctx.resolve() - - // Simulate a new block coming in. HTLC is not yet expired. - ctx.notifyEpoch(testInitialBlockHeight + 1) - - ctx.witnessBeacon.preImageUpdates <- testResPreimage - ctx.waitForResult(true) -} - -// TestHtlcIncomingResolverFwdContestedTimeout tests resolution of a forwarded -// htlc that times out after the resolver has been started. -func TestHtlcIncomingResolverFwdContestedTimeout(t *testing.T) { - t.Parallel() - defer timeout(t)() - - ctx := newIncomingResolverTestContext(t, false) - - // Replace our checkpoint with one which will push reports into a - // channel for us to consume. We replace this function on the resolver - // itself because it is created by the test context. - reportChan := make(chan *channeldb.ResolverReport) - ctx.resolver.Checkpoint = func(_ ContractResolver, - reports ...*channeldb.ResolverReport) er.R { - - // Send all of our reports into the channel. - for _, report := range reports { - reportChan <- report - } - - return nil - } - - ctx.resolve() - - // Simulate a new block coming in. HTLC expires. 
- ctx.notifyEpoch(testHtlcExpiry) - - // Assert that we have a failure resolution because our invoice was - // cancelled. - assertResolverReport(t, reportChan, &channeldb.ResolverReport{ - Amount: lnwire.MilliSatoshi(testHtlcAmount).ToSatoshis(), - ResolverType: channeldb.ResolverTypeIncomingHtlc, - ResolverOutcome: channeldb.ResolverOutcomeTimeout, - }) - - ctx.waitForResult(false) -} - -// TestHtlcIncomingResolverFwdTimeout tests resolution of a forwarded htlc that -// has already expired when the resolver starts. -func TestHtlcIncomingResolverFwdTimeout(t *testing.T) { - t.Parallel() - defer timeout(t)() - - ctx := newIncomingResolverTestContext(t, true) - ctx.witnessBeacon.lookupPreimage[testResHash] = testResPreimage - ctx.resolver.htlcExpiry = 90 - ctx.resolve() - ctx.waitForResult(false) -} - -// TestHtlcIncomingResolverExitSettle tests resolution of an exit hop htlc for -// which the invoice has already been settled when the resolver starts. -func TestHtlcIncomingResolverExitSettle(t *testing.T) { - t.Parallel() - defer timeout(t)() - - ctx := newIncomingResolverTestContext(t, true) - ctx.registry.notifyResolution = invoices.NewSettleResolution( - testResPreimage, testResCircuitKey, testAcceptHeight, - invoices.ResultReplayToSettled, - ) - - ctx.resolve() - - data := <-ctx.registry.notifyChan - if data.expiry != testHtlcExpiry { - t.Fatal("incorrect expiry") - } - if data.currentHeight != testInitialBlockHeight { - t.Fatal("incorrect block height") - } - - ctx.waitForResult(true) - - if !bytes.Equal( - ctx.onionProcessor.offeredOnionBlob, testOnionBlob, - ) { - t.Fatal("unexpected onion blob") - } -} - -// TestHtlcIncomingResolverExitCancel tests resolution of an exit hop htlc for -// an invoice that is already canceled when the resolver starts. 
-func TestHtlcIncomingResolverExitCancel(t *testing.T) { - t.Parallel() - defer timeout(t)() - - ctx := newIncomingResolverTestContext(t, true) - ctx.registry.notifyResolution = invoices.NewFailResolution( - testResCircuitKey, testAcceptHeight, - invoices.ResultInvoiceAlreadyCanceled, - ) - - ctx.resolve() - ctx.waitForResult(false) -} - -// TestHtlcIncomingResolverExitSettleHodl tests resolution of an exit hop htlc -// for a hodl invoice that is settled after the resolver has started. -func TestHtlcIncomingResolverExitSettleHodl(t *testing.T) { - t.Parallel() - defer timeout(t)() - - ctx := newIncomingResolverTestContext(t, true) - ctx.resolve() - - notifyData := <-ctx.registry.notifyChan - notifyData.hodlChan <- invoices.NewSettleResolution( - testResPreimage, testResCircuitKey, testAcceptHeight, - invoices.ResultSettled, - ) - - ctx.waitForResult(true) -} - -// TestHtlcIncomingResolverExitTimeoutHodl tests resolution of an exit hop htlc -// for a hodl invoice that times out. -func TestHtlcIncomingResolverExitTimeoutHodl(t *testing.T) { - t.Parallel() - defer timeout(t)() - - ctx := newIncomingResolverTestContext(t, true) - - // Replace our checkpoint with one which will push reports into a - // channel for us to consume. We replace this function on the resolver - // itself because it is created by the test context. - reportChan := make(chan *channeldb.ResolverReport) - ctx.resolver.Checkpoint = func(_ ContractResolver, - reports ...*channeldb.ResolverReport) er.R { - - // Send all of our reports into the channel. - for _, report := range reports { - reportChan <- report - } - - return nil - } - - ctx.resolve() - ctx.notifyEpoch(testHtlcExpiry) - - // Assert that we have a failure resolution because our invoice was - // cancelled. 
- assertResolverReport(t, reportChan, &channeldb.ResolverReport{ - Amount: lnwire.MilliSatoshi(testHtlcAmount).ToSatoshis(), - ResolverType: channeldb.ResolverTypeIncomingHtlc, - ResolverOutcome: channeldb.ResolverOutcomeTimeout, - }) - - ctx.waitForResult(false) -} - -// TestHtlcIncomingResolverExitCancelHodl tests resolution of an exit hop htlc -// for a hodl invoice that is canceled after the resolver has started. -func TestHtlcIncomingResolverExitCancelHodl(t *testing.T) { - t.Parallel() - defer timeout(t)() - - ctx := newIncomingResolverTestContext(t, true) - - // Replace our checkpoint with one which will push reports into a - // channel for us to consume. We replace this function on the resolver - // itself because it is created by the test context. - reportChan := make(chan *channeldb.ResolverReport) - ctx.resolver.Checkpoint = func(_ ContractResolver, - reports ...*channeldb.ResolverReport) er.R { - - // Send all of our reports into the channel. - for _, report := range reports { - reportChan <- report - } - - return nil - } - - ctx.resolve() - notifyData := <-ctx.registry.notifyChan - notifyData.hodlChan <- invoices.NewFailResolution( - testResCircuitKey, testAcceptHeight, invoices.ResultCanceled, - ) - - // Assert that we have a failure resolution because our invoice was - // cancelled. 
- assertResolverReport(t, reportChan, &channeldb.ResolverReport{ - Amount: lnwire.MilliSatoshi(testHtlcAmount).ToSatoshis(), - ResolverType: channeldb.ResolverTypeIncomingHtlc, - ResolverOutcome: channeldb.ResolverOutcomeAbandoned, - }) - - ctx.waitForResult(false) -} - -type mockHopIterator struct { - isExit bool - hop.Iterator -} - -func (h *mockHopIterator) HopPayload() (*hop.Payload, er.R) { - var nextAddress [8]byte - if !h.isExit { - nextAddress = [8]byte{0x01} - } - - return hop.NewLegacyPayload(&sphinx.HopData{ - Realm: [1]byte{}, - NextAddress: nextAddress, - ForwardAmount: 100, - OutgoingCltv: 40, - ExtraBytes: [12]byte{}, - }), nil -} - -type mockOnionProcessor struct { - isExit bool - offeredOnionBlob []byte -} - -func (o *mockOnionProcessor) ReconstructHopIterator(r io.Reader, rHash []byte) ( - hop.Iterator, er.R) { - - data, err := ioutil.ReadAll(r) - if err != nil { - return nil, er.E(err) - } - o.offeredOnionBlob = data - - return &mockHopIterator{isExit: o.isExit}, nil -} - -type incomingResolverTestContext struct { - registry *mockRegistry - witnessBeacon *mockWitnessBeacon - resolver *htlcIncomingContestResolver - notifier *mock.ChainNotifier - onionProcessor *mockOnionProcessor - resolveErr chan er.R - nextResolver ContractResolver - t *testing.T -} - -func newIncomingResolverTestContext(t *testing.T, isExit bool) *incomingResolverTestContext { - notifier := &mock.ChainNotifier{ - EpochChan: make(chan *chainntnfs.BlockEpoch), - SpendChan: make(chan *chainntnfs.SpendDetail), - ConfChan: make(chan *chainntnfs.TxConfirmation), - } - witnessBeacon := newMockWitnessBeacon() - registry := &mockRegistry{ - notifyChan: make(chan notifyExitHopData, 1), - } - - onionProcessor := &mockOnionProcessor{isExit: isExit} - - checkPointChan := make(chan struct{}, 1) - - chainCfg := ChannelArbitratorConfig{ - ChainArbitratorConfig: ChainArbitratorConfig{ - Notifier: notifier, - PreimageDB: witnessBeacon, - Registry: registry, - OnionProcessor: onionProcessor, - }, 
- PutResolverReport: func(_ kvdb.RwTx, - _ *channeldb.ResolverReport) er.R { - - return nil - }, - } - - cfg := ResolverConfig{ - ChannelArbitratorConfig: chainCfg, - Checkpoint: func(_ ContractResolver, - _ ...*channeldb.ResolverReport) er.R { - - checkPointChan <- struct{}{} - return nil - }, - } - resolver := &htlcIncomingContestResolver{ - htlcSuccessResolver: htlcSuccessResolver{ - contractResolverKit: *newContractResolverKit(cfg), - htlcResolution: lnwallet.IncomingHtlcResolution{}, - htlc: channeldb.HTLC{ - Amt: lnwire.MilliSatoshi(testHtlcAmount), - RHash: testResHash, - OnionBlob: testOnionBlob, - }, - }, - htlcExpiry: testHtlcExpiry, - } - - return &incomingResolverTestContext{ - registry: registry, - witnessBeacon: witnessBeacon, - resolver: resolver, - notifier: notifier, - onionProcessor: onionProcessor, - t: t, - } -} - -func (i *incomingResolverTestContext) resolve() { - // Start resolver. - i.resolveErr = make(chan er.R, 1) - go func() { - var err er.R - i.nextResolver, err = i.resolver.Resolve() - i.resolveErr <- err - }() - - // Notify initial block height. 
- i.notifyEpoch(testInitialBlockHeight) -} - -func (i *incomingResolverTestContext) notifyEpoch(height int32) { - i.notifier.EpochChan <- &chainntnfs.BlockEpoch{ - Height: height, - } -} - -func (i *incomingResolverTestContext) waitForResult(expectSuccessRes bool) { - i.t.Helper() - - err := <-i.resolveErr - if err != nil { - i.t.Fatal(err) - } - - if !expectSuccessRes { - if i.nextResolver != nil { - i.t.Fatal("expected no next resolver") - } - return - } - - successResolver, ok := i.nextResolver.(*htlcSuccessResolver) - if !ok { - i.t.Fatal("expected htlcSuccessResolver") - } - - if successResolver.htlcResolution.Preimage != testResPreimage { - i.t.Fatal("invalid preimage") - } - - successTx := successResolver.htlcResolution.SignedSuccessTx - if successTx != nil && - !bytes.Equal(successTx.TxIn[0].Witness[3], testResPreimage[:]) { - - i.t.Fatal("invalid preimage") - } -} diff --git a/lnd/contractcourt/htlc_outgoing_contest_resolver.go b/lnd/contractcourt/htlc_outgoing_contest_resolver.go deleted file mode 100644 index 9db46f09..00000000 --- a/lnd/contractcourt/htlc_outgoing_contest_resolver.go +++ /dev/null @@ -1,219 +0,0 @@ -package contractcourt - -import ( - "io" - - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/lnwallet" - "github.com/pkt-cash/pktd/pktlog/log" -) - -// htlcOutgoingContestResolver is a ContractResolver that's able to resolve an -// outgoing HTLC that is still contested. An HTLC is still contested, if at the -// time that we broadcast the commitment transaction, it isn't able to be fully -// resolved. In this case, we'll either wait for the HTLC to timeout, or for -// us to learn of the preimage. -type htlcOutgoingContestResolver struct { - // htlcTimeoutResolver is the inner solver that this resolver may turn - // into. This only happens if the HTLC expires on-chain. 
- htlcTimeoutResolver -} - -// newOutgoingContestResolver instantiates a new outgoing contested htlc -// resolver. -func newOutgoingContestResolver(res lnwallet.OutgoingHtlcResolution, - broadcastHeight uint32, htlc channeldb.HTLC, - resCfg ResolverConfig) *htlcOutgoingContestResolver { - - timeout := newTimeoutResolver( - res, broadcastHeight, htlc, resCfg, - ) - - return &htlcOutgoingContestResolver{ - htlcTimeoutResolver: *timeout, - } -} - -// Resolve commences the resolution of this contract. As this contract hasn't -// yet timed out, we'll wait for one of two things to happen -// -// 1. The HTLC expires. In this case, we'll sweep the funds and send a clean -// up cancel message to outside sub-systems. -// -// 2. The remote party sweeps this HTLC on-chain, in which case we'll add the -// pre-image to our global cache, then send a clean up settle message -// backwards. -// -// When either of these two things happens, we'll create a new resolver which -// is able to handle the final resolution of the contract. We're only the pivot -// point. -func (h *htlcOutgoingContestResolver) Resolve() (ContractResolver, er.R) { - // If we're already full resolved, then we don't have anything further - // to do. - if h.resolved { - return nil, nil - } - - // Otherwise, we'll watch for two external signals to decide if we'll - // morph into another resolver, or fully resolve the contract. - // - // The output we'll be watching for is the *direct* spend from the HTLC - // output. If this isn't our commitment transaction, it'll be right on - // the resolution. Otherwise, we fetch this pointer from the input of - // the time out transaction. - outPointToWatch, scriptToWatch, err := h.chainDetailsToWatch() - if err != nil { - return nil, err - } - - // First, we'll register for a spend notification for this output. If - // the remote party sweeps with the pre-image, we'll be notified. 
- spendNtfn, err := h.Notifier.RegisterSpendNtfn( - outPointToWatch, scriptToWatch, h.broadcastHeight, - ) - if err != nil { - return nil, err - } - - // We'll quickly check to see if the output has already been spent. - select { - // If the output has already been spent, then we can stop early and - // sweep the pre-image from the output. - case commitSpend, ok := <-spendNtfn.Spend: - if !ok { - return nil, errResolverShuttingDown.Default() - } - - // TODO(roasbeef): Checkpoint? - return h.claimCleanUp(commitSpend) - - // If it hasn't, then we'll watch for both the expiration, and the - // sweeping out this output. - default: - } - - // If we reach this point, then we can't fully act yet, so we'll await - // either of our signals triggering: the HTLC expires, or we learn of - // the preimage. - blockEpochs, err := h.Notifier.RegisterBlockEpochNtfn(nil) - if err != nil { - return nil, err - } - defer blockEpochs.Cancel() - - for { - select { - - // A new block has arrived, we'll check to see if this leads to - // HTLC expiration. - case newBlock, ok := <-blockEpochs.Epochs: - if !ok { - return nil, errResolverShuttingDown.Default() - } - - // If the current height is >= expiry-1, then a timeout - // path spend will be valid to be included in the next - // block, and we can immediately return the resolver. - // - // TODO(joostjager): Statement above may not be valid. - // For CLTV locks, the expiry value is the last - // _invalid_ block. The likely reason that this does not - // create a problem, is that utxonursery is checking the - // expiry again (in the proper way). 
- // - // Source: - // https://github.com/btcsuite/btcd/blob/991d32e72fe84d5fbf9c47cd604d793a0cd3a072/blockchain/validate.go#L154 - newHeight := uint32(newBlock.Height) - if newHeight >= h.htlcResolution.Expiry-1 { - log.Infof("%T(%v): HTLC has expired "+ - "(height=%v, expiry=%v), transforming "+ - "into timeout resolver", h, - h.htlcResolution.ClaimOutpoint, - newHeight, h.htlcResolution.Expiry) - return &h.htlcTimeoutResolver, nil - } - - // The output has been spent! This means the preimage has been - // revealed on-chain. - case commitSpend, ok := <-spendNtfn.Spend: - if !ok { - return nil, errResolverShuttingDown.Default() - } - - // The only way this output can be spent by the remote - // party is by revealing the preimage. So we'll perform - // our duties to clean up the contract once it has been - // claimed. - return h.claimCleanUp(commitSpend) - - case <-h.quit: - return nil, er.Errorf("resolver canceled") - } - } -} - -// report returns a report on the resolution state of the contract. -func (h *htlcOutgoingContestResolver) report() *ContractReport { - // No locking needed as these values are read-only. - - finalAmt := h.htlc.Amt.ToSatoshis() - if h.htlcResolution.SignedTimeoutTx != nil { - finalAmt = btcutil.Amount( - h.htlcResolution.SignedTimeoutTx.TxOut[0].Value, - ) - } - - return &ContractReport{ - Outpoint: h.htlcResolution.ClaimOutpoint, - Type: ReportOutputOutgoingHtlc, - Amount: finalAmt, - MaturityHeight: h.htlcResolution.Expiry, - LimboBalance: finalAmt, - Stage: 1, - } -} - -// Stop signals the resolver to cancel any current resolution processes, and -// suspend. -// -// NOTE: Part of the ContractResolver interface. -func (h *htlcOutgoingContestResolver) Stop() { - close(h.quit) -} - -// IsResolved returns true if the stored state in the resolve is fully -// resolved. In this case the target output can be forgotten. -// -// NOTE: Part of the ContractResolver interface. 
-func (h *htlcOutgoingContestResolver) IsResolved() bool { - return h.resolved -} - -// Encode writes an encoded version of the ContractResolver into the passed -// Writer. -// -// NOTE: Part of the ContractResolver interface. -func (h *htlcOutgoingContestResolver) Encode(w io.Writer) er.R { - return h.htlcTimeoutResolver.Encode(w) -} - -// newOutgoingContestResolverFromReader attempts to decode an encoded ContractResolver -// from the passed Reader instance, returning an active ContractResolver -// instance. -func newOutgoingContestResolverFromReader(r io.Reader, resCfg ResolverConfig) ( - *htlcOutgoingContestResolver, er.R) { - - h := &htlcOutgoingContestResolver{} - timeoutResolver, err := newTimeoutResolverFromReader(r, resCfg) - if err != nil { - return nil, err - } - h.htlcTimeoutResolver = *timeoutResolver - return h, nil -} - -// A compile time assertion to ensure htlcOutgoingContestResolver meets the -// ContractResolver interface. -var _ htlcContractResolver = (*htlcOutgoingContestResolver)(nil) diff --git a/lnd/contractcourt/htlc_outgoing_contest_resolver_test.go b/lnd/contractcourt/htlc_outgoing_contest_resolver_test.go deleted file mode 100644 index 4f1d9f3a..00000000 --- a/lnd/contractcourt/htlc_outgoing_contest_resolver_test.go +++ /dev/null @@ -1,240 +0,0 @@ -package contractcourt - -import ( - "testing" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/chainntnfs" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/lnd/input" - "github.com/pkt-cash/pktd/lnd/lntest/mock" - "github.com/pkt-cash/pktd/lnd/lntypes" - "github.com/pkt-cash/pktd/lnd/lnwallet" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/wire" -) - -const ( - outgoingContestHtlcExpiry = 110 -) - -// TestHtlcOutgoingResolverTimeout tests resolution of an offered htlc that -// timed out. 
-func TestHtlcOutgoingResolverTimeout(t *testing.T) { - t.Parallel() - defer timeout(t)() - - // Setup the resolver with our test resolution. - ctx := newOutgoingResolverTestContext(t) - - // Start the resolution process in a goroutine. - ctx.resolve() - - // Notify arrival of the block after which the timeout path of the htlc - // unlocks. - ctx.notifyEpoch(outgoingContestHtlcExpiry - 1) - - // Assert that the resolver finishes without error and transforms in a - // timeout resolver. - ctx.waitForResult(true) -} - -// TestHtlcOutgoingResolverRemoteClaim tests resolution of an offered htlc that -// is claimed by the remote party. -func TestHtlcOutgoingResolverRemoteClaim(t *testing.T) { - t.Parallel() - defer timeout(t)() - - // Setup the resolver with our test resolution and start the resolution - // process. - ctx := newOutgoingResolverTestContext(t) - - // Replace our mocked checkpoint function with one which will push - // reports into a channel for us to consume. We do so on the resolver - // level because our test context has already created the resolver. - reportChan := make(chan *channeldb.ResolverReport) - ctx.resolver.Checkpoint = func(_ ContractResolver, - reports ...*channeldb.ResolverReport) er.R { - - // Send all of our reports into the channel. - for _, report := range reports { - reportChan <- report - } - - return nil - } - - ctx.resolve() - - // The remote party sweeps the htlc. Notify our resolver of this event. - preimage := lntypes.Preimage{} - spendTx := &wire.MsgTx{ - TxIn: []*wire.TxIn{ - { - Witness: [][]byte{ - {0}, {1}, {2}, preimage[:], - }, - }, - }, - } - - spendHash := spendTx.TxHash() - - ctx.notifier.SpendChan <- &chainntnfs.SpendDetail{ - SpendingTx: spendTx, - SpenderTxHash: &spendHash, - } - - // We expect the extracted preimage to be added to the witness beacon. - <-ctx.preimageDB.newPreimages - - // We also expect a resolution message to the incoming side of the - // circuit. 
- <-ctx.resolutionChan - - // Finally, check that we have a report as expected. - expectedReport := &channeldb.ResolverReport{ - OutPoint: wire.OutPoint{}, - Amount: 0, - ResolverType: channeldb.ResolverTypeOutgoingHtlc, - ResolverOutcome: channeldb.ResolverOutcomeClaimed, - SpendTxID: &spendHash, - } - - assertResolverReport(t, reportChan, expectedReport) - - // Assert that the resolver finishes without error. - ctx.waitForResult(false) -} - -type resolveResult struct { - err er.R - nextResolver ContractResolver -} - -type outgoingResolverTestContext struct { - resolver *htlcOutgoingContestResolver - notifier *mock.ChainNotifier - preimageDB *mockWitnessBeacon - resolverResultChan chan resolveResult - resolutionChan chan ResolutionMsg - t *testing.T -} - -func newOutgoingResolverTestContext(t *testing.T) *outgoingResolverTestContext { - notifier := &mock.ChainNotifier{ - EpochChan: make(chan *chainntnfs.BlockEpoch), - SpendChan: make(chan *chainntnfs.SpendDetail), - ConfChan: make(chan *chainntnfs.TxConfirmation), - } - - checkPointChan := make(chan struct{}, 1) - resolutionChan := make(chan ResolutionMsg, 1) - - preimageDB := newMockWitnessBeacon() - - onionProcessor := &mockOnionProcessor{} - - chainCfg := ChannelArbitratorConfig{ - ChainArbitratorConfig: ChainArbitratorConfig{ - Notifier: notifier, - PreimageDB: preimageDB, - DeliverResolutionMsg: func(msgs ...ResolutionMsg) er.R { - if len(msgs) != 1 { - return er.Errorf("expected 1 "+ - "resolution msg, instead got %v", - len(msgs)) - } - - resolutionChan <- msgs[0] - return nil - }, - OnionProcessor: onionProcessor, - }, - PutResolverReport: func(_ kvdb.RwTx, - _ *channeldb.ResolverReport) er.R { - - return nil - }, - } - - outgoingRes := lnwallet.OutgoingHtlcResolution{ - Expiry: outgoingContestHtlcExpiry, - SweepSignDesc: input.SignDescriptor{ - Output: &wire.TxOut{}, - }, - } - - cfg := ResolverConfig{ - ChannelArbitratorConfig: chainCfg, - Checkpoint: func(_ ContractResolver, - _ 
...*channeldb.ResolverReport) er.R { - - checkPointChan <- struct{}{} - return nil - }, - } - - resolver := &htlcOutgoingContestResolver{ - htlcTimeoutResolver: htlcTimeoutResolver{ - contractResolverKit: *newContractResolverKit(cfg), - htlcResolution: outgoingRes, - htlc: channeldb.HTLC{ - Amt: lnwire.MilliSatoshi(testHtlcAmount), - RHash: testResHash, - OnionBlob: testOnionBlob, - }, - }, - } - - return &outgoingResolverTestContext{ - resolver: resolver, - notifier: notifier, - preimageDB: preimageDB, - resolutionChan: resolutionChan, - t: t, - } -} - -func (i *outgoingResolverTestContext) resolve() { - // Start resolver. - i.resolverResultChan = make(chan resolveResult, 1) - go func() { - nextResolver, err := i.resolver.Resolve() - i.resolverResultChan <- resolveResult{ - nextResolver: nextResolver, - err: err, - } - }() - - // Notify initial block height. - i.notifyEpoch(testInitialBlockHeight) -} - -func (i *outgoingResolverTestContext) notifyEpoch(height int32) { - i.notifier.EpochChan <- &chainntnfs.BlockEpoch{ - Height: height, - } -} - -func (i *outgoingResolverTestContext) waitForResult(expectTimeoutRes bool) { - i.t.Helper() - - result := <-i.resolverResultChan - if result.err != nil { - i.t.Fatal(result.err) - } - - if !expectTimeoutRes { - if result.nextResolver != nil { - i.t.Fatal("expected no next resolver") - } - return - } - - _, ok := result.nextResolver.(*htlcTimeoutResolver) - if !ok { - i.t.Fatal("expected htlcTimeoutResolver") - } -} diff --git a/lnd/contractcourt/htlc_success_resolver.go b/lnd/contractcourt/htlc_success_resolver.go deleted file mode 100644 index 0ead21c3..00000000 --- a/lnd/contractcourt/htlc_success_resolver.go +++ /dev/null @@ -1,413 +0,0 @@ -package contractcourt - -import ( - "io" - - "github.com/davecgh/go-spew/spew" - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - 
"github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/input" - "github.com/pkt-cash/pktd/lnd/labels" - "github.com/pkt-cash/pktd/lnd/lnwallet" - "github.com/pkt-cash/pktd/lnd/sweep" - "github.com/pkt-cash/pktd/pktlog/log" - "github.com/pkt-cash/pktd/wire" -) - -// htlcSuccessResolver is a resolver that's capable of sweeping an incoming -// HTLC output on-chain. If this is the remote party's commitment, we'll sweep -// it directly from the commitment output *immediately*. If this is our -// commitment, we'll first broadcast the success transaction, then send it to -// the incubator for sweeping. That's it, no need to send any clean up -// messages. -// -// TODO(roasbeef): don't need to broadcast? -type htlcSuccessResolver struct { - // htlcResolution is the incoming HTLC resolution for this HTLC. It - // contains everything we need to properly resolve this HTLC. - htlcResolution lnwallet.IncomingHtlcResolution - - // outputIncubating returns true if we've sent the output to the output - // incubator (utxo nursery). - outputIncubating bool - - // resolved reflects if the contract has been fully resolved or not. - resolved bool - - // broadcastHeight is the height that the original contract was - // broadcast to the main-chain at. We'll use this value to bound any - // historical queries to the chain for spends/confirmations. - broadcastHeight uint32 - - // sweepTx will be non-nil if we've already crafted a transaction to - // sweep a direct HTLC output. This is only a concern if we're sweeping - // from the commitment transaction of the remote party. - // - // TODO(roasbeef): send off to utxobundler - sweepTx *wire.MsgTx - - // htlc contains information on the htlc that we are resolving on-chain. - htlc channeldb.HTLC - - contractResolverKit -} - -// newSuccessResolver instanties a new htlc success resolver. 
-func newSuccessResolver(res lnwallet.IncomingHtlcResolution, - broadcastHeight uint32, htlc channeldb.HTLC, - resCfg ResolverConfig) *htlcSuccessResolver { - - return &htlcSuccessResolver{ - contractResolverKit: *newContractResolverKit(resCfg), - htlcResolution: res, - broadcastHeight: broadcastHeight, - htlc: htlc, - } -} - -// ResolverKey returns an identifier which should be globally unique for this -// particular resolver within the chain the original contract resides within. -// -// NOTE: Part of the ContractResolver interface. -func (h *htlcSuccessResolver) ResolverKey() []byte { - // The primary key for this resolver will be the outpoint of the HTLC - // on the commitment transaction itself. If this is our commitment, - // then the output can be found within the signed success tx, - // otherwise, it's just the ClaimOutpoint. - var op wire.OutPoint - if h.htlcResolution.SignedSuccessTx != nil { - op = h.htlcResolution.SignedSuccessTx.TxIn[0].PreviousOutPoint - } else { - op = h.htlcResolution.ClaimOutpoint - } - - key := newResolverID(op) - return key[:] -} - -// Resolve attempts to resolve an unresolved incoming HTLC that we know the -// preimage to. If the HTLC is on the commitment of the remote party, then we'll -// simply sweep it directly. Otherwise, we'll hand this off to the utxo nursery -// to do its duty. There is no need to make a call to the invoice registry -// anymore. Every HTLC has already passed through the incoming contest resolver -// and in there the invoice was already marked as settled. -// -// TODO(roasbeef): create multi to batch -// -// NOTE: Part of the ContractResolver interface. -func (h *htlcSuccessResolver) Resolve() (ContractResolver, er.R) { - // If we're already resolved, then we can exit early. - if h.resolved { - return nil, nil - } - - // If we don't have a success transaction, then this means that this is - // an output on the remote party's commitment transaction. 
- if h.htlcResolution.SignedSuccessTx == nil { - // If we don't already have the sweep transaction constructed, - // we'll do so and broadcast it. - if h.sweepTx == nil { - log.Infof("%T(%x): crafting sweep tx for "+ - "incoming+remote htlc confirmed", h, - h.htlc.RHash[:]) - - // Before we can craft out sweeping transaction, we - // need to create an input which contains all the items - // required to add this input to a sweeping transaction, - // and generate a witness. - inp := input.MakeHtlcSucceedInput( - &h.htlcResolution.ClaimOutpoint, - &h.htlcResolution.SweepSignDesc, - h.htlcResolution.Preimage[:], - h.broadcastHeight, - h.htlcResolution.CsvDelay, - ) - - // With the input created, we can now generate the full - // sweep transaction, that we'll use to move these - // coins back into the backing wallet. - // - // TODO: Set tx lock time to current block height - // instead of zero. Will be taken care of once sweeper - // implementation is complete. - // - // TODO: Use time-based sweeper and result chan. - var err er.R - h.sweepTx, err = h.Sweeper.CreateSweepTx( - []input.Input{&inp}, - sweep.FeePreference{ - ConfTarget: sweepConfTarget, - }, 0, - ) - if err != nil { - return nil, err - } - - log.Infof("%T(%x): crafted sweep tx=%v", h, - h.htlc.RHash[:], spew.Sdump(h.sweepTx)) - - // With the sweep transaction signed, we'll now - // Checkpoint our state. - if err := h.Checkpoint(h); err != nil { - log.Errorf("unable to Checkpoint: %v", err) - return nil, err - } - } - - // Regardless of whether an existing transaction was found or newly - // constructed, we'll broadcast the sweep transaction to the - // network. - label := labels.MakeLabel( - labels.LabelTypeChannelClose, &h.ShortChanID, - ) - err := h.PublishTx(h.sweepTx, label) - if err != nil { - log.Infof("%T(%x): unable to publish tx: %v", - h, h.htlc.RHash[:], err) - return nil, err - } - - // With the sweep transaction broadcast, we'll wait for its - // confirmation. 
- sweepTXID := h.sweepTx.TxHash() - sweepScript := h.sweepTx.TxOut[0].PkScript - confNtfn, err := h.Notifier.RegisterConfirmationsNtfn( - &sweepTXID, sweepScript, 1, h.broadcastHeight, - ) - if err != nil { - return nil, err - } - - log.Infof("%T(%x): waiting for sweep tx (txid=%v) to be "+ - "confirmed", h, h.htlc.RHash[:], sweepTXID) - - select { - case _, ok := <-confNtfn.Confirmed: - if !ok { - return nil, errResolverShuttingDown.Default() - } - - case <-h.quit: - return nil, errResolverShuttingDown.Default() - } - - // Once the transaction has received a sufficient number of - // confirmations, we'll mark ourselves as fully resolved and exit. - h.resolved = true - - // Checkpoint the resolver, and write the outcome to disk. - return nil, h.checkpointClaim( - &sweepTXID, - channeldb.ResolverOutcomeClaimed, - ) - } - - log.Infof("%T(%x): broadcasting second-layer transition tx: %v", - h, h.htlc.RHash[:], spew.Sdump(h.htlcResolution.SignedSuccessTx)) - - // We'll now broadcast the second layer transaction so we can kick off - // the claiming process. - // - // TODO(roasbeef): after changing sighashes send to tx bundler - label := labels.MakeLabel( - labels.LabelTypeChannelClose, &h.ShortChanID, - ) - err := h.PublishTx(h.htlcResolution.SignedSuccessTx, label) - if err != nil { - return nil, err - } - - // Otherwise, this is an output on our commitment transaction. In this - // case, we'll send it to the incubator, but only if we haven't already - // done so. - if !h.outputIncubating { - log.Infof("%T(%x): incubating incoming htlc output", - h, h.htlc.RHash[:]) - - err := h.IncubateOutputs( - h.ChanPoint, nil, &h.htlcResolution, - h.broadcastHeight, - ) - if err != nil { - return nil, err - } - - h.outputIncubating = true - - if err := h.Checkpoint(h); err != nil { - log.Errorf("unable to Checkpoint: %v", err) - return nil, err - } - } - - // To wrap this up, we'll wait until the second-level transaction has - // been spent, then fully resolve the contract. 
- spendNtfn, err := h.Notifier.RegisterSpendNtfn( - &h.htlcResolution.ClaimOutpoint, - h.htlcResolution.SweepSignDesc.Output.PkScript, - h.broadcastHeight, - ) - if err != nil { - return nil, err - } - - log.Infof("%T(%x): waiting for second-level HTLC output to be spent "+ - "after csv_delay=%v", h, h.htlc.RHash[:], h.htlcResolution.CsvDelay) - - var spendTxid *chainhash.Hash - select { - case spend, ok := <-spendNtfn.Spend: - if !ok { - return nil, errResolverShuttingDown.Default() - } - spendTxid = spend.SpenderTxHash - - case <-h.quit: - return nil, errResolverShuttingDown.Default() - } - - h.resolved = true - return nil, h.checkpointClaim( - spendTxid, channeldb.ResolverOutcomeClaimed, - ) -} - -// checkpointClaim checkpoints the success resolver with the reports it needs. -// If this htlc was claimed two stages, it will write reports for both stages, -// otherwise it will just write for the single htlc claim. -func (h *htlcSuccessResolver) checkpointClaim(spendTx *chainhash.Hash, - outcome channeldb.ResolverOutcome) er.R { - - // Create a resolver report for claiming of the htlc itself. - amt := btcutil.Amount(h.htlcResolution.SweepSignDesc.Output.Value) - reports := []*channeldb.ResolverReport{ - { - OutPoint: h.htlcResolution.ClaimOutpoint, - Amount: amt, - ResolverType: channeldb.ResolverTypeIncomingHtlc, - ResolverOutcome: outcome, - SpendTxID: spendTx, - }, - } - - // If we have a success tx, we append a report to represent our first - // stage claim. - if h.htlcResolution.SignedSuccessTx != nil { - // If the SignedSuccessTx is not nil, we are claiming the htlc - // in two stages, so we need to create a report for the first - // stage transaction as well. 
- spendTx := h.htlcResolution.SignedSuccessTx - spendTxID := spendTx.TxHash() - - report := &channeldb.ResolverReport{ - OutPoint: spendTx.TxIn[0].PreviousOutPoint, - Amount: h.htlc.Amt.ToSatoshis(), - ResolverType: channeldb.ResolverTypeIncomingHtlc, - ResolverOutcome: channeldb.ResolverOutcomeFirstStage, - SpendTxID: &spendTxID, - } - reports = append(reports, report) - } - - // Finally, we checkpoint the resolver with our report(s). - return h.Checkpoint(h, reports...) -} - -// Stop signals the resolver to cancel any current resolution processes, and -// suspend. -// -// NOTE: Part of the ContractResolver interface. -func (h *htlcSuccessResolver) Stop() { - close(h.quit) -} - -// IsResolved returns true if the stored state in the resolve is fully -// resolved. In this case the target output can be forgotten. -// -// NOTE: Part of the ContractResolver interface. -func (h *htlcSuccessResolver) IsResolved() bool { - return h.resolved -} - -// Encode writes an encoded version of the ContractResolver into the passed -// Writer. -// -// NOTE: Part of the ContractResolver interface. -func (h *htlcSuccessResolver) Encode(w io.Writer) er.R { - // First we'll encode our inner HTLC resolution. - if err := encodeIncomingResolution(w, &h.htlcResolution); err != nil { - return err - } - - // Next, we'll write out the fields that are specified to the contract - // resolver. - if err := util.WriteBin(w, endian, h.outputIncubating); err != nil { - return err - } - if err := util.WriteBin(w, endian, h.resolved); err != nil { - return err - } - if err := util.WriteBin(w, endian, h.broadcastHeight); err != nil { - return err - } - if _, err := util.Write(w, h.htlc.RHash[:]); err != nil { - return err - } - - return nil -} - -// newSuccessResolverFromReader attempts to decode an encoded ContractResolver -// from the passed Reader instance, returning an active ContractResolver -// instance. 
-func newSuccessResolverFromReader(r io.Reader, resCfg ResolverConfig) ( - *htlcSuccessResolver, er.R) { - - h := &htlcSuccessResolver{ - contractResolverKit: *newContractResolverKit(resCfg), - } - - // First we'll decode our inner HTLC resolution. - if err := decodeIncomingResolution(r, &h.htlcResolution); err != nil { - return nil, err - } - - // Next, we'll read all the fields that are specified to the contract - // resolver. - if err := util.ReadBin(r, endian, &h.outputIncubating); err != nil { - return nil, err - } - if err := util.ReadBin(r, endian, &h.resolved); err != nil { - return nil, err - } - if err := util.ReadBin(r, endian, &h.broadcastHeight); err != nil { - return nil, err - } - if _, err := util.ReadFull(r, h.htlc.RHash[:]); err != nil { - return nil, err - } - - return h, nil -} - -// Supplement adds additional information to the resolver that is required -// before Resolve() is called. -// -// NOTE: Part of the htlcContractResolver interface. -func (h *htlcSuccessResolver) Supplement(htlc channeldb.HTLC) { - h.htlc = htlc -} - -// HtlcPoint returns the htlc's outpoint on the commitment tx. -// -// NOTE: Part of the htlcContractResolver interface. -func (h *htlcSuccessResolver) HtlcPoint() wire.OutPoint { - return h.htlcResolution.HtlcPoint() -} - -// A compile time assertion to ensure htlcSuccessResolver meets the -// ContractResolver interface. 
-var _ htlcContractResolver = (*htlcSuccessResolver)(nil) diff --git a/lnd/contractcourt/htlc_success_resolver_test.go b/lnd/contractcourt/htlc_success_resolver_test.go deleted file mode 100644 index 3d13e163..00000000 --- a/lnd/contractcourt/htlc_success_resolver_test.go +++ /dev/null @@ -1,243 +0,0 @@ -package contractcourt - -import ( - "testing" - - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/chainntnfs" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/lnd/lntest/mock" - "github.com/pkt-cash/pktd/lnd/lnwallet" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/wire" -) - -var testHtlcAmt = lnwire.MilliSatoshi(200000) - -type htlcSuccessResolverTestContext struct { - resolver *htlcSuccessResolver - notifier *mock.ChainNotifier - resolverResultChan chan resolveResult - t *testing.T -} - -func newHtlcSuccessResolverTextContext(t *testing.T) *htlcSuccessResolverTestContext { - notifier := &mock.ChainNotifier{ - EpochChan: make(chan *chainntnfs.BlockEpoch), - SpendChan: make(chan *chainntnfs.SpendDetail), - ConfChan: make(chan *chainntnfs.TxConfirmation), - } - - checkPointChan := make(chan struct{}, 1) - - testCtx := &htlcSuccessResolverTestContext{ - notifier: notifier, - t: t, - } - - chainCfg := ChannelArbitratorConfig{ - ChainArbitratorConfig: ChainArbitratorConfig{ - Notifier: notifier, - PublishTx: func(_ *wire.MsgTx, _ string) er.R { - return nil - }, - }, - PutResolverReport: func(_ kvdb.RwTx, - report *channeldb.ResolverReport) er.R { - - return nil - }, - } - - cfg := ResolverConfig{ - ChannelArbitratorConfig: chainCfg, - Checkpoint: func(_ ContractResolver, - _ ...*channeldb.ResolverReport) er.R { - - checkPointChan <- struct{}{} - return nil - }, - } - - testCtx.resolver = &htlcSuccessResolver{ - contractResolverKit: *newContractResolverKit(cfg), - htlcResolution: lnwallet.IncomingHtlcResolution{}, - htlc: 
channeldb.HTLC{ - RHash: testResHash, - OnionBlob: testOnionBlob, - Amt: testHtlcAmt, - }, - } - - return testCtx -} - -func (i *htlcSuccessResolverTestContext) resolve() { - // Start resolver. - i.resolverResultChan = make(chan resolveResult, 1) - go func() { - nextResolver, err := i.resolver.Resolve() - i.resolverResultChan <- resolveResult{ - nextResolver: nextResolver, - err: err, - } - }() -} - -func (i *htlcSuccessResolverTestContext) waitForResult() { - i.t.Helper() - - result := <-i.resolverResultChan - if result.err != nil { - i.t.Fatal(result.err) - } - - if result.nextResolver != nil { - i.t.Fatal("expected no next resolver") - } -} - -// TestSingleStageSuccess tests successful sweep of a single stage htlc claim. -func TestSingleStageSuccess(t *testing.T) { - htlcOutpoint := wire.OutPoint{Index: 3} - - sweepTx := &wire.MsgTx{ - TxIn: []*wire.TxIn{{}}, - TxOut: []*wire.TxOut{{}}, - } - - // singleStageResolution is a resolution for a htlc on the remote - // party's commitment. - singleStageResolution := lnwallet.IncomingHtlcResolution{ - SweepSignDesc: testSignDesc, - ClaimOutpoint: htlcOutpoint, - } - - // We send a confirmation for our sweep tx to indicate that our sweep - // succeeded. - resolve := func(ctx *htlcSuccessResolverTestContext) { - ctx.notifier.ConfChan <- &chainntnfs.TxConfirmation{ - Tx: ctx.resolver.sweepTx, - BlockHeight: testInitialBlockHeight - 1, - } - } - - sweepTxid := sweepTx.TxHash() - claim := &channeldb.ResolverReport{ - OutPoint: htlcOutpoint, - Amount: btcutil.Amount(testSignDesc.Output.Value), - ResolverType: channeldb.ResolverTypeIncomingHtlc, - ResolverOutcome: channeldb.ResolverOutcomeClaimed, - SpendTxID: &sweepTxid, - } - testHtlcSuccess( - t, singleStageResolution, resolve, sweepTx, claim, - ) -} - -// TestSecondStageResolution tests successful sweep of a second stage htlc -// claim. 
-func TestSecondStageResolution(t *testing.T) { - commitOutpoint := wire.OutPoint{Index: 2} - htlcOutpoint := wire.OutPoint{Index: 3} - - sweepTx := &wire.MsgTx{ - TxIn: []*wire.TxIn{{}}, - TxOut: []*wire.TxOut{{}}, - } - sweepHash := sweepTx.TxHash() - - // twoStageResolution is a resolution for htlc on our own commitment - // which is spent from the signed success tx. - twoStageResolution := lnwallet.IncomingHtlcResolution{ - Preimage: [32]byte{}, - SignedSuccessTx: &wire.MsgTx{ - TxIn: []*wire.TxIn{ - { - PreviousOutPoint: commitOutpoint, - }, - }, - TxOut: []*wire.TxOut{}, - }, - ClaimOutpoint: htlcOutpoint, - SweepSignDesc: testSignDesc, - } - - // We send a spend notification for our output to resolve our htlc. - resolve := func(ctx *htlcSuccessResolverTestContext) { - ctx.notifier.SpendChan <- &chainntnfs.SpendDetail{ - SpendingTx: sweepTx, - SpenderTxHash: &sweepHash, - } - } - - successTx := twoStageResolution.SignedSuccessTx.TxHash() - firstStage := &channeldb.ResolverReport{ - OutPoint: commitOutpoint, - Amount: testHtlcAmt.ToSatoshis(), - ResolverType: channeldb.ResolverTypeIncomingHtlc, - ResolverOutcome: channeldb.ResolverOutcomeFirstStage, - SpendTxID: &successTx, - } - - secondStage := &channeldb.ResolverReport{ - OutPoint: htlcOutpoint, - Amount: btcutil.Amount(testSignDesc.Output.Value), - ResolverType: channeldb.ResolverTypeIncomingHtlc, - ResolverOutcome: channeldb.ResolverOutcomeClaimed, - SpendTxID: &sweepHash, - } - - testHtlcSuccess( - t, twoStageResolution, resolve, sweepTx, secondStage, firstStage, - ) -} - -// testHtlcSuccess tests resolution of a success resolver. It takes a resolve -// function which triggers resolution and the sweeptxid that will resolve it. 
-func testHtlcSuccess(t *testing.T, resolution lnwallet.IncomingHtlcResolution, - resolve func(*htlcSuccessResolverTestContext), - sweepTx *wire.MsgTx, reports ...*channeldb.ResolverReport) { - - defer timeout(t)() - - ctx := newHtlcSuccessResolverTextContext(t) - - // Replace our checkpoint with one which will push reports into a - // channel for us to consume. We replace this function on the resolver - // itself because it is created by the test context. - reportChan := make(chan *channeldb.ResolverReport) - ctx.resolver.Checkpoint = func(_ ContractResolver, - reports ...*channeldb.ResolverReport) er.R { - - // Send all of our reports into the channel. - for _, report := range reports { - reportChan <- report - } - - return nil - } - - ctx.resolver.htlcResolution = resolution - - // We set the sweepTx to be non-nil and mark the output as already - // incubating so that we do not need to set test values for crafting - // our own sweep transaction. - ctx.resolver.sweepTx = sweepTx - ctx.resolver.outputIncubating = true - - // Start the htlc success resolver. - ctx.resolve() - - // Trigger and event that will resolve our test context. - resolve(ctx) - - for _, report := range reports { - assertResolverReport(t, reportChan, report) - } - - // Wait for the resolver to fully complete. 
- ctx.waitForResult() -} diff --git a/lnd/contractcourt/htlc_timeout_resolver.go b/lnd/contractcourt/htlc_timeout_resolver.go deleted file mode 100644 index 1de12fd3..00000000 --- a/lnd/contractcourt/htlc_timeout_resolver.go +++ /dev/null @@ -1,514 +0,0 @@ -package contractcourt - -import ( - "io" - - "github.com/davecgh/go-spew/spew" - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/lnd/chainntnfs" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/input" - "github.com/pkt-cash/pktd/lnd/lntypes" - "github.com/pkt-cash/pktd/lnd/lnwallet" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/pktlog/log" - "github.com/pkt-cash/pktd/wire" -) - -// htlcTimeoutResolver is a ContractResolver that's capable of resolving an -// outgoing HTLC. The HTLC may be on our commitment transaction, or on the -// commitment transaction of the remote party. An output on our commitment -// transaction is considered fully resolved once the second-level transaction -// has been confirmed (and reached a sufficient depth). An output on the -// commitment transaction of the remote party is resolved once we detect a -// spend of the direct HTLC output using the timeout clause. -type htlcTimeoutResolver struct { - // htlcResolution contains all the information required to properly - // resolve this outgoing HTLC. - htlcResolution lnwallet.OutgoingHtlcResolution - - // outputIncubating returns true if we've sent the output to the output - // incubator (utxo nursery). - outputIncubating bool - - // resolved reflects if the contract has been fully resolved or not. - resolved bool - - // broadcastHeight is the height that the original contract was - // broadcast to the main-chain at. We'll use this value to bound any - // historical queries to the chain for spends/confirmations. 
- // - // TODO(roasbeef): wrap above into definite resolution embedding? - broadcastHeight uint32 - - // htlc contains information on the htlc that we are resolving on-chain. - htlc channeldb.HTLC - - contractResolverKit -} - -// newTimeoutResolver instantiates a new timeout htlc resolver. -func newTimeoutResolver(res lnwallet.OutgoingHtlcResolution, - broadcastHeight uint32, htlc channeldb.HTLC, - resCfg ResolverConfig) *htlcTimeoutResolver { - - return &htlcTimeoutResolver{ - contractResolverKit: *newContractResolverKit(resCfg), - htlcResolution: res, - broadcastHeight: broadcastHeight, - htlc: htlc, - } -} - -// ResolverKey returns an identifier which should be globally unique for this -// particular resolver within the chain the original contract resides within. -// -// NOTE: Part of the ContractResolver interface. -func (h *htlcTimeoutResolver) ResolverKey() []byte { - // The primary key for this resolver will be the outpoint of the HTLC - // on the commitment transaction itself. If this is our commitment, - // then the output can be found within the signed timeout tx, - // otherwise, it's just the ClaimOutpoint. - var op wire.OutPoint - if h.htlcResolution.SignedTimeoutTx != nil { - op = h.htlcResolution.SignedTimeoutTx.TxIn[0].PreviousOutPoint - } else { - op = h.htlcResolution.ClaimOutpoint - } - - key := newResolverID(op) - return key[:] -} - -const ( - // expectedRemoteWitnessSuccessSize is the expected size of the witness - // on the remote commitment transaction for an outgoing HTLC that is - // swept on-chain by them with pre-image. - expectedRemoteWitnessSuccessSize = 5 - - // remotePreimageIndex index within the witness on the remote - // commitment transaction that will hold they pre-image if they go to - // sweep it on chain. - remotePreimageIndex = 3 - - // localPreimageIndex is the index within the witness on the local - // commitment transaction for an outgoing HTLC that will hold the - // pre-image if the remote party sweeps it. 
- localPreimageIndex = 1 -) - -// claimCleanUp is a helper method that's called once the HTLC output is spent -// by the remote party. It'll extract the preimage, add it to the global cache, -// and finally send the appropriate clean up message. -func (h *htlcTimeoutResolver) claimCleanUp( - commitSpend *chainntnfs.SpendDetail) (ContractResolver, er.R) { - - // Depending on if this is our commitment or not, then we'll be looking - // for a different witness pattern. - spenderIndex := commitSpend.SpenderInputIndex - spendingInput := commitSpend.SpendingTx.TxIn[spenderIndex] - - log.Infof("%T(%v): extracting preimage! remote party spent "+ - "HTLC with tx=%v", h, h.htlcResolution.ClaimOutpoint, - spew.Sdump(commitSpend.SpendingTx)) - - // If this is the remote party's commitment, then we'll be looking for - // them to spend using the second-level success transaction. - var preimageBytes []byte - if h.htlcResolution.SignedTimeoutTx == nil { - // The witness stack when the remote party sweeps the output to - // them looks like: - // - // * <0> - preimageBytes = spendingInput.Witness[remotePreimageIndex] - } else { - // Otherwise, they'll be spending directly from our commitment - // output. In which case the witness stack looks like: - // - // * - preimageBytes = spendingInput.Witness[localPreimageIndex] - } - - preimage, err := lntypes.MakePreimage(preimageBytes) - if err != nil { - return nil, er.Errorf("unable to create pre-image from "+ - "witness: %v", err) - } - - log.Infof("%T(%v): extracting preimage=%v from on-chain "+ - "spend!", h, h.htlcResolution.ClaimOutpoint, preimage) - - // With the preimage obtained, we can now add it to the global cache. - if err := h.PreimageDB.AddPreimages(preimage); err != nil { - log.Errorf("%T(%v): unable to add witness to cache", - h, h.htlcResolution.ClaimOutpoint) - } - - var pre [32]byte - copy(pre[:], preimage[:]) - - // Finally, we'll send the clean up message, mark ourselves as - // resolved, then exit. 
- if err := h.DeliverResolutionMsg(ResolutionMsg{ - SourceChan: h.ShortChanID, - HtlcIndex: h.htlc.HtlcIndex, - PreImage: &pre, - }); err != nil { - return nil, err - } - h.resolved = true - - // Checkpoint our resolver with a report which reflects the preimage - // claim by the remote party. - amt := btcutil.Amount(h.htlcResolution.SweepSignDesc.Output.Value) - report := &channeldb.ResolverReport{ - OutPoint: h.htlcResolution.ClaimOutpoint, - Amount: amt, - ResolverType: channeldb.ResolverTypeOutgoingHtlc, - ResolverOutcome: channeldb.ResolverOutcomeClaimed, - SpendTxID: commitSpend.SpenderTxHash, - } - - return nil, h.Checkpoint(h, report) -} - -// chainDetailsToWatch returns the output and script which we use to watch for -// spends from the direct HTLC output on the commitment transaction. -// -// TODO(joostjager): output already set properly in -// lnwallet.newOutgoingHtlcResolution? And script too? -func (h *htlcTimeoutResolver) chainDetailsToWatch() (*wire.OutPoint, []byte, er.R) { - // If there's no timeout transaction, then the claim output is the - // output directly on the commitment transaction, so we'll just use - // that. - if h.htlcResolution.SignedTimeoutTx == nil { - outPointToWatch := h.htlcResolution.ClaimOutpoint - scriptToWatch := h.htlcResolution.SweepSignDesc.Output.PkScript - - return &outPointToWatch, scriptToWatch, nil - } - - // If this is the remote party's commitment, then we'll need to grab - // watch the output that our timeout transaction points to. We can - // directly grab the outpoint, then also extract the witness script - // (the last element of the witness stack) to re-construct the pkScript - // we need to watch. 
- outPointToWatch := h.htlcResolution.SignedTimeoutTx.TxIn[0].PreviousOutPoint - witness := h.htlcResolution.SignedTimeoutTx.TxIn[0].Witness - scriptToWatch, err := input.WitnessScriptHash(witness[len(witness)-1]) - if err != nil { - return nil, nil, err - } - - return &outPointToWatch, scriptToWatch, nil -} - -// isSuccessSpend returns true if the passed spend on the specified commitment -// is a success spend that reveals the pre-image or not. -func isSuccessSpend(spend *chainntnfs.SpendDetail, localCommit bool) bool { - // Based on the spending input index and transaction, obtain the - // witness that tells us what type of spend this is. - spenderIndex := spend.SpenderInputIndex - spendingInput := spend.SpendingTx.TxIn[spenderIndex] - spendingWitness := spendingInput.Witness - - // If this is the remote commitment then the only possible spends for - // outgoing HTLCs are: - // - // RECVR: <0> (2nd level success spend) - // REVOK: - // SENDR: 0 - // - // In this case, if 5 witness elements are present (factoring the - // witness script), and the 3rd element is the size of the pre-image, - // then this is a remote spend. If not, then we swept it ourselves, or - // revoked their output. - if !localCommit { - return len(spendingWitness) == expectedRemoteWitnessSuccessSize && - len(spendingWitness[remotePreimageIndex]) == lntypes.HashSize - } - - // Otherwise, for our commitment, the only possible spends for an - // outgoing HTLC are: - // - // SENDR: <0> <0> (2nd level timeout) - // RECVR: - // REVOK: - // - // So the only success case has the pre-image as the 2nd (index 1) - // element in the witness. - return len(spendingWitness[localPreimageIndex]) == lntypes.HashSize -} - -// Resolve kicks off full resolution of an outgoing HTLC output. If it's our -// commitment, it isn't resolved until we see the second level HTLC txn -// confirmed. If it's the remote party's commitment, we don't resolve until we -// see a direct sweep via the timeout clause. 
-// -// NOTE: Part of the ContractResolver interface. -func (h *htlcTimeoutResolver) Resolve() (ContractResolver, er.R) { - // If we're already resolved, then we can exit early. - if h.resolved { - return nil, nil - } - - // If we haven't already sent the output to the utxo nursery, then - // we'll do so now. - if !h.outputIncubating { - log.Tracef("%T(%v): incubating htlc output", h, - h.htlcResolution.ClaimOutpoint) - - err := h.IncubateOutputs( - h.ChanPoint, &h.htlcResolution, nil, - h.broadcastHeight, - ) - if err != nil { - return nil, err - } - - h.outputIncubating = true - - if err := h.Checkpoint(h); err != nil { - log.Errorf("unable to Checkpoint: %v", err) - return nil, err - } - } - - var spendTxID *chainhash.Hash - - // waitForOutputResolution waits for the HTLC output to be fully - // resolved. The output is considered fully resolved once it has been - // spent, and the spending transaction has been fully confirmed. - waitForOutputResolution := func() er.R { - // We first need to register to see when the HTLC output itself - // has been spent by a confirmed transaction. - spendNtfn, err := h.Notifier.RegisterSpendNtfn( - &h.htlcResolution.ClaimOutpoint, - h.htlcResolution.SweepSignDesc.Output.PkScript, - h.broadcastHeight, - ) - if err != nil { - return err - } - - select { - case spendDetail, ok := <-spendNtfn.Spend: - if !ok { - return errResolverShuttingDown.Default() - } - spendTxID = spendDetail.SpenderTxHash - - case <-h.quit: - return errResolverShuttingDown.Default() - } - - return nil - } - - // Now that we've handed off the HTLC to the nursery, we'll watch for a - // spend of the output, and make our next move off of that. Depending - // on if this is our commitment, or the remote party's commitment, - // we'll be watching a different outpoint and script. 
- outpointToWatch, scriptToWatch, err := h.chainDetailsToWatch() - if err != nil { - return nil, err - } - spendNtfn, err := h.Notifier.RegisterSpendNtfn( - outpointToWatch, scriptToWatch, h.broadcastHeight, - ) - if err != nil { - return nil, err - } - - log.Infof("%T(%v): waiting for HTLC output %v to be spent"+ - "fully confirmed", h, h.htlcResolution.ClaimOutpoint, - outpointToWatch) - - // We'll block here until either we exit, or the HTLC output on the - // commitment transaction has been spent. - var ( - spend *chainntnfs.SpendDetail - ok bool - ) - select { - case spend, ok = <-spendNtfn.Spend: - if !ok { - return nil, errResolverShuttingDown.Default() - } - spendTxID = spend.SpenderTxHash - - case <-h.quit: - return nil, errResolverShuttingDown.Default() - } - - // If the spend reveals the pre-image, then we'll enter the clean up - // workflow to pass the pre-image back to the incoming link, add it to - // the witness cache, and exit. - if isSuccessSpend(spend, h.htlcResolution.SignedTimeoutTx != nil) { - log.Infof("%T(%v): HTLC has been swept with pre-image by "+ - "remote party during timeout flow! Adding pre-image to "+ - "witness cache", h.htlcResolution.ClaimOutpoint) - - return h.claimCleanUp(spend) - } - - log.Infof("%T(%v): resolving htlc with incoming fail msg, fully "+ - "confirmed", h, h.htlcResolution.ClaimOutpoint) - - // At this point, the second-level transaction is sufficiently - // confirmed, or a transaction directly spending the output is. - // Therefore, we can now send back our clean up message, failing the - // HTLC on the incoming link. 
- failureMsg := &lnwire.FailPermanentChannelFailure{} - if err := h.DeliverResolutionMsg(ResolutionMsg{ - SourceChan: h.ShortChanID, - HtlcIndex: h.htlc.HtlcIndex, - Failure: failureMsg, - }); err != nil { - return nil, err - } - - var reports []*channeldb.ResolverReport - - // Finally, if this was an output on our commitment transaction, we'll - // wait for the second-level HTLC output to be spent, and for that - // transaction itself to confirm. - if h.htlcResolution.SignedTimeoutTx != nil { - log.Infof("%T(%v): waiting for nursery to spend CSV delayed "+ - "output", h, h.htlcResolution.ClaimOutpoint) - if err := waitForOutputResolution(); err != nil { - return nil, err - } - - // Once our timeout tx has confirmed, we add a resolution for - // our timeoutTx tx first stage transaction. - timeoutTx := h.htlcResolution.SignedTimeoutTx - spendHash := timeoutTx.TxHash() - - reports = append(reports, &channeldb.ResolverReport{ - OutPoint: timeoutTx.TxIn[0].PreviousOutPoint, - Amount: h.htlc.Amt.ToSatoshis(), - ResolverType: channeldb.ResolverTypeOutgoingHtlc, - ResolverOutcome: channeldb.ResolverOutcomeFirstStage, - SpendTxID: &spendHash, - }) - } - - // With the clean up message sent, we'll now mark the contract - // resolved, record the timeout and the sweep txid on disk, and wait. - h.resolved = true - - amt := btcutil.Amount(h.htlcResolution.SweepSignDesc.Output.Value) - reports = append(reports, &channeldb.ResolverReport{ - OutPoint: h.htlcResolution.ClaimOutpoint, - Amount: amt, - ResolverType: channeldb.ResolverTypeOutgoingHtlc, - ResolverOutcome: channeldb.ResolverOutcomeTimeout, - SpendTxID: spendTxID, - }) - - return nil, h.Checkpoint(h, reports...) -} - -// Stop signals the resolver to cancel any current resolution processes, and -// suspend. -// -// NOTE: Part of the ContractResolver interface. -func (h *htlcTimeoutResolver) Stop() { - close(h.quit) -} - -// IsResolved returns true if the stored state in the resolve is fully -// resolved. 
In this case the target output can be forgotten. -// -// NOTE: Part of the ContractResolver interface. -func (h *htlcTimeoutResolver) IsResolved() bool { - return h.resolved -} - -// Encode writes an encoded version of the ContractResolver into the passed -// Writer. -// -// NOTE: Part of the ContractResolver interface. -func (h *htlcTimeoutResolver) Encode(w io.Writer) er.R { - // First, we'll write out the relevant fields of the - // OutgoingHtlcResolution to the writer. - if err := encodeOutgoingResolution(w, &h.htlcResolution); err != nil { - return err - } - - // With that portion written, we can now write out the fields specific - // to the resolver itself. - if err := util.WriteBin(w, endian, h.outputIncubating); err != nil { - return err - } - if err := util.WriteBin(w, endian, h.resolved); err != nil { - return err - } - if err := util.WriteBin(w, endian, h.broadcastHeight); err != nil { - return err - } - - if err := util.WriteBin(w, endian, h.htlc.HtlcIndex); err != nil { - return err - } - - return nil -} - -// newTimeoutResolverFromReader attempts to decode an encoded ContractResolver -// from the passed Reader instance, returning an active ContractResolver -// instance. -func newTimeoutResolverFromReader(r io.Reader, resCfg ResolverConfig) ( - *htlcTimeoutResolver, er.R) { - - h := &htlcTimeoutResolver{ - contractResolverKit: *newContractResolverKit(resCfg), - } - - // First, we'll read out all the mandatory fields of the - // OutgoingHtlcResolution that we store. - if err := decodeOutgoingResolution(r, &h.htlcResolution); err != nil { - return nil, err - } - - // With those fields read, we can now read back the fields that are - // specific to the resolver itself. 
- if err := util.ReadBin(r, endian, &h.outputIncubating); err != nil { - return nil, err - } - if err := util.ReadBin(r, endian, &h.resolved); err != nil { - return nil, err - } - if err := util.ReadBin(r, endian, &h.broadcastHeight); err != nil { - return nil, err - } - - if err := util.ReadBin(r, endian, &h.htlc.HtlcIndex); err != nil { - return nil, err - } - - return h, nil -} - -// Supplement adds additional information to the resolver that is required -// before Resolve() is called. -// -// NOTE: Part of the htlcContractResolver interface. -func (h *htlcTimeoutResolver) Supplement(htlc channeldb.HTLC) { - h.htlc = htlc -} - -// HtlcPoint returns the htlc's outpoint on the commitment tx. -// -// NOTE: Part of the htlcContractResolver interface. -func (h *htlcTimeoutResolver) HtlcPoint() wire.OutPoint { - return h.htlcResolution.HtlcPoint() -} - -// A compile time assertion to ensure htlcTimeoutResolver meets the -// ContractResolver interface. -var _ htlcContractResolver = (*htlcTimeoutResolver)(nil) diff --git a/lnd/contractcourt/htlc_timeout_resolver_test.go b/lnd/contractcourt/htlc_timeout_resolver_test.go deleted file mode 100644 index 523e65e1..00000000 --- a/lnd/contractcourt/htlc_timeout_resolver_test.go +++ /dev/null @@ -1,435 +0,0 @@ -package contractcourt - -import ( - "bytes" - "sync" - "testing" - "time" - - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/chainntnfs" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/lnd/input" - "github.com/pkt-cash/pktd/lnd/lntest/mock" - "github.com/pkt-cash/pktd/lnd/lntypes" - "github.com/pkt-cash/pktd/lnd/lnwallet" - "github.com/pkt-cash/pktd/txscript/params" - "github.com/pkt-cash/pktd/wire" -) - -type mockWitnessBeacon struct { - preImageUpdates chan lntypes.Preimage - newPreimages chan []lntypes.Preimage - lookupPreimage map[lntypes.Hash]lntypes.Preimage -} - -func 
newMockWitnessBeacon() *mockWitnessBeacon { - return &mockWitnessBeacon{ - preImageUpdates: make(chan lntypes.Preimage, 1), - newPreimages: make(chan []lntypes.Preimage), - lookupPreimage: make(map[lntypes.Hash]lntypes.Preimage), - } -} - -func (m *mockWitnessBeacon) SubscribeUpdates() *WitnessSubscription { - return &WitnessSubscription{ - WitnessUpdates: m.preImageUpdates, - CancelSubscription: func() {}, - } -} - -func (m *mockWitnessBeacon) LookupPreimage(payhash lntypes.Hash) (lntypes.Preimage, bool) { - preimage, ok := m.lookupPreimage[payhash] - if !ok { - return lntypes.Preimage{}, false - } - return preimage, true -} - -func (m *mockWitnessBeacon) AddPreimages(preimages ...lntypes.Preimage) er.R { - m.newPreimages <- preimages - return nil -} - -// TestHtlcTimeoutResolver tests that the timeout resolver properly handles all -// variations of possible local+remote spends. -func TestHtlcTimeoutResolver(t *testing.T) { - t.Parallel() - - fakePreimageBytes := bytes.Repeat([]byte{1}, lntypes.HashSize) - - var ( - htlcOutpoint wire.OutPoint - fakePreimage lntypes.Preimage - ) - fakeSignDesc := &input.SignDescriptor{ - Output: &wire.TxOut{}, - } - - copy(fakePreimage[:], fakePreimageBytes) - - signer := &mock.DummySigner{} - sweepTx := &wire.MsgTx{ - TxIn: []*wire.TxIn{ - { - PreviousOutPoint: htlcOutpoint, - Witness: [][]byte{{0x01}}, - }, - }, - } - fakeTimeout := int32(5) - - templateTx := &wire.MsgTx{ - TxIn: []*wire.TxIn{ - { - PreviousOutPoint: htlcOutpoint, - }, - }, - } - - testCases := []struct { - // name is a human readable description of the test case. - name string - - // remoteCommit denotes if the commitment broadcast was the - // remote commitment or not. - remoteCommit bool - - // timeout denotes if the HTLC should be let timeout, or if the - // "remote" party should sweep it on-chain. This also affects - // what type of resolution message we expect. 
- timeout bool - - // txToBroadcast is a function closure that should generate the - // transaction that should spend the HTLC output. Test authors - // can use this to customize the witness used when spending to - // trigger various redemption cases. - txToBroadcast func() (*wire.MsgTx, er.R) - - // outcome is the resolver outcome that we expect to be reported - // once the contract is fully resolved. - outcome channeldb.ResolverOutcome - }{ - // Remote commitment is broadcast, we time out the HTLC on - // chain, and should expect a fail HTLC resolution. - { - name: "timeout remote tx", - remoteCommit: true, - timeout: true, - txToBroadcast: func() (*wire.MsgTx, er.R) { - witness, err := input.ReceiverHtlcSpendTimeout( - signer, fakeSignDesc, sweepTx, - fakeTimeout, - ) - if err != nil { - return nil, err - } - - templateTx.TxIn[0].Witness = witness - return templateTx, nil - }, - outcome: channeldb.ResolverOutcomeTimeout, - }, - - // Our local commitment is broadcast, we timeout the HTLC and - // still expect an HTLC fail resolution. - { - name: "timeout local tx", - remoteCommit: false, - timeout: true, - txToBroadcast: func() (*wire.MsgTx, er.R) { - witness, err := input.SenderHtlcSpendTimeout( - &mock.DummySignature{}, params.SigHashAll, - signer, fakeSignDesc, sweepTx, - ) - if err != nil { - return nil, err - } - - templateTx.TxIn[0].Witness = witness - - // Set the outpoint to be on our commitment, since - // we need to claim in two stages. - templateTx.TxIn[0].PreviousOutPoint = testChanPoint1 - return templateTx, nil - }, - outcome: channeldb.ResolverOutcomeTimeout, - }, - - // The remote commitment is broadcast, they sweep with the - // pre-image, we should get a settle HTLC resolution. 
- { - name: "success remote tx", - remoteCommit: true, - timeout: false, - txToBroadcast: func() (*wire.MsgTx, er.R) { - witness, err := input.ReceiverHtlcSpendRedeem( - &mock.DummySignature{}, params.SigHashAll, - fakePreimageBytes, signer, fakeSignDesc, - sweepTx, - ) - if err != nil { - return nil, err - } - - templateTx.TxIn[0].Witness = witness - return templateTx, nil - }, - outcome: channeldb.ResolverOutcomeClaimed, - }, - - // The local commitment is broadcast, they sweep it with a - // timeout from the output, and we should still get the HTLC - // settle resolution back. - { - name: "success local tx", - remoteCommit: false, - timeout: false, - txToBroadcast: func() (*wire.MsgTx, er.R) { - witness, err := input.SenderHtlcSpendRedeem( - signer, fakeSignDesc, sweepTx, - fakePreimageBytes, - ) - if err != nil { - return nil, err - } - - templateTx.TxIn[0].Witness = witness - return templateTx, nil - }, - outcome: channeldb.ResolverOutcomeClaimed, - }, - } - - notifier := &mock.ChainNotifier{ - EpochChan: make(chan *chainntnfs.BlockEpoch), - SpendChan: make(chan *chainntnfs.SpendDetail), - ConfChan: make(chan *chainntnfs.TxConfirmation), - } - witnessBeacon := newMockWitnessBeacon() - - for _, testCase := range testCases { - t.Logf("Running test case: %v", testCase.name) - - checkPointChan := make(chan struct{}, 1) - incubateChan := make(chan struct{}, 1) - resolutionChan := make(chan ResolutionMsg, 1) - reportChan := make(chan *channeldb.ResolverReport) - - chainCfg := ChannelArbitratorConfig{ - ChainArbitratorConfig: ChainArbitratorConfig{ - Notifier: notifier, - PreimageDB: witnessBeacon, - IncubateOutputs: func(wire.OutPoint, - *lnwallet.OutgoingHtlcResolution, - *lnwallet.IncomingHtlcResolution, - uint32) er.R { - - incubateChan <- struct{}{} - return nil - }, - DeliverResolutionMsg: func(msgs ...ResolutionMsg) er.R { - if len(msgs) != 1 { - return er.Errorf("expected 1 "+ - "resolution msg, instead got %v", - len(msgs)) - } - - resolutionChan <- msgs[0] 
- return nil - }, - }, - PutResolverReport: func(_ kvdb.RwTx, - _ *channeldb.ResolverReport) er.R { - - return nil - }, - } - - cfg := ResolverConfig{ - ChannelArbitratorConfig: chainCfg, - Checkpoint: func(_ ContractResolver, - reports ...*channeldb.ResolverReport) er.R { - - checkPointChan <- struct{}{} - - // Send all of our reports into the channel. - for _, report := range reports { - reportChan <- report - } - - return nil - }, - } - resolver := &htlcTimeoutResolver{ - htlcResolution: lnwallet.OutgoingHtlcResolution{ - ClaimOutpoint: testChanPoint2, - SweepSignDesc: *fakeSignDesc, - }, - contractResolverKit: *newContractResolverKit( - cfg, - ), - htlc: channeldb.HTLC{ - Amt: testHtlcAmt, - }, - } - - var reports []*channeldb.ResolverReport - - // If the test case needs the remote commitment to be - // broadcast, then we'll set the timeout commit to a fake - // transaction to force the code path. - if !testCase.remoteCommit { - resolver.htlcResolution.SignedTimeoutTx = sweepTx - - if testCase.timeout { - success := sweepTx.TxHash() - reports = append(reports, &channeldb.ResolverReport{ - OutPoint: sweepTx.TxIn[0].PreviousOutPoint, - Amount: testHtlcAmt.ToSatoshis(), - ResolverType: channeldb.ResolverTypeOutgoingHtlc, - ResolverOutcome: channeldb.ResolverOutcomeFirstStage, - SpendTxID: &success, - }) - } - } - - // With all the setup above complete, we can initiate the - // resolution process, and the bulk of our test. - var wg sync.WaitGroup - resolveErr := make(chan er.R, 1) - wg.Add(1) - go func() { - defer wg.Done() - - _, err := resolver.Resolve() - if err != nil { - resolveErr <- err - } - }() - - // At the output isn't yet in the nursery, we expect that we - // should receive an incubation request. 
- select { - case <-incubateChan: - case err := <-resolveErr: - t.Fatalf("unable to resolve HTLC: %v", err) - case <-time.After(time.Second * 5): - t.Fatalf("failed to receive incubation request") - } - - // Next, the resolver should request a spend notification for - // the direct HTLC output. We'll use the txToBroadcast closure - // for the test case to generate the transaction that we'll - // send to the resolver. - spendingTx, err := testCase.txToBroadcast() - if err != nil { - t.Fatalf("unable to generate tx: %v", err) - } - spendTxHash := spendingTx.TxHash() - - select { - case notifier.SpendChan <- &chainntnfs.SpendDetail{ - SpendingTx: spendingTx, - SpenderTxHash: &spendTxHash, - }: - case <-time.After(time.Second * 5): - t.Fatalf("failed to request spend ntfn") - } - - if !testCase.timeout { - // If the resolver should settle now, then we'll - // extract the pre-image to be extracted and the - // resolution message sent. - select { - case newPreimage := <-witnessBeacon.newPreimages: - if newPreimage[0] != fakePreimage { - t.Fatalf("wrong pre-image: "+ - "expected %v, got %v", - fakePreimage, newPreimage) - } - - case <-time.After(time.Second * 5): - t.Fatalf("pre-image not added") - } - - // Finally, we should get a resolution message with the - // pre-image set within the message. - select { - case resolutionMsg := <-resolutionChan: - // Once again, the pre-images should match up. - if *resolutionMsg.PreImage != fakePreimage { - t.Fatalf("wrong pre-image: "+ - "expected %v, got %v", - fakePreimage, resolutionMsg.PreImage) - } - case <-time.After(time.Second * 5): - t.Fatalf("resolution not sent") - } - } else { - - // Otherwise, the HTLC should now timeout. First, we - // should get a resolution message with a populated - // failure message. 
- select { - case resolutionMsg := <-resolutionChan: - if resolutionMsg.Failure == nil { - t.Fatalf("expected failure resolution msg") - } - case <-time.After(time.Second * 5): - t.Fatalf("resolution not sent") - } - - // We should also get another request for the spend - // notification of the second-level transaction to - // indicate that it's been swept by the nursery, but - // only if this is a local commitment transaction. - if !testCase.remoteCommit { - select { - case notifier.SpendChan <- &chainntnfs.SpendDetail{ - SpendingTx: spendingTx, - SpenderTxHash: &spendTxHash, - }: - case <-time.After(time.Second * 5): - t.Fatalf("failed to request spend ntfn") - } - } - } - - // In any case, before the resolver exits, it should checkpoint - // its final state. - select { - case <-checkPointChan: - case err := <-resolveErr: - t.Fatalf("unable to resolve HTLC: %v", err) - case <-time.After(time.Second * 5): - t.Fatalf("check point not received") - } - - // Add a report to our set of expected reports with the outcome - // that the test specifies (either success or timeout). - spendTxID := spendingTx.TxHash() - amt := btcutil.Amount(fakeSignDesc.Output.Value) - - reports = append(reports, &channeldb.ResolverReport{ - OutPoint: testChanPoint2, - Amount: amt, - ResolverType: channeldb.ResolverTypeOutgoingHtlc, - ResolverOutcome: testCase.outcome, - SpendTxID: &spendTxID, - }) - - for _, report := range reports { - assertResolverReport(t, reportChan, report) - } - - wg.Wait() - - // Finally, the resolver should be marked as resolved. 
- if !resolver.resolved { - t.Fatalf("resolver should be marked as resolved") - } - } -} diff --git a/lnd/contractcourt/interfaces.go b/lnd/contractcourt/interfaces.go deleted file mode 100644 index b79de79e..00000000 --- a/lnd/contractcourt/interfaces.go +++ /dev/null @@ -1,68 +0,0 @@ -package contractcourt - -import ( - "io" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/htlcswitch/hop" - "github.com/pkt-cash/pktd/lnd/input" - "github.com/pkt-cash/pktd/lnd/invoices" - "github.com/pkt-cash/pktd/lnd/lntypes" - "github.com/pkt-cash/pktd/lnd/lnwallet/chainfee" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/sweep" - "github.com/pkt-cash/pktd/wire" -) - -// Registry is an interface which represents the invoice registry. -type Registry interface { - // LookupInvoice attempts to look up an invoice according to its 32 - // byte payment hash. - LookupInvoice(lntypes.Hash) (channeldb.Invoice, er.R) - - // NotifyExitHopHtlc attempts to mark an invoice as settled. If the - // invoice is a debug invoice, then this method is a noop as debug - // invoices are never fully settled. The return value describes how the - // htlc should be resolved. If the htlc cannot be resolved immediately, - // the resolution is sent on the passed in hodlChan later. - NotifyExitHopHtlc(payHash lntypes.Hash, paidAmount lnwire.MilliSatoshi, - expiry uint32, currentHeight int32, - circuitKey channeldb.CircuitKey, hodlChan chan<- interface{}, - payload invoices.Payload) (invoices.HtlcResolution, er.R) - - // HodlUnsubscribeAll unsubscribes from all htlc resolutions. - HodlUnsubscribeAll(subscriber chan<- interface{}) -} - -// OnionProcessor is an interface used to decode onion blobs. -type OnionProcessor interface { - // ReconstructHopIterator attempts to decode a valid sphinx packet from - // the passed io.Reader instance. 
- ReconstructHopIterator(r io.Reader, rHash []byte) (hop.Iterator, er.R) -} - -// UtxoSweeper defines the sweep functions that contract court requires. -type UtxoSweeper interface { - // SweepInput sweeps inputs back into the wallet. - SweepInput(input input.Input, params sweep.Params) (chan sweep.Result, - er.R) - - // CreateSweepTx accepts a list of inputs and signs and generates a txn - // that spends from them. This method also makes an accurate fee - // estimate before generating the required witnesses. - CreateSweepTx(inputs []input.Input, feePref sweep.FeePreference, - currentBlockHeight uint32) (*wire.MsgTx, er.R) - - // RelayFeePerKW returns the minimum fee rate required for transactions - // to be relayed. - RelayFeePerKW() chainfee.SatPerKWeight - - // UpdateParams allows updating the sweep parameters of a pending input - // in the UtxoSweeper. This function can be used to provide an updated - // fee preference that will be used for a new sweep transaction of the - // input that will act as a replacement transaction (RBF) of the - // original sweeping transaction, if any. 
- UpdateParams(input wire.OutPoint, params sweep.ParamsUpdate) ( - chan sweep.Result, er.R) -} diff --git a/lnd/contractcourt/mock_registry_test.go b/lnd/contractcourt/mock_registry_test.go deleted file mode 100644 index bd187b8e..00000000 --- a/lnd/contractcourt/mock_registry_test.go +++ /dev/null @@ -1,52 +0,0 @@ -package contractcourt - -import ( - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/invoices" - "github.com/pkt-cash/pktd/lnd/lntypes" - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -type notifyExitHopData struct { - payHash lntypes.Hash - paidAmount lnwire.MilliSatoshi - hodlChan chan<- interface{} - expiry uint32 - currentHeight int32 -} - -type mockRegistry struct { - notifyChan chan notifyExitHopData - notifyErr *er.ErrorCode - notifyResolution invoices.HtlcResolution -} - -func (r *mockRegistry) NotifyExitHopHtlc(payHash lntypes.Hash, - paidAmount lnwire.MilliSatoshi, expiry uint32, currentHeight int32, - circuitKey channeldb.CircuitKey, hodlChan chan<- interface{}, - payload invoices.Payload) (invoices.HtlcResolution, er.R) { - - r.notifyChan <- notifyExitHopData{ - hodlChan: hodlChan, - payHash: payHash, - paidAmount: paidAmount, - expiry: expiry, - currentHeight: currentHeight, - } - - var e er.R - if r.notifyErr != nil { - e = r.notifyErr.Default() - } - - return r.notifyResolution, e -} - -func (r *mockRegistry) HodlUnsubscribeAll(subscriber chan<- interface{}) {} - -func (r *mockRegistry) LookupInvoice(lntypes.Hash) (channeldb.Invoice, - er.R) { - - return channeldb.Invoice{}, channeldb.ErrInvoiceNotFound.Default() -} diff --git a/lnd/contractcourt/utils_test.go b/lnd/contractcourt/utils_test.go deleted file mode 100644 index 2bf81b41..00000000 --- a/lnd/contractcourt/utils_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package contractcourt - -import ( - "os" - "runtime/pprof" - "testing" - "time" -) - -// timeout implements a test level timeout. 
-func timeout(t *testing.T) func() { - done := make(chan struct{}) - go func() { - select { - case <-time.After(5 * time.Second): - pprof.Lookup("goroutine").WriteTo(os.Stdout, 1) - - panic("test timeout") - case <-done: - } - }() - - return func() { - close(done) - } -} diff --git a/lnd/contrib/lncli.bash-completion b/lnd/contrib/lncli.bash-completion deleted file mode 100644 index 8388942f..00000000 --- a/lnd/contrib/lncli.bash-completion +++ /dev/null @@ -1,53 +0,0 @@ -# bash programmable completion for lncli -# copy to /etc/bash_completion.d and restart your shell session -# Copyright (c) by Andreas M. Antonopoulos -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. - -_lncli() { - local cur prev words=() cword - local lncli - - # lncli might not be in $PATH - lncli="$1" - - COMPREPLY=() - _get_comp_words_by_ref -n = cur prev words cword - - case "$prev" in - # example of further completion - newaddress) - COMPREPLY=( $( compgen -W "p2wkh np2wkh" -- "$cur" ) ) - return 0 - ;; - esac - - case "$cur" in - -*=*) # prevent nonsense completions - return 0 - ;; - *) - local helpopts globalcmds - - # get the global options, starting with -- - if [[ -z "$cur" || "$cur" =~ ^- ]]; then - globalcmds=$($lncli help 2>&1 | awk '$1 ~ /^-/ { sub(/,/, ""); print $1}') - fi - - # get the regular commands - if [[ -z "$cur" || "$cur" =~ ^[a-z] ]]; then - helpopts=$($lncli help 2>/dev/null | awk '$1 ~ /^[a-z]/ { print $1; }' ) - fi - - COMPREPLY=( $( compgen -W "$helpopts $globalcmds" -X "*," -- "$cur" ) ) - esac -} && -complete -F _lncli lncli - -# Local variables: -# mode: shell-script -# sh-basic-offset: 4 -# sh-indent-comment: t -# indent-tabs-mode: nil -# End: -# ex: ts=4 sw=4 et filetype=sh diff --git a/lnd/dev.Dockerfile b/lnd/dev.Dockerfile deleted file mode 100644 index db55ce12..00000000 --- a/lnd/dev.Dockerfile +++ /dev/null @@ -1,37 +0,0 @@ -FROM golang:1.13-alpine as builder - -LABEL 
maintainer="Olaoluwa Osuntokun " - -# Force Go to use the cgo based DNS resolver. This is required to ensure DNS -# queries required to connect to linked containers succeed. -ENV GODEBUG netdns=cgo - -# Install dependencies and install/build lnd. -RUN apk add --no-cache --update alpine-sdk \ - git \ - make - -# Copy in the local repository to build from. -COPY . /go/src/github.com/lightningnetwork/lnd - -RUN cd /go/src/github.com/lightningnetwork/lnd \ -&& make \ -&& make install tags="signrpc walletrpc chainrpc invoicesrpc" - -# Start a new, final image to reduce size. -FROM alpine as final - -# Expose lnd ports (server, rpc). -EXPOSE 9735 10009 - -# Copy the binaries and entrypoint from the builder image. -COPY --from=builder /go/bin/lncli /bin/ -COPY --from=builder /go/bin/lnd /bin/ - -# Add bash. -RUN apk add --no-cache \ - bash - -# Copy the entrypoint script. -COPY "docker/lnd/start-lnd.sh" . -RUN chmod +x start-lnd.sh diff --git a/lnd/discovery/bootstrapper.go b/lnd/discovery/bootstrapper.go deleted file mode 100644 index a1681cd5..00000000 --- a/lnd/discovery/bootstrapper.go +++ /dev/null @@ -1,535 +0,0 @@ -package discovery - -import ( - "bytes" - "crypto/rand" - "crypto/sha256" - "fmt" - prand "math/rand" - "net" - "strconv" - "strings" - "time" - - "github.com/davecgh/go-spew/spew" - "github.com/miekg/dns" - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil/bech32" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/autopilot" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/tor" - "github.com/pkt-cash/pktd/pktlog/log" -) - -func init() { - prand.Seed(time.Now().Unix()) -} - -// NetworkPeerBootstrapper is an interface that represents an initial peer -// bootstrap mechanism. This interface is to be used to bootstrap a new peer to -// the connection by providing it with the pubkey+address of a set of existing -// peers on the network. 
Several bootstrap mechanisms can be implemented such -// as DNS, in channel graph, DHT's, etc. -type NetworkPeerBootstrapper interface { - // SampleNodeAddrs uniformly samples a set of specified address from - // the network peer bootstrapper source. The num addrs field passed in - // denotes how many valid peer addresses to return. The passed set of - // node nodes allows the caller to ignore a set of nodes perhaps - // because they already have connections established. - SampleNodeAddrs(numAddrs uint32, - ignore map[autopilot.NodeID]struct{}) ([]*lnwire.NetAddress, er.R) - - // Name returns a human readable string which names the concrete - // implementation of the NetworkPeerBootstrapper. - Name() string -} - -// MultiSourceBootstrap attempts to utilize a set of NetworkPeerBootstrapper -// passed in to return the target (numAddrs) number of peer addresses that can -// be used to bootstrap a peer just joining the Lightning Network. Each -// bootstrapper will be queried successively until the target amount is met. If -// the ignore map is populated, then the bootstrappers will be instructed to -// skip those nodes. -func MultiSourceBootstrap(ignore map[autopilot.NodeID]struct{}, numAddrs uint32, - bootstrappers ...NetworkPeerBootstrapper) ([]*lnwire.NetAddress, er.R) { - - // We'll randomly shuffle our bootstrappers before querying them in - // order to avoid from querying the same bootstrapper method over and - // over, as some of these might tend to provide better/worse results - // than others. - bootstrappers = shuffleBootstrappers(bootstrappers) - - var addrs []*lnwire.NetAddress - for _, bootstrapper := range bootstrappers { - // If we already have enough addresses, then we can exit early - // w/o querying the additional bootstrappers. 
- if uint32(len(addrs)) >= numAddrs { - break - } - - log.Infof("Attempting to bootstrap with: %v", bootstrapper.Name()) - - // If we still need additional addresses, then we'll compute - // the number of address remaining that we need to fetch. - numAddrsLeft := numAddrs - uint32(len(addrs)) - log.Tracef("Querying for %v addresses", numAddrsLeft) - netAddrs, err := bootstrapper.SampleNodeAddrs(numAddrsLeft, ignore) - if err != nil { - // If we encounter an error with a bootstrapper, then - // we'll continue on to the next available - // bootstrapper. - log.Errorf("Unable to query bootstrapper %v: %v", - bootstrapper.Name(), err) - continue - } - - addrs = append(addrs, netAddrs...) - } - - if len(addrs) == 0 { - return nil, er.New("no addresses found") - } - - log.Infof("Obtained %v addrs to bootstrap network with", len(addrs)) - - return addrs, nil -} - -// shuffleBootstrappers shuffles the set of bootstrappers in order to avoid -// querying the same bootstrapper over and over. To shuffle the set of -// candidates, we use a version of the Fisher–Yates shuffle algorithm. -func shuffleBootstrappers(candidates []NetworkPeerBootstrapper) []NetworkPeerBootstrapper { - shuffled := make([]NetworkPeerBootstrapper, len(candidates)) - perm := prand.Perm(len(candidates)) - - for i, v := range perm { - shuffled[v] = candidates[i] - } - - return shuffled -} - -// ChannelGraphBootstrapper is an implementation of the NetworkPeerBootstrapper -// which attempts to retrieve advertised peers directly from the active channel -// graph. This instance requires a backing autopilot.ChannelGraph instance in -// order to operate properly. -type ChannelGraphBootstrapper struct { - chanGraph autopilot.ChannelGraph - - // hashAccumulator is a set of 32 random bytes that are read upon the - // creation of the channel graph bootstrapper. We use this value to - // randomly select nodes within the known graph to connect to. 
After - // each selection, we rotate the accumulator by hashing it with itself. - hashAccumulator [32]byte - - tried map[autopilot.NodeID]struct{} -} - -// A compile time assertion to ensure that ChannelGraphBootstrapper meets the -// NetworkPeerBootstrapper interface. -var _ NetworkPeerBootstrapper = (*ChannelGraphBootstrapper)(nil) - -// NewGraphBootstrapper returns a new instance of a ChannelGraphBootstrapper -// backed by an active autopilot.ChannelGraph instance. This type of network -// peer bootstrapper will use the authenticated nodes within the known channel -// graph to bootstrap connections. -func NewGraphBootstrapper(cg autopilot.ChannelGraph) (NetworkPeerBootstrapper, er.R) { - - c := &ChannelGraphBootstrapper{ - chanGraph: cg, - tried: make(map[autopilot.NodeID]struct{}), - } - - if _, err := rand.Read(c.hashAccumulator[:]); err != nil { - return nil, er.E(err) - } - - return c, nil -} - -// SampleNodeAddrs uniformly samples a set of specified address from the -// network peer bootstrapper source. The num addrs field passed in denotes how -// many valid peer addresses to return. -// -// NOTE: Part of the NetworkPeerBootstrapper interface. -func (c *ChannelGraphBootstrapper) SampleNodeAddrs(numAddrs uint32, - ignore map[autopilot.NodeID]struct{}) ([]*lnwire.NetAddress, er.R) { - - // We'll merge the ignore map with our currently selected map in order - // to ensure we don't return any duplicate nodes. - for n := range ignore { - c.tried[n] = struct{}{} - } - - // In order to bootstrap, we'll iterate all the nodes in the channel - // graph, accumulating nodes until either we go through all active - // nodes, or we reach our limit. We ensure that we meet the randomly - // sample constraint as we maintain an xor accumulator to ensure we - // randomly sample nodes independent of the iteration of the channel - // graph. 
- sampleAddrs := func() ([]*lnwire.NetAddress, er.R) { - var a []*lnwire.NetAddress - - err := c.chanGraph.ForEachNode(func(node autopilot.Node) er.R { - nID := autopilot.NodeID(node.PubKey()) - if _, ok := c.tried[nID]; ok { - return nil - } - - // We'll select the first node we come across who's - // public key is less than our current accumulator - // value. When comparing, we skip the first byte as - // it's 50/50. If it isn't less, than then we'll - // continue forward. - nodePubKeyBytes := node.PubKey() - if bytes.Compare(c.hashAccumulator[:], nodePubKeyBytes[1:]) > 0 { - return nil - } - - for _, nodeAddr := range node.Addrs() { - // If we haven't yet reached our limit, then - // we'll copy over the details of this node - // into the set of addresses to be returned. - switch nodeAddr.(type) { - case *net.TCPAddr, *tor.OnionAddr: - default: - // If this isn't a valid address - // supported by the protocol, then we'll - // skip this node. - return nil - } - - nodePub, err := btcec.ParsePubKey( - nodePubKeyBytes[:], btcec.S256(), - ) - if err != nil { - return err - } - - // At this point, we've found an eligible node, - // so we'll return early with our shibboleth - // error. - a = append(a, &lnwire.NetAddress{ - IdentityKey: nodePub, - Address: nodeAddr, - }) - } - - c.tried[nID] = struct{}{} - - return er.LoopBreak - }) - if err != nil && !er.IsLoopBreak(err) { - return nil, err - } - - return a, nil - } - - // We'll loop and sample new addresses from the graph source until - // we've reached our target number of outbound connections or we hit 50 - // attempts, which ever comes first. - var ( - addrs []*lnwire.NetAddress - tries uint32 - ) - for tries < 30 && uint32(len(addrs)) < numAddrs { - sampleAddrs, err := sampleAddrs() - if err != nil { - return nil, err - } - - tries++ - - // We'll now rotate our hash accumulator one value forwards. 
- c.hashAccumulator = sha256.Sum256(c.hashAccumulator[:]) - - // If this attempt didn't yield any addresses, then we'll exit - // early. - if len(sampleAddrs) == 0 { - continue - } - - addrs = append(addrs, sampleAddrs...) - } - - log.Tracef("Ending hash accumulator state: %x", c.hashAccumulator) - - return addrs, nil -} - -// Name returns a human readable string which names the concrete implementation -// of the NetworkPeerBootstrapper. -// -// NOTE: Part of the NetworkPeerBootstrapper interface. -func (c *ChannelGraphBootstrapper) Name() string { - return "Authenticated Channel Graph" -} - -// DNSSeedBootstrapper as an implementation of the NetworkPeerBootstrapper -// interface which implements peer bootstrapping via a special DNS seed as -// defined in BOLT-0010. For further details concerning Lightning's current DNS -// boot strapping protocol, see this link: -// * https://github.com/lightningnetwork/lightning-rfc/blob/master/10-dns-bootstrap.md -type DNSSeedBootstrapper struct { - // dnsSeeds is an array of two tuples we'll use for bootstrapping. The - // first item in the tuple is the primary host we'll use to attempt the - // SRV lookup we require. If we're unable to receive a response over - // UDP, then we'll fall back to manual TCP resolution. The second item - // in the tuple is a special A record that we'll query in order to - // receive the IP address of the current authoritative DNS server for - // the network seed. - dnsSeeds [][2]string - net tor.Net - - // timeout is the maximum amount of time a dial will wait for a connect to - // complete. - timeout time.Duration -} - -// A compile time assertion to ensure that DNSSeedBootstrapper meets the -// NetworkPeerjBootstrapper interface. -var _ NetworkPeerBootstrapper = (*ChannelGraphBootstrapper)(nil) - -// NewDNSSeedBootstrapper returns a new instance of the DNSSeedBootstrapper. 
-// The set of passed seeds should point to DNS servers that properly implement -// Lightning's DNS peer bootstrapping protocol as defined in BOLT-0010. The set -// of passed DNS seeds should come in pairs, with the second host name to be -// used as a fallback for manual TCP resolution in the case of an error -// receiving the UDP response. The second host should return a single A record -// with the IP address of the authoritative name server. -func NewDNSSeedBootstrapper( - seeds [][2]string, net tor.Net, - timeout time.Duration) NetworkPeerBootstrapper { - return &DNSSeedBootstrapper{dnsSeeds: seeds, net: net, timeout: timeout} -} - -// fallBackSRVLookup attempts to manually query for SRV records we need to -// properly bootstrap. We do this by querying the special record at the "soa." -// sub-domain of supporting DNS servers. The retuned IP address will be the IP -// address of the authoritative DNS server. Once we have this IP address, we'll -// connect manually over TCP to request the SRV record. This is necessary as -// the records we return are currently too large for a class of resolvers, -// causing them to be filtered out. The targetEndPoint is the original end -// point that was meant to be hit. -func (d *DNSSeedBootstrapper) fallBackSRVLookup(soaShim string, - targetEndPoint string) ([]*net.SRV, er.R) { - - log.Tracef("Attempting to query fallback DNS seed") - - // First, we'll lookup the IP address of the server that will act as - // our shim. - addrs, err := d.net.LookupHost(soaShim) - if err != nil { - return nil, err - } - - // Once we have the IP address, we'll establish a TCP connection using - // port 53. 
- dnsServer := net.JoinHostPort(addrs[0], "53") - conn, err := d.net.Dial("tcp", dnsServer, d.timeout) - if err != nil { - return nil, err - } - - dnsHost := fmt.Sprintf("_nodes._tcp.%v.", targetEndPoint) - dnsConn := &dns.Conn{Conn: conn} - defer dnsConn.Close() - - // With the connection established, we'll craft our SRV query, write - // toe request, then wait for the server to give our response. - msg := new(dns.Msg) - msg.SetQuestion(dnsHost, dns.TypeSRV) - if err := dnsConn.WriteMsg(msg); err != nil { - return nil, er.E(err) - } - resp, errr := dnsConn.ReadMsg() - if errr != nil { - return nil, er.E(errr) - } - - // If the message response code was not the success code, fail. - if resp.Rcode != dns.RcodeSuccess { - return nil, er.Errorf("unsuccessful SRV request, "+ - "received: %v", resp.Rcode) - } - - // Retrieve the RR(s) of the Answer section, and covert to the format - // that net.LookupSRV would normally return. - var rrs []*net.SRV - for _, rr := range resp.Answer { - srv := rr.(*dns.SRV) - rrs = append(rrs, &net.SRV{ - Target: srv.Target, - Port: srv.Port, - Priority: srv.Priority, - Weight: srv.Weight, - }) - } - - return rrs, nil -} - -// SampleNodeAddrs uniformly samples a set of specified address from the -// network peer bootstrapper source. The num addrs field passed in denotes how -// many valid peer addresses to return. The set of DNS seeds are used -// successively to retrieve eligible target nodes. -func (d *DNSSeedBootstrapper) SampleNodeAddrs(numAddrs uint32, - ignore map[autopilot.NodeID]struct{}) ([]*lnwire.NetAddress, er.R) { - - var netAddrs []*lnwire.NetAddress - - // We'll try all the registered DNS seeds, exiting early if one of them - // gives us all the peers we need. - // - // TODO(roasbeef): should combine results from both -search: - for _, dnsSeedTuple := range d.dnsSeeds { - // We'll first query the seed with an SRV record so we can - // obtain a random sample of the encoded public keys of nodes. 
- // We use the lndLookupSRV function for this task. - primarySeed := dnsSeedTuple[0] - _, addrs, err := d.net.LookupSRV( - "nodes", "tcp", primarySeed, d.timeout, - ) - if err != nil { - log.Tracef("Unable to lookup SRV records via "+ - "primary seed (%v): %v", primarySeed, err) - - log.Trace("Falling back to secondary") - - // If the host of the secondary seed is blank, then - // we'll bail here as we can't proceed. - if dnsSeedTuple[1] == "" { - log.Tracef("DNS seed %v has no secondary, "+ - "skipping fallback", primarySeed) - continue - } - - // If we get an error when trying to query via the - // primary seed, we'll fallback to the secondary seed - // before concluding failure. - soaShim := dnsSeedTuple[1] - addrs, err = d.fallBackSRVLookup( - soaShim, primarySeed, - ) - if err != nil { - log.Tracef("Unable to query fall "+ - "back dns seed (%v): %v", soaShim, err) - continue - } - - log.Tracef("Successfully queried fallback DNS seed") - } - - log.Tracef("Retrieved SRV records from dns seed: %v", - log.C(func() string { - return spew.Sdump(addrs) - }), - ) - - // Next, we'll need to issue an A record request for each of - // the nodes, skipping it if nothing comes back. - for _, nodeSrv := range addrs { - if uint32(len(netAddrs)) >= numAddrs { - break search - } - - // With the SRV target obtained, we'll now perform - // another query to obtain the IP address for the - // matching bech32 encoded node key. We use the - // lndLookup function for this task. - bechNodeHost := nodeSrv.Target - addrs, err := d.net.LookupHost(bechNodeHost) - if err != nil { - return nil, err - } - - if len(addrs) == 0 { - log.Tracef("No addresses for %v, skipping", - bechNodeHost) - continue - } - - log.Tracef("Attempting to convert: %v", bechNodeHost) - - // If the host isn't correctly formatted, then we'll - // skip it. 
- if len(bechNodeHost) == 0 || - !strings.Contains(bechNodeHost, ".") { - - continue - } - - // If we have a set of valid addresses, then we'll need - // to parse the public key from the original bech32 - // encoded string. - bechNode := strings.Split(bechNodeHost, ".") - _, nodeBytes5Bits, err := bech32.Decode(bechNode[0]) - if err != nil { - return nil, err - } - - // Once we have the bech32 decoded pubkey, we'll need - // to convert the 5-bit word grouping into our regular - // 8-bit word grouping so we can convert it into a - // public key. - nodeBytes, err := bech32.ConvertBits( - nodeBytes5Bits, 5, 8, false, - ) - if err != nil { - return nil, err - } - nodeKey, err := btcec.ParsePubKey( - nodeBytes, btcec.S256(), - ) - if err != nil { - return nil, err - } - - // If we have an ignore list, and this node is in the - // ignore list, then we'll go to the next candidate. - if ignore != nil { - nID := autopilot.NewNodeID(nodeKey) - if _, ok := ignore[nID]; ok { - continue - } - } - - // Finally we'll convert the host:port peer to a proper - // TCP address to use within the lnwire.NetAddress. We - // don't need to use the lndResolveTCP function here - // because we already have the host:port peer. - addr := net.JoinHostPort( - addrs[0], - strconv.FormatUint(uint64(nodeSrv.Port), 10), - ) - tcpAddr, errr := net.ResolveTCPAddr("tcp", addr) - if errr != nil { - return nil, er.E(errr) - } - - // Finally, with all the information parsed, we'll - // return this fully valid address as a connection - // attempt. - lnAddr := &lnwire.NetAddress{ - IdentityKey: nodeKey, - Address: tcpAddr, - } - - log.Tracef("Obtained %v as valid reachable "+ - "node", lnAddr) - - netAddrs = append(netAddrs, lnAddr) - } - } - - return netAddrs, nil -} - -// Name returns a human readable string which names the concrete -// implementation of the NetworkPeerBootstrapper. 
-func (d *DNSSeedBootstrapper) Name() string { - return fmt.Sprintf("BOLT-0010 DNS Seed: %v", d.dnsSeeds) -} diff --git a/lnd/discovery/chan_series.go b/lnd/discovery/chan_series.go deleted file mode 100644 index b69b0be5..00000000 --- a/lnd/discovery/chan_series.go +++ /dev/null @@ -1,350 +0,0 @@ -package discovery - -import ( - "time" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/netann" - "github.com/pkt-cash/pktd/lnd/routing/route" - "github.com/pkt-cash/pktd/pktlog/log" -) - -// ChannelGraphTimeSeries is an interface that provides time and block based -// querying into our view of the channel graph. New channels will have -// monotonically increasing block heights, and new channel updates will have -// increasing timestamps. Once we connect to a peer, we'll use the methods in -// this interface to determine if we're already in sync, or need to request -// some new information from them. -type ChannelGraphTimeSeries interface { - // HighestChanID should return the channel ID of the channel we know of - // that's furthest in the target chain. This channel will have a block - // height that's close to the current tip of the main chain as we - // know it. We'll use this to start our QueryChannelRange dance with - // the remote node. - HighestChanID(chain chainhash.Hash) (*lnwire.ShortChannelID, er.R) - - // UpdatesInHorizon returns all known channel and node updates with an - // update timestamp between the start time and end time. We'll use this - // to catch up a remote node to the set of channel updates that they - // may have missed out on within the target chain. - UpdatesInHorizon(chain chainhash.Hash, - startTime time.Time, endTime time.Time) ([]lnwire.Message, er.R) - - // FilterKnownChanIDs takes a target chain, and a set of channel ID's, - // and returns a filtered set of chan ID's. 
This filtered set of chan - // ID's represents the ID's that we don't know of which were in the - // passed superSet. - FilterKnownChanIDs(chain chainhash.Hash, - superSet []lnwire.ShortChannelID) ([]lnwire.ShortChannelID, er.R) - - // FilterChannelRange returns the set of channels that we created - // between the start height and the end height. We'll use this to to a - // remote peer's QueryChannelRange message. - FilterChannelRange(chain chainhash.Hash, - startHeight, endHeight uint32) ([]lnwire.ShortChannelID, er.R) - - // FetchChanAnns returns a full set of channel announcements as well as - // their updates that match the set of specified short channel ID's. - // We'll use this to reply to a QueryShortChanIDs message sent by a - // remote peer. The response will contain a unique set of - // ChannelAnnouncements, the latest ChannelUpdate for each of the - // announcements, and a unique set of NodeAnnouncements. - FetchChanAnns(chain chainhash.Hash, - shortChanIDs []lnwire.ShortChannelID) ([]lnwire.Message, er.R) - - // FetchChanUpdates returns the latest channel update messages for the - // specified short channel ID. If no channel updates are known for the - // channel, then an empty slice will be returned. - FetchChanUpdates(chain chainhash.Hash, - shortChanID lnwire.ShortChannelID) ([]*lnwire.ChannelUpdate, er.R) -} - -// ChanSeries is an implementation of the ChannelGraphTimeSeries -// interface backed by the channeldb ChannelGraph database. We'll provide this -// implementation to the AuthenticatedGossiper so it can properly use the -// in-protocol channel range queries to quickly and efficiently synchronize our -// channel state with all peers. -type ChanSeries struct { - graph *channeldb.ChannelGraph -} - -// NewChanSeries constructs a new ChanSeries backed by a channeldb.ChannelGraph. -// The returned ChanSeries implements the ChannelGraphTimeSeries interface. 
-func NewChanSeries(graph *channeldb.ChannelGraph) *ChanSeries { - return &ChanSeries{ - graph: graph, - } -} - -// HighestChanID should return is the channel ID of the channel we know of -// that's furthest in the target chain. This channel will have a block height -// that's close to the current tip of the main chain as we know it. We'll use -// this to start our QueryChannelRange dance with the remote node. -// -// NOTE: This is part of the ChannelGraphTimeSeries interface. -func (c *ChanSeries) HighestChanID(chain chainhash.Hash) (*lnwire.ShortChannelID, er.R) { - chanID, err := c.graph.HighestChanID() - if err != nil { - return nil, err - } - - shortChanID := lnwire.NewShortChanIDFromInt(chanID) - return &shortChanID, nil -} - -// UpdatesInHorizon returns all known channel and node updates with an update -// timestamp between the start time and end time. We'll use this to catch up a -// remote node to the set of channel updates that they may have missed out on -// within the target chain. -// -// NOTE: This is part of the ChannelGraphTimeSeries interface. -func (c *ChanSeries) UpdatesInHorizon(chain chainhash.Hash, - startTime time.Time, endTime time.Time) ([]lnwire.Message, er.R) { - - var updates []lnwire.Message - - // First, we'll query for all the set of channels that have an update - // that falls within the specified horizon. - chansInHorizon, err := c.graph.ChanUpdatesInHorizon( - startTime, endTime, - ) - if err != nil { - return nil, err - } - for _, channel := range chansInHorizon { - // If the channel hasn't been fully advertised yet, or is a - // private channel, then we'll skip it as we can't construct a - // full authentication proof if one is requested. 
- if channel.Info.AuthProof == nil { - continue - } - - chanAnn, edge1, edge2, err := netann.CreateChanAnnouncement( - channel.Info.AuthProof, channel.Info, channel.Policy1, - channel.Policy2, - ) - if err != nil { - return nil, err - } - - updates = append(updates, chanAnn) - if edge1 != nil { - updates = append(updates, edge1) - } - if edge2 != nil { - updates = append(updates, edge2) - } - } - - // Next, we'll send out all the node announcements that have an update - // within the horizon as well. We send these second to ensure that they - // follow any active channels they have. - nodeAnnsInHorizon, err := c.graph.NodeUpdatesInHorizon( - startTime, endTime, - ) - if err != nil { - return nil, err - } - for _, nodeAnn := range nodeAnnsInHorizon { - // Ensure we only forward nodes that are publicly advertised to - // prevent leaking information about nodes. - isNodePublic, err := c.graph.IsPublicNode(nodeAnn.PubKeyBytes) - if err != nil { - log.Errorf("Unable to determine if node %x is "+ - "advertised: %v", nodeAnn.PubKeyBytes, err) - continue - } - - if !isNodePublic { - log.Tracef("Skipping forwarding announcement for "+ - "node %x due to being unadvertised", - nodeAnn.PubKeyBytes) - continue - } - - nodeUpdate, err := nodeAnn.NodeAnnouncement(true) - if err != nil { - return nil, err - } - - updates = append(updates, nodeUpdate) - } - - return updates, nil -} - -// FilterKnownChanIDs takes a target chain, and a set of channel ID's, and -// returns a filtered set of chan ID's. This filtered set of chan ID's -// represents the ID's that we don't know of which were in the passed superSet. -// -// NOTE: This is part of the ChannelGraphTimeSeries interface. 
-func (c *ChanSeries) FilterKnownChanIDs(chain chainhash.Hash, - superSet []lnwire.ShortChannelID) ([]lnwire.ShortChannelID, er.R) { - - chanIDs := make([]uint64, 0, len(superSet)) - for _, chanID := range superSet { - chanIDs = append(chanIDs, chanID.ToUint64()) - } - - newChanIDs, err := c.graph.FilterKnownChanIDs(chanIDs) - if err != nil { - return nil, err - } - - filteredIDs := make([]lnwire.ShortChannelID, 0, len(newChanIDs)) - for _, chanID := range newChanIDs { - filteredIDs = append( - filteredIDs, lnwire.NewShortChanIDFromInt(chanID), - ) - } - - return filteredIDs, nil -} - -// FilterChannelRange returns the set of channels that we created between the -// start height and the end height. We'll use this respond to a remote peer's -// QueryChannelRange message. -// -// NOTE: This is part of the ChannelGraphTimeSeries interface. -func (c *ChanSeries) FilterChannelRange(chain chainhash.Hash, - startHeight, endHeight uint32) ([]lnwire.ShortChannelID, er.R) { - - chansInRange, err := c.graph.FilterChannelRange(startHeight, endHeight) - if err != nil { - return nil, err - } - - chanResp := make([]lnwire.ShortChannelID, 0, len(chansInRange)) - for _, chanID := range chansInRange { - chanResp = append( - chanResp, lnwire.NewShortChanIDFromInt(chanID), - ) - } - - return chanResp, nil -} - -// FetchChanAnns returns a full set of channel announcements as well as their -// updates that match the set of specified short channel ID's. We'll use this -// to reply to a QueryShortChanIDs message sent by a remote peer. The response -// will contain a unique set of ChannelAnnouncements, the latest ChannelUpdate -// for each of the announcements, and a unique set of NodeAnnouncements. -// -// NOTE: This is part of the ChannelGraphTimeSeries interface. 
-func (c *ChanSeries) FetchChanAnns(chain chainhash.Hash, - shortChanIDs []lnwire.ShortChannelID) ([]lnwire.Message, er.R) { - - chanIDs := make([]uint64, 0, len(shortChanIDs)) - for _, chanID := range shortChanIDs { - chanIDs = append(chanIDs, chanID.ToUint64()) - } - - channels, err := c.graph.FetchChanInfos(chanIDs) - if err != nil { - return nil, err - } - - // We'll use this map to ensure we don't send the same node - // announcement more than one time as one node may have many channel - // anns we'll need to send. - nodePubsSent := make(map[route.Vertex]struct{}) - - chanAnns := make([]lnwire.Message, 0, len(channels)*3) - for _, channel := range channels { - // If the channel doesn't have an authentication proof, then we - // won't send it over as it may not yet be finalized, or be a - // non-advertised channel. - if channel.Info.AuthProof == nil { - continue - } - - chanAnn, edge1, edge2, err := netann.CreateChanAnnouncement( - channel.Info.AuthProof, channel.Info, channel.Policy1, - channel.Policy2, - ) - if err != nil { - return nil, err - } - - chanAnns = append(chanAnns, chanAnn) - if edge1 != nil { - chanAnns = append(chanAnns, edge1) - - // If this edge has a validated node announcement, that - // we haven't yet sent, then we'll send that as well. - nodePub := channel.Policy1.Node.PubKeyBytes - hasNodeAnn := channel.Policy1.Node.HaveNodeAnnouncement - if _, ok := nodePubsSent[nodePub]; !ok && hasNodeAnn { - nodeAnn, err := channel.Policy1.Node.NodeAnnouncement(true) - if err != nil { - return nil, err - } - - chanAnns = append(chanAnns, nodeAnn) - nodePubsSent[nodePub] = struct{}{} - } - } - if edge2 != nil { - chanAnns = append(chanAnns, edge2) - - // If this edge has a validated node announcement, that - // we haven't yet sent, then we'll send that as well. 
- nodePub := channel.Policy2.Node.PubKeyBytes - hasNodeAnn := channel.Policy2.Node.HaveNodeAnnouncement - if _, ok := nodePubsSent[nodePub]; !ok && hasNodeAnn { - nodeAnn, err := channel.Policy2.Node.NodeAnnouncement(true) - if err != nil { - return nil, err - } - - chanAnns = append(chanAnns, nodeAnn) - nodePubsSent[nodePub] = struct{}{} - } - } - } - - return chanAnns, nil -} - -// FetchChanUpdates returns the latest channel update messages for the -// specified short channel ID. If no channel updates are known for the channel, -// then an empty slice will be returned. -// -// NOTE: This is part of the ChannelGraphTimeSeries interface. -func (c *ChanSeries) FetchChanUpdates(chain chainhash.Hash, - shortChanID lnwire.ShortChannelID) ([]*lnwire.ChannelUpdate, er.R) { - - chanInfo, e1, e2, err := c.graph.FetchChannelEdgesByID( - shortChanID.ToUint64(), - ) - if err != nil { - return nil, err - } - - chanUpdates := make([]*lnwire.ChannelUpdate, 0, 2) - if e1 != nil { - chanUpdate, err := netann.ChannelUpdateFromEdge(chanInfo, e1) - if err != nil { - return nil, err - } - - chanUpdates = append(chanUpdates, chanUpdate) - } - if e2 != nil { - chanUpdate, err := netann.ChannelUpdateFromEdge(chanInfo, e2) - if err != nil { - return nil, err - } - - chanUpdates = append(chanUpdates, chanUpdate) - } - - return chanUpdates, nil -} - -// A compile-time assertion to ensure that ChanSeries meets the -// ChannelGraphTimeSeries interface. 
-var _ ChannelGraphTimeSeries = (*ChanSeries)(nil) diff --git a/lnd/discovery/gossiper.go b/lnd/discovery/gossiper.go deleted file mode 100644 index c92ee54c..00000000 --- a/lnd/discovery/gossiper.go +++ /dev/null @@ -1,2539 +0,0 @@ -package discovery - -import ( - "bytes" - "runtime" - "sync" - "time" - - "github.com/davecgh/go-spew/spew" - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/lnd/chainntnfs" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/lnpeer" - "github.com/pkt-cash/pktd/lnd/lnwallet" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/multimutex" - "github.com/pkt-cash/pktd/lnd/netann" - "github.com/pkt-cash/pktd/lnd/routing" - "github.com/pkt-cash/pktd/lnd/routing/route" - "github.com/pkt-cash/pktd/lnd/ticker" - "github.com/pkt-cash/pktd/pktlog/log" - "github.com/pkt-cash/pktd/wire" -) - -var ( - Err = er.NewErrorType("lnd.discovery") - // ErrGossiperShuttingDown is an error that is returned if the gossiper - // is in the process of being shut down. - ErrGossiperShuttingDown = Err.CodeWithDetail("ErrGossiperShuttingDown", "gossiper is shutting down") - - // ErrGossipSyncerNotFound signals that we were unable to find an active - // gossip syncer corresponding to a gossip query message received from - // the remote peer. - ErrGossipSyncerNotFound = Err.CodeWithDetail("ErrGossipSyncerNotFound", "gossip syncer not found") -) - -// optionalMsgFields is a set of optional message fields that external callers -// can provide that serve useful when processing a specific network -// announcement. -type optionalMsgFields struct { - capacity *btcutil.Amount - channelPoint *wire.OutPoint -} - -// apply applies the optional fields within the functional options. 
-func (f *optionalMsgFields) apply(optionalMsgFields ...OptionalMsgField) { - for _, optionalMsgField := range optionalMsgFields { - optionalMsgField(f) - } -} - -// OptionalMsgField is a functional option parameter that can be used to provide -// external information that is not included within a network message but serves -// useful when processing it. -type OptionalMsgField func(*optionalMsgFields) - -// ChannelCapacity is an optional field that lets the gossiper know of the -// capacity of a channel. -func ChannelCapacity(capacity btcutil.Amount) OptionalMsgField { - return func(f *optionalMsgFields) { - f.capacity = &capacity - } -} - -// ChannelPoint is an optional field that lets the gossiper know of the outpoint -// of a channel. -func ChannelPoint(op wire.OutPoint) OptionalMsgField { - return func(f *optionalMsgFields) { - f.channelPoint = &op - } -} - -// networkMsg couples a routing related wire message with the peer that -// originally sent it. -type networkMsg struct { - peer lnpeer.Peer - source *btcec.PublicKey - msg lnwire.Message - optionalMsgFields *optionalMsgFields - - isRemote bool - - err chan er.R -} - -// chanPolicyUpdateRequest is a request that is sent to the server when a caller -// wishes to update a particular set of channels. New ChannelUpdate messages -// will be crafted to be sent out during the next broadcast epoch and the fee -// updates committed to the lower layer. -type chanPolicyUpdateRequest struct { - edgesToUpdate []EdgeWithInfo - errChan chan er.R -} - -// Config defines the configuration for the service. ALL elements within the -// configuration MUST be non-nil for the service to carry out its duties. -type Config struct { - // ChainHash is a hash that indicates which resident chain of the - // AuthenticatedGossiper. Any announcements that don't match this - // chain hash will be ignored. 
- // - // TODO(roasbeef): eventually make into map so can de-multiplex - // incoming announcements - // * also need to do same for Notifier - ChainHash chainhash.Hash - - // Router is the subsystem which is responsible for managing the - // topology of lightning network. After incoming channel, node, channel - // updates announcements are validated they are sent to the router in - // order to be included in the LN graph. - Router routing.ChannelGraphSource - - // ChanSeries is an interfaces that provides access to a time series - // view of the current known channel graph. Each GossipSyncer enabled - // peer will utilize this in order to create and respond to channel - // graph time series queries. - ChanSeries ChannelGraphTimeSeries - - // Notifier is used for receiving notifications of incoming blocks. - // With each new incoming block found we process previously premature - // announcements. - // - // TODO(roasbeef): could possibly just replace this with an epoch - // channel. - Notifier chainntnfs.ChainNotifier - - // Broadcast broadcasts a particular set of announcements to all peers - // that the daemon is connected to. If supplied, the exclude parameter - // indicates that the target peer should be excluded from the - // broadcast. - Broadcast func(skips map[route.Vertex]struct{}, - msg ...lnwire.Message) er.R - - // NotifyWhenOnline is a function that allows the gossiper to be - // notified when a certain peer comes online, allowing it to - // retry sending a peer message. - // - // NOTE: The peerChan channel must be buffered. - NotifyWhenOnline func(peerPubKey [33]byte, peerChan chan<- lnpeer.Peer) - - // NotifyWhenOffline is a function that allows the gossiper to be - // notified when a certain peer disconnects, allowing it to request a - // notification for when it reconnects. 
- NotifyWhenOffline func(peerPubKey [33]byte) <-chan struct{} - - // SelfNodeAnnouncement is a function that fetches our own current node - // announcement, for use when determining whether we should update our - // peers about our presence on the network. If the refresh is true, a - // new and updated announcement will be returned. - SelfNodeAnnouncement func(refresh bool) (lnwire.NodeAnnouncement, er.R) - - // ProofMatureDelta the number of confirmations which is needed before - // exchange the channel announcement proofs. - ProofMatureDelta uint32 - - // TrickleDelay the period of trickle timer which flushes to the - // network the pending batch of new announcements we've received since - // the last trickle tick. - TrickleDelay time.Duration - - // RetransmitTicker is a ticker that ticks with a period which - // indicates that we should check if we need re-broadcast any of our - // personal channels. - RetransmitTicker ticker.Ticker - - // RebroadcastInterval is the maximum time we wait between sending out - // channel updates for our active channels and our own node - // announcement. We do this to ensure our active presence on the - // network is known, and we are not being considered a zombie node or - // having zombie channels. - RebroadcastInterval time.Duration - - // WaitingProofStore is a persistent storage of partial channel proof - // announcement messages. We use it to buffer half of the material - // needed to reconstruct a full authenticated channel announcement. - // Once we receive the other half the channel proof, we'll be able to - // properly validate it and re-broadcast it out to the network. - // - // TODO(wilmer): make interface to prevent channeldb dependency. - WaitingProofStore *channeldb.WaitingProofStore - - // MessageStore is a persistent storage of gossip messages which we will - // use to determine which messages need to be resent for a given peer. 
- MessageStore GossipMessageStore - - // AnnSigner is an instance of the MessageSigner interface which will - // be used to manually sign any outgoing channel updates. The signer - // implementation should be backed by the public key of the backing - // Lightning node. - // - // TODO(roasbeef): extract ann crafting + sign from fundingMgr into - // here? - AnnSigner lnwallet.MessageSigner - - // NumActiveSyncers is the number of peers for which we should have - // active syncers with. After reaching NumActiveSyncers, any future - // gossip syncers will be passive. - NumActiveSyncers int - - // RotateTicker is a ticker responsible for notifying the SyncManager - // when it should rotate its active syncers. A single active syncer with - // a chansSynced state will be exchanged for a passive syncer in order - // to ensure we don't keep syncing with the same peers. - RotateTicker ticker.Ticker - - // HistoricalSyncTicker is a ticker responsible for notifying the - // syncManager when it should attempt a historical sync with a gossip - // sync peer. - HistoricalSyncTicker ticker.Ticker - - // ActiveSyncerTimeoutTicker is a ticker responsible for notifying the - // syncManager when it should attempt to start the next pending - // activeSyncer due to the current one not completing its state machine - // within the timeout. - ActiveSyncerTimeoutTicker ticker.Ticker - - // MinimumBatchSize is minimum size of a sub batch of announcement - // messages. - MinimumBatchSize int - - // SubBatchDelay is the delay between sending sub batches of - // gossip messages. - SubBatchDelay time.Duration - - // IgnoreHistoricalFilters will prevent syncers from replying with - // historical data when the remote peer sets a gossip_timestamp_range. - // This prevents ranges with old start times from causing us to dump the - // graph on connect. 
- IgnoreHistoricalFilters bool -} - -// AuthenticatedGossiper is a subsystem which is responsible for receiving -// announcements, validating them and applying the changes to router, syncing -// lightning network with newly connected nodes, broadcasting announcements -// after validation, negotiating the channel announcement proofs exchange and -// handling the premature announcements. All outgoing announcements are -// expected to be properly signed as dictated in BOLT#7, additionally, all -// incoming message are expected to be well formed and signed. Invalid messages -// will be rejected by this struct. -type AuthenticatedGossiper struct { - // Parameters which are needed to properly handle the start and stop of - // the service. - started sync.Once - stopped sync.Once - - // bestHeight is the height of the block at the tip of the main chain - // as we know it. Accesses *MUST* be done with the gossiper's lock - // held. - bestHeight uint32 - - quit chan struct{} - wg sync.WaitGroup - - // cfg is a copy of the configuration struct that the gossiper service - // was initialized with. - cfg *Config - - // blockEpochs encapsulates a stream of block epochs that are sent at - // every new block height. - blockEpochs *chainntnfs.BlockEpochEvent - - // prematureAnnouncements maps a block height to a set of network - // messages which are "premature" from our PoV. A message is premature - // if it claims to be anchored in a block which is beyond the current - // main chain tip as we know it. Premature network messages will be - // processed once the chain tip as we know it extends to/past the - // premature height. - // - // TODO(roasbeef): limit premature networkMsgs to N - prematureAnnouncements map[uint32][]*networkMsg - - // prematureChannelUpdates is a map of ChannelUpdates we have received - // that wasn't associated with any channel we know about. 
We store - // them temporarily, such that we can reprocess them when a - // ChannelAnnouncement for the channel is received. - prematureChannelUpdates map[uint64][]*networkMsg - pChanUpdMtx sync.Mutex - - // networkMsgs is a channel that carries new network broadcasted - // message from outside the gossiper service to be processed by the - // networkHandler. - networkMsgs chan *networkMsg - - // chanPolicyUpdates is a channel that requests to update the - // forwarding policy of a set of channels is sent over. - chanPolicyUpdates chan *chanPolicyUpdateRequest - - // selfKey is the identity public key of the backing Lightning node. - selfKey *btcec.PublicKey - - // channelMtx is used to restrict the database access to one - // goroutine per channel ID. This is done to ensure that when - // the gossiper is handling an announcement, the db state stays - // consistent between when the DB is first read until it's written. - channelMtx *multimutex.Mutex - - rejectMtx sync.RWMutex - recentRejects map[uint64]struct{} - - // syncMgr is a subsystem responsible for managing the gossip syncers - // for peers currently connected. When a new peer is connected, the - // manager will create its accompanying gossip syncer and determine - // whether it should have an activeSync or passiveSync sync type based - // on how many other gossip syncers are currently active. Any activeSync - // gossip syncers are started in a round-robin manner to ensure we're - // not syncing with multiple peers at the same time. - syncMgr *SyncManager - - // reliableSender is a subsystem responsible for handling reliable - // message send requests to peers. This should only be used for channels - // that are unadvertised at the time of handling the message since if it - // is advertised, then peers should be able to get the message from the - // network. 
- reliableSender *reliableSender - - sync.Mutex -} - -// New creates a new AuthenticatedGossiper instance, initialized with the -// passed configuration parameters. -func New(cfg Config, selfKey *btcec.PublicKey) *AuthenticatedGossiper { - gossiper := &AuthenticatedGossiper{ - selfKey: selfKey, - cfg: &cfg, - networkMsgs: make(chan *networkMsg), - quit: make(chan struct{}), - chanPolicyUpdates: make(chan *chanPolicyUpdateRequest), - prematureAnnouncements: make(map[uint32][]*networkMsg), - prematureChannelUpdates: make(map[uint64][]*networkMsg), - channelMtx: multimutex.NewMutex(), - recentRejects: make(map[uint64]struct{}), - syncMgr: newSyncManager(&SyncManagerCfg{ - ChainHash: cfg.ChainHash, - ChanSeries: cfg.ChanSeries, - RotateTicker: cfg.RotateTicker, - HistoricalSyncTicker: cfg.HistoricalSyncTicker, - NumActiveSyncers: cfg.NumActiveSyncers, - IgnoreHistoricalFilters: cfg.IgnoreHistoricalFilters, - }), - } - - gossiper.reliableSender = newReliableSender(&reliableSenderCfg{ - NotifyWhenOnline: cfg.NotifyWhenOnline, - NotifyWhenOffline: cfg.NotifyWhenOffline, - MessageStore: cfg.MessageStore, - IsMsgStale: gossiper.isMsgStale, - }) - - return gossiper -} - -// EdgeWithInfo contains the information that is required to update an edge. -type EdgeWithInfo struct { - // Info describes the channel. - Info *channeldb.ChannelEdgeInfo - - // Edge describes the policy in one direction of the channel. - Edge *channeldb.ChannelEdgePolicy -} - -// PropagateChanPolicyUpdate signals the AuthenticatedGossiper to perform the -// specified edge updates. Updates are done in two stages: first, the -// AuthenticatedGossiper ensures the update has been committed by dependent -// sub-systems, then it signs and broadcasts new updates to the network. A -// mapping between outpoints and updated channel policies is returned, which is -// used to update the forwarding policies of the underlying links. 
-func (d *AuthenticatedGossiper) PropagateChanPolicyUpdate( - edgesToUpdate []EdgeWithInfo) er.R { - - errChan := make(chan er.R, 1) - policyUpdate := &chanPolicyUpdateRequest{ - edgesToUpdate: edgesToUpdate, - errChan: errChan, - } - - select { - case d.chanPolicyUpdates <- policyUpdate: - err := <-errChan - return err - case <-d.quit: - return er.Errorf("AuthenticatedGossiper shutting down") - } -} - -// Start spawns network messages handler goroutine and registers on new block -// notifications in order to properly handle the premature announcements. -func (d *AuthenticatedGossiper) Start() er.R { - var err er.R - d.started.Do(func() { - err = d.start() - }) - return err -} - -func (d *AuthenticatedGossiper) start() er.R { - log.Info("Authenticated Gossiper is starting") - - // First we register for new notifications of newly discovered blocks. - // We do this immediately so we'll later be able to consume any/all - // blocks which were discovered. - blockEpochs, err := d.cfg.Notifier.RegisterBlockEpochNtfn(nil) - if err != nil { - return err - } - d.blockEpochs = blockEpochs - - height, err := d.cfg.Router.CurrentBlockHeight() - if err != nil { - return err - } - d.bestHeight = height - - // Start the reliable sender. In case we had any pending messages ready - // to be sent when the gossiper was last shut down, we must continue on - // our quest to deliver them to their respective peers. - if err := d.reliableSender.Start(); err != nil { - return err - } - - d.syncMgr.Start() - - d.wg.Add(1) - go d.networkHandler() - - return nil -} - -// Stop signals any active goroutines for a graceful closure. 
-func (d *AuthenticatedGossiper) Stop() { - d.stopped.Do(d.stop) -} - -func (d *AuthenticatedGossiper) stop() { - log.Info("Authenticated Gossiper is stopping") - - d.blockEpochs.Cancel() - - d.syncMgr.Stop() - - close(d.quit) - d.wg.Wait() - - // We'll stop our reliable sender after all of the gossiper's goroutines - // have exited to ensure nothing can cause it to continue executing. - d.reliableSender.Stop() -} - -// TODO(roasbeef): need method to get current gossip timestamp? -// * using mtx, check time rotate forward is needed? - -// ProcessRemoteAnnouncement sends a new remote announcement message along with -// the peer that sent the routing message. The announcement will be processed -// then added to a queue for batched trickled announcement to all connected -// peers. Remote channel announcements should contain the announcement proof -// and be fully validated. -func (d *AuthenticatedGossiper) ProcessRemoteAnnouncement(msg lnwire.Message, - peer lnpeer.Peer) chan er.R { - - errChan := make(chan er.R, 1) - - // For messages in the known set of channel series queries, we'll - // dispatch the message directly to the GossipSyncer, and skip the main - // processing loop. - switch m := msg.(type) { - case *lnwire.QueryShortChanIDs, - *lnwire.QueryChannelRange, - *lnwire.ReplyChannelRange, - *lnwire.ReplyShortChanIDsEnd: - - syncer, ok := d.syncMgr.GossipSyncer(peer.PubKey()) - if !ok { - log.Warnf("Gossip syncer for peer=%x not found", - peer.PubKey()) - - errChan <- ErrGossipSyncerNotFound.Default() - return errChan - } - - // If we've found the message target, then we'll dispatch the - // message directly to it. - syncer.ProcessQueryMsg(m, peer.QuitSignal()) - - errChan <- nil - return errChan - - // If a peer is updating its current update horizon, then we'll dispatch - // that directly to the proper GossipSyncer. 
- case *lnwire.GossipTimestampRange: - syncer, ok := d.syncMgr.GossipSyncer(peer.PubKey()) - if !ok { - log.Warnf("Gossip syncer for peer=%x not found", - peer.PubKey()) - - errChan <- ErrGossipSyncerNotFound.Default() - return errChan - } - - // If we've found the message target, then we'll dispatch the - // message directly to it. - if err := syncer.ApplyGossipFilter(m); err != nil { - log.Warnf("Unable to apply gossip filter for peer=%x: "+ - "%v", peer.PubKey(), err) - - errChan <- err - return errChan - } - - errChan <- nil - return errChan - } - - nMsg := &networkMsg{ - msg: msg, - isRemote: true, - peer: peer, - source: peer.IdentityKey(), - err: errChan, - } - - select { - case d.networkMsgs <- nMsg: - - // If the peer that sent us this error is quitting, then we don't need - // to send back an error and can return immediately. - case <-peer.QuitSignal(): - return nil - case <-d.quit: - nMsg.err <- ErrGossiperShuttingDown.Default() - } - - return nMsg.err -} - -// ProcessLocalAnnouncement sends a new remote announcement message along with -// the peer that sent the routing message. The announcement will be processed -// then added to a queue for batched trickled announcement to all connected -// peers. Local channel announcements don't contain the announcement proof and -// will not be fully validated. Once the channel proofs are received, the -// entire channel announcement and update messages will be re-constructed and -// broadcast to the rest of the network. -func (d *AuthenticatedGossiper) ProcessLocalAnnouncement(msg lnwire.Message, - source *btcec.PublicKey, optionalFields ...OptionalMsgField) chan er.R { - - optionalMsgFields := &optionalMsgFields{} - optionalMsgFields.apply(optionalFields...) 
- - nMsg := &networkMsg{ - msg: msg, - optionalMsgFields: optionalMsgFields, - isRemote: false, - source: source, - err: make(chan er.R, 1), - } - - select { - case d.networkMsgs <- nMsg: - case <-d.quit: - nMsg.err <- ErrGossiperShuttingDown.Default() - } - - return nMsg.err -} - -// channelUpdateID is a unique identifier for ChannelUpdate messages, as -// channel updates can be identified by the (ShortChannelID, ChannelFlags) -// tuple. -type channelUpdateID struct { - // channelID represents the set of data which is needed to - // retrieve all necessary data to validate the channel existence. - channelID lnwire.ShortChannelID - - // Flags least-significant bit must be set to 0 if the creating node - // corresponds to the first node in the previously sent channel - // announcement and 1 otherwise. - flags lnwire.ChanUpdateChanFlags -} - -// msgWithSenders is a wrapper struct around a message, and the set of peers -// that originally sent us this message. Using this struct, we can ensure that -// we don't re-send a message to the peer that sent it to us in the first -// place. -type msgWithSenders struct { - // msg is the wire message itself. - msg lnwire.Message - - // sender is the set of peers that sent us this message. - senders map[route.Vertex]struct{} -} - -// mergeSyncerMap is used to merge the set of senders of a particular message -// with peers that we have an active GossipSyncer with. We do this to ensure -// that we don't broadcast messages to any peers that we have active gossip -// syncers for. -func (m *msgWithSenders) mergeSyncerMap(syncers map[route.Vertex]*GossipSyncer) { - for peerPub := range syncers { - m.senders[peerPub] = struct{}{} - } -} - -// deDupedAnnouncements de-duplicates announcements that have been added to the -// batch. Internally, announcements are stored in three maps -// (one each for channel announcements, channel updates, and node -// announcements). 
These maps keep track of unique announcements and ensure no -// announcements are duplicated. We keep the three message types separate, such -// that we can send channel announcements first, then channel updates, and -// finally node announcements when it's time to broadcast them. -type deDupedAnnouncements struct { - // channelAnnouncements are identified by the short channel id field. - channelAnnouncements map[lnwire.ShortChannelID]msgWithSenders - - // channelUpdates are identified by the channel update id field. - channelUpdates map[channelUpdateID]msgWithSenders - - // nodeAnnouncements are identified by the Vertex field. - nodeAnnouncements map[route.Vertex]msgWithSenders - - sync.Mutex -} - -// Reset operates on deDupedAnnouncements to reset the storage of -// announcements. -func (d *deDupedAnnouncements) Reset() { - d.Lock() - defer d.Unlock() - - d.reset() -} - -// reset is the private version of the Reset method. We have this so we can -// call this method within method that are already holding the lock. -func (d *deDupedAnnouncements) reset() { - // Storage of each type of announcement (channel announcements, channel - // updates, node announcements) is set to an empty map where the - // appropriate key points to the corresponding lnwire.Message. - d.channelAnnouncements = make(map[lnwire.ShortChannelID]msgWithSenders) - d.channelUpdates = make(map[channelUpdateID]msgWithSenders) - d.nodeAnnouncements = make(map[route.Vertex]msgWithSenders) -} - -// addMsg adds a new message to the current batch. If the message is already -// present in the current batch, then this new instance replaces the latter, -// and the set of senders is updated to reflect which node sent us this -// message. -func (d *deDupedAnnouncements) addMsg(message networkMsg) { - // Depending on the message type (channel announcement, channel update, - // or node announcement), the message is added to the corresponding map - // in deDupedAnnouncements. 
Because each identifying key can have at - // most one value, the announcements are de-duplicated, with newer ones - // replacing older ones. - switch msg := message.msg.(type) { - - // Channel announcements are identified by the short channel id field. - case *lnwire.ChannelAnnouncement: - deDupKey := msg.ShortChannelID - sender := route.NewVertex(message.source) - - mws, ok := d.channelAnnouncements[deDupKey] - if !ok { - mws = msgWithSenders{ - msg: msg, - senders: make(map[route.Vertex]struct{}), - } - mws.senders[sender] = struct{}{} - - d.channelAnnouncements[deDupKey] = mws - - return - } - - mws.msg = msg - mws.senders[sender] = struct{}{} - d.channelAnnouncements[deDupKey] = mws - - // Channel updates are identified by the (short channel id, - // channelflags) tuple. - case *lnwire.ChannelUpdate: - sender := route.NewVertex(message.source) - deDupKey := channelUpdateID{ - msg.ShortChannelID, - msg.ChannelFlags, - } - - oldTimestamp := uint32(0) - mws, ok := d.channelUpdates[deDupKey] - if ok { - // If we already have seen this message, record its - // timestamp. - oldTimestamp = mws.msg.(*lnwire.ChannelUpdate).Timestamp - } - - // If we already had this message with a strictly newer - // timestamp, then we'll just discard the message we got. - if oldTimestamp > msg.Timestamp { - return - } - - // If the message we just got is newer than what we previously - // have seen, or this is the first time we see it, then we'll - // add it to our map of announcements. - if oldTimestamp < msg.Timestamp { - mws = msgWithSenders{ - msg: msg, - senders: make(map[route.Vertex]struct{}), - } - - // We'll mark the sender of the message in the - // senders map. - mws.senders[sender] = struct{}{} - - d.channelUpdates[deDupKey] = mws - - return - } - - // Lastly, if we had seen this exact message from before, with - // the same timestamp, we'll add the sender to the map of - // senders, such that we can skip sending this message back in - // the next batch. 
- mws.msg = msg - mws.senders[sender] = struct{}{} - d.channelUpdates[deDupKey] = mws - - // Node announcements are identified by the Vertex field. Use the - // NodeID to create the corresponding Vertex. - case *lnwire.NodeAnnouncement: - sender := route.NewVertex(message.source) - deDupKey := route.Vertex(msg.NodeID) - - // We do the same for node announcements as we did for channel - // updates, as they also carry a timestamp. - oldTimestamp := uint32(0) - mws, ok := d.nodeAnnouncements[deDupKey] - if ok { - oldTimestamp = mws.msg.(*lnwire.NodeAnnouncement).Timestamp - } - - // Discard the message if it's old. - if oldTimestamp > msg.Timestamp { - return - } - - // Replace if it's newer. - if oldTimestamp < msg.Timestamp { - mws = msgWithSenders{ - msg: msg, - senders: make(map[route.Vertex]struct{}), - } - - mws.senders[sender] = struct{}{} - - d.nodeAnnouncements[deDupKey] = mws - - return - } - - // Add to senders map if it's the same as we had. - mws.msg = msg - mws.senders[sender] = struct{}{} - d.nodeAnnouncements[deDupKey] = mws - } -} - -// AddMsgs is a helper method to add multiple messages to the announcement -// batch. -func (d *deDupedAnnouncements) AddMsgs(msgs ...networkMsg) { - d.Lock() - defer d.Unlock() - - for _, msg := range msgs { - d.addMsg(msg) - } -} - -// Emit returns the set of de-duplicated announcements to be sent out during -// the next announcement epoch, in the order of channel announcements, channel -// updates, and node announcements. Each message emitted, contains the set of -// peers that sent us the message. This way, we can ensure that we don't waste -// bandwidth by re-sending a message to the peer that sent it to us in the -// first place. Additionally, the set of stored messages are reset. -func (d *deDupedAnnouncements) Emit() []msgWithSenders { - d.Lock() - defer d.Unlock() - - // Get the total number of announcements. 
- numAnnouncements := len(d.channelAnnouncements) + len(d.channelUpdates) + - len(d.nodeAnnouncements) - - // Create an empty array of lnwire.Messages with a length equal to - // the total number of announcements. - msgs := make([]msgWithSenders, 0, numAnnouncements) - - // Add the channel announcements to the array first. - for _, message := range d.channelAnnouncements { - msgs = append(msgs, message) - } - - // Then add the channel updates. - for _, message := range d.channelUpdates { - msgs = append(msgs, message) - } - - // Finally add the node announcements. - for _, message := range d.nodeAnnouncements { - msgs = append(msgs, message) - } - - d.reset() - - // Return the array of lnwire.messages. - return msgs -} - -// calculateSubBatchSize is a helper function that calculates the size to break -// down the batchSize into. -func calculateSubBatchSize(totalDelay, subBatchDelay time.Duration, - minimumBatchSize, batchSize int) int { - if subBatchDelay > totalDelay { - return batchSize - } - - subBatchSize := (int(batchSize)*int(subBatchDelay) + int(totalDelay) - 1) / - int(totalDelay) - - if subBatchSize < minimumBatchSize { - return minimumBatchSize - } - - return subBatchSize -} - -// splitAnnouncementBatches takes an exiting list of announcements and -// decomposes it into sub batches controlled by the `subBatchSize`. -func splitAnnouncementBatches(subBatchSize int, - announcementBatch []msgWithSenders) [][]msgWithSenders { - var splitAnnouncementBatch [][]msgWithSenders - - for subBatchSize < len(announcementBatch) { - // For slicing with minimal allocation - // https://github.com/golang/go/wiki/SliceTricks - announcementBatch, splitAnnouncementBatch = - announcementBatch[subBatchSize:], - append(splitAnnouncementBatch, - announcementBatch[0:subBatchSize:subBatchSize]) - } - splitAnnouncementBatch = append(splitAnnouncementBatch, announcementBatch) - - return splitAnnouncementBatch -} - -// sendBatch broadcasts a list of announcements to our peers. 
-func (d *AuthenticatedGossiper) sendBatch(announcementBatch []msgWithSenders) { - syncerPeers := d.syncMgr.GossipSyncers() - - // We'll first attempt to filter out this new message - // for all peers that have active gossip syncers - // active. - for _, syncer := range syncerPeers { - syncer.FilterGossipMsgs(announcementBatch...) - } - - for _, msgChunk := range announcementBatch { - // With the syncers taken care of, we'll merge - // the sender map with the set of syncers, so - // we don't send out duplicate messages. - msgChunk.mergeSyncerMap(syncerPeers) - - err := d.cfg.Broadcast( - msgChunk.senders, msgChunk.msg, - ) - if err != nil { - log.Errorf("Unable to send batch "+ - "announcements: %v", err) - continue - } - } -} - -// networkHandler is the primary goroutine that drives this service. The roles -// of this goroutine includes answering queries related to the state of the -// network, syncing up newly connected peers, and also periodically -// broadcasting our latest topology state to all connected peers. -// -// NOTE: This MUST be run as a goroutine. -func (d *AuthenticatedGossiper) networkHandler() { - defer d.wg.Done() - - // Initialize empty deDupedAnnouncements to store announcement batch. - announcements := deDupedAnnouncements{} - announcements.Reset() - - d.cfg.RetransmitTicker.Resume() - defer d.cfg.RetransmitTicker.Stop() - - trickleTimer := time.NewTicker(d.cfg.TrickleDelay) - defer trickleTimer.Stop() - - // To start, we'll first check to see if there are any stale channel or - // node announcements that we need to re-transmit. - if err := d.retransmitStaleAnns(time.Now()); err != nil { - log.Errorf("Unable to rebroadcast stale announcements: %v", err) - } - - // We'll use this validation to ensure that we process jobs in their - // dependency order during parallel validation. - validationBarrier := routing.NewValidationBarrier( - runtime.NumCPU()*4, d.quit, - ) - - for { - select { - // A new policy update has arrived. 
We'll commit it to the - // sub-systems below us, then craft, sign, and broadcast a new - // ChannelUpdate for the set of affected clients. - case policyUpdate := <-d.chanPolicyUpdates: - // First, we'll now create new fully signed updates for - // the affected channels and also update the underlying - // graph with the new state. - newChanUpdates, err := d.processChanPolicyUpdate( - policyUpdate.edgesToUpdate, - ) - policyUpdate.errChan <- err - if err != nil { - log.Errorf("Unable to craft policy updates: %v", - err) - continue - } - - // Finally, with the updates committed, we'll now add - // them to the announcement batch to be flushed at the - // start of the next epoch. - announcements.AddMsgs(newChanUpdates...) - - case announcement := <-d.networkMsgs: - // We should only broadcast this message forward if it - // originated from us or it wasn't received as part of - // our initial historical sync. - shouldBroadcast := !announcement.isRemote || - d.syncMgr.IsGraphSynced() - - switch announcement.msg.(type) { - // Channel announcement signatures are amongst the only - // messages that we'll process serially. - case *lnwire.AnnounceSignatures: - emittedAnnouncements := d.processNetworkAnnouncement( - announcement, - ) - if emittedAnnouncements != nil { - announcements.AddMsgs( - emittedAnnouncements..., - ) - } - continue - } - - // If this message was recently rejected, then we won't - // attempt to re-process it. - if d.isRecentlyRejectedMsg(announcement.msg) { - announcement.err <- er.Errorf("recently " + - "rejected") - continue - } - - // We'll set up any dependent, and wait until a free - // slot for this job opens up, this allow us to not - // have thousands of goroutines active. - validationBarrier.InitJobDependencies(announcement.msg) - - d.wg.Add(1) - go func() { - defer d.wg.Done() - defer validationBarrier.CompleteJob() - - // If this message has an existing dependency, - // then we'll wait until that has been fully - // validated before we proceed. 
- err := validationBarrier.WaitForDependants( - announcement.msg, - ) - if err != nil { - if !routing.ErrVBarrierShuttingDown.Is(err) { - log.Warnf("unexpected error "+ - "during validation "+ - "barrier shutdown: %v", - err) - } - announcement.err <- err - return - } - - // Process the network announcement to - // determine if this is either a new - // announcement from our PoV or an edges to a - // prior vertex/edge we previously proceeded. - emittedAnnouncements := d.processNetworkAnnouncement( - announcement, - ) - - // If this message had any dependencies, then - // we can now signal them to continue. - validationBarrier.SignalDependants( - announcement.msg, - ) - - // If the announcement was accepted, then add - // the emitted announcements to our announce - // batch to be broadcast once the trickle timer - // ticks gain. - if emittedAnnouncements != nil && shouldBroadcast { - // TODO(roasbeef): exclude peer that - // sent. - announcements.AddMsgs( - emittedAnnouncements..., - ) - } else if emittedAnnouncements != nil { - log.Trace("Skipping broadcast of " + - "announcements received " + - "during initial graph sync") - } - - }() - - // A new block has arrived, so we can re-process the previously - // premature announcements. - case newBlock, ok := <-d.blockEpochs.Epochs: - // If the channel has been closed, then this indicates - // the daemon is shutting down, so we exit ourselves. - if !ok { - return - } - - // Once a new block arrives, we update our running - // track of the height of the chain tip. - d.Lock() - blockHeight := uint32(newBlock.Height) - d.bestHeight = blockHeight - - log.Debugf("New block: height=%d, hash=%s", blockHeight, - newBlock.Hash) - - // Next we check if we have any premature announcements - // for this height, if so, then we process them once - // more as normal announcements. 
- premature := d.prematureAnnouncements[blockHeight] - if len(premature) == 0 { - d.Unlock() - continue - } - delete(d.prematureAnnouncements, blockHeight) - d.Unlock() - - log.Infof("Re-processing %v premature announcements "+ - "for height %v", len(premature), blockHeight) - - for _, ann := range premature { - emittedAnnouncements := d.processNetworkAnnouncement(ann) - if emittedAnnouncements != nil { - announcements.AddMsgs( - emittedAnnouncements..., - ) - } - } - - // The trickle timer has ticked, which indicates we should - // flush to the network the pending batch of new announcements - // we've received since the last trickle tick. - case <-trickleTimer.C: - // Emit the current batch of announcements from - // deDupedAnnouncements. - announcementBatch := announcements.Emit() - - // If the current announcements batch is nil, then we - // have no further work here. - if len(announcementBatch) == 0 { - continue - } - - // Next, If we have new things to announce then - // broadcast them to all our immediately connected - // peers. - subBatchSize := calculateSubBatchSize( - d.cfg.TrickleDelay, d.cfg.SubBatchDelay, d.cfg.MinimumBatchSize, - len(announcementBatch), - ) - - splitAnnouncementBatch := splitAnnouncementBatches( - subBatchSize, announcementBatch, - ) - - d.wg.Add(1) - go func() { - defer d.wg.Done() - log.Infof("Broadcasting %v new announcements in %d sub batches", - len(announcementBatch), len(splitAnnouncementBatch)) - - for _, announcementBatch := range splitAnnouncementBatch { - d.sendBatch(announcementBatch) - select { - case <-time.After(d.cfg.SubBatchDelay): - case <-d.quit: - return - } - } - }() - - // The retransmission timer has ticked which indicates that we - // should check if we need to prune or re-broadcast any of our - // personal channels or node announcement. This addresses the - // case of "zombie" channels and channel advertisements that - // have been dropped, or not properly propagated through the - // network. 
- case tick := <-d.cfg.RetransmitTicker.Ticks(): - if err := d.retransmitStaleAnns(tick); err != nil { - log.Errorf("unable to rebroadcast stale "+ - "announcements: %v", err) - } - - // The gossiper has been signalled to exit, to we exit our - // main loop so the wait group can be decremented. - case <-d.quit: - return - } - } -} - -// TODO(roasbeef): d/c peers that send updates not on our chain - -// InitSyncState is called by outside sub-systems when a connection is -// established to a new peer that understands how to perform channel range -// queries. We'll allocate a new gossip syncer for it, and start any goroutines -// needed to handle new queries. -func (d *AuthenticatedGossiper) InitSyncState(syncPeer lnpeer.Peer) { - d.syncMgr.InitSyncState(syncPeer) -} - -// PruneSyncState is called by outside sub-systems once a peer that we were -// previously connected to has been disconnected. In this case we can stop the -// existing GossipSyncer assigned to the peer and free up resources. -func (d *AuthenticatedGossiper) PruneSyncState(peer route.Vertex) { - d.syncMgr.PruneSyncState(peer) -} - -// isRecentlyRejectedMsg returns true if we recently rejected a message, and -// false otherwise, This avoids expensive reprocessing of the message. -func (d *AuthenticatedGossiper) isRecentlyRejectedMsg(msg lnwire.Message) bool { - d.rejectMtx.RLock() - defer d.rejectMtx.RUnlock() - - switch m := msg.(type) { - case *lnwire.ChannelUpdate: - _, ok := d.recentRejects[m.ShortChannelID.ToUint64()] - return ok - - case *lnwire.ChannelAnnouncement: - _, ok := d.recentRejects[m.ShortChannelID.ToUint64()] - return ok - - default: - return false - } -} - -// retransmitStaleAnns examines all outgoing channels that the source node is -// known to maintain to check to see if any of them are "stale". A channel is -// stale iff, the last timestamp of its rebroadcast is older than the -// RebroadcastInterval. We also check if a refreshed node announcement should -// be resent. 
-func (d *AuthenticatedGossiper) retransmitStaleAnns(now time.Time) er.R { - // Iterate over all of our channels and check if any of them fall - // within the prune interval or re-broadcast interval. - type updateTuple struct { - info *channeldb.ChannelEdgeInfo - edge *channeldb.ChannelEdgePolicy - } - - var ( - havePublicChannels bool - edgesToUpdate []updateTuple - ) - err := d.cfg.Router.ForAllOutgoingChannels(func( - info *channeldb.ChannelEdgeInfo, - edge *channeldb.ChannelEdgePolicy) er.R { - - // If there's no auth proof attached to this edge, it means - // that it is a private channel not meant to be announced to - // the greater network, so avoid sending channel updates for - // this channel to not leak its - // existence. - if info.AuthProof == nil { - log.Debugf("Skipping retransmission of channel "+ - "without AuthProof: %v", info.ChannelID) - return nil - } - - // We make a note that we have at least one public channel. We - // use this to determine whether we should send a node - // announcement below. - havePublicChannels = true - - // If this edge has a ChannelUpdate that was created before the - // introduction of the MaxHTLC field, then we'll update this - // edge to propagate this information in the network. - if !edge.MessageFlags.HasMaxHtlc() { - // We'll make sure we support the new max_htlc field if - // not already present. - edge.MessageFlags |= lnwire.ChanUpdateOptionMaxHtlc - edge.MaxHTLC = lnwire.NewMSatFromSatoshis(info.Capacity) - - edgesToUpdate = append(edgesToUpdate, updateTuple{ - info: info, - edge: edge, - }) - return nil - } - - timeElapsed := now.Sub(edge.LastUpdate) - - // If it's been longer than RebroadcastInterval since we've - // re-broadcasted the channel, add the channel to the set of - // edges we need to update. 
- if timeElapsed >= d.cfg.RebroadcastInterval { - edgesToUpdate = append(edgesToUpdate, updateTuple{ - info: info, - edge: edge, - }) - } - - return nil - }) - if err != nil && !channeldb.ErrGraphNoEdgesFound.Is(err) { - return er.Errorf("unable to retrieve outgoing channels: %v", - err) - } - - var signedUpdates []lnwire.Message - for _, chanToUpdate := range edgesToUpdate { - // Re-sign and update the channel on disk and retrieve our - // ChannelUpdate to broadcast. - chanAnn, chanUpdate, err := d.updateChannel( - chanToUpdate.info, chanToUpdate.edge, - ) - if err != nil { - return er.Errorf("unable to update channel: %v", err) - } - - // If we have a valid announcement to transmit, then we'll send - // that along with the update. - if chanAnn != nil { - signedUpdates = append(signedUpdates, chanAnn) - } - - signedUpdates = append(signedUpdates, chanUpdate) - } - - // If we don't have any public channels, we return as we don't want to - // broadcast anything that would reveal our existence. - if !havePublicChannels { - return nil - } - - // We'll also check that our NodeAnnouncement is not too old. - currentNodeAnn, err := d.cfg.SelfNodeAnnouncement(false) - if err != nil { - return er.Errorf("unable to get current node announment: %v", - err) - } - - timestamp := time.Unix(int64(currentNodeAnn.Timestamp), 0) - timeElapsed := now.Sub(timestamp) - - // If it's been a full day since we've re-broadcasted the - // node announcement, refresh it and resend it. - nodeAnnStr := "" - if timeElapsed >= d.cfg.RebroadcastInterval { - newNodeAnn, err := d.cfg.SelfNodeAnnouncement(true) - if err != nil { - return er.Errorf("unable to get refreshed node "+ - "announcement: %v", err) - } - - signedUpdates = append(signedUpdates, &newNodeAnn) - nodeAnnStr = " and our refreshed node announcement" - - // Before broadcasting the refreshed node announcement, add it - // to our own graph. 
- if err := d.addNode(&newNodeAnn); err != nil { - log.Errorf("Unable to add refreshed node announcement "+ - "to graph: %v", err) - } - } - - // If we don't have any updates to re-broadcast, then we'll exit - // early. - if len(signedUpdates) == 0 { - return nil - } - - log.Infof("Retransmitting %v outgoing channels%v", - len(edgesToUpdate), nodeAnnStr) - - // With all the wire announcements properly crafted, we'll broadcast - // our known outgoing channels to all our immediate peers. - if err := d.cfg.Broadcast(nil, signedUpdates...); err != nil { - return er.Errorf("unable to re-broadcast channels: %v", err) - } - - return nil -} - -// processChanPolicyUpdate generates a new set of channel updates for the -// provided list of edges and updates the backing ChannelGraphSource. -func (d *AuthenticatedGossiper) processChanPolicyUpdate( - edgesToUpdate []EdgeWithInfo) ([]networkMsg, er.R) { - - var chanUpdates []networkMsg - for _, edgeInfo := range edgesToUpdate { - // Now that we've collected all the channels we need to update, - // we'll re-sign and update the backing ChannelGraphSource, and - // retrieve our ChannelUpdate to broadcast. - _, chanUpdate, err := d.updateChannel( - edgeInfo.Info, edgeInfo.Edge, - ) - if err != nil { - return nil, err - } - - // We'll avoid broadcasting any updates for private channels to - // avoid directly giving away their existence. Instead, we'll - // send the update directly to the remote party. - if edgeInfo.Info.AuthProof == nil { - remotePubKey := remotePubFromChanInfo( - edgeInfo.Info, chanUpdate.ChannelFlags, - ) - err := d.reliableSender.sendMessage( - chanUpdate, remotePubKey, - ) - if err != nil { - log.Errorf("Unable to reliably send %v for "+ - "channel=%v to peer=%x: %v", - chanUpdate.MsgType(), - chanUpdate.ShortChannelID, - remotePubKey, err) - } - continue - } - - // We set ourselves as the source of this message to indicate - // that we shouldn't skip any peers when sending this message. 
- chanUpdates = append(chanUpdates, networkMsg{ - source: d.selfKey, - msg: chanUpdate, - }) - } - - return chanUpdates, nil -} - -// remotePubFromChanInfo returns the public key of the remote peer given a -// ChannelEdgeInfo that describe a channel we have with them. -func remotePubFromChanInfo(chanInfo *channeldb.ChannelEdgeInfo, - chanFlags lnwire.ChanUpdateChanFlags) [33]byte { - - var remotePubKey [33]byte - switch { - case chanFlags&lnwire.ChanUpdateDirection == 0: - remotePubKey = chanInfo.NodeKey2Bytes - case chanFlags&lnwire.ChanUpdateDirection == 1: - remotePubKey = chanInfo.NodeKey1Bytes - } - - return remotePubKey -} - -// processRejectedEdge examines a rejected edge to see if we can extract any -// new announcements from it. An edge will get rejected if we already added -// the same edge without AuthProof to the graph. If the received announcement -// contains a proof, we can add this proof to our edge. We can end up in this -// situation in the case where we create a channel, but for some reason fail -// to receive the remote peer's proof, while the remote peer is able to fully -// assemble the proof and craft the ChannelAnnouncement. -func (d *AuthenticatedGossiper) processRejectedEdge( - chanAnnMsg *lnwire.ChannelAnnouncement, - proof *channeldb.ChannelAuthProof) ([]networkMsg, er.R) { - - // First, we'll fetch the state of the channel as we know if from the - // database. - chanInfo, e1, e2, err := d.cfg.Router.GetChannelByID( - chanAnnMsg.ShortChannelID, - ) - if err != nil { - return nil, err - } - - // The edge is in the graph, and has a proof attached, then we'll just - // reject it as normal. - if chanInfo.AuthProof != nil { - return nil, nil - } - - // Otherwise, this means that the edge is within the graph, but it - // doesn't yet have a proper proof attached. If we did not receive - // the proof such that we now can add it, there's nothing more we - // can do. 
- if proof == nil { - return nil, nil - } - - // We'll then create then validate the new fully assembled - // announcement. - chanAnn, e1Ann, e2Ann, err := netann.CreateChanAnnouncement( - proof, chanInfo, e1, e2, - ) - if err != nil { - return nil, err - } - err = routing.ValidateChannelAnn(chanAnn) - if err != nil { - err := er.Errorf("assembled channel announcement proof "+ - "for shortChanID=%v isn't valid: %v", - chanAnnMsg.ShortChannelID, err) - log.Error(err) - return nil, err - } - - // If everything checks out, then we'll add the fully assembled proof - // to the database. - err = d.cfg.Router.AddProof(chanAnnMsg.ShortChannelID, proof) - if err != nil { - err := er.Errorf("unable add proof to shortChanID=%v: %v", - chanAnnMsg.ShortChannelID, err) - log.Error(err) - return nil, err - } - - // As we now have a complete channel announcement for this channel, - // we'll construct the announcement so they can be broadcast out to all - // our peers. - announcements := make([]networkMsg, 0, 3) - announcements = append(announcements, networkMsg{ - source: d.selfKey, - msg: chanAnn, - }) - if e1Ann != nil { - announcements = append(announcements, networkMsg{ - source: d.selfKey, - msg: e1Ann, - }) - } - if e2Ann != nil { - announcements = append(announcements, networkMsg{ - source: d.selfKey, - msg: e2Ann, - }) - - } - - return announcements, nil -} - -// addNode processes the given node announcement, and adds it to our channel -// graph. 
-func (d *AuthenticatedGossiper) addNode(msg *lnwire.NodeAnnouncement) er.R { - if err := routing.ValidateNodeAnn(msg); err != nil { - return er.Errorf("unable to validate node announcement: %v", - err) - } - - timestamp := time.Unix(int64(msg.Timestamp), 0) - features := lnwire.NewFeatureVector(msg.Features, lnwire.Features) - node := &channeldb.LightningNode{ - HaveNodeAnnouncement: true, - LastUpdate: timestamp, - Addresses: msg.Addresses, - PubKeyBytes: msg.NodeID, - Alias: msg.Alias.String(), - AuthSigBytes: msg.Signature.ToSignatureBytes(), - Features: features, - Color: msg.RGBColor, - ExtraOpaqueData: msg.ExtraOpaqueData, - } - - return d.cfg.Router.AddNode(node) -} - -// processNetworkAnnouncement processes a new network relate authenticated -// channel or node announcement or announcements proofs. If the announcement -// didn't affect the internal state due to either being out of date, invalid, -// or redundant, then nil is returned. Otherwise, the set of announcements will -// be returned which should be broadcasted to the rest of the network. -func (d *AuthenticatedGossiper) processNetworkAnnouncement( - nMsg *networkMsg) []networkMsg { - - // isPremature *MUST* be called with the gossiper's lock held. - isPremature := func(chanID lnwire.ShortChannelID, delta uint32) bool { - // TODO(roasbeef) make height delta 6 - // * or configurable - return chanID.BlockHeight+delta > d.bestHeight - } - - var announcements []networkMsg - - switch msg := nMsg.msg.(type) { - - // A new node announcement has arrived which either presents new - // information about a node in one of the channels we know about, or a - // updating previously advertised information. - case *lnwire.NodeAnnouncement: - timestamp := time.Unix(int64(msg.Timestamp), 0) - - // We'll quickly ask the router if it already has a - // newer update for this node so we can skip validating - // signatures if not required. 
- if d.cfg.Router.IsStaleNode(msg.NodeID, timestamp) { - nMsg.err <- nil - return nil - } - - if err := d.addNode(msg); err != nil { - if routing.IsError(er.Wrapped(err), routing.ErrOutdated, - routing.ErrIgnored) { - - log.Debug(err) - } else { - log.Error(err) - } - - nMsg.err <- err - return nil - } - - // In order to ensure we don't leak unadvertised nodes, we'll - // make a quick check to ensure this node intends to publicly - // advertise itself to the network. - isPublic, err := d.cfg.Router.IsPublicNode(msg.NodeID) - if err != nil { - log.Errorf("Unable to determine if node %x is "+ - "advertised: %v", msg.NodeID, err) - nMsg.err <- err - return nil - } - - // If it does, we'll add their announcement to our batch so that - // it can be broadcast to the rest of our peers. - if isPublic { - announcements = append(announcements, networkMsg{ - peer: nMsg.peer, - source: nMsg.source, - msg: msg, - }) - } else { - log.Tracef("Skipping broadcasting node announcement "+ - "for %x due to being unadvertised", msg.NodeID) - } - - nMsg.err <- nil - // TODO(roasbeef): get rid of the above - return announcements - - // A new channel announcement has arrived, this indicates the - // *creation* of a new channel within the network. This only advertises - // the existence of a channel and not yet the routing policies in - // either direction of the channel. - case *lnwire.ChannelAnnouncement: - // We'll ignore any channel announcements that target any chain - // other than the set of chains we know of. 
- if !bytes.Equal(msg.ChainHash[:], d.cfg.ChainHash[:]) { - err := er.Errorf("ignoring ChannelAnnouncement from "+ - "chain=%v, gossiper on chain=%v", msg.ChainHash, - d.cfg.ChainHash) - log.Errorf(err.String()) - - d.rejectMtx.Lock() - d.recentRejects[msg.ShortChannelID.ToUint64()] = struct{}{} - d.rejectMtx.Unlock() - - nMsg.err <- err - return nil - } - - // If the advertised inclusionary block is beyond our knowledge - // of the chain tip, then we'll put the announcement in limbo - // to be fully verified once we advance forward in the chain. - d.Lock() - if nMsg.isRemote && isPremature(msg.ShortChannelID, 0) { - blockHeight := msg.ShortChannelID.BlockHeight - log.Infof("Announcement for chan_id=(%v), is "+ - "premature: advertises height %v, only "+ - "height %v is known", - msg.ShortChannelID.ToUint64(), - msg.ShortChannelID.BlockHeight, - d.bestHeight) - - d.prematureAnnouncements[blockHeight] = append( - d.prematureAnnouncements[blockHeight], - nMsg, - ) - d.Unlock() - return nil - } - d.Unlock() - - // At this point, we'll now ask the router if this is a - // zombie/known edge. If so we can skip all the processing - // below. - if d.cfg.Router.IsKnownEdge(msg.ShortChannelID) { - nMsg.err <- nil - return nil - } - - // If this is a remote channel announcement, then we'll validate - // all the signatures within the proof as it should be well - // formed. - var proof *channeldb.ChannelAuthProof - if nMsg.isRemote { - if err := routing.ValidateChannelAnn(msg); err != nil { - err := er.Errorf("unable to validate "+ - "announcement: %v", err) - d.rejectMtx.Lock() - d.recentRejects[msg.ShortChannelID.ToUint64()] = struct{}{} - d.rejectMtx.Unlock() - - log.Error(err) - nMsg.err <- err - return nil - } - - // If the proof checks out, then we'll save the proof - // itself to the database so we can fetch it later when - // gossiping with other nodes. 
- proof = &channeldb.ChannelAuthProof{ - NodeSig1Bytes: msg.NodeSig1.ToSignatureBytes(), - NodeSig2Bytes: msg.NodeSig2.ToSignatureBytes(), - BitcoinSig1Bytes: msg.BitcoinSig1.ToSignatureBytes(), - BitcoinSig2Bytes: msg.BitcoinSig2.ToSignatureBytes(), - } - } - - // With the proof validate (if necessary), we can now store it - // within the database for our path finding and syncing needs. - var featureBuf bytes.Buffer - if err := msg.Features.Encode(&featureBuf); err != nil { - log.Errorf("unable to encode features: %v", err) - nMsg.err <- err - return nil - } - - edge := &channeldb.ChannelEdgeInfo{ - ChannelID: msg.ShortChannelID.ToUint64(), - ChainHash: msg.ChainHash, - NodeKey1Bytes: msg.NodeID1, - NodeKey2Bytes: msg.NodeID2, - BitcoinKey1Bytes: msg.BitcoinKey1, - BitcoinKey2Bytes: msg.BitcoinKey2, - AuthProof: proof, - Features: featureBuf.Bytes(), - ExtraOpaqueData: msg.ExtraOpaqueData, - } - - // If there were any optional message fields provided, we'll - // include them in its serialized disk representation now. - if nMsg.optionalMsgFields != nil { - if nMsg.optionalMsgFields.capacity != nil { - edge.Capacity = *nMsg.optionalMsgFields.capacity - } - if nMsg.optionalMsgFields.channelPoint != nil { - edge.ChannelPoint = *nMsg.optionalMsgFields.channelPoint - } - } - - // We will add the edge to the channel router. If the nodes - // present in this channel are not present in the database, a - // partial node will be added to represent each node while we - // wait for a node announcement. - // - // Before we add the edge to the database, we obtain - // the mutex for this channel ID. We do this to ensure - // no other goroutine has read the database and is now - // making decisions based on this DB state, before it - // writes to the DB. 
- d.channelMtx.Lock(msg.ShortChannelID.ToUint64()) - defer d.channelMtx.Unlock(msg.ShortChannelID.ToUint64()) - if err := d.cfg.Router.AddEdge(edge); err != nil { - // If the edge was rejected due to already being known, - // then it may be that case that this new message has a - // fresh channel proof, so we'll check. - if routing.IsError(er.Wrapped(err), routing.ErrOutdated, - routing.ErrIgnored) { - - // Attempt to process the rejected message to - // see if we get any new announcements. - anns, rErr := d.processRejectedEdge(msg, proof) - if rErr != nil { - d.rejectMtx.Lock() - d.recentRejects[msg.ShortChannelID.ToUint64()] = struct{}{} - d.rejectMtx.Unlock() - nMsg.err <- rErr - return nil - } - - // If while processing this rejected edge, we - // realized there's a set of announcements we - // could extract, then we'll return those - // directly. - if len(anns) != 0 { - nMsg.err <- nil - return anns - } - - // Otherwise, this is just a regular rejected - // edge. - log.Debugf("Router rejected channel "+ - "edge: %v", err) - } else { - log.Tracef("Router rejected channel "+ - "edge: %v", err) - } - - nMsg.err <- err - return nil - } - - // If we earlier received any ChannelUpdates for this channel, - // we can now process them, as the channel is added to the - // graph. - shortChanID := msg.ShortChannelID.ToUint64() - var channelUpdates []*networkMsg - - d.pChanUpdMtx.Lock() - channelUpdates = append(channelUpdates, d.prematureChannelUpdates[shortChanID]...) - - // Now delete the premature ChannelUpdates, since we added them - // all to the queue of network messages. - delete(d.prematureChannelUpdates, shortChanID) - d.pChanUpdMtx.Unlock() - - // Launch a new goroutine to handle each ChannelUpdate, this to - // ensure we don't block here, as we can handle only one - // announcement at a time. 
- for _, cu := range channelUpdates { - d.wg.Add(1) - go func(nMsg *networkMsg) { - defer d.wg.Done() - - switch msg := nMsg.msg.(type) { - - // Reprocess the message, making sure we return - // an error to the original caller in case the - // gossiper shuts down. - case *lnwire.ChannelUpdate: - log.Debugf("Reprocessing"+ - " ChannelUpdate for "+ - "shortChanID=%v", - msg.ShortChannelID.ToUint64()) - - select { - case d.networkMsgs <- nMsg: - case <-d.quit: - nMsg.err <- ErrGossiperShuttingDown.Default() - } - - // We don't expect any other message type than - // ChannelUpdate to be in this map. - default: - log.Errorf("Unsupported message type "+ - "found among ChannelUpdates: "+ - "%T", msg) - } - }(cu) - } - - // Channel announcement was successfully proceeded and know it - // might be broadcast to other connected nodes if it was - // announcement with proof (remote). - if proof != nil { - announcements = append(announcements, networkMsg{ - peer: nMsg.peer, - source: nMsg.source, - msg: msg, - }) - } - - nMsg.err <- nil - return announcements - - // A new authenticated channel edge update has arrived. This indicates - // that the directional information for an already known channel has - // been updated. - case *lnwire.ChannelUpdate: - // We'll ignore any channel announcements that target any chain - // other than the set of chains we know of. 
- if !bytes.Equal(msg.ChainHash[:], d.cfg.ChainHash[:]) { - err := er.Errorf("ignoring ChannelUpdate from "+ - "chain=%v, gossiper on chain=%v", msg.ChainHash, - d.cfg.ChainHash) - log.Errorf(err.String()) - - d.rejectMtx.Lock() - d.recentRejects[msg.ShortChannelID.ToUint64()] = struct{}{} - d.rejectMtx.Unlock() - - nMsg.err <- err - return nil - } - - blockHeight := msg.ShortChannelID.BlockHeight - shortChanID := msg.ShortChannelID.ToUint64() - - // If the advertised inclusionary block is beyond our knowledge - // of the chain tip, then we'll put the announcement in limbo - // to be fully verified once we advance forward in the chain. - d.Lock() - if nMsg.isRemote && isPremature(msg.ShortChannelID, 0) { - log.Infof("Update announcement for "+ - "short_chan_id(%v), is premature: advertises "+ - "height %v, only height %v is known", - shortChanID, blockHeight, - d.bestHeight) - - d.prematureAnnouncements[blockHeight] = append( - d.prematureAnnouncements[blockHeight], - nMsg, - ) - d.Unlock() - return nil - } - d.Unlock() - - // Before we perform any of the expensive checks below, we'll - // check whether this update is stale or is for a zombie - // channel in order to quickly reject it. - timestamp := time.Unix(int64(msg.Timestamp), 0) - if d.cfg.Router.IsStaleEdgePolicy( - msg.ShortChannelID, timestamp, msg.ChannelFlags, - ) { - nMsg.err <- nil - return nil - } - - // Get the node pub key as far as we don't have it in channel - // update announcement message. We'll need this to properly - // verify message signature. - // - // We make sure to obtain the mutex for this channel ID - // before we access the database. This ensures the state - // we read from the database has not changed between this - // point and when we call UpdateEdge() later. - d.channelMtx.Lock(msg.ShortChannelID.ToUint64()) - defer d.channelMtx.Unlock(msg.ShortChannelID.ToUint64()) - chanInfo, _, _, err := d.cfg.Router.GetChannelByID(msg.ShortChannelID) - switch { - // No error, break. 
- case err == nil: - break - - case channeldb.ErrZombieEdge.Is(err): - // Since we've deemed the update as not stale above, - // before marking it live, we'll make sure it has been - // signed by the correct party. The least-significant - // bit in the flag on the channel update tells us which - // edge is being updated. - var pubKey *btcec.PublicKey - switch { - case msg.ChannelFlags&lnwire.ChanUpdateDirection == 0: - pubKey, _ = chanInfo.NodeKey1() - case msg.ChannelFlags&lnwire.ChanUpdateDirection == 1: - pubKey, _ = chanInfo.NodeKey2() - } - - err := routing.VerifyChannelUpdateSignature(msg, pubKey) - if err != nil { - err := er.Errorf("unable to verify channel "+ - "update signature: %v", err) - log.Error(err) - nMsg.err <- err - return nil - } - - // With the signature valid, we'll proceed to mark the - // edge as live and wait for the channel announcement to - // come through again. - err = d.cfg.Router.MarkEdgeLive(msg.ShortChannelID) - if err != nil { - err := er.Errorf("unable to remove edge with "+ - "chan_id=%v from zombie index: %v", - msg.ShortChannelID, err) - log.Error(err) - nMsg.err <- err - return nil - } - - log.Debugf("Removed edge with chan_id=%v from zombie "+ - "index", msg.ShortChannelID) - - // We'll fallthrough to ensure we stash the update until - // we receive its corresponding ChannelAnnouncement. - // This is needed to ensure the edge exists in the graph - // before applying the update. - fallthrough - case channeldb.ErrGraphNotFound.Is(err): - fallthrough - case channeldb.ErrGraphNoEdgesFound.Is(err): - fallthrough - case channeldb.ErrEdgeNotFound.Is(err): - // If the edge corresponding to this ChannelUpdate was - // not found in the graph, this might be a channel in - // the process of being opened, and we haven't processed - // our own ChannelAnnouncement yet, hence it is not - // found in the graph. 
This usually gets resolved after - // the channel proofs are exchanged and the channel is - // broadcasted to the rest of the network, but in case - // this is a private channel this won't ever happen. - // This can also happen in the case of a zombie channel - // with a fresh update for which we don't have a - // ChannelAnnouncement for since we reject them. Because - // of this, we temporarily add it to a map, and - // reprocess it after our own ChannelAnnouncement has - // been processed. - d.pChanUpdMtx.Lock() - d.prematureChannelUpdates[shortChanID] = append( - d.prematureChannelUpdates[shortChanID], nMsg, - ) - d.pChanUpdMtx.Unlock() - - log.Debugf("Got ChannelUpdate for edge not found in "+ - "graph(shortChanID=%v), saving for "+ - "reprocessing later", shortChanID) - - // NOTE: We don't return anything on the error channel - // for this message, as we expect that will be done when - // this ChannelUpdate is later reprocessed. - return nil - - default: - err := er.Errorf("unable to validate channel update "+ - "short_chan_id=%v: %v", shortChanID, err) - log.Error(err) - nMsg.err <- err - - d.rejectMtx.Lock() - d.recentRejects[msg.ShortChannelID.ToUint64()] = struct{}{} - d.rejectMtx.Unlock() - return nil - } - - // The least-significant bit in the flag on the channel update - // announcement tells us "which" side of the channels directed - // edge is being updated. - var pubKey *btcec.PublicKey - switch { - case msg.ChannelFlags&lnwire.ChanUpdateDirection == 0: - pubKey, _ = chanInfo.NodeKey1() - case msg.ChannelFlags&lnwire.ChanUpdateDirection == 1: - pubKey, _ = chanInfo.NodeKey2() - } - - // Validate the channel announcement with the expected public key and - // channel capacity. In the case of an invalid channel update, we'll - // return an error to the caller and exit early. 
- err = routing.ValidateChannelUpdateAnn(pubKey, chanInfo.Capacity, msg) - if err != nil { - rErr := er.Errorf("unable to validate channel "+ - "update announcement for short_chan_id=%v: %v", - spew.Sdump(msg.ShortChannelID), err) - - log.Error(rErr) - nMsg.err <- rErr - return nil - } - - update := &channeldb.ChannelEdgePolicy{ - SigBytes: msg.Signature.ToSignatureBytes(), - ChannelID: shortChanID, - LastUpdate: timestamp, - MessageFlags: msg.MessageFlags, - ChannelFlags: msg.ChannelFlags, - TimeLockDelta: msg.TimeLockDelta, - MinHTLC: msg.HtlcMinimumMsat, - MaxHTLC: msg.HtlcMaximumMsat, - FeeBaseMSat: lnwire.MilliSatoshi(msg.BaseFee), - FeeProportionalMillionths: lnwire.MilliSatoshi(msg.FeeRate), - ExtraOpaqueData: msg.ExtraOpaqueData, - } - - if err := d.cfg.Router.UpdateEdge(update); err != nil { - if routing.IsError(er.Wrapped(err), routing.ErrOutdated, - routing.ErrIgnored) { - log.Debug(err) - } else { - d.rejectMtx.Lock() - d.recentRejects[msg.ShortChannelID.ToUint64()] = struct{}{} - d.rejectMtx.Unlock() - log.Error(err) - } - - nMsg.err <- err - return nil - } - - // If this is a local ChannelUpdate without an AuthProof, it - // means it is an update to a channel that is not (yet) - // supposed to be announced to the greater network. However, - // our channel counter party will need to be given the update, - // so we'll try sending the update directly to the remote peer. - if !nMsg.isRemote && chanInfo.AuthProof == nil { - // Get our peer's public key. - remotePubKey := remotePubFromChanInfo( - chanInfo, msg.ChannelFlags, - ) - - // Now, we'll attempt to send the channel update message - // reliably to the remote peer in the background, so - // that we don't block if the peer happens to be offline - // at the moment. 
- err := d.reliableSender.sendMessage(msg, remotePubKey) - if err != nil { - err := er.Errorf("unable to reliably send %v "+ - "for channel=%v to peer=%x: %v", - msg.MsgType(), msg.ShortChannelID, - remotePubKey, err) - nMsg.err <- err - return nil - } - } - - // Channel update announcement was successfully processed and - // now it can be broadcast to the rest of the network. However, - // we'll only broadcast the channel update announcement if it - // has an attached authentication proof. - if chanInfo.AuthProof != nil { - announcements = append(announcements, networkMsg{ - peer: nMsg.peer, - source: nMsg.source, - msg: msg, - }) - } - - nMsg.err <- nil - return announcements - - // A new signature announcement has been received. This indicates - // willingness of nodes involved in the funding of a channel to - // announce this new channel to the rest of the world. - case *lnwire.AnnounceSignatures: - needBlockHeight := msg.ShortChannelID.BlockHeight + - d.cfg.ProofMatureDelta - shortChanID := msg.ShortChannelID.ToUint64() - - prefix := "local" - if nMsg.isRemote { - prefix = "remote" - } - - log.Infof("Received new %v channel announcement for %v", prefix, - msg.ShortChannelID) - - // By the specification, channel announcement proofs should be - // sent after some number of confirmations after channel was - // registered in bitcoin blockchain. Therefore, we check if the - // proof is premature. If so we'll halt processing until the - // expected announcement height. This allows us to be tolerant - // to other clients if this constraint was changed. 
- d.Lock() - if isPremature(msg.ShortChannelID, d.cfg.ProofMatureDelta) { - d.prematureAnnouncements[needBlockHeight] = append( - d.prematureAnnouncements[needBlockHeight], - nMsg, - ) - log.Infof("Premature proof announcement, "+ - "current block height lower than needed: %v <"+ - " %v, add announcement to reprocessing batch", - d.bestHeight, needBlockHeight) - d.Unlock() - return nil - } - d.Unlock() - - // Ensure that we know of a channel with the target channel ID - // before proceeding further. - // - // We must acquire the mutex for this channel ID before getting - // the channel from the database, to ensure what we read does - // not change before we call AddProof() later. - d.channelMtx.Lock(msg.ShortChannelID.ToUint64()) - defer d.channelMtx.Unlock(msg.ShortChannelID.ToUint64()) - - chanInfo, e1, e2, err := d.cfg.Router.GetChannelByID( - msg.ShortChannelID) - if err != nil { - // TODO(andrew.shvv) this is dangerous because remote - // node might rewrite the waiting proof. - proof := channeldb.NewWaitingProof(nMsg.isRemote, msg) - err := d.cfg.WaitingProofStore.Add(proof) - if err != nil { - err := er.Errorf("unable to store "+ - "the proof for short_chan_id=%v: %v", - shortChanID, err) - log.Error(err) - nMsg.err <- err - return nil - } - - log.Infof("Orphan %v proof announcement with "+ - "short_chan_id=%v, adding "+ - "to waiting batch", prefix, shortChanID) - nMsg.err <- nil - return nil - } - - nodeID := nMsg.source.SerializeCompressed() - isFirstNode := bytes.Equal(nodeID, chanInfo.NodeKey1Bytes[:]) - isSecondNode := bytes.Equal(nodeID, chanInfo.NodeKey2Bytes[:]) - - // Ensure that channel that was retrieved belongs to the peer - // which sent the proof announcement. 
- if !(isFirstNode || isSecondNode) { - err := er.Errorf("channel that was received not "+ - "belongs to the peer which sent the proof, "+ - "short_chan_id=%v", shortChanID) - log.Error(err) - nMsg.err <- err - return nil - } - - // If proof was sent by a local sub-system, then we'll - // send the announcement signature to the remote node - // so they can also reconstruct the full channel - // announcement. - if !nMsg.isRemote { - var remotePubKey [33]byte - if isFirstNode { - remotePubKey = chanInfo.NodeKey2Bytes - } else { - remotePubKey = chanInfo.NodeKey1Bytes - } - // Since the remote peer might not be online - // we'll call a method that will attempt to - // deliver the proof when it comes online. - err := d.reliableSender.sendMessage(msg, remotePubKey) - if err != nil { - err := er.Errorf("unable to reliably send %v "+ - "for channel=%v to peer=%x: %v", - msg.MsgType(), msg.ShortChannelID, - remotePubKey, err) - nMsg.err <- err - return nil - } - } - - // Check if we already have the full proof for this channel. - if chanInfo.AuthProof != nil { - // If we already have the fully assembled proof, then - // the peer sending us their proof has probably not - // received our local proof yet. So be kind and send - // them the full proof. - if nMsg.isRemote { - peerID := nMsg.source.SerializeCompressed() - log.Debugf("Got AnnounceSignatures for " + - "channel with full proof.") - - d.wg.Add(1) - go func() { - defer d.wg.Done() - log.Debugf("Received half proof for "+ - "channel %v with existing "+ - "full proof. 
Sending full "+ - "proof to peer=%x", - msg.ChannelID, - peerID) - - chanAnn, _, _, err := netann.CreateChanAnnouncement( - chanInfo.AuthProof, chanInfo, - e1, e2, - ) - if err != nil { - log.Errorf("unable to gen "+ - "ann: %v", err) - return - } - err = nMsg.peer.SendMessage( - false, chanAnn, - ) - if err != nil { - log.Errorf("Failed sending "+ - "full proof to "+ - "peer=%x: %v", - peerID, err) - return - } - log.Debugf("Full proof sent to peer=%x"+ - " for chanID=%v", peerID, - msg.ChannelID) - }() - } - - log.Debugf("Already have proof for channel "+ - "with chanID=%v", msg.ChannelID) - nMsg.err <- nil - return nil - } - - // Check that we received the opposite proof. If so, then we're - // now able to construct the full proof, and create the channel - // announcement. If we didn't receive the opposite half of the - // proof than we should store it this one, and wait for - // opposite to be received. - proof := channeldb.NewWaitingProof(nMsg.isRemote, msg) - oppositeProof, err := d.cfg.WaitingProofStore.Get( - proof.OppositeKey(), - ) - if err != nil && !channeldb.ErrWaitingProofNotFound.Is(err) { - err := er.Errorf("unable to get "+ - "the opposite proof for short_chan_id=%v: %v", - shortChanID, err) - log.Error(err) - nMsg.err <- err - return nil - } - - if channeldb.ErrWaitingProofNotFound.Is(err) { - err := d.cfg.WaitingProofStore.Add(proof) - if err != nil { - err := er.Errorf("unable to store "+ - "the proof for short_chan_id=%v: %v", - shortChanID, err) - log.Error(err) - nMsg.err <- err - return nil - } - - log.Infof("1/2 of channel ann proof received for "+ - "short_chan_id=%v, waiting for other half", - shortChanID) - - nMsg.err <- nil - return nil - } - - // We now have both halves of the channel announcement proof, - // then we'll reconstruct the initial announcement so we can - // validate it shortly below. 
- var dbProof channeldb.ChannelAuthProof - if isFirstNode { - dbProof.NodeSig1Bytes = msg.NodeSignature.ToSignatureBytes() - dbProof.NodeSig2Bytes = oppositeProof.NodeSignature.ToSignatureBytes() - dbProof.BitcoinSig1Bytes = msg.BitcoinSignature.ToSignatureBytes() - dbProof.BitcoinSig2Bytes = oppositeProof.BitcoinSignature.ToSignatureBytes() - } else { - dbProof.NodeSig1Bytes = oppositeProof.NodeSignature.ToSignatureBytes() - dbProof.NodeSig2Bytes = msg.NodeSignature.ToSignatureBytes() - dbProof.BitcoinSig1Bytes = oppositeProof.BitcoinSignature.ToSignatureBytes() - dbProof.BitcoinSig2Bytes = msg.BitcoinSignature.ToSignatureBytes() - } - chanAnn, e1Ann, e2Ann, err := netann.CreateChanAnnouncement( - &dbProof, chanInfo, e1, e2, - ) - if err != nil { - log.Error(err) - nMsg.err <- err - return nil - } - - // With all the necessary components assembled validate the - // full channel announcement proof. - if err := routing.ValidateChannelAnn(chanAnn); err != nil { - err := er.Errorf("channel announcement proof "+ - "for short_chan_id=%v isn't valid: %v", - shortChanID, err) - - log.Error(err) - nMsg.err <- err - return nil - } - - // If the channel was returned by the router it means that - // existence of funding point and inclusion of nodes bitcoin - // keys in it already checked by the router. In this stage we - // should check that node keys are attest to the bitcoin keys - // by validating the signatures of announcement. If proof is - // valid then we'll populate the channel edge with it, so we - // can announce it on peer connect. 
- err = d.cfg.Router.AddProof(msg.ShortChannelID, &dbProof) - if err != nil { - err := er.Errorf("unable add proof to the "+ - "channel chanID=%v: %v", msg.ChannelID, err) - log.Error(err) - nMsg.err <- err - return nil - } - - err = d.cfg.WaitingProofStore.Remove(proof.OppositeKey()) - if err != nil { - err := er.Errorf("unable remove opposite proof "+ - "for the channel with chanID=%v: %v", - msg.ChannelID, err) - log.Error(err) - nMsg.err <- err - return nil - } - - // Proof was successfully created and now can announce the - // channel to the remain network. - log.Infof("Fully valid channel proof for short_chan_id=%v "+ - "constructed, adding to next ann batch", - shortChanID) - - // Assemble the necessary announcements to add to the next - // broadcasting batch. - announcements = append(announcements, networkMsg{ - peer: nMsg.peer, - source: nMsg.source, - msg: chanAnn, - }) - if e1Ann != nil { - announcements = append(announcements, networkMsg{ - peer: nMsg.peer, - source: nMsg.source, - msg: e1Ann, - }) - } - if e2Ann != nil { - announcements = append(announcements, networkMsg{ - peer: nMsg.peer, - source: nMsg.source, - msg: e2Ann, - }) - } - - // We'll also send along the node announcements for each channel - // participant if we know of them. To ensure our node - // announcement propagates to our channel counterparty, we'll - // set the source for each announcement to the node it belongs - // to, otherwise we won't send it since the source gets skipped. - // This isn't necessary for channel updates and announcement - // signatures since we send those directly to our channel - // counterparty through the gossiper's reliable sender. 
- node1Ann, err := d.fetchNodeAnn(chanInfo.NodeKey1Bytes) - if err != nil { - log.Debugf("Unable to fetch node announcement for "+ - "%x: %v", chanInfo.NodeKey1Bytes, err) - } else { - if nodeKey1, err := chanInfo.NodeKey1(); err == nil { - announcements = append(announcements, networkMsg{ - peer: nMsg.peer, - source: nodeKey1, - msg: node1Ann, - }) - } - } - node2Ann, err := d.fetchNodeAnn(chanInfo.NodeKey2Bytes) - if err != nil { - log.Debugf("Unable to fetch node announcement for "+ - "%x: %v", chanInfo.NodeKey2Bytes, err) - } else { - if nodeKey2, err := chanInfo.NodeKey2(); err == nil { - announcements = append(announcements, networkMsg{ - peer: nMsg.peer, - source: nodeKey2, - msg: node2Ann, - }) - } - } - - nMsg.err <- nil - return announcements - - default: - nMsg.err <- er.New("wrong type of the announcement") - return nil - } -} - -// fetchNodeAnn fetches the latest signed node announcement from our point of -// view for the node with the given public key. -func (d *AuthenticatedGossiper) fetchNodeAnn( - pubKey [33]byte) (*lnwire.NodeAnnouncement, er.R) { - - node, err := d.cfg.Router.FetchLightningNode(pubKey) - if err != nil { - return nil, err - } - - return node.NodeAnnouncement(true) -} - -// isMsgStale determines whether a message retrieved from the backing -// MessageStore is seen as stale by the current graph. -func (d *AuthenticatedGossiper) isMsgStale(msg lnwire.Message) bool { - switch msg := msg.(type) { - case *lnwire.AnnounceSignatures: - chanInfo, _, _, err := d.cfg.Router.GetChannelByID( - msg.ShortChannelID, - ) - - // If the channel cannot be found, it is most likely a leftover - // message for a channel that was closed, so we can consider it - // stale. 
- if channeldb.ErrEdgeNotFound.Is(err) { - return true - } - if err != nil { - log.Debugf("Unable to retrieve channel=%v from graph: "+ - "%v", err) - return false - } - - // If the proof exists in the graph, then we have successfully - // received the remote proof and assembled the full proof, so we - // can safely delete the local proof from the database. - return chanInfo.AuthProof != nil - - case *lnwire.ChannelUpdate: - _, p1, p2, err := d.cfg.Router.GetChannelByID(msg.ShortChannelID) - - // If the channel cannot be found, it is most likely a leftover - // message for a channel that was closed, so we can consider it - // stale. - if channeldb.ErrEdgeNotFound.Is(err) { - return true - } - if err != nil { - log.Debugf("Unable to retrieve channel=%v from graph: "+ - "%v", msg.ShortChannelID, err) - return false - } - - // Otherwise, we'll retrieve the correct policy that we - // currently have stored within our graph to check if this - // message is stale by comparing its timestamp. - var p *channeldb.ChannelEdgePolicy - if msg.ChannelFlags&lnwire.ChanUpdateDirection == 0 { - p = p1 - } else { - p = p2 - } - - // If the policy is still unknown, then we can consider this - // policy fresh. - if p == nil { - return false - } - - timestamp := time.Unix(int64(msg.Timestamp), 0) - return p.LastUpdate.After(timestamp) - - default: - // We'll make sure to not mark any unsupported messages as stale - // to ensure they are not removed. - return false - } -} - -// updateChannel creates a new fully signed update for the channel, and updates -// the underlying graph with the new state. -func (d *AuthenticatedGossiper) updateChannel(info *channeldb.ChannelEdgeInfo, - edge *channeldb.ChannelEdgePolicy) (*lnwire.ChannelAnnouncement, - *lnwire.ChannelUpdate, er.R) { - - // Parse the unsigned edge into a channel update. 
- chanUpdate := netann.UnsignedChannelUpdateFromEdge(info, edge) - - // We'll generate a new signature over a digest of the channel - // announcement itself and update the timestamp to ensure it propagate. - err := netann.SignChannelUpdate( - d.cfg.AnnSigner, d.selfKey, chanUpdate, - netann.ChanUpdSetTimestamp, - ) - if err != nil { - return nil, nil, err - } - - // Next, we'll set the new signature in place, and update the reference - // in the backing slice. - edge.LastUpdate = time.Unix(int64(chanUpdate.Timestamp), 0) - edge.SigBytes = chanUpdate.Signature.ToSignatureBytes() - - // To ensure that our signature is valid, we'll verify it ourself - // before committing it to the slice returned. - err = routing.ValidateChannelUpdateAnn(d.selfKey, info.Capacity, chanUpdate) - if err != nil { - return nil, nil, er.Errorf("generated invalid channel "+ - "update sig: %v", err) - } - - // Finally, we'll write the new edge policy to disk. - if err := d.cfg.Router.UpdateEdge(edge); err != nil { - return nil, nil, err - } - - // We'll also create the original channel announcement so the two can - // be broadcast along side each other (if necessary), but only if we - // have a full channel announcement for this channel. 
- var chanAnn *lnwire.ChannelAnnouncement - if info.AuthProof != nil { - chanID := lnwire.NewShortChanIDFromInt(info.ChannelID) - chanAnn = &lnwire.ChannelAnnouncement{ - ShortChannelID: chanID, - NodeID1: info.NodeKey1Bytes, - NodeID2: info.NodeKey2Bytes, - ChainHash: info.ChainHash, - BitcoinKey1: info.BitcoinKey1Bytes, - Features: lnwire.NewRawFeatureVector(), - BitcoinKey2: info.BitcoinKey2Bytes, - ExtraOpaqueData: edge.ExtraOpaqueData, - } - chanAnn.NodeSig1, err = lnwire.NewSigFromRawSignature( - info.AuthProof.NodeSig1Bytes, - ) - if err != nil { - return nil, nil, err - } - chanAnn.NodeSig2, err = lnwire.NewSigFromRawSignature( - info.AuthProof.NodeSig2Bytes, - ) - if err != nil { - return nil, nil, err - } - chanAnn.BitcoinSig1, err = lnwire.NewSigFromRawSignature( - info.AuthProof.BitcoinSig1Bytes, - ) - if err != nil { - return nil, nil, err - } - chanAnn.BitcoinSig2, err = lnwire.NewSigFromRawSignature( - info.AuthProof.BitcoinSig2Bytes, - ) - if err != nil { - return nil, nil, err - } - } - - return chanAnn, chanUpdate, err -} - -// SyncManager returns the gossiper's SyncManager instance. 
-func (d *AuthenticatedGossiper) SyncManager() *SyncManager { - return d.syncMgr -} diff --git a/lnd/discovery/gossiper_test.go b/lnd/discovery/gossiper_test.go deleted file mode 100644 index 9bc142c8..00000000 --- a/lnd/discovery/gossiper_test.go +++ /dev/null @@ -1,3943 +0,0 @@ -package discovery - -import ( - "bytes" - "encoding/hex" - "io/ioutil" - "math/big" - prand "math/rand" - "net" - "os" - "reflect" - "strings" - "sync" - "testing" - "time" - - "github.com/davecgh/go-spew/spew" - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/lnd/chainntnfs" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/lnpeer" - "github.com/pkt-cash/pktd/lnd/lntest/mock" - "github.com/pkt-cash/pktd/lnd/lntest/wait" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/netann" - "github.com/pkt-cash/pktd/lnd/routing" - "github.com/pkt-cash/pktd/lnd/routing/route" - "github.com/pkt-cash/pktd/lnd/ticker" - "github.com/pkt-cash/pktd/wire" -) - -var ( - testAddr = &net.TCPAddr{IP: (net.IP)([]byte{0xA, 0x0, 0x0, 0x1}), - Port: 9000} - testAddrs = []net.Addr{testAddr} - testFeatures = lnwire.NewRawFeatureVector() - testSig = &btcec.Signature{ - R: new(big.Int), - S: new(big.Int), - } - _, _ = testSig.R.SetString("63724406601629180062774974542967536251589935445068131219452686511677818569431", 10) - _, _ = testSig.S.SetString("18801056069249825825291287104931333862866033135609736119018462340006816851118", 10) - - bitcoinKeyPriv1, _ = btcec.NewPrivateKey(btcec.S256()) - bitcoinKeyPub1 = bitcoinKeyPriv1.PubKey() - - nodeKeyPriv1, _ = btcec.NewPrivateKey(btcec.S256()) - nodeKeyPub1 = nodeKeyPriv1.PubKey() - - bitcoinKeyPriv2, _ = btcec.NewPrivateKey(btcec.S256()) - bitcoinKeyPub2 = bitcoinKeyPriv2.PubKey() - - nodeKeyPriv2, _ = btcec.NewPrivateKey(btcec.S256()) - nodeKeyPub2 = nodeKeyPriv2.PubKey() - - trickleDelay = 
time.Millisecond * 100 - retransmitDelay = time.Hour * 1 - proofMatureDelta uint32 - - // The test timestamp + rebroadcast interval makes sure messages won't - // be rebroadcasted automaticallty during the tests. - testTimestamp = uint32(1234567890) - rebroadcastInterval = time.Hour * 1000000 -) - -// makeTestDB creates a new instance of the ChannelDB for testing purposes. A -// callback which cleans up the created temporary directories is also returned -// and intended to be executed after the test completes. -func makeTestDB() (*channeldb.DB, func(), er.R) { - // First, create a temporary directory to be used for the duration of - // this test. - tempDirName, errr := ioutil.TempDir("", "channeldb") - if errr != nil { - return nil, nil, er.E(errr) - } - - // Next, create channeldb for the first time. - cdb, err := channeldb.Open(tempDirName) - if err != nil { - return nil, nil, err - } - - cleanUp := func() { - cdb.Close() - os.RemoveAll(tempDirName) - } - - return cdb, cleanUp, nil -} - -type mockGraphSource struct { - bestHeight uint32 - - mu sync.Mutex - nodes []channeldb.LightningNode - infos map[uint64]channeldb.ChannelEdgeInfo - edges map[uint64][]channeldb.ChannelEdgePolicy - zombies map[uint64][][33]byte -} - -func newMockRouter(height uint32) *mockGraphSource { - return &mockGraphSource{ - bestHeight: height, - infos: make(map[uint64]channeldb.ChannelEdgeInfo), - edges: make(map[uint64][]channeldb.ChannelEdgePolicy), - zombies: make(map[uint64][][33]byte), - } -} - -var _ routing.ChannelGraphSource = (*mockGraphSource)(nil) - -func (r *mockGraphSource) AddNode(node *channeldb.LightningNode) er.R { - r.mu.Lock() - defer r.mu.Unlock() - - r.nodes = append(r.nodes, *node) - return nil -} - -func (r *mockGraphSource) AddEdge(info *channeldb.ChannelEdgeInfo) er.R { - r.mu.Lock() - defer r.mu.Unlock() - - if _, ok := r.infos[info.ChannelID]; ok { - return er.New("info already exist") - } - - r.infos[info.ChannelID] = *info - return nil -} - -func (r 
*mockGraphSource) UpdateEdge(edge *channeldb.ChannelEdgePolicy) er.R { - r.mu.Lock() - defer r.mu.Unlock() - - if len(r.edges[edge.ChannelID]) == 0 { - r.edges[edge.ChannelID] = make([]channeldb.ChannelEdgePolicy, 2) - } - - if edge.ChannelFlags&lnwire.ChanUpdateDirection == 0 { - r.edges[edge.ChannelID][0] = *edge - } else { - r.edges[edge.ChannelID][1] = *edge - } - - return nil -} - -func (r *mockGraphSource) CurrentBlockHeight() (uint32, er.R) { - return r.bestHeight, nil -} - -func (r *mockGraphSource) AddProof(chanID lnwire.ShortChannelID, - proof *channeldb.ChannelAuthProof) er.R { - - r.mu.Lock() - defer r.mu.Unlock() - - chanIDInt := chanID.ToUint64() - info, ok := r.infos[chanIDInt] - if !ok { - return er.New("channel does not exist") - } - - info.AuthProof = proof - r.infos[chanIDInt] = info - - return nil -} - -func (r *mockGraphSource) ForEachNode(func(node *channeldb.LightningNode) er.R) er.R { - return nil -} - -func (r *mockGraphSource) ForAllOutgoingChannels(cb func(i *channeldb.ChannelEdgeInfo, - c *channeldb.ChannelEdgePolicy) er.R) er.R { - - r.mu.Lock() - defer r.mu.Unlock() - - chans := make(map[uint64]channeldb.ChannelEdge) - for _, info := range r.infos { - info := info - - edgeInfo := chans[info.ChannelID] - edgeInfo.Info = &info - chans[info.ChannelID] = edgeInfo - } - for _, edges := range r.edges { - edges := edges - - edge := chans[edges[0].ChannelID] - edge.Policy1 = &edges[0] - chans[edges[0].ChannelID] = edge - } - - for _, channel := range chans { - cb(channel.Info, channel.Policy1) - } - - return nil -} - -func (r *mockGraphSource) ForEachChannel(func(chanInfo *channeldb.ChannelEdgeInfo, - e1, e2 *channeldb.ChannelEdgePolicy) er.R) er.R { - return nil -} - -func (r *mockGraphSource) GetChannelByID(chanID lnwire.ShortChannelID) ( - *channeldb.ChannelEdgeInfo, - *channeldb.ChannelEdgePolicy, - *channeldb.ChannelEdgePolicy, er.R) { - - r.mu.Lock() - defer r.mu.Unlock() - - chanIDInt := chanID.ToUint64() - chanInfo, ok := 
r.infos[chanIDInt] - if !ok { - pubKeys, isZombie := r.zombies[chanIDInt] - if !isZombie { - return nil, nil, nil, channeldb.ErrEdgeNotFound.Default() - } - - return &channeldb.ChannelEdgeInfo{ - NodeKey1Bytes: pubKeys[0], - NodeKey2Bytes: pubKeys[1], - }, nil, nil, channeldb.ErrZombieEdge.Default() - } - - edges := r.edges[chanID.ToUint64()] - if len(edges) == 0 { - return &chanInfo, nil, nil, nil - } - - var edge1 *channeldb.ChannelEdgePolicy - if !reflect.DeepEqual(edges[0], channeldb.ChannelEdgePolicy{}) { - edge1 = &edges[0] - } - - var edge2 *channeldb.ChannelEdgePolicy - if !reflect.DeepEqual(edges[1], channeldb.ChannelEdgePolicy{}) { - edge2 = &edges[1] - } - - return &chanInfo, edge1, edge2, nil -} - -func (r *mockGraphSource) FetchLightningNode( - nodePub route.Vertex) (*channeldb.LightningNode, er.R) { - - for _, node := range r.nodes { - if bytes.Equal(nodePub[:], node.PubKeyBytes[:]) { - return &node, nil - } - } - - return nil, channeldb.ErrGraphNodeNotFound.Default() -} - -// IsStaleNode returns true if the graph source has a node announcement for the -// target node with a more recent timestamp. -func (r *mockGraphSource) IsStaleNode(nodePub route.Vertex, timestamp time.Time) bool { - r.mu.Lock() - defer r.mu.Unlock() - - for _, node := range r.nodes { - if node.PubKeyBytes == nodePub { - return node.LastUpdate.After(timestamp) || - node.LastUpdate.Equal(timestamp) - } - } - - // If we did not find the node among our existing graph nodes, we - // require the node to already have a channel in the graph to not be - // considered stale. - for _, info := range r.infos { - if info.NodeKey1Bytes == nodePub { - return false - } - if info.NodeKey2Bytes == nodePub { - return false - } - } - return true -} - -// IsPublicNode determines whether the given vertex is seen as a public node in -// the graph from the graph's source node's point of view. 
-func (r *mockGraphSource) IsPublicNode(node route.Vertex) (bool, er.R) { - for _, info := range r.infos { - if !bytes.Equal(node[:], info.NodeKey1Bytes[:]) && - !bytes.Equal(node[:], info.NodeKey2Bytes[:]) { - continue - } - - if info.AuthProof != nil { - return true, nil - } - } - return false, nil -} - -// IsKnownEdge returns true if the graph source already knows of the passed -// channel ID either as a live or zombie channel. -func (r *mockGraphSource) IsKnownEdge(chanID lnwire.ShortChannelID) bool { - r.mu.Lock() - defer r.mu.Unlock() - - chanIDInt := chanID.ToUint64() - _, exists := r.infos[chanIDInt] - _, isZombie := r.zombies[chanIDInt] - return exists || isZombie -} - -// IsStaleEdgePolicy returns true if the graph source has a channel edge for -// the passed channel ID (and flags) that have a more recent timestamp. -func (r *mockGraphSource) IsStaleEdgePolicy(chanID lnwire.ShortChannelID, - timestamp time.Time, flags lnwire.ChanUpdateChanFlags) bool { - - r.mu.Lock() - defer r.mu.Unlock() - - chanIDInt := chanID.ToUint64() - edges, ok := r.edges[chanIDInt] - if !ok { - // Since the edge doesn't exist, we'll check our zombie index as - // well. - _, isZombie := r.zombies[chanIDInt] - if !isZombie { - return false - } - - // Since it exists within our zombie index, we'll check that it - // respects the router's live edge horizon to determine whether - // it is stale or not. - return time.Since(timestamp) > routing.DefaultChannelPruneExpiry - } - - switch { - case flags&lnwire.ChanUpdateDirection == 0 && - !reflect.DeepEqual(edges[0], channeldb.ChannelEdgePolicy{}): - - return !timestamp.After(edges[0].LastUpdate) - - case flags&lnwire.ChanUpdateDirection == 1 && - !reflect.DeepEqual(edges[1], channeldb.ChannelEdgePolicy{}): - - return !timestamp.After(edges[1].LastUpdate) - - default: - return false - } -} - -// MarkEdgeLive clears an edge from our zombie index, deeming it as live. -// -// NOTE: This method is part of the ChannelGraphSource interface. 
-func (r *mockGraphSource) MarkEdgeLive(chanID lnwire.ShortChannelID) er.R { - r.mu.Lock() - defer r.mu.Unlock() - delete(r.zombies, chanID.ToUint64()) - return nil -} - -// MarkEdgeZombie marks an edge as a zombie within our zombie index. -func (r *mockGraphSource) MarkEdgeZombie(chanID lnwire.ShortChannelID, pubKey1, - pubKey2 [33]byte) er.R { - - r.mu.Lock() - defer r.mu.Unlock() - r.zombies[chanID.ToUint64()] = [][33]byte{pubKey1, pubKey2} - return nil -} - -type mockNotifier struct { - clientCounter uint32 - epochClients map[uint32]chan *chainntnfs.BlockEpoch - - sync.RWMutex -} - -func newMockNotifier() *mockNotifier { - return &mockNotifier{ - epochClients: make(map[uint32]chan *chainntnfs.BlockEpoch), - } -} - -func (m *mockNotifier) RegisterConfirmationsNtfn(txid *chainhash.Hash, - _ []byte, numConfs, _ uint32) (*chainntnfs.ConfirmationEvent, er.R) { - - return nil, nil -} - -func (m *mockNotifier) RegisterSpendNtfn(outpoint *wire.OutPoint, _ []byte, - _ uint32) (*chainntnfs.SpendEvent, er.R) { - return nil, nil -} - -func (m *mockNotifier) notifyBlock(hash chainhash.Hash, height uint32) { - m.RLock() - defer m.RUnlock() - - for _, client := range m.epochClients { - client <- &chainntnfs.BlockEpoch{ - Height: int32(height), - Hash: &hash, - } - } -} - -func (m *mockNotifier) RegisterBlockEpochNtfn( - bestBlock *chainntnfs.BlockEpoch) (*chainntnfs.BlockEpochEvent, er.R) { - m.RLock() - defer m.RUnlock() - - epochChan := make(chan *chainntnfs.BlockEpoch) - clientID := m.clientCounter - m.clientCounter++ - m.epochClients[clientID] = epochChan - - return &chainntnfs.BlockEpochEvent{ - Epochs: epochChan, - Cancel: func() {}, - }, nil -} - -func (m *mockNotifier) Start() er.R { - return nil -} - -func (m *mockNotifier) Started() bool { - return true -} - -func (m *mockNotifier) Stop() er.R { - return nil -} - -type annBatch struct { - nodeAnn1 *lnwire.NodeAnnouncement - nodeAnn2 *lnwire.NodeAnnouncement - - localChanAnn *lnwire.ChannelAnnouncement - 
remoteChanAnn *lnwire.ChannelAnnouncement - - chanUpdAnn1 *lnwire.ChannelUpdate - chanUpdAnn2 *lnwire.ChannelUpdate - - localProofAnn *lnwire.AnnounceSignatures - remoteProofAnn *lnwire.AnnounceSignatures -} - -func createAnnouncements(blockHeight uint32) (*annBatch, er.R) { - var err er.R - var batch annBatch - timestamp := testTimestamp - - batch.nodeAnn1, err = createNodeAnnouncement(nodeKeyPriv1, timestamp) - if err != nil { - return nil, err - } - - batch.nodeAnn2, err = createNodeAnnouncement(nodeKeyPriv2, timestamp) - if err != nil { - return nil, err - } - - batch.remoteChanAnn, err = createRemoteChannelAnnouncement(blockHeight) - if err != nil { - return nil, err - } - - batch.remoteProofAnn = &lnwire.AnnounceSignatures{ - ShortChannelID: lnwire.ShortChannelID{ - BlockHeight: blockHeight, - }, - NodeSignature: batch.remoteChanAnn.NodeSig2, - BitcoinSignature: batch.remoteChanAnn.BitcoinSig2, - } - - batch.localChanAnn, err = createRemoteChannelAnnouncement(blockHeight) - if err != nil { - return nil, err - } - - batch.localProofAnn = &lnwire.AnnounceSignatures{ - ShortChannelID: lnwire.ShortChannelID{ - BlockHeight: blockHeight, - }, - NodeSignature: batch.localChanAnn.NodeSig1, - BitcoinSignature: batch.localChanAnn.BitcoinSig1, - } - - batch.chanUpdAnn1, err = createUpdateAnnouncement( - blockHeight, 0, nodeKeyPriv1, timestamp, - ) - if err != nil { - return nil, err - } - - batch.chanUpdAnn2, err = createUpdateAnnouncement( - blockHeight, 1, nodeKeyPriv2, timestamp, - ) - if err != nil { - return nil, err - } - - return &batch, nil - -} - -func createNodeAnnouncement(priv *btcec.PrivateKey, - timestamp uint32, extraBytes ...[]byte) (*lnwire.NodeAnnouncement, er.R) { - - var err er.R - k := hex.EncodeToString(priv.Serialize()) - alias, err := lnwire.NewNodeAlias("kek" + k[:10]) - if err != nil { - return nil, err - } - - a := &lnwire.NodeAnnouncement{ - Timestamp: timestamp, - Addresses: testAddrs, - Alias: alias, - Features: testFeatures, - } - 
copy(a.NodeID[:], priv.PubKey().SerializeCompressed()) - if len(extraBytes) == 1 { - a.ExtraOpaqueData = extraBytes[0] - } - - signer := mock.SingleSigner{Privkey: priv} - sig, err := netann.SignAnnouncement(&signer, priv.PubKey(), a) - if err != nil { - return nil, err - } - - a.Signature, err = lnwire.NewSigFromSignature(sig) - if err != nil { - return nil, err - } - - return a, nil -} - -func createUpdateAnnouncement(blockHeight uint32, - flags lnwire.ChanUpdateChanFlags, - nodeKey *btcec.PrivateKey, timestamp uint32, - extraBytes ...[]byte) (*lnwire.ChannelUpdate, er.R) { - - var err er.R - - htlcMinMsat := lnwire.MilliSatoshi(prand.Int63()) - a := &lnwire.ChannelUpdate{ - ShortChannelID: lnwire.ShortChannelID{ - BlockHeight: blockHeight, - }, - Timestamp: timestamp, - MessageFlags: lnwire.ChanUpdateOptionMaxHtlc, - ChannelFlags: flags, - TimeLockDelta: uint16(prand.Int63()), - HtlcMinimumMsat: htlcMinMsat, - - // Since the max HTLC must be greater than the min HTLC to pass channel - // update validation, set it to double the min htlc. 
- HtlcMaximumMsat: 2 * htlcMinMsat, - FeeRate: uint32(prand.Int31()), - BaseFee: uint32(prand.Int31()), - } - if len(extraBytes) == 1 { - a.ExtraOpaqueData = extraBytes[0] - } - - err = signUpdate(nodeKey, a) - if err != nil { - return nil, err - } - - return a, nil -} - -func signUpdate(nodeKey *btcec.PrivateKey, a *lnwire.ChannelUpdate) er.R { - pub := nodeKey.PubKey() - signer := mock.SingleSigner{Privkey: nodeKey} - sig, err := netann.SignAnnouncement(&signer, pub, a) - if err != nil { - return err - } - - a.Signature, err = lnwire.NewSigFromSignature(sig) - if err != nil { - return err - } - - return nil -} - -func createAnnouncementWithoutProof(blockHeight uint32, - extraBytes ...[]byte) *lnwire.ChannelAnnouncement { - - a := &lnwire.ChannelAnnouncement{ - ShortChannelID: lnwire.ShortChannelID{ - BlockHeight: blockHeight, - TxIndex: 0, - TxPosition: 0, - }, - Features: testFeatures, - } - copy(a.NodeID1[:], nodeKeyPub1.SerializeCompressed()) - copy(a.NodeID2[:], nodeKeyPub2.SerializeCompressed()) - copy(a.BitcoinKey1[:], bitcoinKeyPub1.SerializeCompressed()) - copy(a.BitcoinKey2[:], bitcoinKeyPub2.SerializeCompressed()) - if len(extraBytes) == 1 { - a.ExtraOpaqueData = extraBytes[0] - } - - return a -} - -func createRemoteChannelAnnouncement(blockHeight uint32, - extraBytes ...[]byte) (*lnwire.ChannelAnnouncement, er.R) { - - a := createAnnouncementWithoutProof(blockHeight, extraBytes...) 
- - pub := nodeKeyPriv1.PubKey() - signer := mock.SingleSigner{Privkey: nodeKeyPriv1} - sig, err := netann.SignAnnouncement(&signer, pub, a) - if err != nil { - return nil, err - } - a.NodeSig1, err = lnwire.NewSigFromSignature(sig) - if err != nil { - return nil, err - } - - pub = nodeKeyPriv2.PubKey() - signer = mock.SingleSigner{Privkey: nodeKeyPriv2} - sig, err = netann.SignAnnouncement(&signer, pub, a) - if err != nil { - return nil, err - } - a.NodeSig2, err = lnwire.NewSigFromSignature(sig) - if err != nil { - return nil, err - } - - pub = bitcoinKeyPriv1.PubKey() - signer = mock.SingleSigner{Privkey: bitcoinKeyPriv1} - sig, err = netann.SignAnnouncement(&signer, pub, a) - if err != nil { - return nil, err - } - a.BitcoinSig1, err = lnwire.NewSigFromSignature(sig) - if err != nil { - return nil, err - } - - pub = bitcoinKeyPriv2.PubKey() - signer = mock.SingleSigner{Privkey: bitcoinKeyPriv2} - sig, err = netann.SignAnnouncement(&signer, pub, a) - if err != nil { - return nil, err - } - a.BitcoinSig2, err = lnwire.NewSigFromSignature(sig) - if err != nil { - return nil, err - } - - return a, nil -} - -type testCtx struct { - gossiper *AuthenticatedGossiper - router *mockGraphSource - notifier *mockNotifier - broadcastedMessage chan msgWithSenders -} - -func createTestCtx(startHeight uint32) (*testCtx, func(), er.R) { - // Next we'll initialize an instance of the channel router with mock - // versions of the chain and channel notifier. As we don't need to test - // any p2p functionality, the peer send and switch send, - // broadcast functions won't be populated. 
- notifier := newMockNotifier() - router := newMockRouter(startHeight) - - db, cleanUpDb, err := makeTestDB() - if err != nil { - return nil, nil, err - } - - waitingProofStore, err := channeldb.NewWaitingProofStore(db) - if err != nil { - cleanUpDb() - return nil, nil, err - } - - broadcastedMessage := make(chan msgWithSenders, 10) - gossiper := New(Config{ - Notifier: notifier, - Broadcast: func(senders map[route.Vertex]struct{}, - msgs ...lnwire.Message) er.R { - - for _, msg := range msgs { - broadcastedMessage <- msgWithSenders{ - msg: msg, - senders: senders, - } - } - - return nil - }, - NotifyWhenOnline: func(target [33]byte, - peerChan chan<- lnpeer.Peer) { - - pk, _ := btcec.ParsePubKey(target[:], btcec.S256()) - peerChan <- &mockPeer{pk, nil, nil} - }, - NotifyWhenOffline: func(_ [33]byte) <-chan struct{} { - c := make(chan struct{}) - return c - }, - SelfNodeAnnouncement: func(bool) (lnwire.NodeAnnouncement, er.R) { - return lnwire.NodeAnnouncement{ - Timestamp: testTimestamp, - }, nil - }, - Router: router, - TrickleDelay: trickleDelay, - RetransmitTicker: ticker.NewForce(retransmitDelay), - RebroadcastInterval: rebroadcastInterval, - ProofMatureDelta: proofMatureDelta, - WaitingProofStore: waitingProofStore, - MessageStore: newMockMessageStore(), - RotateTicker: ticker.NewForce(DefaultSyncerRotationInterval), - HistoricalSyncTicker: ticker.NewForce(DefaultHistoricalSyncInterval), - NumActiveSyncers: 3, - AnnSigner: &mock.SingleSigner{Privkey: nodeKeyPriv1}, - SubBatchDelay: time.Second * 5, - MinimumBatchSize: 10, - }, nodeKeyPub1) - - if err := gossiper.Start(); err != nil { - cleanUpDb() - return nil, nil, er.Errorf("unable to start router: %v", err) - } - - // Mark the graph as synced in order to allow the announcements to be - // broadcast. 
- gossiper.syncMgr.markGraphSynced() - - cleanUp := func() { - gossiper.Stop() - cleanUpDb() - } - - return &testCtx{ - router: router, - notifier: notifier, - gossiper: gossiper, - broadcastedMessage: broadcastedMessage, - }, cleanUp, nil -} - -// TestProcessAnnouncement checks that mature announcements are propagated to -// the router subsystem. -func TestProcessAnnouncement(t *testing.T) { - t.Parallel() - - timestamp := testTimestamp - ctx, cleanup, err := createTestCtx(0) - if err != nil { - t.Fatalf("can't create context: %v", err) - } - defer cleanup() - - assertSenderExistence := func(sender *btcec.PublicKey, msg msgWithSenders) { - if _, ok := msg.senders[route.NewVertex(sender)]; !ok { - t.Fatalf("sender=%x not present in %v", - sender.SerializeCompressed(), spew.Sdump(msg)) - } - } - - nodePeer := &mockPeer{nodeKeyPriv1.PubKey(), nil, nil} - - // First, we'll craft a valid remote channel announcement and send it to - // the gossiper so that it can be processed. - ca, err := createRemoteChannelAnnouncement(0) - if err != nil { - t.Fatalf("can't create channel announcement: %v", err) - } - - select { - case err = <-ctx.gossiper.ProcessRemoteAnnouncement(ca, nodePeer): - case <-time.After(2 * time.Second): - t.Fatal("remote announcement not processed") - } - if err != nil { - t.Fatalf("can't process remote announcement: %v", err) - } - - // The announcement should be broadcast and included in our local view - // of the graph. - select { - case msg := <-ctx.broadcastedMessage: - assertSenderExistence(nodePeer.IdentityKey(), msg) - case <-time.After(2 * trickleDelay): - t.Fatal("announcement wasn't proceeded") - } - - if len(ctx.router.infos) != 1 { - t.Fatalf("edge wasn't added to router: %v", err) - } - - // We'll then craft the channel policy of the remote party and also send - // it to the gossiper. 
- ua, err := createUpdateAnnouncement(0, 0, nodeKeyPriv1, timestamp) - if err != nil { - t.Fatalf("can't create update announcement: %v", err) - } - - select { - case err = <-ctx.gossiper.ProcessRemoteAnnouncement(ua, nodePeer): - case <-time.After(2 * time.Second): - t.Fatal("remote announcement not processed") - } - if err != nil { - t.Fatalf("can't process remote announcement: %v", err) - } - - // The channel policy should be broadcast to the rest of the network. - select { - case msg := <-ctx.broadcastedMessage: - assertSenderExistence(nodePeer.IdentityKey(), msg) - case <-time.After(2 * trickleDelay): - t.Fatal("announcement wasn't proceeded") - } - - if len(ctx.router.edges) != 1 { - t.Fatalf("edge update wasn't added to router: %v", err) - } - - // Finally, we'll craft the remote party's node announcement. - na, err := createNodeAnnouncement(nodeKeyPriv1, timestamp) - if err != nil { - t.Fatalf("can't create node announcement: %v", err) - } - - select { - case err = <-ctx.gossiper.ProcessRemoteAnnouncement(na, nodePeer): - case <-time.After(2 * time.Second): - t.Fatal("remote announcement not processed") - } - if err != nil { - t.Fatalf("can't process remote announcement: %v", err) - } - - // It should also be broadcast to the network and included in our local - // view of the graph. - select { - case msg := <-ctx.broadcastedMessage: - assertSenderExistence(nodePeer.IdentityKey(), msg) - case <-time.After(2 * trickleDelay): - t.Fatal("announcement wasn't proceeded") - } - - if len(ctx.router.nodes) != 1 { - t.Fatalf("node wasn't added to router: %v", err) - } -} - -// TestPrematureAnnouncement checks that premature announcements are -// not propagated to the router subsystem until block with according -// block height received. 
-func TestPrematureAnnouncement(t *testing.T) { - t.Parallel() - - timestamp := testTimestamp - - ctx, cleanup, err := createTestCtx(0) - if err != nil { - t.Fatalf("can't create context: %v", err) - } - defer cleanup() - - _, err = createNodeAnnouncement(nodeKeyPriv1, timestamp) - if err != nil { - t.Fatalf("can't create node announcement: %v", err) - } - - nodePeer := &mockPeer{nodeKeyPriv1.PubKey(), nil, nil} - - // Pretending that we receive the valid channel announcement from - // remote side, but block height of this announcement is greater than - // highest know to us, for that reason it should be added to the - // repeat/premature batch. - ca, err := createRemoteChannelAnnouncement(1) - if err != nil { - t.Fatalf("can't create channel announcement: %v", err) - } - - select { - case <-ctx.gossiper.ProcessRemoteAnnouncement(ca, nodePeer): - t.Fatal("announcement was proceeded") - case <-time.After(100 * time.Millisecond): - } - - if len(ctx.router.infos) != 0 { - t.Fatal("edge was added to router") - } - - // Pretending that we receive the valid channel update announcement from - // remote side, but block height of this announcement is greater than - // highest know to us, for that reason it should be added to the - // repeat/premature batch. - ua, err := createUpdateAnnouncement(1, 0, nodeKeyPriv1, timestamp) - if err != nil { - t.Fatalf("can't create update announcement: %v", err) - } - - select { - case <-ctx.gossiper.ProcessRemoteAnnouncement(ua, nodePeer): - t.Fatal("announcement was proceeded") - case <-time.After(100 * time.Millisecond): - } - - if len(ctx.router.edges) != 0 { - t.Fatal("edge update was added to router") - } - - // Generate new block and waiting the previously added announcements - // to be proceeded. 
- newBlock := &wire.MsgBlock{} - ctx.notifier.notifyBlock(newBlock.Header.BlockHash(), 1) - - select { - case <-ctx.broadcastedMessage: - case <-time.After(2 * trickleDelay): - t.Fatal("announcement wasn't broadcasted") - } - - if len(ctx.router.infos) != 1 { - t.Fatalf("edge wasn't added to router: %v", err) - } - - select { - case <-ctx.broadcastedMessage: - case <-time.After(2 * trickleDelay): - t.Fatal("announcement wasn't broadcasted") - } - - if len(ctx.router.edges) != 1 { - t.Fatalf("edge update wasn't added to router: %v", err) - } -} - -// TestSignatureAnnouncementLocalFirst ensures that the AuthenticatedGossiper -// properly processes partial and fully announcement signatures message. -func TestSignatureAnnouncementLocalFirst(t *testing.T) { - t.Parallel() - - ctx, cleanup, err := createTestCtx(uint32(proofMatureDelta)) - if err != nil { - t.Fatalf("can't create context: %v", err) - } - defer cleanup() - - // Set up a channel that we can use to inspect the messages sent - // directly from the gossiper. - sentMsgs := make(chan lnwire.Message, 10) - ctx.gossiper.reliableSender.cfg.NotifyWhenOnline = func(target [33]byte, - peerChan chan<- lnpeer.Peer) { - - pk, _ := btcec.ParsePubKey(target[:], btcec.S256()) - - select { - case peerChan <- &mockPeer{pk, sentMsgs, ctx.gossiper.quit}: - case <-ctx.gossiper.quit: - } - } - - batch, err := createAnnouncements(0) - if err != nil { - t.Fatalf("can't generate announcements: %v", err) - } - - localKey, err := btcec.ParsePubKey(batch.nodeAnn1.NodeID[:], btcec.S256()) - if err != nil { - t.Fatalf("unable to parse pubkey: %v", err) - } - remoteKey, err := btcec.ParsePubKey(batch.nodeAnn2.NodeID[:], btcec.S256()) - if err != nil { - t.Fatalf("unable to parse pubkey: %v", err) - } - remotePeer := &mockPeer{remoteKey, sentMsgs, ctx.gossiper.quit} - - // Recreate lightning network topology. Initialize router with channel - // between two nodes. 
- select { - case err = <-ctx.gossiper.ProcessLocalAnnouncement( - batch.localChanAnn, localKey, - ): - case <-time.After(2 * time.Second): - t.Fatal("did not process local announcement") - } - if err != nil { - t.Fatalf("unable to process channel ann: %v", err) - } - select { - case <-ctx.broadcastedMessage: - t.Fatal("channel announcement was broadcast") - case <-time.After(2 * trickleDelay): - } - - select { - case err = <-ctx.gossiper.ProcessLocalAnnouncement( - batch.chanUpdAnn1, localKey, - ): - case <-time.After(2 * time.Second): - t.Fatal("did not process local announcement") - } - if err != nil { - t.Fatalf("unable to process channel update: %v", err) - } - select { - case <-ctx.broadcastedMessage: - t.Fatal("channel update announcement was broadcast") - case <-time.After(2 * trickleDelay): - } - - select { - case err = <-ctx.gossiper.ProcessLocalAnnouncement( - batch.nodeAnn1, localKey, - ): - case <-time.After(2 * time.Second): - t.Fatal("did not process local announcement") - } - if err != nil { - t.Fatalf("unable to process node ann: %v", err) - } - select { - case <-ctx.broadcastedMessage: - t.Fatal("node announcement was broadcast") - case <-time.After(2 * trickleDelay): - } - - // The local ChannelUpdate should now be sent directly to the remote peer, - // such that the edge can be used for routing, regardless if this channel - // is announced or not (private channel). 
- select { - case msg := <-sentMsgs: - assertMessage(t, batch.chanUpdAnn1, msg) - case <-time.After(1 * time.Second): - t.Fatal("gossiper did not send channel update to peer") - } - - select { - case err = <-ctx.gossiper.ProcessRemoteAnnouncement( - batch.chanUpdAnn2, remotePeer, - ): - case <-time.After(2 * time.Second): - t.Fatal("did not process remote announcement") - } - if err != nil { - t.Fatalf("unable to process channel update: %v", err) - } - select { - case <-ctx.broadcastedMessage: - t.Fatal("channel update announcement was broadcast") - case <-time.After(2 * trickleDelay): - } - - select { - case err = <-ctx.gossiper.ProcessRemoteAnnouncement( - batch.nodeAnn2, remotePeer, - ): - case <-time.After(2 * time.Second): - t.Fatal("did not process remote announcement") - } - if err != nil { - t.Fatalf("unable to process node ann: %v", err) - } - select { - case <-ctx.broadcastedMessage: - t.Fatal("node announcement was broadcast") - case <-time.After(2 * trickleDelay): - } - - // Pretending that we receive local channel announcement from funding - // manager, thereby kick off the announcement exchange process. 
- select { - case err = <-ctx.gossiper.ProcessLocalAnnouncement( - batch.localProofAnn, localKey, - ): - case <-time.After(2 * time.Second): - t.Fatal("did not process remote announcement") - } - if err != nil { - t.Fatalf("unable to process local proof: %v", err) - } - - select { - case <-ctx.broadcastedMessage: - t.Fatal("announcements were broadcast") - case <-time.After(2 * trickleDelay): - } - - number := 0 - if err := ctx.gossiper.cfg.WaitingProofStore.ForAll( - func(*channeldb.WaitingProof) er.R { - number++ - return nil - }, - func() { - number = 0 - }, - ); err != nil { - t.Fatalf("unable to retrieve objects from store: %v", err) - } - - if number != 1 { - t.Fatal("wrong number of objects in storage") - } - - select { - case err = <-ctx.gossiper.ProcessRemoteAnnouncement( - batch.remoteProofAnn, remotePeer, - ): - case <-time.After(2 * time.Second): - t.Fatal("did not process remote announcement") - } - if err != nil { - t.Fatalf("unable to process remote proof: %v", err) - } - - for i := 0; i < 5; i++ { - select { - case <-ctx.broadcastedMessage: - case <-time.After(time.Second): - t.Fatal("announcement wasn't broadcast") - } - } - - number = 0 - if err := ctx.gossiper.cfg.WaitingProofStore.ForAll( - func(*channeldb.WaitingProof) er.R { - number++ - return nil - }, - func() { - number = 0 - }, - ); err != nil && !channeldb.ErrWaitingProofNotFound.Is(err) { - t.Fatalf("unable to retrieve objects from store: %v", err) - } - - if number != 0 { - t.Fatal("waiting proof should be removed from storage") - } -} - -// TestOrphanSignatureAnnouncement ensures that the gossiper properly -// processes announcement with unknown channel ids. -func TestOrphanSignatureAnnouncement(t *testing.T) { - t.Parallel() - - ctx, cleanup, err := createTestCtx(uint32(proofMatureDelta)) - if err != nil { - t.Fatalf("can't create context: %v", err) - } - defer cleanup() - - // Set up a channel that we can use to inspect the messages sent - // directly from the gossiper. 
- sentMsgs := make(chan lnwire.Message, 10) - ctx.gossiper.reliableSender.cfg.NotifyWhenOnline = func(target [33]byte, - peerChan chan<- lnpeer.Peer) { - - pk, _ := btcec.ParsePubKey(target[:], btcec.S256()) - - select { - case peerChan <- &mockPeer{pk, sentMsgs, ctx.gossiper.quit}: - case <-ctx.gossiper.quit: - } - } - - batch, err := createAnnouncements(0) - if err != nil { - t.Fatalf("can't generate announcements: %v", err) - } - - localKey, err := btcec.ParsePubKey(batch.nodeAnn1.NodeID[:], btcec.S256()) - if err != nil { - t.Fatalf("unable to parse pubkey: %v", err) - } - remoteKey, err := btcec.ParsePubKey(batch.nodeAnn2.NodeID[:], btcec.S256()) - if err != nil { - t.Fatalf("unable to parse pubkey: %v", err) - } - remotePeer := &mockPeer{remoteKey, sentMsgs, ctx.gossiper.quit} - - // Pretending that we receive local channel announcement from funding - // manager, thereby kick off the announcement exchange process, in - // this case the announcement should be added in the orphan batch - // because we haven't announce the channel yet. - select { - case err = <-ctx.gossiper.ProcessRemoteAnnouncement(batch.remoteProofAnn, - remotePeer): - case <-time.After(2 * time.Second): - t.Fatal("did not process remote announcement") - } - if err != nil { - t.Fatalf("unable to proceed announcement: %v", err) - } - - number := 0 - if err := ctx.gossiper.cfg.WaitingProofStore.ForAll( - func(*channeldb.WaitingProof) er.R { - number++ - return nil - }, - func() { - number = 0 - }, - ); err != nil { - t.Fatalf("unable to retrieve objects from store: %v", err) - } - - if number != 1 { - t.Fatal("wrong number of objects in storage") - } - - // Recreate lightning network topology. Initialize router with channel - // between two nodes. 
- select { - case err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.localChanAnn, - localKey): - case <-time.After(2 * time.Second): - t.Fatal("did not process local announcement") - } - - if err != nil { - t.Fatalf("unable to process: %v", err) - } - - select { - case <-ctx.broadcastedMessage: - t.Fatal("channel announcement was broadcast") - case <-time.After(2 * trickleDelay): - } - - select { - case err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.chanUpdAnn1, - localKey): - case <-time.After(2 * time.Second): - t.Fatal("did not process local announcement") - } - if err != nil { - t.Fatalf("unable to process: %v", err) - } - - select { - case <-ctx.broadcastedMessage: - t.Fatal("channel update announcement was broadcast") - case <-time.After(2 * trickleDelay): - } - - select { - case err = <-ctx.gossiper.ProcessLocalAnnouncement( - batch.nodeAnn1, localKey, - ): - case <-time.After(2 * time.Second): - t.Fatal("did not process local announcement") - } - if err != nil { - t.Fatalf("unable to process node ann: %v", err) - } - select { - case <-ctx.broadcastedMessage: - t.Fatal("node announcement was broadcast") - case <-time.After(2 * trickleDelay): - } - - // The local ChannelUpdate should now be sent directly to the remote peer, - // such that the edge can be used for routing, regardless if this channel - // is announced or not (private channel). 
- select { - case msg := <-sentMsgs: - assertMessage(t, batch.chanUpdAnn1, msg) - case <-time.After(1 * time.Second): - t.Fatal("gossiper did not send channel update to peer") - } - - select { - case err = <-ctx.gossiper.ProcessRemoteAnnouncement(batch.chanUpdAnn2, - remotePeer): - case <-time.After(2 * time.Second): - t.Fatal("did not process remote announcement") - } - if err != nil { - t.Fatalf("unable to process node ann: %v", err) - } - select { - case <-ctx.broadcastedMessage: - t.Fatal("channel update announcement was broadcast") - case <-time.After(2 * trickleDelay): - } - - select { - case err = <-ctx.gossiper.ProcessRemoteAnnouncement( - batch.nodeAnn2, remotePeer, - ): - case <-time.After(2 * time.Second): - t.Fatal("did not process remote announcement") - } - if err != nil { - t.Fatalf("unable to process: %v", err) - } - select { - case <-ctx.broadcastedMessage: - t.Fatal("node announcement announcement was broadcast") - case <-time.After(2 * trickleDelay): - } - - // After that we process local announcement, and waiting to receive - // the channel announcement. - select { - case err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.localProofAnn, - localKey): - case <-time.After(2 * time.Second): - t.Fatal("did not process remote announcement") - } - if err != nil { - t.Fatalf("unable to process: %v", err) - } - - // The local proof should be sent to the remote peer. - select { - case msg := <-sentMsgs: - assertMessage(t, batch.localProofAnn, msg) - case <-time.After(2 * time.Second): - t.Fatalf("local proof was not sent to peer") - } - - // And since both remote and local announcements are processed, we - // should be broadcasting the final channel announcements. 
- for i := 0; i < 5; i++ { - select { - case <-ctx.broadcastedMessage: - case <-time.After(time.Second): - t.Fatal("announcement wasn't broadcast") - } - } - - number = 0 - if err := ctx.gossiper.cfg.WaitingProofStore.ForAll( - func(p *channeldb.WaitingProof) er.R { - number++ - return nil - }, - func() { - number = 0 - }, - ); err != nil { - t.Fatalf("unable to retrieve objects from store: %v", err) - } - - if number != 0 { - t.Fatalf("wrong number of objects in storage: %v", number) - } -} - -// TestSignatureAnnouncementRetryAtStartup tests that if we restart the -// gossiper, it will retry sending the AnnounceSignatures to the peer if it did -// not succeed before shutting down, and the full channel proof is not yet -// assembled. -func TestSignatureAnnouncementRetryAtStartup(t *testing.T) { - t.Parallel() - - ctx, cleanup, err := createTestCtx(uint32(proofMatureDelta)) - if err != nil { - t.Fatalf("can't create context: %v", err) - } - defer cleanup() - - batch, err := createAnnouncements(0) - if err != nil { - t.Fatalf("can't generate announcements: %v", err) - } - - localKey, err := btcec.ParsePubKey(batch.nodeAnn1.NodeID[:], btcec.S256()) - if err != nil { - t.Fatalf("unable to parse pubkey: %v", err) - } - remoteKey, err := btcec.ParsePubKey(batch.nodeAnn2.NodeID[:], btcec.S256()) - if err != nil { - t.Fatalf("unable to parse pubkey: %v", err) - } - - // Set up a channel to intercept the messages sent to the remote peer. - sentToPeer := make(chan lnwire.Message, 1) - remotePeer := &mockPeer{remoteKey, sentToPeer, ctx.gossiper.quit} - - // Since the reliable send to the remote peer of the local channel proof - // requires a notification when the peer comes online, we'll capture the - // channel through which it gets sent to control exactly when to - // dispatch it. 
- notifyPeers := make(chan chan<- lnpeer.Peer, 1) - ctx.gossiper.reliableSender.cfg.NotifyWhenOnline = func(peer [33]byte, - connectedChan chan<- lnpeer.Peer) { - notifyPeers <- connectedChan - } - - // Recreate lightning network topology. Initialize router with channel - // between two nodes. - select { - case err = <-ctx.gossiper.ProcessLocalAnnouncement( - batch.localChanAnn, localKey, - ): - case <-time.After(2 * time.Second): - t.Fatal("did not process local announcement") - } - if err != nil { - t.Fatalf("unable to process channel ann: %v", err) - } - select { - case <-ctx.broadcastedMessage: - t.Fatal("channel announcement was broadcast") - case <-time.After(2 * trickleDelay): - } - - // Pretending that we receive local channel announcement from funding - // manager, thereby kick off the announcement exchange process. - select { - case err = <-ctx.gossiper.ProcessLocalAnnouncement( - batch.localProofAnn, localKey, - ): - case <-time.After(2 * time.Second): - t.Fatal("did not process remote announcement") - } - if err != nil { - t.Fatalf("unable to process :%v", err) - } - - // The gossiper should register for a notification for when the peer is - // online. - select { - case <-notifyPeers: - case <-time.After(2 * time.Second): - t.Fatalf("gossiper did not ask to get notified when " + - "peer is online") - } - - // The proof should not be broadcast yet since we're still missing the - // remote party's. - select { - case <-ctx.broadcastedMessage: - t.Fatal("announcements were broadcast") - case <-time.After(2 * trickleDelay): - } - - // And it shouldn't be sent to the peer either as they are offline. 
- select { - case msg := <-sentToPeer: - t.Fatalf("received unexpected message: %v", spew.Sdump(msg)) - case <-time.After(time.Second): - } - - number := 0 - if err := ctx.gossiper.cfg.WaitingProofStore.ForAll( - func(*channeldb.WaitingProof) er.R { - number++ - return nil - }, - func() { - number = 0 - }, - ); err != nil { - t.Fatalf("unable to retrieve objects from store: %v", err) - } - - if number != 1 { - t.Fatal("wrong number of objects in storage") - } - - // Restart the gossiper and restore its original NotifyWhenOnline and - // NotifyWhenOffline methods. This should trigger a new attempt to send - // the message to the peer. - ctx.gossiper.Stop() - gossiper := New(Config{ - Notifier: ctx.gossiper.cfg.Notifier, - Broadcast: ctx.gossiper.cfg.Broadcast, - NotifyWhenOnline: ctx.gossiper.reliableSender.cfg.NotifyWhenOnline, - NotifyWhenOffline: ctx.gossiper.reliableSender.cfg.NotifyWhenOffline, - SelfNodeAnnouncement: ctx.gossiper.cfg.SelfNodeAnnouncement, - Router: ctx.gossiper.cfg.Router, - TrickleDelay: trickleDelay, - RetransmitTicker: ticker.NewForce(retransmitDelay), - RebroadcastInterval: rebroadcastInterval, - ProofMatureDelta: proofMatureDelta, - WaitingProofStore: ctx.gossiper.cfg.WaitingProofStore, - MessageStore: ctx.gossiper.cfg.MessageStore, - RotateTicker: ticker.NewForce(DefaultSyncerRotationInterval), - HistoricalSyncTicker: ticker.NewForce(DefaultHistoricalSyncInterval), - NumActiveSyncers: 3, - MinimumBatchSize: 10, - SubBatchDelay: time.Second * 5, - }, ctx.gossiper.selfKey) - if err != nil { - t.Fatalf("unable to recreate gossiper: %v", err) - } - if err := gossiper.Start(); err != nil { - t.Fatalf("unable to start recreated gossiper: %v", err) - } - defer gossiper.Stop() - - // Mark the graph as synced in order to allow the announcements to be - // broadcast. 
- gossiper.syncMgr.markGraphSynced() - - ctx.gossiper = gossiper - remotePeer.quit = ctx.gossiper.quit - - // After starting up, the gossiper will see that it has a proof in the - // WaitingProofStore, and will retry sending its part to the remote. - // It should register for a notification for when the peer is online. - var peerChan chan<- lnpeer.Peer - select { - case peerChan = <-notifyPeers: - case <-time.After(2 * time.Second): - t.Fatalf("gossiper did not ask to get notified when " + - "peer is online") - } - - // Notify that peer is now online. This should allow the proof to be - // sent. - peerChan <- remotePeer - -out: - for { - select { - case msg := <-sentToPeer: - // Since the ChannelUpdate will also be resent as it is - // sent reliably, we'll need to filter it out. - if _, ok := msg.(*lnwire.AnnounceSignatures); !ok { - continue - } - - assertMessage(t, batch.localProofAnn, msg) - break out - case <-time.After(2 * time.Second): - t.Fatalf("gossiper did not send message when peer " + - "came online") - } - } - - // Now exchanging the remote channel proof, the channel announcement - // broadcast should continue as normal. 
- select { - case err = <-ctx.gossiper.ProcessRemoteAnnouncement( - batch.remoteProofAnn, remotePeer, - ): - case <-time.After(2 * time.Second): - t.Fatal("did not process remote announcement") - } - if err != nil { - t.Fatalf("unable to process :%v", err) - } - - select { - case <-ctx.broadcastedMessage: - case <-time.After(time.Second): - t.Fatal("announcement wasn't broadcast") - } - - number = 0 - if err := ctx.gossiper.cfg.WaitingProofStore.ForAll( - func(*channeldb.WaitingProof) er.R { - number++ - return nil - }, - func() { - number = 0 - }, - ); err != nil && !channeldb.ErrWaitingProofNotFound.Is(err) { - t.Fatalf("unable to retrieve objects from store: %v", err) - } - - if number != 0 { - t.Fatal("waiting proof should be removed from storage") - } -} - -// TestSignatureAnnouncementFullProofWhenRemoteProof tests that if a remote -// proof is received when we already have the full proof, the gossiper will send -// the full proof (ChannelAnnouncement) to the remote peer. -func TestSignatureAnnouncementFullProofWhenRemoteProof(t *testing.T) { - t.Parallel() - - ctx, cleanup, err := createTestCtx(uint32(proofMatureDelta)) - if err != nil { - t.Fatalf("can't create context: %v", err) - } - defer cleanup() - - batch, err := createAnnouncements(0) - if err != nil { - t.Fatalf("can't generate announcements: %v", err) - } - - localKey, err := btcec.ParsePubKey(batch.nodeAnn1.NodeID[:], btcec.S256()) - if err != nil { - t.Fatalf("unable to parse pubkey: %v", err) - } - remoteKey, err := btcec.ParsePubKey(batch.nodeAnn2.NodeID[:], btcec.S256()) - if err != nil { - t.Fatalf("unable to parse pubkey: %v", err) - } - - // Set up a channel we can use to inspect messages sent by the - // gossiper to the remote peer. - sentToPeer := make(chan lnwire.Message, 1) - remotePeer := &mockPeer{remoteKey, sentToPeer, ctx.gossiper.quit} - - // Override NotifyWhenOnline to return the remote peer which we expect - // meesages to be sent to. 
- ctx.gossiper.reliableSender.cfg.NotifyWhenOnline = func(peer [33]byte, - peerChan chan<- lnpeer.Peer) { - - peerChan <- remotePeer - } - - // Recreate lightning network topology. Initialize router with channel - // between two nodes. - select { - case err = <-ctx.gossiper.ProcessLocalAnnouncement( - batch.localChanAnn, localKey, - ): - case <-time.After(2 * time.Second): - t.Fatal("did not process local announcement") - } - if err != nil { - t.Fatalf("unable to process channel ann: %v", err) - } - select { - case <-ctx.broadcastedMessage: - t.Fatal("channel announcement was broadcast") - case <-time.After(2 * trickleDelay): - } - - select { - case err = <-ctx.gossiper.ProcessLocalAnnouncement( - batch.chanUpdAnn1, localKey, - ): - case <-time.After(2 * time.Second): - t.Fatal("did not process local announcement") - } - if err != nil { - t.Fatalf("unable to process channel update: %v", err) - } - select { - case <-ctx.broadcastedMessage: - t.Fatal("channel update announcement was broadcast") - case <-time.After(2 * trickleDelay): - } - - select { - case msg := <-sentToPeer: - assertMessage(t, batch.chanUpdAnn1, msg) - case <-time.After(2 * time.Second): - t.Fatal("gossiper did not send channel update to remove peer") - } - - select { - case err = <-ctx.gossiper.ProcessLocalAnnouncement( - batch.nodeAnn1, localKey, - ): - case <-time.After(2 * time.Second): - t.Fatal("did not process local announcement") - } - if err != nil { - t.Fatalf("unable to process node ann:%v", err) - } - select { - case <-ctx.broadcastedMessage: - t.Fatal("node announcement was broadcast") - case <-time.After(2 * trickleDelay): - } - - select { - case err = <-ctx.gossiper.ProcessRemoteAnnouncement( - batch.chanUpdAnn2, remotePeer, - ): - case <-time.After(2 * time.Second): - t.Fatal("did not process remote announcement") - } - if err != nil { - t.Fatalf("unable to process channel update: %v", err) - } - select { - case <-ctx.broadcastedMessage: - t.Fatal("channel update announcement was 
broadcast") - case <-time.After(2 * trickleDelay): - } - - select { - case err = <-ctx.gossiper.ProcessRemoteAnnouncement( - batch.nodeAnn2, remotePeer, - ): - case <-time.After(2 * time.Second): - t.Fatal("did not process remote announcement") - } - if err != nil { - t.Fatalf("unable to process node ann: %v", err) - } - select { - case <-ctx.broadcastedMessage: - t.Fatal("node announcement was broadcast") - case <-time.After(2 * trickleDelay): - } - - // Pretending that we receive local channel announcement from funding - // manager, thereby kick off the announcement exchange process. - select { - case err = <-ctx.gossiper.ProcessLocalAnnouncement( - batch.localProofAnn, localKey, - ): - case <-time.After(2 * time.Second): - t.Fatal("did not process local announcement") - } - if err != nil { - t.Fatalf("unable to process local proof: %v", err) - } - - select { - case err = <-ctx.gossiper.ProcessRemoteAnnouncement( - batch.remoteProofAnn, remotePeer, - ): - case <-time.After(2 * time.Second): - t.Fatal("did not process local announcement") - } - if err != nil { - t.Fatalf("unable to process remote proof: %v", err) - } - - // We expect the gossiper to send this message to the remote peer. - select { - case msg := <-sentToPeer: - assertMessage(t, batch.localProofAnn, msg) - case <-time.After(2 * time.Second): - t.Fatal("did not send local proof to peer") - } - - // All channel and node announcements should be broadcast. 
- for i := 0; i < 5; i++ { - select { - case <-ctx.broadcastedMessage: - case <-time.After(time.Second): - t.Fatal("announcement wasn't broadcast") - } - } - - number := 0 - if err := ctx.gossiper.cfg.WaitingProofStore.ForAll( - func(*channeldb.WaitingProof) er.R { - number++ - return nil - }, - func() { - number = 0 - }, - ); err != nil && !channeldb.ErrWaitingProofNotFound.Is(err) { - t.Fatalf("unable to retrieve objects from store: %v", err) - } - - if number != 0 { - t.Fatal("waiting proof should be removed from storage") - } - - // Now give the gossiper the remote proof yet again. This should - // trigger a send of the full ChannelAnnouncement. - select { - case err = <-ctx.gossiper.ProcessRemoteAnnouncement( - batch.remoteProofAnn, remotePeer, - ): - case <-time.After(2 * time.Second): - t.Fatal("did not process local announcement") - } - if err != nil { - t.Fatalf("unable to process remote proof: %v", err) - } - - // We expect the gossiper to send this message to the remote peer. - select { - case msg := <-sentToPeer: - _, ok := msg.(*lnwire.ChannelAnnouncement) - if !ok { - t.Fatalf("expected ChannelAnnouncement, instead got %T", msg) - } - case <-time.After(2 * time.Second): - t.Fatal("did not send local proof to peer") - } -} - -// TestDeDuplicatedAnnouncements ensures that the deDupedAnnouncements struct -// properly stores and delivers the set of de-duplicated announcements. -func TestDeDuplicatedAnnouncements(t *testing.T) { - t.Parallel() - - timestamp := testTimestamp - announcements := deDupedAnnouncements{} - announcements.Reset() - - // Ensure that after new deDupedAnnouncements struct is created and - // reset that storage of each announcement type is empty. 
- if len(announcements.channelAnnouncements) != 0 { - t.Fatal("channel announcements map not empty after reset") - } - if len(announcements.channelUpdates) != 0 { - t.Fatal("channel updates map not empty after reset") - } - if len(announcements.nodeAnnouncements) != 0 { - t.Fatal("node announcements map not empty after reset") - } - - // Ensure that remote channel announcements are properly stored - // and de-duplicated. - ca, err := createRemoteChannelAnnouncement(0) - if err != nil { - t.Fatalf("can't create remote channel announcement: %v", err) - } - - nodePeer := &mockPeer{bitcoinKeyPub2, nil, nil} - announcements.AddMsgs(networkMsg{ - msg: ca, - peer: nodePeer, - source: nodePeer.IdentityKey(), - }) - if len(announcements.channelAnnouncements) != 1 { - t.Fatal("new channel announcement not stored in batch") - } - - // We'll create a second instance of the same announcement with the - // same channel ID. Adding this shouldn't cause an increase in the - // number of items as they should be de-duplicated. - ca2, err := createRemoteChannelAnnouncement(0) - if err != nil { - t.Fatalf("can't create remote channel announcement: %v", err) - } - announcements.AddMsgs(networkMsg{ - msg: ca2, - peer: nodePeer, - source: nodePeer.IdentityKey(), - }) - if len(announcements.channelAnnouncements) != 1 { - t.Fatal("channel announcement not replaced in batch") - } - - // Next, we'll ensure that channel update announcements are properly - // stored and de-duplicated. We do this by creating two updates - // announcements with the same short ID and flag. 
- ua, err := createUpdateAnnouncement(0, 0, nodeKeyPriv1, timestamp) - if err != nil { - t.Fatalf("can't create update announcement: %v", err) - } - announcements.AddMsgs(networkMsg{ - msg: ua, - peer: nodePeer, - source: nodePeer.IdentityKey(), - }) - if len(announcements.channelUpdates) != 1 { - t.Fatal("new channel update not stored in batch") - } - - // Adding the very same announcement shouldn't cause an increase in the - // number of ChannelUpdate announcements stored. - ua2, err := createUpdateAnnouncement(0, 0, nodeKeyPriv1, timestamp) - if err != nil { - t.Fatalf("can't create update announcement: %v", err) - } - announcements.AddMsgs(networkMsg{ - msg: ua2, - peer: nodePeer, - source: nodePeer.IdentityKey(), - }) - if len(announcements.channelUpdates) != 1 { - t.Fatal("channel update not replaced in batch") - } - - // Adding an announcement with a later timestamp should replace the - // stored one. - ua3, err := createUpdateAnnouncement(0, 0, nodeKeyPriv1, timestamp+1) - if err != nil { - t.Fatalf("can't create update announcement: %v", err) - } - announcements.AddMsgs(networkMsg{ - msg: ua3, - peer: nodePeer, - source: nodePeer.IdentityKey(), - }) - if len(announcements.channelUpdates) != 1 { - t.Fatal("channel update not replaced in batch") - } - - assertChannelUpdate := func(channelUpdate *lnwire.ChannelUpdate) { - channelKey := channelUpdateID{ - ua3.ShortChannelID, - ua3.ChannelFlags, - } - - mws, ok := announcements.channelUpdates[channelKey] - if !ok { - t.Fatal("channel update not in batch") - } - if mws.msg != channelUpdate { - t.Fatalf("expected channel update %v, got %v)", - channelUpdate, mws.msg) - } - } - - // Check that ua3 is the currently stored channel update. - assertChannelUpdate(ua3) - - // Adding a channel update with an earlier timestamp should NOT - // replace the one stored. 
- ua4, err := createUpdateAnnouncement(0, 0, nodeKeyPriv1, timestamp) - if err != nil { - t.Fatalf("can't create update announcement: %v", err) - } - announcements.AddMsgs(networkMsg{ - msg: ua4, - peer: nodePeer, - source: nodePeer.IdentityKey(), - }) - if len(announcements.channelUpdates) != 1 { - t.Fatal("channel update not in batch") - } - assertChannelUpdate(ua3) - - // Next well ensure that node announcements are properly de-duplicated. - // We'll first add a single instance with a node's private key. - na, err := createNodeAnnouncement(nodeKeyPriv1, timestamp) - if err != nil { - t.Fatalf("can't create node announcement: %v", err) - } - announcements.AddMsgs(networkMsg{ - msg: na, - peer: nodePeer, - source: nodePeer.IdentityKey(), - }) - if len(announcements.nodeAnnouncements) != 1 { - t.Fatal("new node announcement not stored in batch") - } - - // We'll now add another node to the batch. - na2, err := createNodeAnnouncement(nodeKeyPriv2, timestamp) - if err != nil { - t.Fatalf("can't create node announcement: %v", err) - } - announcements.AddMsgs(networkMsg{ - msg: na2, - peer: nodePeer, - source: nodePeer.IdentityKey(), - }) - if len(announcements.nodeAnnouncements) != 2 { - t.Fatal("second node announcement not stored in batch") - } - - // Adding a new instance of the _same_ node shouldn't increase the size - // of the node ann batch. - na3, err := createNodeAnnouncement(nodeKeyPriv2, timestamp) - if err != nil { - t.Fatalf("can't create node announcement: %v", err) - } - announcements.AddMsgs(networkMsg{ - msg: na3, - peer: nodePeer, - source: nodePeer.IdentityKey(), - }) - if len(announcements.nodeAnnouncements) != 2 { - t.Fatal("second node announcement not replaced in batch") - } - - // Ensure that node announcement with different pointer to same public - // key is still de-duplicated. 
- newNodeKeyPointer := nodeKeyPriv2 - na4, err := createNodeAnnouncement(newNodeKeyPointer, timestamp) - if err != nil { - t.Fatalf("can't create node announcement: %v", err) - } - announcements.AddMsgs(networkMsg{ - msg: na4, - peer: nodePeer, - source: nodePeer.IdentityKey(), - }) - if len(announcements.nodeAnnouncements) != 2 { - t.Fatal("second node announcement not replaced again in batch") - } - - // Ensure that node announcement with increased timestamp replaces - // what is currently stored. - na5, err := createNodeAnnouncement(nodeKeyPriv2, timestamp+1) - if err != nil { - t.Fatalf("can't create node announcement: %v", err) - } - announcements.AddMsgs(networkMsg{ - msg: na5, - peer: nodePeer, - source: nodePeer.IdentityKey(), - }) - if len(announcements.nodeAnnouncements) != 2 { - t.Fatal("node announcement not replaced in batch") - } - nodeID := route.NewVertex(nodeKeyPriv2.PubKey()) - stored, ok := announcements.nodeAnnouncements[nodeID] - if !ok { - t.Fatalf("node announcement not found in batch") - } - if stored.msg != na5 { - t.Fatalf("expected de-duped node announcement to be %v, got %v", - na5, stored.msg) - } - - // Ensure that announcement batch delivers channel announcements, - // channel updates, and node announcements in proper order. - batch := announcements.Emit() - if len(batch) != 4 { - t.Fatal("announcement batch incorrect length") - } - - if !reflect.DeepEqual(batch[0].msg, ca2) { - t.Fatalf("channel announcement not first in batch: got %v, "+ - "expected %v", spew.Sdump(batch[0].msg), spew.Sdump(ca2)) - } - - if !reflect.DeepEqual(batch[1].msg, ua3) { - t.Fatalf("channel update not next in batch: got %v, "+ - "expected %v", spew.Sdump(batch[1].msg), spew.Sdump(ua2)) - } - - // We'll ensure that both node announcements are present. We check both - // indexes as due to the randomized order of map iteration they may be - // in either place. 
- if !reflect.DeepEqual(batch[2].msg, na) && !reflect.DeepEqual(batch[3].msg, na) { - t.Fatal("first node announcement not in last part of batch: "+ - "got %v, expected %v", batch[2].msg, - na) - } - if !reflect.DeepEqual(batch[2].msg, na5) && !reflect.DeepEqual(batch[3].msg, na5) { - t.Fatalf("second node announcement not in last part of batch: "+ - "got %v, expected %v", batch[3].msg, - na5) - } - - // Ensure that after reset, storage of each announcement type - // in deDupedAnnouncements struct is empty again. - announcements.Reset() - if len(announcements.channelAnnouncements) != 0 { - t.Fatal("channel announcements map not empty after reset") - } - if len(announcements.channelUpdates) != 0 { - t.Fatal("channel updates map not empty after reset") - } - if len(announcements.nodeAnnouncements) != 0 { - t.Fatal("node announcements map not empty after reset") - } -} - -// TestForwardPrivateNodeAnnouncement ensures that we do not forward node -// announcements for nodes who do not intend to publicly advertise themselves. -func TestForwardPrivateNodeAnnouncement(t *testing.T) { - t.Parallel() - - const ( - startingHeight = 100 - timestamp = 123456 - ) - - ctx, cleanup, err := createTestCtx(startingHeight) - if err != nil { - t.Fatalf("can't create context: %v", err) - } - defer cleanup() - - // We'll start off by processing a channel announcement without a proof - // (i.e., an unadvertised channel), followed by a node announcement for - // this same channel announcement. - chanAnn := createAnnouncementWithoutProof(startingHeight - 2) - pubKey := nodeKeyPriv1.PubKey() - - select { - case err := <-ctx.gossiper.ProcessLocalAnnouncement(chanAnn, pubKey): - if err != nil { - t.Fatalf("unable to process local announcement: %v", err) - } - case <-time.After(2 * time.Second): - t.Fatalf("local announcement not processed") - } - - // The gossiper should not broadcast the announcement due to it not - // having its announcement signatures. 
- select { - case <-ctx.broadcastedMessage: - t.Fatal("gossiper should not have broadcast channel announcement") - case <-time.After(2 * trickleDelay): - } - - nodeAnn, err := createNodeAnnouncement(nodeKeyPriv1, timestamp) - if err != nil { - t.Fatalf("unable to create node announcement: %v", err) - } - - select { - case err := <-ctx.gossiper.ProcessLocalAnnouncement(nodeAnn, pubKey): - if err != nil { - t.Fatalf("unable to process remote announcement: %v", err) - } - case <-time.After(2 * time.Second): - t.Fatal("remote announcement not processed") - } - - // The gossiper should also not broadcast the node announcement due to - // it not being part of any advertised channels. - select { - case <-ctx.broadcastedMessage: - t.Fatal("gossiper should not have broadcast node announcement") - case <-time.After(2 * trickleDelay): - } - - // Now, we'll attempt to forward the NodeAnnouncement for the same node - // by opening a public channel on the network. We'll create a - // ChannelAnnouncement and hand it off to the gossiper in order to - // process it. - remoteChanAnn, err := createRemoteChannelAnnouncement(startingHeight - 1) - if err != nil { - t.Fatalf("unable to create remote channel announcement: %v", err) - } - peer := &mockPeer{pubKey, nil, nil} - - select { - case err := <-ctx.gossiper.ProcessRemoteAnnouncement(remoteChanAnn, peer): - if err != nil { - t.Fatalf("unable to process remote announcement: %v", err) - } - case <-time.After(2 * time.Second): - t.Fatal("remote announcement not processed") - } - - select { - case <-ctx.broadcastedMessage: - case <-time.After(2 * trickleDelay): - t.Fatal("gossiper should have broadcast the channel announcement") - } - - // We'll recreate the NodeAnnouncement with an updated timestamp to - // prevent a stale update. The NodeAnnouncement should now be forwarded. 
- nodeAnn, err = createNodeAnnouncement(nodeKeyPriv1, timestamp+1) - if err != nil { - t.Fatalf("unable to create node announcement: %v", err) - } - - select { - case err := <-ctx.gossiper.ProcessRemoteAnnouncement(nodeAnn, peer): - if err != nil { - t.Fatalf("unable to process remote announcement: %v", err) - } - case <-time.After(2 * time.Second): - t.Fatal("remote announcement not processed") - } - - select { - case <-ctx.broadcastedMessage: - case <-time.After(2 * trickleDelay): - t.Fatal("gossiper should have broadcast the node announcement") - } -} - -// TestRejectZombieEdge ensures that we properly reject any announcements for -// zombie edges. -func TestRejectZombieEdge(t *testing.T) { - t.Parallel() - - // We'll start by creating our test context with a batch of - // announcements. - ctx, cleanup, err := createTestCtx(0) - if err != nil { - t.Fatalf("unable to create test context: %v", err) - } - defer cleanup() - - batch, err := createAnnouncements(0) - if err != nil { - t.Fatalf("unable to create announcements: %v", err) - } - remotePeer := &mockPeer{pk: nodeKeyPriv2.PubKey()} - - // processAnnouncements is a helper closure we'll use to test that we - // properly process/reject announcements based on whether they're for a - // zombie edge or not. 
- processAnnouncements := func(isZombie bool) { - t.Helper() - - errChan := ctx.gossiper.ProcessRemoteAnnouncement( - batch.remoteChanAnn, remotePeer, - ) - select { - case err := <-errChan: - if isZombie && err != nil { - t.Fatalf("expected to reject live channel "+ - "announcement with nil error: %v", err) - } - if !isZombie && err != nil { - t.Fatalf("expected to process live channel "+ - "announcement: %v", err) - } - case <-time.After(time.Second): - t.Fatal("expected to process channel announcement") - } - select { - case <-ctx.broadcastedMessage: - if isZombie { - t.Fatal("expected to not broadcast zombie " + - "channel announcement") - } - case <-time.After(2 * trickleDelay): - if !isZombie { - t.Fatal("expected to broadcast live channel " + - "announcement") - } - } - - errChan = ctx.gossiper.ProcessRemoteAnnouncement( - batch.chanUpdAnn2, remotePeer, - ) - select { - case err := <-errChan: - if isZombie && err != nil { - t.Fatalf("expected to reject zombie channel "+ - "update with nil error: %v", err) - } - if !isZombie && err != nil { - t.Fatalf("expected to process live channel "+ - "update: %v", err) - } - case <-time.After(time.Second): - t.Fatal("expected to process channel update") - } - select { - case <-ctx.broadcastedMessage: - if isZombie { - t.Fatal("expected to not broadcast zombie " + - "channel update") - } - case <-time.After(2 * trickleDelay): - if !isZombie { - t.Fatal("expected to broadcast live channel " + - "update") - } - } - } - - // We'll mark the edge for which we'll process announcements for as a - // zombie within the router. This should reject any announcements for - // this edge while it remains as a zombie. 
- chanID := batch.remoteChanAnn.ShortChannelID - err = ctx.router.MarkEdgeZombie( - chanID, batch.remoteChanAnn.NodeID1, batch.remoteChanAnn.NodeID2, - ) - if err != nil { - t.Fatalf("unable to mark channel %v as zombie: %v", chanID, err) - } - - processAnnouncements(true) - - // If we then mark the edge as live, the edge's zombie status should be - // overridden and the announcements should be processed. - if err := ctx.router.MarkEdgeLive(chanID); err != nil { - t.Fatalf("unable mark channel %v as zombie: %v", chanID, err) - } - - processAnnouncements(false) -} - -// TestProcessZombieEdgeNowLive ensures that we can detect when a zombie edge -// becomes live by receiving a fresh update. -func TestProcessZombieEdgeNowLive(t *testing.T) { - t.Parallel() - - // We'll start by creating our test context with a batch of - // announcements. - ctx, cleanup, err := createTestCtx(0) - if err != nil { - t.Fatalf("unable to create test context: %v", err) - } - defer cleanup() - - batch, err := createAnnouncements(0) - if err != nil { - t.Fatalf("unable to create announcements: %v", err) - } - - localPrivKey := nodeKeyPriv1 - remotePrivKey := nodeKeyPriv2 - - remotePeer := &mockPeer{pk: remotePrivKey.PubKey()} - - // processAnnouncement is a helper closure we'll use to ensure an - // announcement is properly processed/rejected based on whether the edge - // is a zombie or not. The expectsErr boolean can be used to determine - // whether we should expect an error when processing the message, while - // the isZombie boolean can be used to determine whether the - // announcement should be or not be broadcast. 
- processAnnouncement := func(ann lnwire.Message, isZombie, expectsErr bool) { - t.Helper() - - errChan := ctx.gossiper.ProcessRemoteAnnouncement( - ann, remotePeer, - ) - - var err er.R - select { - case err = <-errChan: - case <-time.After(time.Second): - t.Fatal("expected to process announcement") - } - if expectsErr && err == nil { - t.Fatal("expected error when processing announcement") - } - if !expectsErr && err != nil { - t.Fatalf("received unexpected error when processing "+ - "announcement: %v", err) - } - - select { - case msgWithSenders := <-ctx.broadcastedMessage: - if isZombie { - t.Fatal("expected to not broadcast zombie " + - "channel message") - } - assertMessage(t, ann, msgWithSenders.msg) - - case <-time.After(2 * trickleDelay): - if !isZombie { - t.Fatal("expected to broadcast live channel " + - "message") - } - } - } - - // We'll generate a channel update with a timestamp far enough in the - // past to consider it a zombie. - zombieTimestamp := time.Now().Add(-routing.DefaultChannelPruneExpiry) - batch.chanUpdAnn2.Timestamp = uint32(zombieTimestamp.Unix()) - if err := signUpdate(remotePrivKey, batch.chanUpdAnn2); err != nil { - t.Fatalf("unable to sign update with new timestamp: %v", err) - } - - // We'll also add the edge to our zombie index. - chanID := batch.remoteChanAnn.ShortChannelID - err = ctx.router.MarkEdgeZombie( - chanID, batch.remoteChanAnn.NodeID1, batch.remoteChanAnn.NodeID2, - ) - if err != nil { - t.Fatalf("unable mark channel %v as zombie: %v", chanID, err) - } - - // Attempting to process the current channel update should fail due to - // its edge being considered a zombie and its timestamp not being within - // the live horizon. We should not expect an error here since it is just - // a stale update. - processAnnouncement(batch.chanUpdAnn2, true, false) - - // Now we'll generate a new update with a fresh timestamp. 
This should - // allow the channel update to be processed even though it is still - // marked as a zombie within the index, since it is a fresh new update. - // This won't work however since we'll sign it with the wrong private - // key (local rather than remote). - batch.chanUpdAnn2.Timestamp = uint32(time.Now().Unix()) - if err := signUpdate(localPrivKey, batch.chanUpdAnn2); err != nil { - t.Fatalf("unable to sign update with new timestamp: %v", err) - } - - // We should expect an error due to the signature being invalid. - processAnnouncement(batch.chanUpdAnn2, true, true) - - // Signing it with the correct private key should allow it to be - // processed. - if err := signUpdate(remotePrivKey, batch.chanUpdAnn2); err != nil { - t.Fatalf("unable to sign update with new timestamp: %v", err) - } - - // The channel update cannot be successfully processed and broadcast - // until the channel announcement is. Since the channel update indicates - // a fresh new update, the gossiper should stash it until it sees the - // corresponding channel announcement. - updateErrChan := ctx.gossiper.ProcessRemoteAnnouncement( - batch.chanUpdAnn2, remotePeer, - ) - - select { - case <-ctx.broadcastedMessage: - t.Fatal("expected to not broadcast live channel update " + - "without announcement") - case <-time.After(2 * trickleDelay): - } - - // We'll go ahead and process the channel announcement to ensure the - // channel update is processed thereafter. - processAnnouncement(batch.remoteChanAnn, false, false) - - // After successfully processing the announcement, the channel update - // should have been processed and broadcast successfully as well. 
- select { - case err := <-updateErrChan: - if err != nil { - t.Fatalf("expected to process live channel update: %v", - err) - } - case <-time.After(time.Second): - t.Fatal("expected to process announcement") - } - - select { - case msgWithSenders := <-ctx.broadcastedMessage: - assertMessage(t, batch.chanUpdAnn2, msgWithSenders.msg) - case <-time.After(2 * trickleDelay): - t.Fatal("expected to broadcast live channel update") - } -} - -// TestReceiveRemoteChannelUpdateFirst tests that if we receive a ChannelUpdate -// from the remote before we have processed our own ChannelAnnouncement, it will -// be reprocessed later, after our ChannelAnnouncement. -func TestReceiveRemoteChannelUpdateFirst(t *testing.T) { - t.Parallel() - - ctx, cleanup, err := createTestCtx(uint32(proofMatureDelta)) - if err != nil { - t.Fatalf("can't create context: %v", err) - } - defer cleanup() - - batch, err := createAnnouncements(0) - if err != nil { - t.Fatalf("can't generate announcements: %v", err) - } - - localKey, err := btcec.ParsePubKey(batch.nodeAnn1.NodeID[:], btcec.S256()) - if err != nil { - t.Fatalf("unable to parse pubkey: %v", err) - } - remoteKey, err := btcec.ParsePubKey(batch.nodeAnn2.NodeID[:], btcec.S256()) - if err != nil { - t.Fatalf("unable to parse pubkey: %v", err) - } - - // Set up a channel that we can use to inspect the messages sent - // directly from the gossiper. - sentMsgs := make(chan lnwire.Message, 10) - remotePeer := &mockPeer{remoteKey, sentMsgs, ctx.gossiper.quit} - - // Override NotifyWhenOnline to return the remote peer which we expect - // meesages to be sent to. - ctx.gossiper.reliableSender.cfg.NotifyWhenOnline = func(peer [33]byte, - peerChan chan<- lnpeer.Peer) { - - peerChan <- remotePeer - } - - // Recreate the case where the remote node is sending us its ChannelUpdate - // before we have been able to process our own ChannelAnnouncement and - // ChannelUpdate. 
- errRemoteAnn := ctx.gossiper.ProcessRemoteAnnouncement( - batch.chanUpdAnn2, remotePeer, - ) - select { - case <-ctx.broadcastedMessage: - t.Fatal("channel update announcement was broadcast") - case <-time.After(2 * trickleDelay): - } - - err = <-ctx.gossiper.ProcessRemoteAnnouncement(batch.nodeAnn2, remotePeer) - if err != nil { - t.Fatalf("unable to process node ann: %v", err) - } - select { - case <-ctx.broadcastedMessage: - t.Fatal("node announcement was broadcast") - case <-time.After(2 * trickleDelay): - } - - // Since the remote ChannelUpdate was added for an edge that - // we did not already know about, it should have been added - // to the map of premature ChannelUpdates. Check that nothing - // was added to the graph. - chanInfo, e1, e2, err := ctx.router.GetChannelByID(batch.chanUpdAnn1.ShortChannelID) - if !channeldb.ErrEdgeNotFound.Is(err) { - t.Fatalf("Expected ErrEdgeNotFound, got: %v", err) - } - if chanInfo != nil { - t.Fatalf("chanInfo was not nil") - } - if e1 != nil { - t.Fatalf("e1 was not nil") - } - if e2 != nil { - t.Fatalf("e2 was not nil") - } - - // Recreate lightning network topology. Initialize router with channel - // between two nodes. 
- err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.localChanAnn, localKey) - if err != nil { - t.Fatalf("unable to process :%v", err) - } - select { - case <-ctx.broadcastedMessage: - t.Fatal("channel announcement was broadcast") - case <-time.After(2 * trickleDelay): - } - - err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.chanUpdAnn1, localKey) - if err != nil { - t.Fatalf("unable to process :%v", err) - } - select { - case <-ctx.broadcastedMessage: - t.Fatal("channel update announcement was broadcast") - case <-time.After(2 * trickleDelay): - } - - err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.nodeAnn1, localKey) - if err != nil { - t.Fatalf("unable to process :%v", err) - } - select { - case <-ctx.broadcastedMessage: - t.Fatal("node announcement was broadcast") - case <-time.After(2 * trickleDelay): - } - - // The local ChannelUpdate should now be sent directly to the remote peer, - // such that the edge can be used for routing, regardless if this channel - // is announced or not (private channel). - select { - case msg := <-sentMsgs: - assertMessage(t, batch.chanUpdAnn1, msg) - case <-time.After(1 * time.Second): - t.Fatal("gossiper did not send channel update to peer") - } - - // At this point the remote ChannelUpdate we received earlier should - // be reprocessed, as we now have the necessary edge entry in the graph. - select { - case err := <-errRemoteAnn: - if err != nil { - t.Fatalf("error re-processing remote update: %v", err) - } - case <-time.After(2 * trickleDelay): - t.Fatalf("remote update was not processed") - } - - // Check that the ChannelEdgePolicy was added to the graph. 
- chanInfo, e1, e2, err = ctx.router.GetChannelByID( - batch.chanUpdAnn1.ShortChannelID, - ) - if err != nil { - t.Fatalf("unable to get channel from router: %v", err) - } - if chanInfo == nil { - t.Fatalf("chanInfo was nil") - } - if e1 == nil { - t.Fatalf("e1 was nil") - } - if e2 == nil { - t.Fatalf("e2 was nil") - } - - // Pretending that we receive local channel announcement from funding - // manager, thereby kick off the announcement exchange process. - err = <-ctx.gossiper.ProcessLocalAnnouncement( - batch.localProofAnn, localKey, - ) - if err != nil { - t.Fatalf("unable to process :%v", err) - } - - select { - case <-ctx.broadcastedMessage: - t.Fatal("announcements were broadcast") - case <-time.After(2 * trickleDelay): - } - - number := 0 - if err := ctx.gossiper.cfg.WaitingProofStore.ForAll( - func(*channeldb.WaitingProof) er.R { - number++ - return nil - }, - func() { - number = 0 - }, - ); err != nil { - t.Fatalf("unable to retrieve objects from store: %v", err) - } - - if number != 1 { - t.Fatal("wrong number of objects in storage") - } - - err = <-ctx.gossiper.ProcessRemoteAnnouncement( - batch.remoteProofAnn, remotePeer, - ) - if err != nil { - t.Fatalf("unable to process :%v", err) - } - - for i := 0; i < 4; i++ { - select { - case <-ctx.broadcastedMessage: - case <-time.After(time.Second): - t.Fatal("announcement wasn't broadcast") - } - } - - number = 0 - if err := ctx.gossiper.cfg.WaitingProofStore.ForAll( - func(*channeldb.WaitingProof) er.R { - number++ - return nil - }, - func() { - number = 0 - }, - ); err != nil && !channeldb.ErrWaitingProofNotFound.Is(err) { - t.Fatalf("unable to retrieve objects from store: %v", err) - } - - if number != 0 { - t.Fatal("waiting proof should be removed from storage") - } -} - -// TestExtraDataChannelAnnouncementValidation tests that we're able to properly -// validate a ChannelAnnouncement that includes opaque bytes that we don't -// currently know of. 
-func TestExtraDataChannelAnnouncementValidation(t *testing.T) { - t.Parallel() - - ctx, cleanup, err := createTestCtx(0) - if err != nil { - t.Fatalf("can't create context: %v", err) - } - defer cleanup() - - remotePeer := &mockPeer{nodeKeyPriv1.PubKey(), nil, nil} - - // We'll now create an announcement that contains an extra set of bytes - // that we don't know of ourselves, but should still include in the - // final signature check. - extraBytes := []byte("gotta validate this stil!") - ca, err := createRemoteChannelAnnouncement(0, extraBytes) - if err != nil { - t.Fatalf("can't create channel announcement: %v", err) - } - - // We'll now send the announcement to the main gossiper. We should be - // able to validate this announcement to problem. - select { - case err = <-ctx.gossiper.ProcessRemoteAnnouncement(ca, remotePeer): - case <-time.After(2 * time.Second): - t.Fatal("did not process remote announcement") - } - if err != nil { - t.Fatalf("unable to process :%v", err) - } -} - -// TestExtraDataChannelUpdateValidation tests that we're able to properly -// validate a ChannelUpdate that includes opaque bytes that we don't currently -// know of. -func TestExtraDataChannelUpdateValidation(t *testing.T) { - t.Parallel() - - timestamp := testTimestamp - ctx, cleanup, err := createTestCtx(0) - if err != nil { - t.Fatalf("can't create context: %v", err) - } - defer cleanup() - - remotePeer := &mockPeer{nodeKeyPriv1.PubKey(), nil, nil} - - // In this scenario, we'll create two announcements, one regular - // channel announcement, and another channel update announcement, that - // has additional data that we won't be interpreting. 
- chanAnn, err := createRemoteChannelAnnouncement(0) - if err != nil { - t.Fatalf("unable to create chan ann: %v", err) - } - chanUpdAnn1, err := createUpdateAnnouncement( - 0, 0, nodeKeyPriv1, timestamp, - []byte("must also validate"), - ) - if err != nil { - t.Fatalf("unable to create chan up: %v", err) - } - chanUpdAnn2, err := createUpdateAnnouncement( - 0, 1, nodeKeyPriv2, timestamp, - []byte("must also validate"), - ) - if err != nil { - t.Fatalf("unable to create chan up: %v", err) - } - - // We should be able to properly validate all three messages without - // any issue. - select { - case err = <-ctx.gossiper.ProcessRemoteAnnouncement(chanAnn, remotePeer): - case <-time.After(2 * time.Second): - t.Fatal("did not process remote announcement") - } - if err != nil { - t.Fatalf("unable to process announcement: %v", err) - } - - select { - case err = <-ctx.gossiper.ProcessRemoteAnnouncement(chanUpdAnn1, remotePeer): - case <-time.After(2 * time.Second): - t.Fatal("did not process remote announcement") - } - if err != nil { - t.Fatalf("unable to process announcement: %v", err) - } - - select { - case err = <-ctx.gossiper.ProcessRemoteAnnouncement(chanUpdAnn2, remotePeer): - case <-time.After(2 * time.Second): - t.Fatal("did not process remote announcement") - } - if err != nil { - t.Fatalf("unable to process announcement: %v", err) - } -} - -// TestExtraDataNodeAnnouncementValidation tests that we're able to properly -// validate a NodeAnnouncement that includes opaque bytes that we don't -// currently know of. 
-func TestExtraDataNodeAnnouncementValidation(t *testing.T) { - t.Parallel() - - ctx, cleanup, err := createTestCtx(0) - if err != nil { - t.Fatalf("can't create context: %v", err) - } - defer cleanup() - - remotePeer := &mockPeer{nodeKeyPriv1.PubKey(), nil, nil} - timestamp := testTimestamp - - // We'll create a node announcement that includes a set of opaque data - // which we don't know of, but will store anyway in order to ensure - // upgrades can flow smoothly in the future. - nodeAnn, err := createNodeAnnouncement( - nodeKeyPriv1, timestamp, []byte("gotta validate"), - ) - if err != nil { - t.Fatalf("can't create node announcement: %v", err) - } - - select { - case err = <-ctx.gossiper.ProcessRemoteAnnouncement(nodeAnn, remotePeer): - case <-time.After(2 * time.Second): - t.Fatal("did not process remote announcement") - } - if err != nil { - t.Fatalf("unable to process announcement: %v", err) - } -} - -// assertBroadcast checks that num messages are being broadcasted from the -// gossiper. The broadcasted messages are returned. -func assertBroadcast(t *testing.T, ctx *testCtx, num int) []lnwire.Message { - t.Helper() - - var msgs []lnwire.Message - for i := 0; i < num; i++ { - select { - case msg := <-ctx.broadcastedMessage: - msgs = append(msgs, msg.msg) - case <-time.After(time.Second): - t.Fatalf("expected %d messages to be broadcast, only "+ - "got %d", num, i) - } - } - - // No more messages should be broadcast. - select { - case msg := <-ctx.broadcastedMessage: - t.Fatalf("unexpected message was broadcast: %T", msg.msg) - case <-time.After(2 * trickleDelay): - } - - return msgs -} - -// assertProcessAnnouncemnt is a helper method that checks that the result of -// processing an announcement is successful. 
-func assertProcessAnnouncement(t *testing.T, result chan er.R) { - t.Helper() - - select { - case err := <-result: - if err != nil { - t.Fatalf("unable to process :%v", err) - } - case <-time.After(2 * time.Second): - t.Fatal("did not process announcement") - } -} - -// TestRetransmit checks that the expected announcements are retransmitted when -// the retransmit ticker ticks. -func TestRetransmit(t *testing.T) { - t.Parallel() - - ctx, cleanup, err := createTestCtx(proofMatureDelta) - if err != nil { - t.Fatalf("can't create context: %v", err) - } - defer cleanup() - - batch, err := createAnnouncements(0) - if err != nil { - t.Fatalf("can't generate announcements: %v", err) - } - - localKey, err := btcec.ParsePubKey(batch.nodeAnn1.NodeID[:], btcec.S256()) - if err != nil { - t.Fatalf("unable to parse pubkey: %v", err) - } - remoteKey, err := btcec.ParsePubKey(batch.nodeAnn2.NodeID[:], btcec.S256()) - if err != nil { - t.Fatalf("unable to parse pubkey: %v", err) - } - remotePeer := &mockPeer{remoteKey, nil, nil} - - // Process a local channel annoucement, channel update and node - // announcement. No messages should be broadcasted yet, since no proof - // has been exchanged. - assertProcessAnnouncement( - t, ctx.gossiper.ProcessLocalAnnouncement( - batch.localChanAnn, localKey, - ), - ) - assertBroadcast(t, ctx, 0) - - assertProcessAnnouncement( - t, ctx.gossiper.ProcessLocalAnnouncement( - batch.chanUpdAnn1, localKey, - ), - ) - assertBroadcast(t, ctx, 0) - - assertProcessAnnouncement( - t, ctx.gossiper.ProcessLocalAnnouncement( - batch.nodeAnn1, localKey, - ), - ) - assertBroadcast(t, ctx, 0) - - // Add the remote channel update to the gossiper. Similarly, nothing - // should be broadcasted. - assertProcessAnnouncement( - t, ctx.gossiper.ProcessRemoteAnnouncement( - batch.chanUpdAnn2, remotePeer, - ), - ) - assertBroadcast(t, ctx, 0) - - // Now add the local and remote proof to the gossiper, which should - // trigger a broadcast of the announcements. 
- assertProcessAnnouncement( - t, ctx.gossiper.ProcessLocalAnnouncement( - batch.localProofAnn, localKey, - ), - ) - assertBroadcast(t, ctx, 0) - - assertProcessAnnouncement( - t, ctx.gossiper.ProcessRemoteAnnouncement( - batch.remoteProofAnn, remotePeer, - ), - ) - - // checkAnncouncments make sure the expected number of channel - // announcements + channel updates + node announcements are broadcast. - checkAnnouncements := func(t *testing.T, chanAnns, chanUpds, - nodeAnns int) { - - t.Helper() - - num := chanAnns + chanUpds + nodeAnns - anns := assertBroadcast(t, ctx, num) - - // Count the received announcements. - var chanAnn, chanUpd, nodeAnn int - for _, msg := range anns { - switch msg.(type) { - case *lnwire.ChannelAnnouncement: - chanAnn++ - case *lnwire.ChannelUpdate: - chanUpd++ - case *lnwire.NodeAnnouncement: - nodeAnn++ - } - } - - if chanAnn != chanAnns || chanUpd != chanUpds || - nodeAnn != nodeAnns { - t.Fatalf("unexpected number of announcements: "+ - "chanAnn=%d, chanUpd=%d, nodeAnn=%d", - chanAnn, chanUpd, nodeAnn) - } - } - - // All announcements should be broadcast, including the remote channel - // update. - checkAnnouncements(t, 1, 2, 1) - - // Now let the retransmit ticker tick, which should trigger updates to - // be rebroadcast. - now := time.Unix(int64(testTimestamp), 0) - future := now.Add(rebroadcastInterval + 10*time.Second) - select { - case ctx.gossiper.cfg.RetransmitTicker.(*ticker.Force).Force <- future: - case <-time.After(2 * time.Second): - t.Fatalf("unable to force tick") - } - - // The channel announcement + local channel update + node announcement - // should be re-broadcast. - checkAnnouncements(t, 1, 1, 1) -} - -// TestNodeAnnouncementNoChannels tests that NodeAnnouncements for nodes with -// no existing channels in the graph do not get forwarded. 
-func TestNodeAnnouncementNoChannels(t *testing.T) { - t.Parallel() - - ctx, cleanup, err := createTestCtx(0) - if err != nil { - t.Fatalf("can't create context: %v", err) - } - defer cleanup() - - batch, err := createAnnouncements(0) - if err != nil { - t.Fatalf("can't generate announcements: %v", err) - } - - remoteKey, err := btcec.ParsePubKey(batch.nodeAnn2.NodeID[:], - btcec.S256()) - if err != nil { - t.Fatalf("unable to parse pubkey: %v", err) - } - remotePeer := &mockPeer{remoteKey, nil, nil} - - // Process the remote node announcement. - select { - case err = <-ctx.gossiper.ProcessRemoteAnnouncement(batch.nodeAnn2, - remotePeer): - case <-time.After(2 * time.Second): - t.Fatal("did not process remote announcement") - } - if err != nil { - t.Fatalf("unable to process announcement: %v", err) - } - - // Since no channels or node announcements were already in the graph, - // the node announcement should be ignored, and not forwarded. - select { - case <-ctx.broadcastedMessage: - t.Fatal("node announcement was broadcast") - case <-time.After(2 * trickleDelay): - } - - // Now add the node's channel to the graph by processing the channel - // announement and channel update. - select { - case err = <-ctx.gossiper.ProcessRemoteAnnouncement(batch.remoteChanAnn, - remotePeer): - case <-time.After(2 * time.Second): - t.Fatal("did not process remote announcement") - } - if err != nil { - t.Fatalf("unable to process announcement: %v", err) - } - - select { - case err = <-ctx.gossiper.ProcessRemoteAnnouncement(batch.chanUpdAnn2, - remotePeer): - case <-time.After(2 * time.Second): - t.Fatal("did not process remote announcement") - } - if err != nil { - t.Fatalf("unable to process announcement: %v", err) - } - - // Now process the node announcement again. 
- select { - case err = <-ctx.gossiper.ProcessRemoteAnnouncement(batch.nodeAnn2, remotePeer): - case <-time.After(2 * time.Second): - t.Fatal("did not process remote announcement") - } - if err != nil { - t.Fatalf("unable to process announcement: %v", err) - } - - // This time the node announcement should be forwarded. The same should - // the channel announcement and update be. - for i := 0; i < 3; i++ { - select { - case <-ctx.broadcastedMessage: - case <-time.After(time.Second): - t.Fatal("announcement wasn't broadcast") - } - } - - // Processing the same node announement again should be ignored, as it - // is stale. - select { - case err = <-ctx.gossiper.ProcessRemoteAnnouncement(batch.nodeAnn2, - remotePeer): - case <-time.After(2 * time.Second): - t.Fatal("did not process remote announcement") - } - if err != nil { - t.Fatalf("unable to process announcement: %v", err) - } - - select { - case <-ctx.broadcastedMessage: - t.Fatal("node announcement was broadcast") - case <-time.After(2 * trickleDelay): - } -} - -// TestOptionalFieldsChannelUpdateValidation tests that we're able to properly -// validate the msg flags and optional max HTLC field of a ChannelUpdate. -func TestOptionalFieldsChannelUpdateValidation(t *testing.T) { - t.Parallel() - - ctx, cleanup, err := createTestCtx(0) - if err != nil { - t.Fatalf("can't create context: %v", err) - } - defer cleanup() - - chanUpdateHeight := uint32(0) - timestamp := uint32(123456) - nodePeer := &mockPeer{nodeKeyPriv1.PubKey(), nil, nil} - - // In this scenario, we'll test whether the message flags field in a channel - // update is properly handled. 
- chanAnn, err := createRemoteChannelAnnouncement(chanUpdateHeight) - if err != nil { - t.Fatalf("can't create channel announcement: %v", err) - } - - select { - case err = <-ctx.gossiper.ProcessRemoteAnnouncement(chanAnn, nodePeer): - case <-time.After(2 * time.Second): - t.Fatal("did not process remote announcement") - } - if err != nil { - t.Fatalf("unable to process announcement: %v", err) - } - - // The first update should fail from an invalid max HTLC field, which is - // less than the min HTLC. - chanUpdAnn, err := createUpdateAnnouncement(0, 0, nodeKeyPriv1, timestamp) - if err != nil { - t.Fatalf("unable to create channel update: %v", err) - } - - chanUpdAnn.HtlcMinimumMsat = 5000 - chanUpdAnn.HtlcMaximumMsat = 4000 - if err := signUpdate(nodeKeyPriv1, chanUpdAnn); err != nil { - t.Fatalf("unable to sign channel update: %v", err) - } - - select { - case err = <-ctx.gossiper.ProcessRemoteAnnouncement(chanUpdAnn, nodePeer): - case <-time.After(2 * time.Second): - t.Fatal("did not process remote announcement") - } - if err == nil || !strings.Contains(err.String(), "invalid max htlc") { - t.Fatalf("expected chan update to error, instead got %v", err) - } - - // The second update should fail because the message flag is set but - // the max HTLC field is 0. - chanUpdAnn.HtlcMinimumMsat = 0 - chanUpdAnn.HtlcMaximumMsat = 0 - if err := signUpdate(nodeKeyPriv1, chanUpdAnn); err != nil { - t.Fatalf("unable to sign channel update: %v", err) - } - - select { - case err = <-ctx.gossiper.ProcessRemoteAnnouncement(chanUpdAnn, nodePeer): - case <-time.After(2 * time.Second): - t.Fatal("did not process remote announcement") - } - if err == nil || !strings.Contains(err.String(), "invalid max htlc") { - t.Fatalf("expected chan update to error, instead got %v", err) - } - - // The final update should succeed, since setting the flag 0 means the - // nonsense max_htlc field will just be ignored. 
- chanUpdAnn.MessageFlags = 0 - if err := signUpdate(nodeKeyPriv1, chanUpdAnn); err != nil { - t.Fatalf("unable to sign channel update: %v", err) - } - - select { - case err = <-ctx.gossiper.ProcessRemoteAnnouncement(chanUpdAnn, nodePeer): - case <-time.After(2 * time.Second): - t.Fatal("did not process remote announcement") - } - if err != nil { - t.Fatalf("unable to process announcement: %v", err) - } -} - -// TestSendChannelUpdateReliably ensures that the latest channel update for a -// channel is always sent upon the remote party reconnecting. -func TestSendChannelUpdateReliably(t *testing.T) { - t.Parallel() - - // We'll start by creating our test context and a batch of - // announcements. - ctx, cleanup, err := createTestCtx(uint32(proofMatureDelta)) - if err != nil { - t.Fatalf("unable to create test context: %v", err) - } - defer cleanup() - - batch, err := createAnnouncements(0) - if err != nil { - t.Fatalf("can't generate announcements: %v", err) - } - - // We'll also create two keys, one for ourselves and another for the - // remote party. - localKey, err := btcec.ParsePubKey(batch.nodeAnn1.NodeID[:], btcec.S256()) - if err != nil { - t.Fatalf("unable to parse pubkey: %v", err) - } - remoteKey, err := btcec.ParsePubKey(batch.nodeAnn2.NodeID[:], btcec.S256()) - if err != nil { - t.Fatalf("unable to parse pubkey: %v", err) - } - - // Set up a channel we can use to inspect messages sent by the - // gossiper to the remote peer. - sentToPeer := make(chan lnwire.Message, 1) - remotePeer := &mockPeer{remoteKey, sentToPeer, ctx.gossiper.quit} - - // Since we first wait to be notified of the peer before attempting to - // send the message, we'll overwrite NotifyWhenOnline and - // NotifyWhenOffline to instead give us access to the channel that will - // receive the notification. 
- notifyOnline := make(chan chan<- lnpeer.Peer, 1) - ctx.gossiper.reliableSender.cfg.NotifyWhenOnline = func(_ [33]byte, - peerChan chan<- lnpeer.Peer) { - - notifyOnline <- peerChan - } - notifyOffline := make(chan chan struct{}, 1) - ctx.gossiper.reliableSender.cfg.NotifyWhenOffline = func( - _ [33]byte) <-chan struct{} { - - c := make(chan struct{}, 1) - notifyOffline <- c - return c - } - - // assertMsgSent is a helper closure we'll use to determine if the - // correct gossip message was sent. - assertMsgSent := func(msg lnwire.Message) { - t.Helper() - - select { - case msgSent := <-sentToPeer: - assertMessage(t, msg, msgSent) - case <-time.After(2 * time.Second): - t.Fatalf("did not send %v message to peer", - msg.MsgType()) - } - } - - // Process the channel announcement for which we'll send a channel - // update for. - select { - case err = <-ctx.gossiper.ProcessLocalAnnouncement( - batch.localChanAnn, localKey, - ): - case <-time.After(2 * time.Second): - t.Fatal("did not process local channel announcement") - } - if err != nil { - t.Fatalf("unable to process local channel announcement: %v", err) - } - - // It should not be broadcast due to not having an announcement proof. - select { - case <-ctx.broadcastedMessage: - t.Fatal("channel announcement was broadcast") - case <-time.After(2 * trickleDelay): - } - - // Now, we'll process the channel update. - select { - case err = <-ctx.gossiper.ProcessLocalAnnouncement( - batch.chanUpdAnn1, localKey, - ): - case <-time.After(2 * time.Second): - t.Fatal("did not process local channel update") - } - if err != nil { - t.Fatalf("unable to process local channel update: %v", err) - } - - // It should also not be broadcast due to the announcement not having an - // announcement proof. - select { - case <-ctx.broadcastedMessage: - t.Fatal("channel announcement was broadcast") - case <-time.After(2 * trickleDelay): - } - - // It should however send it to the peer directly. 
In order to do so, - // it'll request a notification for when the peer is online. - var peerChan chan<- lnpeer.Peer - select { - case peerChan = <-notifyOnline: - case <-time.After(2 * time.Second): - t.Fatal("gossiper did not request notification upon peer " + - "connection") - } - - // We can go ahead and notify the peer, which should trigger the message - // to be sent. - peerChan <- remotePeer - assertMsgSent(batch.chanUpdAnn1) - - // The gossiper should now request a notification for when the peer - // disconnects. We'll also trigger this now. - var offlineChan chan struct{} - select { - case offlineChan = <-notifyOffline: - case <-time.After(2 * time.Second): - t.Fatal("gossiper did not request notification upon peer " + - "disconnection") - } - - close(offlineChan) - - // Since it's offline, the gossiper should request another notification - // for when it comes back online. - select { - case peerChan = <-notifyOnline: - case <-time.After(2 * time.Second): - t.Fatal("gossiper did not request notification upon peer " + - "connection") - } - - // Now that the remote peer is offline, we'll send a new channel update. - batch.chanUpdAnn1.Timestamp++ - if err := signUpdate(nodeKeyPriv1, batch.chanUpdAnn1); err != nil { - t.Fatalf("unable to sign new channel update: %v", err) - } - - // With the new update created, we'll go ahead and process it. - select { - case err = <-ctx.gossiper.ProcessLocalAnnouncement( - batch.chanUpdAnn1, localKey, - ): - case <-time.After(2 * time.Second): - t.Fatal("did not process local channel update") - } - if err != nil { - t.Fatalf("unable to process local channel update: %v", err) - } - - // It should also not be broadcast due to the announcement not having an - // announcement proof. - select { - case <-ctx.broadcastedMessage: - t.Fatal("channel announcement was broadcast") - case <-time.After(2 * trickleDelay): - } - - // The message should not be sent since the peer remains offline. 
- select { - case msg := <-sentToPeer: - t.Fatalf("received unexpected message: %v", spew.Sdump(msg)) - case <-time.After(time.Second): - } - - // Once again, we'll notify the peer is online and ensure the new - // channel update is received. This will also cause an offline - // notification to be requested again. - peerChan <- remotePeer - assertMsgSent(batch.chanUpdAnn1) - - select { - case offlineChan = <-notifyOffline: - case <-time.After(2 * time.Second): - t.Fatal("gossiper did not request notification upon peer " + - "disconnection") - } - - // We'll then exchange proofs with the remote peer in order to announce - // the channel. - select { - case err = <-ctx.gossiper.ProcessLocalAnnouncement( - batch.localProofAnn, localKey, - ): - case <-time.After(2 * time.Second): - t.Fatal("did not process local channel proof") - } - if err != nil { - t.Fatalf("unable to process local channel proof: %v", err) - } - - // No messages should be broadcast as we don't have the full proof yet. - select { - case <-ctx.broadcastedMessage: - t.Fatal("channel announcement was broadcast") - case <-time.After(2 * trickleDelay): - } - - // Our proof should be sent to the remote peer however. - assertMsgSent(batch.localProofAnn) - - select { - case err = <-ctx.gossiper.ProcessRemoteAnnouncement( - batch.remoteProofAnn, remotePeer, - ): - case <-time.After(2 * time.Second): - t.Fatal("did not process remote channel proof") - } - if err != nil { - t.Fatalf("unable to process remote channel proof: %v", err) - } - - // Now that we've constructed our full proof, we can assert that the - // channel has been announced. - for i := 0; i < 2; i++ { - select { - case <-ctx.broadcastedMessage: - case <-time.After(2 * trickleDelay): - t.Fatal("expected channel to be announced") - } - } - - // With the channel announced, we'll generate a new channel update. This - // one won't take the path of the reliable sender, as the channel has - // already been announced. 
We'll keep track of the old message that is - // now stale to use later on. - staleChannelUpdate := batch.chanUpdAnn1 - newChannelUpdate := &lnwire.ChannelUpdate{} - *newChannelUpdate = *staleChannelUpdate - newChannelUpdate.Timestamp++ - if err := signUpdate(nodeKeyPriv1, newChannelUpdate); err != nil { - t.Fatalf("unable to sign new channel update: %v", err) - } - - // Process the new channel update. It should not be sent to the peer - // directly since the reliable sender only applies when the channel is - // not announced. - select { - case err = <-ctx.gossiper.ProcessLocalAnnouncement( - newChannelUpdate, localKey, - ): - case <-time.After(2 * time.Second): - t.Fatal("did not process local channel update") - } - if err != nil { - t.Fatalf("unable to process local channel update: %v", err) - } - select { - case <-ctx.broadcastedMessage: - case <-time.After(2 * trickleDelay): - t.Fatal("channel update was not broadcast") - } - select { - case msg := <-sentToPeer: - t.Fatalf("received unexpected message: %v", spew.Sdump(msg)) - case <-time.After(time.Second): - } - - // Then, we'll trigger the reliable sender to send its pending messages - // by triggering an offline notification for the peer, followed by an - // online one. - close(offlineChan) - - select { - case peerChan = <-notifyOnline: - case <-time.After(2 * time.Second): - t.Fatal("gossiper did not request notification upon peer " + - "connection") - } - - peerChan <- remotePeer - - // At this point, we should have sent both the AnnounceSignatures and - // stale ChannelUpdate. 
- for i := 0; i < 2; i++ { - var msg lnwire.Message - select { - case msg = <-sentToPeer: - case <-time.After(time.Second): - t.Fatal("expected to send message") - } - - switch msg := msg.(type) { - case *lnwire.ChannelUpdate: - assertMessage(t, staleChannelUpdate, msg) - case *lnwire.AnnounceSignatures: - assertMessage(t, batch.localProofAnn, msg) - default: - t.Fatalf("send unexpected %v message", msg.MsgType()) - } - } - - // Since the messages above are now deemed as stale, they should be - // removed from the message store. - err = wait.NoError(func() er.R { - msgs, err := ctx.gossiper.cfg.MessageStore.Messages() - if err != nil { - return er.Errorf("unable to retrieve pending "+ - "messages: %v", err) - } - if len(msgs) != 0 { - return er.Errorf("expected no messages left, found %d", - len(msgs)) - } - return nil - }, time.Second) - if err != nil { - t.Fatal(err) - } -} - -func sendLocalMsg(t *testing.T, ctx *testCtx, msg lnwire.Message, - localPub *btcec.PublicKey, optionalMsgFields ...OptionalMsgField) { - - t.Helper() - - var err er.R - select { - case err = <-ctx.gossiper.ProcessLocalAnnouncement( - msg, localPub, optionalMsgFields..., - ): - case <-time.After(2 * time.Second): - t.Fatal("did not process local announcement") - } - if err != nil { - t.Fatalf("unable to process channel msg: %v", err) - } -} - -func sendRemoteMsg(t *testing.T, ctx *testCtx, msg lnwire.Message, - remotePeer lnpeer.Peer) { - - t.Helper() - - select { - case err := <-ctx.gossiper.ProcessRemoteAnnouncement(msg, remotePeer): - if err != nil { - t.Fatalf("unable to process channel msg: %v", err) - } - case <-time.After(2 * time.Second): - t.Fatal("did not process local announcement") - } -} - -func assertBroadcastMsg(t *testing.T, ctx *testCtx, - predicate func(lnwire.Message) er.R) { - - t.Helper() - - // We don't care about the order of the broadcast, only that our target - // predicate returns true for any of the messages, so we'll continue to - // retry until either we hit our 
timeout, or it returns with no error - // (message found). - err := wait.NoError(func() er.R { - select { - case msg := <-ctx.broadcastedMessage: - return predicate(msg.msg) - case <-time.After(2 * trickleDelay): - return er.Errorf("no message broadcast") - } - }, time.Second*5) - if err != nil { - t.Fatal(err) - } -} - -// TestPropagateChanPolicyUpdate tests that we're able to issue requests to -// update policies for all channels and also select target channels. -// Additionally, we ensure that we don't propagate updates for any private -// channels. -func TestPropagateChanPolicyUpdate(t *testing.T) { - t.Parallel() - - // First, we'll make out test context and add 3 random channels to the - // graph. - startingHeight := uint32(10) - ctx, cleanup, err := createTestCtx(startingHeight) - if err != nil { - t.Fatalf("unable to create test context: %v", err) - } - defer cleanup() - - const numChannels = 3 - channelsToAnnounce := make([]*annBatch, 0, numChannels) - for i := 0; i < numChannels; i++ { - newChan, err := createAnnouncements(uint32(i + 1)) - if err != nil { - t.Fatalf("unable to make new channel ann: %v", err) - } - - channelsToAnnounce = append(channelsToAnnounce, newChan) - } - - localKey := nodeKeyPriv1.PubKey() - remoteKey := nodeKeyPriv2.PubKey() - - sentMsgs := make(chan lnwire.Message, 10) - remotePeer := &mockPeer{remoteKey, sentMsgs, ctx.gossiper.quit} - - // The forced code path for sending the private ChannelUpdate to the - // remote peer will be hit, forcing it to request a notification that - // the remote peer is active. We'll ensure that it targets the proper - // pubkey, and hand it our mock peer above. 
- notifyErr := make(chan er.R, 1) - ctx.gossiper.reliableSender.cfg.NotifyWhenOnline = func( - targetPub [33]byte, peerChan chan<- lnpeer.Peer) { - - if !bytes.Equal(targetPub[:], remoteKey.SerializeCompressed()) { - notifyErr <- er.Errorf("reliableSender attempted to send the "+ - "message to the wrong peer: expected %x got %x", - remoteKey.SerializeCompressed(), - targetPub) - } - - peerChan <- remotePeer - } - - // With our channel announcements created, we'll now send them all to - // the gossiper in order for it to process. However, we'll hold back - // the channel ann proof from the first channel in order to have it be - // marked as private channel. - firstChanID := channelsToAnnounce[0].localChanAnn.ShortChannelID - for i, batch := range channelsToAnnounce { - // channelPoint ensures that each channel policy in the map - // returned by PropagateChanPolicyUpdate has a unique key. Since - // the map is keyed by wire.OutPoint, we want to ensure that - // each channel has a unique channel point. - channelPoint := ChannelPoint(wire.OutPoint{Index: uint32(i)}) - - sendLocalMsg(t, ctx, batch.localChanAnn, localKey, channelPoint) - sendLocalMsg(t, ctx, batch.chanUpdAnn1, localKey) - sendLocalMsg(t, ctx, batch.nodeAnn1, localKey) - - sendRemoteMsg(t, ctx, batch.chanUpdAnn2, remotePeer) - sendRemoteMsg(t, ctx, batch.nodeAnn2, remotePeer) - - // We'll skip sending the auth proofs from the first channel to - // ensure that it's seen as a private channel. - if batch.localChanAnn.ShortChannelID == firstChanID { - continue - } - - sendLocalMsg(t, ctx, batch.localProofAnn, localKey) - sendRemoteMsg(t, ctx, batch.remoteProofAnn, remotePeer) - } - - // Drain out any broadcast or direct messages we might not have read up - // to this point. We'll also check out notifyErr to detect if the - // reliable sender had an issue sending to the remote peer. 
-out: - for { - select { - case <-ctx.broadcastedMessage: - case <-sentMsgs: - case err := <-notifyErr: - t.Fatal(err) - default: - break out - } - } - - // Now that all of our channels are loaded, we'll attempt to update the - // policy of all of them. - const newTimeLockDelta = 100 - var edgesToUpdate []EdgeWithInfo - err = ctx.router.ForAllOutgoingChannels(func( - info *channeldb.ChannelEdgeInfo, - edge *channeldb.ChannelEdgePolicy) er.R { - - edge.TimeLockDelta = uint16(newTimeLockDelta) - edgesToUpdate = append(edgesToUpdate, EdgeWithInfo{ - Info: info, - Edge: edge, - }) - - return nil - }) - if err != nil { - t.Fatal(err) - } - - err = ctx.gossiper.PropagateChanPolicyUpdate(edgesToUpdate) - if err != nil { - t.Fatalf("unable to chan policies: %v", err) - } - - // Two channel updates should now be broadcast, with neither of them - // being the channel our first private channel. - for i := 0; i < numChannels-1; i++ { - assertBroadcastMsg(t, ctx, func(msg lnwire.Message) er.R { - upd, ok := msg.(*lnwire.ChannelUpdate) - if !ok { - return er.Errorf("channel update not "+ - "broadcast, instead %T was", msg) - } - - if upd.ShortChannelID == firstChanID { - return er.Errorf("private channel upd " + - "broadcast") - } - if upd.TimeLockDelta != newTimeLockDelta { - return er.Errorf("wrong delta: expected %v, "+ - "got %v", newTimeLockDelta, - upd.TimeLockDelta) - } - - return nil - }) - } - - // Finally the ChannelUpdate should have been sent directly to the - // remote peer via the reliable sender. 
- select { - case msg := <-sentMsgs: - upd, ok := msg.(*lnwire.ChannelUpdate) - if !ok { - t.Fatalf("channel update not "+ - "broadcast, instead %T was", msg) - } - if upd.TimeLockDelta != newTimeLockDelta { - t.Fatalf("wrong delta: expected %v, "+ - "got %v", newTimeLockDelta, - upd.TimeLockDelta) - } - if upd.ShortChannelID != firstChanID { - t.Fatalf("private channel upd " + - "broadcast") - } - case <-time.After(time.Second * 5): - t.Fatalf("message not sent directly to peer") - } - - // At this point, no other ChannelUpdate messages should be broadcast - // as we sent the two public ones to the network, and the private one - // was sent directly to the peer. - for { - select { - case msg := <-ctx.broadcastedMessage: - if upd, ok := msg.msg.(*lnwire.ChannelUpdate); ok { - if upd.ShortChannelID == firstChanID { - t.Fatalf("chan update msg received: %v", - spew.Sdump(msg)) - } - } - default: - return - } - } -} - -// TestProcessChannelAnnouncementOptionalMsgFields ensures that the gossiper can -// properly handled optional message fields provided by the caller when -// processing a channel announcement. -func TestProcessChannelAnnouncementOptionalMsgFields(t *testing.T) { - t.Parallel() - - // We'll start by creating our test context and a set of test channel - // announcements. - ctx, cleanup, err := createTestCtx(0) - if err != nil { - t.Fatalf("unable to create test context: %v", err) - } - defer cleanup() - - chanAnn1 := createAnnouncementWithoutProof(100) - chanAnn2 := createAnnouncementWithoutProof(101) - localKey := nodeKeyPriv1.PubKey() - - // assertOptionalMsgFields is a helper closure that ensures the optional - // message fields were set as intended. 
- assertOptionalMsgFields := func(chanID lnwire.ShortChannelID, - capacity btcutil.Amount, channelPoint wire.OutPoint) { - - t.Helper() - - edge, _, _, err := ctx.router.GetChannelByID(chanID) - if err != nil { - t.Fatalf("unable to get channel by id: %v", err) - } - if edge.Capacity != capacity { - t.Fatalf("expected capacity %v, got %v", capacity, - edge.Capacity) - } - if edge.ChannelPoint != channelPoint { - t.Fatalf("expected channel point %v, got %v", - channelPoint, edge.ChannelPoint) - } - } - - // We'll process the first announcement without any optional fields. We - // should see the channel's capacity and outpoint have a zero value. - sendLocalMsg(t, ctx, chanAnn1, localKey) - assertOptionalMsgFields(chanAnn1.ShortChannelID, 0, wire.OutPoint{}) - - // Providing the capacity and channel point as optional fields should - // propagate them all the way down to the router. - capacity := btcutil.Amount(1000) - channelPoint := wire.OutPoint{Index: 1} - sendLocalMsg( - t, ctx, chanAnn2, localKey, ChannelCapacity(capacity), - ChannelPoint(channelPoint), - ) - assertOptionalMsgFields(chanAnn2.ShortChannelID, capacity, channelPoint) -} - -func assertMessage(t *testing.T, expected, got lnwire.Message) { - t.Helper() - - if !reflect.DeepEqual(expected, got) { - t.Fatalf("expected: %v\ngot: %v", spew.Sdump(expected), - spew.Sdump(got)) - } -} - -// TestSplitAnnouncementsCorrectSubBatches checks that we split a given -// sizes of announcement list into the correct number of batches. 
-func TestSplitAnnouncementsCorrectSubBatches(t *testing.T) { - t.Parallel() - - const subBatchSize = 10 - - announcementBatchSizes := []int{2, 5, 20, 45, 80, 100, 1005} - expectedNumberMiniBatches := []int{1, 1, 2, 5, 8, 10, 101} - - lengthAnnouncementBatchSizes := len(announcementBatchSizes) - lengthExpectedNumberMiniBatches := len(expectedNumberMiniBatches) - - if lengthAnnouncementBatchSizes != lengthExpectedNumberMiniBatches { - t.Fatal("Length of announcementBatchSizes and " + - "expectedNumberMiniBatches should be equal") - } - - for testIndex := range announcementBatchSizes { - var batchSize = announcementBatchSizes[testIndex] - announcementBatch := make([]msgWithSenders, batchSize) - - splitAnnouncementBatch := splitAnnouncementBatches( - subBatchSize, announcementBatch, - ) - - lengthMiniBatches := len(splitAnnouncementBatch) - - if lengthMiniBatches != expectedNumberMiniBatches[testIndex] { - t.Fatalf("Expecting %d mini batches, actual %d", - expectedNumberMiniBatches[testIndex], lengthMiniBatches) - } - } -} - -func assertCorrectSubBatchSize(t *testing.T, expectedSubBatchSize, - actualSubBatchSize int) { - - t.Helper() - - if actualSubBatchSize != expectedSubBatchSize { - t.Fatalf("Expecting subBatch size of %d, actual %d", - expectedSubBatchSize, actualSubBatchSize) - } -} - -// TestCalculateCorrectSubBatchSize checks that we check the correct -// sub batch size for each of the input vectors of batch sizes. 
-func TestCalculateCorrectSubBatchSizes(t *testing.T) { - t.Parallel() - - const minimumSubBatchSize = 10 - const batchDelay = time.Duration(100) - const subBatchDelay = time.Duration(10) - - batchSizes := []int{2, 200, 250, 305, 352, 10010, 1000001} - expectedSubBatchSize := []int{10, 20, 25, 31, 36, 1001, 100001} - - for testIndex := range batchSizes { - batchSize := batchSizes[testIndex] - expectedBatchSize := expectedSubBatchSize[testIndex] - - actualSubBatchSize := calculateSubBatchSize( - batchDelay, subBatchDelay, minimumSubBatchSize, batchSize, - ) - - assertCorrectSubBatchSize(t, expectedBatchSize, actualSubBatchSize) - } -} - -// TestCalculateCorrectSubBatchSizesDifferentDelay checks that we check the -// correct sub batch size for each of different delay. -func TestCalculateCorrectSubBatchSizesDifferentDelay(t *testing.T) { - t.Parallel() - - const batchSize = 100 - const minimumSubBatchSize = 10 - - batchDelays := []time.Duration{100, 50, 20, 25, 5, 0} - const subBatchDelay = 10 - - expectedSubBatchSize := []int{10, 20, 50, 40, 100, 100} - - for testIndex := range batchDelays { - batchDelay := batchDelays[testIndex] - expectedBatchSize := expectedSubBatchSize[testIndex] - - actualSubBatchSize := calculateSubBatchSize( - batchDelay, subBatchDelay, minimumSubBatchSize, batchSize, - ) - - assertCorrectSubBatchSize(t, expectedBatchSize, actualSubBatchSize) - } -} - -// TestBroadcastAnnsAfterGraphSynced ensures that we only broadcast -// announcements after the graph has been considered as synced, i.e., after our -// initial historical sync has completed. -func TestBroadcastAnnsAfterGraphSynced(t *testing.T) { - t.Parallel() - - ctx, cleanup, err := createTestCtx(10) - if err != nil { - t.Fatalf("can't create context: %v", err) - } - defer cleanup() - - // We'll mark the graph as not synced. This should prevent us from - // broadcasting any messages we've received as part of our initial - // historical sync. 
- ctx.gossiper.syncMgr.markGraphSyncing() - - assertBroadcast := func(msg lnwire.Message, isRemote bool, - shouldBroadcast bool) { - - t.Helper() - - nodePeer := &mockPeer{nodeKeyPriv1.PubKey(), nil, nil} - var errChan chan er.R - if isRemote { - errChan = ctx.gossiper.ProcessRemoteAnnouncement( - msg, nodePeer, - ) - } else { - errChan = ctx.gossiper.ProcessLocalAnnouncement( - msg, nodePeer.pk, - ) - } - - select { - case err := <-errChan: - if err != nil { - t.Fatalf("unable to process gossip message: %v", - err) - } - case <-time.After(2 * time.Second): - t.Fatal("gossip message not processed") - } - - select { - case <-ctx.broadcastedMessage: - if !shouldBroadcast { - t.Fatal("gossip message was broadcast") - } - case <-time.After(2 * trickleDelay): - if shouldBroadcast { - t.Fatal("gossip message wasn't broadcast") - } - } - } - - // A remote channel announcement should not be broadcast since the graph - // has not yet been synced. - chanAnn1, err := createRemoteChannelAnnouncement(0) - if err != nil { - t.Fatalf("unable to create channel announcement: %v", err) - } - assertBroadcast(chanAnn1, true, false) - - // A local channel announcement should be broadcast though, regardless - // of whether we've synced our graph or not. - chanUpd, err := createUpdateAnnouncement(0, 0, nodeKeyPriv1, 1) - if err != nil { - t.Fatalf("unable to create channel announcement: %v", err) - } - assertBroadcast(chanUpd, false, true) - - // Mark the graph as synced, which should allow the channel announcement - // should to be broadcast. 
- ctx.gossiper.syncMgr.markGraphSynced() - - chanAnn2, err := createRemoteChannelAnnouncement(1) - if err != nil { - t.Fatalf("unable to create channel announcement: %v", err) - } - assertBroadcast(chanAnn2, true, true) -} diff --git a/lnd/discovery/message_store.go b/lnd/discovery/message_store.go deleted file mode 100644 index 0c497731..00000000 --- a/lnd/discovery/message_store.go +++ /dev/null @@ -1,299 +0,0 @@ -package discovery - -import ( - "bytes" - "encoding/binary" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -var ( - // messageStoreBucket is a key used to create a top level bucket in the - // gossiper database, used for storing messages that are to be sent to - // peers. Upon restarts, these messages will be read and resent to their - // respective peers. - // - // maps: - // pubKey (33 bytes) + msgShortChanID (8 bytes) + msgType (2 bytes) -> msg - messageStoreBucket = []byte("message-store") - - // ErrUnsupportedMessage is an error returned when we attempt to add a - // message to the store that is not supported. - ErrUnsupportedMessage = Err.CodeWithDetail("ErrUnsupportedMessage", "unsupported message type") - - // ErrCorruptedMessageStore indicates that the on-disk bucketing - // structure has altered since the gossip message store instance was - // initialized. - ErrCorruptedMessageStore = Err.CodeWithDetail("ErrCorruptedMessageStore", "gossip message store has been "+ - "corrupted") -) - -// GossipMessageStore is a store responsible for storing gossip messages which -// we should reliably send to our peers. -type GossipMessageStore interface { - // AddMessage adds a message to the store for this peer. - AddMessage(lnwire.Message, [33]byte) er.R - - // DeleteMessage deletes a message from the store for this peer. 
- DeleteMessage(lnwire.Message, [33]byte) er.R - - // Messages returns the total set of messages that exist within the - // store for all peers. - Messages() (map[[33]byte][]lnwire.Message, er.R) - - // Peers returns the public key of all peers with messages within the - // store. - Peers() (map[[33]byte]struct{}, er.R) - - // MessagesForPeer returns the set of messages that exists within the - // store for the given peer. - MessagesForPeer([33]byte) ([]lnwire.Message, er.R) -} - -// MessageStore is an implementation of the GossipMessageStore interface backed -// by a channeldb instance. By design, this store will only keep the latest -// version of a message (like in the case of multiple ChannelUpdate's) for a -// channel with a peer. -type MessageStore struct { - db *channeldb.DB -} - -// A compile-time assertion to ensure messageStore implements the -// GossipMessageStore interface. -var _ GossipMessageStore = (*MessageStore)(nil) - -// NewMessageStore creates a new message store backed by a channeldb instance. -func NewMessageStore(db *channeldb.DB) (*MessageStore, er.R) { - err := kvdb.Batch(db.Backend, func(tx kvdb.RwTx) er.R { - _, err := tx.CreateTopLevelBucket(messageStoreBucket) - return err - }) - if err != nil { - return nil, er.Errorf("unable to create required buckets: %v", - err) - } - - return &MessageStore{db}, nil -} - -// msgShortChanID retrieves the short channel ID of the message. -func msgShortChanID(msg lnwire.Message) (lnwire.ShortChannelID, er.R) { - var shortChanID lnwire.ShortChannelID - switch msg := msg.(type) { - case *lnwire.AnnounceSignatures: - shortChanID = msg.ShortChannelID - case *lnwire.ChannelUpdate: - shortChanID = msg.ShortChannelID - default: - return shortChanID, ErrUnsupportedMessage.Default() - } - - return shortChanID, nil -} - -// messageStoreKey constructs the database key for the message to be stored. 
-func messageStoreKey(msg lnwire.Message, peerPubKey [33]byte) ([]byte, er.R) { - shortChanID, err := msgShortChanID(msg) - if err != nil { - return nil, err - } - - var k [33 + 8 + 2]byte - copy(k[:33], peerPubKey[:]) - binary.BigEndian.PutUint64(k[33:41], shortChanID.ToUint64()) - binary.BigEndian.PutUint16(k[41:43], uint16(msg.MsgType())) - - return k[:], nil -} - -// AddMessage adds a message to the store for this peer. -func (s *MessageStore) AddMessage(msg lnwire.Message, peerPubKey [33]byte) er.R { - // Construct the key for which we'll find this message with in the store. - msgKey, err := messageStoreKey(msg, peerPubKey) - if err != nil { - return err - } - - // Serialize the message with its wire encoding. - var b bytes.Buffer - if _, err := lnwire.WriteMessage(&b, msg, 0); err != nil { - return err - } - - return kvdb.Batch(s.db.Backend, func(tx kvdb.RwTx) er.R { - messageStore := tx.ReadWriteBucket(messageStoreBucket) - if messageStore == nil { - return ErrCorruptedMessageStore.Default() - } - - return messageStore.Put(msgKey, b.Bytes()) - }) -} - -// DeleteMessage deletes a message from the store for this peer. -func (s *MessageStore) DeleteMessage(msg lnwire.Message, - peerPubKey [33]byte) er.R { - - // Construct the key for which we'll find this message with in the - // store. - msgKey, err := messageStoreKey(msg, peerPubKey) - if err != nil { - return err - } - - return kvdb.Batch(s.db.Backend, func(tx kvdb.RwTx) er.R { - messageStore := tx.ReadWriteBucket(messageStoreBucket) - if messageStore == nil { - return ErrCorruptedMessageStore.Default() - } - - // In the event that we're attempting to delete a ChannelUpdate - // from the store, we'll make sure that we're actually deleting - // the correct one as it can be overwritten. - if msg, ok := msg.(*lnwire.ChannelUpdate); ok { - // Deleting a value from a bucket that doesn't exist - // acts as a NOP, so we'll return if a message doesn't - // exist under this key. 
- v := messageStore.Get(msgKey) - if v == nil { - return nil - } - - dbMsg, err := lnwire.ReadMessage(bytes.NewReader(v), 0) - if err != nil { - return err - } - - // If the timestamps don't match, then the update stored - // should be the latest one, so we'll avoid deleting it. - if msg.Timestamp != dbMsg.(*lnwire.ChannelUpdate).Timestamp { - return nil - } - } - - return messageStore.Delete(msgKey) - }) -} - -// readMessage reads a message from its serialized form and ensures its -// supported by the current version of the message store. -func readMessage(msgBytes []byte) (lnwire.Message, er.R) { - msg, err := lnwire.ReadMessage(bytes.NewReader(msgBytes), 0) - if err != nil { - return nil, err - } - - // Check if the message is supported by the store. We can reuse the - // check for ShortChannelID as its a dependency on messages stored. - if _, err := msgShortChanID(msg); err != nil { - return nil, err - } - - return msg, nil -} - -// Messages returns the total set of messages that exist within the store for -// all peers. -func (s *MessageStore) Messages() (map[[33]byte][]lnwire.Message, er.R) { - var msgs map[[33]byte][]lnwire.Message - err := kvdb.View(s.db, func(tx kvdb.RTx) er.R { - messageStore := tx.ReadBucket(messageStoreBucket) - if messageStore == nil { - return ErrCorruptedMessageStore.Default() - } - - return messageStore.ForEach(func(k, v []byte) er.R { - var pubKey [33]byte - copy(pubKey[:], k[:33]) - - // Deserialize the message from its raw bytes and filter - // out any which are not currently supported by the - // store. - msg, err := readMessage(v) - if ErrUnsupportedMessage.Is(err) { - return nil - } - if err != nil { - return err - } - - msgs[pubKey] = append(msgs[pubKey], msg) - return nil - }) - }, func() { - msgs = make(map[[33]byte][]lnwire.Message) - }) - if err != nil { - return nil, err - } - - return msgs, nil -} - -// MessagesForPeer returns the set of messages that exists within the store for -// the given peer. 
-func (s *MessageStore) MessagesForPeer( - peerPubKey [33]byte) ([]lnwire.Message, er.R) { - - var msgs []lnwire.Message - err := kvdb.View(s.db, func(tx kvdb.RTx) er.R { - messageStore := tx.ReadBucket(messageStoreBucket) - if messageStore == nil { - return ErrCorruptedMessageStore.Default() - } - - c := messageStore.ReadCursor() - k, v := c.Seek(peerPubKey[:]) - for ; bytes.HasPrefix(k, peerPubKey[:]); k, v = c.Next() { - // Deserialize the message from its raw bytes and filter - // out any which are not currently supported by the - // store. - msg, err := readMessage(v) - if ErrUnsupportedMessage.Is(err) { - continue - } - if err != nil { - return err - } - - msgs = append(msgs, msg) - } - - return nil - }, func() { - msgs = nil - }) - if err != nil { - return nil, err - } - - return msgs, nil -} - -// Peers returns the public key of all peers with messages within the store. -func (s *MessageStore) Peers() (map[[33]byte]struct{}, er.R) { - var peers map[[33]byte]struct{} - err := kvdb.View(s.db, func(tx kvdb.RTx) er.R { - messageStore := tx.ReadBucket(messageStoreBucket) - if messageStore == nil { - return ErrCorruptedMessageStore.Default() - } - - return messageStore.ForEach(func(k, _ []byte) er.R { - var pubKey [33]byte - copy(pubKey[:], k[:33]) - peers[pubKey] = struct{}{} - return nil - }) - }, func() { - peers = make(map[[33]byte]struct{}) - }) - if err != nil { - return nil, err - } - - return peers, nil -} diff --git a/lnd/discovery/message_store_test.go b/lnd/discovery/message_store_test.go deleted file mode 100644 index 2df02803..00000000 --- a/lnd/discovery/message_store_test.go +++ /dev/null @@ -1,352 +0,0 @@ -package discovery - -import ( - "bytes" - "io/ioutil" - "math/rand" - "os" - "reflect" - "testing" - - "github.com/davecgh/go-spew/spew" - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/lnd/lnwire" -) - 
-func createTestMessageStore(t *testing.T) (*MessageStore, func()) { - t.Helper() - - tempDir, errr := ioutil.TempDir("", "channeldb") - if errr != nil { - t.Fatalf("unable to create temp dir: %v", errr) - } - db, err := channeldb.Open(tempDir) - if err != nil { - os.RemoveAll(tempDir) - t.Fatalf("unable to open db: %v", err) - } - - cleanUp := func() { - db.Close() - os.RemoveAll(tempDir) - } - - store, err := NewMessageStore(db) - if err != nil { - cleanUp() - t.Fatalf("unable to initialize message store: %v", err) - } - - return store, cleanUp -} - -func randPubKey(t *testing.T) *btcec.PublicKey { - priv, err := btcec.NewPrivateKey(btcec.S256()) - if err != nil { - t.Fatalf("unable to create private key: %v", err) - } - - return priv.PubKey() -} - -func randCompressedPubKey(t *testing.T) [33]byte { - t.Helper() - - pubKey := randPubKey(t) - - var compressedPubKey [33]byte - copy(compressedPubKey[:], pubKey.SerializeCompressed()) - - return compressedPubKey -} - -func randAnnounceSignatures() *lnwire.AnnounceSignatures { - return &lnwire.AnnounceSignatures{ - ShortChannelID: lnwire.NewShortChanIDFromInt(rand.Uint64()), - } -} - -func randChannelUpdate() *lnwire.ChannelUpdate { - return &lnwire.ChannelUpdate{ - ShortChannelID: lnwire.NewShortChanIDFromInt(rand.Uint64()), - } -} - -// TestMessageStoreMessages ensures that messages can be properly queried from -// the store. -func TestMessageStoreMessages(t *testing.T) { - t.Parallel() - - // We'll start by creating our test message store. - msgStore, cleanUp := createTestMessageStore(t) - defer cleanUp() - - // We'll then create some test messages for two test peers, and none for - // an additional test peer. 
- channelUpdate1 := randChannelUpdate() - announceSignatures1 := randAnnounceSignatures() - peer1 := randCompressedPubKey(t) - if err := msgStore.AddMessage(channelUpdate1, peer1); err != nil { - t.Fatalf("unable to add message: %v", err) - } - if err := msgStore.AddMessage(announceSignatures1, peer1); err != nil { - t.Fatalf("unable to add message: %v", err) - } - expectedPeerMsgs1 := map[uint64]lnwire.MessageType{ - channelUpdate1.ShortChannelID.ToUint64(): channelUpdate1.MsgType(), - announceSignatures1.ShortChannelID.ToUint64(): announceSignatures1.MsgType(), - } - - channelUpdate2 := randChannelUpdate() - peer2 := randCompressedPubKey(t) - if err := msgStore.AddMessage(channelUpdate2, peer2); err != nil { - t.Fatalf("unable to add message: %v", err) - } - expectedPeerMsgs2 := map[uint64]lnwire.MessageType{ - channelUpdate2.ShortChannelID.ToUint64(): channelUpdate2.MsgType(), - } - - peer3 := randCompressedPubKey(t) - expectedPeerMsgs3 := map[uint64]lnwire.MessageType{} - - // assertPeerMsgs is a helper closure that we'll use to ensure we - // retrieve the correct set of messages for a given peer. 
- assertPeerMsgs := func(peerMsgs []lnwire.Message, - expected map[uint64]lnwire.MessageType) { - - t.Helper() - - if len(peerMsgs) != len(expected) { - t.Fatalf("expected %d pending messages, got %d", - len(expected), len(peerMsgs)) - } - for _, msg := range peerMsgs { - var shortChanID uint64 - switch msg := msg.(type) { - case *lnwire.AnnounceSignatures: - shortChanID = msg.ShortChannelID.ToUint64() - case *lnwire.ChannelUpdate: - shortChanID = msg.ShortChannelID.ToUint64() - default: - t.Fatalf("found unexpected message type %T", msg) - } - - msgType, ok := expected[shortChanID] - if !ok { - t.Fatalf("retrieved message with unexpected ID "+ - "%d from store", shortChanID) - } - if msgType != msg.MsgType() { - t.Fatalf("expected message of type %v, got %v", - msg.MsgType(), msgType) - } - } - } - - // Then, we'll query the store for the set of messages for each peer and - // ensure it matches what we expect. - peers := [][33]byte{peer1, peer2, peer3} - expectedPeerMsgs := []map[uint64]lnwire.MessageType{ - expectedPeerMsgs1, expectedPeerMsgs2, expectedPeerMsgs3, - } - for i, peer := range peers { - peerMsgs, err := msgStore.MessagesForPeer(peer) - if err != nil { - t.Fatalf("unable to retrieve messages: %v", err) - } - assertPeerMsgs(peerMsgs, expectedPeerMsgs[i]) - } - - // Finally, we'll query the store for all of its messages of every peer. - // Again, each peer should have a set of messages that match what we - // expect. - // - // We'll construct the expected response. Only the first two peers will - // have messages. 
- totalPeerMsgs := make(map[[33]byte]map[uint64]lnwire.MessageType, 2) - for i := 0; i < 2; i++ { - totalPeerMsgs[peers[i]] = expectedPeerMsgs[i] - } - - msgs, err := msgStore.Messages() - if err != nil { - t.Fatalf("unable to retrieve all peers with pending messages: "+ - "%v", err) - } - if len(msgs) != len(totalPeerMsgs) { - t.Fatalf("expected %d peers with messages, got %d", - len(totalPeerMsgs), len(msgs)) - } - for peer, peerMsgs := range msgs { - expected, ok := totalPeerMsgs[peer] - if !ok { - t.Fatalf("expected to find pending messages for peer %x", - peer) - } - - assertPeerMsgs(peerMsgs, expected) - } - - peerPubKeys, err := msgStore.Peers() - if err != nil { - t.Fatalf("unable to retrieve all peers with pending messages: "+ - "%v", err) - } - if len(peerPubKeys) != len(totalPeerMsgs) { - t.Fatalf("expected %d peers with messages, got %d", - len(totalPeerMsgs), len(peerPubKeys)) - } - for peerPubKey := range peerPubKeys { - if _, ok := totalPeerMsgs[peerPubKey]; !ok { - t.Fatalf("expected to find peer %x", peerPubKey) - } - } -} - -// TestMessageStoreUnsupportedMessage ensures that we are not able to add a -// message which is unsupported, and if a message is found to be unsupported by -// the current version of the store, that it is properly filtered out from the -// response. -func TestMessageStoreUnsupportedMessage(t *testing.T) { - t.Parallel() - - // We'll start by creating our test message store. - msgStore, cleanUp := createTestMessageStore(t) - defer cleanUp() - - // Create a message that is known to not be supported by the store. - peer := randCompressedPubKey(t) - unsupportedMsg := &lnwire.Error{} - - // Attempting to add it to the store should result in - // ErrUnsupportedMessage. 
- err := msgStore.AddMessage(unsupportedMsg, peer) - if !ErrUnsupportedMessage.Is(err) { - t.Fatalf("expected ErrUnsupportedMessage, got %v", err) - } - - // We'll now pretend that the message is actually supported in a future - // version of the store, so it's able to be added successfully. To - // replicate this, we'll add the message manually rather than through - // the existing AddMessage method. - msgKey := peer[:] - var rawMsg bytes.Buffer - if _, err := lnwire.WriteMessage(&rawMsg, unsupportedMsg, 0); err != nil { - t.Fatalf("unable to serialize message: %v", err) - } - err = kvdb.Update(msgStore.db, func(tx kvdb.RwTx) er.R { - messageStore := tx.ReadWriteBucket(messageStoreBucket) - return messageStore.Put(msgKey, rawMsg.Bytes()) - }, func() {}) - if err != nil { - t.Fatalf("unable to add unsupported message to store: %v", err) - } - - // Finally, we'll check that the store can properly filter out messages - // that are currently unknown to it. We'll make sure this is done for - // both Messages and MessagesForPeer. - totalMsgs, err := msgStore.Messages() - if err != nil { - t.Fatalf("unable to retrieve messages: %v", err) - } - if len(totalMsgs) != 0 { - t.Fatalf("expected to filter out unsupported message") - } - peerMsgs, err := msgStore.MessagesForPeer(peer) - if err != nil { - t.Fatalf("unable to retrieve peer messages: %v", err) - } - if len(peerMsgs) != 0 { - t.Fatalf("expected to filter out unsupported message") - } -} - -// TestMessageStoreDeleteMessage ensures that we can properly delete messages -// from the store. -func TestMessageStoreDeleteMessage(t *testing.T) { - t.Parallel() - - msgStore, cleanUp := createTestMessageStore(t) - defer cleanUp() - - // assertMsg is a helper closure we'll use to ensure a message - // does/doesn't exist within the store. 
- assertMsg := func(msg lnwire.Message, peer [33]byte, exists bool) { - t.Helper() - - storeMsgs, err := msgStore.MessagesForPeer(peer) - if err != nil { - t.Fatalf("unable to retrieve messages: %v", err) - } - - found := false - for _, storeMsg := range storeMsgs { - if reflect.DeepEqual(msg, storeMsg) { - found = true - } - } - - if found != exists { - str := "find" - if !exists { - str = "not find" - } - t.Fatalf("expected to %v message %v", str, - spew.Sdump(msg)) - } - } - - // An AnnounceSignatures message should exist within the store after - // adding it, and should no longer exists after deleting it. - peer := randCompressedPubKey(t) - annSig := randAnnounceSignatures() - if err := msgStore.AddMessage(annSig, peer); err != nil { - t.Fatalf("unable to add message: %v", err) - } - assertMsg(annSig, peer, true) - if err := msgStore.DeleteMessage(annSig, peer); err != nil { - t.Fatalf("unable to delete message: %v", err) - } - assertMsg(annSig, peer, false) - - // The store allows overwriting ChannelUpdates, since there can be - // multiple versions, so we'll test things slightly different. - // - // The ChannelUpdate message should exist within the store after adding - // it. - chanUpdate := randChannelUpdate() - if err := msgStore.AddMessage(chanUpdate, peer); err != nil { - t.Fatalf("unable to add message: %v", err) - } - assertMsg(chanUpdate, peer, true) - - // Now, we'll create a new version for the same ChannelUpdate message. - // Adding this one to the store will overwrite the previous one, so only - // the new one should exist. 
- newChanUpdate := randChannelUpdate() - newChanUpdate.ShortChannelID = chanUpdate.ShortChannelID - newChanUpdate.Timestamp = chanUpdate.Timestamp + 1 - if err := msgStore.AddMessage(newChanUpdate, peer); err != nil { - t.Fatalf("unable to add message: %v", err) - } - assertMsg(chanUpdate, peer, false) - assertMsg(newChanUpdate, peer, true) - - // Deleting the older message should act as a NOP and should NOT delete - // the newer version as the older no longer exists. - if err := msgStore.DeleteMessage(chanUpdate, peer); err != nil { - t.Fatalf("unable to delete message: %v", err) - } - assertMsg(chanUpdate, peer, false) - assertMsg(newChanUpdate, peer, true) - - // The newer version should no longer exist within the store after - // deleting it. - if err := msgStore.DeleteMessage(newChanUpdate, peer); err != nil { - t.Fatalf("unable to delete message: %v", err) - } - assertMsg(newChanUpdate, peer, false) -} diff --git a/lnd/discovery/mock_test.go b/lnd/discovery/mock_test.go deleted file mode 100644 index 5297a3e5..00000000 --- a/lnd/discovery/mock_test.go +++ /dev/null @@ -1,146 +0,0 @@ -package discovery - -import ( - "net" - "sync" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/lnpeer" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/wire" -) - -// mockPeer implements the lnpeer.Peer interface and is used to test the -// gossiper's interaction with peers. 
-type mockPeer struct { - pk *btcec.PublicKey - sentMsgs chan lnwire.Message - quit chan struct{} -} - -var _ lnpeer.Peer = (*mockPeer)(nil) - -func (p *mockPeer) SendMessage(_ bool, msgs ...lnwire.Message) er.R { - if p.sentMsgs == nil && p.quit == nil { - return nil - } - - for _, msg := range msgs { - select { - case p.sentMsgs <- msg: - case <-p.quit: - return er.New("peer disconnected") - } - } - - return nil -} - -func (p *mockPeer) SendMessageLazy(sync bool, msgs ...lnwire.Message) er.R { - return p.SendMessage(sync, msgs...) -} - -func (p *mockPeer) AddNewChannel(_ *channeldb.OpenChannel, _ <-chan struct{}) er.R { - return nil -} -func (p *mockPeer) WipeChannel(_ *wire.OutPoint) {} -func (p *mockPeer) IdentityKey() *btcec.PublicKey { return p.pk } -func (p *mockPeer) PubKey() [33]byte { - var pubkey [33]byte - copy(pubkey[:], p.pk.SerializeCompressed()) - return pubkey -} -func (p *mockPeer) Address() net.Addr { return nil } -func (p *mockPeer) QuitSignal() <-chan struct{} { - return p.quit -} -func (p *mockPeer) LocalFeatures() *lnwire.FeatureVector { - return nil -} -func (p *mockPeer) RemoteFeatures() *lnwire.FeatureVector { - return nil -} - -// mockMessageStore is an in-memory implementation of the MessageStore interface -// used for the gossiper's unit tests. 
-type mockMessageStore struct { - sync.Mutex - messages map[[33]byte]map[lnwire.Message]struct{} -} - -func newMockMessageStore() *mockMessageStore { - return &mockMessageStore{ - messages: make(map[[33]byte]map[lnwire.Message]struct{}), - } -} - -var _ GossipMessageStore = (*mockMessageStore)(nil) - -func (s *mockMessageStore) AddMessage(msg lnwire.Message, pubKey [33]byte) er.R { - s.Lock() - defer s.Unlock() - - if _, ok := s.messages[pubKey]; !ok { - s.messages[pubKey] = make(map[lnwire.Message]struct{}) - } - - s.messages[pubKey][msg] = struct{}{} - - return nil -} - -func (s *mockMessageStore) DeleteMessage(msg lnwire.Message, pubKey [33]byte) er.R { - s.Lock() - defer s.Unlock() - - peerMsgs, ok := s.messages[pubKey] - if !ok { - return nil - } - - delete(peerMsgs, msg) - return nil -} - -func (s *mockMessageStore) Messages() (map[[33]byte][]lnwire.Message, er.R) { - s.Lock() - defer s.Unlock() - - msgs := make(map[[33]byte][]lnwire.Message, len(s.messages)) - for peer, peerMsgs := range s.messages { - for msg := range peerMsgs { - msgs[peer] = append(msgs[peer], msg) - } - } - return msgs, nil -} - -func (s *mockMessageStore) Peers() (map[[33]byte]struct{}, er.R) { - s.Lock() - defer s.Unlock() - - peers := make(map[[33]byte]struct{}, len(s.messages)) - for peer := range s.messages { - peers[peer] = struct{}{} - } - return peers, nil -} - -func (s *mockMessageStore) MessagesForPeer(pubKey [33]byte) ([]lnwire.Message, er.R) { - s.Lock() - defer s.Unlock() - - peerMsgs, ok := s.messages[pubKey] - if !ok { - return nil, nil - } - - msgs := make([]lnwire.Message, 0, len(peerMsgs)) - for msg := range peerMsgs { - msgs = append(msgs, msg) - } - - return msgs, nil -} diff --git a/lnd/discovery/reliable_sender.go b/lnd/discovery/reliable_sender.go deleted file mode 100644 index c08fcf10..00000000 --- a/lnd/discovery/reliable_sender.go +++ /dev/null @@ -1,332 +0,0 @@ -package discovery - -import ( - "sync" - - "github.com/pkt-cash/pktd/btcutil/er" - 
"github.com/pkt-cash/pktd/lnd/lnpeer" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/pktlog/log" -) - -// reliableSenderCfg contains all of necessary items for the reliableSender to -// carry out its duties. -type reliableSenderCfg struct { - // NotifyWhenOnline is a function that allows the gossiper to be - // notified when a certain peer comes online, allowing it to - // retry sending a peer message. - // - // NOTE: The peerChan channel must be buffered. - NotifyWhenOnline func(peerPubKey [33]byte, peerChan chan<- lnpeer.Peer) - - // NotifyWhenOffline is a function that allows the gossiper to be - // notified when a certain peer disconnects, allowing it to request a - // notification for when it reconnects. - NotifyWhenOffline func(peerPubKey [33]byte) <-chan struct{} - - // MessageStore is a persistent storage of gossip messages which we will - // use to determine which messages need to be resent for a given peer. - MessageStore GossipMessageStore - - // IsMsgStale determines whether a message retrieved from the backing - // MessageStore is seen as stale by the current graph. - IsMsgStale func(lnwire.Message) bool -} - -// peerManager contains the set of channels required for the peerHandler to -// properly carry out its duties. -type peerManager struct { - // msgs is the channel through which messages will be streamed to the - // handler in order to send the message to the peer while they're - // online. - msgs chan lnwire.Message - - // done is a channel that will be closed to signal that the handler for - // the given peer has been torn down for whatever reason. - done chan struct{} -} - -// reliableSender is a small subsystem of the gossiper used to reliably send -// gossip messages to peers. -type reliableSender struct { - start sync.Once - stop sync.Once - - cfg reliableSenderCfg - - // activePeers keeps track of whether a peerHandler exists for a given - // peer. 
A peerHandler is tasked with handling requests for messages - // that should be reliably sent to peers while also taking into account - // the peer's connection lifecycle. - activePeers map[[33]byte]peerManager - activePeersMtx sync.Mutex - - wg sync.WaitGroup - quit chan struct{} -} - -// newReliableSender returns a new reliableSender backed by the given config. -func newReliableSender(cfg *reliableSenderCfg) *reliableSender { - return &reliableSender{ - cfg: *cfg, - activePeers: make(map[[33]byte]peerManager), - quit: make(chan struct{}), - } -} - -// Start spawns message handlers for any peers with pending messages. -func (s *reliableSender) Start() er.R { - var err er.R - s.start.Do(func() { - err = s.resendPendingMsgs() - }) - return err -} - -// Stop halts the reliable sender from sending messages to peers. -func (s *reliableSender) Stop() { - s.stop.Do(func() { - close(s.quit) - s.wg.Wait() - }) -} - -// sendMessage constructs a request to send a message reliably to a peer. In the -// event that the peer is currently offline, this will only write the message to -// disk. Once the peer reconnects, this message, along with any others pending, -// will be sent to the peer. -func (s *reliableSender) sendMessage(msg lnwire.Message, peerPubKey [33]byte) er.R { - // We'll start by persisting the message to disk. This allows us to - // resend the message upon restarts and peer reconnections. - if err := s.cfg.MessageStore.AddMessage(msg, peerPubKey); err != nil { - return err - } - - // Then, we'll spawn a peerHandler for this peer to handle resending its - // pending messages while taking into account its connection lifecycle. -spawnHandler: - msgHandler, ok := s.spawnPeerHandler(peerPubKey) - - // If the handler wasn't previously active, we can exit now as we know - // that the message will be sent once the peer online notification is - // received. This prevents us from potentially sending the message - // twice. 
- if !ok { - return nil - } - - // Otherwise, we'll attempt to stream the message to the handler. - // There's a subtle race condition where the handler can be torn down - // due to all of the messages sent being stale, so we'll handle this - // gracefully by spawning another one to prevent blocking. - select { - case msgHandler.msgs <- msg: - case <-msgHandler.done: - goto spawnHandler - case <-s.quit: - return ErrGossiperShuttingDown.Default() - } - - return nil -} - -// spawnPeerMsgHandler spawns a peerHandler for the given peer if there isn't -// one already active. The boolean returned signals whether there was already -// one active or not. -func (s *reliableSender) spawnPeerHandler(peerPubKey [33]byte) (peerManager, bool) { - s.activePeersMtx.Lock() - defer s.activePeersMtx.Unlock() - - msgHandler, ok := s.activePeers[peerPubKey] - if !ok { - msgHandler = peerManager{ - msgs: make(chan lnwire.Message), - done: make(chan struct{}), - } - s.activePeers[peerPubKey] = msgHandler - - s.wg.Add(1) - go s.peerHandler(msgHandler, peerPubKey) - } - - return msgHandler, ok -} - -// peerHandler is responsible for handling our reliable message send requests -// for a given peer while also taking into account the peer's connection -// lifecycle. Any messages that are attempted to be sent while the peer is -// offline will be queued and sent once the peer reconnects. -// -// NOTE: This must be run as a goroutine. -func (s *reliableSender) peerHandler(peerMgr peerManager, peerPubKey [33]byte) { - defer s.wg.Done() - - // We'll start by requesting a notification for when the peer - // reconnects. - peerChan := make(chan lnpeer.Peer, 1) - -waitUntilOnline: - log.Debugf("Requesting online notification for peer=%x", peerPubKey) - - s.cfg.NotifyWhenOnline(peerPubKey, peerChan) - - var peer lnpeer.Peer -out: - for { - select { - // While we're waiting, we'll also consume any messages that - // must be sent to prevent blocking the caller. 
These can be - // ignored for now since the peer is currently offline. Once - // they reconnect, the messages will be sent since they should - // have been persisted to disk. - case msg := <-peerMgr.msgs: - // Retrieve the short channel ID for which this message - // applies for logging purposes. The error can be - // ignored as the store can only contain messages which - // have a ShortChannelID field. - shortChanID, _ := msgShortChanID(msg) - log.Debugf("Received request to send %v message for "+ - "channel=%v while peer=%x is offline", - msg.MsgType(), shortChanID, peerPubKey) - - case peer = <-peerChan: - break out - - case <-s.quit: - return - } - } - - log.Debugf("Peer=%x is now online, proceeding to send pending messages", - peerPubKey) - - // Once we detect the peer has reconnected, we'll also request a - // notification for when they disconnect. We'll use this to make sure - // they haven't disconnected (in the case of a flappy peer, etc.) by the - // time we attempt to send them the pending messages. - log.Debugf("Requesting offline notification for peer=%x", peerPubKey) - - offlineChan := s.cfg.NotifyWhenOffline(peerPubKey) - - pendingMsgs, err := s.cfg.MessageStore.MessagesForPeer(peerPubKey) - if err != nil { - log.Errorf("Unable to retrieve pending messages for peer %x: %v", - peerPubKey, err) - return - } - - // With the peer online, we can now proceed to send our pending messages - // for them. - for _, msg := range pendingMsgs { - // Retrieve the short channel ID for which this message applies - // for logging purposes. The error can be ignored as the store - // can only contain messages which have a ShortChannelID field. - shortChanID, _ := msgShortChanID(msg) - - // Ensure the peer is still online right before sending the - // message. 
- select { - case <-offlineChan: - goto waitUntilOnline - default: - } - - if err := peer.SendMessage(false, msg); err != nil { - log.Errorf("Unable to send %v message for channel=%v "+ - "to %x: %v", msg.MsgType(), shortChanID, - peerPubKey, err) - goto waitUntilOnline - } - - log.Debugf("Successfully sent %v message for channel=%v with "+ - "peer=%x upon reconnection", msg.MsgType(), shortChanID, - peerPubKey) - - // Now that the message has at least been sent once, we can - // check whether it's stale. This guarantees that - // AnnounceSignatures are sent at least once if we happen to - // already have signatures for both parties. - if s.cfg.IsMsgStale(msg) { - err := s.cfg.MessageStore.DeleteMessage(msg, peerPubKey) - if err != nil { - log.Errorf("Unable to remove stale %v message "+ - "for channel=%v with peer %x: %v", - msg.MsgType(), shortChanID, peerPubKey, - err) - continue - } - - log.Debugf("Removed stale %v message for channel=%v "+ - "with peer=%x", msg.MsgType(), shortChanID, - peerPubKey) - } - } - - // If all of our messages were stale, then there's no need for this - // handler to continue running, so we can exit now. - pendingMsgs, err = s.cfg.MessageStore.MessagesForPeer(peerPubKey) - if err != nil { - log.Errorf("Unable to retrieve pending messages for peer %x: %v", - peerPubKey, err) - return - } - - if len(pendingMsgs) == 0 { - log.Debugf("No pending messages left for peer=%x", peerPubKey) - - s.activePeersMtx.Lock() - delete(s.activePeers, peerPubKey) - s.activePeersMtx.Unlock() - - close(peerMgr.done) - - return - } - - // Once the pending messages are sent, we can continue to send any - // future messages while the peer remains connected. - for { - select { - case msg := <-peerMgr.msgs: - // Retrieve the short channel ID for which this message - // applies for logging purposes. The error can be - // ignored as the store can only contain messages which - // have a ShortChannelID field. 
- shortChanID, _ := msgShortChanID(msg) - - if err := peer.SendMessage(false, msg); err != nil { - log.Errorf("Unable to send %v message for "+ - "channel=%v to %x: %v", msg.MsgType(), - shortChanID, peerPubKey, err) - } - - log.Debugf("Successfully sent %v message for "+ - "channel=%v with peer=%x", msg.MsgType(), - shortChanID, peerPubKey) - - case <-offlineChan: - goto waitUntilOnline - - case <-s.quit: - return - } - } -} - -// resendPendingMsgs retrieves and sends all of the messages within the message -// store that should be reliably sent to their respective peers. -func (s *reliableSender) resendPendingMsgs() er.R { - // Fetch all of the peers for which we have pending messages for and - // spawn a peerMsgHandler for each. Once the peer is seen as online, all - // of the pending messages will be sent. - peers, err := s.cfg.MessageStore.Peers() - if err != nil { - return err - } - - for peer := range peers { - s.spawnPeerHandler(peer) - } - - return nil -} diff --git a/lnd/discovery/reliable_sender_test.go b/lnd/discovery/reliable_sender_test.go deleted file mode 100644 index 36032775..00000000 --- a/lnd/discovery/reliable_sender_test.go +++ /dev/null @@ -1,290 +0,0 @@ -package discovery - -import ( - "testing" - "time" - - "github.com/davecgh/go-spew/spew" - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/lnpeer" - "github.com/pkt-cash/pktd/lnd/lntest/wait" - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -// newTestReliableSender creates a new reliable sender instance used for -// testing. 
-func newTestReliableSender(t *testing.T) *reliableSender { - t.Helper() - - cfg := &reliableSenderCfg{ - NotifyWhenOnline: func(pubKey [33]byte, - peerChan chan<- lnpeer.Peer) { - pk, err := btcec.ParsePubKey(pubKey[:], btcec.S256()) - if err != nil { - t.Fatalf("unable to parse pubkey: %v", err) - } - peerChan <- &mockPeer{pk: pk} - }, - NotifyWhenOffline: func(_ [33]byte) <-chan struct{} { - c := make(chan struct{}, 1) - return c - }, - MessageStore: newMockMessageStore(), - IsMsgStale: func(lnwire.Message) bool { - return false - }, - } - - return newReliableSender(cfg) -} - -// assertMsgsSent ensures that the given messages can be read from a mock peer's -// msgChan. -func assertMsgsSent(t *testing.T, msgChan chan lnwire.Message, - msgs ...lnwire.Message) { - - t.Helper() - - m := make(map[lnwire.Message]struct{}, len(msgs)) - for _, msg := range msgs { - m[msg] = struct{}{} - } - - for i := 0; i < len(msgs); i++ { - select { - case msg := <-msgChan: - if _, ok := m[msg]; !ok { - t.Fatalf("found unexpected message sent: %v", - spew.Sdump(msg)) - } - case <-time.After(time.Second): - t.Fatal("reliable sender did not send message to peer") - } - } -} - -// TestReliableSenderFlow ensures that the flow for sending messages reliably to -// a peer while taking into account its connection lifecycle works as expected. -func TestReliableSenderFlow(t *testing.T) { - t.Parallel() - - reliableSender := newTestReliableSender(t) - - // Create a mock peer to send the messages to. - pubKey := randPubKey(t) - msgsSent := make(chan lnwire.Message) - peer := &mockPeer{pubKey, msgsSent, reliableSender.quit} - - // Override NotifyWhenOnline and NotifyWhenOffline to provide the - // notification channels so that we can control when notifications get - // dispatched. 
- notifyOnline := make(chan chan<- lnpeer.Peer, 2) - notifyOffline := make(chan chan struct{}, 1) - - reliableSender.cfg.NotifyWhenOnline = func(_ [33]byte, - peerChan chan<- lnpeer.Peer) { - notifyOnline <- peerChan - } - reliableSender.cfg.NotifyWhenOffline = func(_ [33]byte) <-chan struct{} { - c := make(chan struct{}, 1) - notifyOffline <- c - return c - } - - // We'll start by creating our first message which we should reliably - // send to our peer. - msg1 := randChannelUpdate() - var peerPubKey [33]byte - copy(peerPubKey[:], pubKey.SerializeCompressed()) - if err := reliableSender.sendMessage(msg1, peerPubKey); err != nil { - t.Fatalf("unable to reliably send message: %v", err) - } - - // Since there isn't a peerHandler for this peer currently active due to - // this being the first message being sent reliably, we should expect to - // see a notification request for when the peer is online. - var peerChan chan<- lnpeer.Peer - select { - case peerChan = <-notifyOnline: - case <-time.After(time.Second): - t.Fatal("reliable sender did not request online notification") - } - - // We'll then attempt to send another additional message reliably. - msg2 := randAnnounceSignatures() - if err := reliableSender.sendMessage(msg2, peerPubKey); err != nil { - t.Fatalf("unable to reliably send message: %v", err) - } - - // This should not however request another peer online notification as - // the peerHandler has already been started and is waiting for the - // notification to be dispatched. - select { - case <-notifyOnline: - t.Fatal("reliable sender should not request online notification") - case <-time.After(time.Second): - } - - // We'll go ahead and notify the peer. - peerChan <- peer - - // By doing so, we should expect to see a notification request for when - // the peer is offline. 
- var offlineChan chan struct{} - select { - case offlineChan = <-notifyOffline: - case <-time.After(time.Second): - t.Fatal("reliable sender did not request offline notification") - } - - // We should also see the messages arrive at the peer since they are now - // seen as online. - assertMsgsSent(t, peer.sentMsgs, msg1, msg2) - - // Then, we'll send one more message reliably. - msg3 := randChannelUpdate() - if err := reliableSender.sendMessage(msg3, peerPubKey); err != nil { - t.Fatalf("unable to reliably send message: %v", err) - } - - // Again, this should not request another peer online notification - // request since we are currently waiting for the peer to be offline. - select { - case <-notifyOnline: - t.Fatal("reliable sender should not request online notification") - case <-time.After(time.Second): - } - - // The expected message should be sent to the peer. - assertMsgsSent(t, peer.sentMsgs, msg3) - - // We'll then notify that the peer is offline. - close(offlineChan) - - // This should cause an online notification to be requested. - select { - case peerChan = <-notifyOnline: - case <-time.After(time.Second): - t.Fatal("reliable sender did not request online notification") - } - - // Once we dispatch it, we should expect to see the messages be resent - // to the peer as they are not stale. - peerChan <- peer - - select { - case <-notifyOffline: - case <-time.After(5 * time.Second): - t.Fatal("reliable sender did not request offline notification") - } - - assertMsgsSent(t, peer.sentMsgs, msg1, msg2, msg3) -} - -// TestReliableSenderStaleMessages ensures that the reliable sender is no longer -// active for a peer which has successfully sent all of its messages and deemed -// them as stale. -func TestReliableSenderStaleMessages(t *testing.T) { - t.Parallel() - - reliableSender := newTestReliableSender(t) - - // Create a mock peer to send the messages to. 
- pubKey := randPubKey(t) - msgsSent := make(chan lnwire.Message) - peer := &mockPeer{pubKey, msgsSent, reliableSender.quit} - - // Override NotifyWhenOnline to provide the notification channel so that - // we can control when notifications get dispatched. - notifyOnline := make(chan chan<- lnpeer.Peer, 1) - reliableSender.cfg.NotifyWhenOnline = func(_ [33]byte, - peerChan chan<- lnpeer.Peer) { - notifyOnline <- peerChan - } - - // We'll also override IsMsgStale to mark all messages as stale as we're - // interested in testing the stale message behavior. - reliableSender.cfg.IsMsgStale = func(_ lnwire.Message) bool { - return true - } - - // We'll start by creating our first message which we should reliably - // send to our peer, but will be seen as stale. - msg1 := randAnnounceSignatures() - var peerPubKey [33]byte - copy(peerPubKey[:], pubKey.SerializeCompressed()) - if err := reliableSender.sendMessage(msg1, peerPubKey); err != nil { - t.Fatalf("unable to reliably send message: %v", err) - } - - // Since there isn't a peerHandler for this peer currently active due to - // this being the first message being sent reliably, we should expect to - // see a notification request for when the peer is online. - var peerChan chan<- lnpeer.Peer - select { - case peerChan = <-notifyOnline: - case <-time.After(time.Second): - t.Fatal("reliable sender did not request online notification") - } - - // We'll go ahead and notify the peer. - peerChan <- peer - - // This should cause the message to be sent to the peer since they are - // now seen as online. The message will be sent at least once to ensure - // they can propagate before deciding whether they are stale or not. - assertMsgsSent(t, peer.sentMsgs, msg1) - - // We'll create another message which we'll send reliably. This one - // won't be seen as stale. 
- msg2 := randChannelUpdate() - - // We'll then wait for the message to be removed from the backing - // message store since it is seen as stale and has been sent at least - // once. Once the message is removed, the peerHandler should be torn - // down as there are no longer any pending messages within the store. - err := wait.NoError(func() er.R { - msgs, err := reliableSender.cfg.MessageStore.MessagesForPeer( - peerPubKey, - ) - if err != nil { - return er.Errorf("unable to retrieve messages for "+ - "peer: %v", err) - } - if len(msgs) != 0 { - return er.Errorf("expected to not find any "+ - "messages for peer, found %d", len(msgs)) - } - - return nil - }, time.Second) - if err != nil { - t.Fatal(err) - } - - // Override IsMsgStale to no longer mark messages as stale. - reliableSender.cfg.IsMsgStale = func(_ lnwire.Message) bool { - return false - } - - // We'll request the message to be sent reliably. - if err := reliableSender.sendMessage(msg2, peerPubKey); err != nil { - t.Fatalf("unable to reliably send message: %v", err) - } - - // We should see an online notification request indicating that a new - // peerHandler has been spawned since it was previously torn down. - select { - case peerChan = <-notifyOnline: - case <-time.After(time.Second): - t.Fatal("reliable sender did not request online notification") - } - - // Finally, notifying the peer is online should prompt the message to be - // sent. Only the ChannelUpdate will be sent in this case since the - // AnnounceSignatures message above was seen as stale. 
- peerChan <- peer - - assertMsgsSent(t, peer.sentMsgs, msg2) -} diff --git a/lnd/discovery/sync_manager.go b/lnd/discovery/sync_manager.go deleted file mode 100644 index 68de9266..00000000 --- a/lnd/discovery/sync_manager.go +++ /dev/null @@ -1,702 +0,0 @@ -package discovery - -import ( - "sync" - "sync/atomic" - "time" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/lnd/lnpeer" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/routing/route" - "github.com/pkt-cash/pktd/lnd/ticker" - "github.com/pkt-cash/pktd/pktlog/log" -) - -const ( - // DefaultSyncerRotationInterval is the default interval in which we'll - // rotate a single active syncer. - DefaultSyncerRotationInterval = 20 * time.Minute - - // DefaultHistoricalSyncInterval is the default interval in which we'll - // force a historical sync to ensure we have as much of the public - // network as possible. - DefaultHistoricalSyncInterval = time.Hour -) - -var ( - // ErrSyncManagerExiting is an error returned when we attempt to - // start/stop a gossip syncer for a connected/disconnected peer, but the - // SyncManager has already been stopped. - ErrSyncManagerExiting = Err.CodeWithDetail("ErrSyncManagerExiting", "sync manager exiting") -) - -// newSyncer in an internal message we'll use within the SyncManager to signal -// that we should create a GossipSyncer for a newly connected peer. -type newSyncer struct { - // peer is the newly connected peer. - peer lnpeer.Peer - - // doneChan serves as a signal to the caller that the SyncManager's - // internal state correctly reflects the stale active syncer. - doneChan chan struct{} -} - -// staleSyncer is an internal message we'll use within the SyncManager to signal -// that a peer has disconnected and its GossipSyncer should be removed. -type staleSyncer struct { - // peer is the peer that has disconnected. 
- peer route.Vertex - - // doneChan serves as a signal to the caller that the SyncManager's - // internal state correctly reflects the stale active syncer. This is - // needed to ensure we always create a new syncer for a flappy peer - // after they disconnect if they happened to be an active syncer. - doneChan chan struct{} -} - -// SyncManagerCfg contains all of the dependencies required for the SyncManager -// to carry out its duties. -type SyncManagerCfg struct { - // ChainHash is a hash that indicates the specific network of the active - // chain. - ChainHash chainhash.Hash - - // ChanSeries is an interface that provides access to a time series view - // of the current known channel graph. Each GossipSyncer enabled peer - // will utilize this in order to create and respond to channel graph - // time series queries. - ChanSeries ChannelGraphTimeSeries - - // NumActiveSyncers is the number of peers for which we should have - // active syncers with. After reaching NumActiveSyncers, any future - // gossip syncers will be passive. - NumActiveSyncers int - - // RotateTicker is a ticker responsible for notifying the SyncManager - // when it should rotate its active syncers. A single active syncer with - // a chansSynced state will be exchanged for a passive syncer in order - // to ensure we don't keep syncing with the same peers. - RotateTicker ticker.Ticker - - // HistoricalSyncTicker is a ticker responsible for notifying the - // SyncManager when it should attempt a historical sync with a gossip - // sync peer. - HistoricalSyncTicker ticker.Ticker - - // IgnoreHistoricalFilters will prevent syncers from replying with - // historical data when the remote peer sets a gossip_timestamp_range. - // This prevents ranges with old start times from causing us to dump the - // graph on connect. - IgnoreHistoricalFilters bool -} - -// SyncManager is a subsystem of the gossiper that manages the gossip syncers -// for peers currently connected. 
When a new peer is connected, the manager will -// create its accompanying gossip syncer and determine whether it should have an -// ActiveSync or PassiveSync sync type based on how many other gossip syncers -// are currently active. Any ActiveSync gossip syncers are started in a -// round-robin manner to ensure we're not syncing with multiple peers at the -// same time. The first GossipSyncer registered with the SyncManager will -// attempt a historical sync to ensure we have as much of the public channel -// graph as possible. -type SyncManager struct { - // initialHistoricalSyncCompleted serves as a barrier when initializing - // new active GossipSyncers. If 0, the initial historical sync has not - // completed, so we'll defer initializing any active GossipSyncers. If - // 1, then we can transition the GossipSyncer immediately. We set up - // this barrier to ensure we have most of the graph before attempting to - // accept new updates at tip. - // - // NOTE: This must be used atomically. - initialHistoricalSyncCompleted int32 - - start sync.Once - stop sync.Once - - cfg SyncManagerCfg - - // newSyncers is a channel we'll use to process requests to create - // GossipSyncers for newly connected peers. - newSyncers chan *newSyncer - - // staleSyncers is a channel we'll use to process requests to tear down - // GossipSyncers for disconnected peers. - staleSyncers chan *staleSyncer - - // syncersMu guards the read and write access to the activeSyncers and - // inactiveSyncers maps below. - syncersMu sync.Mutex - - // activeSyncers is the set of all syncers for which we are currently - // receiving graph updates from. The number of possible active syncers - // is bounded by NumActiveSyncers. - activeSyncers map[route.Vertex]*GossipSyncer - - // inactiveSyncers is the set of all syncers for which we are not - // currently receiving new graph updates from. 
- inactiveSyncers map[route.Vertex]*GossipSyncer - - wg sync.WaitGroup - quit chan struct{} -} - -// newSyncManager constructs a new SyncManager backed by the given config. -func newSyncManager(cfg *SyncManagerCfg) *SyncManager { - return &SyncManager{ - cfg: *cfg, - newSyncers: make(chan *newSyncer), - staleSyncers: make(chan *staleSyncer), - activeSyncers: make( - map[route.Vertex]*GossipSyncer, cfg.NumActiveSyncers, - ), - inactiveSyncers: make(map[route.Vertex]*GossipSyncer), - quit: make(chan struct{}), - } -} - -// Start starts the SyncManager in order to properly carry out its duties. -func (m *SyncManager) Start() { - m.start.Do(func() { - m.wg.Add(1) - go m.syncerHandler() - }) -} - -// Stop stops the SyncManager from performing its duties. -func (m *SyncManager) Stop() { - m.stop.Do(func() { - close(m.quit) - m.wg.Wait() - - for _, syncer := range m.inactiveSyncers { - syncer.Stop() - } - for _, syncer := range m.activeSyncers { - syncer.Stop() - } - }) -} - -// syncerHandler is the SyncManager's main event loop responsible for: -// -// 1. Creating and tearing down GossipSyncers for connected/disconnected peers. - -// 2. Finding new peers to receive graph updates from to ensure we don't only -// receive them from the same set of peers. - -// 3. Finding new peers to force a historical sync with to ensure we have as -// much of the public network as possible. -// -// NOTE: This must be run as a goroutine. -func (m *SyncManager) syncerHandler() { - defer m.wg.Done() - - m.cfg.RotateTicker.Resume() - defer m.cfg.RotateTicker.Stop() - - m.cfg.HistoricalSyncTicker.Resume() - defer m.cfg.HistoricalSyncTicker.Stop() - - var ( - // initialHistoricalSyncer is the syncer we are currently - // performing an initial historical sync with. - initialHistoricalSyncer *GossipSyncer - - // initialHistoricalSyncSignal is a signal that will fire once - // the intiial historical sync has been completed. 
This is - // crucial to ensure that another historical sync isn't - // attempted just because the initialHistoricalSyncer was - // disconnected. - initialHistoricalSyncSignal chan struct{} - ) - - for { - select { - // A new peer has been connected, so we'll create its - // accompanying GossipSyncer. - case newSyncer := <-m.newSyncers: - // If we already have a syncer, then we'll exit early as - // we don't want to override it. - if _, ok := m.GossipSyncer(newSyncer.peer.PubKey()); ok { - close(newSyncer.doneChan) - continue - } - - s := m.createGossipSyncer(newSyncer.peer) - - // attemptHistoricalSync determines whether we should - // attempt an initial historical sync when a new peer - // connects. - attemptHistoricalSync := false - - m.syncersMu.Lock() - switch { - // Regardless of whether the initial historical sync - // has completed, we'll re-trigger a historical sync if - // we no longer have any syncers. This might be - // necessary if we lost all our peers at one point, and - // now we finally have one again. - case len(m.activeSyncers) == 0 && - len(m.inactiveSyncers) == 0: - - attemptHistoricalSync = true - fallthrough - - // If we've exceeded our total number of active syncers, - // we'll initialize this GossipSyncer as passive. - case len(m.activeSyncers) >= m.cfg.NumActiveSyncers: - fallthrough - - // If the initial historical sync has yet to complete, - // then we'll declare it as passive and attempt to - // transition it when the initial historical sync - // completes. - case !m.IsGraphSynced(): - s.setSyncType(PassiveSync) - m.inactiveSyncers[s.cfg.peerPub] = s - - // The initial historical sync has completed, so we can - // immediately start the GossipSyncer as active. - default: - s.setSyncType(ActiveSync) - m.activeSyncers[s.cfg.peerPub] = s - } - m.syncersMu.Unlock() - - s.Start() - - // Once we create the GossipSyncer, we'll signal to the - // caller that they can proceed since the SyncManager's - // internal state has been updated. 
- close(newSyncer.doneChan) - - // We'll force a historical sync with the first peer we - // connect to, to ensure we get as much of the graph as - // possible. - if !attemptHistoricalSync { - continue - } - m.markGraphSyncing() - - log.Debugf("Attempting initial historical sync with "+ - "GossipSyncer(%x)", s.cfg.peerPub) - - if err := s.historicalSync(); err != nil { - log.Errorf("Unable to attempt initial "+ - "historical sync with "+ - "GossipSyncer(%x): %v", s.cfg.peerPub, - err) - continue - } - - // Once the historical sync has started, we'll get a - // keep track of the corresponding syncer to properly - // handle disconnects. We'll also use a signal to know - // when the historical sync completed. - initialHistoricalSyncer = s - initialHistoricalSyncSignal = s.ResetSyncedSignal() - - // An existing peer has disconnected, so we'll tear down its - // corresponding GossipSyncer. - case staleSyncer := <-m.staleSyncers: - // Once the corresponding GossipSyncer has been stopped - // and removed, we'll signal to the caller that they can - // proceed since the SyncManager's internal state has - // been updated. - m.removeGossipSyncer(staleSyncer.peer) - close(staleSyncer.doneChan) - - // If we don't have an initialHistoricalSyncer, or we do - // but it is not the peer being disconnected, then we - // have nothing left to do and can proceed. - switch { - case initialHistoricalSyncer == nil: - fallthrough - case staleSyncer.peer != initialHistoricalSyncer.cfg.peerPub: - continue - } - - // Otherwise, our initialHistoricalSyncer corresponds to - // the peer being disconnected, so we'll have to find a - // replacement. 
- log.Debug("Finding replacement for intitial " + - "historical sync") - - s := m.forceHistoricalSync() - if s == nil { - log.Debug("No eligible replacement found " + - "for initial historical sync") - continue - } - - log.Debugf("Replaced initial historical "+ - "GossipSyncer(%v) with GossipSyncer(%x)", - staleSyncer.peer, s.cfg.peerPub) - - initialHistoricalSyncer = s - initialHistoricalSyncSignal = s.ResetSyncedSignal() - - // Our initial historical sync signal has completed, so we'll - // nil all of the relevant fields as they're no longer needed. - case <-initialHistoricalSyncSignal: - initialHistoricalSyncer = nil - initialHistoricalSyncSignal = nil - m.markGraphSynced() - - log.Debug("Initial historical sync completed") - - // With the initial historical sync complete, we can - // begin receiving new graph updates at tip. We'll - // determine whether we can have any more active - // GossipSyncers. If we do, we'll randomly select some - // that are currently passive to transition. - m.syncersMu.Lock() - numActiveLeft := m.cfg.NumActiveSyncers - len(m.activeSyncers) - if numActiveLeft <= 0 { - m.syncersMu.Unlock() - continue - } - - log.Debugf("Attempting to transition %v passive "+ - "GossipSyncers to active", numActiveLeft) - - for i := 0; i < numActiveLeft; i++ { - chooseRandomSyncer( - m.inactiveSyncers, m.transitionPassiveSyncer, - ) - } - - m.syncersMu.Unlock() - - // Our RotateTicker has ticked, so we'll attempt to rotate a - // single active syncer with a passive one. - case <-m.cfg.RotateTicker.Ticks(): - m.rotateActiveSyncerCandidate() - - // Our HistoricalSyncTicker has ticked, so we'll randomly select - // a peer and force a historical sync with them. - case <-m.cfg.HistoricalSyncTicker.Ticks(): - s := m.forceHistoricalSync() - - // If we don't have a syncer available or we've already - // performed our initial historical sync, then we have - // nothing left to do. 
- if s == nil || m.IsGraphSynced() { - continue - } - - // Otherwise, we'll track the peer we've performed a - // historical sync with in order to handle the case - // where our previous historical sync peer did not - // respond to our queries and we haven't ingested as - // much of the graph as we should. - initialHistoricalSyncer = s - initialHistoricalSyncSignal = s.ResetSyncedSignal() - - case <-m.quit: - return - } - } -} - -// createGossipSyncer creates the GossipSyncer for a newly connected peer. -func (m *SyncManager) createGossipSyncer(peer lnpeer.Peer) *GossipSyncer { - nodeID := route.Vertex(peer.PubKey()) - log.Infof("Creating new GossipSyncer for peer=%x", nodeID[:]) - - encoding := lnwire.EncodingSortedPlain - s := newGossipSyncer(gossipSyncerCfg{ - chainHash: m.cfg.ChainHash, - peerPub: nodeID, - channelSeries: m.cfg.ChanSeries, - encodingType: encoding, - chunkSize: encodingTypeToChunkSize[encoding], - batchSize: requestBatchSize, - sendToPeer: func(msgs ...lnwire.Message) er.R { - return peer.SendMessageLazy(false, msgs...) - }, - sendToPeerSync: func(msgs ...lnwire.Message) er.R { - return peer.SendMessageLazy(true, msgs...) - }, - ignoreHistoricalFilters: m.cfg.IgnoreHistoricalFilters, - }) - - // Gossip syncers are initialized by default in a PassiveSync type - // and chansSynced state so that they can reply to any peer queries or - // handle any sync transitions. - s.setSyncState(chansSynced) - s.setSyncType(PassiveSync) - return s -} - -// removeGossipSyncer removes all internal references to the disconnected peer's -// GossipSyncer and stops it. In the event of an active GossipSyncer being -// disconnected, a passive GossipSyncer, if any, will take its place. 
-func (m *SyncManager) removeGossipSyncer(peer route.Vertex) { - m.syncersMu.Lock() - defer m.syncersMu.Unlock() - - s, ok := m.gossipSyncer(peer) - if !ok { - return - } - - log.Infof("Removing GossipSyncer for peer=%v", peer) - - // We'll stop the GossipSyncer for the disconnected peer in a goroutine - // to prevent blocking the SyncManager. - go s.Stop() - - // If it's a non-active syncer, then we can just exit now. - if _, ok := m.inactiveSyncers[peer]; ok { - delete(m.inactiveSyncers, peer) - return - } - - // Otherwise, we'll need find a new one to replace it, if any. - delete(m.activeSyncers, peer) - newActiveSyncer := chooseRandomSyncer( - m.inactiveSyncers, m.transitionPassiveSyncer, - ) - if newActiveSyncer == nil { - return - } - - log.Debugf("Replaced active GossipSyncer(%x) with GossipSyncer(%x)", - peer, newActiveSyncer.cfg.peerPub) -} - -// rotateActiveSyncerCandidate rotates a single active syncer. In order to -// achieve this, the active syncer must be in a chansSynced state in order to -// process the sync transition. -func (m *SyncManager) rotateActiveSyncerCandidate() { - m.syncersMu.Lock() - defer m.syncersMu.Unlock() - - // If we couldn't find an eligible active syncer to rotate, we can - // return early. - activeSyncer := chooseRandomSyncer(m.activeSyncers, nil) - if activeSyncer == nil { - log.Debug("No eligible active syncer to rotate") - return - } - - // Similarly, if we don't have a candidate to rotate with, we can return - // early as well. - candidate := chooseRandomSyncer(m.inactiveSyncers, nil) - if candidate == nil { - log.Debug("No eligible candidate to rotate active syncer") - return - } - - // Otherwise, we'll attempt to transition each syncer to their - // respective new sync type. 
- log.Debugf("Rotating active GossipSyncer(%x) with GossipSyncer(%x)", - activeSyncer.cfg.peerPub, candidate.cfg.peerPub) - - if err := m.transitionActiveSyncer(activeSyncer); err != nil { - log.Errorf("Unable to transition active GossipSyncer(%x): %v", - activeSyncer.cfg.peerPub, err) - return - } - - if err := m.transitionPassiveSyncer(candidate); err != nil { - log.Errorf("Unable to transition passive GossipSyncer(%x): %v", - activeSyncer.cfg.peerPub, err) - return - } -} - -// transitionActiveSyncer transitions an active syncer to a passive one. -// -// NOTE: This must be called with the syncersMu lock held. -func (m *SyncManager) transitionActiveSyncer(s *GossipSyncer) er.R { - log.Debugf("Transitioning active GossipSyncer(%x) to passive", - s.cfg.peerPub) - - if err := s.ProcessSyncTransition(PassiveSync); err != nil { - return err - } - - delete(m.activeSyncers, s.cfg.peerPub) - m.inactiveSyncers[s.cfg.peerPub] = s - - return nil -} - -// transitionPassiveSyncer transitions a passive syncer to an active one. -// -// NOTE: This must be called with the syncersMu lock held. -func (m *SyncManager) transitionPassiveSyncer(s *GossipSyncer) er.R { - log.Debugf("Transitioning passive GossipSyncer(%x) to active", - s.cfg.peerPub) - - if err := s.ProcessSyncTransition(ActiveSync); err != nil { - return err - } - - delete(m.inactiveSyncers, s.cfg.peerPub) - m.activeSyncers[s.cfg.peerPub] = s - - return nil -} - -// forceHistoricalSync chooses a syncer with a remote peer at random and forces -// a historical sync with it. -func (m *SyncManager) forceHistoricalSync() *GossipSyncer { - m.syncersMu.Lock() - defer m.syncersMu.Unlock() - - // We'll sample from both sets of active and inactive syncers in the - // event that we don't have any inactive syncers. 
- return chooseRandomSyncer(m.gossipSyncers(), func(s *GossipSyncer) er.R { - return s.historicalSync() - }) -} - -// chooseRandomSyncer iterates through the set of syncers given and returns the -// first one which was able to successfully perform the action enclosed in the -// function closure. -// -// NOTE: It's possible for a nil value to be returned if there are no eligible -// candidate syncers. -func chooseRandomSyncer(syncers map[route.Vertex]*GossipSyncer, - action func(*GossipSyncer) er.R) *GossipSyncer { - - for _, s := range syncers { - // Only syncers in a chansSynced state are viable for sync - // transitions, so skip any that aren't. - if s.syncState() != chansSynced { - continue - } - - if action != nil { - if err := action(s); err != nil { - log.Debugf("Skipping eligible candidate "+ - "GossipSyncer(%x): %v", s.cfg.peerPub, - err) - continue - } - } - - return s - } - - return nil -} - -// InitSyncState is called by outside sub-systems when a connection is -// established to a new peer that understands how to perform channel range -// queries. We'll allocate a new GossipSyncer for it, and start any goroutines -// needed to handle new queries. The first GossipSyncer registered with the -// SyncManager will attempt a historical sync to ensure we have as much of the -// public channel graph as possible. -// -// TODO(wilmer): Only mark as ActiveSync if this isn't a channel peer. -func (m *SyncManager) InitSyncState(peer lnpeer.Peer) er.R { - done := make(chan struct{}) - - select { - case m.newSyncers <- &newSyncer{ - peer: peer, - doneChan: done, - }: - case <-m.quit: - return ErrSyncManagerExiting.Default() - } - - select { - case <-done: - return nil - case <-m.quit: - return ErrSyncManagerExiting.Default() - } -} - -// PruneSyncState is called by outside sub-systems once a peer that we were -// previously connected to has been disconnected. In this case we can stop the -// existing GossipSyncer assigned to the peer and free up resources. 
-func (m *SyncManager) PruneSyncState(peer route.Vertex) { - done := make(chan struct{}) - - // We avoid returning an error when the SyncManager is stopped since the - // GossipSyncer will be stopped then anyway. - select { - case m.staleSyncers <- &staleSyncer{ - peer: peer, - doneChan: done, - }: - case <-m.quit: - return - } - - select { - case <-done: - case <-m.quit: - } -} - -// GossipSyncer returns the associated gossip syncer of a peer. The boolean -// returned signals whether there exists a gossip syncer for the peer. -func (m *SyncManager) GossipSyncer(peer route.Vertex) (*GossipSyncer, bool) { - m.syncersMu.Lock() - defer m.syncersMu.Unlock() - return m.gossipSyncer(peer) -} - -// gossipSyncer returns the associated gossip syncer of a peer. The boolean -// returned signals whether there exists a gossip syncer for the peer. -func (m *SyncManager) gossipSyncer(peer route.Vertex) (*GossipSyncer, bool) { - syncer, ok := m.inactiveSyncers[peer] - if ok { - return syncer, true - } - syncer, ok = m.activeSyncers[peer] - if ok { - return syncer, true - } - return nil, false -} - -// GossipSyncers returns all of the currently initialized gossip syncers. -func (m *SyncManager) GossipSyncers() map[route.Vertex]*GossipSyncer { - m.syncersMu.Lock() - defer m.syncersMu.Unlock() - return m.gossipSyncers() -} - -// gossipSyncers returns all of the currently initialized gossip syncers. -func (m *SyncManager) gossipSyncers() map[route.Vertex]*GossipSyncer { - numSyncers := len(m.inactiveSyncers) + len(m.activeSyncers) - syncers := make(map[route.Vertex]*GossipSyncer, numSyncers) - - for _, syncer := range m.inactiveSyncers { - syncers[syncer.cfg.peerPub] = syncer - } - for _, syncer := range m.activeSyncers { - syncers[syncer.cfg.peerPub] = syncer - } - - return syncers -} - -// markGraphSynced allows us to report that the initial historical sync has -// completed. 
-func (m *SyncManager) markGraphSynced() { - atomic.StoreInt32(&m.initialHistoricalSyncCompleted, 1) -} - -// markGraphSyncing allows us to report that the initial historical sync is -// still undergoing. -func (m *SyncManager) markGraphSyncing() { - atomic.StoreInt32(&m.initialHistoricalSyncCompleted, 0) -} - -// IsGraphSynced determines whether we've completed our initial historical sync. -// The initial historical sync is done to ensure we've ingested as much of the -// public graph as possible. -func (m *SyncManager) IsGraphSynced() bool { - return atomic.LoadInt32(&m.initialHistoricalSyncCompleted) == 1 -} diff --git a/lnd/discovery/sync_manager_test.go b/lnd/discovery/sync_manager_test.go deleted file mode 100644 index df29c738..00000000 --- a/lnd/discovery/sync_manager_test.go +++ /dev/null @@ -1,586 +0,0 @@ -package discovery - -import ( - "math" - "reflect" - "sync/atomic" - "testing" - "time" - - "github.com/davecgh/go-spew/spew" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/lntest/wait" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/ticker" -) - -// randPeer creates a random peer. -func randPeer(t *testing.T, quit chan struct{}) *mockPeer { - t.Helper() - - return &mockPeer{ - pk: randPubKey(t), - sentMsgs: make(chan lnwire.Message), - quit: quit, - } -} - -// newTestSyncManager creates a new test SyncManager using mock implementations -// of its dependencies. -func newTestSyncManager(numActiveSyncers int) *SyncManager { - hID := lnwire.ShortChannelID{BlockHeight: latestKnownHeight} - return newSyncManager(&SyncManagerCfg{ - ChanSeries: newMockChannelGraphTimeSeries(hID), - RotateTicker: ticker.NewForce(DefaultSyncerRotationInterval), - HistoricalSyncTicker: ticker.NewForce(DefaultHistoricalSyncInterval), - NumActiveSyncers: numActiveSyncers, - }) -} - -// TestSyncManagerNumActiveSyncers ensures that we are unable to have more than -// NumActiveSyncers active syncers. 
-func TestSyncManagerNumActiveSyncers(t *testing.T) { - t.Parallel() - - // We'll start by creating our test sync manager which will hold up to - // 3 active syncers. - const numActiveSyncers = 3 - const numSyncers = numActiveSyncers + 1 - - syncMgr := newTestSyncManager(numActiveSyncers) - syncMgr.Start() - defer syncMgr.Stop() - - // We'll go ahead and create our syncers. We'll gather the ones which - // should be active and passive to check them later on. - for i := 0; i < numActiveSyncers; i++ { - peer := randPeer(t, syncMgr.quit) - syncMgr.InitSyncState(peer) - s := assertSyncerExistence(t, syncMgr, peer) - - // The first syncer registered always attempts a historical - // sync. - if i == 0 { - assertTransitionToChansSynced(t, s, peer) - } - assertActiveGossipTimestampRange(t, peer) - assertSyncerStatus(t, s, chansSynced, ActiveSync) - } - - for i := 0; i < numSyncers-numActiveSyncers; i++ { - peer := randPeer(t, syncMgr.quit) - syncMgr.InitSyncState(peer) - s := assertSyncerExistence(t, syncMgr, peer) - assertSyncerStatus(t, s, chansSynced, PassiveSync) - } -} - -// TestSyncManagerNewActiveSyncerAfterDisconnect ensures that we can regain an -// active syncer after losing one due to the peer disconnecting. -func TestSyncManagerNewActiveSyncerAfterDisconnect(t *testing.T) { - t.Parallel() - - // We'll create our test sync manager to have two active syncers. - syncMgr := newTestSyncManager(2) - syncMgr.Start() - defer syncMgr.Stop() - - // The first will be an active syncer that performs a historical sync - // since it is the first one registered with the SyncManager. 
- historicalSyncPeer := randPeer(t, syncMgr.quit) - syncMgr.InitSyncState(historicalSyncPeer) - historicalSyncer := assertSyncerExistence(t, syncMgr, historicalSyncPeer) - assertTransitionToChansSynced(t, historicalSyncer, historicalSyncPeer) - assertActiveGossipTimestampRange(t, historicalSyncPeer) - assertSyncerStatus(t, historicalSyncer, chansSynced, ActiveSync) - - // Then, we'll create the second active syncer, which is the one we'll - // disconnect. - activeSyncPeer := randPeer(t, syncMgr.quit) - syncMgr.InitSyncState(activeSyncPeer) - activeSyncer := assertSyncerExistence(t, syncMgr, activeSyncPeer) - assertActiveGossipTimestampRange(t, activeSyncPeer) - assertSyncerStatus(t, activeSyncer, chansSynced, ActiveSync) - - // It will then be torn down to simulate a disconnection. Since there - // are no other candidate syncers available, the active syncer won't be - // replaced. - syncMgr.PruneSyncState(activeSyncPeer.PubKey()) - - // Then, we'll start our active syncer again, but this time we'll also - // have a passive syncer available to replace the active syncer after - // the peer disconnects. - syncMgr.InitSyncState(activeSyncPeer) - activeSyncer = assertSyncerExistence(t, syncMgr, activeSyncPeer) - assertActiveGossipTimestampRange(t, activeSyncPeer) - assertSyncerStatus(t, activeSyncer, chansSynced, ActiveSync) - - // Create our second peer, which should be initialized as a passive - // syncer. - newActiveSyncPeer := randPeer(t, syncMgr.quit) - syncMgr.InitSyncState(newActiveSyncPeer) - newActiveSyncer := assertSyncerExistence(t, syncMgr, newActiveSyncPeer) - assertSyncerStatus(t, newActiveSyncer, chansSynced, PassiveSync) - - // Disconnect our active syncer, which should trigger the SyncManager to - // replace it with our passive syncer. 
- go syncMgr.PruneSyncState(activeSyncPeer.PubKey()) - assertPassiveSyncerTransition(t, newActiveSyncer, newActiveSyncPeer) -} - -// TestSyncManagerRotateActiveSyncerCandidate tests that we can successfully -// rotate our active syncers after a certain interval. -func TestSyncManagerRotateActiveSyncerCandidate(t *testing.T) { - t.Parallel() - - // We'll create our sync manager with three active syncers. - syncMgr := newTestSyncManager(1) - syncMgr.Start() - defer syncMgr.Stop() - - // The first syncer registered always performs a historical sync. - activeSyncPeer := randPeer(t, syncMgr.quit) - syncMgr.InitSyncState(activeSyncPeer) - activeSyncer := assertSyncerExistence(t, syncMgr, activeSyncPeer) - assertTransitionToChansSynced(t, activeSyncer, activeSyncPeer) - assertActiveGossipTimestampRange(t, activeSyncPeer) - assertSyncerStatus(t, activeSyncer, chansSynced, ActiveSync) - - // We'll send a tick to force a rotation. Since there aren't any - // candidates, none of the active syncers will be rotated. - syncMgr.cfg.RotateTicker.(*ticker.Force).Force <- time.Time{} - assertNoMsgSent(t, activeSyncPeer) - assertSyncerStatus(t, activeSyncer, chansSynced, ActiveSync) - - // We'll then go ahead and add a passive syncer. - passiveSyncPeer := randPeer(t, syncMgr.quit) - syncMgr.InitSyncState(passiveSyncPeer) - passiveSyncer := assertSyncerExistence(t, syncMgr, passiveSyncPeer) - assertSyncerStatus(t, passiveSyncer, chansSynced, PassiveSync) - - // We'll force another rotation - this time, since we have a passive - // syncer available, they should be rotated. - syncMgr.cfg.RotateTicker.(*ticker.Force).Force <- time.Time{} - - // The transition from an active syncer to a passive syncer causes the - // peer to send out a new GossipTimestampRange in the past so that they - // don't receive new graph updates. 
- assertActiveSyncerTransition(t, activeSyncer, activeSyncPeer) - - // The transition from a passive syncer to an active syncer causes the - // peer to send a new GossipTimestampRange with the current timestamp to - // signal that they would like to receive new graph updates from their - // peers. This will also cause the gossip syncer to redo its state - // machine, starting from its initial syncingChans state. We'll then - // need to transition it to its final chansSynced state to ensure the - // next syncer is properly started in the round-robin. - assertPassiveSyncerTransition(t, passiveSyncer, passiveSyncPeer) -} - -// TestSyncManagerInitialHistoricalSync ensures that we only attempt a single -// historical sync during the SyncManager's startup. If the peer corresponding -// to the initial historical syncer disconnects, we should attempt to find a -// replacement. -func TestSyncManagerInitialHistoricalSync(t *testing.T) { - t.Parallel() - - syncMgr := newTestSyncManager(0) - - // The graph should not be considered as synced since the sync manager - // has yet to start. - if syncMgr.IsGraphSynced() { - t.Fatal("expected graph to not be considered as synced") - } - - syncMgr.Start() - defer syncMgr.Stop() - - // We should expect to see a QueryChannelRange message with a - // FirstBlockHeight of the genesis block, signaling that an initial - // historical sync is being attempted. - peer := randPeer(t, syncMgr.quit) - syncMgr.InitSyncState(peer) - assertMsgSent(t, peer, &lnwire.QueryChannelRange{ - FirstBlockHeight: 0, - NumBlocks: math.MaxUint32, - }) - - // The graph should not be considered as synced since the initial - // historical sync has not finished. - if syncMgr.IsGraphSynced() { - t.Fatal("expected graph to not be considered as synced") - } - - // If an additional peer connects, then another historical sync should - // not be attempted. 
- finalHistoricalPeer := randPeer(t, syncMgr.quit) - syncMgr.InitSyncState(finalHistoricalPeer) - finalHistoricalSyncer := assertSyncerExistence(t, syncMgr, finalHistoricalPeer) - assertNoMsgSent(t, finalHistoricalPeer) - - // If we disconnect the peer performing the initial historical sync, a - // new one should be chosen. - syncMgr.PruneSyncState(peer.PubKey()) - - // Complete the initial historical sync by transitionining the syncer to - // its final chansSynced state. The graph should be considered as synced - // after the fact. - assertTransitionToChansSynced(t, finalHistoricalSyncer, finalHistoricalPeer) - if !syncMgr.IsGraphSynced() { - t.Fatal("expected graph to be considered as synced") - } - - // Once the initial historical sync has succeeded, another one should - // not be attempted by disconnecting the peer who performed it. - extraPeer := randPeer(t, syncMgr.quit) - syncMgr.InitSyncState(extraPeer) - assertNoMsgSent(t, extraPeer) - syncMgr.PruneSyncState(finalHistoricalPeer.PubKey()) - assertNoMsgSent(t, extraPeer) -} - -// TestSyncManagerHistoricalSyncOnReconnect tests that the sync manager will -// re-trigger a historical sync when a new peer connects after a historical -// sync has completed, but we have lost all peers. -func TestSyncManagerHistoricalSyncOnReconnect(t *testing.T) { - t.Parallel() - - syncMgr := newTestSyncManager(2) - syncMgr.Start() - defer syncMgr.Stop() - - // We should expect to see a QueryChannelRange message with a - // FirstBlockHeight of the genesis block, signaling that an initial - // historical sync is being attempted. - peer := randPeer(t, syncMgr.quit) - syncMgr.InitSyncState(peer) - s := assertSyncerExistence(t, syncMgr, peer) - assertTransitionToChansSynced(t, s, peer) - assertActiveGossipTimestampRange(t, peer) - assertSyncerStatus(t, s, chansSynced, ActiveSync) - - // Now that the historical sync is completed, we prune the syncer, - // simulating all peers having disconnected. 
- syncMgr.PruneSyncState(peer.PubKey()) - - // If a new peer now connects, then another historical sync should - // be attempted. This is to ensure we get an up-to-date graph if we - // haven't had any peers for a time. - nextPeer := randPeer(t, syncMgr.quit) - syncMgr.InitSyncState(nextPeer) - s1 := assertSyncerExistence(t, syncMgr, nextPeer) - assertTransitionToChansSynced(t, s1, nextPeer) - assertActiveGossipTimestampRange(t, nextPeer) - assertSyncerStatus(t, s1, chansSynced, ActiveSync) -} - -// TestSyncManagerForceHistoricalSync ensures that we can perform routine -// historical syncs whenever the HistoricalSyncTicker fires. -func TestSyncManagerForceHistoricalSync(t *testing.T) { - t.Parallel() - - syncMgr := newTestSyncManager(0) - syncMgr.Start() - defer syncMgr.Stop() - - // We should expect to see a QueryChannelRange message with a - // FirstBlockHeight of the genesis block, signaling that a historical - // sync is being attempted. - peer := randPeer(t, syncMgr.quit) - syncMgr.InitSyncState(peer) - assertMsgSent(t, peer, &lnwire.QueryChannelRange{ - FirstBlockHeight: 0, - NumBlocks: math.MaxUint32, - }) - - // If an additional peer connects, then a historical sync should not be - // attempted again. - extraPeer := randPeer(t, syncMgr.quit) - syncMgr.InitSyncState(extraPeer) - assertNoMsgSent(t, extraPeer) - - // Then, we'll send a tick to force a historical sync. This should - // trigger the extra peer to also perform a historical sync since the - // first peer is not eligible due to not being in a chansSynced state. - syncMgr.cfg.HistoricalSyncTicker.(*ticker.Force).Force <- time.Time{} - assertMsgSent(t, extraPeer, &lnwire.QueryChannelRange{ - FirstBlockHeight: 0, - NumBlocks: math.MaxUint32, - }) -} - -// TestSyncManagerGraphSyncedAfterHistoricalSyncReplacement ensures that the -// sync manager properly marks the graph as synced given that our initial -// historical sync has stalled, but a replacement has fully completed. 
-func TestSyncManagerGraphSyncedAfterHistoricalSyncReplacement(t *testing.T) { - t.Parallel() - - syncMgr := newTestSyncManager(0) - syncMgr.Start() - defer syncMgr.Stop() - - // We should expect to see a QueryChannelRange message with a - // FirstBlockHeight of the genesis block, signaling that an initial - // historical sync is being attempted. - peer := randPeer(t, syncMgr.quit) - syncMgr.InitSyncState(peer) - assertMsgSent(t, peer, &lnwire.QueryChannelRange{ - FirstBlockHeight: 0, - NumBlocks: math.MaxUint32, - }) - - // The graph should not be considered as synced since the initial - // historical sync has not finished. - if syncMgr.IsGraphSynced() { - t.Fatal("expected graph to not be considered as synced") - } - - // If an additional peer connects, then another historical sync should - // not be attempted. - finalHistoricalPeer := randPeer(t, syncMgr.quit) - syncMgr.InitSyncState(finalHistoricalPeer) - finalHistoricalSyncer := assertSyncerExistence(t, syncMgr, finalHistoricalPeer) - assertNoMsgSent(t, finalHistoricalPeer) - - // To simulate that our initial historical sync has stalled, we'll force - // a historical sync with the new peer to ensure it is replaced. - syncMgr.cfg.HistoricalSyncTicker.(*ticker.Force).Force <- time.Time{} - - // The graph should still not be considered as synced since the - // replacement historical sync has not finished. - if syncMgr.IsGraphSynced() { - t.Fatal("expected graph to not be considered as synced") - } - - // Complete the replacement historical sync by transitioning the syncer - // to its final chansSynced state. The graph should be considered as - // synced after the fact. - assertTransitionToChansSynced(t, finalHistoricalSyncer, finalHistoricalPeer) - if !syncMgr.IsGraphSynced() { - t.Fatal("expected graph to be considered as synced") - } -} - -// TestSyncManagerWaitUntilInitialHistoricalSync ensures that no GossipSyncers -// are initialized as ActiveSync until the initial historical sync has been -// completed. 
Once it does, the pending GossipSyncers should be transitioned to -// ActiveSync. -func TestSyncManagerWaitUntilInitialHistoricalSync(t *testing.T) { - t.Parallel() - - const numActiveSyncers = 2 - - // We'll start by creating our test sync manager which will hold up to - // 2 active syncers. - syncMgr := newTestSyncManager(numActiveSyncers) - syncMgr.Start() - defer syncMgr.Stop() - - // We'll go ahead and create our syncers. - peers := make([]*mockPeer, 0, numActiveSyncers) - syncers := make([]*GossipSyncer, 0, numActiveSyncers) - for i := 0; i < numActiveSyncers; i++ { - peer := randPeer(t, syncMgr.quit) - peers = append(peers, peer) - - syncMgr.InitSyncState(peer) - s := assertSyncerExistence(t, syncMgr, peer) - syncers = append(syncers, s) - - // The first one always attempts a historical sync. We won't - // transition it to chansSynced to ensure the remaining syncers - // aren't started as active. - if i == 0 { - assertSyncerStatus(t, s, syncingChans, PassiveSync) - continue - } - - // The rest should remain in a passive and chansSynced state, - // and they should be queued to transition to active once the - // initial historical sync is completed. - assertNoMsgSent(t, peer) - assertSyncerStatus(t, s, chansSynced, PassiveSync) - } - - // To ensure we don't transition any pending active syncers that have - // previously disconnected, we'll disconnect the last one. - stalePeer := peers[numActiveSyncers-1] - syncMgr.PruneSyncState(stalePeer.PubKey()) - - // Then, we'll complete the initial historical sync by transitioning the - // historical syncer to its final chansSynced state. This should trigger - // all of the pending active syncers to transition, except for the one - // we disconnected. 
- assertTransitionToChansSynced(t, syncers[0], peers[0]) - for i, s := range syncers { - if i == numActiveSyncers-1 { - assertNoMsgSent(t, peers[i]) - continue - } - assertPassiveSyncerTransition(t, s, peers[i]) - } -} - -// assertNoMsgSent is a helper function that ensures a peer hasn't sent any -// messages. -func assertNoMsgSent(t *testing.T, peer *mockPeer) { - t.Helper() - - select { - case msg := <-peer.sentMsgs: - t.Fatalf("peer %x sent unexpected message %v", peer.PubKey(), - spew.Sdump(msg)) - case <-time.After(time.Second): - } -} - -// assertMsgSent asserts that the peer has sent the given message. -func assertMsgSent(t *testing.T, peer *mockPeer, msg lnwire.Message) { - t.Helper() - - var msgSent lnwire.Message - select { - case msgSent = <-peer.sentMsgs: - case <-time.After(time.Second): - t.Fatalf("expected peer %x to send %T message", peer.PubKey(), - msg) - } - - if !reflect.DeepEqual(msgSent, msg) { - t.Fatalf("expected peer %x to send message: %v\ngot: %v", - peer.PubKey(), spew.Sdump(msg), spew.Sdump(msgSent)) - } -} - -// assertActiveGossipTimestampRange is a helper function that ensures a peer has -// sent a lnwire.GossipTimestampRange message indicating that it would like to -// receive new graph updates. 
-func assertActiveGossipTimestampRange(t *testing.T, peer *mockPeer) { - t.Helper() - - var msgSent lnwire.Message - select { - case msgSent = <-peer.sentMsgs: - case <-time.After(2 * time.Second): - t.Fatalf("expected peer %x to send lnwire.GossipTimestampRange "+ - "message", peer.PubKey()) - } - - msg, ok := msgSent.(*lnwire.GossipTimestampRange) - if !ok { - t.Fatalf("expected peer %x to send %T message", peer.PubKey(), - msg) - } - if msg.FirstTimestamp == 0 { - t.Fatalf("expected *lnwire.GossipTimestampRange message with " + - "non-zero FirstTimestamp") - } - if msg.TimestampRange == 0 { - t.Fatalf("expected *lnwire.GossipTimestampRange message with " + - "non-zero TimestampRange") - } -} - -// assertSyncerExistence asserts that a GossipSyncer exists for the given peer. -func assertSyncerExistence(t *testing.T, syncMgr *SyncManager, - peer *mockPeer) *GossipSyncer { - - t.Helper() - - s, ok := syncMgr.GossipSyncer(peer.PubKey()) - if !ok { - t.Fatalf("gossip syncer for peer %x not found", peer.PubKey()) - } - - return s -} - -// assertSyncerStatus asserts that the gossip syncer for the given peer matches -// the expected sync state and type. -func assertSyncerStatus(t *testing.T, s *GossipSyncer, syncState syncerState, - syncType SyncerType) { - - t.Helper() - - // We'll check the status of our syncer within a WaitPredicate as some - // sync transitions might cause this to be racy. - err := wait.NoError(func() er.R { - state := s.syncState() - if s.syncState() != syncState { - return er.Errorf("expected syncState %v for peer "+ - "%x, got %v", syncState, s.cfg.peerPub, state) - } - - typ := s.SyncType() - if s.SyncType() != syncType { - return er.Errorf("expected syncType %v for peer "+ - "%x, got %v", syncType, s.cfg.peerPub, typ) - } - - return nil - }, time.Second) - if err != nil { - t.Fatal(err) - } -} - -// assertTransitionToChansSynced asserts the transition of an ActiveSync -// GossipSyncer to its final chansSynced state. 
-func assertTransitionToChansSynced(t *testing.T, s *GossipSyncer, peer *mockPeer) { - t.Helper() - - query := &lnwire.QueryChannelRange{ - FirstBlockHeight: 0, - NumBlocks: math.MaxUint32, - } - assertMsgSent(t, peer, query) - - s.ProcessQueryMsg(&lnwire.ReplyChannelRange{ - QueryChannelRange: *query, - Complete: 1, - }, nil) - - chanSeries := s.cfg.channelSeries.(*mockChannelGraphTimeSeries) - - select { - case <-chanSeries.filterReq: - chanSeries.filterResp <- nil - case <-time.After(2 * time.Second): - t.Fatal("expected to receive FilterKnownChanIDs request") - } - - err := wait.NoError(func() er.R { - state := syncerState(atomic.LoadUint32(&s.state)) - if state != chansSynced { - return er.Errorf("expected syncerState %v, got %v", - chansSynced, state) - } - - return nil - }, time.Second) - if err != nil { - t.Fatal(err) - } -} - -// assertPassiveSyncerTransition asserts that a gossip syncer goes through all -// of its expected steps when transitioning from passive to active. -func assertPassiveSyncerTransition(t *testing.T, s *GossipSyncer, peer *mockPeer) { - - t.Helper() - - assertActiveGossipTimestampRange(t, peer) - assertSyncerStatus(t, s, chansSynced, ActiveSync) -} - -// assertActiveSyncerTransition asserts that a gossip syncer goes through all of -// its expected steps when transitioning from active to passive. 
-func assertActiveSyncerTransition(t *testing.T, s *GossipSyncer, peer *mockPeer) { - t.Helper() - - assertMsgSent(t, peer, &lnwire.GossipTimestampRange{ - FirstTimestamp: uint32(zeroTimestamp.Unix()), - TimestampRange: 0, - }) - assertSyncerStatus(t, s, chansSynced, PassiveSync) -} diff --git a/lnd/discovery/syncer.go b/lnd/discovery/syncer.go deleted file mode 100644 index 104108a9..00000000 --- a/lnd/discovery/syncer.go +++ /dev/null @@ -1,1442 +0,0 @@ -package discovery - -import ( - "fmt" - "math" - "sync" - "sync/atomic" - "time" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/lnd/lnpeer" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/pktlog/log" - "golang.org/x/time/rate" -) - -// SyncerType encapsulates the different types of syncing mechanisms for a -// gossip syncer. -type SyncerType uint8 - -const ( - // ActiveSync denotes that a gossip syncer: - // - // 1. Should not attempt to synchronize with the remote peer for - // missing channels. - // 2. Should respond to queries from the remote peer. - // 3. Should receive new updates from the remote peer. - // - // They are started in a chansSynced state in order to accomplish their - // responsibilities above. - ActiveSync SyncerType = iota - - // PassiveSync denotes that a gossip syncer: - // - // 1. Should not attempt to synchronize with the remote peer for - // missing channels. - // 2. Should respond to queries from the remote peer. - // 3. Should not receive new updates from the remote peer. - // - // They are started in a chansSynced state in order to accomplish their - // responsibilities above. - PassiveSync -) - -// String returns a human readable string describing the target SyncerType. 
-func (t SyncerType) String() string { - switch t { - case ActiveSync: - return "ActiveSync" - case PassiveSync: - return "PassiveSync" - default: - return fmt.Sprintf("unknown sync type %d", t) - } -} - -// syncerState is an enum that represents the current state of the GossipSyncer. -// As the syncer is a state machine, we'll gate our actions based off of the -// current state and the next incoming message. -type syncerState uint32 - -const ( - // syncingChans is the default state of the GossipSyncer. We start in - // this state when a new peer first connects and we don't yet know if - // we're fully synchronized. - syncingChans syncerState = iota - - // waitingQueryRangeReply is the second main phase of the GossipSyncer. - // We enter this state after we send out our first QueryChannelRange - // reply. We'll stay in this state until the remote party sends us a - // ReplyShortChanIDsEnd message that indicates they've responded to our - // query entirely. After this state, we'll transition to - // waitingQueryChanReply after we send out requests for all the new - // chan ID's to us. - waitingQueryRangeReply - - // queryNewChannels is the third main phase of the GossipSyncer. In - // this phase we'll send out all of our QueryShortChanIDs messages in - // response to the new channels that we don't yet know about. - queryNewChannels - - // waitingQueryChanReply is the fourth main phase of the GossipSyncer. - // We enter this phase once we've sent off a query chink to the remote - // peer. We'll stay in this phase until we receive a - // ReplyShortChanIDsEnd message which indicates that the remote party - // has responded to all of our requests. - waitingQueryChanReply - - // chansSynced is the terminal stage of the GossipSyncer. Once we enter - // this phase, we'll send out our update horizon, which filters out the - // set of channel updates that we're interested in. 
In this state, - // we'll be able to accept any outgoing messages from the - // AuthenticatedGossiper, and decide if we should forward them to our - // target peer based on its update horizon. - chansSynced -) - -// String returns a human readable string describing the target syncerState. -func (s syncerState) String() string { - switch s { - case syncingChans: - return "syncingChans" - - case waitingQueryRangeReply: - return "waitingQueryRangeReply" - - case queryNewChannels: - return "queryNewChannels" - - case waitingQueryChanReply: - return "waitingQueryChanReply" - - case chansSynced: - return "chansSynced" - - default: - return "UNKNOWN STATE" - } -} - -const ( - // DefaultMaxUndelayedQueryReplies specifies how many gossip queries we - // will respond to immediately before starting to delay responses. - DefaultMaxUndelayedQueryReplies = 10 - - // DefaultDelayedQueryReplyInterval is the length of time we will wait - // before responding to gossip queries after replying to - // maxUndelayedQueryReplies queries. - DefaultDelayedQueryReplyInterval = 5 * time.Second - - // chanRangeQueryBuffer is the number of blocks back that we'll go when - // asking the remote peer for their any channels they know of beyond - // our highest known channel ID. - chanRangeQueryBuffer = 144 - - // syncTransitionTimeout is the default timeout in which we'll wait up - // to when attempting to perform a sync transition. - syncTransitionTimeout = 5 * time.Second - - // requestBatchSize is the maximum number of channels we will query the - // remote peer for in a QueryShortChanIDs message. - requestBatchSize = 500 -) - -var ( - // encodingTypeToChunkSize maps an encoding type, to the max number of - // short chan ID's using the encoding type that we can fit into a - // single message safely. - encodingTypeToChunkSize = map[lnwire.ShortChanIDEncoding]int32{ - lnwire.EncodingSortedPlain: 8000, - } - - // ErrGossipSyncerExiting signals that the syncer has been killed. 
- ErrGossipSyncerExiting = Err.CodeWithDetail("ErrGossipSyncerExiting", "gossip syncer exiting") - - // ErrSyncTransitionTimeout is an error returned when we've timed out - // attempting to perform a sync transition. - ErrSyncTransitionTimeout = Err.CodeWithDetail("ErrSyncTransitionTimeout", "timed out attempting to "+ - "transition sync type") - - // zeroTimestamp is the timestamp we'll use when we want to indicate to - // peers that we do not want to receive any new graph updates. - zeroTimestamp time.Time -) - -// syncTransitionReq encapsulates a request for a gossip syncer sync transition. -type syncTransitionReq struct { - newSyncType SyncerType - errChan chan er.R -} - -// historicalSyncReq encapsulates a request for a gossip syncer to perform a -// historical sync. -type historicalSyncReq struct { - // doneChan is a channel that serves as a signal and is closed to ensure - // the historical sync is attempted by the time we return to the caller. - doneChan chan struct{} -} - -// gossipSyncerCfg is a struct that packages all the information a GossipSyncer -// needs to carry out its duties. -type gossipSyncerCfg struct { - // chainHash is the chain that this syncer is responsible for. - chainHash chainhash.Hash - - // peerPub is the public key of the peer we're syncing with, serialized - // in compressed format. - peerPub [33]byte - - // channelSeries is the primary interface that we'll use to generate - // our queries and respond to the queries of the remote peer. - channelSeries ChannelGraphTimeSeries - - // encodingType is the current encoding type we're aware of. Requests - // with different encoding types will be rejected. - encodingType lnwire.ShortChanIDEncoding - - // chunkSize is the max number of short chan IDs using the syncer's - // encoding type that we can fit into a single message safely. - chunkSize int32 - - // batchSize is the max number of channels the syncer will query from - // the remote node in a single QueryShortChanIDs request. 
- batchSize int32 - - // sendToPeer sends a variadic number of messages to the remote peer. - // This method should not block while waiting for sends to be written - // to the wire. - sendToPeer func(...lnwire.Message) er.R - - // sendToPeerSync sends a variadic number of messages to the remote - // peer, blocking until all messages have been sent successfully or a - // write error is encountered. - sendToPeerSync func(...lnwire.Message) er.R - - // maxUndelayedQueryReplies specifies how many gossip queries we will - // respond to immediately before starting to delay responses. - maxUndelayedQueryReplies int - - // delayedQueryReplyInterval is the length of time we will wait before - // responding to gossip queries after replying to - // maxUndelayedQueryReplies queries. - delayedQueryReplyInterval time.Duration - - // noSyncChannels will prevent the GossipSyncer from spawning a - // channelGraphSyncer, meaning we will not try to reconcile unknown - // channels with the remote peer. - noSyncChannels bool - - // noReplyQueries will prevent the GossipSyncer from spawning a - // replyHandler, meaning we will not reply to queries from our remote - // peer. - noReplyQueries bool - - // ignoreHistoricalFilters will prevent syncers from replying with - // historical data when the remote peer sets a gossip_timestamp_range. - // This prevents ranges with old start times from causing us to dump the - // graph on connect. - ignoreHistoricalFilters bool -} - -// GossipSyncer is a struct that handles synchronizing the channel graph state -// with a remote peer. The GossipSyncer implements a state machine that will -// progressively ensure we're synchronized with the channel state of the remote -// node. Once both nodes have been synchronized, we'll use an update filter to -// filter out which messages should be sent to a remote peer based on their -// update horizon. If the update horizon isn't specified, then we won't send -// them any channel updates at all. 
-type GossipSyncer struct { - started sync.Once - stopped sync.Once - - // state is the current state of the GossipSyncer. - // - // NOTE: This variable MUST be used atomically. - state uint32 - - // syncType denotes the SyncerType the gossip syncer is currently - // exercising. - // - // NOTE: This variable MUST be used atomically. - syncType uint32 - - // remoteUpdateHorizon is the update horizon of the remote peer. We'll - // use this to properly filter out any messages. - remoteUpdateHorizon *lnwire.GossipTimestampRange - - // localUpdateHorizon is our local update horizon, we'll use this to - // determine if we've already sent out our update. - localUpdateHorizon *lnwire.GossipTimestampRange - - // syncTransitions is a channel through which new sync type transition - // requests will be sent through. These requests should only be handled - // when the gossip syncer is in a chansSynced state to ensure its state - // machine behaves as expected. - syncTransitionReqs chan *syncTransitionReq - - // historicalSyncReqs is a channel that serves as a signal for the - // gossip syncer to perform a historical sync. These can only be done - // once the gossip syncer is in a chansSynced state to ensure its state - // machine behaves as expected. - historicalSyncReqs chan *historicalSyncReq - - // genHistoricalChanRangeQuery when true signals to the gossip syncer - // that it should request the remote peer for all of its known channel - // IDs starting from the genesis block of the chain. This can only - // happen if the gossip syncer receives a request to attempt a - // historical sync. It can be unset if the syncer ever transitions from - // PassiveSync to ActiveSync. - genHistoricalChanRangeQuery bool - - // gossipMsgs is a channel that all responses to our queries from the - // target peer will be sent over, these will be read by the - // channelGraphSyncer. 
- gossipMsgs chan lnwire.Message - - // queryMsgs is a channel that all queries from the remote peer will be - // received over, these will be read by the replyHandler. - queryMsgs chan lnwire.Message - - // curQueryRangeMsg keeps track of the latest QueryChannelRange message - // we've sent to a peer to ensure we've consumed all expected replies. - // This field is primarily used within the waitingQueryChanReply state. - curQueryRangeMsg *lnwire.QueryChannelRange - - // prevReplyChannelRange keeps track of the previous ReplyChannelRange - // message we've received from a peer to ensure they've fully replied to - // our query by ensuring they covered our requested block range. This - // field is primarily used within the waitingQueryChanReply state. - prevReplyChannelRange *lnwire.ReplyChannelRange - - // bufferedChanRangeReplies is used in the waitingQueryChanReply to - // buffer all the chunked response to our query. - bufferedChanRangeReplies []lnwire.ShortChannelID - - // newChansToQuery is used to pass the set of channels we should query - // for from the waitingQueryChanReply state to the queryNewChannels - // state. - newChansToQuery []lnwire.ShortChannelID - - cfg gossipSyncerCfg - - // rateLimiter dictates the frequency with which we will reply to gossip - // queries from a peer. This is used to delay responses to peers to - // prevent DOS vulnerabilities if they are spamming with an unreasonable - // number of queries. - rateLimiter *rate.Limiter - - // syncedSignal is a channel that, if set, will be closed when the - // GossipSyncer reaches its terminal chansSynced state. - syncedSignal chan struct{} - - sync.Mutex - - quit chan struct{} - wg sync.WaitGroup -} - -// newGossipSyncer returns a new instance of the GossipSyncer populated using -// the passed config. -func newGossipSyncer(cfg gossipSyncerCfg) *GossipSyncer { - // If no parameter was specified for max undelayed query replies, set it - // to the default of 5 queries. 
- if cfg.maxUndelayedQueryReplies <= 0 { - cfg.maxUndelayedQueryReplies = DefaultMaxUndelayedQueryReplies - } - - // If no parameter was specified for delayed query reply interval, set - // to the default of 5 seconds. - if cfg.delayedQueryReplyInterval <= 0 { - cfg.delayedQueryReplyInterval = DefaultDelayedQueryReplyInterval - } - - // Construct a rate limiter that will govern how frequently we reply to - // gossip queries from this peer. The limiter will automatically adjust - // during periods of quiescence, and increase the reply interval under - // load. - interval := rate.Every(cfg.delayedQueryReplyInterval) - rateLimiter := rate.NewLimiter( - interval, cfg.maxUndelayedQueryReplies, - ) - - return &GossipSyncer{ - cfg: cfg, - rateLimiter: rateLimiter, - syncTransitionReqs: make(chan *syncTransitionReq), - historicalSyncReqs: make(chan *historicalSyncReq), - gossipMsgs: make(chan lnwire.Message, 100), - queryMsgs: make(chan lnwire.Message, 100), - quit: make(chan struct{}), - } -} - -// Start starts the GossipSyncer and any goroutines that it needs to carry out -// its duties. -func (g *GossipSyncer) Start() { - g.started.Do(func() { - log.Debugf("Starting GossipSyncer(%x)", g.cfg.peerPub[:]) - - // TODO(conner): only spawn channelGraphSyncer if remote - // supports gossip queries, and only spawn replyHandler if we - // advertise support - if !g.cfg.noSyncChannels { - g.wg.Add(1) - go g.channelGraphSyncer() - } - if !g.cfg.noReplyQueries { - g.wg.Add(1) - go g.replyHandler() - } - }) -} - -// Stop signals the GossipSyncer for a graceful exit, then waits until it has -// exited. -func (g *GossipSyncer) Stop() { - g.stopped.Do(func() { - close(g.quit) - g.wg.Wait() - }) -} - -// channelGraphSyncer is the main goroutine responsible for ensuring that we -// properly channel graph state with the remote peer, and also that we only -// send them messages which actually pass their defined update horizon. 
-func (g *GossipSyncer) channelGraphSyncer() { - defer g.wg.Done() - - for { - state := g.syncState() - syncType := g.SyncType() - - log.Debugf("GossipSyncer(%x): state=%v, type=%v", - g.cfg.peerPub[:], state, syncType) - - switch state { - // When we're in this state, we're trying to synchronize our - // view of the network with the remote peer. We'll kick off - // this sync by asking them for the set of channels they - // understand, as we'll as responding to any other queries by - // them. - case syncingChans: - // If we're in this state, then we'll send the remote - // peer our opening QueryChannelRange message. - queryRangeMsg, err := g.genChanRangeQuery( - g.genHistoricalChanRangeQuery, - ) - if err != nil { - log.Errorf("Unable to gen chan range "+ - "query: %v", err) - return - } - - err = g.cfg.sendToPeer(queryRangeMsg) - if err != nil { - log.Errorf("Unable to send chan range "+ - "query: %v", err) - return - } - - // With the message sent successfully, we'll transition - // into the next state where we wait for their reply. - g.setSyncState(waitingQueryRangeReply) - - // In this state, we've sent out our initial channel range - // query and are waiting for the final response from the remote - // peer before we perform a diff to see with channels they know - // of that we don't. - case waitingQueryRangeReply: - // We'll wait to either process a new message from the - // remote party, or exit due to the gossiper exiting, - // or us being signalled to do so. - select { - case msg := <-g.gossipMsgs: - // The remote peer is sending a response to our - // initial query, we'll collate this response, - // and see if it's the final one in the series. - // If so, we can then transition to querying - // for the new channels. 
- queryReply, ok := msg.(*lnwire.ReplyChannelRange) - if ok { - err := g.processChanRangeReply(queryReply) - if err != nil { - log.Errorf("Unable to "+ - "process chan range "+ - "query: %v", err) - return - } - continue - } - - log.Warnf("Unexpected message: %T in state=%v", - msg, state) - - case <-g.quit: - return - } - - // We'll enter this state once we've discovered which channels - // the remote party knows of that we don't yet know of - // ourselves. - case queryNewChannels: - // First, we'll attempt to continue our channel - // synchronization by continuing to send off another - // query chunk. - done, err := g.synchronizeChanIDs() - if err != nil { - log.Errorf("Unable to sync chan IDs: %v", err) - } - - // If this wasn't our last query, then we'll need to - // transition to our waiting state. - if !done { - g.setSyncState(waitingQueryChanReply) - continue - } - - // If we're fully synchronized, then we can transition - // to our terminal state. - g.setSyncState(chansSynced) - - // In this state, we've just sent off a new query for channels - // that we don't yet know of. We'll remain in this state until - // the remote party signals they've responded to our query in - // totality. - case waitingQueryChanReply: - // Once we've sent off our query, we'll wait for either - // an ending reply, or just another query from the - // remote peer. - select { - case msg := <-g.gossipMsgs: - // If this is the final reply to one of our - // queries, then we'll loop back into our query - // state to send of the remaining query chunks. - _, ok := msg.(*lnwire.ReplyShortChanIDsEnd) - if ok { - g.setSyncState(queryNewChannels) - continue - } - - log.Warnf("Unexpected message: %T in state=%v", - msg, state) - - case <-g.quit: - return - } - - // This is our final terminal state where we'll only reply to - // any further queries by the remote peer. 
- case chansSynced: - g.Lock() - if g.syncedSignal != nil { - close(g.syncedSignal) - g.syncedSignal = nil - } - g.Unlock() - - // If we haven't yet sent out our update horizon, and - // we want to receive real-time channel updates, we'll - // do so now. - if g.localUpdateHorizon == nil && syncType == ActiveSync { - err := g.sendGossipTimestampRange( - time.Now(), math.MaxUint32, - ) - if err != nil { - log.Errorf("Unable to send update "+ - "horizon to %x: %v", - g.cfg.peerPub, err) - } - } - - // With our horizon set, we'll simply reply to any new - // messages or process any state transitions and exit if - // needed. - select { - case req := <-g.syncTransitionReqs: - req.errChan <- g.handleSyncTransition(req) - - case req := <-g.historicalSyncReqs: - g.handleHistoricalSync(req) - - case <-g.quit: - return - } - } - } -} - -// replyHandler is an event loop whose sole purpose is to reply to the remote -// peers queries. Our replyHandler will respond to messages generated by their -// channelGraphSyncer, and vice versa. Each party's channelGraphSyncer drives -// the other's replyHandler, allowing the replyHandler to operate independently -// from the state machine maintained on the same node. -// -// NOTE: This method MUST be run as a goroutine. -func (g *GossipSyncer) replyHandler() { - defer g.wg.Done() - - for { - select { - case msg := <-g.queryMsgs: - err := g.replyPeerQueries(msg) - switch { - case ErrGossipSyncerExiting.Is(err): - return - - case lnpeer.ErrPeerExiting.Is(err): - return - - case err != nil: - log.Errorf("Unable to reply to peer "+ - "query: %v", err) - } - - case <-g.quit: - return - } - } -} - -// sendGossipTimestampRange constructs and sets a GossipTimestampRange for the -// syncer and sends it to the remote peer. 
-func (g *GossipSyncer) sendGossipTimestampRange(firstTimestamp time.Time, - timestampRange uint32) er.R { - - endTimestamp := firstTimestamp.Add( - time.Duration(timestampRange) * time.Second, - ) - - log.Infof("GossipSyncer(%x): applying gossipFilter(start=%v, end=%v)", - g.cfg.peerPub[:], firstTimestamp, endTimestamp) - - localUpdateHorizon := &lnwire.GossipTimestampRange{ - ChainHash: g.cfg.chainHash, - FirstTimestamp: uint32(firstTimestamp.Unix()), - TimestampRange: timestampRange, - } - - if err := g.cfg.sendToPeer(localUpdateHorizon); err != nil { - return err - } - - if firstTimestamp == zeroTimestamp && timestampRange == 0 { - g.localUpdateHorizon = nil - } else { - g.localUpdateHorizon = localUpdateHorizon - } - - return nil -} - -// synchronizeChanIDs is called by the channelGraphSyncer when we need to query -// the remote peer for its known set of channel IDs within a particular block -// range. This method will be called continually until the entire range has -// been queried for with a response received. We'll chunk our requests as -// required to ensure they fit into a single message. We may re-renter this -// state in the case that chunking is required. -func (g *GossipSyncer) synchronizeChanIDs() (bool, er.R) { - // If we're in this state yet there are no more new channels to query - // for, then we'll transition to our final synced state and return true - // to signal that we're fully synchronized. - if len(g.newChansToQuery) == 0 { - log.Infof("GossipSyncer(%x): no more chans to query", - g.cfg.peerPub[:]) - return true, nil - } - - // Otherwise, we'll issue our next chunked query to receive replies - // for. - var queryChunk []lnwire.ShortChannelID - - // If the number of channels to query for is less than the chunk size, - // then we can issue a single query. - if int32(len(g.newChansToQuery)) < g.cfg.batchSize { - queryChunk = g.newChansToQuery - g.newChansToQuery = nil - - } else { - // Otherwise, we'll need to only query for the next chunk. 
- // We'll slice into our query chunk, then slide down our main - // pointer down by the chunk size. - queryChunk = g.newChansToQuery[:g.cfg.batchSize] - g.newChansToQuery = g.newChansToQuery[g.cfg.batchSize:] - } - - log.Infof("GossipSyncer(%x): querying for %v new channels", - g.cfg.peerPub[:], len(queryChunk)) - - // With our chunk obtained, we'll send over our next query, then return - // false indicating that we're net yet fully synced. - err := g.cfg.sendToPeer(&lnwire.QueryShortChanIDs{ - ChainHash: g.cfg.chainHash, - EncodingType: lnwire.EncodingSortedPlain, - ShortChanIDs: queryChunk, - }) - - return false, err -} - -// isLegacyReplyChannelRange determines where a ReplyChannelRange message is -// considered legacy. There was a point where lnd used to include the same query -// over multiple replies, rather than including the portion of the query the -// reply is handling. We'll use this as a way of detecting whether we are -// communicating with a legacy node so we can properly sync with them. -func isLegacyReplyChannelRange(query *lnwire.QueryChannelRange, - reply *lnwire.ReplyChannelRange) bool { - - return reply.QueryChannelRange == *query -} - -// processChanRangeReply is called each time the GossipSyncer receives a new -// reply to the initial range query to discover new channels that it didn't -// previously know of. -func (g *GossipSyncer) processChanRangeReply(msg *lnwire.ReplyChannelRange) er.R { - // If we're not communicating with a legacy node, we'll apply some - // further constraints on their reply to ensure it satisfies our query. - if !isLegacyReplyChannelRange(g.curQueryRangeMsg, msg) { - // The first block should be within our original request. - if msg.FirstBlockHeight < g.curQueryRangeMsg.FirstBlockHeight { - return er.Errorf("reply includes channels for height "+ - "%v prior to query %v", msg.FirstBlockHeight, - g.curQueryRangeMsg.FirstBlockHeight) - } - - // The last block should also be. 
We don't need to check the - // intermediate ones because they should already be in sorted - // order. - replyLastHeight := msg.QueryChannelRange.LastBlockHeight() - queryLastHeight := g.curQueryRangeMsg.LastBlockHeight() - if replyLastHeight > queryLastHeight { - return er.Errorf("reply includes channels for height "+ - "%v after query %v", replyLastHeight, - queryLastHeight) - } - - // If we've previously received a reply for this query, look at - // its last block to ensure the current reply properly follows - // it. - if g.prevReplyChannelRange != nil { - prevReply := g.prevReplyChannelRange - prevReplyLastHeight := prevReply.LastBlockHeight() - - // The current reply can either start from the previous - // reply's last block, if there are still more channels - // for the same block, or the block after. - if msg.FirstBlockHeight != prevReplyLastHeight && - msg.FirstBlockHeight != prevReplyLastHeight+1 { - - return er.Errorf("first block of reply %v "+ - "does not continue from last block of "+ - "previous %v", msg.FirstBlockHeight, - prevReplyLastHeight) - } - } - } - - g.prevReplyChannelRange = msg - g.bufferedChanRangeReplies = append( - g.bufferedChanRangeReplies, msg.ShortChanIDs..., - ) - - log.Infof("GossipSyncer(%x): buffering chan range reply of size=%v", - g.cfg.peerPub[:], len(msg.ShortChanIDs)) - - // If this isn't the last response, then we can exit as we've already - // buffered the latest portion of the streaming reply. - switch { - // If we're communicating with a legacy node, we'll need to look at the - // complete field. - case isLegacyReplyChannelRange(g.curQueryRangeMsg, msg): - if msg.Complete == 0 { - return nil - } - - // Otherwise, we'll look at the reply's height range. 
- default: - replyLastHeight := msg.QueryChannelRange.LastBlockHeight() - queryLastHeight := g.curQueryRangeMsg.LastBlockHeight() - - // TODO(wilmer): This might require some padding if the remote - // node is not aware of the last height we sent them, i.e., is - // behind a few blocks from us. - if replyLastHeight < queryLastHeight { - return nil - } - } - - log.Infof("GossipSyncer(%x): filtering through %v chans", - g.cfg.peerPub[:], len(g.bufferedChanRangeReplies)) - - // Otherwise, this is the final response, so we'll now check to see - // which channels they know of that we don't. - newChans, err := g.cfg.channelSeries.FilterKnownChanIDs( - g.cfg.chainHash, g.bufferedChanRangeReplies, - ) - if err != nil { - return er.Errorf("unable to filter chan ids: %v", err) - } - - // As we've received the entirety of the reply, we no longer need to - // hold on to the set of buffered replies or the original query that - // prompted the replies, so we'll let that be garbage collected now. - g.curQueryRangeMsg = nil - g.prevReplyChannelRange = nil - g.bufferedChanRangeReplies = nil - - // If there aren't any channels that we don't know of, then we can - // switch straight to our terminal state. - if len(newChans) == 0 { - log.Infof("GossipSyncer(%x): remote peer has no new chans", - g.cfg.peerPub[:]) - - g.setSyncState(chansSynced) - return nil - } - - // Otherwise, we'll set the set of channels that we need to query for - // the next state, and also transition our state. - g.newChansToQuery = newChans - g.setSyncState(queryNewChannels) - - log.Infof("GossipSyncer(%x): starting query for %v new chans", - g.cfg.peerPub[:], len(newChans)) - - return nil -} - -// genChanRangeQuery generates the initial message we'll send to the remote -// party when we're kicking off the channel graph synchronization upon -// connection. The historicalQuery boolean can be used to generate a query from -// the genesis block of the chain. 
-func (g *GossipSyncer) genChanRangeQuery( - historicalQuery bool) (*lnwire.QueryChannelRange, er.R) { - - // First, we'll query our channel graph time series for its highest - // known channel ID. - newestChan, err := g.cfg.channelSeries.HighestChanID(g.cfg.chainHash) - if err != nil { - return nil, err - } - - // Once we have the chan ID of the newest, we'll obtain the block height - // of the channel, then subtract our default horizon to ensure we don't - // miss any channels. By default, we go back 1 day from the newest - // channel, unless we're attempting a historical sync, where we'll - // actually start from the genesis block instead. - var startHeight uint32 - switch { - case historicalQuery: - fallthrough - case newestChan.BlockHeight <= chanRangeQueryBuffer: - startHeight = 0 - default: - startHeight = uint32(newestChan.BlockHeight - chanRangeQueryBuffer) - } - - log.Infof("GossipSyncer(%x): requesting new chans from height=%v "+ - "and %v blocks after", g.cfg.peerPub[:], startHeight, - math.MaxUint32-startHeight) - - // Finally, we'll craft the channel range query, using our starting - // height, then asking for all known channels to the foreseeable end of - // the main chain. - query := &lnwire.QueryChannelRange{ - ChainHash: g.cfg.chainHash, - FirstBlockHeight: startHeight, - NumBlocks: math.MaxUint32 - startHeight, - } - g.curQueryRangeMsg = query - - return query, nil -} - -// replyPeerQueries is called in response to any query by the remote peer. -// We'll examine our state and send back our best response. -func (g *GossipSyncer) replyPeerQueries(msg lnwire.Message) er.R { - reservation := g.rateLimiter.Reserve() - delay := reservation.Delay() - - // If we've already replied a handful of times, we will start to delay - // responses back to the remote peer. This can help prevent DOS attacks - // where the remote peer spams us endlessly. 
- if delay > 0 { - log.Infof("GossipSyncer(%x): rate limiting gossip replies, "+ - "responding in %s", g.cfg.peerPub[:], delay) - - select { - case <-time.After(delay): - case <-g.quit: - return ErrGossipSyncerExiting.Default() - } - } - - switch msg := msg.(type) { - - // In this state, we'll also handle any incoming channel range queries - // from the remote peer as they're trying to sync their state as well. - case *lnwire.QueryChannelRange: - return g.replyChanRangeQuery(msg) - - // If the remote peer skips straight to requesting new channels that - // they don't know of, then we'll ensure that we also handle this case. - case *lnwire.QueryShortChanIDs: - return g.replyShortChanIDs(msg) - - default: - return er.Errorf("unknown message: %T", msg) - } -} - -// replyChanRangeQuery will be dispatched in response to a channel range query -// by the remote node. We'll query the channel time series for channels that -// meet the channel range, then chunk our responses to the remote node. We also -// ensure that our final fragment carries the "complete" bit to indicate the -// end of our streaming response. -func (g *GossipSyncer) replyChanRangeQuery(query *lnwire.QueryChannelRange) er.R { - // Before responding, we'll check to ensure that the remote peer is - // querying for the same chain that we're on. If not, we'll send back a - // response with a complete value of zero to indicate we're on a - // different chain. 
- if g.cfg.chainHash != query.ChainHash { - log.Warnf("Remote peer requested QueryChannelRange for "+ - "chain=%v, we're on chain=%v", query.ChainHash, - g.cfg.chainHash) - - return g.cfg.sendToPeerSync(&lnwire.ReplyChannelRange{ - QueryChannelRange: *query, - Complete: 0, - EncodingType: g.cfg.encodingType, - ShortChanIDs: nil, - }) - } - - log.Infof("GossipSyncer(%x): filtering chan range: start_height=%v, "+ - "num_blocks=%v", g.cfg.peerPub[:], query.FirstBlockHeight, - query.NumBlocks) - - // Next, we'll consult the time series to obtain the set of known - // channel ID's that match their query. - startBlock := query.FirstBlockHeight - endBlock := query.LastBlockHeight() - channelRange, err := g.cfg.channelSeries.FilterChannelRange( - query.ChainHash, startBlock, endBlock, - ) - if err != nil { - return err - } - - // TODO(roasbeef): means can't send max uint above? - // * or make internal 64 - - // In the base case (no actual response) the first block and last block - // will match those of the query. In the loop below, we'll update these - // two variables incrementally with each chunk to properly compute the - // starting block for each response and the number of blocks in a - // response. - firstBlockHeight := startBlock - lastBlockHeight := endBlock - - numChannels := int32(len(channelRange)) - numChansSent := int32(0) - for { - // We'll send our this response in a streaming manner, - // chunk-by-chunk. We do this as there's a transport message - // size limit which we'll need to adhere to. - var channelChunk []lnwire.ShortChannelID - - // We know this is the final chunk, if the difference between - // the total number of channels, and the number of channels - // we've sent is less-than-or-equal to the chunk size. - isFinalChunk := (numChannels - numChansSent) <= g.cfg.chunkSize - - // If this is indeed the last chunk, then we'll send the - // remainder of the channels. 
- if isFinalChunk { - channelChunk = channelRange[numChansSent:] - - log.Infof("GossipSyncer(%x): sending final chan "+ - "range chunk, size=%v", g.cfg.peerPub[:], - len(channelChunk)) - } else { - // Otherwise, we'll only send off a fragment exactly - // sized to the proper chunk size. - channelChunk = channelRange[numChansSent : numChansSent+g.cfg.chunkSize] - - log.Infof("GossipSyncer(%x): sending range chunk of "+ - "size=%v", g.cfg.peerPub[:], len(channelChunk)) - } - - // If we have any channels at all to return, then we need to - // update our pointers to the first and last blocks for each - // response. - if len(channelChunk) > 0 { - // If this is the first response we'll send, we'll point - // the first block to the first block in the query. - // Otherwise, we'll continue from the block we left off - // at. - if numChansSent == 0 { - firstBlockHeight = startBlock - } else { - firstBlockHeight = lastBlockHeight - } - - // If this is the last response we'll send, we'll point - // the last block to the last block of the query. - // Otherwise, we'll set it to the height of the last - // channel in the chunk. - if isFinalChunk { - lastBlockHeight = endBlock - } else { - lastBlockHeight = channelChunk[len(channelChunk)-1].BlockHeight - } - } - - // The number of blocks contained in this response (the total - // span) is the difference between the last channel ID and the - // first in the range. We add one as even if all channels - // returned are in the same block, we need to count that. - numBlocksInResp := lastBlockHeight - firstBlockHeight + 1 - - // With our chunk assembled, we'll now send to the remote peer - // the current chunk. 
- replyChunk := lnwire.ReplyChannelRange{ - QueryChannelRange: lnwire.QueryChannelRange{ - ChainHash: query.ChainHash, - NumBlocks: numBlocksInResp, - FirstBlockHeight: firstBlockHeight, - }, - Complete: 0, - EncodingType: g.cfg.encodingType, - ShortChanIDs: channelChunk, - } - if isFinalChunk { - replyChunk.Complete = 1 - } - if err := g.cfg.sendToPeerSync(&replyChunk); err != nil { - return err - } - - // If this was the final chunk, then we'll exit now as our - // response is now complete. - if isFinalChunk { - return nil - } - - numChansSent += int32(len(channelChunk)) - } -} - -// replyShortChanIDs will be dispatched in response to a query by the remote -// node for information concerning a set of short channel ID's. Our response -// will be sent in a streaming chunked manner to ensure that we remain below -// the current transport level message size. -func (g *GossipSyncer) replyShortChanIDs(query *lnwire.QueryShortChanIDs) er.R { - // Before responding, we'll check to ensure that the remote peer is - // querying for the same chain that we're on. If not, we'll send back a - // response with a complete value of zero to indicate we're on a - // different chain. - if g.cfg.chainHash != query.ChainHash { - log.Warnf("Remote peer requested QueryShortChanIDs for "+ - "chain=%v, we're on chain=%v", query.ChainHash, - g.cfg.chainHash) - - return g.cfg.sendToPeerSync(&lnwire.ReplyShortChanIDsEnd{ - ChainHash: query.ChainHash, - Complete: 0, - }) - } - - if len(query.ShortChanIDs) == 0 { - log.Infof("GossipSyncer(%x): ignoring query for blank short chan ID's", - g.cfg.peerPub[:]) - return nil - } - - log.Infof("GossipSyncer(%x): fetching chan anns for %v chans", - g.cfg.peerPub[:], len(query.ShortChanIDs)) - - // Now that we know we're on the same chain, we'll query the channel - // time series for the set of messages that we know of which satisfies - // the requirement of being a chan ann, chan update, or a node ann - // related to the set of queried channels. 
- replyMsgs, err := g.cfg.channelSeries.FetchChanAnns( - query.ChainHash, query.ShortChanIDs, - ) - if err != nil { - return er.Errorf("unable to fetch chan anns for %v..., %v", - query.ShortChanIDs[0].ToUint64(), err) - } - - // Reply with any messages related to those channel ID's, we'll write - // each one individually and synchronously to throttle the sends and - // perform buffering of responses in the syncer as opposed to the peer. - for _, msg := range replyMsgs { - err := g.cfg.sendToPeerSync(msg) - if err != nil { - return err - } - } - - // Regardless of whether we had any messages to reply with, send over - // the sentinel message to signal that the stream has terminated. - return g.cfg.sendToPeerSync(&lnwire.ReplyShortChanIDsEnd{ - ChainHash: query.ChainHash, - Complete: 1, - }) -} - -// ApplyGossipFilter applies a gossiper filter sent by the remote node to the -// state machine. Once applied, we'll ensure that we don't forward any messages -// to the peer that aren't within the time range of the filter. -func (g *GossipSyncer) ApplyGossipFilter(filter *lnwire.GossipTimestampRange) er.R { - g.Lock() - - g.remoteUpdateHorizon = filter - - startTime := time.Unix(int64(g.remoteUpdateHorizon.FirstTimestamp), 0) - endTime := startTime.Add( - time.Duration(g.remoteUpdateHorizon.TimestampRange) * time.Second, - ) - - g.Unlock() - - // If requested, don't reply with historical gossip data when the remote - // peer sets their gossip timestamp range. - if g.cfg.ignoreHistoricalFilters { - return nil - } - - // Now that the remote peer has applied their filter, we'll query the - // database for all the messages that are beyond this filter. 
- newUpdatestoSend, err := g.cfg.channelSeries.UpdatesInHorizon( - g.cfg.chainHash, startTime, endTime, - ) - if err != nil { - return err - } - - log.Infof("GossipSyncer(%x): applying new update horizon: start=%v, "+ - "end=%v, backlog_size=%v", g.cfg.peerPub[:], startTime, endTime, - len(newUpdatestoSend)) - - // If we don't have any to send, then we can return early. - if len(newUpdatestoSend) == 0 { - return nil - } - - // We'll conclude by launching a goroutine to send out any updates. - g.wg.Add(1) - go func() { - defer g.wg.Done() - - for _, msg := range newUpdatestoSend { - err := g.cfg.sendToPeerSync(msg) - switch { - case ErrGossipSyncerExiting.Is(err): - return - - case lnpeer.ErrPeerExiting.Is(err): - return - - case err != nil: - log.Errorf("Unable to send message for "+ - "peer catch up: %v", err) - } - } - }() - - return nil -} - -// FilterGossipMsgs takes a set of gossip messages, and only send it to a peer -// iff the message is within the bounds of their set gossip filter. If the peer -// doesn't have a gossip filter set, then no messages will be forwarded. -func (g *GossipSyncer) FilterGossipMsgs(msgs ...msgWithSenders) { - // If the peer doesn't have an update horizon set, then we won't send - // it any new update messages. - if g.remoteUpdateHorizon == nil { - return - } - - // If we've been signaled to exit, or are exiting, then we'll stop - // short. - select { - case <-g.quit: - return - default: - } - - // TODO(roasbeef): need to ensure that peer still online...send msg to - // gossiper on peer termination to signal peer disconnect? - - var err er.R - - // Before we filter out the messages, we'll construct an index over the - // set of channel announcements and channel updates. This will allow us - // to quickly check if we should forward a chan ann, based on the known - // channel updates for a channel. 
- chanUpdateIndex := make(map[lnwire.ShortChannelID][]*lnwire.ChannelUpdate) - for _, msg := range msgs { - chanUpdate, ok := msg.msg.(*lnwire.ChannelUpdate) - if !ok { - continue - } - - chanUpdateIndex[chanUpdate.ShortChannelID] = append( - chanUpdateIndex[chanUpdate.ShortChannelID], chanUpdate, - ) - } - - // We'll construct a helper function that we'll us below to determine - // if a given messages passes the gossip msg filter. - g.Lock() - startTime := time.Unix(int64(g.remoteUpdateHorizon.FirstTimestamp), 0) - endTime := startTime.Add( - time.Duration(g.remoteUpdateHorizon.TimestampRange) * time.Second, - ) - g.Unlock() - - passesFilter := func(timeStamp uint32) bool { - t := time.Unix(int64(timeStamp), 0) - return t.Equal(startTime) || - (t.After(startTime) && t.Before(endTime)) - } - - msgsToSend := make([]lnwire.Message, 0, len(msgs)) - for _, msg := range msgs { - // If the target peer is the peer that sent us this message, - // then we'll exit early as we don't need to filter this - // message. - if _, ok := msg.senders[g.cfg.peerPub]; ok { - continue - } - - switch msg := msg.msg.(type) { - - // For each channel announcement message, we'll only send this - // message if the channel updates for the channel are between - // our time range. - case *lnwire.ChannelAnnouncement: - // First, we'll check if the channel updates are in - // this message batch. - chanUpdates, ok := chanUpdateIndex[msg.ShortChannelID] - if !ok { - // If not, we'll attempt to query the database - // to see if we know of the updates. 
- chanUpdates, err = g.cfg.channelSeries.FetchChanUpdates( - g.cfg.chainHash, msg.ShortChannelID, - ) - if err != nil { - log.Warnf("no channel updates found for "+ - "short_chan_id=%v", - msg.ShortChannelID) - continue - } - } - - for _, chanUpdate := range chanUpdates { - if passesFilter(chanUpdate.Timestamp) { - msgsToSend = append(msgsToSend, msg) - break - } - } - - if len(chanUpdates) == 0 { - msgsToSend = append(msgsToSend, msg) - } - - // For each channel update, we'll only send if it the timestamp - // is between our time range. - case *lnwire.ChannelUpdate: - if passesFilter(msg.Timestamp) { - msgsToSend = append(msgsToSend, msg) - } - - // Similarly, we only send node announcements if the update - // timestamp ifs between our set gossip filter time range. - case *lnwire.NodeAnnouncement: - if passesFilter(msg.Timestamp) { - msgsToSend = append(msgsToSend, msg) - } - } - } - - log.Tracef("GossipSyncer(%x): filtered gossip msgs: set=%v, sent=%v", - g.cfg.peerPub[:], len(msgs), len(msgsToSend)) - - if len(msgsToSend) == 0 { - return - } - - g.cfg.sendToPeer(msgsToSend...) -} - -// ProcessQueryMsg is used by outside callers to pass new channel time series -// queries to the internal processing goroutine. -func (g *GossipSyncer) ProcessQueryMsg(msg lnwire.Message, peerQuit <-chan struct{}) { - var msgChan chan lnwire.Message - switch msg.(type) { - case *lnwire.QueryChannelRange, *lnwire.QueryShortChanIDs: - msgChan = g.queryMsgs - default: - msgChan = g.gossipMsgs - } - - select { - case msgChan <- msg: - case <-peerQuit: - case <-g.quit: - } -} - -// setSyncState sets the gossip syncer's state to the given state. -func (g *GossipSyncer) setSyncState(state syncerState) { - atomic.StoreUint32(&g.state, uint32(state)) -} - -// syncState returns the current syncerState of the target GossipSyncer. 
-func (g *GossipSyncer) syncState() syncerState { - return syncerState(atomic.LoadUint32(&g.state)) -} - -// ResetSyncedSignal returns a channel that will be closed in order to serve as -// a signal for when the GossipSyncer has reached its chansSynced state. -func (g *GossipSyncer) ResetSyncedSignal() chan struct{} { - g.Lock() - defer g.Unlock() - - syncedSignal := make(chan struct{}) - - syncState := syncerState(atomic.LoadUint32(&g.state)) - if syncState == chansSynced { - close(syncedSignal) - return syncedSignal - } - - g.syncedSignal = syncedSignal - return g.syncedSignal -} - -// ProcessSyncTransition sends a request to the gossip syncer to transition its -// sync type to a new one. -// -// NOTE: This can only be done once the gossip syncer has reached its final -// chansSynced state. -func (g *GossipSyncer) ProcessSyncTransition(newSyncType SyncerType) er.R { - errChan := make(chan er.R, 1) - select { - case g.syncTransitionReqs <- &syncTransitionReq{ - newSyncType: newSyncType, - errChan: errChan, - }: - case <-time.After(syncTransitionTimeout): - return ErrSyncTransitionTimeout.Default() - case <-g.quit: - return ErrGossipSyncerExiting.Default() - } - - select { - case err := <-errChan: - return err - case <-g.quit: - return ErrGossipSyncerExiting.Default() - } -} - -// handleSyncTransition handles a new sync type transition request. -// -// NOTE: The gossip syncer might have another sync state as a result of this -// transition. -func (g *GossipSyncer) handleSyncTransition(req *syncTransitionReq) er.R { - // Return early from any NOP sync transitions. - syncType := g.SyncType() - if syncType == req.newSyncType { - return nil - } - - log.Debugf("GossipSyncer(%x): transitioning from %v to %v", - g.cfg.peerPub, syncType, req.newSyncType) - - var ( - firstTimestamp time.Time - timestampRange uint32 - ) - - switch req.newSyncType { - // If an active sync has been requested, then we should resume receiving - // new graph updates from the remote peer. 
- case ActiveSync: - firstTimestamp = time.Now() - timestampRange = math.MaxUint32 - - // If a PassiveSync transition has been requested, then we should no - // longer receive any new updates from the remote peer. We can do this - // by setting our update horizon to a range in the past ensuring no - // graph updates match the timestamp range. - case PassiveSync: - firstTimestamp = zeroTimestamp - timestampRange = 0 - - default: - return er.Errorf("unhandled sync transition %v", - req.newSyncType) - } - - err := g.sendGossipTimestampRange(firstTimestamp, timestampRange) - if err != nil { - return er.Errorf("unable to send local update horizon: %v", err) - } - - g.setSyncType(req.newSyncType) - - return nil -} - -// setSyncType sets the gossip syncer's sync type to the given type. -func (g *GossipSyncer) setSyncType(syncType SyncerType) { - atomic.StoreUint32(&g.syncType, uint32(syncType)) -} - -// SyncType returns the current SyncerType of the target GossipSyncer. -func (g *GossipSyncer) SyncType() SyncerType { - return SyncerType(atomic.LoadUint32(&g.syncType)) -} - -// historicalSync sends a request to the gossip syncer to perofmr a historical -// sync. -// -// NOTE: This can only be done once the gossip syncer has reached its final -// chansSynced state. -func (g *GossipSyncer) historicalSync() er.R { - done := make(chan struct{}) - - select { - case g.historicalSyncReqs <- &historicalSyncReq{ - doneChan: done, - }: - case <-time.After(syncTransitionTimeout): - return ErrSyncTransitionTimeout.Default() - case <-g.quit: - return ErrGossiperShuttingDown.Default() - } - - select { - case <-done: - return nil - case <-g.quit: - return ErrGossiperShuttingDown.Default() - } -} - -// handleHistoricalSync handles a request to the gossip syncer to perform a -// historical sync. 
-func (g *GossipSyncer) handleHistoricalSync(req *historicalSyncReq) { - // We'll go back to our initial syncingChans state in order to request - // the remote peer to give us all of the channel IDs they know of - // starting from the genesis block. - g.genHistoricalChanRangeQuery = true - g.setSyncState(syncingChans) - close(req.doneChan) -} diff --git a/lnd/discovery/syncer_test.go b/lnd/discovery/syncer_test.go deleted file mode 100644 index 17167bbe..00000000 --- a/lnd/discovery/syncer_test.go +++ /dev/null @@ -1,2303 +0,0 @@ -package discovery - -import ( - "math" - "reflect" - "sync" - "testing" - "time" - - "github.com/davecgh/go-spew/spew" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/chaincfg" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -const ( - defaultEncoding = lnwire.EncodingSortedPlain - latestKnownHeight = 1337 -) - -var ( - defaultChunkSize = encodingTypeToChunkSize[defaultEncoding] -) - -type horizonQuery struct { - chain chainhash.Hash - start time.Time - end time.Time -} -type filterRangeReq struct { - startHeight, endHeight uint32 -} - -type mockChannelGraphTimeSeries struct { - highestID lnwire.ShortChannelID - - horizonReq chan horizonQuery - horizonResp chan []lnwire.Message - - filterReq chan []lnwire.ShortChannelID - filterResp chan []lnwire.ShortChannelID - - filterRangeReqs chan filterRangeReq - filterRangeResp chan []lnwire.ShortChannelID - - annReq chan []lnwire.ShortChannelID - annResp chan []lnwire.Message - - updateReq chan lnwire.ShortChannelID - updateResp chan []*lnwire.ChannelUpdate -} - -func newMockChannelGraphTimeSeries( - hID lnwire.ShortChannelID) *mockChannelGraphTimeSeries { - - return &mockChannelGraphTimeSeries{ - highestID: hID, - - horizonReq: make(chan horizonQuery, 1), - horizonResp: make(chan []lnwire.Message, 1), - - filterReq: make(chan []lnwire.ShortChannelID, 1), - filterResp: make(chan []lnwire.ShortChannelID, 1), - - filterRangeReqs: 
make(chan filterRangeReq, 1), - filterRangeResp: make(chan []lnwire.ShortChannelID, 1), - - annReq: make(chan []lnwire.ShortChannelID, 1), - annResp: make(chan []lnwire.Message, 1), - - updateReq: make(chan lnwire.ShortChannelID, 1), - updateResp: make(chan []*lnwire.ChannelUpdate, 1), - } -} - -func (m *mockChannelGraphTimeSeries) HighestChanID(chain chainhash.Hash) (*lnwire.ShortChannelID, er.R) { - return &m.highestID, nil -} -func (m *mockChannelGraphTimeSeries) UpdatesInHorizon(chain chainhash.Hash, - startTime time.Time, endTime time.Time) ([]lnwire.Message, er.R) { - - m.horizonReq <- horizonQuery{ - chain, startTime, endTime, - } - - return <-m.horizonResp, nil -} -func (m *mockChannelGraphTimeSeries) FilterKnownChanIDs(chain chainhash.Hash, - superSet []lnwire.ShortChannelID) ([]lnwire.ShortChannelID, er.R) { - - m.filterReq <- superSet - - return <-m.filterResp, nil -} -func (m *mockChannelGraphTimeSeries) FilterChannelRange(chain chainhash.Hash, - startHeight, endHeight uint32) ([]lnwire.ShortChannelID, er.R) { - - m.filterRangeReqs <- filterRangeReq{startHeight, endHeight} - - return <-m.filterRangeResp, nil -} -func (m *mockChannelGraphTimeSeries) FetchChanAnns(chain chainhash.Hash, - shortChanIDs []lnwire.ShortChannelID) ([]lnwire.Message, er.R) { - - m.annReq <- shortChanIDs - - return <-m.annResp, nil -} -func (m *mockChannelGraphTimeSeries) FetchChanUpdates(chain chainhash.Hash, - shortChanID lnwire.ShortChannelID) ([]*lnwire.ChannelUpdate, er.R) { - - m.updateReq <- shortChanID - - return <-m.updateResp, nil -} - -var _ ChannelGraphTimeSeries = (*mockChannelGraphTimeSeries)(nil) - -// newTestSyncer creates a new test instance of a GossipSyncer. A buffered -// message channel is returned for intercepting messages sent from the syncer, -// in addition to a mock channel series which allows the test to control which -// messages the syncer knows of or wishes to filter out. 
The variadic flags are -// treated as positional arguments where the first index signals that the syncer -// should spawn a channelGraphSyncer and second index signals that the syncer -// should spawn a replyHandler. Any flags beyond the first two are currently -// ignored. If no flags are provided, both a channelGraphSyncer and replyHandler -// will be spawned by default. -func newTestSyncer(hID lnwire.ShortChannelID, - encodingType lnwire.ShortChanIDEncoding, chunkSize int32, - flags ...bool) (chan []lnwire.Message, - *GossipSyncer, *mockChannelGraphTimeSeries) { - - syncChannels := true - replyQueries := true - if len(flags) > 0 { - syncChannels = flags[0] - } - if len(flags) > 1 { - replyQueries = flags[1] - } - - msgChan := make(chan []lnwire.Message, 20) - cfg := gossipSyncerCfg{ - channelSeries: newMockChannelGraphTimeSeries(hID), - encodingType: encodingType, - chunkSize: chunkSize, - batchSize: chunkSize, - noSyncChannels: !syncChannels, - noReplyQueries: !replyQueries, - sendToPeer: func(msgs ...lnwire.Message) er.R { - msgChan <- msgs - return nil - }, - sendToPeerSync: func(msgs ...lnwire.Message) er.R { - msgChan <- msgs - return nil - }, - delayedQueryReplyInterval: 2 * time.Second, - } - syncer := newGossipSyncer(cfg) - - return msgChan, syncer, cfg.channelSeries.(*mockChannelGraphTimeSeries) -} - -// TestGossipSyncerFilterGossipMsgsNoHorizon tests that if the remote peer -// doesn't have a horizon set, then we won't send any incoming messages to it. -func TestGossipSyncerFilterGossipMsgsNoHorizon(t *testing.T) { - t.Parallel() - - // First, we'll create a GossipSyncer instance with a canned sendToPeer - // message to allow us to intercept their potential sends. - msgChan, syncer, _ := newTestSyncer( - lnwire.NewShortChanIDFromInt(10), defaultEncoding, - defaultChunkSize, - ) - - // With the syncer created, we'll create a set of messages to filter - // through the gossiper to the target peer. 
- msgs := []msgWithSenders{ - { - msg: &lnwire.NodeAnnouncement{Timestamp: uint32(time.Now().Unix())}, - }, - { - msg: &lnwire.NodeAnnouncement{Timestamp: uint32(time.Now().Unix())}, - }, - } - - // We'll then attempt to filter the set of messages through the target - // peer. - syncer.FilterGossipMsgs(msgs...) - - // As the remote peer doesn't yet have a gossip timestamp set, we - // shouldn't receive any outbound messages. - select { - case msg := <-msgChan: - t.Fatalf("received message but shouldn't have: %v", - spew.Sdump(msg)) - - case <-time.After(time.Millisecond * 10): - } -} - -func unixStamp(a int64) uint32 { - t := time.Unix(a, 0) - return uint32(t.Unix()) -} - -// TestGossipSyncerFilterGossipMsgsAll tests that we're able to properly filter -// out a set of incoming messages based on the set remote update horizon for a -// peer. We tests all messages type, and all time straddling. We'll also send a -// channel ann that already has a channel update on disk. -func TestGossipSyncerFilterGossipMsgsAllInMemory(t *testing.T) { - t.Parallel() - - // First, we'll create a GossipSyncer instance with a canned sendToPeer - // message to allow us to intercept their potential sends. - msgChan, syncer, chanSeries := newTestSyncer( - lnwire.NewShortChanIDFromInt(10), defaultEncoding, - defaultChunkSize, - ) - - // We'll create then apply a remote horizon for the target peer with a - // set of manually selected timestamps. - remoteHorizon := &lnwire.GossipTimestampRange{ - FirstTimestamp: unixStamp(25000), - TimestampRange: uint32(1000), - } - syncer.remoteUpdateHorizon = remoteHorizon - - // With the syncer created, we'll create a set of messages to filter - // through the gossiper to the target peer. Our message will consist of - // one node announcement above the horizon, one below. Additionally, - // we'll include a chan ann with an update below the horizon, one - // with an update timestamp above the horizon, and one without any - // channel updates at all. 
- msgs := []msgWithSenders{ - { - // Node ann above horizon. - msg: &lnwire.NodeAnnouncement{Timestamp: unixStamp(25001)}, - }, - { - // Node ann below horizon. - msg: &lnwire.NodeAnnouncement{Timestamp: unixStamp(5)}, - }, - { - // Node ann above horizon. - msg: &lnwire.NodeAnnouncement{Timestamp: unixStamp(999999)}, - }, - { - // Ann tuple below horizon. - msg: &lnwire.ChannelAnnouncement{ - ShortChannelID: lnwire.NewShortChanIDFromInt(10), - }, - }, - { - msg: &lnwire.ChannelUpdate{ - ShortChannelID: lnwire.NewShortChanIDFromInt(10), - Timestamp: unixStamp(5), - }, - }, - { - // Ann tuple above horizon. - msg: &lnwire.ChannelAnnouncement{ - ShortChannelID: lnwire.NewShortChanIDFromInt(15), - }, - }, - { - msg: &lnwire.ChannelUpdate{ - ShortChannelID: lnwire.NewShortChanIDFromInt(15), - Timestamp: unixStamp(25002), - }, - }, - { - // Ann tuple beyond horizon. - msg: &lnwire.ChannelAnnouncement{ - ShortChannelID: lnwire.NewShortChanIDFromInt(20), - }, - }, - { - msg: &lnwire.ChannelUpdate{ - ShortChannelID: lnwire.NewShortChanIDFromInt(20), - Timestamp: unixStamp(999999), - }, - }, - { - // Ann w/o an update at all, the update in the DB will - // be below the horizon. - msg: &lnwire.ChannelAnnouncement{ - ShortChannelID: lnwire.NewShortChanIDFromInt(25), - }, - }, - } - - // Before we send off the query, we'll ensure we send the missing - // channel update for that final ann. It will be below the horizon, so - // shouldn't be sent anyway. - errCh := make(chan er.R, 1) - go func() { - select { - case <-time.After(time.Second * 15): - errCh <- er.New("no query received") - return - case query := <-chanSeries.updateReq: - // It should be asking for the chan updates of short - // chan ID 25. - expectedID := lnwire.NewShortChanIDFromInt(25) - if expectedID != query { - errCh <- er.Errorf("wrong query id: expected %v, got %v", - expectedID, query) - return - } - - // If so, then we'll send back the missing update. 
- chanSeries.updateResp <- []*lnwire.ChannelUpdate{ - { - ShortChannelID: lnwire.NewShortChanIDFromInt(25), - Timestamp: unixStamp(5), - }, - } - errCh <- nil - } - }() - - // We'll then instruct the gossiper to filter this set of messages. - syncer.FilterGossipMsgs(msgs...) - - // Out of all the messages we sent in, we should only get 2 of them - // back. - select { - case <-time.After(time.Second * 15): - t.Fatalf("no msgs received") - - case msgs := <-msgChan: - if len(msgs) != 3 { - t.Fatalf("expected 3 messages instead got %v "+ - "messages: %v", len(msgs), spew.Sdump(msgs)) - } - } - - // Wait for error from goroutine. - select { - case <-time.After(time.Second * 30): - t.Fatalf("goroutine did not return within 30 seconds") - case err := <-errCh: - if err != nil { - t.Fatal(err) - } - } -} - -// TestGossipSyncerApplyNoHistoricalGossipFilter tests that once a gossip filter -// is applied for the remote peer, then we don't send the peer all known -// messages which are within their desired time horizon. -func TestGossipSyncerApplyNoHistoricalGossipFilter(t *testing.T) { - t.Parallel() - - // First, we'll create a GossipSyncer instance with a canned sendToPeer - // message to allow us to intercept their potential sends. - _, syncer, chanSeries := newTestSyncer( - lnwire.NewShortChanIDFromInt(10), defaultEncoding, - defaultChunkSize, - ) - syncer.cfg.ignoreHistoricalFilters = true - - // We'll apply this gossip horizon for the remote peer. - remoteHorizon := &lnwire.GossipTimestampRange{ - FirstTimestamp: unixStamp(25000), - TimestampRange: uint32(1000), - } - - // After applying the gossip filter, the chan series should not be - // queried using the updated horizon. - errChan := make(chan er.R, 1) - var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() - - select { - // No query received, success. - case <-time.After(3 * time.Second): - errChan <- nil - - // Unexpected query received. 
- case <-chanSeries.horizonReq: - errChan <- er.New("chan series should not have been " + - "queried") - } - }() - - // We'll now attempt to apply the gossip filter for the remote peer. - syncer.ApplyGossipFilter(remoteHorizon) - - // Ensure that the syncer's remote horizon was properly updated. - if !reflect.DeepEqual(syncer.remoteUpdateHorizon, remoteHorizon) { - t.Fatalf("expected remote horizon: %v, got: %v", - remoteHorizon, syncer.remoteUpdateHorizon) - } - - // Wait for the query check to finish. - wg.Wait() - - // Assert that no query was made as a result of applying the gossip - // filter. - err := <-errChan - if err != nil { - t.Fatalf(err.String()) - } -} - -// TestGossipSyncerApplyGossipFilter tests that once a gossip filter is applied -// for the remote peer, then we send the peer all known messages which are -// within their desired time horizon. -func TestGossipSyncerApplyGossipFilter(t *testing.T) { - t.Parallel() - - // First, we'll create a GossipSyncer instance with a canned sendToPeer - // message to allow us to intercept their potential sends. - msgChan, syncer, chanSeries := newTestSyncer( - lnwire.NewShortChanIDFromInt(10), defaultEncoding, - defaultChunkSize, - ) - - // We'll apply this gossip horizon for the remote peer. - remoteHorizon := &lnwire.GossipTimestampRange{ - FirstTimestamp: unixStamp(25000), - TimestampRange: uint32(1000), - } - - // Before we apply the horizon, we'll dispatch a response to the query - // that the syncer will issue. - errCh := make(chan er.R, 1) - go func() { - select { - case <-time.After(time.Second * 15): - errCh <- er.New("no query recvd") - return - case query := <-chanSeries.horizonReq: - // The syncer should have translated the time range - // into the proper star time. 
- if remoteHorizon.FirstTimestamp != uint32(query.start.Unix()) { - errCh <- er.Errorf("wrong query stamp: expected %v, got %v", - remoteHorizon.FirstTimestamp, query.start) - return - } - - // For this first response, we'll send back an empty - // set of messages. As result, we shouldn't send any - // messages. - chanSeries.horizonResp <- []lnwire.Message{} - errCh <- nil - } - }() - - // We'll now attempt to apply the gossip filter for the remote peer. - err := syncer.ApplyGossipFilter(remoteHorizon) - if err != nil { - t.Fatalf("unable to apply filter: %v", err) - } - - // There should be no messages in the message queue as we didn't send - // the syncer and messages within the horizon. - select { - case msgs := <-msgChan: - t.Fatalf("expected no msgs, instead got %v", spew.Sdump(msgs)) - default: - } - - // Wait for error result from goroutine. - select { - case <-time.After(time.Second * 30): - t.Fatalf("goroutine did not return within 30 seconds") - case err := <-errCh: - if err != nil { - t.Fatal(err) - } - } - - // If we repeat the process, but give the syncer a set of valid - // messages, then these should be sent to the remote peer. - go func() { - select { - case <-time.After(time.Second * 15): - errCh <- er.New("no query recvd") - return - case query := <-chanSeries.horizonReq: - // The syncer should have translated the time range - // into the proper star time. - if remoteHorizon.FirstTimestamp != uint32(query.start.Unix()) { - errCh <- er.Errorf("wrong query stamp: expected %v, got %v", - remoteHorizon.FirstTimestamp, query.start) - return - } - - // For this first response, we'll send back a proper - // set of messages that should be echoed back. 
- chanSeries.horizonResp <- []lnwire.Message{ - &lnwire.ChannelUpdate{ - ShortChannelID: lnwire.NewShortChanIDFromInt(25), - Timestamp: unixStamp(5), - }, - } - errCh <- nil - } - }() - err = syncer.ApplyGossipFilter(remoteHorizon) - if err != nil { - t.Fatalf("unable to apply filter: %v", err) - } - - // We should get back the exact same message. - select { - case <-time.After(time.Second * 15): - t.Fatalf("no msgs received") - - case msgs := <-msgChan: - if len(msgs) != 1 { - t.Fatalf("wrong messages: expected %v, got %v", - 1, len(msgs)) - } - } - - // Wait for error result from goroutine. - select { - case <-time.After(time.Second * 30): - t.Fatalf("goroutine did not return within 30 seconds") - case err := <-errCh: - if err != nil { - t.Fatal(err) - } - } -} - -// TestGossipSyncerQueryChannelRangeWrongChainHash tests that if we receive a -// channel range query for the wrong chain, then we send back a response with no -// channels and complete=0. -func TestGossipSyncerQueryChannelRangeWrongChainHash(t *testing.T) { - t.Parallel() - - // First, we'll create a GossipSyncer instance with a canned sendToPeer - // message to allow us to intercept their potential sends. - msgChan, syncer, _ := newTestSyncer( - lnwire.NewShortChanIDFromInt(10), defaultEncoding, - defaultChunkSize, - ) - - // We'll now ask the syncer to reply to a channel range query, but for a - // chain that it isn't aware of. - query := &lnwire.QueryChannelRange{ - ChainHash: *chaincfg.SimNetParams.GenesisHash, - FirstBlockHeight: 0, - NumBlocks: math.MaxUint32, - } - err := syncer.replyChanRangeQuery(query) - if err != nil { - t.Fatalf("unable to process short chan ID's: %v", err) - } - - select { - case <-time.After(time.Second * 15): - t.Fatalf("no msgs received") - - case msgs := <-msgChan: - // We should get back exactly one message, that's a - // ReplyChannelRange with a matching query, and a complete value - // of zero. 
- if len(msgs) != 1 { - t.Fatalf("wrong messages: expected %v, got %v", - 1, len(msgs)) - } - - msg, ok := msgs[0].(*lnwire.ReplyChannelRange) - if !ok { - t.Fatalf("expected lnwire.ReplyChannelRange, got %T", msg) - } - - if msg.QueryChannelRange != *query { - t.Fatalf("wrong query channel range in reply: "+ - "expected: %v\ngot: %v", spew.Sdump(*query), - spew.Sdump(msg.QueryChannelRange)) - } - if msg.Complete != 0 { - t.Fatalf("expected complete set to 0, got %v", - msg.Complete) - } - } -} - -// TestGossipSyncerReplyShortChanIDsWrongChainHash tests that if we get a chan -// ID query for the wrong chain, then we send back only a short ID end with -// complete=0. -func TestGossipSyncerReplyShortChanIDsWrongChainHash(t *testing.T) { - t.Parallel() - - // First, we'll create a GossipSyncer instance with a canned sendToPeer - // message to allow us to intercept their potential sends. - msgChan, syncer, _ := newTestSyncer( - lnwire.NewShortChanIDFromInt(10), defaultEncoding, - defaultChunkSize, - ) - - // We'll now ask the syncer to reply to a chan ID query, but for a - // chain that it isn't aware of. - err := syncer.replyShortChanIDs(&lnwire.QueryShortChanIDs{ - ChainHash: *chaincfg.SimNetParams.GenesisHash, - }) - if err != nil { - t.Fatalf("unable to process short chan ID's: %v", err) - } - - select { - case <-time.After(time.Second * 15): - t.Fatalf("no msgs received") - case msgs := <-msgChan: - - // We should get back exactly one message, that's a - // ReplyShortChanIDsEnd with a matching chain hash, and a - // complete value of zero. 
- if len(msgs) != 1 { - t.Fatalf("wrong messages: expected %v, got %v", - 1, len(msgs)) - } - - msg, ok := msgs[0].(*lnwire.ReplyShortChanIDsEnd) - if !ok { - t.Fatalf("expected lnwire.ReplyShortChanIDsEnd "+ - "instead got %T", msg) - } - - if msg.ChainHash != *chaincfg.SimNetParams.GenesisHash { - t.Fatalf("wrong chain hash: expected %v, got %v", - msg.ChainHash, chaincfg.SimNetParams.GenesisHash) - } - if msg.Complete != 0 { - t.Fatalf("complete set incorrectly") - } - } -} - -// TestGossipSyncerReplyShortChanIDs tests that in the case of a known chain -// hash for a QueryShortChanIDs, we'll return the set of matching -// announcements, as well as an ending ReplyShortChanIDsEnd message. -func TestGossipSyncerReplyShortChanIDs(t *testing.T) { - t.Parallel() - - // First, we'll create a GossipSyncer instance with a canned sendToPeer - // message to allow us to intercept their potential sends. - msgChan, syncer, chanSeries := newTestSyncer( - lnwire.NewShortChanIDFromInt(10), defaultEncoding, - defaultChunkSize, - ) - - queryChanIDs := []lnwire.ShortChannelID{ - lnwire.NewShortChanIDFromInt(1), - lnwire.NewShortChanIDFromInt(2), - lnwire.NewShortChanIDFromInt(3), - } - - queryReply := []lnwire.Message{ - &lnwire.ChannelAnnouncement{ - ShortChannelID: lnwire.NewShortChanIDFromInt(20), - }, - &lnwire.ChannelUpdate{ - ShortChannelID: lnwire.NewShortChanIDFromInt(20), - Timestamp: unixStamp(999999), - }, - &lnwire.NodeAnnouncement{Timestamp: unixStamp(25001)}, - } - - // We'll then craft a reply to the upcoming query for all the matching - // channel announcements for a particular set of short channel ID's. - errCh := make(chan er.R, 1) - go func() { - select { - case <-time.After(time.Second * 15): - errCh <- er.New("no query recvd") - return - case chanIDs := <-chanSeries.annReq: - // The set of chan ID's should match exactly. 
- if !reflect.DeepEqual(chanIDs, queryChanIDs) { - errCh <- er.Errorf("wrong chan IDs: expected %v, got %v", - queryChanIDs, chanIDs) - return - } - - // If they do, then we'll send back a response with - // some canned messages. - chanSeries.annResp <- queryReply - errCh <- nil - } - }() - - // With our set up above complete, we'll now attempt to obtain a reply - // from the channel syncer for our target chan ID query. - err := syncer.replyShortChanIDs(&lnwire.QueryShortChanIDs{ - ShortChanIDs: queryChanIDs, - }) - if err != nil { - t.Fatalf("unable to query for chan IDs: %v", err) - } - - for i := 0; i < len(queryReply)+1; i++ { - select { - case <-time.After(time.Second * 15): - t.Fatalf("no msgs received") - - // We should get back exactly 4 messages. The first 3 are the - // same messages we sent above, and the query end message. - case msgs := <-msgChan: - if len(msgs) != 1 { - t.Fatalf("wrong number of messages: "+ - "expected %v, got %v", 1, len(msgs)) - } - - isQueryReply := i < len(queryReply) - finalMsg, ok := msgs[0].(*lnwire.ReplyShortChanIDsEnd) - - switch { - case isQueryReply && - !reflect.DeepEqual(queryReply[i], msgs[0]): - - t.Fatalf("wrong message: expected %v, got %v", - spew.Sdump(queryReply[i]), - spew.Sdump(msgs[0])) - - case !isQueryReply && !ok: - t.Fatalf("expected lnwire.ReplyShortChanIDsEnd"+ - " instead got %T", msgs[3]) - - case !isQueryReply && finalMsg.Complete != 1: - t.Fatalf("complete wasn't set") - } - } - } - - // Wait for error from goroutine. - select { - case <-time.After(time.Second * 30): - t.Fatalf("goroutine did not return within 30 seconds") - case err := <-errCh: - if err != nil { - t.Fatal(err) - } - } -} - -// TestGossipSyncerReplyChanRangeQuery tests that if we receive a -// QueryChannelRange message, then we'll properly send back a chunked reply to -// the remote peer. 
-func TestGossipSyncerReplyChanRangeQuery(t *testing.T) { - t.Parallel() - - // We'll use a smaller chunk size so we can easily test all the edge - // cases. - const chunkSize = 2 - - // We'll now create our test gossip syncer that will shortly respond to - // our canned query. - msgChan, syncer, chanSeries := newTestSyncer( - lnwire.NewShortChanIDFromInt(10), defaultEncoding, chunkSize, - ) - - // Next, we'll craft a query to ask for all the new chan ID's after - // block 100. - const startingBlockHeight = 100 - const numBlocks = 50 - const endingBlockHeight = startingBlockHeight + numBlocks - 1 - query := &lnwire.QueryChannelRange{ - FirstBlockHeight: uint32(startingBlockHeight), - NumBlocks: uint32(numBlocks), - } - - // We'll then launch a goroutine to reply to the query with a set of 5 - // responses. This will ensure we get two full chunks, and one partial - // chunk. - queryResp := []lnwire.ShortChannelID{ - { - BlockHeight: uint32(startingBlockHeight), - }, - { - BlockHeight: 102, - }, - { - BlockHeight: 104, - }, - { - BlockHeight: 106, - }, - { - BlockHeight: 108, - }, - } - - errCh := make(chan er.R, 1) - go func() { - select { - case <-time.After(time.Second * 15): - errCh <- er.New("no query recvd") - return - case filterReq := <-chanSeries.filterRangeReqs: - // We should be querying for block 100 to 150. - if filterReq.startHeight != startingBlockHeight && - filterReq.endHeight != endingBlockHeight { - - errCh <- er.Errorf("wrong height range: %v", - spew.Sdump(filterReq)) - return - } - - // If the proper request was sent, then we'll respond - // with our set of short channel ID's. - chanSeries.filterRangeResp <- queryResp - errCh <- nil - } - }() - - // With our goroutine active, we'll now issue the query. - if err := syncer.replyChanRangeQuery(query); err != nil { - t.Fatalf("unable to issue query: %v", err) - } - - // At this point, we'll now wait for the syncer to send the chunked - // reply. 
We should get three sets of messages as two of them should be - // full, while the other is the final fragment. - const numExpectedChunks = 3 - respMsgs := make([]lnwire.ShortChannelID, 0, 5) - for i := 0; i < numExpectedChunks; i++ { - select { - case <-time.After(time.Second * 15): - t.Fatalf("no msgs received") - - case msg := <-msgChan: - resp := msg[0] - rangeResp, ok := resp.(*lnwire.ReplyChannelRange) - if !ok { - t.Fatalf("expected ReplyChannelRange instead got %T", msg) - } - - // We'll determine the correct values of each field in - // each response based on the order that they were sent. - var ( - expectedFirstBlockHeight uint32 - expectedNumBlocks uint32 - expectedComplete uint8 - ) - - switch { - // The first reply should range from our starting block - // height until it reaches its maximum capacity of - // channels. - case i == 0: - expectedFirstBlockHeight = startingBlockHeight - expectedNumBlocks = chunkSize + 1 - - // The last reply should range starting from the next - // block of our previous reply up until the ending - // height of the query. It should also have the Complete - // bit set. - case i == numExpectedChunks-1: - expectedFirstBlockHeight = respMsgs[len(respMsgs)-1].BlockHeight - expectedNumBlocks = endingBlockHeight - expectedFirstBlockHeight + 1 - expectedComplete = 1 - - // Any intermediate replies should range starting from - // the next block of our previous reply up until it - // reaches its maximum capacity of channels. 
- default: - expectedFirstBlockHeight = respMsgs[len(respMsgs)-1].BlockHeight - expectedNumBlocks = 5 - } - - switch { - case rangeResp.FirstBlockHeight != expectedFirstBlockHeight: - t.Fatalf("FirstBlockHeight in resp #%d "+ - "incorrect: expected %v, got %v", i+1, - expectedFirstBlockHeight, - rangeResp.FirstBlockHeight) - - case rangeResp.NumBlocks != expectedNumBlocks: - t.Fatalf("NumBlocks in resp #%d incorrect: "+ - "expected %v, got %v", i+1, - expectedNumBlocks, rangeResp.NumBlocks) - - case rangeResp.Complete != expectedComplete: - t.Fatalf("Complete in resp #%d incorrect: "+ - "expected %v, got %v", i+1, - expectedNumBlocks, rangeResp.Complete) - } - - respMsgs = append(respMsgs, rangeResp.ShortChanIDs...) - } - } - - // We should get back exactly 5 short chan ID's, and they should match - // exactly the ID's we sent as a reply. - if len(respMsgs) != len(queryResp) { - t.Fatalf("expected %v chan ID's, instead got %v", - len(queryResp), spew.Sdump(respMsgs)) - } - if !reflect.DeepEqual(queryResp, respMsgs) { - t.Fatalf("mismatched response: expected %v, got %v", - spew.Sdump(queryResp), spew.Sdump(respMsgs)) - } - - // Wait for error from goroutine. 
- select { - case <-time.After(time.Second * 30): - t.Fatalf("goroutine did not return within 30 seconds") - case err := <-errCh: - if err != nil { - t.Fatal(err) - } - } -} - -// TestGossipSyncerReplyChanRangeQuery tests a variety of -// QueryChannelRange messages to ensure the underlying queries are -// executed with the correct block range -func TestGossipSyncerReplyChanRangeQueryBlockRange(t *testing.T) { - t.Parallel() - - // First create our test gossip syncer that will handle and - // respond to the test queries - _, syncer, chanSeries := newTestSyncer( - lnwire.NewShortChanIDFromInt(10), defaultEncoding, math.MaxInt32, - ) - - // Next construct test queries with various startBlock and endBlock - // ranges - queryReqs := []*lnwire.QueryChannelRange{ - // full range example - { - FirstBlockHeight: uint32(0), - NumBlocks: uint32(math.MaxUint32), - }, - - // small query example that does not overflow - { - FirstBlockHeight: uint32(1000), - NumBlocks: uint32(100), - }, - - // overflow example - { - FirstBlockHeight: uint32(1000), - NumBlocks: uint32(math.MaxUint32), - }, - } - - // Next construct the expected filterRangeReq startHeight and endHeight - // values that we will compare to the captured values - expFilterReqs := []filterRangeReq{ - { - startHeight: uint32(0), - endHeight: uint32(math.MaxUint32 - 1), - }, - { - startHeight: uint32(1000), - endHeight: uint32(1099), - }, - { - startHeight: uint32(1000), - endHeight: uint32(math.MaxUint32), - }, - } - - // We'll then launch a goroutine to capture the filterRangeReqs for - // each request and return those results once all queries have been - // received - resultsCh := make(chan []filterRangeReq, 1) - errCh := make(chan er.R, 1) - go func() { - // We will capture the values supplied to the chanSeries here - // and return the results once all the requests have been - // collected - capFilterReqs := []filterRangeReq{} - - for filterReq := range chanSeries.filterRangeReqs { - // capture the filter request so 
we can compare to the - // expected values later - capFilterReqs = append(capFilterReqs, filterReq) - - // Reply with an empty result for each query to allow - // unblock the caller - queryResp := []lnwire.ShortChannelID{} - chanSeries.filterRangeResp <- queryResp - - // Once we have collected all results send the results - // back to the main thread and terminate the goroutine - if len(capFilterReqs) == len(expFilterReqs) { - resultsCh <- capFilterReqs - return - } - - } - }() - - // We'll launch a goroutine to send the query sequentially. This - // goroutine ensures that the timeout logic below on the mainthread - // will be reached - go func() { - for _, query := range queryReqs { - if err := syncer.replyChanRangeQuery(query); err != nil { - errCh <- er.Errorf("unable to issue query: %v", err) - return - } - } - }() - - // Wait for the results to be collected and validate that the - // collected results match the expected results, the timeout to - // expire, or an error to occur - select { - case capFilterReq := <-resultsCh: - if !reflect.DeepEqual(expFilterReqs, capFilterReq) { - t.Fatalf("mismatched filter reqs: expected %v, got %v", - spew.Sdump(expFilterReqs), spew.Sdump(capFilterReq)) - } - case <-time.After(time.Second * 10): - t.Fatalf("goroutine did not return within 10 seconds") - case err := <-errCh: - if err != nil { - t.Fatal(err) - } - } -} - -// TestGossipSyncerReplyChanRangeQueryNoNewChans tests that if we issue a reply -// for a channel range query, and we don't have any new channels, then we send -// back a single response that signals completion. -func TestGossipSyncerReplyChanRangeQueryNoNewChans(t *testing.T) { - t.Parallel() - - // We'll now create our test gossip syncer that will shortly respond to - // our canned query. - msgChan, syncer, chanSeries := newTestSyncer( - lnwire.NewShortChanIDFromInt(10), defaultEncoding, - defaultChunkSize, - ) - - // Next, we'll craft a query to ask for all the new chan ID's after - // block 100. 
- query := &lnwire.QueryChannelRange{ - FirstBlockHeight: 100, - NumBlocks: 50, - } - - // We'll then launch a goroutine to reply to the query no new channels. - resp := []lnwire.ShortChannelID{} - errCh := make(chan er.R, 1) - go func() { - select { - case <-time.After(time.Second * 15): - errCh <- er.New("no query recvd") - return - case filterReq := <-chanSeries.filterRangeReqs: - // We should be querying for block 100 to 150. - if filterReq.startHeight != 100 && filterReq.endHeight != 150 { - errCh <- er.Errorf("wrong height range: %v", - spew.Sdump(filterReq)) - return - } - // If the proper request was sent, then we'll respond - // with our blank set of short chan ID's. - chanSeries.filterRangeResp <- resp - errCh <- nil - } - }() - - // With our goroutine active, we'll now issue the query. - if err := syncer.replyChanRangeQuery(query); err != nil { - t.Fatalf("unable to issue query: %v", err) - } - - // We should get back exactly one message, and the message should - // indicate that this is the final in the series. - select { - case <-time.After(time.Second * 15): - t.Fatalf("no msgs received") - - case msg := <-msgChan: - resp := msg[0] - rangeResp, ok := resp.(*lnwire.ReplyChannelRange) - if !ok { - t.Fatalf("expected ReplyChannelRange instead got %T", msg) - } - - if len(rangeResp.ShortChanIDs) != 0 { - t.Fatalf("expected no chan ID's, instead "+ - "got: %v", spew.Sdump(rangeResp.ShortChanIDs)) - } - if rangeResp.Complete != 1 { - t.Fatalf("complete wasn't set") - } - } - - // Wait for error from goroutine. - select { - case <-time.After(time.Second * 30): - t.Fatalf("goroutine did not return within 30 seconds") - case err := <-errCh: - if err != nil { - t.Fatal(err) - } - } -} - -// TestGossipSyncerGenChanRangeQuery tests that given the current best known -// channel ID, we properly generate an correct initial channel range response. 
-func TestGossipSyncerGenChanRangeQuery(t *testing.T) { - t.Parallel() - - // First, we'll create a GossipSyncer instance with a canned sendToPeer - // message to allow us to intercept their potential sends. - const startingHeight = 200 - _, syncer, _ := newTestSyncer( - lnwire.ShortChannelID{BlockHeight: startingHeight}, - defaultEncoding, defaultChunkSize, - ) - - // If we now ask the syncer to generate an initial range query, it - // should return a start height that's back chanRangeQueryBuffer - // blocks. - rangeQuery, err := syncer.genChanRangeQuery(false) - if err != nil { - t.Fatalf("unable to resp: %v", err) - } - - firstHeight := uint32(startingHeight - chanRangeQueryBuffer) - if rangeQuery.FirstBlockHeight != firstHeight { - t.Fatalf("incorrect chan range query: expected %v, %v", - rangeQuery.FirstBlockHeight, - startingHeight-chanRangeQueryBuffer) - } - if rangeQuery.NumBlocks != math.MaxUint32-firstHeight { - t.Fatalf("wrong num blocks: expected %v, got %v", - math.MaxUint32-firstHeight, rangeQuery.NumBlocks) - } - - // Generating a historical range query should result in a start height - // of 0. - rangeQuery, err = syncer.genChanRangeQuery(true) - if err != nil { - t.Fatalf("unable to resp: %v", err) - } - if rangeQuery.FirstBlockHeight != 0 { - t.Fatalf("incorrect chan range query: expected %v, %v", 0, - rangeQuery.FirstBlockHeight) - } - if rangeQuery.NumBlocks != math.MaxUint32 { - t.Fatalf("wrong num blocks: expected %v, got %v", - math.MaxUint32, rangeQuery.NumBlocks) - } -} - -// TestGossipSyncerProcessChanRangeReply tests that we'll properly buffer -// replied channel replies until we have the complete version. 
-func TestGossipSyncerProcessChanRangeReply(t *testing.T) { - t.Parallel() - - t.Run("legacy", func(t *testing.T) { - testGossipSyncerProcessChanRangeReply(t, true) - }) - t.Run("block ranges", func(t *testing.T) { - testGossipSyncerProcessChanRangeReply(t, false) - }) -} - -// testGossipSyncerProcessChanRangeReply tests that we'll properly buffer -// replied channel replies until we have the complete version. The legacy -// option, if set, uses the Complete field of the reply to determine when we've -// received all expected replies. Otherwise, it looks at the block ranges of -// each reply instead. -func testGossipSyncerProcessChanRangeReply(t *testing.T, legacy bool) { - t.Parallel() - - // First, we'll create a GossipSyncer instance with a canned sendToPeer - // message to allow us to intercept their potential sends. - highestID := lnwire.ShortChannelID{ - BlockHeight: latestKnownHeight, - } - _, syncer, chanSeries := newTestSyncer( - highestID, defaultEncoding, defaultChunkSize, - ) - - startingState := syncer.state - - query, err := syncer.genChanRangeQuery(true) - if err != nil { - t.Fatalf("unable to generate channel range query: %v", err) - } - - var replyQueries []*lnwire.QueryChannelRange - if legacy { - // Each reply query is the same as the original query in the - // legacy mode. - replyQueries = []*lnwire.QueryChannelRange{query, query, query} - } else { - // When interpreting block ranges, the first reply should start - // from our requested first block, and the last should end at - // our requested last block. 
- replyQueries = []*lnwire.QueryChannelRange{ - { - FirstBlockHeight: 0, - NumBlocks: 11, - }, - { - FirstBlockHeight: 11, - NumBlocks: 1, - }, - { - FirstBlockHeight: 12, - NumBlocks: query.NumBlocks - 12, - }, - } - } - - replies := []*lnwire.ReplyChannelRange{ - { - QueryChannelRange: *replyQueries[0], - ShortChanIDs: []lnwire.ShortChannelID{ - { - BlockHeight: 10, - }, - }, - }, - { - QueryChannelRange: *replyQueries[1], - ShortChanIDs: []lnwire.ShortChannelID{ - { - BlockHeight: 11, - }, - }, - }, - { - QueryChannelRange: *replyQueries[2], - Complete: 1, - ShortChanIDs: []lnwire.ShortChannelID{ - { - BlockHeight: 12, - }, - }, - }, - } - - // We'll begin by sending the syncer a set of non-complete channel - // range replies. - if err := syncer.processChanRangeReply(replies[0]); err != nil { - t.Fatalf("unable to process reply: %v", err) - } - if err := syncer.processChanRangeReply(replies[1]); err != nil { - t.Fatalf("unable to process reply: %v", err) - } - - // At this point, we should still be in our starting state as the query - // hasn't finished. - if syncer.state != startingState { - t.Fatalf("state should not have transitioned") - } - - expectedReq := []lnwire.ShortChannelID{ - { - BlockHeight: 10, - }, - { - BlockHeight: 11, - }, - { - BlockHeight: 12, - }, - } - - // As we're about to send the final response, we'll launch a goroutine - // to respond back with a filtered set of chan ID's. - errCh := make(chan er.R, 1) - go func() { - select { - case <-time.After(time.Second * 15): - errCh <- er.New("no query received") - return - - case req := <-chanSeries.filterReq: - // We should get a request for the entire range of short - // chan ID's. - if !reflect.DeepEqual(expectedReq, req) { - errCh <- er.Errorf("wrong request: expected %v, got %v", - expectedReq, req) - return - } - - // We'll send back only the last two to simulate filtering. 
- chanSeries.filterResp <- expectedReq[1:] - errCh <- nil - } - }() - - // If we send the final message, then we should transition to - // queryNewChannels as we've sent a non-empty set of new channels. - if err := syncer.processChanRangeReply(replies[2]); err != nil { - t.Fatalf("unable to process reply: %v", err) - } - - if syncer.syncState() != queryNewChannels { - t.Fatalf("wrong state: expected %v instead got %v", - queryNewChannels, syncer.state) - } - if !reflect.DeepEqual(syncer.newChansToQuery, expectedReq[1:]) { - t.Fatalf("wrong set of chans to query: expected %v, got %v", - syncer.newChansToQuery, expectedReq[1:]) - } - - // Wait for error from goroutine. - select { - case <-time.After(time.Second * 30): - t.Fatalf("goroutine did not return within 30 seconds") - case err := <-errCh: - if err != nil { - t.Fatal(err) - } - } -} - -// TestGossipSyncerSynchronizeChanIDs tests that we properly request chunks of -// the short chan ID's which were unknown to us. We'll ensure that we request -// chunk by chunk, and after the last chunk, we return true indicating that we -// can transition to the synced stage. -func TestGossipSyncerSynchronizeChanIDs(t *testing.T) { - t.Parallel() - - // We'll modify the chunk size to be a smaller value, so we can ensure - // our chunk parsing works properly. With this value we should get 3 - // queries: two full chunks, and one lingering chunk. - const chunkSize = 2 - - // First, we'll create a GossipSyncer instance with a canned sendToPeer - // message to allow us to intercept their potential sends. - msgChan, syncer, _ := newTestSyncer( - lnwire.NewShortChanIDFromInt(10), defaultEncoding, chunkSize, - ) - - // Next, we'll construct a set of chan ID's that we should query for, - // and set them as newChansToQuery within the state machine. 
- newChanIDs := []lnwire.ShortChannelID{ - lnwire.NewShortChanIDFromInt(1), - lnwire.NewShortChanIDFromInt(2), - lnwire.NewShortChanIDFromInt(3), - lnwire.NewShortChanIDFromInt(4), - lnwire.NewShortChanIDFromInt(5), - } - syncer.newChansToQuery = newChanIDs - - for i := 0; i < chunkSize*2; i += 2 { - // With our set up complete, we'll request a sync of chan ID's. - done, err := syncer.synchronizeChanIDs() - if err != nil { - t.Fatalf("unable to sync chan IDs: %v", err) - } - - // At this point, we shouldn't yet be done as only 2 items - // should have been queried for. - if done { - t.Fatalf("syncer shown as done, but shouldn't be!") - } - - // We should've received a new message from the syncer. - select { - case <-time.After(time.Second * 15): - t.Fatalf("no msgs received") - - case msg := <-msgChan: - queryMsg, ok := msg[0].(*lnwire.QueryShortChanIDs) - if !ok { - t.Fatalf("expected QueryShortChanIDs instead "+ - "got %T", msg) - } - - // The query message should have queried for the first - // two chan ID's, and nothing more. - if !reflect.DeepEqual(queryMsg.ShortChanIDs, newChanIDs[i:i+chunkSize]) { - t.Fatalf("wrong query: expected %v, got %v", - spew.Sdump(newChanIDs[i:i+chunkSize]), - queryMsg.ShortChanIDs) - } - } - - // With the proper message sent out, the internal state of the - // syncer should reflect that it still has more channels to - // query for. - if !reflect.DeepEqual(syncer.newChansToQuery, newChanIDs[i+chunkSize:]) { - t.Fatalf("incorrect chans to query for: expected %v, got %v", - spew.Sdump(newChanIDs[i+chunkSize:]), - syncer.newChansToQuery) - } - } - - // At this point, only one more channel should be lingering for the - // syncer to query for. - if !reflect.DeepEqual(newChanIDs[chunkSize*2:], syncer.newChansToQuery) { - t.Fatalf("wrong chans to query: expected %v, got %v", - newChanIDs[chunkSize*2:], syncer.newChansToQuery) - } - - // If we issue another query, the syncer should tell us that it's done. 
- done, err := syncer.synchronizeChanIDs() - if err != nil { - t.Fatalf("unable to sync chan IDs: %v", err) - } - if done { - t.Fatalf("syncer should be finished!") - } - - select { - case <-time.After(time.Second * 15): - t.Fatalf("no msgs received") - - case msg := <-msgChan: - queryMsg, ok := msg[0].(*lnwire.QueryShortChanIDs) - if !ok { - t.Fatalf("expected QueryShortChanIDs instead "+ - "got %T", msg) - } - - // The query issued should simply be the last item. - if !reflect.DeepEqual(queryMsg.ShortChanIDs, newChanIDs[chunkSize*2:]) { - t.Fatalf("wrong query: expected %v, got %v", - spew.Sdump(newChanIDs[chunkSize*2:]), - queryMsg.ShortChanIDs) - } - - // There also should be no more channels to query. - if len(syncer.newChansToQuery) != 0 { - t.Fatalf("should be no more chans to query for, "+ - "instead have %v", - spew.Sdump(syncer.newChansToQuery)) - } - } -} - -// TestGossipSyncerDelayDOS tests that the gossip syncer will begin delaying -// queries after its prescribed allotment of undelayed query responses. Once -// this happens, all query replies should be delayed by the configurated -// interval. -func TestGossipSyncerDelayDOS(t *testing.T) { - t.Parallel() - - // We'll modify the chunk size to be a smaller value, since we'll be - // sending a modest number of queries. After exhausting our undelayed - // gossip queries, we'll send two extra queries and ensure that they are - // delayed properly. - const chunkSize = 2 - const numDelayedQueries = 2 - const delayTolerance = time.Millisecond * 200 - - // First, we'll create two GossipSyncer instances with a canned - // sendToPeer message to allow us to intercept their potential sends. 
- highestID := lnwire.ShortChannelID{ - BlockHeight: 1144, - } - msgChan1, syncer1, chanSeries1 := newTestSyncer( - highestID, defaultEncoding, chunkSize, true, false, - ) - syncer1.Start() - defer syncer1.Stop() - - msgChan2, syncer2, chanSeries2 := newTestSyncer( - highestID, defaultEncoding, chunkSize, false, true, - ) - syncer2.Start() - defer syncer2.Stop() - - // Record the delayed query reply interval used by each syncer. - delayedQueryInterval := syncer1.cfg.delayedQueryReplyInterval - - // Record the number of undelayed queries allowed by the syncers. - numUndelayedQueries := syncer1.cfg.maxUndelayedQueryReplies - - // We will send enough queries to exhaust the undelayed responses, and - // then send two more queries which should be delayed. An additional one - // is subtracted from the total since undelayed message will be consumed - // by the initial QueryChannelRange. - numQueryResponses := numUndelayedQueries + numDelayedQueries - 1 - - // The total number of responses must include the initial reply each - // syncer will make to QueryChannelRange. - numTotalQueries := 1 + numQueryResponses - - // The total number of channels each syncer needs to request must be - // scaled by the chunk size being used. - numTotalChans := numQueryResponses * chunkSize - - // Construct enough channels so that all of the queries will have enough - // channels. Since syncer1 won't know of any channels, their sets are - // inherently disjoint. - var syncer2Chans []lnwire.ShortChannelID - for i := 0; i < numTotalChans; i++ { - syncer2Chans = append(syncer2Chans, lnwire.ShortChannelID{ - BlockHeight: highestID.BlockHeight - 1, - TxIndex: uint32(i), - }) - } - - // We'll kick off the test by asserting syncer1 sends over the - // QueryChannelRange message the other node. - select { - case <-time.After(time.Second * 2): - t.Fatalf("didn't get msg from syncer1") - - case msgs := <-msgChan1: - for _, msg := range msgs { - // The message MUST be a QueryChannelRange message. 
- _, ok := msg.(*lnwire.QueryChannelRange) - if !ok { - t.Fatalf("wrong message: expected "+ - "QueryChannelRange for %T", msg) - } - - select { - case <-time.After(time.Second * 2): - t.Fatalf("node 2 didn't read msg") - - case syncer2.queryMsgs <- msg: - - } - } - } - - // At this point, we'll need to a response from syncer2's channel - // series. This will cause syncer1 to simply request the entire set of - // channels from syncer2. This will count as the first undelayed - // response for sycner2. - select { - case <-time.After(time.Second * 2): - t.Fatalf("no query recvd") - - case <-chanSeries2.filterRangeReqs: - // We'll send back all the channels that it should know of. - chanSeries2.filterRangeResp <- syncer2Chans - } - - // At this point, we'll assert that the ReplyChannelRange message is - // sent by sycner2. - for i := 0; i < numQueryResponses; i++ { - select { - case <-time.After(time.Second * 2): - t.Fatalf("didn't get msg from syncer2") - - case msgs := <-msgChan2: - for _, msg := range msgs { - // The message MUST be a ReplyChannelRange message. - _, ok := msg.(*lnwire.ReplyChannelRange) - if !ok { - t.Fatalf("wrong message: expected "+ - "QueryChannelRange for %T", msg) - } - - select { - case <-time.After(time.Second * 2): - t.Fatalf("node 2 didn't read msg") - - case syncer1.gossipMsgs <- msg: - } - } - } - } - - // We'll now have syncer1 process the received sids from syncer2. - select { - case <-time.After(time.Second * 2): - t.Fatalf("no query recvd") - - case <-chanSeries1.filterReq: - chanSeries1.filterResp <- syncer2Chans - } - - // At this point, syncer1 should start to send out initial requests to - // query the chan IDs of the remote party. We'll keep track of the - // number of queries made using the iterated value, which starts at one - // due the initial contribution of the QueryChannelRange msgs. 
- for i := 1; i < numTotalQueries; i++ { - expDelayResponse := i >= numUndelayedQueries - queryBatch(t, - msgChan1, msgChan2, - syncer1, syncer2, - chanSeries2, - expDelayResponse, - delayedQueryInterval, - delayTolerance, - ) - } -} - -// queryBatch is a helper method that will query for a single batch of channels -// from a peer and assert the responses. The method can also be used to assert -// the same transition happens, but is delayed by the remote peer's DOS -// rate-limiting. The provided chanSeries should belong to syncer2. -// -// The state transition performed is the following: -// syncer1 -- QueryShortChanIDs --> syncer2 -// chanSeries.FetchChanAnns() -// syncer1 <-- ReplyShortChanIDsEnd -- syncer2 -// -// If expDelayResponse is true, this method will assert that the call the -// FetchChanAnns happens between: -// [delayedQueryInterval-delayTolerance, delayedQueryInterval+delayTolerance]. -func queryBatch(t *testing.T, - msgChan1, msgChan2 chan []lnwire.Message, - syncer1, syncer2 *GossipSyncer, - chanSeries *mockChannelGraphTimeSeries, - expDelayResponse bool, - delayedQueryInterval, delayTolerance time.Duration) { - - t.Helper() - - // First, we'll assert that syncer1 sends a QueryShortChanIDs message to - // the remote peer. - select { - case <-time.After(time.Second * 2): - t.Fatalf("didn't get msg from syncer2") - - case msgs := <-msgChan1: - for _, msg := range msgs { - // The message MUST be a QueryShortChanIDs message. - _, ok := msg.(*lnwire.QueryShortChanIDs) - if !ok { - t.Fatalf("wrong message: expected "+ - "QueryShortChanIDs for %T", msg) - } - - select { - case <-time.After(time.Second * 2): - t.Fatalf("node 2 didn't read msg") - - case syncer2.queryMsgs <- msg: - } - } - } - - // We'll then respond to with an empty set of replies (as it doesn't - // affect the test). - switch { - - // If this query has surpassed the undelayed query threshold, we will - // impose stricter timing constraints on the response times. 
We'll first - // test that syncer2's chanSeries doesn't immediately receive a query, - // and then check that the query hasn't gone unanswered entirely. - case expDelayResponse: - // Create a before and after timeout to test, our test - // will ensure the messages are delivered to the peer - // in this timeframe. - before := time.After( - delayedQueryInterval - delayTolerance, - ) - after := time.After( - delayedQueryInterval + delayTolerance, - ) - - // First, ensure syncer2 doesn't try to respond up until the - // before time fires. - select { - case <-before: - // Query is delayed, proceed. - - case <-chanSeries.annReq: - t.Fatalf("DOSy query was not delayed") - } - - // If syncer2 doesn't attempt a response within the allowed - // interval, then the messages are probably lost. - select { - case <-after: - t.Fatalf("no delayed query received") - - case <-chanSeries.annReq: - chanSeries.annResp <- []lnwire.Message{} - } - - // Otherwise, syncer2 should query its chanSeries promtly. - default: - select { - case <-time.After(50 * time.Millisecond): - t.Fatalf("no query recvd") - - case <-chanSeries.annReq: - chanSeries.annResp <- []lnwire.Message{} - } - } - - // Finally, assert that syncer2 replies to syncer1 with a - // ReplyShortChanIDsEnd. - select { - case <-time.After(50 * time.Millisecond): - t.Fatalf("didn't get msg from syncer2") - - case msgs := <-msgChan2: - for _, msg := range msgs { - // The message MUST be a ReplyShortChanIDsEnd message. - _, ok := msg.(*lnwire.ReplyShortChanIDsEnd) - if !ok { - t.Fatalf("wrong message: expected "+ - "ReplyShortChanIDsEnd for %T", msg) - } - - select { - case <-time.After(time.Second * 2): - t.Fatalf("node 2 didn't read msg") - - case syncer1.gossipMsgs <- msg: - } - } - } -} - -// TestGossipSyncerRoutineSync tests all state transitions of the main syncer -// goroutine. 
This ensures that given an encounter with a peer that has a set -// of distinct channels, then we'll properly synchronize our channel state with -// them. -func TestGossipSyncerRoutineSync(t *testing.T) { - t.Parallel() - - // We'll modify the chunk size to be a smaller value, so we can ensure - // our chunk parsing works properly. With this value we should get 3 - // queries: two full chunks, and one lingering chunk. - const chunkSize = 2 - - // First, we'll create two GossipSyncer instances with a canned - // sendToPeer message to allow us to intercept their potential sends. - highestID := lnwire.ShortChannelID{ - BlockHeight: 1144, - } - msgChan1, syncer1, chanSeries1 := newTestSyncer( - highestID, defaultEncoding, chunkSize, true, false, - ) - syncer1.Start() - defer syncer1.Stop() - - msgChan2, syncer2, chanSeries2 := newTestSyncer( - highestID, defaultEncoding, chunkSize, false, true, - ) - syncer2.Start() - defer syncer2.Stop() - - // Although both nodes are at the same height, syncer will have 3 chan - // ID's that syncer1 doesn't know of. - syncer2Chans := []lnwire.ShortChannelID{ - {BlockHeight: highestID.BlockHeight - 3}, - {BlockHeight: highestID.BlockHeight - 2}, - {BlockHeight: highestID.BlockHeight - 1}, - } - - // We'll kick off the test by passing over the QueryChannelRange - // messages from syncer1 to syncer2. - select { - case <-time.After(time.Second * 2): - t.Fatalf("didn't get msg from syncer1") - - case msgs := <-msgChan1: - for _, msg := range msgs { - // The message MUST be a QueryChannelRange message. 
- _, ok := msg.(*lnwire.QueryChannelRange) - if !ok { - t.Fatalf("wrong message: expected "+ - "QueryChannelRange for %T", msg) - } - - select { - case <-time.After(time.Second * 2): - t.Fatalf("node 2 didn't read msg") - - case syncer2.queryMsgs <- msg: - - } - } - } - - // At this point, we'll need to send a response from syncer2 to syncer1 - // using syncer2's channels This will cause syncer1 to simply request - // the entire set of channels from the other. - select { - case <-time.After(time.Second * 2): - t.Fatalf("no query recvd") - - case <-chanSeries2.filterRangeReqs: - // We'll send back all the channels that it should know of. - chanSeries2.filterRangeResp <- syncer2Chans - } - - // At this point, we'll assert that syncer2 replies with the - // ReplyChannelRange messages. Two replies are expected since the chunk - // size is 2, and we need to query for 3 channels. - for i := 0; i < chunkSize; i++ { - select { - case <-time.After(time.Second * 2): - t.Fatalf("didn't get msg from syncer2") - - case msgs := <-msgChan2: - for _, msg := range msgs { - // The message MUST be a ReplyChannelRange message. - _, ok := msg.(*lnwire.ReplyChannelRange) - if !ok { - t.Fatalf("wrong message: expected "+ - "QueryChannelRange for %T", msg) - } - - select { - case <-time.After(time.Second * 2): - t.Fatalf("node 2 didn't read msg") - - case syncer1.gossipMsgs <- msg: - } - } - } - } - - // We'll now send back a chunked response from syncer2 back to sycner1. - select { - case <-time.After(time.Second * 2): - t.Fatalf("no query recvd") - - case <-chanSeries1.filterReq: - chanSeries1.filterResp <- syncer2Chans - } - - // At this point, syncer1 should start to send out initial requests to - // query the chan IDs of the remote party. As the chunk size is 2, - // they'll need 2 rounds in order to fully reconcile the state. 
- for i := 0; i < chunkSize; i++ { - queryBatch(t, - msgChan1, msgChan2, - syncer1, syncer2, - chanSeries2, - false, 0, 0, - ) - } - - // At this stage syncer1 should now be sending over its initial - // GossipTimestampRange messages as it should be fully synced. - select { - case <-time.After(time.Second * 2): - t.Fatalf("didn't get msg from syncer1") - - case msgs := <-msgChan1: - for _, msg := range msgs { - // The message MUST be a GossipTimestampRange message. - _, ok := msg.(*lnwire.GossipTimestampRange) - if !ok { - t.Fatalf("wrong message: expected "+ - "QueryChannelRange for %T", msg) - } - - select { - case <-time.After(time.Second * 2): - t.Fatalf("node 2 didn't read msg") - - case syncer2.gossipMsgs <- msg: - - } - } - } -} - -// TestGossipSyncerAlreadySynced tests that if we attempt to synchronize two -// syncers that have the exact same state, then they'll skip straight to the -// final state and not perform any channel queries. -func TestGossipSyncerAlreadySynced(t *testing.T) { - t.Parallel() - - // We'll modify the chunk size to be a smaller value, so we can ensure - // our chunk parsing works properly. With this value we should get 3 - // queries: two full chunks, and one lingering chunk. - const chunkSize = 2 - const numChans = 3 - - // First, we'll create two GossipSyncer instances with a canned - // sendToPeer message to allow us to intercept their potential sends. - highestID := lnwire.ShortChannelID{ - BlockHeight: 1144, - } - msgChan1, syncer1, chanSeries1 := newTestSyncer( - highestID, defaultEncoding, chunkSize, - ) - syncer1.Start() - defer syncer1.Stop() - - msgChan2, syncer2, chanSeries2 := newTestSyncer( - highestID, defaultEncoding, chunkSize, - ) - syncer2.Start() - defer syncer2.Stop() - - // The channel state of both syncers will be identical. They should - // recognize this, and skip the sync phase below. 
- var syncer1Chans, syncer2Chans []lnwire.ShortChannelID - for i := numChans; i > 0; i-- { - shortChanID := lnwire.ShortChannelID{ - BlockHeight: highestID.BlockHeight - uint32(i), - } - syncer1Chans = append(syncer1Chans, shortChanID) - syncer2Chans = append(syncer2Chans, shortChanID) - } - - // We'll now kick off the test by allowing both side to send their - // QueryChannelRange messages to each other. - select { - case <-time.After(time.Second * 2): - t.Fatalf("didn't get msg from syncer1") - - case msgs := <-msgChan1: - for _, msg := range msgs { - // The message MUST be a QueryChannelRange message. - _, ok := msg.(*lnwire.QueryChannelRange) - if !ok { - t.Fatalf("wrong message: expected "+ - "QueryChannelRange for %T", msg) - } - - select { - case <-time.After(time.Second * 2): - t.Fatalf("node 2 didn't read msg") - - case syncer2.queryMsgs <- msg: - - } - } - } - select { - case <-time.After(time.Second * 2): - t.Fatalf("didn't get msg from syncer2") - - case msgs := <-msgChan2: - for _, msg := range msgs { - // The message MUST be a QueryChannelRange message. - _, ok := msg.(*lnwire.QueryChannelRange) - if !ok { - t.Fatalf("wrong message: expected "+ - "QueryChannelRange for %T", msg) - } - - select { - case <-time.After(time.Second * 2): - t.Fatalf("node 2 didn't read msg") - - case syncer1.queryMsgs <- msg: - - } - } - } - - // We'll now send back the range each side should send over: the set of - // channels they already know about. - select { - case <-time.After(time.Second * 2): - t.Fatalf("no query recvd") - - case <-chanSeries1.filterRangeReqs: - // We'll send all the channels that it should know of. - chanSeries1.filterRangeResp <- syncer1Chans - } - select { - case <-time.After(time.Second * 2): - t.Fatalf("no query recvd") - - case <-chanSeries2.filterRangeReqs: - // We'll send back all the channels that it should know of. - chanSeries2.filterRangeResp <- syncer2Chans - } - - // Next, we'll thread through the replies of both parties. 
As the chunk - // size is 2, and they both know of 3 channels, it'll take two around - // and two chunks. - for i := 0; i < chunkSize; i++ { - select { - case <-time.After(time.Second * 2): - t.Fatalf("didn't get msg from syncer1") - - case msgs := <-msgChan1: - for _, msg := range msgs { - // The message MUST be a ReplyChannelRange message. - _, ok := msg.(*lnwire.ReplyChannelRange) - if !ok { - t.Fatalf("wrong message: expected "+ - "QueryChannelRange for %T", msg) - } - - select { - case <-time.After(time.Second * 2): - t.Fatalf("node 2 didn't read msg") - - case syncer2.gossipMsgs <- msg: - } - } - } - } - for i := 0; i < chunkSize; i++ { - select { - case <-time.After(time.Second * 2): - t.Fatalf("didn't get msg from syncer2") - - case msgs := <-msgChan2: - for _, msg := range msgs { - // The message MUST be a ReplyChannelRange message. - _, ok := msg.(*lnwire.ReplyChannelRange) - if !ok { - t.Fatalf("wrong message: expected "+ - "QueryChannelRange for %T", msg) - } - - select { - case <-time.After(time.Second * 2): - t.Fatalf("node 2 didn't read msg") - - case syncer1.gossipMsgs <- msg: - } - } - } - } - - // Now that both sides have the full responses, we'll send over the - // channels that they need to filter out. As both sides have the exact - // same set of channels, they should skip to the final state. - select { - case <-time.After(time.Second * 2): - t.Fatalf("no query recvd") - - case <-chanSeries1.filterReq: - chanSeries1.filterResp <- []lnwire.ShortChannelID{} - } - select { - case <-time.After(time.Second * 2): - t.Fatalf("no query recvd") - - case <-chanSeries2.filterReq: - chanSeries2.filterResp <- []lnwire.ShortChannelID{} - } - - // As both parties are already synced, the next message they send to - // each other should be the GossipTimestampRange message. 
- select { - case <-time.After(time.Second * 2): - t.Fatalf("didn't get msg from syncer1") - - case msgs := <-msgChan1: - for _, msg := range msgs { - // The message MUST be a GossipTimestampRange message. - _, ok := msg.(*lnwire.GossipTimestampRange) - if !ok { - t.Fatalf("wrong message: expected "+ - "QueryChannelRange for %T", msg) - } - - select { - case <-time.After(time.Second * 2): - t.Fatalf("node 2 didn't read msg") - - case syncer2.gossipMsgs <- msg: - - } - } - } - select { - case <-time.After(time.Second * 2): - t.Fatalf("didn't get msg from syncer1") - - case msgs := <-msgChan2: - for _, msg := range msgs { - // The message MUST be a GossipTimestampRange message. - _, ok := msg.(*lnwire.GossipTimestampRange) - if !ok { - t.Fatalf("wrong message: expected "+ - "QueryChannelRange for %T", msg) - } - - select { - case <-time.After(time.Second * 2): - t.Fatalf("node 2 didn't read msg") - - case syncer1.gossipMsgs <- msg: - - } - } - } -} - -// TestGossipSyncerSyncTransitions ensures that the gossip syncer properly -// carries out its duties when accepting a new sync transition request. 
-func TestGossipSyncerSyncTransitions(t *testing.T) { - t.Parallel() - - assertMsgSent := func(t *testing.T, msgChan chan []lnwire.Message, - msg lnwire.Message) { - - t.Helper() - - var msgSent lnwire.Message - select { - case msgs := <-msgChan: - if len(msgs) != 1 { - t.Fatal("expected to send a single message at "+ - "a time, got %d", len(msgs)) - } - msgSent = msgs[0] - case <-time.After(time.Second): - t.Fatalf("expected to send %T message", msg) - } - - if !reflect.DeepEqual(msgSent, msg) { - t.Fatalf("expected to send message: %v\ngot: %v", - spew.Sdump(msg), spew.Sdump(msgSent)) - } - } - - tests := []struct { - name string - entrySyncType SyncerType - finalSyncType SyncerType - assert func(t *testing.T, msgChan chan []lnwire.Message, - syncer *GossipSyncer) - }{ - { - name: "active to passive", - entrySyncType: ActiveSync, - finalSyncType: PassiveSync, - assert: func(t *testing.T, msgChan chan []lnwire.Message, - g *GossipSyncer) { - - // When transitioning from active to passive, we - // should expect to see a new local update - // horizon sent to the remote peer indicating - // that it would not like to receive any future - // updates. - assertMsgSent(t, msgChan, &lnwire.GossipTimestampRange{ - FirstTimestamp: uint32(zeroTimestamp.Unix()), - TimestampRange: 0, - }) - - syncState := g.syncState() - if syncState != chansSynced { - t.Fatalf("expected syncerState %v, "+ - "got %v", chansSynced, syncState) - } - }, - }, - { - name: "passive to active", - entrySyncType: PassiveSync, - finalSyncType: ActiveSync, - assert: func(t *testing.T, msgChan chan []lnwire.Message, - g *GossipSyncer) { - - // When transitioning from historical to active, - // we should expect to see a new local update - // horizon sent to the remote peer indicating - // that it would like to receive any future - // updates. 
- firstTimestamp := uint32(time.Now().Unix()) - assertMsgSent(t, msgChan, &lnwire.GossipTimestampRange{ - FirstTimestamp: firstTimestamp, - TimestampRange: math.MaxUint32, - }) - - syncState := g.syncState() - if syncState != chansSynced { - t.Fatalf("expected syncerState %v, "+ - "got %v", chansSynced, syncState) - } - }, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - // We'll start each test by creating our syncer. We'll - // initialize it with a state of chansSynced, as that's - // the only time when it can process sync transitions. - msgChan, syncer, _ := newTestSyncer( - lnwire.ShortChannelID{ - BlockHeight: latestKnownHeight, - }, - defaultEncoding, defaultChunkSize, - ) - syncer.setSyncState(chansSynced) - - // We'll set the initial syncType to what the test - // demands. - syncer.setSyncType(test.entrySyncType) - - // We'll then start the syncer in order to process the - // request. - syncer.Start() - defer syncer.Stop() - - syncer.ProcessSyncTransition(test.finalSyncType) - - // The syncer should now have the expected final - // SyncerType that the test expects. - syncType := syncer.SyncType() - if syncType != test.finalSyncType { - t.Fatalf("expected syncType %v, got %v", - test.finalSyncType, syncType) - } - - // Finally, we'll run a set of assertions for each test - // to ensure the syncer performed its expected duties - // after processing its sync transition. - test.assert(t, msgChan, syncer) - }) - } -} - -// TestGossipSyncerHistoricalSync tests that a gossip syncer can perform a -// historical sync with the remote peer. -func TestGossipSyncerHistoricalSync(t *testing.T) { - t.Parallel() - - // We'll create a new gossip syncer and manually override its state to - // chansSynced. This is necessary as the syncer can only process - // historical sync requests in this state. 
- msgChan, syncer, _ := newTestSyncer( - lnwire.ShortChannelID{BlockHeight: latestKnownHeight}, - defaultEncoding, defaultChunkSize, - ) - syncer.setSyncType(PassiveSync) - syncer.setSyncState(chansSynced) - - syncer.Start() - defer syncer.Stop() - - syncer.historicalSync() - - // We should expect to see a single lnwire.QueryChannelRange message be - // sent to the remote peer with a FirstBlockHeight of 0. - expectedMsg := &lnwire.QueryChannelRange{ - FirstBlockHeight: 0, - NumBlocks: math.MaxUint32, - } - - select { - case msgs := <-msgChan: - if len(msgs) != 1 { - t.Fatalf("expected to send a single "+ - "lnwire.QueryChannelRange message, got %d", - len(msgs)) - } - if !reflect.DeepEqual(msgs[0], expectedMsg) { - t.Fatalf("expected to send message: %v\ngot: %v", - spew.Sdump(expectedMsg), spew.Sdump(msgs[0])) - } - case <-time.After(time.Second): - t.Fatalf("expected to send a lnwire.QueryChannelRange message") - } -} - -// TestGossipSyncerSyncedSignal ensures that we receive a signal when a gossip -// syncer reaches its terminal chansSynced state. -func TestGossipSyncerSyncedSignal(t *testing.T) { - t.Parallel() - - // We'll create a new gossip syncer and manually override its state to - // chansSynced. - _, syncer, _ := newTestSyncer( - lnwire.NewShortChanIDFromInt(10), defaultEncoding, - defaultChunkSize, - ) - syncer.setSyncState(chansSynced) - - // We'll go ahead and request a signal to be notified of when it reaches - // this state. - signalChan := syncer.ResetSyncedSignal() - - // Starting the gossip syncer should cause the signal to be delivered. - syncer.Start() - - select { - case <-signalChan: - case <-time.After(time.Second): - t.Fatal("expected to receive chansSynced signal") - } - - syncer.Stop() - - // We'll try this again, but this time we'll request the signal after - // the syncer is active and has already reached its chansSynced state. 
- _, syncer, _ = newTestSyncer( - lnwire.NewShortChanIDFromInt(10), defaultEncoding, - defaultChunkSize, - ) - - syncer.setSyncState(chansSynced) - - syncer.Start() - defer syncer.Stop() - - signalChan = syncer.ResetSyncedSignal() - - // The signal should be delivered immediately. - select { - case <-signalChan: - case <-time.After(time.Second): - t.Fatal("expected to receive chansSynced signal") - } -} diff --git a/lnd/doc.go b/lnd/doc.go deleted file mode 100644 index 4e2eb511..00000000 --- a/lnd/doc.go +++ /dev/null @@ -1 +0,0 @@ -package lnd diff --git a/lnd/docker/README.md b/lnd/docker/README.md deleted file mode 100644 index a6343622..00000000 --- a/lnd/docker/README.md +++ /dev/null @@ -1,337 +0,0 @@ -This document is written for people who are eager to do something with -the Lightning Network Daemon (`lnd`). This folder uses `docker-compose` to -package `lnd` and `btcd` together to make deploying the two daemons as easy as -typing a few commands. All configuration between `lnd` and `btcd` are handled -automatically by their `docker-compose` config file. - -### Prerequisites -Name | Version ---------|--------- -docker-compose | 1.9.0 -docker | 1.13.0 - -### Table of content - * [Create lightning network cluster](#create-lightning-network-cluster) - * [Connect to faucet lightning node](#connect-to-faucet-lightning-node) - * [Questions](#questions) - -### Create lightning network cluster -This section describes a workflow on `simnet`, a development/test network -that's similar to Bitcoin Core's `regtest` mode. In `simnet` mode blocks can be -generated at will, as the difficulty is very low. This makes it an ideal -environment for testing as one doesn't need to wait tens of minutes for blocks -to arrive in order to test channel related functionality. Additionally, it's -possible to spin up an arbitrary number of `lnd` instances within containers to -create a mini development cluster. All state is saved between instances using a -shared volume. 
- -Current workflow is big because we recreate the whole network by ourselves, -next versions will use the started `btcd` bitcoin node in `testnet` and -`faucet` wallet from which you will get the bitcoins. - -In the workflow below, we describe the steps required to recreate the following -topology, and send a payment from `Alice` to `Bob`. -``` -+ ----- + + --- + -| Alice | <--- channel ---> | Bob | <--- Bob and Alice are the lightning network daemons which -+ ----- + + --- + create channels and interact with each other using the - | | Bitcoin network as source of truth. - | | - + - - - - - + - - - - - - + - | - + --------------- + - | Bitcoin network | <--- In the current scenario for simplicity we create only one - + --------------- + "btcd" node which represents the Bitcoin network, in a - real situation Alice and Bob will likely be - connected to different Bitcoin nodes. -``` - -**General workflow is the following:** - - * Create a `btcd` node running on a private `simnet`. - * Create `Alice`, one of the `lnd` nodes in our simulation network. - * Create `Bob`, the other `lnd` node in our simulation network. - * Mine some blocks to send `Alice` some bitcoins. - * Open channel between `Alice` and `Bob`. - * Send payment from `Alice` to `Bob`. - * Close the channel between `Alice` and `Bob`. - * Check that on-chain `Bob` balance was changed. - -Start `btcd`, and then create an address for `Alice` that we'll directly mine -bitcoin into. -```bash -# Init bitcoin network env variable: -$ export NETWORK="simnet" - -# Create persistent volumes for alice and bob. 
-$ docker volume create simnet_lnd_alice -$ docker volume create simnet_lnd_bob - -# Run the "Alice" container and log into it: -$ docker-compose run -d --name alice --volume simnet_lnd_alice:/root/.lnd lnd -$ docker exec -i -t alice bash - -# Generate a new backward compatible nested p2sh address for Alice: -alice$ lncli --network=simnet newaddress np2wkh - -# Recreate "btcd" node and set Alice's address as mining address: -$ MINING_ADDRESS= docker-compose up -d btcd - -# Generate 400 blocks (we need at least "100 >=" blocks because of coinbase -# block maturity and "300 ~=" in order to activate segwit): -$ docker exec -it btcd /start-btcctl.sh generate 400 - -# Check that segwit is active: -$ docker exec -it btcd /start-btcctl.sh getblockchaininfo | grep -A 1 segwit -``` - -Check `Alice` balance: -``` -alice$ lncli --network=simnet walletbalance -``` - -Connect `Bob` node to `Alice` node. - -```bash -# Run "Bob" node and log into it: -$ docker-compose run -d --name bob --volume simnet_lnd_bob:/root/.lnd lnd -$ docker exec -i -t bob bash - -# Get the identity pubkey of "Bob" node: -bob$ lncli --network=simnet getinfo - -{ - ----->"identity_pubkey": "0343bc80b914aebf8e50eb0b8e445fc79b9e6e8e5e018fa8c5f85c7d429c117b38", - "alias": "", - "num_pending_channels": 0, - "num_active_channels": 0, - "num_inactive_channels": 0, - "num_peers": 0, - "block_height": 1215, - "block_hash": "7d0bc86ea4151ed3b5be908ea883d2ac3073263537bcf8ca2dca4bec22e79d50", - "synced_to_chain": true, - "testnet": false - "chains": [ - "bitcoin" - ] -} - -# Get the IP address of "Bob" node: -$ docker inspect bob | grep IPAddress - -# Connect "Alice" to the "Bob" node: -alice$ lncli --network=simnet connect @ - -# Check list of peers on "Alice" side: -alice$ lncli --network=simnet listpeers -{ - "peers": [ - { - "pub_key": "0343bc80b914aebf8e50eb0b8e445fc79b9e6e8e5e018fa8c5f85c7d429c117b38", - "address": "172.19.0.4:9735", - "bytes_sent": "357", - "bytes_recv": "357", - "sat_sent": "0", - 
"sat_recv": "0", - "inbound": true, - "ping_time": "0" - } - ] -} - -# Check list of peers on "Bob" side: -bob$ lncli --network=simnet listpeers -{ - "peers": [ - { - "pub_key": "03d0cd35b761f789983f3cfe82c68170cd1c3266b39220c24f7dd72ef4be0883eb", - "address": "172.19.0.3:51932", - "bytes_sent": "357", - "bytes_recv": "357", - "sat_sent": "0", - "sat_recv": "0", - "inbound": false, - "ping_time": "0" - } - ] -} -``` - -Create the `Alice<->Bob` channel. -```bash -# Open the channel with "Bob": -alice$ lncli --network=simnet openchannel --node_key= --local_amt=1000000 - -# Include funding transaction in block thereby opening the channel: -$ docker exec -it btcd /start-btcctl.sh generate 3 - -# Check that channel with "Bob" was opened: -alice$ lncli --network=simnet listchannels -{ - "channels": [ - { - "active": true, - "remote_pubkey": "0343bc80b914aebf8e50eb0b8e445fc79b9e6e8e5e018fa8c5f85c7d429c117b38", - "channel_point": "3511ae8a52c97d957eaf65f828504e68d0991f0276adff94c6ba91c7f6cd4275:0", - "chan_id": "1337006139441152", - "capacity": "1005000", - "local_balance": "1000000", - "remote_balance": "0", - "commit_fee": "8688", - "commit_weight": "600", - "fee_per_kw": "12000", - "unsettled_balance": "0", - "total_satoshis_sent": "0", - "total_satoshis_received": "0", - "num_updates": "0", - "pending_htlcs": [ - ], - "csv_delay": 4 - } - ] -} -``` - -Send the payment from `Alice` to `Bob`. -```bash -# Add invoice on "Bob" side: -bob$ lncli --network=simnet addinvoice --amt=10000 -{ - "r_hash": "", - "pay_req": "", -} - -# Send payment from "Alice" to "Bob": -alice$ lncli --network=simnet sendpayment --pay_req= - -# Check "Alice"'s channel balance -alice$ lncli --network=simnet channelbalance - -# Check "Bob"'s channel balance -bob$ lncli --network=simnet channelbalance -``` - -Now we have open channel in which we sent only one payment, let's imagine -that we sent lots of them and we'd now like to close the channel. Let's do -it! 
-```bash -# List the "Alice" channel and retrieve "channel_point" which represents -# the opened channel: -alice$ lncli --network=simnet listchannels -{ - "channels": [ - { - "active": true, - "remote_pubkey": "0343bc80b914aebf8e50eb0b8e445fc79b9e6e8e5e018fa8c5f85c7d429c117b38", - ---->"channel_point": "3511ae8a52c97d957eaf65f828504e68d0991f0276adff94c6ba91c7f6cd4275:0", - "chan_id": "1337006139441152", - "capacity": "1005000", - "local_balance": "990000", - "remote_balance": "10000", - "commit_fee": "8688", - "commit_weight": "724", - "fee_per_kw": "12000", - "unsettled_balance": "0", - "total_satoshis_sent": "10000", - "total_satoshis_received": "0", - "num_updates": "2", - "pending_htlcs": [ - ], - "csv_delay": 4 - } - ] -} - -# Channel point consists of two numbers separated by a colon. The first one -# is "funding_txid" and the second one is "output_index": -alice$ lncli --network=simnet closechannel --funding_txid= --output_index= - -# Include close transaction in a block thereby closing the channel: -$ docker exec -it btcd /start-btcctl.sh generate 3 - -# Check "Alice" on-chain balance was credited by her settled amount in the channel: -alice$ lncli --network=simnet walletbalance - -# Check "Bob" on-chain balance was credited with the funds he received in the -# channel: -bob$ lncli --network=simnet walletbalance -{ - "total_balance": "10000", - "confirmed_balance": "10000", - "unconfirmed_balance": "0" -} -``` - -### Connect to faucet lightning node -In order to be more confident with `lnd` commands I suggest you to try -to create a mini lightning network cluster ([Create lightning network cluster](#create-lightning-network-cluster)). - -In this section we will try to connect our node to the faucet/hub node -which we will create a channel with and send some amount of -bitcoins. 
The schema will be following: - -``` -+ ----- + + ------ + (1) + --- + -| Alice | <--- channel ---> | Faucet | <--- channel ---> | Bob | -+ ----- + + ------ + + --- + - | | | - | | | <--- (2) - + - - - - - - - - - - - - - + - - - - - - - - - - - - - + - | - + --------------- + - | Bitcoin network | <--- (3) - + --------------- + - - - (1) You may connect an additional node "Bob" and make the multihop - payment Alice->Faucet->Bob - - (2) "Faucet", "Alice" and "Bob" are the lightning network daemons which - create channels to interact with each other using the Bitcoin network - as source of truth. - - (3) In current scenario "Alice" and "Faucet" lightning network nodes - connect to different Bitcoin nodes. If you decide to connect "Bob" - to "Faucet" then the already created "btcd" node would be sufficient. -``` - -First of all you need to run `btcd` node in `testnet` and wait for it to be -synced with test network (`May the Force and Patience be with you`). -```bash -# Init bitcoin network env variable: -$ NETWORK="testnet" docker-compose up -``` - -After `btcd` synced, connect `Alice` to the `Faucet` node. - -The `Faucet` node address can be found at the [Faucet Lightning Community webpage](https://faucet.lightning.community). - -```bash -# Run "Alice" container and log into it: -$ docker-compose run -d --name alice lnd_btc; docker exec -i -t "alice" bash - -# Connect "Alice" to the "Faucet" node: -alice$ lncli --network=testnet connect @ -``` - -After a connection is achieved, the `Faucet` node should create the channel -and send some amount of bitcoins to `Alice`. - -**What you may do next?:** -- Send some amount to `Faucet` node back. -- Connect `Bob` node to the `Faucet` and make multihop payment (`Alice->Faucet->Bob`) -- Close channel with `Faucet` and check the onchain balance. 
- -### Building standalone docker images - -Instructions on how to build standalone docker images (for development or -production), outside of `docker-compose`, see the -[docker docs](../docs/DOCKER.md). - -### Questions -[![Irc](https://img.shields.io/badge/chat-on%20freenode-brightgreen.svg)](https://webchat.freenode.net/?channels=lnd) - -* How to see `alice` | `bob` | `btcd` logs? -```bash -docker-compose logs -``` diff --git a/lnd/docker/btcd/Dockerfile b/lnd/docker/btcd/Dockerfile deleted file mode 100644 index 5de4a389..00000000 --- a/lnd/docker/btcd/Dockerfile +++ /dev/null @@ -1,60 +0,0 @@ -FROM golang:1.12-alpine as builder - -LABEL maintainer="Olaoluwa Osuntokun " - -# Install build dependencies such as git and glide. -RUN apk add --no-cache git gcc musl-dev - -WORKDIR $GOPATH/src/github.com/btcsuite/btcd - -# Pin down btcd to a version that we know works with lnd. -ARG BTCD_VERSION=v0.20.1-beta - -# Grab and install the latest version of of btcd and all related dependencies. -RUN git clone https://github.com/btcsuite/btcd.git . \ - && git checkout $BTCD_VERSION \ - && GO111MODULE=on go install -v . ./cmd/... - -# Start a new image -FROM alpine as final - -# Expose mainnet ports (server, rpc) -EXPOSE 8333 8334 - -# Expose testnet ports (server, rpc) -EXPOSE 18333 18334 - -# Expose simnet ports (server, rpc) -EXPOSE 18555 18556 - -# Expose segnet ports (server, rpc) -EXPOSE 28901 28902 - -# Copy the compiled binaries from the builder image. -COPY --from=builder /go/bin/addblock /bin/ -COPY --from=builder /go/bin/btcctl /bin/ -COPY --from=builder /go/bin/btcd /bin/ -COPY --from=builder /go/bin/findcheckpoint /bin/ -COPY --from=builder /go/bin/gencerts /bin/ - -COPY "start-btcctl.sh" . -COPY "start-btcd.sh" . 
- -RUN apk add --no-cache \ - bash \ - ca-certificates \ -&& mkdir "/rpc" "/root/.btcd" "/root/.btcctl" \ -&& touch "/root/.btcd/btcd.conf" \ -&& chmod +x start-btcctl.sh \ -&& chmod +x start-btcd.sh \ -# Manually generate certificate and add all domains, it is needed to connect -# "btcctl" and "lnd" to "btcd" over docker links. -&& "/bin/gencerts" --host="*" --directory="/rpc" --force - -# Create a volume to house pregenerated RPC credentials. This will be -# shared with any lnd, btcctl containers so they can securely query btcd's RPC -# server. -# You should NOT do this before certificate generation! -# Otherwise manually generated certificate will be overridden with shared -# mounted volume! For more info read dockerfile "VOLUME" documentation. -VOLUME ["/rpc"] diff --git a/lnd/docker/btcd/start-btcctl.sh b/lnd/docker/btcd/start-btcctl.sh deleted file mode 100755 index 8bd5fda0..00000000 --- a/lnd/docker/btcd/start-btcctl.sh +++ /dev/null @@ -1,59 +0,0 @@ -#!/usr/bin/env bash - -# exit from script if error was raised. -set -e - -# error function is used within a bash function in order to send the error -# message directly to the stderr output and exit. -error() { - echo "$1" > /dev/stderr - exit 0 -} - -# return is used within bash function in order to return the value. -return() { - echo "$1" -} - -# set_default function gives the ability to move the setting of default -# env variable from docker file to the script thereby giving the ability to the -# user override it durin container start. -set_default() { - # docker initialized env variables with blank string and we can't just - # use -z flag as usually. - BLANK_STRING='""' - - VARIABLE="$1" - DEFAULT="$2" - - if [[ -z "$VARIABLE" || "$VARIABLE" == "$BLANK_STRING" ]]; then - - if [ -z "$DEFAULT" ]; then - error "You should specify default variable" - else - VARIABLE="$DEFAULT" - fi - fi - - return "$VARIABLE" -} - -# Set default variables if needed. 
-RPCUSER=$(set_default "$RPCUSER" "devuser") -RPCPASS=$(set_default "$RPCPASS" "devpass") -NETWORK=$(set_default "$NETWORK" "simnet") - -PARAMS="" -if [ "$NETWORK" != "mainnet" ]; then - PARAMS="--$NETWORK" -fi - -PARAMS=$(echo $PARAMS \ - "--rpccert=/rpc/rpc.cert" \ - "--rpcuser=$RPCUSER" \ - "--rpcpass=$RPCPASS" \ - "--rpcserver=localhost" \ -) - -PARAMS="$PARAMS $@" -exec btcctl $PARAMS diff --git a/lnd/docker/btcd/start-btcd.sh b/lnd/docker/btcd/start-btcd.sh deleted file mode 100755 index 4f5f7ba3..00000000 --- a/lnd/docker/btcd/start-btcd.sh +++ /dev/null @@ -1,74 +0,0 @@ -#!/usr/bin/env bash - -# exit from script if error was raised. -set -e - -# error function is used within a bash function in order to send the error -# message directly to the stderr output and exit. -error() { - echo "$1" > /dev/stderr - exit 0 -} - -# return is used within bash function in order to return the value. -return() { - echo "$1" -} - -# set_default function gives the ability to move the setting of default -# env variable from docker file to the script thereby giving the ability to the -# user override it durin container start. -set_default() { - # docker initialized env variables with blank string and we can't just - # use -z flag as usually. - BLANK_STRING='""' - - VARIABLE="$1" - DEFAULT="$2" - - if [[ -z "$VARIABLE" || "$VARIABLE" == "$BLANK_STRING" ]]; then - - if [ -z "$DEFAULT" ]; then - error "You should specify default variable" - else - VARIABLE="$DEFAULT" - fi - fi - - return "$VARIABLE" -} - -# Set default variables if needed. 
-RPCUSER=$(set_default "$RPCUSER" "devuser") -RPCPASS=$(set_default "$RPCPASS" "devpass") -DEBUG=$(set_default "$DEBUG" "info") -NETWORK=$(set_default "$NETWORK" "simnet") - -PARAMS="" -if [ "$NETWORK" != "mainnet" ]; then - PARAMS="--$NETWORK" -fi - -PARAMS=$(echo $PARAMS \ - "--debuglevel=$DEBUG" \ - "--rpcuser=$RPCUSER" \ - "--rpcpass=$RPCPASS" \ - "--datadir=/data" \ - "--logdir=/data" \ - "--rpccert=/rpc/rpc.cert" \ - "--rpckey=/rpc/rpc.key" \ - "--rpclisten=0.0.0.0" \ - "--txindex" -) - -# Set the mining flag only if address is non empty. -if [[ -n "$MINING_ADDRESS" ]]; then - PARAMS="$PARAMS --miningaddr=$MINING_ADDRESS" -fi - -# Add user parameters to command. -PARAMS="$PARAMS $@" - -# Print command and start bitcoin node. -echo "Command: btcd $PARAMS" -exec btcd $PARAMS diff --git a/lnd/docker/docker-compose.ltc.yml b/lnd/docker/docker-compose.ltc.yml deleted file mode 100644 index 5bf9c323..00000000 --- a/lnd/docker/docker-compose.ltc.yml +++ /dev/null @@ -1,55 +0,0 @@ -version: '2' -services: - # ltc is an image of litecoin node which used as base image for ltcd and - # ltcctl. The environment variables default values determined on stage of - # container start within starting script. - ltcd: - image: ltcd - container_name: ltcd - build: - context: ltcd/ - volumes: - - shared:/rpc - - litecoin:/data - environment: - - RPCUSER - - RPCPASS - - NETWORK - - DEBUG - - MINING_ADDRESS - entrypoint: ["./start-ltcd.sh"] - - lnd: - image: lnd - container_name: lnd_ltc - build: - context: ../ - dockerfile: dev.Dockerfile - environment: - - RPCUSER - - RPCPASS - - NETWORK - - CHAIN - - DEBUG - volumes: - - shared:/rpc - - lnd_ltc:/root/.lnd - entrypoint: ["./start-lnd.sh"] - links: - - "ltcd:blockchain" - -volumes: - # shared volume is need to store the btcd rpc certificates and use it within - # ltcctl and lnd containers. - shared: - driver: local - - # litecoin volume is needed for maintaining blockchain persistence - # during ltcd container recreation. 
- litecoin: - driver: local - - # lnd volume is used for persisting lnd application data and chain state - # during container lifecycle. - lnd_ltc: - driver: local diff --git a/lnd/docker/docker-compose.yml b/lnd/docker/docker-compose.yml deleted file mode 100644 index 61cd58a5..00000000 --- a/lnd/docker/docker-compose.yml +++ /dev/null @@ -1,55 +0,0 @@ -version: '2' -services: - # btc is an image of bitcoin node which used as base image for btcd and - # btccli. The environment variables default values determined on stage of - # container start within starting script. - btcd: - image: btcd - container_name: btcd - build: - context: btcd/ - volumes: - - shared:/rpc - - bitcoin:/data - environment: - - RPCUSER - - RPCPASS - - NETWORK - - DEBUG - - MINING_ADDRESS - entrypoint: ["./start-btcd.sh"] - - lnd: - image: lnd - container_name: lnd - build: - context: ../ - dockerfile: dev.Dockerfile - environment: - - RPCUSER - - RPCPASS - - NETWORK - - CHAIN - - DEBUG - volumes: - - shared:/rpc - - lnd:/root/.lnd - entrypoint: ["./start-lnd.sh"] - links: - - "btcd:blockchain" - -volumes: - # shared volume is need to store the btcd rpc certificates and use it within - # btcctl and lnd containers. - shared: - driver: local - - # bitcoin volume is needed for maintaining blockchain persistence - # during btcd container recreation. - bitcoin: - driver: local - - # lnd volume is used for persisting lnd application data and chain state - # during container lifecycle. - lnd: - driver: local diff --git a/lnd/docker/lnd/start-lnd.sh b/lnd/docker/lnd/start-lnd.sh deleted file mode 100755 index c7bfe305..00000000 --- a/lnd/docker/lnd/start-lnd.sh +++ /dev/null @@ -1,69 +0,0 @@ -#!/usr/bin/env bash - -# exit from script if error was raised. -set -e - -# error function is used within a bash function in order to send the error -# message directly to the stderr output and exit. 
-error() { - echo "$1" > /dev/stderr - exit 0 -} - -# return is used within bash function in order to return the value. -return() { - echo "$1" -} - -# set_default function gives the ability to move the setting of default -# env variable from docker file to the script thereby giving the ability to the -# user override it during container start. -set_default() { - # docker initialized env variables with blank string and we can't just - # use -z flag as usually. - BLANK_STRING='""' - - VARIABLE="$1" - DEFAULT="$2" - - if [[ -z "$VARIABLE" || "$VARIABLE" == "$BLANK_STRING" ]]; then - - if [ -z "$DEFAULT" ]; then - error "You should specify default variable" - else - VARIABLE="$DEFAULT" - fi - fi - - return "$VARIABLE" -} - -# Set default variables if needed. -RPCUSER=$(set_default "$RPCUSER" "devuser") -RPCPASS=$(set_default "$RPCPASS" "devpass") -DEBUG=$(set_default "$DEBUG" "debug") -NETWORK=$(set_default "$NETWORK" "simnet") -CHAIN=$(set_default "$CHAIN" "bitcoin") -BACKEND="btcd" -HOSTNAME=$(hostname) -if [[ "$CHAIN" == "litecoin" ]]; then - BACKEND="ltcd" -fi - -# CAUTION: DO NOT use the --noseedback for production/mainnet setups, ever! -# Also, setting --rpclisten to $HOSTNAME will cause it to listen on an IP -# address that is reachable on the internal network. If you do this outside of -# docker, this might be a security concern! 
- -exec lnd \ - --noseedbackup \ - "--$CHAIN.active" \ - "--$CHAIN.$NETWORK" \ - "--$CHAIN.node"="btcd" \ - "--$BACKEND.rpccert"="/rpc/rpc.cert" \ - "--$BACKEND.rpchost"="blockchain" \ - "--$BACKEND.rpcuser"="$RPCUSER" \ - "--$BACKEND.rpcpass"="$RPCPASS" \ - "--rpclisten=$HOSTNAME:10009" \ - --debuglevel="$DEBUG" \ - "$@" diff --git a/lnd/docker/ltcd/Dockerfile b/lnd/docker/ltcd/Dockerfile deleted file mode 100644 index e82ee9d0..00000000 --- a/lnd/docker/ltcd/Dockerfile +++ /dev/null @@ -1,49 +0,0 @@ -FROM golang:1.12-alpine as builder - -LABEL maintainer="Olaoluwa Osuntokun " - -# Grab and install the latest version of roasbeef's fork of ltcd and all -# related dependencies. -WORKDIR $GOPATH/src/github.com/ltcsuite/ltcd -RUN apk add --no-cache --update alpine-sdk git -RUN git clone https://github.com/ltcsuite/ltcd ./ -RUN GO111MODULE=on go install -v . ./cmd/... -RUN GO111MODULE=on go install . ./cmd/ltcctl ./cmd/gencerts - -# Start a new image -FROM alpine as final - -# Expose mainnet ports (server, rpc) -EXPOSE 9333 9334 - -# Expose testnet ports (server, rpc) -EXPOSE 19334 19335 - -# Expose simnet ports (server, rpc) -EXPOSE 18555 18556 - -# Copy the compiled binaries from the builder image. -COPY --from=builder /go/bin/ltcctl /bin/ -COPY --from=builder /go/bin/ltcd /bin/ -COPY --from=builder /go/bin/gencerts /bin/ - -COPY "start-ltcctl.sh" . -COPY "start-ltcd.sh" . - -RUN apk add --no-cache \ - bash \ - ca-certificates \ -&& chmod +x start-ltcctl.sh \ -&& chmod +x start-ltcd.sh \ -&& mkdir "/rpc" "/root/.ltcd" "/root/.ltcctl" \ -&& touch "/root/.ltcd/ltcd.conf" \ -# "ltcctl" and "lnd" to "ltcd" over docker links. -&& "/bin/gencerts" --host="*" --directory="/rpc" --force - -# Create a volume to house pregenerated RPC credentials. This will be -# shared with any lnd, btcctl containers so they can securely query ltcd's RPC -# server. -# You should NOT do this before certificate generation! 
-# Otherwise manually generated certificate will be overridden with shared -# mounted volume! For more info read dockerfile "VOLUME" documentation. -VOLUME ["/rpc"] diff --git a/lnd/docker/ltcd/start-ltcctl.sh b/lnd/docker/ltcd/start-ltcctl.sh deleted file mode 100755 index 2888ab9c..00000000 --- a/lnd/docker/ltcd/start-ltcctl.sh +++ /dev/null @@ -1,59 +0,0 @@ -#!/usr/bin/env bash - -# exit from script if error was raised. -set -e - -# error function is used within a bash function in order to send the error -# message directly to the stderr output and exit. -error() { - echo "$1" > /dev/stderr - exit 0 -} - -# return is used within bash function in order to return the value. -return() { - echo "$1" -} - -# set_default function gives the ability to move the setting of default -# env variable from docker file to the script thereby giving the ability to the -# user override it durin container start. -set_default() { - # docker initialized env variables with blank string and we can't just - # use -z flag as usually. - BLANK_STRING='""' - - VARIABLE="$1" - DEFAULT="$2" - - if [[ -z "$VARIABLE" || "$VARIABLE" == "$BLANK_STRING" ]]; then - - if [ -z "$DEFAULT" ]; then - error "You should specify default variable" - else - VARIABLE="$DEFAULT" - fi - fi - - return "$VARIABLE" -} - -# Set default variables if needed. -RPCUSER=$(set_default "$RPCUSER" "devuser") -RPCPASS=$(set_default "$RPCPASS" "devpass") -NETWORK=$(set_default "$NETWORK" "simnet") - -PARAMS="" -if [ "$NETWORK" != "mainnet" ]; then - PARAMS="--$NETWORK" -fi - -PARAMS=$(echo $PARAMS \ - "--rpccert=/rpc/rpc.cert" \ - "--rpcuser=$RPCUSER" \ - "--rpcpass=$RPCPASS" \ - "--rpcserver=localhost" \ -) - -PARAMS="$PARAMS $@" -exec ltcctl $PARAMS diff --git a/lnd/docker/ltcd/start-ltcd.sh b/lnd/docker/ltcd/start-ltcd.sh deleted file mode 100755 index b6c6d699..00000000 --- a/lnd/docker/ltcd/start-ltcd.sh +++ /dev/null @@ -1,74 +0,0 @@ -#!/usr/bin/env bash - -# exit from script if error was raised. 
-set -e - -# error function is used within a bash function in order to send the error -# message directly to the stderr output and exit. -error() { - echo "$1" > /dev/stderr - exit 0 -} - -# return is used within bash function in order to return the value. -return() { - echo "$1" -} - -# set_default function gives the ability to move the setting of default -# env variable from docker file to the script thereby giving the ability to the -# user override it durin container start. -set_default() { - # docker initialized env variables with blank string and we can't just - # use -z flag as usually. - BLANK_STRING='""' - - VARIABLE="$1" - DEFAULT="$2" - - if [[ -z "$VARIABLE" || "$VARIABLE" == "$BLANK_STRING" ]]; then - - if [ -z "$DEFAULT" ]; then - error "You should specify default variable" - else - VARIABLE="$DEFAULT" - fi - fi - - return "$VARIABLE" -} - -# Set default variables if needed. -RPCUSER=$(set_default "$RPCUSER" "devuser") -RPCPASS=$(set_default "$RPCPASS" "devpass") -DEBUG=$(set_default "$DEBUG" "info") -NETWORK=$(set_default "$NETWORK" "simnet") - -PARAMS="" -if [ "$NETWORK" != "mainnet" ]; then - PARAMS="--$NETWORK" -fi - -PARAMS=$(echo $PARAMS \ - "--debuglevel=$DEBUG" \ - "--rpcuser=$RPCUSER" \ - "--rpcpass=$RPCPASS" \ - "--datadir=/data" \ - "--logdir=/data" \ - "--rpccert=/rpc/rpc.cert" \ - "--rpckey=/rpc/rpc.key" \ - "--rpclisten=0.0.0.0" \ - "--txindex" -) - -# Set the mining flag only if address is non empty. -if [[ -n "$MINING_ADDRESS" ]]; then - PARAMS="$PARAMS --miningaddr=$MINING_ADDRESS" -fi - -# Add user parameters to command. -PARAMS="$PARAMS $@" - -# Print command and start bitcoin node. -echo "Command: ltcd $PARAMS" -exec ltcd $PARAMS diff --git a/lnd/docs/DOCKER.md b/lnd/docs/DOCKER.md deleted file mode 100644 index 700f5677..00000000 --- a/lnd/docs/DOCKER.md +++ /dev/null @@ -1,110 +0,0 @@ -# Docker Instructions - -There are two flavors of Dockerfiles available: - - `Dockerfile`: Used for production builds. 
Checks out the source code from - GitHub during build. The build argument `--build-arg checkout=v0.x.x-beta` - can be used to specify what git tag or commit to check out before building. - - `dev.Dockerfile` Used for development or testing builds. Uses the local code - when building and allows local changes to be tested more easily. - -## Development/testing - -To build a standalone development image from the local source directory, use the -following command: - -``` -$ docker build --tag=myrepository/lnd-dev -f dev.Dockerfile . -``` - -There is also a `docker-compose` setup available for development or testing that -spins up a `btcd` backend alongside `lnd`. Check out the documentation at -[docker/README.md](../docker/README.md) to learn more about how to use that -setup to create a small local Lightning Network. - -## Production - -To use Docker in a production environment, you can run `lnd` by creating a -Docker container, adding the appropriate command-line options as parameters. - -You first need to build the `lnd` docker image: - -``` -$ docker build --tag=myrepository/lnd --build-arg checkout=v0.11.1-beta . -``` - -It is recommended that you checkout the latest released tag. - -You can continue by creating and running the container: - -``` -$ docker run lnd [command-line options] -``` - -Note: there currently are no automated docker image builds available. - -## Volumes - -A Docker volume will be created with your `.lnd` directory automatically, and will -persist through container restarts. 
- -You can also optionally manually specify a local folder to be used as a volume: - -``` -$ docker create --name=mylndcontainer -v /media/lnd-docker/:/root/.lnd myrepository/lnd [command-line options] -``` - -## Example - -Here is an example testnet `lnd` that uses Neutrino: - -``` -$ docker run --name lnd-testnet myrepository/lnd --bitcoin.active --bitcoin.testnet --bitcoin.node=neutrino --neutrino.connect=faucet.lightning.community -``` - -Create a wallet (and write down the seed): - -``` -$ docker exec -it lnd-testnet lncli create -``` - -Confirm `lnd` has begun to synchronize: - -``` -$ docker logs lnd-testnet -[snipped] -2018-05-01 02:28:01.201 [INF] RPCS: RPC server listening on 127.0.0.1:10009 -2018-05-01 02:28:01.201 [INF] LTND: Waiting for chain backend to finish sync, start_height=2546 -2018-05-01 02:28:01.201 [INF] RPCS: gRPC proxy started at 127.0.0.1:8080 -2018-05-01 02:28:08.999 [INF] LNWL: Caught up to height 10000 -2018-05-01 02:28:09.872 [INF] BTCN: Processed 10547 blocks in the last 10.23s (height 10547, 2012-05-28 05:02:32 +0000 UTC) -``` - -This is a simple example, it is possible to use any command-line options necessary -to expose RPC ports, use `btcd` or `bitcoind`, or add additional chains. - -## LND Development and Testing - -To test the Docker production image locally, run the following from -the project root: - -``` -$ docker build . -t myrepository/lnd:master -``` - -To choose a specific branch or tag instead, use the "checkout" build-arg. For example, to build the latest commits in master: - -``` -$ docker build . --build-arg checkout=v0.8.0-beta -t myrepository/lnd:v0.8.0-beta -``` - -To build the image using the most current tag: - -``` -$ docker build . 
--build-arg checkout=$(git describe --tags `git rev-list --tags --max-count=1`) -t myrepository/lnd:latest-tag -``` - -Once the image has been built and tagged locally, start the container: - -``` -docker run --name=lnd-testnet -it myrepository/lnd:latest-tag --bitcoin.active --bitcoin.testnet --bitcoin.node=neutrino --neutrino.connect=faucet.lightning.community -``` diff --git a/lnd/docs/INSTALL.md b/lnd/docs/INSTALL.md deleted file mode 100644 index 4ee1caa1..00000000 --- a/lnd/docs/INSTALL.md +++ /dev/null @@ -1,481 +0,0 @@ -# Table of Contents -* [Installation](#installation) - * [Preliminaries](#preliminaries) - * [Installing lnd](#installing-lnd) -* [Available Backend Operating Modes](#available-backend-operating-modes) - * [btcd Options](#btcd-options) - * [Neutrino Options](#neutrino-options) - * [Bitcoind Options](#bitcoind-options) - * [Using btcd](#using-btcd) - * [Installing btcd](#installing-btcd) - * [Starting btcd](#starting-btcd) - * [Running lnd using the btcd backend](#running-lnd-using-the-btcd-backend) - * [Using Neutrino](#using-neutrino) - * [Using bitcoind or litecoind](#using-bitcoind-or-litecoind) -* [Creating a Wallet](#creating-a-wallet) -* [Macaroons](#macaroons) -* [Network Reachability](#network-reachability) -* [Simnet vs. Testnet Development](#simnet-vs-testnet-development) -* [Creating an lnd.conf (Optional)](#creating-an-lndconf-optional) - -# Installation - -### Preliminaries - In order to work with [`lnd`](https://github.com/lightningnetwork/lnd), the - following build dependencies are required: - - * **Go:** `lnd` is written in Go. To install, run one of the following commands: - - - **Note**: The minimum version of Go supported is Go 1.13. We recommend that - users use the latest version of Go, which at the time of writing is - [`1.15`](https://blog.golang.org/go1.15). 
- - - On Linux: - - (x86-64) - ``` - wget https://dl.google.com/go/go1.13.linux-amd64.tar.gz - sha256sum go1.13.linux-amd64.tar.gz | awk -F " " '{ print $1 }' - ``` - - The final output of the command above should be - `68a2297eb099d1a76097905a2ce334e3155004ec08cdea85f24527be3c48e856`. If it - isn't, then the target REPO HAS BEEN MODIFIED, and you shouldn't install - this version of Go. If it matches, then proceed to install Go: - ``` - tar -C /usr/local -xzf go1.13.linux-amd64.tar.gz - export PATH=$PATH:/usr/local/go/bin - ``` - - (ARMv6) - ``` - wget https://dl.google.com/go/go1.13.linux-armv6l.tar.gz - sha256sum go1.13.linux-armv6l.tar.gz | awk -F " " '{ print $1 }' - ``` - - The final output of the command above should be - `931906d67cae1222f501e7be26e0ee73ba89420be0c4591925901cb9a4e156f0`. If it - isn't, then the target REPO HAS BEEN MODIFIED, and you shouldn't install - this version of Go. If it matches, then proceed to install Go: - ``` - tar -C /usr/local -xzf go1.13.linux-armv6l.tar.gz - export PATH=$PATH:/usr/local/go/bin - ``` - - On Mac OS X: - ``` - brew install go@1.13 - ``` - - On FreeBSD: - ``` - pkg install go - ``` - - Alternatively, one can download the pre-compiled binaries hosted on the - [Golang download page](https://golang.org/dl/). If one seeks to install - from source, then more detailed installation instructions can be found - [here](https://golang.org/doc/install). - - At this point, you should set your `$GOPATH` environment variable, which - represents the path to your workspace. By default, `$GOPATH` is set to - `~/go`. You will also need to add `$GOPATH/bin` to your `PATH`. This ensures - that your shell will be able to detect the binaries you install. - - ```bash - export GOPATH=~/gocode - export PATH=$PATH:$GOPATH/bin - ``` - - We recommend placing the above in your .bashrc or in a setup script so that - you can avoid typing this every time you open a new terminal window. 
- - * **Go modules:** This project uses [Go modules](https://github.com/golang/go/wiki/Modules) - to manage dependencies as well as to provide *reproducible builds*. - - Usage of Go modules (with Go 1.13) means that you no longer need to clone - `lnd` into your `$GOPATH` for development purposes. Instead, your `lnd` - repo can now live anywhere! - -### Installing lnd - -With the preliminary steps completed, to install `lnd`, `lncli`, and all -related dependencies run the following commands: -``` -git clone https://github.com/lightningnetwork/lnd -cd lnd -make install -``` - -The command above will install the current _master_ branch of `lnd`. If you -wish to install a tagged release of `lnd` (as the master branch can at times be -unstable), then [visit then release page to locate the latest -release](https://github.com/lightningnetwork/lnd/releases). Assuming the name -of the release is `v0.x.x`, then you can compile this release from source with -a small modification to the above command: -``` -git clone https://github.com/lightningnetwork/lnd -cd lnd -git checkout v0.x.x -make install -``` - - -**NOTE**: Our instructions still use the `$GOPATH` directory from prior -versions of Go, but with Go 1.13, it's now possible for `lnd` to live -_anywhere_ on your file system. - -For Windows WSL users, make will need to be referenced directly via -/usr/bin/make/, or alternatively by wrapping quotation marks around make, -like so: - -``` -/usr/bin/make && /usr/bin/make install - -"make" && "make" install -``` - -On FreeBSD, use gmake instead of make. - -Alternatively, if one doesn't wish to use `make`, then the `go` commands can be -used directly: -``` -GO111MODULE=on go install -v ./... -``` - -**Updating** - -To update your version of `lnd` to the latest version run the following -commands: -``` -cd $GOPATH/src/github.com/lightningnetwork/lnd -git pull -make clean && make && make install -``` - -On FreeBSD, use gmake instead of make. 
- -Alternatively, if one doesn't wish to use `make`, then the `go` commands can be -used directly: -``` -cd $GOPATH/src/github.com/lightningnetwork/lnd -git pull -GO111MODULE=on go install -v ./... -``` - -**Tests** - -To check that `lnd` was installed properly run the following command: -``` -make check -``` - -This command requires `bitcoind` (almost any version should do) to be available -in the system's `$PATH` variable. Otherwise some of the tests will fail. - -# Available Backend Operating Modes - -In order to run, `lnd` requires, that the user specify a chain backend. At the -time of writing of this document, there are three available chain backends: -`btcd`, `neutrino`, `bitcoind`. All including neutrino can run on mainnet with -an out of the box `lnd` instance. We don't require `--txindex` when running -with `bitcoind` or `btcd` but activating the `txindex` will generally make -`lnd` run faster. - -**NOTE: WE DO NOT FULLY SUPPORT PRUNED OPERATING MODES FOR FULL NODES.** It's -possible to run a node in a pruned mode and have it serve lnd, however one must -take care to ensure that `lnd` has all blocks on disk since the birth of the -wallet, and the age of the earliest channels (which were created around March -2018). - -The set of arguments for each of the backend modes is as follows: - -## btcd Options -``` -btcd: - --btcd.dir= The base directory that contains the node's data, logs, configuration file, etc. (default: /Users/roasbeef/Library/Application Support/Btcd) - --btcd.rpchost= The daemon's rpc listening address. If a port is omitted, then the default port for the selected chain parameters will be used. 
(default: localhost) - --btcd.rpcuser= Username for RPC connections - --btcd.rpcpass= Password for RPC connections - --btcd.rpccert= File containing the daemon's certificate file (default: /Users/roasbeef/Library/Application Support/Btcd/rpc.cert) - --btcd.rawrpccert= The raw bytes of the daemon's PEM-encoded certificate chain which will be used to authenticate the RPC connection. -``` - -## Neutrino Options -``` -neutrino: - -a, --neutrino.addpeer= Add a peer to connect with at startup - --neutrino.connect= Connect only to the specified peers at startup - --neutrino.maxpeers= Max number of inbound and outbound peers - --neutrino.banduration= How long to ban misbehaving peers. Valid time units are {s, m, h}. Minimum 1 second - --neutrino.banthreshold= Maximum allowed ban score before disconnecting and banning misbehaving peers. - --neutrino.useragentname= Used to help identify ourselves to other bitcoin peers. - --neutrino.useragentversion= Used to help identify ourselves to other bitcoin peers. -``` - -## Bitcoind Options -``` -bitcoind: - --bitcoind.dir= The base directory that contains the node's data, logs, configuration file, etc. (default: /Users/roasbeef/Library/Application Support/Bitcoin) - --bitcoind.rpchost= The daemon's rpc listening address. If a port is omitted, then the default port for the selected chain parameters will be used. (default: localhost) - --bitcoind.rpcuser= Username for RPC connections - --bitcoind.rpcpass= Password for RPC connections - --bitcoind.zmqpubrawblock= The address listening for ZMQ connections to deliver raw block notifications - --bitcoind.zmqpubrawtx= The address listening for ZMQ connections to deliver raw transaction notifications - --bitcoind.estimatemode= The fee estimate mode. Must be either "ECONOMICAL" or "CONSERVATIVE". (default: CONSERVATIVE) -``` - -## Using btcd - -### Installing btcd - -On FreeBSD, use gmake instead of make. 
- -To install btcd, run the following commands: - -Install **btcd**: -``` -make btcd -``` - -Alternatively, you can install [`btcd` directly from its -repo](https://github.com/btcsuite/btcd). - -### Starting btcd - -Running the following command will create `rpc.cert` and default `btcd.conf`. - -``` -btcd --testnet --rpcuser=REPLACEME --rpcpass=REPLACEME -``` -If you want to use `lnd` on testnet, `btcd` needs to first fully sync the -testnet blockchain. Depending on your hardware, this may take up to a few -hours. Note that adding `--txindex` is optional, as it will take longer to sync -the node, but then `lnd` will generally operate faster as it can hit the index -directly, rather than scanning blocks or BIP 158 filters for relevant items. - -(NOTE: It may take several minutes to find segwit-enabled peers.) - -While `btcd` is syncing you can check on its progress using btcd's `getinfo` -RPC command: -``` -btcctl --testnet --rpcuser=REPLACEME --rpcpass=REPLACEME getinfo -{ - "version": 120000, - "protocolversion": 70002, - "blocks": 1114996, - "timeoffset": 0, - "connections": 7, - "proxy": "", - "difficulty": 422570.58270815, - "testnet": true, - "relayfee": 0.00001, - "errors": "" -} -``` - -Additionally, you can monitor btcd's logs to track its syncing progress in real -time. - -You can test your `btcd` node's connectivity using the `getpeerinfo` command: -``` -btcctl --testnet --rpcuser=REPLACEME --rpcpass=REPLACEME getpeerinfo | more -``` - -### Running lnd using the btcd backend - -If you are on testnet, run this command after `btcd` has finished syncing. -Otherwise, replace `--bitcoin.testnet` with `--bitcoin.simnet`. If you are -installing `lnd` in preparation for the -[tutorial](https://dev.lightning.community/tutorial), you may skip this step. 
-``` -lnd --bitcoin.active --bitcoin.testnet --debuglevel=debug --btcd.rpcuser=kek --btcd.rpcpass=kek --externalip=X.X.X.X -``` - -## Using Neutrino - -In order to run `lnd` in its light client mode, you'll need to locate a -full-node which is capable of serving this new light client mode. `lnd` uses -[BIP 157](https://github.com/bitcoin/bips/blob/master/bip-0157.mediawiki) and [BIP -158](https://github.com/bitcoin/bips/blob/master/bip-0158.mediawiki) for its light client -mode. A public instance of such a node can be found at -`faucet.lightning.community`. - -To run lnd in neutrino mode, run `lnd` with the following arguments, (swapping -in `--bitcoin.simnet` if needed), and also your own `btcd` node if available: -``` -lnd --bitcoin.active --bitcoin.testnet --debuglevel=debug --bitcoin.node=neutrino --neutrino.connect=faucet.lightning.community -``` - - -## Using bitcoind or litecoind - -The configuration for bitcoind and litecoind are nearly identical, the -following steps can be mirrored with loss of generality to enable a litecoind -backend. Setup will be described in regards to `bitcoind`, but note that `lnd` -uses a distinct `litecoin.node=litecoind` argument and analogous -subconfigurations prefixed by `litecoind`. Note that adding `--txindex` is -optional, as it will take longer to sync the node, but then `lnd` will -generally operate faster as it can hit the index directly, rather than scanning -blocks or BIP 158 filters for relevant items. - -To configure your bitcoind backend for use with lnd, first complete and verify -the following: - -- Since `lnd` uses - [ZeroMQ](https://github.com/bitcoin/bitcoin/blob/master/doc/zmq.md) to - interface with `bitcoind`, *your `bitcoind` installation must be compiled with - ZMQ*. Note that if you installed `bitcoind` from source and ZMQ was not present, - then ZMQ support will be disabled, and `lnd` will quit on a `connection refused` error. 
- If you installed `bitcoind` via Homebrew in the past ZMQ may not be included - ([this has now been fixed](https://github.com/Homebrew/homebrew-core/pull/23088) - in the latest Homebrew recipe for bitcoin) -- Configure the `bitcoind` instance for ZMQ with `--zmqpubrawblock` and - `--zmqpubrawtx`. These options must each use their own unique address in order - to provide a reliable delivery of notifications (e.g. - `--zmqpubrawblock=tcp://127.0.0.1:28332` and - `--zmqpubrawtx=tcp://127.0.0.1:28333`). -- Start `bitcoind` running against testnet, and let it complete a full sync with - the testnet chain (alternatively, use `--bitcoind.regtest` instead). - -Here's a sample `bitcoin.conf` for use with lnd: -``` -testnet=1 -server=1 -daemon=1 -zmqpubrawblock=tcp://127.0.0.1:28332 -zmqpubrawtx=tcp://127.0.0.1:28333 -``` - -Once all of the above is complete, and you've confirmed `bitcoind` is fully -updated with the latest blocks on testnet, run the command below to launch -`lnd` with `bitcoind` as your backend (as with `bitcoind`, you can create an -`lnd.conf` to save these options, more info on that is described further -below): - -``` -lnd --bitcoin.active --bitcoin.testnet --debuglevel=debug --bitcoin.node=bitcoind --bitcoind.rpcuser=REPLACEME --bitcoind.rpcpass=REPLACEME --bitcoind.zmqpubrawblock=tcp://127.0.0.1:28332 --bitcoind.zmqpubrawtx=tcp://127.0.0.1:28333 --externalip=X.X.X.X -``` - -*NOTE:* -- The auth parameters `rpcuser` and `rpcpass` parameters can typically be - determined by `lnd` for a `bitcoind` instance running under the same user, - including when using cookie auth. In this case, you can exclude them from the - `lnd` options entirely. -- If you DO choose to explicitly pass the auth parameters in your `lnd.conf` or - command line options for `lnd` (`bitcoind.rpcuser` and `bitcoind.rpcpass` as - shown in example command above), you must also specify the - `bitcoind.zmqpubrawblock` and `bitcoind.zmqpubrawtx` options. 
Otherwise, `lnd` - will attempt to get the configuration from your `bitcoin.conf`. -- You must ensure the same addresses are used for the `bitcoind.zmqpubrawblock` - and `bitcoind.zmqpubrawtx` options passed to `lnd` as for the `zmqpubrawblock` - and `zmqpubrawtx` passed in the `bitcoind` options respectively. -- When running lnd and bitcoind on the same Windows machine, ensure you use - 127.0.0.1, not localhost, for all configuration options that require a TCP/IP - host address. If you use "localhost" as the host name, you may see extremely - slow inter-process-communication between lnd and the bitcoind backend. If lnd - is experiencing this issue, you'll see "Waiting for chain backend to finish - sync, start_height=XXXXXX" as the last entry in the console or log output, and - lnd will appear to hang. Normal lnd output will quickly show multiple - messages like this as lnd consumes blocks from bitcoind. -- Don't connect more than two or three instances of `lnd` to `bitcoind`. With - the default `bitcoind` settings, having more than one instance of `lnd`, or - `lnd` plus any application that consumes the RPC could cause `lnd` to miss - crucial updates from the backend. -- The default fee estimate mode in `bitcoind` is CONSERVATIVE. You can set - `bitcoind.estimatemode=ECONOMICAL` to change it into ECONOMICAL. Futhermore, - if you start `bitcoind` in `regtest`, this configuration won't take any effect. - - -# Creating a wallet -If `lnd` is being run for the first time, create a new wallet with: -``` -lncli create -``` -This will prompt for a wallet password, and optionally a cipher seed -passphrase. - -`lnd` will then print a 24 word cipher seed mnemonic, which can be used to -recover the wallet in case of data loss. The user should write this down and -keep in a safe place. - - -# Macaroons - -`lnd`'s authentication system is called **macaroons**, which are decentralized -bearer credentials allowing for delegation, attenuation, and other cool -features. 
You can learn more about them in Alex Akselrod's [writeup on -Github](https://github.com/lightningnetwork/lnd/issues/20). - -Running `lnd` for the first time will by default generate the `admin.macaroon`, -`read_only.macaroon`, and `macaroons.db` files that are used to authenticate -into `lnd`. They will be stored in the network directory (default: -`lnddir/data/chain/bitcoin/mainnet`) so that it's possible to use a distinct -password for mainnet, testnet, simnet, etc. Note that if you specified an -alternative data directory (via the `--datadir` argument), you will have to -additionally pass the updated location of the `admin.macaroon` file into `lncli` -using the `--macaroonpath` argument. - -To disable macaroons for testing, pass the `--no-macaroons` flag into *both* -`lnd` and `lncli`. - -# Network Reachability - -If you'd like to signal to other nodes on the network that you'll accept -incoming channels (as peers need to connect inbound to initiate a channel -funding workflow), then the `--externalip` flag should be set to your publicly -reachable IP address. - -# Simnet vs. Testnet Development - -If you are doing local development, such as for the tutorial, you'll want to -start both `btcd` and `lnd` in the `simnet` mode. Simnet is similar to regtest -in that you'll be able to instantly mine blocks as needed to test `lnd` -locally. In order to start either daemon in the `simnet` mode use `simnet` -instead of `testnet`, adding the `--bitcoin.simnet` flag instead of the -`--bitcoin.testnet` flag. - -Another relevant command line flag for local testing of new `lnd` developments -is the `--debughtlc` flag. When starting `lnd` with this flag, it'll be able to -automatically settle a special type of HTLC sent to it. This means that you -won't need to manually insert invoices in order to test payment connectivity. -To send this "special" HTLC type, include the `--debugsend` command at the end -of your `sendpayment` commands. 
- - -There are currently two primary ways to run `lnd`: one requires a local `btcd` -instance with the RPC service exposed, and the other uses a fully integrated -light client powered by [neutrino](https://github.com/pkt-cash/pktd/neutrino). - -# Creating an lnd.conf (Optional) - -Optionally, if you'd like to have a persistent configuration between `lnd` -launches, allowing you to simply type `lnd --bitcoin.testnet --bitcoin.active` -at the command line, you can create an `lnd.conf`. - -**On MacOS, located at:** -`/Users/[username]/Library/Application Support/Lnd/lnd.conf` - -**On Linux, located at:** -`~/.lnd/lnd.conf` - -Here's a sample `lnd.conf` for `btcd` to get you started: -``` -[Application Options] -debuglevel=trace -maxpendingchannels=10 - -[Bitcoin] -bitcoin.active=1 -``` - -Notice the `[Bitcoin]` section. This section houses the parameters for the -Bitcoin chain. `lnd` also supports Litecoin testnet4 (but not both BTC and LTC -at the same time), so when working with Litecoin be sure to set to parameters -for Litecoin accordingly. See a more detailed sample config file available -[here](https://github.com/lightningnetwork/lnd/blob/master/sample-lnd.conf) -and explore the other sections for node configuration, including `[Btcd]`, -`[Bitcoind]`, `[Neutrino]`, `[Ltcd]`, and `[Litecoind]` depending on which -chain and node type you're using. diff --git a/lnd/docs/MAKEFILE.md b/lnd/docs/MAKEFILE.md deleted file mode 100644 index 98532882..00000000 --- a/lnd/docs/MAKEFILE.md +++ /dev/null @@ -1,209 +0,0 @@ -Makefile -======== - -To build, verify, and install `lnd` from source, use the following -commands: -``` -make -make check -make install -``` - -The command `make check` requires `bitcoind` (almost any version should do) to -be available in the system's `$PATH` variable. Otherwise some of the tests will -fail. - -Developers -========== - -This document specifies all commands available from `lnd`'s `Makefile`. 
-The commands included handle: -- Installation of all go-related dependencies. -- Compilation and installation of `lnd` and `lncli`. -- Compilation and installation of `btcd` and `btcctl`. -- Running unit and integration suites. -- Testing, debugging, and flake hunting. -- Formatting and linting. - -Commands -======== - -- [`all`](#scratch) -- [`btcd`](#btcd) -- [`build`](#build) -- [`check`](#check) -- [`clean`](#clean) -- [`default`](#default) -- [`dep`](#dep) -- [`flake-unit`](#flake-unit) -- [`flakehunter`](#flakehunter) -- [`fmt`](#fmt) -- [`install`](#install) -- [`itest`](#itest) -- [`lint`](#lint) -- [`list`](#list) -- [`rpc`](#rpc) -- [`scratch`](#scratch) -- [`travis`](#travis) -- [`unit`](#unit) -- [`unit-cover`](#unit-cover) -- [`unit-race`](#unit-race) - -`all` ------ -Compiles, tests, and installs `lnd` and `lncli`. Equivalent to -[`scratch`](#scratch) [`check`](#check) [`install`](#install). - -`btcd` ------- -Ensures that the [`github.com/btcsuite/btcd`][btcd] repository is checked out -locally. Lastly, installs the version of -[`github.com/btcsuite/btcd`][btcd] specified in `Gopkg.toml` - -`build` -------- -Compiles the current source and vendor trees, creating `./lnd` and -`./lncli`. - -`check` -------- -Installs the version of [`github.com/btcsuite/btcd`][btcd] specified -in `Gopkg.toml`, then runs the unit tests followed by the integration -tests. - -Related: [`unit`](#unit) [`itest`](#itest) - -`clean` -------- -Removes compiled versions of both `./lnd` and `./lncli`, and removes the -`vendor` tree. - -`default` ---------- -Alias for [`scratch`](#scratch). - -`flake-unit` ------------- -Runs the unit test endlessly until a failure is detected. - -Arguments: -- `pkg=` -- `case=` -- `timeout=` - -Related: [`unit`](#unit) - -`flakehunter` -------------- -Runs the itegration test suite endlessly until a failure is detected. - -Arguments: -- `icase=` -- `timeout=` - -Related: [`itest`](#itest) - -`fmt` ------ -Runs `go fmt` on the entire project. 
- -`install` ---------- -Copies the compiled `lnd` and `lncli` binaries into `$GOPATH/bin`. - -`itest` -------- -Installs the version of [`github.com/btcsuite/btcd`][btcd] specified in -`Gopkg.toml`, builds the `./lnd` and `./lncli` binaries, then runs the -integration test suite. - -Arguments: -- `icase=` (the snake_case version of the testcase name field in the testCases slice (i.e. sweep_coins), not the test func name) -- `timeout=` - -`itest-parallel` ------- -Does the same as `itest` but splits the total set of tests into -`NUM_ITEST_TRANCHES` tranches (currently set to 6 by default, can be overwritten -by setting `tranches=Y`) and runs them in parallel. - -Arguments: -- `icase=`: The snake_case version of the testcase name field in the - testCases slice (i.e. `sweep_coins`, not the test func name) or any regular - expression describing a set of tests. -- `timeout=` -- `tranches=`: The number of parts/tranches to split the - total set of tests into. -- `parallel=`: The number of threads to run in parallel. Must - be greater or equal to `tranches`, otherwise undefined behavior is expected. - -`flakehunter-parallel` ------- -Runs the test specified by `icase` simultaneously `parallel` (default=6) times -until an error occurs. Useful for hunting flakes. - -Example: -```shell -$ make flakehunter-parallel icase='(data_loss_protection|channel_backup)' backend=neutrino -``` - -`lint` ------- -Ensures that [`gopkg.in/alecthomas/gometalinter.v1`][gometalinter] is -installed, then lints the project. - -`list` ------- -Lists all known make targets. - -`rpc` ------ -Compiles the `lnrpc` proto files. - -`scratch` ---------- -Compiles all dependencies and builds the `./lnd` and `./lncli` binaries. -Equivalent to [`lint`](#lint) [`btcd`](#btcd) -[`unit-race`](#unit-race). - -`unit` ------- -Runs the unit test suite. By default, this will run all known unit tests. 
- -Arguments: -- `pkg=` -- `case=` -- `timeout=` -- `log="stdlog[ ]"` prints logs to stdout - - `` can be `info` (default), `debug`, `trace`, `warn`, `error`, `critical`, or `off` - -`unit-cover` ------------- -Runs the unit test suite with test coverage, compiling the statisitics in -`profile.cov`. - -Arguments: -- `pkg=` -- `case=` -- `timeout=` -- `log="stdlog[ ]"` prints logs to stdout - - `` can be `info` (default), `debug`, `trace`, `warn`, `error`, `critical`, or `off` - -Related: [`unit`](#unit) - -`unit-race` ------------ -Runs the unit test suite with go's race detector. - -Arguments: -- `pkg=` -- `case=` -- `timeout=` -- `log="stdlog[ ]"` prints logs to stdout - - `` can be `info` (default), `debug`, `trace`, `warn`, `error`, `critical`, or `off` - -Related: [`unit`](#unit) - -[btcd]: https://github.com/btcsuite/btcd (github.com/btcsuite/btcd") -[gometalinter]: https://gopkg.in/alecthomas/gometalinter.v1 (gopkg.in/alecthomas/gometalinter.v1) -[goveralls]: https://github.com/mattn/goveralls (github.com/mattn/goveralls) diff --git a/lnd/docs/code_contribution_guidelines.md b/lnd/docs/code_contribution_guidelines.md deleted file mode 100644 index 3e069d15..00000000 --- a/lnd/docs/code_contribution_guidelines.md +++ /dev/null @@ -1,647 +0,0 @@ -### Table of Contents -1. [Overview](#Overview)
-2. [Minimum Recommended Skillset](#MinSkillset)
-3. [Required Reading](#ReqReading)
-4. [Development Practices](#DevelopmentPractices)
-4.1. [Share Early, Share Often](#ShareEarly)
-4.2. [Testing](#Testing)
-4.3. [Code Documentation and Commenting](#CodeDocumentation)
-4.4. [Model Git Commit Messages](#ModelGitCommitMessages)
-4.5. [Ideal Git Commit Structure](#IdealGitCommitStructure)
-4.6. [Code Spacing](#CodeSpacing)
-4.7. [Protobuf Compilation](#Protobuf)
-4.8. [Additional Style Constraints On Top of gofmt](#ExtraGoFmtStyle)
-4.9. [Pointing to Remote Dependant Branches in Go Modules](#ModulesReplace)
-4.10. [Use of Log Levels](#LogLevels)
-5. [Code Approval Process](#CodeApproval)
-5.1. [Code Review](#CodeReview)
-5.2. [Rework Code (if needed)](#CodeRework)
-5.3. [Acceptance](#CodeAcceptance)
-6. [Contribution Standards](#Standards)
-6.1. [Contribution Checklist](#Checklist)
-6.2. [Licensing of Contributions](#Licensing)
- - - -### 1. Overview - -Developing cryptocurrencies is an exciting endeavor that touches a wide variety -of areas such as wire protocols, peer-to-peer networking, databases, -cryptography, language interpretation (transaction scripts), adversarial -threat-modeling, and RPC systems. They also represent a radical shift to the -current fiscal system and as a result provide an opportunity to help reshape -the entire financial system. With the advent of the [Lightning Network -(LN)](https://lightning.network/), new layers are being constructed upon the -base blockchain layer which have the potential to alleviate many of the -limitations and constraints inherent in the design of blockchains. There are -few projects that offer this level of diversity and impact all in one code -base. - -However, as exciting as it is, one must keep in mind that cryptocurrencies -represent real money and introducing bugs and security vulnerabilities can have -far more dire consequences than in typical projects where having a small bug is -minimal by comparison. In the world of cryptocurrencies, even the smallest bug -in the wrong area can cost people a significant amount of money. For this -reason, the Lightning Network Daemon (`lnd`) has a formalized and rigorous -development process (heavily inspired by -[btcsuite](https://github.com/btcsuite)) which is outlined on this page. - -We highly encourage code contributions, however it is imperative that you adhere -to the guidelines established on this page. - - - -### 2. Minimum Recommended Skillset - -The following list is a set of core competencies that we recommend you possess -before you really start attempting to contribute code to the project. These are -not hard requirements as we will gladly accept code contributions as long as -they follow the guidelines set forth on this page. That said, if you don't have -the following basic qualifications you will likely find it quite difficult to -contribute to the core layers of Lightning. 
However, there are still a number -of low hanging fruit which can be tackled without having full competency in the -areas mentioned below. - -- A reasonable understanding of bitcoin at a high level (see the - [Required Reading](#ReqReading) section for the original white paper) -- A reasonable understanding of the Lightning Network at a high level -- Experience in some type of C-like language -- An understanding of data structures and their performance implications -- Familiarity with unit testing -- Debugging experience -- Ability to understand not only the area you are making a change in, but also - the code your change relies on, and the code which relies on your changed code - -Building on top of those core competencies, the recommended skill set largely -depends on the specific areas you are looking to contribute to. For example, -if you wish to contribute to the cryptography code, you should have a good -understanding of the various aspects involved with cryptography such as the -security and performance implications. - - - -### 3. Required Reading - -- [Effective Go](http://golang.org/doc/effective_go.html) - The entire `lnd` - project follows the guidelines in this document. For your code to be accepted, - it must follow the guidelines therein. -- [Original Satoshi Whitepaper](https://bitcoin.org/bitcoin.pdf) - This is the white paper that started it all. Having a solid - foundation to build on will make the code much more comprehensible. -- [Lightning Network Whitepaper](https://lightning.network/lightning-network-paper.pdf) - This is the white paper that kicked off the Layer 2 revolution. Having a good grasp of the concepts of Lightning will make the core logic within the daemon much more comprehensible: Bitcoin Script, off-chain blockchain protocols, payment channels, bi-directional payment channels, relative and absolute time-locks, commitment state revocations, and Segregated Witness. 
- - The original LN was written for a rather narrow audience, the paper may be a bit unapproachable to many. Thanks to the Bitcoin community, there exist many easily accessible supplemental resources which can help one see how all the pieces fit together from double-spend protection all the way up to commitment state transitions and Hash Time Locked Contracts (HTLCs): - - [Lightning Network Summary](https://lightning.network/lightning-network-summary.pdf) - - [Understanding the Lightning Network 3-Part series](https://bitcoinmagazine.com/articles/understanding-the-lightning-network-part-building-a-bidirectional-payment-channel-1464710791) - - [Deployable Lightning](https://github.com/ElementsProject/lightning/blob/master/doc/deployable-lightning.pdf) - - -Note that the core design of the Lightning Network has shifted over time as -concrete implementation and design has expanded our knowledge beyond the -original white paper. Therefore, specific information outlined in the resources -above may be a bit out of date. Many implementers are currently working on an -initial [Lightning Network Specifications](https://github.com/lightningnetwork/lightning-rfc). -Once the specification is finalized, it will be the most up-to-date -comprehensive document explaining the Lightning Network. As a result, it will -be recommended for newcomers to read first in order to get up to speed. - - - -### 4. Development Practices - -Developers are expected to work in their own trees and submit pull requests when -they feel their feature or bug fix is ready for integration into the master -branch. - - - -#### 4.1. Share Early, Share Often - -We firmly believe in the share early, share often approach. The basic premise -of the approach is to announce your plans **before** you start work, and once -you have started working, craft your changes into a stream of small and easily -reviewable commits. 
- -This approach has several benefits: - -- Announcing your plans to work on a feature **before** you begin work avoids - duplicate work -- It permits discussions which can help you achieve your goals in a way that is - consistent with the existing architecture -- It minimizes the chances of you spending time and energy on a change that - might not fit with the consensus of the community or existing architecture and - potentially be rejected as a result -- The quicker your changes are merged to master, the less time you will need to - spend rebasing and otherwise trying to keep up with the main code base - - - -#### 4.2. Testing - -One of the major design goals of all of `lnd`'s packages and the daemon itself is -to aim for a high degree of test coverage. This is financial software so bugs -and regressions in the core logic can cost people real money. For this reason -every effort must be taken to ensure the code is as accurate and bug-free as -possible. Thorough testing is a good way to help achieve that goal. - -Unless a new feature you submit is completely trivial, it will probably be -rejected unless it is also accompanied by adequate test coverage for both -positive and negative conditions. That is to say, the tests must ensure your -code works correctly when it is fed correct data as well as incorrect data -(error paths). - - -Go provides an excellent test framework that makes writing test code and -checking coverage statistics straightforward. For more information about the -test coverage tools, see the [golang cover blog post](http://blog.golang.org/cover). 
- -A quick summary of test practices follows: -- All new code should be accompanied by tests that ensure the code behaves - correctly when given expected values, and, perhaps even more importantly, that - it handles errors gracefully -- When you fix a bug, it should be accompanied by tests which exercise the bug - to both prove it has been resolved and to prevent future regressions -- Changes to publicly exported packages such as - [brontide](https://github.com/lightningnetwork/lnd/tree/master/brontide) should - be accompanied by unit tests exercising the new or changed behavior. -- Changes to behavior within the daemon's interaction with the P2P protocol, - or RPC's will need to be accompanied by integration tests which use the - [`networkHarness`framework](https://github.com/lightningnetwork/lnd/blob/master/lntest/harness.go) - contained within `lnd`. For example integration tests, see - [`lnd_test.go`](https://github.com/lightningnetwork/lnd/blob/master/lnd_test.go#L181). -- The itest log files are automatically scanned for `[ERR]` lines. There - shouldn't be any of those in the logs, see [Use of Log Levels](#LogLevels). - -Throughout the process of contributing to `lnd`, you'll likely also be -extensively using the commands within our `Makefile`. As a result, we recommend -[perusing the make file documentation](https://github.com/lightningnetwork/lnd/blob/master/docs/MAKEFILE.md). - - - -#### 4.3. 
Code Documentation and Commenting - -- At a minimum every function must be commented with its intended purpose and - any assumptions that it makes - - Function comments must always begin with the name of the function per - [Effective Go](http://golang.org/doc/effective_go.html) - - Function comments should be complete sentences since they allow a wide - variety of automated presentations such as [godoc.org](https://godoc.org) - - The general rule of thumb is to look at it as if you were completely - unfamiliar with the code and ask yourself, would this give me enough - information to understand what this function does and how I'd probably want - to use it? -- Exported functions should also include detailed information the caller of the - function will likely need to know and/or understand:

- -**WRONG** -```go -// generates a revocation key -func DeriveRevocationPubkey(commitPubKey *btcec.PublicKey, - revokePreimage []byte) *btcec.PublicKey { -``` -**RIGHT** -```go -// DeriveRevocationPubkey derives the revocation public key given the -// counterparty's commitment key, and revocation preimage derived via a -// pseudo-random-function. In the event that we (for some reason) broadcast a -// revoked commitment transaction, then if the other party knows the revocation -// preimage, then they'll be able to derive the corresponding private key to -// this private key by exploiting the homomorphism in the elliptic curve group: -// * https://en.wikipedia.org/wiki/Group_homomorphism#Homomorphisms_of_abelian_groups -// -// The derivation is performed as follows: -// -// revokeKey := commitKey + revokePoint -// := G*k + G*h -// := G * (k+h) -// -// Therefore, once we divulge the revocation preimage, the remote peer is able to -// compute the proper private key for the revokeKey by computing: -// revokePriv := commitPriv + revokePreimge mod N -// -// Where N is the order of the sub-group. -func DeriveRevocationPubkey(commitPubKey *btcec.PublicKey, - revokePreimage []byte) *btcec.PublicKey { -``` -- Comments in the body of the code are highly encouraged, but they should - explain the intention of the code as opposed to just calling out the - obvious

- -**WRONG** -```Go -// return err if amt is less than 546 -if amt < 546 { - return err -} -``` -**RIGHT** -```go -// Treat transactions with amounts less than the amount which is considered dust -// as non-standard. -if amt < 546 { - return err -} -``` -**NOTE:** The above should really use a constant as opposed to a magic number, -but it was left as a magic number to show how much of a difference a good -comment can make. - -
- -#### 4.4. Model Git Commit Messages - -This project prefers to keep a clean commit history with well-formed commit -messages. This section illustrates a model commit message and provides a bit -of background for it. This content was originally created by Tim Pope and made -available on his website, however that website is no longer active, so it is -being provided here. - -Here’s a model Git commit message: - -``` -Short (50 chars or less) summary of changes - -More detailed explanatory text, if necessary. Wrap it to about 72 -characters or so. In some contexts, the first line is treated as the -subject of an email and the rest of the text as the body. The blank -line separating the summary from the body is critical (unless you omit -the body entirely); tools like rebase can get confused if you run the -two together. - -Write your commit message in the present tense: "Fix bug" and not "Fixed -bug." This convention matches up with commit messages generated by -commands like git merge and git revert. - -Further paragraphs come after blank lines. - -- Bullet points are okay, too -- Typically a hyphen or asterisk is used for the bullet, preceded by a - single space, with blank lines in between, but conventions vary here -- Use a hanging indent -``` - -Here are some of the reasons why wrapping your commit messages to 72 columns is -a good thing. - -- git log doesn't do any special wrapping of the commit messages. With - the default pager of less -S, this means your paragraphs flow far off the edge - of the screen, making them difficult to read. On an 80 column terminal, if we - subtract 4 columns for the indent on the left and 4 more for symmetry on the - right, we’re left with 72 columns. -- git format-patch --stdout converts a series of commits to a series of emails, - using the messages for the message body. 
Good email netiquette dictates we - wrap our plain text emails such that there’s room for a few levels of nested - reply indicators without overflow in an 80 column terminal. - -In addition to the Git commit message structure adhered to within the daemon -all short-[commit messages are to be prefixed according to the convention -outlined in the Go project](https://golang.org/doc/contribute.html#change). All -commits should begin with the subsystem or package primarily affected by the -change. In the case of a widespread change, the packages are to be delimited by -either a '+' or a ','. This prefix seems minor but can be extremely helpful in -determining the scope of a commit at a glance, or when bug hunting to find a -commit which introduced a bug or regression. - - - -#### 4.5. Ideal Git Commit Structure - -Within the project we prefer small, contained commits for a pull request over a -single giant commit that touches several files/packages. Ideal commits build on -their own, in order to facilitate easy usage of tools like `git bisect` to `git -cherry-pick`. It's preferred that commits contain an isolated change in a -single package. In this case, the commit header message should begin with the -prefix of the modified package. For example, if a commit was made to modify the -`lnwallet` package, it should start with `lnwallet: `. - -In the case of changes that only build in tandem with changes made in other -packages, it is permitted for a single commit to be made which contains several -prefixes such as: `lnwallet+htlcswitch`. This prefix structure along with the -requirement for atomic contained commits (when possible) make things like -scanning the set of commits and debugging easier. In the case of changes that -touch several packages, and can only compile with the change across several -packages, a `multi: ` prefix should be used. 
- -Examples of common patterns w.r.t commit structures within the project: - - * It is common that during the work on a PR, existing bugs are found and - fixed. If they can be fixed in isolation, they should have their own - commit. - * File restructuring like moving a function to another file or changing order - of functions: with a separate commit because it is much easier to review - the real changes that go on top of the restructuring. - * Preparatory refactorings that are functionally equivalent: own commit. - * Project or package wide file renamings should be in their own commit. - * Ideally if a new package/struct/sub-system is added in a PR, there should - be a single commit which adds the new functionality, with follow up - induvidual commits that begin to intergrate the functionality within the - codebase. - - - -#### 4.6. Code Spacing - -Blocks of code within `lnd` should be segmented into logical stanzas of -operation. Such spacing makes the code easier to follow at a skim, and reduces -unnecessary line noise. Coupled with the commenting scheme specified above, -proper spacing allows readers to quickly scan code, extracting semantics quickly. -Functions should _not_ just be laid out as a bare contiguous block of code. - -**WRONG** -```go - witness := make([][]byte, 4) - witness[0] = nil - if bytes.Compare(pubA, pubB) == -1 { - witness[1] = sigB - witness[2] = sigA - } else { - witness[1] = sigA - witness[2] = sigB - } - witness[3] = witnessScript - return witness -``` -**RIGHT** -```go - witness := make([][]byte, 4) - - // When spending a p2wsh multi-sig script, rather than an OP_0, we add - // a nil stack element to eat the extra pop. - witness[0] = nil - - // When initially generating the witnessScript, we sorted the serialized - // public keys in descending order. So we do a quick comparison in order - // to ensure the signatures appear on the Script Virtual Machine stack in - // the correct order. 
- if bytes.Compare(pubA, pubB) == -1 { - witness[1] = sigB - witness[2] = sigA - } else { - witness[1] = sigA - witness[2] = sigB - } - - // Finally, add the preimage as the last witness element. - witness[3] = witnessScript - - return witness -``` - -Additionally, we favor spacing between stanzas within syntax like: switch case -statements and select statements. - -**WRONG** -```go - switch { - case a: - - case b: - - case c: - - case d: - - default: - - } -``` -**RIGHT** -```go - switch { - // Brief comment detailing instances of this case (repeat below). - case a: - - - case b: - - - case c: - - - case d: - - - default: - - } -``` - -If one is forced to wrap lines of function arguments that exceed the 80 -character limit, then a new line should be inserted before the first stanza in -the comment body. - -**WRONG** -```go - func foo(a, b, c, - d, e) er.R { - var a int - } -``` -**RIGHT** -```go - func foo(a, b, c, - d, e) er.R { - - var a int - } -``` - - - -#### 4.7. Protobuf Compilation - -The `lnd` project uses `protobuf`, and its extension [`gRPC`](www.grpc.io) in -several areas and as the primary RPC interface. In order to ensure uniformity -of all protos checked, in we require that all contributors pin against the -_exact same_ version of `protoc`. As of the writing of this article, the `lnd` -project uses [v3.4.0](https://github.com/google/protobuf/releases/tag/v3.4.0) -of `protoc`. - -The following two libraries must be installed with the exact commit hash as -described in [lnrpc README](https://github.com/lightningnetwork/lnd/blob/master/lnrpc/README.md) -otherwise the CI pipeline on Travis will fail: -- grpc-ecosystem/grpc-gateway -- golang/protobuf - -For detailed instructions on how to compile modifications to `lnd`'s `protobuf` -definitions, check out the [lnrpc README](https://github.com/lightningnetwork/lnd/blob/master/lnrpc/README.md). - - - -#### 4.8. 
Additional Style Constraints On Top of `gofmt` - -Before a PR is submitted, the proposer should ensure that the file passes the -set of linting scripts run by `make lint`. These include `gofmt`. In addition -to `gofmt` we've opted to enforce the following style guidelines. - - * ALL columns (on a best effort basis) should be wrapped to 80 line columns. - Editors should be set to treat a tab as 8 spaces. - * When wrapping a line that contains a function call as the unwrapped line - exceeds the column limit, the close paren should be placed on its own - line. Additionally, all arguments should begin in a new line after the - open paren. - - **WRONG** - ```go - value, err := bar(a, - a, b, c) - ``` - - **RIGHT** - ```go - value, err := bar( - a, a, b, c, - ) - ``` - -Note that the above guidelines don't apply to log messages. For log messages, -committers should attempt to minimize the of number lines utilized, while still -adhering to the 80-character column limit. - - - -#### 4.9 Pointing to Remote Dependant Branches in Go Modules - -It's common that a developer may need to make a change in a dependent project -of `lnd` such as `btcd`, `neutrino`, `btcwallet`, etc. In order to test changes -with out testing infrastructure, or simply make a PR into `lnd` that will build -without any further work, the `go.mod` and `go.sum` files will need to be -updated. 
Luckily, the `go mod` command has a handy tool to do this -automatically so developers don't need to manually edit the `go.mod` file: -``` - go mod edit -replace=IMPORT-PATH-IN-LND@LND-VERSION=DEV-FORK-IMPORT-PATH@DEV-FORK-VERSION -``` - -Here's an example replacing the `lightning-onion` version checked into `lnd` with a version in roasbeef's fork: -``` - go mod edit -replace=github.com/lightningnetwork/lightning-onion@v0.0.0-20180605012408-ac4d9da8f1d6=github.com/roasbeef/lightning-onion@2e5ae87696046298365ab43bcd1cf3a7a1d69695 -``` - - - -#### 4.10 Use of Log Levels - -There are six log levels available: `trace`, `debug`, `info`, `warn`, `error` and `critical`. - -Only use `error` for internal errors that are never expected to happen during -normal operation. No event triggered by external sources (rpc, chain backend, -etc) should lead to an `error` log. - - - -### 5. Code Approval Process - -This section describes the code approval process that is used for code -contributions. This is how to get your changes into `lnd`. - - - -#### 5.1. Code Review - -All code which is submitted will need to be reviewed before inclusion into the -master branch. This process is performed by the project maintainers and usually -other committers who are interested in the area you are working in as well. - -##### Code Review Timeframe - -The timeframe for a code review will vary greatly depending on factors such as -the number of other pull requests which need to be reviewed, the size and -complexity of the contribution, how well you followed the guidelines presented -on this page, and how easy it is for the reviewers to digest your commits. For -example, if you make one monolithic commit that makes sweeping changes to things -in multiple subsystems, it will obviously take much longer to review. You will -also likely be asked to split the commit into several smaller, and hence more -manageable, commits. 
- -Keeping the above in mind, most small changes will be reviewed within a few -days, while large or far reaching changes may take weeks. This is a good reason -to stick with the [Share Early, Share Often](#ShareEarly) development practice -outlined above. - -##### What is the review looking for? - -The review is mainly ensuring the code follows the [Development Practices](#DevelopmentPractices) -and [Code Contribution Standards](#Standards). However, there are a few other -checks which are generally performed as follows: - -- The code is stable and has no stability or security concerns -- The code is properly using existing APIs and generally fits well into the - overall architecture -- The change is not something which is deemed inappropriate by community - consensus - - - -#### 5.2. Rework Code (if needed) - -After the code review, the change will be accepted immediately if no issues are -found. If there are any concerns or questions, you will be provided with -feedback along with the next steps needed to get your contribution merged with -master. In certain cases the code reviewer(s) or interested committers may help -you rework the code, but generally you will simply be given feedback for you to -make the necessary changes. - -During the process of responding to review comments, we prefer that changes be -made with [fixup commits](https://robots.thoughtbot.com/autosquashing-git-commits). -The reason for this is two fold: it makes it easier for the reviewer to see -what changes have been made between versions (since Github doesn't easily show -prior versions like Critique) and it makes it easier on the PR author as they -can set it to auto squash the fix up commits on rebase. - -This process will continue until the code is finally accepted. - - - -#### 5.3. Acceptance - -Once your code is accepted, it will be integrated with the master branch. After -2+ (sometimes 1) LGTM's (approvals) are given on a PR, it's eligible to land in -master. 
At this final phase, it may be necessary to rebase the PR in order to -resolve any conflicts and also squash fix up commits. Ideally, the set of -[commits by new contributors are PGP signed](https://git-scm.com/book/en/v2/Git-Tools-Signing-Your-Work), -although this isn't a strong requirement (but we prefer it!). In order to keep -these signatures intact, we prefer using merge commits. PR proposers can use -`git rebase --signoff` to sign and rebase at the same time as a final step. - -Rejoice as you will now be listed as a [contributor](https://github.com/lightningnetwork/lnd/graphs/contributors)! - - - -### 6. Contribution Standards - - - -#### 6.1. Contribution Checklist - -- [  ] All changes are Go version 1.12 compliant -- [  ] The code being submitted is commented according to [Code Documentation and Commenting](#CodeDocumentation) -- [  ] For new code: Code is accompanied by tests which exercise both - the positive and negative (error paths) conditions (if applicable) -- [  ] For bug fixes: Code is accompanied by new tests which trigger - the bug being fixed to prevent regressions -- [  ] Any new logging statements use an appropriate subsystem and - logging level -- [  ] Code has been formatted with `go fmt` -- [  ] For code and documentation: lines are wrapped at 80 characters - (the tab character should be counted as 8 characters, not 4, as some IDEs do - per default) -- [  ] Running `make check` does not fail any tests -- [  ] Running `go vet` does not report any issues -- [  ] Running `make lint` does not report any **new** issues that - did not already exist -- [  ] All commits build properly and pass tests. Only in exceptional - cases it can be justifiable to violate this condition. In that case, the - reason should be stated in the commit message. -- [  ] Commits have a logical structure according to [Ideal Git Commit Structure](#IdealGitCommitStructure). - - - -#### 6.2. 
Licensing of Contributions -**** -All contributions must be licensed with the -[MIT license](https://github.com/lightningnetwork/lnd/blob/master/LICENSE). This is -the same license as all of the code found within lnd. - - -## Acknowledgements -This document was heavily inspired by a [similar document outlining the code -contribution](https://github.com/btcsuite/btcd/blob/master/docs/code_contribution_guidelines.md) -guidelines for btcd. diff --git a/lnd/docs/configuring_tor.md b/lnd/docs/configuring_tor.md deleted file mode 100644 index f6372f59..00000000 --- a/lnd/docs/configuring_tor.md +++ /dev/null @@ -1,184 +0,0 @@ -# Table of Contents -1. [Overview](#overview) -2. [Getting Started](#getting-started) -3. [Tor Stream Isolation](#tor-stream-isolation) -4. [Authentication](#authentication) -5. [Listening for Inbound Connections](#listening-for-inbound-connections) - -## Overview - -`lnd` currently has complete support for using Lightning over -[Tor](https://www.torproject.org/). Usage of Lightning over Tor is valuable as -routing nodes no longer need to potentially expose their location via their -advertised IP address. Additionally, leaf nodes can also protect their location -by using Tor for anonymous networking to establish connections. - -With widespread usage of Onion Services within the network, concerns about the -difficulty of proper NAT traversal are alleviated, as usage of onion services -allows nodes to accept inbound connections even if they're behind a NAT. At the -time of writing this documentation, `lnd` supports both types of onion services: -v2 and v3. - -Before following the remainder of this documentation, you should ensure that you -already have Tor installed locally. **If you want to run v3 Onion Services, make -sure that you run at least version 0.3.3.6.** -Official instructions to install the latest release of Tor can be found -[here](https://www.torproject.org/docs/tor-doc-unix.html.en). 
- -**NOTE**: This documentation covers how to ensure that `lnd`'s _Lightning -protocol traffic_ is tunneled over Tor. Users must ensure that when also running -a Bitcoin full-node, that it is also proxying all traffic over Tor. If using the -`neutrino` backend for `lnd`, then it will automatically also default to Tor -usage if active within `lnd`. - -## Getting Started - -First, you'll want to run `tor` locally before starting up `lnd`. Depending on -how you installed Tor, you'll find the configuration file at -`/usr/local/etc/tor/torrc`. Here's an example configuration file that we'll be -using for the remainder of the tutorial: -``` -SOCKSPort 9050 -Log notice stdout -ControlPort 9051 -CookieAuthentication 1 -``` - -With the configuration file created, you'll then want to start the Tor daemon: -``` -⛰ tor -Feb 05 17:02:06.501 [notice] Tor 0.3.1.8 (git-ad5027f7dc790624) running on Darwin with Libevent 2.1.8-stable, OpenSSL 1.0.2l, Zlib 1.2.8, Liblzma N/A, and Libzstd N/A. -Feb 05 17:02:06.502 [notice] Tor can't help you if you use it wrong! Learn how to be safe at https://www.torproject.org/download/download#warning -Feb 05 17:02:06.502 [notice] Read configuration file "/usr/local/etc/tor/torrc". -Feb 05 17:02:06.506 [notice] Opening Socks listener on 127.0.0.1:9050 -Feb 05 17:02:06.506 [notice] Opening Control listener on 127.0.0.1:9051 -``` - -Once the `tor` daemon has started and it has finished bootstrapping, you'll see this in the logs: -``` -Feb 05 17:02:06.000 [notice] Bootstrapped 0%: Starting -Feb 05 17:02:07.000 [notice] Starting with guard context "default" -Feb 05 17:02:07.000 [notice] Bootstrapped 80%: Connecting to the Tor network -Feb 05 17:02:07.000 [notice] Bootstrapped 85%: Finishing handshake with first hop -Feb 05 17:02:08.000 [notice] Bootstrapped 90%: Establishing a Tor circuit -Feb 05 17:02:11.000 [notice] Tor has successfully opened a circuit. Looks like client functionality is working. 
-Feb 05 17:02:11.000 [notice] Bootstrapped 100%: Done -``` - -This indicates the daemon is fully bootstrapped and ready to proxy connections. -At this point, we can now start `lnd` with the relevant arguments: - -``` -⛰ ./lnd -h - - - -Tor: - --tor.active Allow outbound and inbound connections to be routed through Tor - --tor.socks= The host:port that Tor's exposed SOCKS5 proxy is listening on (default: localhost:9050) - --tor.dns= The DNS server as host:port that Tor will use for SRV queries - NOTE must have TCP resolution enabled (default: soa.nodes.lightning.directory:53) - --tor.streamisolation Enable Tor stream isolation by randomizing user credentials for each connection. - --tor.control= The host:port that Tor is listening on for Tor control connections (default: localhost:9051) - --tor.targetipaddress= IP address that Tor should use as the target of the hidden service - --tor.password= The password used to arrive at the HashedControlPassword for the control port. If provided, the HASHEDPASSWORD authentication method will be used instead of the SAFECOOKIE one. - --tor.v2 Automatically set up a v2 onion service to listen for inbound connections - --tor.v3 Automatically set up a v3 onion service to listen for inbound connections - --tor.privatekeypath= The path to the private key of the onion service being created -``` - -There are a couple things here, so let's dissect them. The `--tor.active` flag -allows `lnd` to route all outbound and inbound connections through Tor. - -Outbound connections are possible with the use of the `--tor.socks` and -`--tor.dns` arguments. The `--tor.socks` argument should point to the interface -that the `Tor` daemon is listening on to proxy connections. The `--tor.dns` flag -is required in order to be able to properly automatically bootstrap a set of -peer connections. The `tor` daemon doesn't currently support proxying `SRV` -queries over Tor. 
So instead, we need to connect directly to the authoritative -DNS server over TCP, in order query for `SRV` records that we can use to -bootstrap our connections. - -Inbound connections are possible due to `lnd` automatically creating an onion -service. A path to save the onion service's private key can be specified with -the `--tor.privatekeypath` flag. - -Most of these arguments have defaults, so as long as they apply to you, routing -all outbound and inbound connections through Tor can simply be done with either -v2 or v3 onion services: -```shell -⛰ ./lnd --tor.active --tor.v2 -``` -```shell -⛰ ./lnd --tor.active --tor.v3 -``` -See [Listening for Inbound Connections](#listening-for-inbound-connections) for -more info about allowing inbound connections via Tor. - -Outbound support only can also be used with: -```shell -⛰ ./lnd --tor.active -``` - -This will allow you to make all outgoing connections over Tor. Listening is -disabled to prevent inadvertent leaks. - -## Tor Stream Isolation - -Our support for Tor also has an additional privacy enhancing modified: stream -isolation. Usage of this mode means that Tor will always use _new circuit_ for -each connection. This added features means that it's harder to correlate -connections. As otherwise, several applications using Tor might share the same -circuit. - -Activating stream isolation is very straightforward, we only require the -specification of an additional argument: -``` -⛰ ./lnd --tor.active --tor.streamisolation -``` - -## Authentication - -In order for `lnd` to communicate with the Tor daemon securely, it must first -establish an authenticated connection. `lnd` supports the following Tor control -authentication methods (arguably, from most to least secure): - -* `SAFECOOKIE`: This authentication method relies on a cookie created and - stored by the Tor daemon and is the default assuming the Tor daemon supports - it by specifying `CookieAuthentication 1` in its configuration file. 
-* `HASHEDPASSWORD`: This authentication method is stateless as it relies on a - password hash scheme and may be useful if the Tor daemon is operating under a - separate host from the `lnd` node. The password hash can be obtained through - the Tor daemon with `tor --hash-password PASSWORD`, which should then be - specified in Tor's configuration file with `HashedControlPassword - PASSWORD_HASH`. Finally, to use it within `lnd`, the `--tor.password` flag - should be provided with the corresponding password. -* `NULL`: To bypass any authentication at all, this scheme can be used instead. - It doesn't require any additional flags to `lnd` or configuration options to - the Tor daemon. - -## Listening for Inbound Connections - -In order to listen for inbound connections through Tor, an onion service must be -created. There are two types of onion services: v2 and v3. v3 onion services -are the latest generation of onion services and they provide a number of -advantages over the legacy v2 onion services. To learn more about these -benefits, see [Intro to Next Gen Onion Services](https://trac.torproject.org/projects/tor/wiki/doc/NextGenOnions). - -Both types can be created and used automatically by `lnd`. Specifying which type -should be used can easily be done by either using the `tor.v2` or `tor.v3` flag. -To prevent unintentional leaking of identifying information, it is also necessary -to add the flag `listen=localhost`. - -For example, v3 onion services can be used with the following flags: -``` -⛰ ./lnd --tor.active --tor.v3 --listen=localhost -``` - -This will automatically create a hidden service for your node to use to listen -for inbound connections and advertise itself to the network. The onion service's -private key is saved to a file named `v2_onion_private_key` or -`v3_onion_private_key` depending on the type of onion service used in `lnd`'s -base directory. This will allow `lnd` to recreate the same hidden service upon -restart. 
If you wish to generate a new onion service, you can simply delete this -file. The path to this private key file can also be modified with the -`--tor.privatekeypath` argument. diff --git a/lnd/docs/debugging_lnd.md b/lnd/docs/debugging_lnd.md deleted file mode 100644 index 44a07d92..00000000 --- a/lnd/docs/debugging_lnd.md +++ /dev/null @@ -1,47 +0,0 @@ -# Table of Contents -1. [Overview](#overview) -1. [Debug Logging](#debug-logging) -1. [Capturing pprof data with `lnd`](#capturing-pprof-data-with-lnd) - -## Overview - -`lnd` ships with a few useful features for debugging, such as a built-in -profiler and tunable logging levels. If you need to submit a bug report -for `lnd`, it may be helpful to capture debug logging and performance -data ahead of time. - -## Debug Logging - -You can enable debug logging in `lnd` by passing the `--debuglevel` flag. For -example, to increase the log level from `info` to `debug`: - -``` -$ lnd --debuglevel=debug -``` - -You may also specify logging per-subsystem, like this: - -``` -$ lnd --debuglevel==,=,... -``` - -## Capturing pprof data with `lnd` - -`lnd` has a built-in feature which allows you to capture profiling data at -runtime using [pprof](https://golang.org/pkg/runtime/pprof/), a profiler for -Go. The profiler has negligible performance overhead during normal operations -(unless you have explicitly enabled CPU profiling). - -To enable this ability, start `lnd` with the `--profile` option using a free port. - -``` -$ lnd --profile=9736 -``` - -Now, with `lnd` running, you can use the pprof endpoint on port 9736 to collect -runtime profiling data. You can fetch this data using `curl` like so: - -``` -$ curl http://localhost:9736/debug/pprof/goroutine?debug=1 -... 
-``` diff --git a/lnd/docs/etcd.md b/lnd/docs/etcd.md deleted file mode 100644 index cc107639..00000000 --- a/lnd/docs/etcd.md +++ /dev/null @@ -1,84 +0,0 @@ -# Experimental etcd support in LND - -With the recent introduction of the `kvdb` interface LND can support multiple -database backends allowing experimentation with the storage model as well as -improving robustness trough eg. replicating essential data. - -Building on `kvdb` in v0.11.0 we're adding experimental [etcd](https://etcd.io) -support to LND. As this is an unstable feature heavily in development, it still -has *many* rough edges for the time being. It is therefore highly recommended to -not use LND on `etcd` in any kind of production environment especially not -on bitcoin mainnet. - -## Building LND with etcd support - -To create a dev build of LND with etcd support use the following command: - -``` -make tags="kvdb_etcd" -``` - -The important tag is the `kvdb_etcd`, without which the binary is built without -the etcd driver. - -For development it is advised to set the `GOFLAGS` environment variable to -`"-tags=test"` otherwise `gopls` won't work on code in `channeldb/kvdb/etcd` -directory. - -## Running a local etcd instance for testing - -To start your local etcd instance for testing run: - -``` -./etcd \ - --auto-tls \ - --advertise-client-urls=https://127.0.0.1:2379 \ - --listen-client-urls=https://0.0.0.0:2379 \ - --max-txn-ops=16384 \ - --max-request-bytes=104857600 -``` - -The large `max-txn-ops` and `max-request-bytes` values are currently required in -case of running LND with the full graph in etcd. Upcoming versions will split -the database to local and replicated parts and only essential parts will remain -in the replicated database, removing the requirement for these additional -settings. These parameters have been tested to work with testnet LND. 
- -## Configuring LND to run on etcd - -To run LND with etcd, additional configuration is needed, specified either -through command line flags or in `lnd.conf`. - -Sample command line: - -``` -./lnd-debug \ - --db.backend=etcd \ - --db.etcd.host=127.0.0.1:2379 \ - --db.etcd.certfile=/home/user/etcd/bin/default.etcd/fixtures/client/cert.pem \ - --db.etcd.keyfile=/home/user/etcd/bin/default.etcd/fixtures/client/key.pem \ - --db.etcd.insecure_skip_verify -``` - -Sample `lnd.conf` (with other setting omitted): - -``` -[db] -backend=etcd -etcd.host=127.0.0.1:2379 -etcd.cerfile=/home/user/etcd/bin/default.etcd/fixtures/client/cert.pem -etcd.keyfile=/home/user/etcd/bin/default.etcd/fixtures/client/key.pem -etcd.insecure_skip_verify=true -``` - -Optionally users can specifiy `db.etcd.user` and `db.etcd.pass` for db user -authentication. - -## Migrating existing channel.db to etcd - -This is currently not supported. - -## Disclaimer - -As mentioned before this is an experimental feature, and with that your data -may be lost. Use at your own risk! diff --git a/lnd/docs/fuzz.md b/lnd/docs/fuzz.md deleted file mode 100644 index 17634462..00000000 --- a/lnd/docs/fuzz.md +++ /dev/null @@ -1,54 +0,0 @@ -# Fuzzing LND # - -The `fuzz` package is organized into subpackages which are named after the `lnd` package they test. Each subpackage has its own set of fuzz targets. - -### Setup and Installation ### -This section will cover setup and installation of `go-fuzz` and fuzzing binaries. - -* First, we must get `go-fuzz`. -``` -$ go get -u github.com/dvyukov/go-fuzz/... -``` -* The following is a command to build all fuzzing harnesses for a specific package. -``` -$ cd fuzz/ -$ find * -maxdepth 1 -regex '[A-Za-z0-9\-_.]'* -not -name fuzz_utils.go | sed 's/\.go$//1' | xargs -I % sh -c 'go-fuzz-build -func Fuzz_% -o -%-fuzz.zip github.com/lightningnetwork/lnd/fuzz/' -``` - -* This may take a while since this will create zip files associated with each fuzzing target. 
- -* Now, run `go-fuzz` with `workdir` set as below! -``` -$ go-fuzz -bin=<.zip archive here> -workdir= -procs= -``` - -`go-fuzz` will print out log lines every couple of seconds. Example output: -``` -2017/09/19 17:44:23 workers: 8, corpus: 23 (3s ago), crashers: 1, restarts: 1/748, execs: 400690 (16694/sec), cover: 394, uptime: 24s -``` -Corpus is the number of items in the corpus. `go-fuzz` may add valid inputs to -the corpus in an attempt to gain more coverage. Crashers is the number of inputs -resulting in a crash. The inputs, and their outputs are logged in: -`fuzz///crashers`. `go-fuzz` also creates a `suppressions` directory -of stacktraces to ignore so that it doesn't create duplicate stacktraces. -Cover is a number representing edge coverage of the program being fuzzed. - -### Brontide ### -The brontide fuzzers need to be run with a `-timeout` flag of 20 seconds or greater since there is a lot of machine state that must be printed on panic. - -### Corpus ### -Fuzzing generally works best with a corpus that is of minimal size while achieving the maximum coverage. However, `go-fuzz` automatically minimizes the corpus in-memory before fuzzing so a large corpus shouldn't make a difference - edge coverage is all that really matters. - -### Test Harness ### -If you take a look at the test harnesses that are used, you will see that they all consist of one function: -``` -func Fuzz(data []byte) int -``` -If: - -- `-1` is returned, the fuzzing input is ignored -- `0` is returned, `go-fuzz` will add the input to the corpus and deprioritize it in future mutations. -- `1` is returned, `go-fuzz` will add the input to the corpus and prioritize it in future mutations. - -### Conclusion ### -Citizens, do your part and `go-fuzz` `lnd` today! 
diff --git a/lnd/docs/grpc/c#.md b/lnd/docs/grpc/c#.md deleted file mode 100644 index 913a333e..00000000 --- a/lnd/docs/grpc/c#.md +++ /dev/null @@ -1,198 +0,0 @@ -# How to write a C# gRPC client for the Lightning Network Daemon - -This section enumerates what you need to do to write a client that communicates with `lnd` in C#. - - -### Prerequisites - -* .Net Core [SDK](https://dotnet.microsoft.com/download) -* If using Windows, a unix terminal such as [Cygwin](https://www.cygwin.com/) - - -### Setup and Installation - -`lnd` uses the `gRPC` protocol for communication with clients like `lncli`. - -.NET natively supports gRPC proto files and generates the necessary C# classes. You can see the official Microsoft gRPC documentation [here](https://docs.microsoft.com/en-gb/aspnet/core/grpc/?view=aspnetcore-3.1) - -This assumes you are using a Windows machine, but it applies equally to Mac and Linux. - -Create a new `.net core` console application called `lndclient` at your root directory (On Windows : `C:/`). - -Create a folder `Grpc` in the root of your project and fetch the lnd proto files - -```bash -mkdir Grpc -curl -o Grpc/rpc.proto -s https://raw.githubusercontent.com/lightningnetwork/lnd/master/lnrpc/rpc.proto -``` - -Install `Grpc.Tools`, `Google.Protobuf`, `Grpc.Core` using NuGet or manually with `dotnet add`: - -```bash -dotnet add package Grpc.Tools -dotnet add package Google.Protobuf -dotnet add package Grpc.Core -``` - -Add the `rpc.proto` file to the `.csproj` file in an ItemGroup. (In Visual Studio you can do this by unloading the project, editing the `.csproj` file and then reloading it) - -``` - - - -``` - -You're done! Build the project and verify that it works. - -#### Imports and Client - -Use the code below to set up a channel and client to connect to your `lnd` node: - -```c# - -using System.Collections.Generic; -using System.IO; -using System.Threading.Tasks; -using Grpc.Core; -using Lnrpc; -... 
- -// Due to updated ECDSA generated tls.cert we need to let gprc know that -// we need to use that cipher suite otherwise there will be a handshake -// error when we communicate with the lnd rpc server. -System.Environment.SetEnvironmentVariable("GRPC_SSL_CIPHER_SUITES", "HIGH+ECDSA"); - -// Lnd cert is at AppData/Local/Lnd/tls.cert on Windows -// ~/.lnd/tls.cert on Linux and ~/Library/Application Support/Lnd/tls.cert on Mac -var cert = File.ReadAllText(); - -var sslCreds = new SslCredentials(cert); -var channel = new Grpc.Core.Channel("localhost:10009", sslCreds); -var client = new Lnrpc.Lightning.LightningClient(channel); - -``` - -### Examples - -Let's walk through some examples of C# `gRPC` clients. These examples assume that you have at least two `lnd` nodes running, the RPC location of one of which is at the default `localhost:10009`, with an open channel between the two nodes. - -#### Simple RPC - -```c# -// Retrieve and display the wallet balance -// Use "WalletBalanceAsync" if in async context -var response = client.WalletBalance(new WalletBalanceRequest()); -Console.WriteLine(response); -``` - -#### Response-streaming RPC - -```c# -var request = new InvoiceSubscription(); -using (var call = client.SubscribeInvoices(request)) -{ - while (await call.ResponseStream.MoveNext()) - { - var invoice = call.ResponseStream.Current; - Console.WriteLine(invoice.ToString()); - } -} -``` - -Now, create an invoice for your node at `localhost:10009` and send a payment to it from another node. -```bash -$ lncli addinvoice --amt=100 -{ - "r_hash": , - "pay_req": -} -$ lncli sendpayment --pay_req= -``` - -Your console should now display the details of the recently satisfied invoice. 
- -#### Bidirectional-streaming RPC - -```c# -using (var call = client.SendPayment()) -{ - var responseReaderTask = Task.Run(async () => - { - while (await call.ResponseStream.MoveNext()) - { - var payment = call.ResponseStream.Current; - Console.WriteLine(payment.ToString()); - } - }); - - foreach (SendRequest sendRequest in SendPayment()) - { - await call.RequestStream.WriteAsync(sendRequest); - } - await call.RequestStream.CompleteAsync(); - await responseReaderTask; -} - - -IEnumerable SendPayment() -{ - while (true) - { - SendRequest req = new SendRequest() { - DestString = , - Amt = 100, - PaymentHashString = , - FinalCltvDelta = 144 - }; - yield return req; - System.Threading.Thread.Sleep(2000); - } -} -``` -This example will send a payment of 100 satoshis every 2 seconds. - -#### Using Macaroons - -To authenticate using macaroons you need to include the macaroon in the metadata of the request. - -```c# -// Lnd admin macaroon is at /data/chain/bitcoin/simnet/admin.macaroon on Windows -// ~/.lnd/data/chain/bitcoin/simnet/admin.macaroon on Linux and ~/Library/Application Support/Lnd/data/chain/bitcoin/simnet/admin.macaroon on Mac -byte[] macaroonBytes = File.ReadAllBytes("/data/chain/bitcoin/simnet/admin.macaroon"); -var macaroon = BitConverter.ToString(macaroonBytes).Replace("-", ""); // hex format stripped of "-" chars -``` - -The simplest approach to use the macaroon is to include the metadata in each request as shown below. - -```c# -client.GetInfo(new GetInfoRequest(), new Metadata() { new Metadata.Entry("macaroon", macaroon) }); -``` - -However, this can get tiresome to do for each request, so to avoid explicitly including the macaroon we can update the credentials to include it automatically. 
- -```c# -// build ssl credentials using the cert the same as before -var sslCreds = new SslCredentials(cert); - -// combine the cert credentials and the macaroon auth credentials using interceptors -// so every call is properly encrypted and authenticated -Task AddMacaroon(AuthInterceptorContext context, Metadata metadata) -{ - metadata.Add(new Metadata.Entry("macaroon", macaroon)); - return Task.CompletedTask; -} -var macaroonInterceptor = new AsyncAuthInterceptor(AddMacaroon); -var combinedCreds = ChannelCredentials.Create(sslCreds, CallCredentials.FromInterceptor(macaroonInterceptor)); - -// finally pass in the combined credentials when creating a channel -var channel = new Grpc.Core.Channel("localhost:10009", combinedCreds); -var client = new Lnrpc.Lightning.LightningClient(channel); - -// now every call will be made with the macaroon already included -client.GetInfo(new GetInfoRequest()); -``` - - -### Conclusion - -With the above, you should have all the `lnd` related `gRPC` dependencies installed locally in your project. In order to get up to speed with `protobuf` usage from C#, see [this official `protobuf` tutorial for C#](https://developers.google.com/protocol-buffers/docs/csharptutorial). Additionally, [this official gRPC resource](http://www.grpc.io/docs/tutorials/basic/csharp.html) provides more details around how to drive `gRPC` from C#. \ No newline at end of file diff --git a/lnd/docs/grpc/java.md b/lnd/docs/grpc/java.md deleted file mode 100644 index 83ab7025..00000000 --- a/lnd/docs/grpc/java.md +++ /dev/null @@ -1,240 +0,0 @@ - -# How to write a Java gRPC client for the Lightning Network Daemon - -This section enumerates what you need to do to write a client that communicates -with lnd in Java. We'll be using Maven as our build tool. - -### Prerequisites - - Maven - - running lnd - - running btcd - -### Setup and Installation -#### Project Structure -``` -. 
-├── pom.xml -└── src - ├── main - ├── java - │ └── Main.java - ├── proto - ├── google - │ └── api - │ ├── annotations.proto - │ └── http.proto - └── lnrpc - └── rpc.proto - -``` -Note the ***proto*** folder, where all the proto files are kept. - - - [rpc.proto](https://github.com/lightningnetwork/lnd/blob/master/lnrpc/rpc.proto) - - [annotations.proto](https://github.com/grpc-ecosystem/grpc-gateway/blob/master/third_party/googleapis/google/api/annotations.proto) - - [http.proto](https://github.com/grpc-ecosystem/grpc-gateway/blob/master/third_party/googleapis/google/api/http.proto) - -#### pom.xml -``` - - 1.8.0 - -``` -The following dependencies are required. -``` - - - io.grpc - grpc-netty - ${grpc.version} - - - io.grpc - grpc-protobuf - ${grpc.version} - - - io.grpc - grpc-stub - ${grpc.version} - - - io.netty - netty-tcnative-boringssl-static - 2.0.7.Final - - - commons-codec - commons-codec - 1.11 - - -``` -In the build section, we'll need to configure the following things : -``` - - - - kr.motd.maven - os-maven-plugin - 1.5.0.Final - - - - - org.xolstice.maven.plugins - protobuf-maven-plugin - 0.5.0 - - com.google.protobuf:protoc:3.4.0:exe:${os.detected.classifier} - grpc-java - io.grpc:protoc-gen-grpc-java:${grpc.version}:exe:${os.detected.classifier} - - - - - compile - compile-custom - - - - - - -``` -#### Main.java -```java -import io.grpc.Attributes; -import io.grpc.CallCredentials; -import io.grpc.ManagedChannel; -import io.grpc.Metadata; -import io.grpc.MethodDescriptor; -import io.grpc.Status; -import io.grpc.netty.GrpcSslContexts; -import io.grpc.netty.NettyChannelBuilder; -import io.netty.handler.ssl.SslContext; -import lnrpc.LightningGrpc; -import lnrpc.LightningGrpc.LightningBlockingStub; -import lnrpc.Rpc.GetInfoRequest; -import lnrpc.Rpc.GetInfoResponse; -import org.apache.commons.codec.binary.Hex; - -import java.io.File; -import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.Paths; -import 
java.util.concurrent.Executor; - -public class Main { - static class MacaroonCallCredential implements CallCredentials { - private final String macaroon; - - MacaroonCallCredential(String macaroon) { - this.macaroon = macaroon; - } - - public void thisUsesUnstableApi() {} - - public void applyRequestMetadata( - MethodDescriptor < ? , ? > methodDescriptor, - Attributes attributes, - Executor executor, - final MetadataApplier metadataApplier - ) { - String authority = attributes.get(ATTR_AUTHORITY); - System.out.println(authority); - executor.execute(new Runnable() { - public void run() { - try { - Metadata headers = new Metadata(); - Metadata.Key < String > macaroonKey = Metadata.Key.of("macaroon", Metadata.ASCII_STRING_MARSHALLER); - headers.put(macaroonKey, macaroon); - metadataApplier.apply(headers); - } catch (Throwable e) { - metadataApplier.fail(Status.UNAUTHENTICATED.withCause(e)); - } - } - }); - } - } - - private static final String CERT_PATH = "/Users/user/Library/Application Support/Lnd/tls.cert"; - private static final String MACAROON_PATH = "/Users/user/Library/Application Support/Lnd/data/chain/bitcoin/simnet/admin.macaroon"; - private static final String HOST = "localhost"; - private static final int PORT = 10009; - - public static void main(String...args) throws IOException { - SslContext sslContext = GrpcSslContexts.forClient().trustManager(new File(CERT_PATH)).build(); - NettyChannelBuilder channelBuilder = NettyChannelBuilder.forAddress(HOST, PORT); - ManagedChannel channel = channelBuilder.sslContext(sslContext).build(); - - String macaroon = - Hex.encodeHexString( - Files.readAllBytes(Paths.get(MACAROON_PATH)) - ); - - LightningBlockingStub stub = LightningGrpc - .newBlockingStub(channel) - .withCallCredentials(new MacaroonCallCredential(macaroon)); - - - GetInfoResponse response = stub.getInfo(GetInfoRequest.getDefaultInstance()); - System.out.println(response.getIdentityPubkey()); - } -} -``` -#### Running the example -Execute the following 
command in the directory where the **pom.xml** file is located. -``` -mvn compile exec:java -Dexec.mainClass="Main" -Dexec.cleanupDaemonThreads=false -``` -##### Sample output -``` -[INFO] Scanning for projects... -[INFO] ------------------------------------------------------------------------ -[INFO] Detecting the operating system and CPU architecture -[INFO] ------------------------------------------------------------------------ -[INFO] os.detected.name: osx -[INFO] os.detected.arch: x86_64 -[INFO] os.detected.version: 10.13 -[INFO] os.detected.version.major: 10 -[INFO] os.detected.version.minor: 13 -[INFO] os.detected.classifier: osx-x86_64 -[INFO] -[INFO] ------------------------------------------------------------------------ -[INFO] Building lightning-client 0.0.1-SNAPSHOT -[INFO] ------------------------------------------------------------------------ -[INFO] -[INFO] --- protobuf-maven-plugin:0.5.0:compile (default) @ lightning-client --- -[INFO] Compiling 3 proto file(s) to /Users/user/Documents/Projects/lightningclient/target/generated-sources/protobuf/java -[INFO] -[INFO] --- protobuf-maven-plugin:0.5.0:compile-custom (default) @ lightning-client --- -[INFO] Compiling 3 proto file(s) to /Users/user/Documents/Projects/lightningclient/target/generated-sources/protobuf/grpc-java -[INFO] -[INFO] --- maven-resources-plugin:2.6:resources (default-resources) @ lightning-client --- -[INFO] Using 'UTF-8' encoding to copy filtered resources. -[INFO] Copying 0 resource -[INFO] Copying 3 resources -[INFO] Copying 3 resources -[INFO] -[INFO] --- maven-compiler-plugin:3.1:compile (default-compile) @ lightning-client --- -[INFO] Changes detected - recompiling the module! 
-[INFO] Compiling 12 source files to /Users/user/Documents/Projects/lightningclient/target/classes -[INFO] -[INFO] --- exec-maven-plugin:1.6.0:java (default-cli) @ lightning-client --- -032562215c38dede6f1f2f262ff4c8db58a38ecf889e8e907eee8e4c320e0b5e81 -[INFO] ------------------------------------------------------------------------ -[INFO] BUILD SUCCESS -[INFO] ------------------------------------------------------------------------ -[INFO] Total time: 7.408 s -[INFO] Finished at: 2018-01-13T19:05:49+01:00 -[INFO] Final Memory: 30M/589M -[INFO] ------------------------------------------------------------------------ -``` - -### Java proto options - -There are 2 options available that can be used in the *rpc.proto* file : - -* option java_multiple_files = true; -* option java_package = "network.lightning.rpc"; ->The package you want to use for your generated Java classes. If no explicit java_package option is given in the .proto file, then by default the proto package (specified using the "package" keyword in the .proto file) will be used. However, proto packages generally do not make good Java packages since proto packages are not expected to start with reverse domain names. If not generating Java code, this option has no effect. diff --git a/lnd/docs/grpc/javascript.md b/lnd/docs/grpc/javascript.md deleted file mode 100644 index d4dc0a3a..00000000 --- a/lnd/docs/grpc/javascript.md +++ /dev/null @@ -1,246 +0,0 @@ -# How to write a simple `lnd` client in Javascript using `node.js` - -## Setup and Installation - -First, you'll need to initialize a simple nodejs project: -``` -npm init (or npm init -f if you want to use the default values without prompt) -``` - -Then you need to install the Javascript grpc and proto loader library -dependencies: -``` -npm install grpc @grpc/proto-loader --save -``` - -You also need to copy the `lnd` `rpc.proto` file in your project directory (or -at least somewhere reachable by your Javascript code). 
- -The `rpc.proto` file is [located in the `lnrpc` directory of the `lnd` -sources](https://github.com/lightningnetwork/lnd/blob/master/lnrpc/rpc.proto). - -### Imports and Client - -Every time you work with Javascript gRPC, you will have to import `grpc`, load -`rpc.proto`, and create a connection to your client like so: - -```js -const grpc = require('grpc'); -const protoLoader = require('@grpc/proto-loader'); -const fs = require("fs"); - -// Due to updated ECDSA generated tls.cert we need to let gprc know that -// we need to use that cipher suite otherwise there will be a handhsake -// error when we communicate with the lnd rpc server. -process.env.GRPC_SSL_CIPHER_SUITES = 'HIGH+ECDSA' - -// We need to give the proto loader some extra options, otherwise the code won't -// fully work with lnd. -const loaderOptions = { - keepCase: true, - longs: String, - enums: String, - defaults: true, - oneofs: true -}; -const packageDefinition = protoLoader.loadSync('rpc.proto', loaderOptions); - -// Lnd cert is at ~/.lnd/tls.cert on Linux and -// ~/Library/Application Support/Lnd/tls.cert on Mac -let lndCert = fs.readFileSync("~/.lnd/tls.cert"); -let credentials = grpc.credentials.createSsl(lndCert); -let lnrpcDescriptor = grpc.loadPackageDefinition(packageDefinition); -let lnrpc = lnrpcDescriptor.lnrpc; -let lightning = new lnrpc.Lightning('localhost:10009', credentials); -``` - -## Examples - -Let's walk through some examples of Javascript gRPC clients. These examples -assume that you have at least two `lnd` nodes running, the RPC location of one -of which is at the default `localhost:10009`, with an open channel between the -two nodes. 
- -### Simple RPC - -```js -lightning.getInfo({}, function(err, response) { - if (err) { - console.log('Error: ' + err); - } - console.log('GetInfo:', response); -}); -``` - -You should get something like this in your console: - -``` -GetInfo: { identity_pubkey: '03c892e3f3f077ea1e381c081abb36491a2502bc43ed37ffb82e264224f325ff27', - alias: '', - num_pending_channels: 0, - num_active_channels: 1, - num_inactive_channels: 0, - num_peers: 1, - block_height: 1006, - block_hash: '198ba1dc43b4190e507fa5c7aea07a74ec0009a9ab308e1736dbdab5c767ff8e', - synced_to_chain: false, - testnet: false, - chains: [ 'bitcoin' ] } -``` - -### Response-streaming RPC - -```js -let call = lightning.subscribeInvoices({}); -call.on('data', function(invoice) { - console.log(invoice); -}) -.on('end', function() { - // The server has finished sending -}) -.on('status', function(status) { - // Process status - console.log("Current status" + status); -}); -``` - -Now, create an invoice for your node at `localhost:10009`and send a payment to -it from another node. -```bash -$ lncli addinvoice --amt=100 -{ - "r_hash": , - "pay_req": -} -$ lncli sendpayment --pay_req= -``` -Your Javascript console should now display the details of the recently satisfied -invoice. 
- -### Bidirectional-streaming RPC - -This example has a few dependencies: -```shell -npm install --save async lodash bytebuffer -``` - -You can run the following in your shell or put it in a program and run it like -`node script.js` - -```js -// Load some libraries specific to this example -const async = require('async'); -const _ = require('lodash'); -const ByteBuffer = require('bytebuffer'); - -let dest_pubkey = ; -let dest_pubkey_bytes = ByteBuffer.fromHex(dest_pubkey); - -// Set a listener on the bidirectional stream -let call = lightning.sendPayment(); -call.on('data', function(payment) { - console.log("Payment sent:"); - console.log(payment); -}); -call.on('end', function() { - // The server has finished - console.log("END"); -}); - -// You can send single payments like this -call.write({ dest: dest_pubkey_bytes, amt: 6969 }); - -// Or send a bunch of them like this -function paymentSender(destination, amount) { - return function(callback) { - console.log("Sending " + amount + " satoshis"); - console.log("To: " + destination); - call.write({ - dest: destination, - amt: amount - }); - _.delay(callback, 2000); - }; -} -let payment_senders = []; -for (let i = 0; i < 10; i++) { - payment_senders[i] = paymentSender(dest_pubkey_bytes, 100); -} -async.series(payment_senders, function() { - call.end(); -}); - -``` -This example will send a payment of 100 satoshis every 2 seconds. - - -### Using Macaroons - -To authenticate using macaroons you need to include the macaroon in the metadata -of each request. 
- -The following snippet will add the macaroon to every request automatically: - -```js -const fs = require('fs'); -const grpc = require('grpc'); -const protoLoader = require('@grpc/proto-loader'); -const loaderOptions = { - keepCase: true, - longs: String, - enums: String, - defaults: true, - oneofs: true -}; -const packageDefinition = protoLoader.loadSync('rpc.proto', loaderOptions); - -process.env.GRPC_SSL_CIPHER_SUITES = 'HIGH+ECDSA' - -// Lnd admin macaroon is at ~/.lnd/data/chain/bitcoin/simnet/admin.macaroon on Linux and -// ~/Library/Application Support/Lnd/data/chain/bitcoin/simnet/admin.macaroon on Mac -let m = fs.readFileSync('~/.lnd/data/chain/bitcoin/simnet/admin.macaroon'); -let macaroon = m.toString('hex'); - -// build meta data credentials -let metadata = new grpc.Metadata() -metadata.add('macaroon', macaroon) -let macaroonCreds = grpc.credentials.createFromMetadataGenerator((_args, callback) => { - callback(null, metadata); -}); - -// build ssl credentials using the cert the same as before -let lndCert = fs.readFileSync("~/.lnd/tls.cert"); -let sslCreds = grpc.credentials.createSsl(lndCert); - -// combine the cert credentials and the macaroon auth credentials -// such that every call is properly encrypted and authenticated -let credentials = grpc.credentials.combineChannelCredentials(sslCreds, macaroonCreds); - -// Pass the crendentials when creating a channel -let lnrpcDescriptor = grpc.loadPackageDefinition(packageDefinition); -let lnrpc = lnrpcDescriptor.lnrpc; -let client = new lnrpc.Lightning('some.address:10009', credentials); - -client.getInfo({}, (err, response) => { - if (err) { - console.log('Error: ' + err); - } - console.log('GetInfo:', response); -}); -``` - -## Conclusion - -With the above, you should have all the `lnd` related `gRPC` dependencies -installed locally in your project. 
In order to get up to speed with `protofbuf` -usage from Javascript, see [this official `protobuf` reference for -Javascript](https://developers.google.com/protocol-buffers/docs/reference/javascript-generated). -Additionally, [this official gRPC -resource](http://www.grpc.io/docs/tutorials/basic/node.html) provides more -details around how to drive `gRPC` from `node.js`. - -## API documentation - -There is an [online API documentation](https://api.lightning.community?javascript) -available that shows all currently existing RPC methods, including code snippets -on how to use them. diff --git a/lnd/docs/grpc/python.md b/lnd/docs/grpc/python.md deleted file mode 100644 index 55452f28..00000000 --- a/lnd/docs/grpc/python.md +++ /dev/null @@ -1,212 +0,0 @@ -# How to write a Python gRPC client for the Lightning Network Daemon - -This section enumerates what you need to do to write a client that communicates -with `lnd` in Python. - -## Setup and Installation - -Lnd uses the gRPC protocol for communication with clients like lncli. gRPC is -based on protocol buffers and as such, you will need to compile the lnd proto -file in Python before you can use it to communicate with lnd. - -1. Create a virtual environment for your project - ``` - $ virtualenv lnd - ``` -2. Activate the virtual environment - ``` - $ source lnd/bin/activate - ``` -3. Install dependencies (googleapis-common-protos is required due to the use of - google/api/annotations.proto) - ``` - (lnd)$ pip install grpcio grpcio-tools googleapis-common-protos - ``` -4. Clone the google api's repository (required due to the use of - google/api/annotations.proto) - ``` - (lnd)$ git clone https://github.com/googleapis/googleapis.git - ``` -5. 
Copy the lnd rpc.proto file (you'll find this at - [lnrpc/rpc.proto](https://github.com/lightningnetwork/lnd/blob/master/lnrpc/rpc.proto)) - or just download it - ``` - (lnd)$ curl -o rpc.proto -s https://raw.githubusercontent.com/lightningnetwork/lnd/master/lnrpc/rpc.proto - ``` -6. Compile the proto file - ``` - (lnd)$ python -m grpc_tools.protoc --proto_path=googleapis:. --python_out=. --grpc_python_out=. rpc.proto - ``` - -After following these steps, two files `rpc_pb2.py` and `rpc_pb2_grpc.py` will -be generated. These files will be imported in your project anytime you use -Python gRPC. - -### Generating RPC modules for subservers - -If you want to use any of the subservers' functionality, you also need to -generate the python modules for them. - -For example, if you want to generate the RPC modules for the `Router` subserver -(located/defined in `routerrpc/router.proto`), you need to run the following two -extra steps (after completing all 6 step described above) to get the -`router_pb2.py` and `router_pb2_grpc.py`: - -``` -(lnd)$ curl -o router.proto -s https://raw.githubusercontent.com/lightningnetwork/lnd/master/lnrpc/routerrpc/router.proto -(lnd)$ python -m grpc_tools.protoc --proto_path=googleapis:. --python_out=. --grpc_python_out=. router.proto -``` - -### Imports and Client - -Every time you use Python gRPC, you will have to import the generated rpc modules -and set up a channel and stub to your connect to your `lnd` node: - -```python -import rpc_pb2 as ln -import rpc_pb2_grpc as lnrpc -import grpc -import os - -# Due to updated ECDSA generated tls.cert we need to let gprc know that -# we need to use that cipher suite otherwise there will be a handhsake -# error when we communicate with the lnd rpc server. 
-os.environ["GRPC_SSL_CIPHER_SUITES"] = 'HIGH+ECDSA' - -# Lnd cert is at ~/.lnd/tls.cert on Linux and -# ~/Library/Application Support/Lnd/tls.cert on Mac -cert = open(os.path.expanduser('~/.lnd/tls.cert'), 'rb').read() -creds = grpc.ssl_channel_credentials(cert) -channel = grpc.secure_channel('localhost:10009', creds) -stub = lnrpc.LightningStub(channel) -``` - -## Examples - -Let's walk through some examples of Python gRPC clients. These examples assume -that you have at least two `lnd` nodes running, the RPC location of one of which -is at the default `localhost:10009`, with an open channel between the two nodes. - -### Simple RPC - -```python -# Retrieve and display the wallet balance -response = stub.WalletBalance(ln.WalletBalanceRequest()) -print(response.total_balance) -``` - -### Response-streaming RPC - -```python -request = ln.InvoiceSubscription() -for invoice in stub.SubscribeInvoices(request): - print(invoice) -``` - -Now, create an invoice for your node at `localhost:10009`and send a payment to -it from another node. -```bash -$ lncli addinvoice --amt=100 -{ - "r_hash": , - "pay_req": -} -$ lncli sendpayment --pay_req= -``` - -Your Python console should now display the details of the recently satisfied -invoice. - -### Bidirectional-streaming RPC - -```python -from time import sleep -import codecs - -def request_generator(dest, amt): - # Initialization code here - counter = 0 - print("Starting up") - while True: - request = ln.SendRequest( - dest=dest, - amt=amt, - ) - yield request - # Alter parameters here - counter += 1 - sleep(2) - -# Outputs from lncli are hex-encoded -dest_hex = -dest_bytes = codecs.decode(dest_hex, 'hex') - -request_iterable = request_generator(dest=dest_bytes, amt=100) - -for payment in stub.SendPayment(request_iterable): - print(payment) -``` -This example will send a payment of 100 satoshis every 2 seconds. 
- -### Using Macaroons - -To authenticate using macaroons you need to include the macaroon in the metadata of the request. - -```python -import codecs - -# Lnd admin macaroon is at ~/.lnd/data/chain/bitcoin/simnet/admin.macaroon on Linux and -# ~/Library/Application Support/Lnd/data/chain/bitcoin/simnet/admin.macaroon on Mac -with open(os.path.expanduser('~/.lnd/data/chain/bitcoin/simnet/admin.macaroon'), 'rb') as f: - macaroon_bytes = f.read() - macaroon = codecs.encode(macaroon_bytes, 'hex') -``` - -The simplest approach to use the macaroon is to include the metadata in each request as shown below. - -```python -stub.GetInfo(ln.GetInfoRequest(), metadata=[('macaroon', macaroon)]) -``` - -However, this can get tiresome to do for each request, so to avoid explicitly including the macaroon we can update the credentials to include it automatically. - -```python -def metadata_callback(context, callback): - # for more info see grpc docs - callback([('macaroon', macaroon)], None) - - -# build ssl credentials using the cert the same as before -cert_creds = grpc.ssl_channel_credentials(cert) - -# now build meta data credentials -auth_creds = grpc.metadata_call_credentials(metadata_callback) - -# combine the cert credentials and the macaroon auth credentials -# such that every call is properly encrypted and authenticated -combined_creds = grpc.composite_channel_credentials(cert_creds, auth_creds) - -# finally pass in the combined credentials when creating a channel -channel = grpc.secure_channel('localhost:10009', combined_creds) -stub = lnrpc.LightningStub(channel) - -# now every call will be made with the macaroon already included -stub.GetInfo(ln.GetInfoRequest()) -``` - - -## Conclusion - -With the above, you should have all the `lnd` related `gRPC` dependencies -installed locally into your virtual environment. 
In order to get up to speed -with `protofbuf` usage from Python, see [this official `protobuf` tutorial for -Python](https://developers.google.com/protocol-buffers/docs/pythontutorial). -Additionally, [this official gRPC -resource](http://www.grpc.io/docs/tutorials/basic/python.html) provides more -details around how to drive `gRPC` from Python. - -## API documentation - -There is an [online API documentation](https://api.lightning.community?python) -available that shows all currently existing RPC methods, including code snippets -on how to use them. diff --git a/lnd/docs/grpc/ruby.md b/lnd/docs/grpc/ruby.md deleted file mode 100644 index 867d2ce7..00000000 --- a/lnd/docs/grpc/ruby.md +++ /dev/null @@ -1,185 +0,0 @@ -# How to write a Ruby gRPC client for the Lightning Network Daemon - -This section enumerates what you need to do to write a client that communicates -with `lnd` in Ruby. - -### Introduction - -`lnd` uses the `gRPC` protocol for communication with clients like `lncli`. - -`gRPC` is based on protocol buffers and as such, you will need to compile -the `lnd` proto file in Ruby before you can use it to communicate with `lnd`. - -### Setup - -Install gRPC rubygems: - -``` -$ gem install grpc -$ gem install grpc-tools -``` - -Clone the Google APIs repository: - -``` -$ git clone https://github.com/googleapis/googleapis.git -``` - -Fetch the `rpc.proto` file (or copy it from your local source directory): - -``` -$ curl -o rpc.proto -s https://raw.githubusercontent.com/lightningnetwork/lnd/master/lnrpc/rpc.proto -``` - -Compile the proto file: - -``` -$ grpc_tools_ruby_protoc --proto_path googleapis:. --ruby_out=. --grpc_out=. rpc.proto -``` - -Two files will be generated in the current directory: - -* `rpc_pb.rb` -* `rpc_services_pb.rb` - -### Examples - -#### Simple client to display wallet balance - -Every time you use the Ruby gRPC you need to require the `rpc_services_pb` file. - -We assume that `lnd` runs on the default `localhost:10009`. 
- -We further assume you run `lnd` with `--no-macaroons`. - -```ruby -#!/usr/bin/env ruby - -$:.unshift(File.dirname(__FILE__)) - -require 'grpc' -require 'rpc_services_pb' - -# Due to updated ECDSA generated tls.cert we need to let gprc know that -# we need to use that cipher suite otherwise there will be a handhsake -# error when we communicate with the lnd rpc server. -ENV['GRPC_SSL_CIPHER_SUITES'] = "HIGH+ECDSA" - -certificate = File.read(File.expand_path("~/.lnd/tls.cert")) -credentials = GRPC::Core::ChannelCredentials.new(certificate) -stub = Lnrpc::Lightning::Stub.new('127.0.0.1:10009', credentials) - -response = stub.wallet_balance(Lnrpc::WalletBalanceRequest.new()) -puts "Total balance: #{response.total_balance}" -``` - -This will show the `total_balance` of the wallet. - -#### Streaming client for invoice payment updates - -```ruby -#!/usr/bin/env ruby - -$:.unshift(File.dirname(__FILE__)) - -require 'grpc' -require 'rpc_services_pb' - -ENV['GRPC_SSL_CIPHER_SUITES'] = "HIGH+ECDSA" - -certificate = File.read(File.expand_path("~/.lnd/tls.cert")) -credentials = GRPC::Core::ChannelCredentials.new(certificate) -stub = Lnrpc::Lightning::Stub.new('127.0.0.1:10009', credentials) - -stub.subscribe_invoices(Lnrpc::InvoiceSubscription.new) do |invoice| - puts invoice.inspect -end -``` - -Now, create an invoice on your node: - -```bash -$ lncli addinvoice --amt=590 -{ - "r_hash": , - "pay_req": -} -``` - -Next send a payment to it from another node: - -``` -$ lncli sendpayment --pay_req= -``` - -You should now see the details of the settled invoice appear. - -#### Using Macaroons - -To authenticate using macaroons you need to include the macaroon in the metadata of the request. 
- -```ruby -# Lnd admin macaroon is at ~/.lnd/data/chain/bitcoin/simnet/admin.macaroon on Linux and -# ~/Library/Application Support/Lnd/data/chain/bitcoin/simnet/admin.macaroon on Mac -macaroon_binary = File.read(File.expand_path("~/.lnd/data/chain/bitcoin/simnet/admin.macaroon")) -macaroon = macaroon_binary.each_byte.map { |b| b.to_s(16).rjust(2,'0') }.join -``` - -The simplest approach to use the macaroon is to include the metadata in each request as shown below. - -```ruby -stub.get_info(Lnrpc::GetInfoRequest.new, metadata: {macaroon: macaroon}) -``` - -However, this can get tiresome to do for each request. We can use gRPC interceptors to add this metadata to each request automatically. Our interceptor class would look like this. - -```ruby -class MacaroonInterceptor < GRPC::ClientInterceptor - attr_reader :macaroon - - def initialize(macaroon) - @macaroon = macaroon - super - end - - def request_response(request:, call:, method:, metadata:) - metadata['macaroon'] = macaroon - yield - end - - def server_streamer(request:, call:, method:, metadata:) - metadata['macaroon'] = macaroon - yield - end -end -``` - -And then we would include it when we create our stub like so. - -```ruby -certificate = File.read(File.expand_path("~/.lnd/tls.cert")) -credentials = GRPC::Core::ChannelCredentials.new(certificate) -macaroon_binary = File.read(File.expand_path("~/.lnd/data/chain/bitcoin/simnet/admin.macaroon")) -macaroon = macaroon_binary.each_byte.map { |b| b.to_s(16).rjust(2,'0') }.join - -stub = Lnrpc::Lightning::Stub.new( - 'localhost:10009', - credentials, - interceptors: [MacaroonInterceptor.new(macaroon)] -) - -# Now we don't need to pass the metadata on a request level -p stub.get_info(Lnrpc::GetInfoRequest.new) -``` - -#### Receive Large Responses - -A GRPC::ResourceExhausted exception is raised when a server response is too large. In particular, this will happen with mainnet DescribeGraph calls. 
The solution is to raise the default limits by including a channel_args hash when creating our stub. - -```ruby -stub = Lnrpc::Lightning::Stub.new( - 'localhost:10009', - credentials, - channel_args: {"grpc.max_receive_message_length" => 1024 * 1024 * 50} -) -``` \ No newline at end of file diff --git a/lnd/docs/macaroons.md b/lnd/docs/macaroons.md deleted file mode 100644 index 13d8a6fd..00000000 --- a/lnd/docs/macaroons.md +++ /dev/null @@ -1,201 +0,0 @@ -As part of [the `lnd` 0.3-alpha -release](https://github.com/lightningnetwork/lnd/releases/tag/v0.3-alpha), we -have addressed [issue 20](https://github.com/lightningnetwork/lnd/issues/20), -which is RPC authentication. Until this was implemented, all RPC calls to `lnd` -were unauthenticated. To fix this, we've utilized -[macaroons](https://research.google.com/pubs/pub41892.html), which are similar -to cookies but more capable. This brief overview explains, at a basic level, -how they work, how we use them for `lnd` authentication, and our future plans. - -## What are macaroons? - -You can think of a macaroon as a cookie, in a way. Cookies are small bits of -data that your browser stores and sends to a particular website when it makes a -request to that website. If you're logged into a website, that cookie can store -a session ID, which the site can look up in its own database to check who you -are and give you the appropriate content. - -A macaroon is similar: it's a small bit of data that a client (like `lncli`) -can send to a service (like `lnd`) to assert that it's allowed to perform an -action. The service looks up the macaroon ID and verifies that the macaroon was -initially signed with the service's root key. However, unlike a cookie, you can -*delegate* a macaroon, or create a version of it that has more limited -capabilities, and then send it to someone else to use. 
- -Just like a cookie, a macaroon should be sent over a secure channel (such as a -TLS-encrypted connection), which is why we've also begun enforcing TLS for RPC -requests in this release. Before SSL was enforced on websites such as Facebook -and Google, listening to HTTP sessions on wireless networks was one way to -hijack the session and log in as that user, gaining access to the user's -account. Macaroons are similar in that intercepting a macaroon in transit -allows the interceptor to use the macaroon to gain all the privileges of the -legitimate user. - -## Macaroon delegation - -A macaroon is delegated by adding restrictions (called caveats) and an -authentication code similar to a signature (technically an HMAC) to it. The -technical method of doing this is outside the scope of this overview -documentation, but the [README in the macaroons package](../macaroons/README.md) -or the macaroon paper linked above describe it in more detail. The -user must remember several things: - -* Sharing a macaroon allows anyone in possession of that macaroon to use it to - access the service (in our case, `lnd`) to do anything permitted by the - macaroon. There is a specific type of restriction, called a "third party - caveat," that requires an external service to verify the request; however, - `lnd` doesn't currently implement those. - -* If you add a caveat to a macaroon and share the resulting macaroon, the - person receiving it cannot remove the caveat. - -This is used in `lnd` in an interesting way. By default, when `lnd` starts, it -creates three files which contain macaroons: a file called `admin.macaroon`, -which contains a macaroon with no caveats, a file called `readonly.macaroon`, -which is the *same* macaroon but with an additional caveat, that permits only -methods that don't change the state of `lnd`, and `invoice.macaroon`, which -only has access to invoice related methods. - -## How macaroons are used by `lnd` and `lncli`. 
- -On startup, `lnd` checks to see if the `admin.macaroon`, `readonly.macaroon` -and `invoice.macaroon` files exist. If they don't exist, `lnd` updates its -database with a new macaroon ID, generates the three files `admin.macaroon`, -`readonly.macaroon` and `invoice.macaroon`, all with the same ID. The -`readonly.macaroon` file has an additional caveat which restricts the caller -to using only read-only methods and the `invoice.macaroon` also has an -additional caveat which restricts the caller to using only invoice related -methods. This means a few important things: - -* You can delete the `admin.macaroon` and be left with only the - `readonly.macaroon`, which can sometimes be useful (for example, if you want - your `lnd` instance to run in autopilot mode and don't want to accidentally - change its state). - -* If you delete the data directory which contains the `macaroons.db` file, this - invalidates the `admin.macaroon`, `readonly.macaroon` and `invoice.macaroon` - files. Invalid macaroon files give you errors like `cannot get macaroon: root - key with id 0 doesn't exist` or `verification failed: signature mismatch - after caveat verification`. - -You can also run `lnd` with the `--no-macaroons` option, which skips the -creation of the macaroon files and all macaroon checks within the RPC server. -This means you can still pass a macaroon to the RPC server with a client, but -it won't be checked for validity. Note that disabling authentication of a server -that's listening on a public interface is not allowed. This means the -`--no-macaroons` option is only permitted when the RPC server is in a private -network. In CIDR notation, the following IPs are considered private, -- [`169.254.0.0/16` and `fe80::/10`](https://en.wikipedia.org/wiki/Link-local_address). -- [`224.0.0.0/4` and `ff00::/8`](https://en.wikipedia.org/wiki/Multicast_address). -- [`10.0.0.0/8`, `172.16.0.0/12` and `192.168.0.0/16`](https://tools.ietf.org/html/rfc1918). 
-- [`fc00::/7`](https://tools.ietf.org/html/rfc4193). - -Since `lnd` requires macaroons by default in order to call RPC methods, `lncli` -now reads a macaroon and provides it in the RPC call. Unless the path is -changed by the `--macaroonpath` option, `lncli` tries to read the macaroon from -the network directory of `lnd`'s currently active network (e.g. for simnet -`lnddir/data/chain/bitcoin/simnet/admin.macaroon`) by default and will error if -that file doesn't exist unless provided the `--no-macaroons` option. Keep this -in mind when running `lnd` with `--no-macaroons`, as `lncli` will error out -unless called the same way **or** `lnd` has generated a macaroon on a previous -run without this option. - -`lncli` also adds a caveat which makes it valid for only 60 seconds by default -to help prevent replay in case the macaroon is somehow intercepted in -transmission. This is unlikely with TLS, but can happen e.g. when using a PKI -and network setup which allows inspection of encrypted traffic, and an attacker -gets access to the traffic logs after interception. The default 60 second -timeout can be changed with the `--macaroontimeout` option; this can be -increased for making RPC calls between systems whose clocks are more than 60s -apart. - -## Stateless initialization - -As mentioned above, by default `lnd` creates several macaroon files in its -directory. These are unencrypted and in case of the `admin.macaroon` provide -full access to the daemon. This can be seen as quite a big security risk if -the `lnd` daemon runs in an environment that is not fully trusted. - -The macaroon files are the only files with highly sensitive information that -are not encrypted (unlike the wallet file and the macaroon database file that -contains the [root key](../macaroons/README.md), these are always encrypted, -even if no password is used). 
- -To avoid leaking the macaroon information, `lnd` supports the so called -`stateless initialization` mode: -* The three startup commands `create`, `unlock` and `changepassword` of `lncli` - all have a flag called `--stateless_init` that instructs the daemon **not** - to create `*.macaroon` files. -* The two operations `create` and `changepassword` that actually create/update - the macaroon database will return the admin macaroon in the RPC call. - Assuming the daemon and the `lncli` are not used on the same machine, this - will leave no unencrypted information on the machine where `lnd` runs on. - * To be more precise: By default, when using the `changepassword` command, the - macaroon root key in the macaroon DB is just re-encrypted with the new - password. But the key remains the same and therefore the macaroons issued - before the `changepassword` command still remain valid. If a user wants to - invalidate all previously created macaroons, the `--new_mac_root_key` flag - of the `changepassword` command should be used! -* An user of `lncli` will see the returned admin macaroon printed to the screen - or saved to a file if the parameter `--save_to=some_file.macaroon` is used. -* **Important:** By default, `lnd` will create the macaroon files during the - `unlock` phase, if the `--stateless_init` flag is not used. So to avoid - leakage of the macaroon information, use the stateless initialization flag - for all three startup commands of the wallet unlocker service! 
- -Examples: - -* Create a new wallet stateless (first run): - * `lncli create --stateless_init --save_to=/safe/location/admin.macaroon` -* Unlock a wallet that has previously been initialized stateless: - * `lncli unlock --stateless_init` -* Use the created macaroon: - * `lncli --macaroonpath=/safe/location/admin.macaroon getinfo` - -## Using Macaroons with GRPC clients - -When interacting with `lnd` using the GRPC interface, the macaroons are encoded -as a hex string over the wire and can be passed to `lnd` by specifying the -hex-encoded macaroon as GRPC metadata: - - GET https://localhost:8080/v1/getinfo - Grpc-Metadata-macaroon: - -Where `` is the hex encoded binary data from the macaroon file itself. - -A very simple example using `curl` may look something like this: - - curl --insecure --header "Grpc-Metadata-macaroon: $(xxd -ps -u -c 1000 $HOME/.lnd/data/chain/bitcoin/simnet/admin.macaroon)" https://localhost:8080/v1/getinfo - -Have a look at the [Java GRPC example](/docs/grpc/java.md) for programmatic usage details. - -## Creating macaroons with custom permissions - -The macaroon bakery is described in more detail in the -[README in the macaroons package](../macaroons/README.md). - -## Future improvements to the `lnd` macaroon implementation - -The existing macaroon implementation in `lnd` and `lncli` lays the groundwork -for future improvements in functionality and security. We will add features -such as: - -* Improved replay protection for securing RPC calls - -* Macaroon database encryption - -* Root key rotation and possibly macaroon invalidation/rotation - -* Additional restrictions, such as limiting payments to use (or not use) - specific routes, channels, nodes, etc. 
- -* Accounting-based macaroons, which can make an instance of `lnd` act almost - like a bank for apps: for example, an app that pays to consume APIs whose - budget is limited to the money it receives by providing an API/service - -* Support for third-party caveats, which allows external plugins for - authorization and authentication - -With this new feature, we've started laying the groundwork for flexible -authentication and authorization for RPC calls to `lnd`. We look forward to -expanding its functionality to make it easy to develop secure apps. diff --git a/lnd/docs/nat_traversal.md b/lnd/docs/nat_traversal.md deleted file mode 100644 index dd48fcfc..00000000 --- a/lnd/docs/nat_traversal.md +++ /dev/null @@ -1,23 +0,0 @@ -# NAT Traversal - -`lnd` has support for NAT traversal using a number of different techniques. At -the time of writing this documentation, UPnP and NAT-PMP are supported. NAT -traversal can be enabled through `lnd`'s `--nat` flag. - -```shell -$ lnd ... --nat -``` - -On startup, `lnd` will try the different techniques until one is found that's -supported by your hardware. The underlying dependencies used for these -techniques rely on using system-specific binaries in order to detect your -gateway device's address. This is needed because we need to be able to reach the -gateway device to determine if it supports the specific NAT traversal technique -currently being tried. Because of this, due to uncommon setups, it is possible -that these binaries are not found in your system. If this is case, `lnd` will -exit stating such error. - -As a bonus, `lnd` spawns a background thread that automatically detects IP -address changes and propagates the new address update to the rest of the -network. This is especially beneficial for users who were provided dynamic IP -addresses from their internet service provider. 
diff --git a/lnd/docs/psbt.md b/lnd/docs/psbt.md deleted file mode 100644 index 059af124..00000000 --- a/lnd/docs/psbt.md +++ /dev/null @@ -1,598 +0,0 @@ -# PSBT - -This document describes various use cases around the topic of Partially Signed -Bitcoin Transactions (PSBTs). `lnd`'s wallet now features a full set of PSBT -functionality, including creating, signing and funding channels with PSBTs. - -See [BIP174](https://github.com/bitcoin/bips/blob/master/bip-0174.mediawiki) for -a full description of the PSBT format and the different _roles_ that a -participant in a PSBT can have. - -## Creating/funding a PSBT - -The first step for every transaction that is constructed using a PSBT flow is to -select inputs (UTXOs) to fund the desired output and to add a change output that -sends the remaining funds back to the own wallet. - -This `wallet psbt fund` command is very similar to `bitcoind`'s -`walletcreatefundedpsbt` command. One main difference is that you can specify a -template PSBT in the `lncli` variant that contains the output(s) and optional -inputs. Another difference is that for the `--outputs` flag, `lncli` expects the -amounts to be in satoshis instead of fractions of a bitcoin. 
- -### Simple example: fund PSBT that sends to address - -Let's start with a very simple example and assume we want to send half a coin -to the address `bcrt1qjrdns4f5zwkv29ln86plqzs092yd5fg6nsz8re`: - -```shell script -$ lncli wallet psbt fund --outputs='{"bcrt1qjrdns4f5zwkv29ln86plqzs092yd5fg6nsz8re":50000000}' - -{ - "psbt": "cHNidP8BAHECAAAAAeJQY2VLRtutKgQYFUajEKpjFfl0Uyrm6x23OumDpe/4AQAAAAD/////AkxREgEAAAAAFgAUv6pTgbKHN60CZ+RQn5yOuH6c2WiA8PoCAAAAABYAFJDbOFU0E6zFF/M+g/AKDyqI2iUaAAAAAAABAOsCAAAAAAEBbxqXgEf9DlzcqqNM610s5pL1X258ra6+KJ22etb7HAcBAAAAAAAAAAACACT0AAAAAAAiACC7U1W0iJGhQ6o7CexDh5k36V6v3256xpA9/xmB2BybTFZdDQQAAAAAFgAUKp2ThzhswyM2QHlyvmMB6tQB7V0CSDBFAiEA4Md8RIZYqFdUPsgDyomlzMJL9bJ6Ho23JGTihXtEelgCIAeNXRLyt88SOuuWFVn3IodCE4U5D6DojIHesRmikF28ASEDHYFzMEAxfmfq98eSSnZtUwb1w7mAtHG65y8qiRFNnIkAAAAAAQEfVl0NBAAAAAAWABQqnZOHOGzDIzZAeXK+YwHq1AHtXQEDBAEAAAAAAAA=", - "change_output_index": 0, - "locks": [ - { - "id": "ede19a92ed321a4705f8a1cccc1d4f6182545d4bb4fae08bd5937831b7e38f98", - "outpoint": "f8efa583e93ab71debe62a5374f91563aa10a3461518042aaddb464b656350e2:1", - "expiration": 1601553408 - } - ] -} -``` - -The first thing we notice in the response is that an outpoint was locked. -That means, the UTXO that was chosen to fund the PSBT is currently locked and -cannot be used by the internal wallet or any other RPC call. This lock will be -released automatically either after 10 minutes (timeout) or once a transaction -that spends the UTXO is published. 
- -If we inspect the PSBT that was created, we see that the locked input was indeed -selected, the UTXO information was attached and a change output (at index 0) was -created as well: - -```shell script -$ bitcoin-cli decodepsbt cHNidP8BAHECAAAAAeJQY2VLRtutKgQYFUajEKpjFfl0Uyrm6x23OumDpe/4AQAAAAD/////AkxREgEAAAAAFgAUv6pTgbKHN60CZ+RQn5yOuH6c2WiA8PoCAAAAABYAFJDbOFU0E6zFF/M+g/AKDyqI2iUaAAAAAAABAOsCAAAAAAEBbxqXgEf9DlzcqqNM610s5pL1X258ra6+KJ22etb7HAcBAAAAAAAAAAACACT0AAAAAAAiACC7U1W0iJGhQ6o7CexDh5k36V6v3256xpA9/xmB2BybTFZdDQQAAAAAFgAUKp2ThzhswyM2QHlyvmMB6tQB7V0CSDBFAiEA4Md8RIZYqFdUPsgDyomlzMJL9bJ6Ho23JGTihXtEelgCIAeNXRLyt88SOuuWFVn3IodCE4U5D6DojIHesRmikF28ASEDHYFzMEAxfmfq98eSSnZtUwb1w7mAtHG65y8qiRFNnIkAAAAAAQEfVl0NBAAAAAAWABQqnZOHOGzDIzZAeXK+YwHq1AHtXQEDBAEAAAAAAAA= -{ - "tx": { - "txid": "33a316d62ddf74656967754d26ea83a3cb89e03ae44578d965156d4b71b1fce7", - "hash": "33a316d62ddf74656967754d26ea83a3cb89e03ae44578d965156d4b71b1fce7", - "version": 2, - "size": 113, - "vsize": 113, - "weight": 452, - "locktime": 0, - "vin": [ - { - "txid": "f8efa583e93ab71debe62a5374f91563aa10a3461518042aaddb464b656350e2", - "vout": 1, - "scriptSig": { - "asm": "", - "hex": "" - }, - "sequence": 4294967295 - } - ], - "vout": [ - { - "value": 0.17977676, - "n": 0, - "scriptPubKey": { - "asm": "0 bfaa5381b28737ad0267e4509f9c8eb87e9cd968", - "hex": "0014bfaa5381b28737ad0267e4509f9c8eb87e9cd968", - "reqSigs": 1, - "type": "witness_v0_keyhash", - "addresses": [ - "bcrt1qh7498qdjsum66qn8u3gfl8ywhplfektg6mutfs" - ] - } - }, - { - "value": 0.50000000, - "n": 1, - "scriptPubKey": { - "asm": "0 90db38553413acc517f33e83f00a0f2a88da251a", - "hex": "001490db38553413acc517f33e83f00a0f2a88da251a", - "reqSigs": 1, - "type": "witness_v0_keyhash", - "addresses": [ - "bcrt1qjrdns4f5zwkv29ln86plqzs092yd5fg6nsz8re" - ] - } - } - ] - }, - "unknown": { - }, - "inputs": [ - { - "witness_utxo": { -... - }, - "non_witness_utxo": { - ... - }, - "sighash": "ALL" - } - ], - "outputs": [ -... 
- ], - "fee": 0.00007050 -} -``` - -### Advanced example: fund PSBT with manual coin selection - -Let's now look at how we can implement manual coin selection by using the `fund` -command. We again want to send half a coin to -`bcrt1qjrdns4f5zwkv29ln86plqzs092yd5fg6nsz8re` but we want to select our inputs -manually. - -The first step is to look at all available UTXOs and choose. To do so, we use -the `listunspent` command: - -```shell script -$ lncli listunspent - -{ - "utxos": [ - { - "address_type": 0, - "address": "bcrt1qmsq36rtc6ap3m0m6jryu0ez923et6kxrv46t4w", - "amount_sat": 100000000, - "pk_script": "0014dc011d0d78d7431dbf7a90c9c7e4455472bd58c3", - "outpoint": "3597b451ff56bc901eb806e8c644a004e934b4c208679756b4cddc455c768c48:1", - "confirmations": 6 - }, - { - "address_type": 0, - "address": "bcrt1q92we8pecdnpjxdjq09etuccpat2qrm2acu4256", - "amount_sat": 67984726, - "pk_script": "00142a9d9387386cc32336407972be6301ead401ed5d", - "outpoint": "f8efa583e93ab71debe62a5374f91563aa10a3461518042aaddb464b656350e2:1", - "confirmations": 24 - }, -... 
- ] -} -``` - -Next, we choose these two inputs and create the PSBT: - -```shell script -$ lncli wallet psbt fund --outputs='{"bcrt1qjrdns4f5zwkv29ln86plqzs092yd5fg6nsz8re":50000000}' \ - --inputs='["3597b451ff56bc901eb806e8c644a004e934b4c208679756b4cddc455c768c48:1","f8efa583e93ab71debe62a5374f91563aa10a3461518042aaddb464b656350e2:1"]' - -{ - "psbt": "cHNidP8BAJoCAAAAAkiMdlxF3M20VpdnCMK0NOkEoETG6Aa4HpC8Vv9RtJc1AQAAAAAAAAAA4lBjZUtG260qBBgVRqMQqmMV+XRTKubrHbc66YOl7/gBAAAAAAAAAAACgPD6AgAAAAAWABSQ2zhVNBOsxRfzPoPwCg8qiNolGtIkCAcAAAAAFgAUuvRP5r7qAvj0egDxyX9/FH+vukgAAAAAAAEA3gIAAAAAAQEr9IZcho/gV/6fH8C8P+yhNRZP+l3YuxsyatdYcS0S6AEAAAAA/v///wLI/8+yAAAAABYAFDXoRFwgXNO5VVtVq2WpaENh6blAAOH1BQAAAAAWABTcAR0NeNdDHb96kMnH5EVUcr1YwwJHMEQCIDqugtYLp4ebJAZvOdieshLi1lLuPl2tHQG4jM4ybwEGAiBeMpCkbHBmzYvljxb1JBQyVAMuoco0xIfi+5OQdHuXaAEhAnH96NhTW09X0npE983YBsHUoMPI4U4xBtHenpZVTEqpVwAAAAEBHwDh9QUAAAAAFgAU3AEdDXjXQx2/epDJx+RFVHK9WMMBAwQBAAAAAAEA6wIAAAAAAQFvGpeAR/0OXNyqo0zrXSzmkvVfbnytrr4onbZ61vscBwEAAAAAAAAAAAIAJPQAAAAAACIAILtTVbSIkaFDqjsJ7EOHmTfpXq/fbnrGkD3/GYHYHJtMVl0NBAAAAAAWABQqnZOHOGzDIzZAeXK+YwHq1AHtXQJIMEUCIQDgx3xEhlioV1Q+yAPKiaXMwkv1snoejbckZOKFe0R6WAIgB41dEvK3zxI665YVWfcih0IThTkPoOiMgd6xGaKQXbwBIQMdgXMwQDF+Z+r3x5JKdm1TBvXDuYC0cbrnLyqJEU2ciQAAAAABAR9WXQ0EAAAAABYAFCqdk4c4bMMjNkB5cr5jAerUAe1dAQMEAQAAAAAAAA==", - "change_output_index": 1, - "locks": [ - { - "id": "ede19a92ed321a4705f8a1cccc1d4f6182545d4bb4fae08bd5937831b7e38f98", - "outpoint": "3597b451ff56bc901eb806e8c644a004e934b4c208679756b4cddc455c768c48:1", - "expiration": 1601560626 - }, - { - "id": "ede19a92ed321a4705f8a1cccc1d4f6182545d4bb4fae08bd5937831b7e38f98", - "outpoint": "f8efa583e93ab71debe62a5374f91563aa10a3461518042aaddb464b656350e2:1", - "expiration": 1601560626 - } - ] -} -``` - -Inspecting this PSBT, we notice that the two inputs were chosen and a large -change change output was added at index 1: - -```shell script -$ bitcoin-cli decodepsbt 
cHNidP8BAJoCAAAAAkiMdlxF3M20VpdnCMK0NOkEoETG6Aa4HpC8Vv9RtJc1AQAAAAAAAAAA4lBjZUtG260qBBgVRqMQqmMV+XRTKubrHbc66YOl7/gBAAAAAAAAAAACgPD6AgAAAAAWABSQ2zhVNBOsxRfzPoPwCg8qiNolGtIkCAcAAAAAFgAUuvRP5r7qAvj0egDxyX9/FH+vukgAAAAAAAEA3gIAAAAAAQEr9IZcho/gV/6fH8C8P+yhNRZP+l3YuxsyatdYcS0S6AEAAAAA/v///wLI/8+yAAAAABYAFDXoRFwgXNO5VVtVq2WpaENh6blAAOH1BQAAAAAWABTcAR0NeNdDHb96kMnH5EVUcr1YwwJHMEQCIDqugtYLp4ebJAZvOdieshLi1lLuPl2tHQG4jM4ybwEGAiBeMpCkbHBmzYvljxb1JBQyVAMuoco0xIfi+5OQdHuXaAEhAnH96NhTW09X0npE983YBsHUoMPI4U4xBtHenpZVTEqpVwAAAAEBHwDh9QUAAAAAFgAU3AEdDXjXQx2/epDJx+RFVHK9WMMBAwQBAAAAAAEA6wIAAAAAAQFvGpeAR/0OXNyqo0zrXSzmkvVfbnytrr4onbZ61vscBwEAAAAAAAAAAAIAJPQAAAAAACIAILtTVbSIkaFDqjsJ7EOHmTfpXq/fbnrGkD3/GYHYHJtMVl0NBAAAAAAWABQqnZOHOGzDIzZAeXK+YwHq1AHtXQJIMEUCIQDgx3xEhlioV1Q+yAPKiaXMwkv1snoejbckZOKFe0R6WAIgB41dEvK3zxI665YVWfcih0IThTkPoOiMgd6xGaKQXbwBIQMdgXMwQDF+Z+r3x5JKdm1TBvXDuYC0cbrnLyqJEU2ciQAAAAABAR9WXQ0EAAAAABYAFCqdk4c4bMMjNkB5cr5jAerUAe1dAQMEAQAAAAAAAA== - -{ -"tx": { - "txid": "e62356b99c3097eaa1241ff8e39b996917e66b13e4c0ccba3698982d746c3b76", - "hash": "e62356b99c3097eaa1241ff8e39b996917e66b13e4c0ccba3698982d746c3b76", - "version": 2, - "size": 154, - "vsize": 154, - "weight": 616, - "locktime": 0, - "vin": [ - { - "txid": "3597b451ff56bc901eb806e8c644a004e934b4c208679756b4cddc455c768c48", - "vout": 1, - "scriptSig": { - "asm": "", - "hex": "" - }, - "sequence": 0 - }, - { - "txid": "f8efa583e93ab71debe62a5374f91563aa10a3461518042aaddb464b656350e2", - "vout": 1, - "scriptSig": { - "asm": "", - "hex": "" - }, - "sequence": 0 - } - ], - "vout": [ - { - "value": 0.50000000, - "n": 0, - "scriptPubKey": { - "asm": "0 90db38553413acc517f33e83f00a0f2a88da251a", - "hex": "001490db38553413acc517f33e83f00a0f2a88da251a", - "reqSigs": 1, - "type": "witness_v0_keyhash", - "addresses": [ - "bcrt1qjrdns4f5zwkv29ln86plqzs092yd5fg6nsz8re" - ] - } - }, - { - "value": 1.17974226, - "n": 1, - "scriptPubKey": { - "asm": "0 baf44fe6beea02f8f47a00f1c97f7f147fafba48", - "hex": 
"0014baf44fe6beea02f8f47a00f1c97f7f147fafba48", - "reqSigs": 1, - "type": "witness_v0_keyhash", - "addresses": [ - "bcrt1qht6yle47agp03ar6qrcujlmlz3l6lwjgjv36zl" - ] - } - } - ] -}, -"unknown": { -}, -"inputs": [ -... -], -"outputs": [ -... -], -"fee": 0.00010500 -} -``` - -## Signing and finalizing a PSBT - -Assuming we now want to sign the transaction that we created in the previous -example, we simply pass it to the `finalize` sub command of the wallet: - -```shell script -$ lncli wallet psbt finalize cHNidP8BAJoCAAAAAkiMdlxF3M20VpdnCMK0NOkEoETG6Aa4HpC8Vv9RtJc1AQAAAAAAAAAA4lBjZUtG260qBBgVRqMQqmMV+XRTKubrHbc66YOl7/gBAAAAAAAAAAACgPD6AgAAAAAWABSQ2zhVNBOsxRfzPoPwCg8qiNolGtIkCAcAAAAAFgAUuvRP5r7qAvj0egDxyX9/FH+vukgAAAAAAAEA3gIAAAAAAQEr9IZcho/gV/6fH8C8P+yhNRZP+l3YuxsyatdYcS0S6AEAAAAA/v///wLI/8+yAAAAABYAFDXoRFwgXNO5VVtVq2WpaENh6blAAOH1BQAAAAAWABTcAR0NeNdDHb96kMnH5EVUcr1YwwJHMEQCIDqugtYLp4ebJAZvOdieshLi1lLuPl2tHQG4jM4ybwEGAiBeMpCkbHBmzYvljxb1JBQyVAMuoco0xIfi+5OQdHuXaAEhAnH96NhTW09X0npE983YBsHUoMPI4U4xBtHenpZVTEqpVwAAAAEBHwDh9QUAAAAAFgAU3AEdDXjXQx2/epDJx+RFVHK9WMMBAwQBAAAAAAEA6wIAAAAAAQFvGpeAR/0OXNyqo0zrXSzmkvVfbnytrr4onbZ61vscBwEAAAAAAAAAAAIAJPQAAAAAACIAILtTVbSIkaFDqjsJ7EOHmTfpXq/fbnrGkD3/GYHYHJtMVl0NBAAAAAAWABQqnZOHOGzDIzZAeXK+YwHq1AHtXQJIMEUCIQDgx3xEhlioV1Q+yAPKiaXMwkv1snoejbckZOKFe0R6WAIgB41dEvK3zxI665YVWfcih0IThTkPoOiMgd6xGaKQXbwBIQMdgXMwQDF+Z+r3x5JKdm1TBvXDuYC0cbrnLyqJEU2ciQAAAAABAR9WXQ0EAAAAABYAFCqdk4c4bMMjNkB5cr5jAerUAe1dAQMEAQAAAAAAAA== - -{ - "psbt": 
"cHNidP8BAJoCAAAAAkiMdlxF3M20VpdnCMK0NOkEoETG6Aa4HpC8Vv9RtJc1AQAAAAAAAAAA4lBjZUtG260qBBgVRqMQqmMV+XRTKubrHbc66YOl7/gBAAAAAAAAAAACgPD6AgAAAAAWABSQ2zhVNBOsxRfzPoPwCg8qiNolGtIkCAcAAAAAFgAUuvRP5r7qAvj0egDxyX9/FH+vukgAAAAAAAEA3gIAAAAAAQEr9IZcho/gV/6fH8C8P+yhNRZP+l3YuxsyatdYcS0S6AEAAAAA/v///wLI/8+yAAAAABYAFDXoRFwgXNO5VVtVq2WpaENh6blAAOH1BQAAAAAWABTcAR0NeNdDHb96kMnH5EVUcr1YwwJHMEQCIDqugtYLp4ebJAZvOdieshLi1lLuPl2tHQG4jM4ybwEGAiBeMpCkbHBmzYvljxb1JBQyVAMuoco0xIfi+5OQdHuXaAEhAnH96NhTW09X0npE983YBsHUoMPI4U4xBtHenpZVTEqpVwAAAAEBHwDh9QUAAAAAFgAU3AEdDXjXQx2/epDJx+RFVHK9WMMBCGwCSDBFAiEAuiv52IX5wZlYJqqVGsQPfeQ/kneCNRD34v5yplNpuMYCIECHVUhjHPKSiWSsYEKD4JWGAyUwQHgDytA1whFOyLclASECg7PDfGE/uURta5/R42Vso6QKmVAgYMhjWlXENkE/x+QAAQDrAgAAAAABAW8al4BH/Q5c3KqjTOtdLOaS9V9ufK2uviidtnrW+xwHAQAAAAAAAAAAAgAk9AAAAAAAIgAgu1NVtIiRoUOqOwnsQ4eZN+ler99uesaQPf8Zgdgcm0xWXQ0EAAAAABYAFCqdk4c4bMMjNkB5cr5jAerUAe1dAkgwRQIhAODHfESGWKhXVD7IA8qJpczCS/Wyeh6NtyRk4oV7RHpYAiAHjV0S8rfPEjrrlhVZ9yKHQhOFOQ+g6IyB3rEZopBdvAEhAx2BczBAMX5n6vfHkkp2bVMG9cO5gLRxuucvKokRTZyJAAAAAAEBH1ZdDQQAAAAAFgAUKp2ThzhswyM2QHlyvmMB6tQB7V0BCGwCSDBFAiEAqK7FSrqWe2non0kl96yu2+gSXGPYPC7ZjzVZEMMWtpYCIGTzCDHZhJYGPrsnBWU8o0Eyd4nBa+6d037xGFcGUYJLASECORgkj75Xu8+DTh8bqYBIvNx1hSxV7VSJOwY6jam6LY8AAAA=", - "final_tx": "02000000000102488c765c45dccdb456976708c2b434e904a044c6e806b81e90bc56ff51b49735010000000000000000e25063654b46dbad2a04181546a310aa6315f974532ae6eb1db73ae983a5eff80100000000000000000280f0fa020000000016001490db38553413acc517f33e83f00a0f2a88da251ad224080700000000160014baf44fe6beea02f8f47a00f1c97f7f147fafba4802483045022100ba2bf9d885f9c1995826aa951ac40f7de43f9277823510f7e2fe72a65369b8c6022040875548631cf2928964ac604283e09586032530407803cad035c2114ec8b72501210283b3c37c613fb9446d6b9fd1e3656ca3a40a99502060c8635a55c436413fc7e402483045022100a8aec54aba967b69e89f4925f7acaedbe8125c63d83c2ed98f355910c316b696022064f30831d98496063ebb2705653ca341327789c16bee9dd37ef118570651824b0121023918248fbe57bbcf834e1f1ba98048bcdc75852c55ed54893b063a8da9ba2d8f00000000" -} 
-``` - -That final transaction can now, in theory, be broadcast. But **it is very -important** that you **do not** publish it manually if any of the involved -outputs are used to fund a channel. See -[the safety warning below](#safety-warning) to learn the reason for this. - -## Opening a channel by using a PSBT - -This is a step-by-step guide on how to open a channel with `lnd` by using a PSBT -as the funding transaction. -We will use `bitcoind` to create and sign the transaction just to keep the -example simple. Of course any other PSBT compatible wallet could be used and the -process would likely be spread out over multiple signing steps. The goal of this -example is not to cover each and every possible edge case but to help users of -`lnd` understand what inputs the `lncli` utility expects. - -The goal is to open a channel of 1'234'567 satoshis to the node -`03db1e56e5f76bc4018cf6f03d1bb98a7ae96e3f18535e929034f85e7f1ca2b8ac` by using -a PSBT. That means, `lnd` can have a wallet balance of `0` and is still able to -open a channel. We'll jump into an example right away. - -The new funding flow has a small caveat: _Time matters_. - -When opening a channel using the PSBT flow, we start the negotiation -with the remote peer immediately so we can obtain their multisig key they are -going to use for the channel. Then we pause the whole process until we get a -fully signed transaction back from the user. Unfortunately there is no reliable -way to know after how much time the remote node starts to clean up and "forgets" -about the pending channel. If the remote node is an `lnd` node, we know it's -after 10 minutes. **So as long as the whole process takes less than 10 minutes, -everything should work fine.** - -### Safety warning - -**DO NOT PUBLISH** the finished transaction by yourself or with another tool. -lnd MUST publish it in the proper funding flow order **OR THE FUNDS CAN BE -LOST**! 
- -This is very important to remember when using wallets like `Wasabi` for -instance, where the "publish" button is very easy to hit by accident. - -### 1. Use the new `--psbt` flag in `lncli openchannel` - -The new `--psbt` flag in the `openchannel` command starts an interactive dialog -between `lncli` and the user. Below the command you see an example output from -a regtest setup. Of course all values will be different. - -```shell script -$ lncli openchannel --node_key 03db1e56e5f76bc4018cf6f03d1bb98a7ae96e3f18535e929034f85e7f1ca2b8ac --local_amt 1234567 --psbt - -Starting PSBT funding flow with pending channel ID fc7853889a04d33b8115bd79ebc99c5eea80d894a0bead40fae5a06bcbdccd3d. -PSBT funding initiated with peer 03db1e56e5f76bc4018cf6f03d1bb98a7ae96e3f18535e929034f85e7f1ca2b8ac. -Please create a PSBT that sends 0.01234567 BTC (1234567 satoshi) to the funding address bcrt1qh33ghvgjj3ef625nl9jxz6nnrz2z9e65vsdey7w5msrklgr6rc0sv0s08q. - -Example with bitcoind: - bitcoin-cli walletcreatefundedpsbt [] '[{"bcrt1qh33ghvgjj3ef625nl9jxz6nnrz2z9e65vsdey7w5msrklgr6rc0sv0s08q":0.01234567}]' - -Or if you are using a wallet that can fund a PSBT directly (currently not -possible with bitcoind), you can use this PSBT that contains the same address -and amount: cHNidP8BADUCAAAAAAGH1hIAAAAAACIAILxii7ESlHKdKpP5ZGFqcxiUIudUZBuSedTcB2+geh4fAAAAAAAA - -Paste the funded PSBT here to continue the funding flow. -Base64 encoded PSBT: -``` - -The command line now waits until a PSBT is entered. We'll create one in the next -step. Make sure to use a new shell window/tab for the next commands and leave -the prompt from the `openchannel` running as is. - -### 2a. Use `bitcoind` to create a funding transaction - -The output of the last command already gave us an example command to use with -`bitcoind`. We'll go ahead and execute it now. 
The meaning of this command is -something like "bitcoind, give me a PSBT that sends the given amount to the -given address, choose any input you see fit": - -```shell script -$ bitcoin-cli walletcreatefundedpsbt [] '[{"bcrt1qh33ghvgjj3ef625nl9jxz6nnrz2z9e65vsdey7w5msrklgr6rc0sv0s08q":0.01234567}]' - -{ - "psbt": "cHNidP8BAH0CAAAAAbxLLf9+AYfqfF69QAQuETnL6cas7GDiWBZF+3xxc/Y/AAAAAAD+////AofWEgAAAAAAIgAgvGKLsRKUcp0qk/lkYWpzGJQi51RkG5J51NwHb6B6Hh+1If0jAQAAABYAFL+6THEGhybJnOkFGSRFbtCcPOG8AAAAAAABAR8wBBAkAQAAABYAFHemJ11XF7CU7WXBIJLD/qZF+6jrAAAA", - "fee": 0.00003060, - "changepos": 1 -} -``` - -We see that `bitcoind` has given us a transaction that would pay `3060` satoshi -in fees. Fee estimation/calculation can be changed with parameters of the -`walletcreatefundedpsbt` command. To see all options, use -`bitcoin-cli help walletcreatefundedpsbt`. - -If we want to know what exactly is in this PSBT, we can look at it with the -`decodepsbt` command: - -```shell script -$ bitcoin-cli decodepsbt cHNidP8BAH0CAAAAAbxLLf9+AYfqfF69QAQuETnL6cas7GDiWBZF+3xxc/Y/AAAAAAD+////AofWEgAAAAAAIgAgvGKLsRKUcp0qk/lkYWpzGJQi51RkG5J51NwHb6B6Hh+1If0jAQAAABYAFL+6THEGhybJnOkFGSRFbtCcPOG8AAAAAAABAR8wBBAkAQAAABYAFHemJ11XF7CU7WXBIJLD/qZF+6jrAAAA - -{ - "tx": { - "txid": "374504e4246a93a45b4a2c2bc31d8adc8525aa101c7b9065db6dc01c4bdfce0a", - "hash": "374504e4246a93a45b4a2c2bc31d8adc8525aa101c7b9065db6dc01c4bdfce0a", - "version": 2, - "size": 125, - "vsize": 125, - "weight": 500, - "locktime": 0, - "vin": [ - { - "txid": "3ff673717cfb451658e260ecacc6e9cb39112e0440bd5e7cea87017eff2d4bbc", - "vout": 0, - "scriptSig": { - "asm": "", - "hex": "" - }, - "sequence": 4294967294 - } - ], - "vout": [ - { - "value": 0.01234567, - "n": 0, - "scriptPubKey": { - "asm": "0 bc628bb11294729d2a93f964616a73189422e754641b9279d4dc076fa07a1e1f", - "hex": "0020bc628bb11294729d2a93f964616a73189422e754641b9279d4dc076fa07a1e1f", - "reqSigs": 1, - "type": "witness_v0_scripthash", - "addresses": [ - 
"bcrt1qh33ghvgjj3ef625nl9jxz6nnrz2z9e65vsdey7w5msrklgr6rc0sv0s08q" - ] - } - }, - { - "value": 48.98759093, - "n": 1, - "scriptPubKey": { - "asm": "0 bfba4c71068726c99ce9051924456ed09c3ce1bc", - "hex": "0014bfba4c71068726c99ce9051924456ed09c3ce1bc", - "reqSigs": 1, - "type": "witness_v0_keyhash", - "addresses": [ - "bcrt1qh7aycugxsunvn88fq5vjg3tw6zwrecduvvgre5" - ] - } - } - ] - }, - "unknown": { - }, - "inputs": [ - { - "witness_utxo": { - "amount": 48.99996720, - "scriptPubKey": { - "asm": "0 77a6275d5717b094ed65c12092c3fea645fba8eb", - "hex": "001477a6275d5717b094ed65c12092c3fea645fba8eb", - "type": "witness_v0_keyhash", - "address": "bcrt1qw7nzwh2hz7cffmt9cysf9sl75ezlh28tzl4n4e" - } - } - } - ], - "outputs": [ - { - }, - { - } - ], - "fee": 0.00003060 -} -``` - -This tells us that we got a PSBT with a big input, the channel output and a -change output for the rest. Everything is there but the signatures/witness data, -which is exactly what we need. - -### 2b. Use `lnd` to create a funding transaction - -Starting with version `v0.12.0`, `lnd` can also create PSBTs. This assumes a -scenario where one instance of `lnd` only has public keys (watch only mode) and -a secondary, hardened and firewalled `lnd` instance has the corresponding -private keys. 
On the watching only mode, the following command can be used to -create the funding PSBT: - -```shell script -$ lncli wallet psbt fund --outputs='{"bcrt1qh33ghvgjj3ef625nl9jxz6nnrz2z9e65vsdey7w5msrklgr6rc0sv0s08q":1234567}' - -{ - "psbt": "cHNidP8BAH0CAAAAAUiMdlxF3M20VpdnCMK0NOkEoETG6Aa4HpC8Vv9RtJc1AQAAAAD/////AofWEgAAAAAAIgAgvGKLsRKUcp0qk/lkYWpzGJQi51RkG5J51NwHb6B6Hh+X7OIFAAAAABYAFNigOB6EbCLRi+Evlv4r2yJx63NxAAAAAAABAN4CAAAAAAEBK/SGXIaP4Ff+nx/AvD/soTUWT/pd2LsbMmrXWHEtEugBAAAAAP7///8CyP/PsgAAAAAWABQ16ERcIFzTuVVbVatlqWhDYem5QADh9QUAAAAAFgAU3AEdDXjXQx2/epDJx+RFVHK9WMMCRzBEAiA6roLWC6eHmyQGbznYnrIS4tZS7j5drR0BuIzOMm8BBgIgXjKQpGxwZs2L5Y8W9SQUMlQDLqHKNMSH4vuTkHR7l2gBIQJx/ejYU1tPV9J6RPfN2AbB1KDDyOFOMQbR3p6WVUxKqVcAAAABAR8A4fUFAAAAABYAFNwBHQ1410Mdv3qQycfkRVRyvVjDAQMEAQAAAAAAAA==", - "change_output_index": 1, - "locks": [ - { - "id": "ede19a92ed321a4705f8a1cccc1d4f6182545d4bb4fae08bd5937831b7e38f98", - "outpoint": "3597b451ff56bc901eb806e8c644a004e934b4c208679756b4cddc455c768c48:1", - "expiration": 1601562037 - } - ] -} -``` - -### 3. Verify and sign the PSBT - -Now that we have a valid PSBT that has everything but the final -signatures/witness data, we can paste it into the prompt in `lncli` that is -still waiting for our input. - -```shell script -... -Base64 encoded PSBT: cHNidP8BAH0CAAAAAbxLLf9+AYfqfF69QAQuETnL6cas7GDiWBZF+3xxc/Y/AAAAAAD+////AofWEgAAAAAAIgAgvGKLsRKUcp0qk/lkYWpzGJQi51RkG5J51NwHb6B6Hh+1If0jAQAAABYAFL+6THEGhybJnOkFGSRFbtCcPOG8AAAAAAABAR8wBBAkAQAAABYAFHemJ11XF7CU7WXBIJLD/qZF+6jrAAAA - -PSBT verified by lnd, please continue the funding flow by signing the PSBT by -all required parties/devices. Once the transaction is fully signed, paste it -again here. - -Base64 encoded PSBT: -``` - -We can now go ahead and sign the transaction. We are going to use `bitcoind` for -this again, but in practice this would now happen on a hardware wallet and -perhaps `bitcoind` would only know the public keys and couldn't sign for the -transaction itself. 
Again, this is only an example and can't reflect all -real-world use cases. - -```shell script -$ bitcoin-cli walletprocesspsbt cHNidP8BAH0CAAAAAbxLLf9+AYfqfF69QAQuETnL6cas7GDiWBZF+3xxc/Y/AAAAAAD+////AofWEgAAAAAAIgAgvGKLsRKUcp0qk/lkYWpzGJQi51RkG5J51NwHb6B6Hh+1If0jAQAAABYAFL+6THEGhybJnOkFGSRFbtCcPOG8AAAAAAABAR8wBBAkAQAAABYAFHemJ11XF7CU7WXBIJLD/qZF+6jrAAAA - -{ -"psbt": "cHNidP8BAH0CAAAAAbxLLf9+AYfqfF69QAQuETnL6cas7GDiWBZF+3xxc/Y/AAAAAAD+////AofWEgAAAAAAIgAgvGKLsRKUcp0qk/lkYWpzGJQi51RkG5J51NwHb6B6Hh+1If0jAQAAABYAFL+6THEGhybJnOkFGSRFbtCcPOG8AAAAAAABAR8wBBAkAQAAABYAFHemJ11XF7CU7WXBIJLD/qZF+6jrAQhrAkcwRAIgHKQbenZYvgADRd9TKGVO36NnaIgW3S12OUg8XGtSrE8CICmeaYoJ/U7Ecm+/GneY8i2hu2QCaQnuomJgzn+JAnrDASEDUBmCLcsybA5qXSRBBdZ0Uk/FQiay9NgOpv4D26yeJpAAAAA=", -"complete": true -} -``` - -If you are using the two `lnd` node model as described in -[2b](#2b-use-lnd-to-create-a-funding-transaction), you can achieve the same -result with the following command: - -```shell script -$ lncli wallet psbt finalize cHNidP8BAH0CAAAAAUiMdlxF3M20VpdnCMK0NOkEoETG6Aa4HpC8Vv9RtJc1AQAAAAD/////AofWEgAAAAAAIgAgvGKLsRKUcp0qk/lkYWpzGJQi51RkG5J51NwHb6B6Hh+X7OIFAAAAABYAFNigOB6EbCLRi+Evlv4r2yJx63NxAAAAAAABAN4CAAAAAAEBK/SGXIaP4Ff+nx/AvD/soTUWT/pd2LsbMmrXWHEtEugBAAAAAP7///8CyP/PsgAAAAAWABQ16ERcIFzTuVVbVatlqWhDYem5QADh9QUAAAAAFgAU3AEdDXjXQx2/epDJx+RFVHK9WMMCRzBEAiA6roLWC6eHmyQGbznYnrIS4tZS7j5drR0BuIzOMm8BBgIgXjKQpGxwZs2L5Y8W9SQUMlQDLqHKNMSH4vuTkHR7l2gBIQJx/ejYU1tPV9J6RPfN2AbB1KDDyOFOMQbR3p6WVUxKqVcAAAABAR8A4fUFAAAAABYAFNwBHQ1410Mdv3qQycfkRVRyvVjDAQMEAQAAAAAAAA== - -{ - "psbt": 
"cHNidP8BAH0CAAAAAUiMdlxF3M20VpdnCMK0NOkEoETG6Aa4HpC8Vv9RtJc1AQAAAAD/////AofWEgAAAAAAIgAgvGKLsRKUcp0qk/lkYWpzGJQi51RkG5J51NwHb6B6Hh+X7OIFAAAAABYAFNigOB6EbCLRi+Evlv4r2yJx63NxAAAAAAABAN4CAAAAAAEBK/SGXIaP4Ff+nx/AvD/soTUWT/pd2LsbMmrXWHEtEugBAAAAAP7///8CyP/PsgAAAAAWABQ16ERcIFzTuVVbVatlqWhDYem5QADh9QUAAAAAFgAU3AEdDXjXQx2/epDJx+RFVHK9WMMCRzBEAiA6roLWC6eHmyQGbznYnrIS4tZS7j5drR0BuIzOMm8BBgIgXjKQpGxwZs2L5Y8W9SQUMlQDLqHKNMSH4vuTkHR7l2gBIQJx/ejYU1tPV9J6RPfN2AbB1KDDyOFOMQbR3p6WVUxKqVcAAAABAR8A4fUFAAAAABYAFNwBHQ1410Mdv3qQycfkRVRyvVjDAQhrAkcwRAIgU3Ow7cLkKrg8BJe0U0n9qFLPizqEzY0JtjVlpWOEk14CID/4AFNfgwNENN2LoOs0C6uHgt4sk8rNoZG+VMGzOC/HASECg7PDfGE/uURta5/R42Vso6QKmVAgYMhjWlXENkE/x+QAAAA=", - "final_tx": "02000000000101488c765c45dccdb456976708c2b434e904a044c6e806b81e90bc56ff51b497350100000000ffffffff0287d6120000000000220020bc628bb11294729d2a93f964616a73189422e754641b9279d4dc076fa07a1e1f97ece20500000000160014d8a0381e846c22d18be12f96fe2bdb2271eb73710247304402205373b0edc2e42ab83c0497b45349fda852cf8b3a84cd8d09b63565a56384935e02203ff800535f83034434dd8ba0eb340bab8782de2c93cacda191be54c1b3382fc701210283b3c37c613fb9446d6b9fd1e3656ca3a40a99502060c8635a55c436413fc7e400000000" -} -``` - -Interpreting the output, we now have a complete, final, and signed transaction -inside the PSBT. - -**!!! WARNING !!!** - -**DO NOT PUBLISH** the finished transaction by yourself or with another tool. -lnd MUST publish it in the proper funding flow order **OR THE FUNDS CAN BE -LOST**! - -Let's give it to `lncli` to continue: - -```shell script -... 
-Base64 encoded PSBT: cHNidP8BAH0CAAAAAbxLLf9+AYfqfF69QAQuETnL6cas7GDiWBZF+3xxc/Y/AAAAAAD+////AofWEgAAAAAAIgAgvGKLsRKUcp0qk/lkYWpzGJQi51RkG5J51NwHb6B6Hh+1If0jAQAAABYAFL+6THEGhybJnOkFGSRFbtCcPOG8AAAAAAABAR8wBBAkAQAAABYAFHemJ11XF7CU7WXBIJLD/qZF+6jrAQhrAkcwRAIgHKQbenZYvgADRd9TKGVO36NnaIgW3S12OUg8XGtSrE8CICmeaYoJ/U7Ecm+/GneY8i2hu2QCaQnuomJgzn+JAnrDASEDUBmCLcsybA5qXSRBBdZ0Uk/FQiay9NgOpv4D26yeJpAAAAA= -{ - "funding_txid": "374504e4246a93a45b4a2c2bc31d8adc8525aa101c7b9065db6dc01c4bdfce0a" -} -``` - -Success! We now have the final transaction ID of the published funding -transaction. Now we only have to wait for some confirmations, then we can start -using the freshly created channel. - -## Batch opening channels - -The PSBT channel funding flow makes it possible to open multiple channels in one -transaction. This can be achieved by taking the initial PSBT returned by the -`openchannel` and feed it into the `--base_psbt` parameter of the next -`openchannel` command. This won't work with `bitcoind` though, as it cannot take -a PSBT as partial input for the `walletcreatefundedpsbt` command. - -However, the `bitcoin-cli` examples from the command line can be combined into -a single command. 
For example: - -Channel 1: -```shell script -$ bitcoin-cli walletcreatefundedpsbt [] '[{"tb1qywvazres587w9wyy8uw03q8j9ek6gc9crwx4jvhqcmew4xzsvqcq3jjdja":0.01000000}]' -``` - -Channel 2: -```shell script -$ bitcoin-cli walletcreatefundedpsbt [] '[{"tb1q53626fcwwtcdc942zaf4laqnr3vg5gv4g0hakd2h7fw2pmz6428sk3ezcx":0.01000000}]' -``` - -Combined command to get batch PSBT: -```shell script -$ bitcoin-cli walletcreatefundedpsbt [] '[{"tb1q53626fcwwtcdc942zaf4laqnr3vg5gv4g0hakd2h7fw2pmz6428sk3ezcx":0.01000000},{"tb1qywvazres587w9wyy8uw03q8j9ek6gc9crwx4jvhqcmew4xzsvqcq3jjdja":0.01000000}]' -``` - -### Safety warning about batch transactions - -As mentioned before, the PSBT channel funding flow works by pausing the funding -negotiation with the remote peer directly after the multisig keys have been -exchanged. That means, the channel isn't fully opened yet at the time the PSBT -is signed. This is fine for a single channel because the signed transaction is -only published after the counter-signed commitment transactions were exchanged -and the funds can be spent again by both parties. - -When doing batch transactions, **publishing** the whole transaction with -multiple channel funding outputs **too early could lead to loss of funds**! - -For example, let's say we want to open two channels. We call `openchannel --psbt` -two times, combine the funding addresses as shown above, verify the PSBT, sign -it and finally paste it into the terminal of the first command. `lnd` then goes -ahead and finishes the negotiations with peer 1. If successful, `lnd` publishes -the transaction. In the meantime we paste the same PSBT into the second terminal -window. But by now, the peer 2 for channel 2 has timed out our funding flow and -aborts the negotiation. Normally this would be fine, we would just not publish -the funding transaction. But in the batch case, channel 1 has already published -the transaction that contains both channel outputs. 
But because we never got a -signature from peer 2 to spend the funds now locked in a 2-of-2 multisig, the -funds are lost (unless peer 2 cooperates in a complicated, manual recovery -process). - -### Use --no_publish for batch transactions - -To mitigate the problem described in the section above, when opening multiple -channels in one batch transaction, it is **imperative to use the -`--no_publish`** flag for each channel but the very last. This prevents the -full batch transaction from being published before each and every single channel has -fully completed its funding negotiation. diff --git a/lnd/docs/recovery.md b/lnd/docs/recovery.md deleted file mode 100644 index 49ac01a1..00000000 --- a/lnd/docs/recovery.md +++ /dev/null @@ -1,365 +0,0 @@ -# Table of Contents - -* [Recovering Funds From `lnd` (funds are safu!)](#recovering-funds-from-lnd-funds-are-safu) - * [On-Chain Recovery](#on-chain-recovery) - * [24-word Cipher Seeds](#24-word-cipher-seeds) - * [Wallet and Seed Passphrases](#wallet-and-seed-passphrases) - * [Starting On-Chain Recovery](#starting-on-chain-recovery) - * [Forced In-Place Rescan](#forced-in-place-rescan) - * [Off-Chain Recovery](#off-chain-recovery) - * [Obtaining SCBs](#obtaining-scbs) - * [On-Disk `channel.backup`](#on-disk-channelbackup) - * [Using the `ExportChanBackup` RPC](#using-the-exportchanbackup-rpc) - * [Streaming Updates via `SubscribeChannelBackups`.](#streaming-updates-via-subscribechannelbackups) - * [Recovering Using SCBs](#recovering-using-scbs) - -# Recovering Funds From `lnd` (funds are safu!) - -In this document, we'll go over the various built-in mechanisms for recovering -funds from `lnd` due to any sort of data loss, or malfunction. Coins in `lnd` -can exist in one of two pools: on-chain or off-chain. On-chain funds are -outputs under the control of `lnd` that can be spent immediately, and without -any auxiliary data.
Off-chain funds on the other hand exist within a 2-of-2 -multi-sig output typically referred to as a payment channel. Depending on the -exact nature of operation of a given `lnd` node, one of these pools of funds -may be empty. - -Fund recovery for `lnd` will require two pieces of data: - 1. Your 24-word cipher seed - 2. Your encrypted Static Channel Backup file (or the raw data) - -If one is only attempting to recover _on chain_ funds, then only the first item -is required. - -The SCB file is encrypted using a key _derived_ from the user's seed. As a -result, it cannot be used in isolation. - -## On-Chain Recovery - -### 24-word Cipher Seeds - -When a new `lnd` node is created, it's given a 24-word seed phrase, called a -[`cipher seed`](https://github.com/lightningnetwork/lnd/tree/master/aezeed). -The two seed formats look similar, but the only commonality they share is -using the same default English dictionary. A valid seed phrase obtained over -the CLI `lncli create` command looks something like: -``` -!!!YOU MUST WRITE DOWN THIS SEED TO BE ABLE TO RESTORE THE WALLET!!! - ----------------BEGIN LND CIPHER SEED--------------- - 1. ability 2. noise 3. lift 4. document - 5. certain 6. month 7. shoot 8. perfect - 9. matrix 10. mango 11. excess 12. turkey -13. river 14. pitch 15. fluid 16. rack -17. drill 18. text 19. buddy 20. pool -21. soul 22. fatal 23. ship 24. jelly ----------------END LND CIPHER SEED----------------- - -!!!YOU MUST WRITE DOWN THIS SEED TO BE ABLE TO RESTORE THE WALLET!!! -``` - -### Wallet and Seed Passphrases - -During the creation process, users are first prompted to enter a **wallet -password**: -``` -Input wallet password: -Confirm wallet password: -``` - -This password is used to _encrypt_ the wallet on disk, which includes any -derived master private keys or public key data. - -Users can also _optionally_ enter a second passphrase which we call the _cipher -seed passphrase_: -``` -Your cipher seed can optionally be encrypted.
-Input your passphrase if you wish to encrypt it (or press enter to proceed without a cipher seed passphrase): -``` - -If specified, then this will be used to encrypt the cipher seed itself. The -cipher seed format is unique in that the 24-word phrase is actually a -_ciphertext_. As a result, there's no standard word list as any arbitrary -encoding can be used. If a passphrase is specified, then the cipher seed you -write down is actually an _encryption_ of the entropy used to generate the BIP -32 root key for the wallet. Unlike a BIP 39 24-word phrase, the cipher seed is -able to _detect_ incorrect passphrase. BIP 39 on the other hand, will instead -silently decrypt to a new (likely empty) wallet. - -### Starting On-Chain Recovery - -The initial entry point to trigger recovery of on-chain funds in the command -line is the `lncli create` command. -``` -⛰ lncli create -``` - -Next, one can enter a _new_ wallet password to encrypt any newly derived keys -as a result of the recovery process. -``` -Input wallet password: -Confirm wallet password: -``` - -Once a new wallet password has been obtained, the user will be prompted for -their _existing_ cipher seed: -``` -Input your 24-word mnemonic separated by spaces: ability noise lift document certain month shoot perfect matrix mango excess turkey river pitch fluid rack drill text buddy pool soul fatal ship jelly -``` - -If a _cipher seed passphrase_ was used when the seed was created, it MUST be entered now: -``` -Input your cipher seed passphrase (press enter if your seed doesn't have a passphrase): -``` - -Finally, the user has an option to choose a _recovery window_: -``` -Input an optional address look-ahead used to scan for used keys (default 2500): -``` - -The recovery window is a metric that the on-chain rescanner will use to -determine when all the "used" addresses have been found. 
If the recovery window -is two, lnd will fail to find funds in any addresses generated after the point -in which two consecutive addresses were generated but never used. If an `lnd` -on-chain wallet was extensively used, then users may want to _increase_ the -default value. - -If all the information provided was valid, then you'll be presented with the -seed again: -``` - -!!!YOU MUST WRITE DOWN THIS SEED TO BE ABLE TO RESTORE THE WALLET!!! - ----------------BEGIN LND CIPHER SEED--------------- - 1. ability 2. noise 3. lift 4. document - 5. certain 6. month 7. shoot 8. perfect - 9. matrix 10. mango 11. excess 12. turkey -13. river 14. pitch 15. fluid 16. rack -17. drill 18. text 19. buddy 20. pool -21. soul 22. fatal 23. ship 24. jelly ----------------END LND CIPHER SEED----------------- - -!!!YOU MUST WRITE DOWN THIS SEED TO BE ABLE TO RESTORE THE WALLET!!! - -lnd successfully initialized! -``` - -In `lnd`'s logs, you should see something along the lines of (irrelevant lines skipped): -``` -[INF] LNWL: Opened wallet -[INF] LTND: Wallet recovery mode enabled with address lookahead of 2500 addresses -[INF] LNWL: RECOVERY MODE ENABLED -- rescanning for used addresses with recovery_window=2500 -[INF] CHBU: Updating backup file at test_lnd3/data/chain/bitcoin/simnet/channel.backup -[INF] CHBU: Swapping old multi backup file from test_lnd3/data/chain/bitcoin/simnet/temp-dont-use.backup to test_lnd3/data/chain/bitcoin/simnet/channel.backup -[INF] LNWL: Seed birthday surpassed, starting recovery of wallet from height=748 hash=3032830c812a4a6ea305d8ead13b52e9e69d6400ff3c997970b6f76fbc770920 with recovery-window=2500 -[INF] LNWL: Scanning 1 blocks for recoverable addresses -[INF] LNWL: Recovered addresses from blocks 748-748 -[INF] LNWL: Started rescan from block 3032830c812a4a6ea305d8ead13b52e9e69d6400ff3c997970b6f76fbc770920 (height 748) for 800 addresses -[INF] LNWL: Catching up block hashes to height 748, this might take a while -[INF] LNWL: Done catching up block 
hashes -[INF] LNWL: Finished rescan for 800 addresses (synced to block 3032830c812a4a6ea305d8ead13b52e9e69d6400ff3c997970b6f76fbc770920, height 748) -``` - -That final line indicates the rescan is complete! If not all funds have -appeared, then the user may need to _repeat_ the process with a higher recovery -window. Depending on how old the wallet is (the cipher seed stores the wallet's -birthday!) and how many addresses were used, the rescan may take anywhere from -a few minutes to a few hours. To track the recovery progress, one can use the -command `lncli getrecoveryinfo`. When finished, the following is returned, -``` -{ - "recovery_mode": true, - "recovery_finished": true, - "progress": 1 -} -``` - -If the rescan wasn't able to complete fully (`lnd` was shutdown for example), -then from `lncli unlock`, it's possible to _restart_ the rescan from where it -left off with the `--recovery-window` argument: -``` -⛰ lncli unlock --recovery_window=2500 -``` - -Note that if this argument is not specified, then the wallet will not -_re-enter_ the recovery mode and may miss funds during the portion of the -rescan. - -### Forced In-Place Rescan - -The recovery methods described above assume a clean slate for a node, so -there's no existing UTXO or key data in the node's database. However, there're -times when an _existing_ node may want to _manually_ rescan the chain. We have -a command line flag for that! Just start `lnd` and add the following flag: -``` -⛰ lnd --reset-wallet-transactions -``` - -The `--reset-wallet-transactions` flag will _reset_ the best synced height of -the wallet back to its birthday, or genesis if the birthday isn't known (for -some older wallets). - -Just run `lnd` with the flag, unlock it, then the wallet should begin -rescanning. 
An entry resembling the following will show up in the logs once it's -complete: -``` -[INF] LNWL: Finished rescan for 800 addresses (synced to block 3032830c812a4a6ea305d8ead13b52e9e69d6400ff3c997970b6f76fbc770920, height 748) -``` - -**Remember to remove the flag once the rescan was completed successfully to -avoid rescanning again for every restart of lnd**. - -## Off-Chain Recovery - -After version `v0.6-beta` of `lnd`, the daemon now ships with a new feature -called Static Channel Backups (SCBs). We call these _static_ as they only need -to be obtained _once_: when the channel is created. From there on, a backup is -good until the channel is closed. The backup contains all the information we -need to initiate the Data Loss Protection (DLP) feature in the protocol, which -ultimately leads to us recovering the funds from the channel _on-chain_. This -is a foolproof _safe_ backup mechanism. - -We say _safe_, as care has been taken to ensure that there are no foot guns in -this method of backing up channels, vs doing things like `rsync`ing or copying -the `channel.db` file periodically. Those methods can be dangerous as one never -knows if they have the latest state of a channel or not. Instead, we aim to -provide a simple, safe method to allow users to recover the settled funds in -their channels in the case of partial or complete data loss. The backups -themselves are encrypted using a key derived from the user's seed, this way we -protect privacy of the users channels in the back up state, and ensure that a -random node can't attempt to import another user's channels. - -Given a valid SCB, the user will be able to recover funds that are fully -settled within their channels. By "fully settled" we mean funds that are in the -base commitment outputs, and not HTLCs. We can only restore these funds as -right after the channel is created, as we have all the data required to make a -backup, but lack information about the future HTLCs that the channel will -process. 
- -### Obtaining SCBs - -#### On-Disk `channel.backup` - -There are multiple ways of obtaining SCBs from `lnd`. The most commonly used -method will likely be via the `channels.backup` file that's stored on-disk -alongside the rest of the chain data. This is a special file that contains SCB -entries for _all_ currently open channels. Each time a channel is opened or -closed, this file is updated on disk in a safe manner (atomic file rename). As -a result, unlike the `channel.db` file, it's _always_ safe to copy this file -for backup at ones desired location. The default location on Linux is: -``` -~/.lnd/data/chain/bitcoin/mainnet/channel.backup -``` - -An example of using file system level notification to [copy the backup to a -distinct volume/partition/drive can be found -here](https://gist.github.com/alexbosworth/2c5e185aedbdac45a03655b709e255a3). - -#### Using the `ExportChanBackup` RPC - -Another way to obtain SCBS for all or a target channel is via the new -`exportchanbackup` `lncli` command: -``` -⛰ lncli --network=simnet exportchanbackup --chan_point=29be6d259dc71ebdf0a3a0e83b240eda78f9023d8aeaae13c89250c7e59467d5:0 -{ - "chan_point": "29be6d259dc71ebdf0a3a0e83b240eda78f9023d8aeaae13c89250c7e59467d5:0", - "chan_backup": "02e7b423c8cf11038354732e9696caff9d5ac9720440f70a50ca2b9fcef5d873c8e64d53bdadfe208a86c96c7f31dc4eb370a02631bb02dce6611c435753a0c1f86c9f5b99006457f0dc7ee4a1c19e0d31a1036941d65717a50136c877d66ec80bb8f3e67cee8d9a5cb3f4081c3817cd830a8d0cf851c1f1e03fee35d790e42d98df5b24e07e6d9d9a46a16352e9b44ad412571c903a532017a5bc1ffe1369c123e1e17e1e4d52cc32329aa205d73d57f846389a6e446f612eeb2dcc346e4590f59a4c533f216ee44f09c1d2298b7d6c" -} - -⛰ lncli --network=simnet exportchanbackup --all -{ - "chan_points": [ - "29be6d259dc71ebdf0a3a0e83b240eda78f9023d8aeaae13c89250c7e59467d5:0" - ], - "multi_chan_backup": 
"fd73e992e5133aa085c8e45548e0189c411c8cfe42e902b0ee2dec528a18fb472c3375447868ffced0d4812125e4361d667b7e6a18b2357643e09bbe7e9110c6b28d74f4f55e7c29e92419b52509e5c367cf2d977b670a2ff7560f5fe24021d246abe30542e6c6e3aa52f903453c3a2389af918249dbdb5f1199aaecf4931c0366592165b10bdd58eaf706d6df02a39d9323a0c65260ffcc84776f2705e4942d89e4dbefa11c693027002c35582d56e295dcf74d27e90873699657337696b32c05c8014911a7ec8eb03bdbe526fe658be8abdf50ab12c4fec9ddeefc489cf817721c8e541d28fbe71e32137b5ea066a9f4e19814deedeb360def90eff2965570aab5fedd0ebfcd783ce3289360953680ac084b2e988c9cbd0912da400861467d7bb5ad4b42a95c2d541653e805cbfc84da401baf096fba43300358421ae1b43fd25f3289c8c73489977592f75bc9f73781f41718a752ab325b70c8eb2011c5d979f6efc7a76e16492566e43d94dbd42698eb06ff8ad4fd3f2baabafded" -} - -⛰ lncli --network=simnet exportchanbackup --all --output_file=channels.backup -``` - -As shown above, a user can either: specify a specific channel to backup, backup -all existing channels, or backup directly to an on-disk file. All backups use -the same format. - -#### Streaming Updates via `SubscribeChannelBackups` - -Using the gRPC interace directly, [a new call: -`SubscribeChannelBackups`](https://api.lightning.community/#subscribechannelbackups). -This call allows users to receive a new notification each time the underlying -SCB state changes. This can be used to implement more complex backup -schemes, compared to the file system notification based approach. 
- -### Recovering Using SCBs - -If a node is being created from scratch, then it's possible to pass in an -existing SCB using the `lncli create` or `lncli unlock` commands: -``` -⛰ lncli create -multi_file=channels.backup -``` - -Alternatively, the `restorechanbackup` command can be used if `lnd` has already -been created at the time of SCB restoration: -``` -⛰ lncli restorechanbackup -h -NAME: - lncli restorechanbackup - Restore an existing single or multi-channel static channel backup - -USAGE: - lncli restorechanbackup [command options] [--single_backup] [--multi_backup] [--multi_file=] - -CATEGORY: - Channels - -DESCRIPTION: - - Allows a user to restore a Static Channel Backup (SCB) that was - obtained either via the exportchanbackup command, or from lnd's - automatically manged channels.backup file. This command should be used - if a user is attempting to restore a channel due to data loss on a - running node restored with the same seed as the node that created the - channel. If successful, this command will allows the user to recover - the settled funds stored in the recovered channels. - - The command will accept backups in one of three forms: - - * A single channel packed SCB, which can be obtained from - exportchanbackup. This should be passed in hex encoded format. - - * A packed multi-channel SCB, which couples several individual - static channel backups in single blob. - - * A file path which points to a packed multi-channel backup within a - file, using the same format that lnd does in its channels.backup - file. - - -OPTIONS: - --single_backup value a hex encoded single channel backup obtained from exportchanbackup - --multi_backup value a hex encoded multi-channel backup obtained from exportchanbackup - --multi_file value the path to a multi-channel back up file -``` - -Once the process has been initiated, `lnd` will proceed to: - - 1. 
Given the set of channels to recover, the server will then insert a - series of "channel shells" into the database. These contain only the - information required to initiate the DLP (data loss protection) protocol - and nothing more. As a result, they're marked as "recovered" channels in - the database, and we'll disallow trying to use them for any other process. - 2. Once the channel shell is recovered, the - [chanbackup](https://github.com/lightningnetwork/lnd/tree/master/chanbackup) - package will attempt to insert a LinkNode that contains all prior - addresses that we were able to reach the peer at. During the process, - we'll also insert the edge for that channel (only in the outgoing - direction) into the database as well. - 3. lnd will then start up, and as usual attempt to establish connections to - all peers that we have channels open with. If `lnd` is already running, - then a new persistent connection attempt will be initiated. - 4. Once we connect with a peer, we'll then initiate the DLP protocol. The - remote peer will discover that we've lost data, and then immediately force - close their channel. Before they do though, they'll send over the channel - reestablishment handshake message which contains the unrevoked commitment - point which we need to derive keys (will be fixed in - BOLT 1.1 by making the key static) to sweep our funds. - 5. Once the commitment transaction confirms, given information within the SCB - we'll re-derive all keys we need, and then sweep the funds. diff --git a/lnd/docs/release.md b/lnd/docs/release.md deleted file mode 100644 index e6591ea4..00000000 --- a/lnd/docs/release.md +++ /dev/null @@ -1,70 +0,0 @@ -# `lnd`'s Reproducible Build System - -This package contains the build script that the `lnd` project uses in order to -build binaries for each new release. 
As of `go1.13`, with some new build flags, -binaries are now reproducible, allowing developers to build the binary on -distinct machines, and end up with a byte-for-byte identical binary. However, -this wasn't _fully_ solved in `go1.13`, as the build system still includes the -directory the binary is built into the binary itself. As a result, our scripts -utilize a work around needed until `go1.13.2`. - -## Building a New Release - -### macOS/Linux/Windows (WSL) - -No prior setup is required on Linux or macOS in order to build the -release binaries. However, on Windows, the only way to build the release -binaries at the moment is by using the Windows Subsystem for Linux. One can build -the release binaries following these steps: - -1. `git clone https://github.com/lightningnetwork/lnd.git` -2. `cd lnd` -3. `make release tag= # is the name of the next release/tag` - -This will then create a directory of the form `lnd-` containing archives -of the release binaries for each supported operating system and architecture, -and a manifest file containing the hash of each archive. - -## Verifying a Release - -With `go1.13`, it's now possible for third parties to verify release binaries. -Before this version of `go`, one had to trust the release manager(s) to build the -proper binary. With this new system, third parties can now _independently_ run -the release process, and verify that all the hashes of the release binaries -match exactly that of the release binaries produced by said third parties. - -To verify a release, one must obtain the following tools (many of these come -installed by default in most Unix systems): `gpg`/`gpg2`, `shasum`, and -`tar`/`unzip`. - -Once done, verifiers can proceed with the following steps: - -1. Acquire the archive containing the release binaries for one's specific - operating system and architecture, and the manifest file along with its - signature. -2. 
Verify the signature of the manifest file with `gpg --verify - manifest-.txt.sig`. This will require obtaining the PGP keys which - signed the manifest file, which are included in the release notes. -3. Recompute the `SHA256` hash of the archive with `shasum -a 256 `, - locate the corresponding one in the manifest file, and ensure they match - __exactly__. - -At this point, verifiers can use the release binaries acquired if they trust -the integrity of the release manager(s). Otherwise, one can proceed with the -guide to verify the release binaries were built properly by obtaining `shasum` -and `go` (matching the same version used in the release): - -4. Extract the release binaries contained within the archive, compute their - hashes as done above, and note them down. -5. Ensure `go` is installed, matching the same version as noted in the release - notes. -6. Obtain a copy of `lnd`'s source code with `git clone - https://github.com/lightningnetwork/lnd` and checkout the source code of the - release with `git checkout `. -7. Proceed to verify the tag with `git verify-tag ` and compile the - binaries from source for the intended operating system and architecture with - `make release sys=OS-ARCH tag=`. -8. Extract the archive found in the `lnd-` directory created by the - release script and recompute the `SHA256` hash of the release binaries (lnd - and lncli) with `shasum -a 256 `. These should match __exactly__ - as the ones noted above. diff --git a/lnd/docs/rest/websockets.md b/lnd/docs/rest/websockets.md deleted file mode 100644 index 705a4c73..00000000 --- a/lnd/docs/rest/websockets.md +++ /dev/null @@ -1,99 +0,0 @@ -# WebSockets with `lnd`'s REST API - -This document describes how streaming response REST calls can be used correctly -by making use of the WebSocket API. - -As an example, we are going to write a simple JavaScript program that subscribes -to `lnd`'s -[block notification RPC](https://api.lightning.community/#v2-chainnotifier-register-blocks). 
- -The WebSocket will be kept open as long as `lnd` runs and the JavaScript program -isn't stopped. - -## Browser environment - -When using WebSockets in a browser, there are certain security limitations of -what header fields are allowed to be sent. Therefore, the macaroon cannot just -be added as a `Grpc-Metadata-Macaroon` header field as it would work with normal -REST calls. The browser will just ignore that header field and not send it. - -Instead we have added a workaround in `lnd`'s WebSocket proxy that allows -sending the macaroon as a WebSocket "protocol": - -```javascript -const host = 'localhost:8080'; // The default REST port of lnd, can be overwritten with --restlisten=ip:port -const macaroon = '0201036c6e6402eb01030a10625e7e60fd00f5a6f9cd53f33fc82a...'; // The hex encoded macaroon to send -const initialRequest = { // The initial request to send (see API docs for each RPC). - hash: "xlkMdV382uNPskw6eEjDGFMQHxHNnZZgL47aVDSwiRQ=", // Just some example to show that all `byte` fields always have to be base64 encoded in the REST API. - height: 144, -} - -// The protocol is our workaround for sending the macaroon because custom header -// fields aren't allowed to be sent by the browser when opening a WebSocket. -const protocolString = 'Grpc-Metadata-Macaroon+' + macaroon; - -// Let's now connect the web socket. Notice that all WebSocket open calls are -// always GET requests. If the RPC expects a call to be POST or DELETE (see API -// docs to find out), the query parameter "method" can be set to overwrite. -const wsUrl = 'wss://' + host + '/v2/chainnotifier/register/blocks?method=POST'; -let ws = new WebSocket(wsUrl, protocolString); -ws.onopen = function (event) { - // After the WS connection is established, lnd expects the client to send the - // initial message. 
If an RPC doesn't have any request parameters, an empty - // JSON object has to be sent as a string, for example: ws.send('{}') - ws.send(JSON.stringify(initialRequest)); -} -ws.onmessage = function (event) { - // We received a new message. - console.log(event); - - // The data we're really interested in is in data and is always a string - // that needs to be parsed as JSON and always contains a "result" field: - console.log("Payload: "); - console.log(JSON.parse(event.data).result); -} -ws.onerror = function (event) { - // An error occurred, let's log it to the console. - console.log(event); -} -``` - -## Node.js environment - -With Node.js it is a bit easier to use the streaming response APIs because we -can set the macaroon header field directly. This is the example from the API -docs: - -```javascript -// -------------------------- -// Example with websockets: -// -------------------------- -const WebSocket = require('ws'); -const fs = require('fs'); -const macaroon = fs.readFileSync('LND_DIR/data/chain/bitcoin/simnet/admin.macaroon').toString('hex'); -let ws = new WebSocket('wss://localhost:8080/v2/chainnotifier/register/blocks?method=POST', { - // Work-around for self-signed certificates. 
- rejectUnauthorized: false, - headers: { - 'Grpc-Metadata-Macaroon': macaroon, - }, -}); -let requestBody = { - hash: "", - height: "", -} -ws.on('open', function() { - ws.send(JSON.stringify(requestBody)); -}); -ws.on('error', function(err) { - console.log('Error: ' + err); -}); -ws.on('message', function(body) { - console.log(body); -}); -// Console output (repeated for every message in the stream): -// { -// "hash": , -// "height": , -// } -``` diff --git a/lnd/docs/ruby-thing.rb b/lnd/docs/ruby-thing.rb deleted file mode 100644 index 922201fe..00000000 --- a/lnd/docs/ruby-thing.rb +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env ruby - -File.open("INSTALL.md", 'r') do |f| - f.each_line do |line| - forbidden_words = ['Table of contents', 'define', 'pragma'] - next if !line.start_with?("#") || forbidden_words.any? { |w| line =~ /#{w}/ } - - title = line.gsub("#", "").strip - href = title.gsub(" ", "-").downcase - puts " " * (line.count("#")-1) + "* [#{title}](\##{href})" - end -end diff --git a/lnd/docs/safety.md b/lnd/docs/safety.md deleted file mode 100644 index 6f3d74cc..00000000 --- a/lnd/docs/safety.md +++ /dev/null @@ -1,438 +0,0 @@ -# lnd Operational Safety Guidelines - -## Table of Contents - -* [Overview](#overview) - - [aezeed](#aezeed) - - [Wallet password](#wallet-password) - - [TLS](#tls) - - [Macaroons](#macaroons) - - [Static Channel Backups (SCBs)](#static-channel-backups-scbs) - - [Static remote keys](#static-remote-keys) -* [Best practices](#best-practices) - - [aezeed storage](#aezeed-storage) - - [File based backups](#file-based-backups) - - [Keeping Static Channel Backups (SCBs) safe](#keeping-static-channel-backups-scb-safe) - - [Keep `lnd` updated](#keep-lnd-updated) - - [Zombie channels](#zombie-channels) - - [Migrating a node to a new device](#migrating-a-node-to-a-new-device) - - [Migrating a node from clearnet to Tor](#migrating-a-node-from-clearnet-to-tor) - - [Prevent data corruption](#prevent-data-corruption) - - [Don't interrupt 
`lncli` commands](#dont-interrupt-lncli-commands) - - [Regular accounting/monitoring](#regular-accountingmonitoring) - - [Pruned bitcoind node](#pruned-bitcoind-node) - - [The `--noseedbackup` flag](#the---noseedbackup-flag) - -## Overview - -This chapter describes the security/safety mechanisms that are implemented in -`lnd`. We encourage every person that is planning on putting mainnet funds into -a Lightning Network channel using `lnd` to read this guide carefully. -As of this writing, `lnd` is still in beta and it is considered `#reckless` to -put any life altering amounts of BTC into the network. -That said, we constantly put in a lot of effort to make `lnd` safer to use and -more secure. We will update this documentation with each safety mechanism that -we implement. - -The first part of this document describes the security elements that are used in -`lnd` and how they work on a high level. -The second part is a list of best practices that have crystallized from bug -reports, developer recommendations and experiences from a lot of individuals -running mainnet `lnd` nodes during the last 18 months and counting. - -### aezeed - -This is what all the on-chain private keys are derived from. `aezeed` is similar -to BIP39 as it uses the same word list to encode the seed as a mnemonic phrase. -But this is where the similarities end, because `aezeed` is _not_ compatible -with BIP39. The 24 words of `aezeed` encode a 128 bit entropy (the seed itself), -a wallet birthday (days since BTC genesis block) and a version. -This data is _encrypted_ with a password using the AEZ cipher suite (hence the -name). Encrypting the content instead of using the password to derive the HD -extended root key has the advantage that the password can actually be checked -for correctness and can also be changed without affecting any of the derived -keys. -A BIP for the `aezeed` scheme is being written and should be published soon. 
- -Important to know: -* As with any bitcoin seed phrase, never reveal this to any person and store - the 24 words (and the password) in a safe place. -* You should never run two different `lnd` nodes with the same seed! Even if - they aren't running at the same time. This will lead to strange/unpredictable - behavior or even loss of funds. To migrate an `lnd` node to a new device, - please see the [node migration section](#migrating-a-node-to-a-new-device). -* For more technical information [see the aezeed README](../aezeed/README.md). - -### Wallet password - -The wallet password is one of the first things that has to be entered if a new -wallet is created using `lnd`. It is completely independent from the `aezeed` -cipher seed passphrase (which is optional). The wallet password is used to -encrypt the sensitive parts of `lnd`'s databases, currently some parts of -`wallet.db` and `macaroons.db`. Loss of this password does not necessarily -mean loss of funds, as long as the `aezeed` passphrase is still available. -But the node will need to be restored using the -[SCB restore procedure](recovery.md). - -### TLS - -By default the two API connections `lnd` offers (gRPC on port 10009 and REST on -port 8080) use TLS with a self-signed certificate for transport level security. -Specifying the certificate on the client side (for example `lncli`) is only a -protection against man-in-the-middle attacks and does not provide any -authentication. In fact, `lnd` will never even see the certificate that is -supplied to `lncli` with the `--tlscertpath` argument. `lncli` only uses that -certificate to verify it is talking to the correct gRPC server. -If the key/certificate pair (`tls.cert` and `tls.key` in the main `lnd` data -directory) is missing on startup, a new self-signed key/certificate pair is -generated. Clients connecting to `lnd` then have to use the new certificate -to verify they are talking to the correct server. 
- -### Macaroons - -Macaroons are used as the main authentication method in `lnd`. A macaroon is a -cryptographically verifiable token, comparable to a [JWT](https://jwt.io/) -or other form of API access token. In `lnd` this token consists of a _list of -permissions_ (what operations does the user of the token have access to) and a -set of _restrictions_ (e.g. token expiration timestamp, IP address restriction). -`lnd` does not keep track of the individual macaroons issued, only the key that -was used to create (and later verify) them. That means, individual tokens cannot -currently be invalidated, only all of them at once. -See the [high-level macaroons documentation](macaroons.md) or the [technical -README](../macaroons/README.md) for more information. - -Important to know: -* Deleting the `*.macaroon` files in the `/data/chain/bitcoin/mainnet/` - folder will trigger `lnd` to recreate the default macaroons. But this does - **NOT** invalidate clients that use an old macaroon. To make sure all - previously generated macaroons are invalidated, the `macaroons.db` has to be - deleted as well as all `*.macaroon`. - -### Static Channel Backups (SCBs) - -A Static Channel Backup is a piece of data that contains all _static_ -information about a channel, like funding transaction, capacity, key derivation -paths, remote node public key, remote node last known network addresses and -some static settings like CSV timeout and min HTLC setting. -Such a backup can either be obtained as a file containing entries for multiple -channels or by calling RPC methods to get individual (or all) channel data. -See the section on [keeping SCBs safe](#keeping-static-channel-backups-scb-safe) -for more information. - -What the SCB does **not** contain is the current channel balance (or the -associated commitment transaction). So how can a channel be restored using -SCBs? 
-That's the important part: _A channel cannot be restored using SCBs_, but the -funds that are in the channel can be claimed. The restore procedure relies on -the Data Loss Prevention (DLP) protocol which works by connecting to the remote -node and asking them to **force close** the channel and hand over the needed -information to sweep the on-chain funds that belong to the local node. -Because of this, [restoring a node from SCB](recovery.md) should be seen as an -emergency measure as all channels will be closed and on-chain fees incur to the -party that opened the channel initially. -To migrate an existing, working node to a new device, SCBs are _not_ the way to -do it. See the section about -[migrating a node](#migrating-a-node-to-a-new-device) on how to do it correctly. - -Important to know: -* [Restoring a node from SCB](recovery.md) will force-close all channels - contained in that file. -* Restoring a node from SCB relies on the remote node of each channel to be - online and respond to the DLP protocol. That's why it's important to - [get rid of zombie channels](#zombie-channels) because they cannot be - recovered using SCBs. -* The SCB data is encrypted with a key from the seed the node was created with. - A node can therefore only be restored from SCB if the seed is also known. - -### Static remote keys - -Since version `v0.8.0-beta`, `lnd` supports the `option_static_remote_key` (also -known as "safu commitments"). All new channels will be opened with this option -enabled by default, if the other node also supports it. -In essence, this change makes it possible for a node to sweep their channel -funds if the remote node force-closes, without any further communication between -the nodes. Previous to this change, your node needed to get a random channel -secret (called the `per_commit_point`) from the remote node even if they -force-closed the channel, which could make recovery very difficult. 
- -## Best practices - -### aezeed storage - -When creating a new wallet, `lnd` will print out 24 words to write down, which -is the wallet's seed (in the [aezeed](#aezeed) format). That seed is optionally -encrypted with a passphrase, also called the _cipher seed passphrase_. -It is absolutely important to write both the seed and, if set, the password down -and store it in a safe place as **there is no way of exporting the seed from an -lnd wallet**. When creating the wallet, after printing the seed to the command -line, it is hashed and only the hash (or to be more exact, the BIP32 extended -root key) is stored in the `wallet.db` file. -There is -[a tool being worked on](https://github.com/lightningnetwork/lnd/pull/2373) -that can extract the BIP32 extended root key but currently you cannot restore -lnd with only this root key. - -Important to know: -* Setting a password/passphrase for the aezeed is meant to protect it from - an attacker that finds the paper/storage device. Writing down the password - alongside the 24 seed words does not enhance the security in any way. - Therefore the password should be stored in a separate place. - -### File based backups - -There is a lot of confusion and also some myths about how to best backup the -off-chain funds of an `lnd` node. Making a mistake here is also still the single -biggest risk of losing off-chain funds, even though we do everything to mitigate -those risks. - -**What files can/should I regularly backup?** -The single most important file that needs to be backed up whenever it changes -is the `/data/chain/bitcoin/mainnet/channel.backup` file which holds -the Static Channel Backups (SCBs). This file is only updated every time `lnd` -starts, a channel is opened or a channel is closed. - -Most consumer Lightning wallet apps upload the file to the cloud automatically. - -See the [SCB chapter](#static-channel-backups-scbs) for more -information on how to use the file to restore channels. 
- -**What files should never be backed up to avoid problems?** -This is a bit of a trick question, as making the backup is not the problem. -Restoring/using an old version of a specific file called -`/data/graph/mainnet/channel.db` is what is very risky and should -_never_ be done! -This requires some explanation: -The way LN channels are currently set up (until `eltoo` is implemented) is that -both parties agree on a current balance. To make sure none of the two peers in -a channel ever try to publish an old state of that balance, they both hand over -their keys to the other peer that gives them the means to take _all_ funds (not -just their agreed upon part) from a channel, if an _old_ state is ever -published. Therefore, having an old state of a channel basically means -forfeiting the balance to the other party. - -As payments in `lnd` can be made multiple times a second, it's very hard to -make a backup of the channel database every time it is updated. And even if it -can be technically done, the confidence that a particular state is certainly the -most up-to-date can never be very high. That's why the focus should be on -[making sure the channel database is not corrupted](#prevent-data-corruption), -[closing out the zombie channels](#zombie-channels) and keeping your SCBs safe. - -### Keeping Static Channel Backups (SCB) safe - -As mentioned in the previous chapter, there is a file where `lnd` stores and -updates a backup of all channels whenever the node is restarted, a new channel -is opened or a channel is closed: -`/data/chain/bitcoin/mainnet/channel.backup` - -One straight-forward way of backing that file up is to create a file watcher and -react whenever the file is changed. Here is an example script that -[automatically makes a copy of the file whenever it changes](https://gist.github.com/alexbosworth/2c5e185aedbdac45a03655b709e255a3). 
- -Other ways of obtaining SCBs for a node's channels are -[described in the recovery documentation](recovery.md#obtaining-scbs). - -Because the backup file is encrypted with a key from the seed the node was -created with, it can safely be stored on a cloud storage or any other storage -medium. Many consumer focused wallet smartphone apps automatically store a -backup file to the cloud, if the phone is set up to allow it. - -### Keep `lnd` updated - -With every larger update of `lnd`, new security features are added. Users are -always encouraged to update their nodes as soon as possible. This also helps the -network in general as new safety features that require compatibility among nodes -can be used sooner. - -### Zombie channels - -Zombie channels are channels that are most likely dead but are still around. -This can happen if one of the channel peers has gone offline for good (possibly -due to a failure of some sort) and didn't close its channels. The other, still -online node doesn't necessarily know that its partner will never come back -online. - -Funds that are in such channels are at great risk, as is described quite -dramatically in -[this article](https://medium.com/@gcomxx/get-rid-of-those-zombie-channels-1267d5a2a708?) -. - -The TL;DR of the article is that if you have funds in a zombie channel and you -need to recover your node after a failure, SCBs won't be able to recover those -funds. Because SCB restore -[relies on the remote node cooperating](#static-channel-backups-scbs). - -That's why it's important to **close channels with peers that have been -offline** for a length of time as a precautionary measure. - -Of course this might not be good advice for a routing node operator that wants -to support mobile users and route for them. Nodes running on a mobile device -tend to be offline for long periods of time. It would be bad for those users if -they needed to open a new channel every time they want to use the wallet. 
-Most mobile wallets only open private channels as they do not intend to route -payments through them. A routing node operator should therefore take into -account if a channel is public or private when thinking about closing it. - -### Migrating a node to a new device - -As mentioned in the chapters [aezeed](#aezeed) and -[SCB](#static-channel-backups-scbs) you should never use the same seed on two -different nodes and restoring from SCB is not a migration but an emergency -procedure. -What is the correct way to migrate an existing node to a new device? There is -an easy way that should work for most people and there's the harder/costlier -fallback way to do it. - -**Option 1: Move the whole data directory to the new device** -This option works very well if the new device runs the same operating system on -the same architecture. If that is the case, the whole `/home//.lnd` -directory in Linux (or `$HOME/Library/Application Support/lnd` in MacOS, -`%LOCALAPPDATA%\lnd` in Windows) can be moved to the new device and `lnd` -started there. It is important to shut down `lnd` on the old device before -moving the directory! -**Not supported/untested** is moving the data directory between different -operating systems (for example `MacOS` -> `Linux`) or different system -architectures (for example `32bit` -> `64bit` or `ARM` -> `amd64`). Data -corruption or unexpected behavior can be the result. Users switching between -operating systems or architectures should always use Option 2! - -**Option 2: Start from scratch** -If option 1 does not work or is too risky, the safest course of action is to -initialize the existing node again from scratch. Unfortunately this incurs some -on-chain fee costs as all channels will need to be closed. Using the same seed -means restoring the same network node identity as before. If a new identity -should be created, a new seed needs to be created. -Follow these steps to create the **same node (with the same seed)** from -scratch: -1. 
On the old device, close all channels (`lncli closeallchannels`). The - command can take up to several minutes depending on the number of channels. - **Do not interrupt the command!** -1. Wait for all channels to be fully closed. If some nodes don't respond to the - close request it can be that `lnd` will go ahead and force close those - channels. This means that the local balance will be time locked for up to - two weeks (depending on the channel size). Check `lncli pendingchannels` to - see if any channels are still in the process of being force closed. -1. After all channels are fully closed (and `lncli pendingchannels` lists zero - channels), `lnd` can be shut down on the old device. -1. Start `lnd` on the new device and create a new wallet with the existing seed - that was used on the old device (answer "yes" when asked if an existing seed - should be used). -1. Wait for the wallet to rescan the blockchain. This can take up to several - hours depending on the age of the seed and the speed of the chain backend. -1. After the chain is fully synced (`lncli getinfo` shows - `"synced_to_chain": true`) the on-chain funds from the previous device should - now be visible on the new device as well and new channels can be opened. - -**What to do after the move** -If things don't work as expected on the moved or re-created node, consider this -list things that possibly need to be changed to work on a new device: -* In case the new device has a different hostname and TLS connection problems - occur, delete the `tls.key` and `tls.cert` files in the data directory and - restart `lnd` to recreate them. -* If an external IP is set (either with `--externalip` or `--tlsextraip`) these - might need to be changed if the new machine has a different address. Changing - the `--tlsextraip` setting also means regenerating the certificate pair. See - point 1. -* If port `9735` (or `10009` for gRPC) was forwarded on the router, these - forwarded ports need to point to the new device. 
The same applies to firewall - rules. -* It might take more than 24 hours for a new IP address to be visible on - network explorers. -* If channels show as offline after several hours, try to manually connect to - the remote peer. They might still try to reach `lnd` on the old address. - -### Migrating a node from clearnet to Tor - -If an `lnd` node has already been connected to the internet with an IPv4 or IPv6 -(clearnet) address and has any non-private channels, this connection between -channels and IP address is known to the network and cannot be deleted. -Starting the same node with the same identity and channels using Tor is trivial -to link back to any previously used clearnet IP address and does therefore not -provide any privacy benefits. -The following steps are recommended to cut all links between the old clearnet -node and the new Tor node: -1. Close all channels on the old node and wait for them to fully close. -1. Send all on-chain funds of the old node through a Coin Join service (like - Wasabi or Samurai/Whirlpool) until a sufficiently high anonymity set is - reached. -1. Create a new `lnd` node with a **new seed** that is only connected to Tor - and generate an on-chain address on the new node. -1. Send the mixed/coinjoined coins to the address of the new node. -1. Start opening channels. -1. Check an online network explorer that no IPv4 or IPv6 address is associated - with the new node's identity. - -### Prevent data corruption - -Many problems while running an `lnd` node can be prevented by avoiding data -corruption in the channel database (`/data/graph/mainnet/channel.db`). - -The following (non-exhaustive) list of things can lead to data corruption: -* A spinning hard drive gets a physical shock. -* `lnd`'s main data directory being written on an SD card or USB thumb drive - (SD cards and USB thumb drives _must_ be considered unsafe for critical files - that are written to very often, as the channel DB is). 
-* `lnd`'s main data directory being written to a network drive without - `fsync` support. -* Unclean shutdown of `lnd`. -* Aborting channel operation commands (see next chapter). -* Not enough disk space for a growing channel DB file. -* Moving `lnd`'s main data directory between different operating systems/ - architectures. - -To avoid most of these factors, it is recommended to store `lnd`'s main data -directory on an Solid State Drive (SSD) of a reliable manufacturer. -An alternative or extension to that is to use a replicated disk setup. Making -sure a power failure does not interrupt the node by running a UPS ( -uninterruptible power supply) might also make sense depending on the reliability -of the local power grid and the amount of funds at stake. - -### Don't interrupt `lncli` commands - -Things can start to take a while to execute if a node has more than 50 to 100 -channels. It is extremely important to **never interrupt an `lncli` command** -if it is manipulating the channel database, which is true for the following -commands: - - `openchannel` - - `closechannel` and `closeallchannels` - - `abandonchannel` - - `updatechanpolicy` - - `restorechanbackup` - -Interrupting any of those commands can lead to an inconsistent state of the -channel database and unpredictable behavior. If it is uncertain if a command -is really stuck or if the node is still working on it, a look at the log file -can help to get an idea. - -### Regular accounting/monitoring - -Regular monitoring of a node and keeping track of the movement of funds can help -prevent problems. Tools like [`lndmon`](https://github.com/lightninglabs/lndmon) -can assist with these tasks. - -### Pruned bitcoind node - -Running `lnd` connected to a `bitcoind` node that is running in prune mode is -not supported! 
`lnd` needs to verify the funding transaction of every channel -in the network and be able to retrieve that information from `bitcoind` which -it cannot deliver when that information is pruned away. - -In theory pruning away all blocks _before_ the SegWit activation would work -as LN channels rely on SegWit. But this has neither been tested nor would it -be recommended/supported. - -In addition to not running a pruned node, it is recommended to run `bitcoind` -with the `-txindex` flag for performance reasons, though this is not strictly -required. - -Multiple `lnd` nodes can run off of a single `bitcoind` instance. There will be -connection/thread/performance limits at some number of `lnd` nodes but in -practice running 2 or 3 `lnd` instances per `bitcoind` node didn't show any -problems. - -### The `--noseedbackup` flag - -This is a flag that is only used for integration tests and should **never** be -used on mainnet! Turning this flag on means that the 24 word seed will not be -shown when creating a wallet. The seed is required to restore a node in case -of data corruption and without it all funds (on-chain and off-chain) are -being put at risk. diff --git a/lnd/docs/watchtower.md b/lnd/docs/watchtower.md deleted file mode 100644 index 870ec1e8..00000000 --- a/lnd/docs/watchtower.md +++ /dev/null @@ -1,236 +0,0 @@ -# Private Altruist Watchtowers - -As of v0.7.0, `lnd` supports the ability to run a private, altruist watchtower -as a fully-integrated subsystem of `lnd`. Watchtowers act as a second line of -defense in responding to malicious or accidental breach scenarios in the event -that the client’s node is offline or unable to respond at the time of a breach, -offering greater degree of safety to channel funds. - -In contrast to a _reward watchtower_ which demand a portion of the channel funds -as a reward for fulfilling its duty, an _altruist watchtower_ returns all of the -victim’s funds (minus on-chain fees) without taking a cut. 
Reward watchtowers -will be enabled in a subsequent release, though are still undergoing further -testing and refinement. - -In addition, `lnd` can now be configured to operate as a _watchtower client_, -backing up encrypted breach-remedy transactions (aka. justice transactions) to -other altruist watchtowers. The watchtower stores fixed-size, encrypted blobs -and is only able to decrypt and publish the justice transaction after the -offending party has broadcast a revoked commitment state. Client communications -with a watchtower are encrypted and authenticated using ephemeral keypairs, -mitigating the amount of tracking the watchtower can perform on its clients -using long-term identifiers. - -Note that we have chosen to deploy a restricted set of features in this release -that can begin to provide meaningful security to `lnd` users. Many more -watchtower-related features are nearly complete or have meaningful progress, and -we will continue to ship them as they receive further testing and become safe to -release. - -Note: *For now, watchtowers will only backup the `to_local` and `to_remote` outputs -from revoked commitments; backing up HTLC outputs is slated to be deployed in a -future release, as the protocol can be extended to include the extra signature -data in the encrypted blobs.* - -## Configuring a Watchtower - -To set up a watchtower, command line users should compile in the optional -`watchtowerrpc` subserver, which will offer the ability to interface with the -tower via gRPC or `lncli`. The release binaries will include the `watchtowerrpc` -subserver by default. - -The minimal configuration needed to activate the tower is `watchtower.active=1`. 
- -Retrieving information about your tower’s configurations can be done using -`lncli tower info`: - -``` -🏔 lncli tower info -{ - "pubkey": "03281d603b2c5e19b8893a484eb938d7377179a9ef1a6bca4c0bcbbfc291657b63", - "listeners": [ - "[::]:9911" - ], - "uris": null, -} -``` - -The entire set of watchtower configuration options can be found using -`lnd -h`: - -``` -watchtower: - --watchtower.active If the watchtower should be active or not - --watchtower.towerdir= Directory of the watchtower.db (default: $HOME/.lnd/data/watchtower) - --watchtower.listen= Add interfaces/ports to listen for peer connections - --watchtower.externalip= Add interfaces/ports where the watchtower can accept peer connections - --watchtower.readtimeout= Duration the watchtower server will wait for messages to be received before hanging up on client connections - --watchtower.writetimeout= Duration the watchtower server will wait for messages to be written before hanging up on client connections -``` - -### Listening Interfaces - -By default, the watchtower will listen on `:9911` which specifies port `9911` -listening on all available interfaces. Users may configure their own listeners -via the `--watchtower.listen=` option. You can verify your configuration by -checking the `"listeners"` field in `lncli tower info`. If you're having trouble -connecting to your watchtower, ensure that `` is open or your proxy is -properly configured to point to an active listener. - -### External IP Addresses - -Additionally, users can specify their tower’s external IP address(es) using -`watchtower.externalip=`, which will expose the full tower URIs -(pubkey@host:port) over RPC or `lncli tower info`: - -``` - ... 
- "uris": [ - "03281d603b2c5e19b8893a484eb938d7377179a9ef1a6bca4c0bcbbfc291657b63@1.2.3.4:9911" - ] -``` - -The watchtower's URIs can be given to clients in order to connect and use the -tower with the following command: - -``` -🏔 lncli wtclient add 03281d603b2c5e19b8893a484eb938d7377179a9ef1a6bca4c0bcbbfc291657b63@1.2.3.4:9911 -``` - -If the watchtower's clients will need remote access, be sure to either: - - Open port 9911 or a port chosen via `watchtower.listen`. - - Use a proxy to direct traffic from an open port to the watchtower's listening - address. - -### Tor Hidden Services - -Watchtowers have tor hidden service support and can automatically generate a -hidden service on startup with the following flags: - -``` -🏔 lnd --tor.active --tor.v3 --watchtower.active -``` - -The onion address is then shown in the "uris" field when queried with `lncli tower info`: - -``` -... -"uris": [ - "03281d603b2c5e19b8893a484eb938d7377179a9ef1a6bca4c0bcbbfc291657b63@bn2kxggzjysvsd5o3uqe4h7655u7v2ydhxzy7ea2fx26duaixlwuguad.onion:9911" -] -``` - -Note: *The watchtower’s public key is distinct from `lnd`’s node public key. For -now this acts as a soft whitelist as it requires clients to know the tower’s -public key in order to use it for backups before more advanced whitelisting -features are implemented. We recommend NOT disclosing this public key openly, -unless you are prepared to open your tower up to the entire Internet.* - -### Watchtower Database Directory - -The watchtower's database can be moved using the `watchtower.towerdir=` -configuration option. Note that a trailing `/bitcoin/mainnet/watchtower.db` -will be appended to the chosen directory to isolate databases for different -chains, so setting `watchtower.towerdir=/path/to/towerdir` will yield a -watchtower database at `/path/to/towerdir/bitcoin/mainnet/watchtower.db`. 
- -On Linux, for example, the default watchtower database will be located at: - -``` -/$USER/.lnd/data/watchtower/bitcoin/mainnet/watchtower.db -``` - -## Configuring a Watchtower Client - -In order to set up a watchtower client, you’ll need two things: - -1. The watchtower client must be enabled with the `--wtclient.active` flag. - -``` -🏔 lnd --wtclient.active -``` - -2. The watchtower URI of an active watchtower. - -``` -🏔 lncli wtclient add 03281d603b2c5e19b8893a484eb938d7377179a9ef1a6bca4c0bcbbfc291657b63@1.2.3.4:9911 -``` - -Multiple watchtowers can be configured through this method. - -### Justice Fee Rates - -Users may optionally configure the fee rate of justice transactions by setting -the `wtclient.sweep-fee-rate` option, which accepts values in sat/byte. The -default value is 10 sat/byte, though users may choose to target higher rates to -offer greater priority during fee-spikes. Modifying the `sweep-fee-rate` will -be applied to all new updates after the daemon has been restarted. - -### Monitoring - -With the addition of the `lncli wtclient` command, users are now able to -interact with the watchtower client directly to obtain/modify information about -the set of registered watchtowers. - -As as example, with the `lncli wtclient tower` command, you can obtain the -number of sessions currently negotiated with the watchtower added above and -determine whether it is currently being used for backups through the -`active_session_candidate` value. - -``` -🏔 lncli wtclient tower 03281d603b2c5e19b8893a484eb938d7377179a9ef1a6bca4c0bcbbfc291657b63 -{ - "pubkey": "03281d603b2c5e19b8893a484eb938d7377179a9ef1a6bca4c0bcbbfc291657b63", - "addresses": [ - "1.2.3.4:9911" - ], - "active_session_candidate": true, - "num_sessions": 1, - "sessions": [] -} -``` - -To obtain information about the watchtower's sessions, users can use the -`--include_sessions` flag. 
- -``` -🏔 lncli wtclient tower --include_sessions 03281d603b2c5e19b8893a484eb938d7377179a9ef1a6bca4c0bcbbfc291657b63 -{ - "pubkey": "03281d603b2c5e19b8893a484eb938d7377179a9ef1a6bca4c0bcbbfc291657b63", - "addresses": [ - "1.2.3.4:9911" - ], - "active_session_candidate": true, - "num_sessions": 1, - "sessions": [ - { - "num_backups": 0, - "num_pending_backups": 0, - "max_backups": 1024, - "sweep_sat_per_byte": 10 - } - ] -} -``` - -The entire set of watchtower client configuration options can be found with -`lncli wtclient -h`: - -``` -NAME: - lncli wtclient - Interact with the watchtower client. - -USAGE: - lncli wtclient command [command options] [arguments...] - -COMMANDS: - add Register a watchtower to use for future sessions/backups. - remove Remove a watchtower to prevent its use for future sessions/backups. - towers Display information about all registered watchtowers. - tower Display information about a specific registered watchtower. - stats Display the session stats of the watchtower client. - policy Display the active watchtower client policy configuration. - -OPTIONS: - --help, -h show help -``` diff --git a/lnd/feature/default_sets.go b/lnd/feature/default_sets.go deleted file mode 100644 index ce080e8b..00000000 --- a/lnd/feature/default_sets.go +++ /dev/null @@ -1,54 +0,0 @@ -package feature - -import "github.com/pkt-cash/pktd/lnd/lnwire" - -// setDesc describes which feature bits should be advertised in which feature -// sets. -type setDesc map[lnwire.FeatureBit]map[Set]struct{} - -// defaultSetDesc are the default set descriptors for generating feature -// vectors. Each set is annotated with the corresponding identifier from BOLT 9 -// indicating where it should be advertised. 
-var defaultSetDesc = setDesc{ - lnwire.DataLossProtectRequired: { - SetInit: {}, // I - SetNodeAnn: {}, // N - }, - lnwire.GossipQueriesOptional: { - SetInit: {}, // I - SetNodeAnn: {}, // N - }, - lnwire.TLVOnionPayloadOptional: { - SetInit: {}, // I - SetNodeAnn: {}, // N - SetInvoice: {}, // 9 - SetLegacyGlobal: {}, - }, - lnwire.StaticRemoteKeyRequired: { - SetInit: {}, // I - SetNodeAnn: {}, // N - SetLegacyGlobal: {}, - }, - lnwire.UpfrontShutdownScriptOptional: { - SetInit: {}, // I - SetNodeAnn: {}, // N - }, - lnwire.PaymentAddrOptional: { - SetInit: {}, // I - SetNodeAnn: {}, // N - SetInvoice: {}, // 9 - }, - lnwire.MPPOptional: { - SetInit: {}, // I - SetNodeAnn: {}, // N - SetInvoice: {}, // 9 - }, - lnwire.AnchorsOptional: { - SetInit: {}, // I - SetNodeAnn: {}, // N - }, - lnwire.WumboChannelsOptional: { - SetInit: {}, // I - SetNodeAnn: {}, // N - }, -} diff --git a/lnd/feature/deps.go b/lnd/feature/deps.go deleted file mode 100644 index b2332241..00000000 --- a/lnd/feature/deps.go +++ /dev/null @@ -1,133 +0,0 @@ -package feature - -import ( - "fmt" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -type ( - // featureSet contains a set of feature bits. - featureSet map[lnwire.FeatureBit]struct{} - - // supportedFeatures maps the feature bit from a feature vector to a - // boolean indicating if this features dependencies have already been - // verified. This allows us to short circuit verification if multiple - // features have common dependencies, or map traversal starts verifying - // from the bottom up. - supportedFeatures map[lnwire.FeatureBit]bool - - // depDesc maps a features to its set of dependent features, which must - // also be present for the vector to be valid. This can be used to - // recursively check the dependency chain for features in a feature - // vector. 
- depDesc map[lnwire.FeatureBit]featureSet -) - -// ErrMissingFeatureDep is an error signaling that a transitive dependency in a -// feature vector is not set properly. -type ErrMissingFeatureDep struct { - dep lnwire.FeatureBit -} - -// NewErrMissingFeatureDep creates a new ErrMissingFeatureDep error. -func NewErrMissingFeatureDep(dep lnwire.FeatureBit) ErrMissingFeatureDep { - return ErrMissingFeatureDep{dep: dep} -} - -// Error returns a human-readable description of the missing dep error. -func (e ErrMissingFeatureDep) Error() string { - return fmt.Sprintf("missing feature dependency: %v", e.dep) -} - -// deps is the default set of dependencies for assigned feature bits. If a -// feature is not present in the depDesc it is assumed to have no dependencies. -// -// NOTE: For proper functioning, only the optional variant of feature bits -// should be used in the following descriptor. In the future it may be necessary -// to distinguish the dependencies for optional and required bits, but for now -// the validation code maps required bits to optional ones since it simplifies -// the number of constraints. -var deps = depDesc{ - lnwire.PaymentAddrOptional: { - lnwire.TLVOnionPayloadOptional: {}, - }, - lnwire.MPPOptional: { - lnwire.PaymentAddrOptional: {}, - }, - lnwire.AnchorsOptional: { - lnwire.StaticRemoteKeyOptional: {}, - }, -} - -// ValidateDeps asserts that a feature vector sets all features and their -// transitive dependencies properly. It assumes that the dependencies between -// optional and required features are identical, e.g. if a feature is required -// but its dependency is optional, that is sufficient. -func ValidateDeps(fv *lnwire.FeatureVector) er.R { - features := fv.Features() - supported := initSupported(features) - - return validateDeps(features, supported) -} - -// validateDeps is a subroutine that recursively checks that the passed features -// have all of their associated dependencies in the supported map. 
-func validateDeps(features featureSet, supported supportedFeatures) er.R { - for bit := range features { - // Convert any required bits to optional. - bit = mapToOptional(bit) - - // If the supported features doesn't contain the dependency, this - // vector is invalid. - checked, ok := supported[bit] - if !ok { - return er.E(NewErrMissingFeatureDep(bit)) - } - - // Alternatively, if we know that this dependency is valid, we - // can short circuit and continue verifying other bits. - if checked { - continue - } - - // Recursively validate dependencies, since this method ranges - // over the subDeps. This method will return true even if - // subDeps is nil. - subDeps := deps[bit] - if err := validateDeps(subDeps, supported); err != nil { - return err - } - - // Once we've confirmed that this feature's dependencies, if - // any, are sound, we record this so other paths taken through - // `bit` return early when inspecting the supported map. - supported[bit] = true - } - - return nil -} - -// initSupported sets all bits from the feature vector as supported but not -// checked. This signals that the validity of their dependencies has not been -// verified. All required bits are mapped to optional to simplify the DAG. -func initSupported(features featureSet) supportedFeatures { - supported := make(supportedFeatures) - for bit := range features { - bit = mapToOptional(bit) - supported[bit] = false - } - - return supported -} - -// mapToOptional returns the optional variant of a given feature bit pair. Our -// dependendency graph is described using only optional feature bits, which -// reduces the number of constraints we need to express in the descriptor. 
-func mapToOptional(bit lnwire.FeatureBit) lnwire.FeatureBit { - if bit.IsRequired() { - bit ^= 0x01 - } - return bit -} diff --git a/lnd/feature/deps_test.go b/lnd/feature/deps_test.go deleted file mode 100644 index 029cb5be..00000000 --- a/lnd/feature/deps_test.go +++ /dev/null @@ -1,168 +0,0 @@ -package feature - -import ( - "reflect" - "testing" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -type depTest struct { - name string - raw *lnwire.RawFeatureVector - expErr error -} - -var depTests = []depTest{ - { - name: "empty", - raw: lnwire.NewRawFeatureVector(), - }, - { - name: "no deps optional", - raw: lnwire.NewRawFeatureVector( - lnwire.GossipQueriesOptional, - ), - }, - { - name: "no deps required", - raw: lnwire.NewRawFeatureVector( - lnwire.TLVOnionPayloadRequired, - ), - }, - { - name: "one dep optional", - raw: lnwire.NewRawFeatureVector( - lnwire.TLVOnionPayloadOptional, - lnwire.PaymentAddrOptional, - ), - }, - { - name: "one dep required", - raw: lnwire.NewRawFeatureVector( - lnwire.TLVOnionPayloadRequired, - lnwire.PaymentAddrRequired, - ), - }, - { - name: "one missing optional", - raw: lnwire.NewRawFeatureVector( - lnwire.PaymentAddrOptional, - ), - expErr: ErrMissingFeatureDep{lnwire.TLVOnionPayloadOptional}, - }, - { - name: "one missing required", - raw: lnwire.NewRawFeatureVector( - lnwire.PaymentAddrRequired, - ), - expErr: ErrMissingFeatureDep{lnwire.TLVOnionPayloadOptional}, - }, - { - name: "two dep optional", - raw: lnwire.NewRawFeatureVector( - lnwire.TLVOnionPayloadOptional, - lnwire.PaymentAddrOptional, - lnwire.MPPOptional, - ), - }, - { - name: "two dep required", - raw: lnwire.NewRawFeatureVector( - lnwire.TLVOnionPayloadRequired, - lnwire.PaymentAddrRequired, - lnwire.MPPRequired, - ), - }, - { - name: "two dep last missing optional", - raw: lnwire.NewRawFeatureVector( - lnwire.PaymentAddrOptional, - lnwire.MPPOptional, - ), - expErr: ErrMissingFeatureDep{lnwire.TLVOnionPayloadOptional}, - 
}, - { - name: "two dep last missing required", - raw: lnwire.NewRawFeatureVector( - lnwire.PaymentAddrRequired, - lnwire.MPPRequired, - ), - expErr: ErrMissingFeatureDep{lnwire.TLVOnionPayloadOptional}, - }, - { - name: "two dep first missing optional", - raw: lnwire.NewRawFeatureVector( - lnwire.TLVOnionPayloadOptional, - lnwire.MPPOptional, - ), - expErr: ErrMissingFeatureDep{lnwire.PaymentAddrOptional}, - }, - { - name: "two dep first missing required", - raw: lnwire.NewRawFeatureVector( - lnwire.TLVOnionPayloadRequired, - lnwire.MPPRequired, - ), - expErr: ErrMissingFeatureDep{lnwire.PaymentAddrOptional}, - }, - { - name: "forest optional", - raw: lnwire.NewRawFeatureVector( - lnwire.GossipQueriesOptional, - lnwire.TLVOnionPayloadOptional, - lnwire.PaymentAddrOptional, - lnwire.MPPOptional, - ), - }, - { - name: "forest required", - raw: lnwire.NewRawFeatureVector( - lnwire.GossipQueriesRequired, - lnwire.TLVOnionPayloadRequired, - lnwire.PaymentAddrRequired, - lnwire.MPPRequired, - ), - }, - { - name: "broken forest optional", - raw: lnwire.NewRawFeatureVector( - lnwire.GossipQueriesOptional, - lnwire.TLVOnionPayloadOptional, - lnwire.MPPOptional, - ), - expErr: ErrMissingFeatureDep{lnwire.PaymentAddrOptional}, - }, - { - name: "broken forest required", - raw: lnwire.NewRawFeatureVector( - lnwire.GossipQueriesRequired, - lnwire.TLVOnionPayloadRequired, - lnwire.MPPRequired, - ), - expErr: ErrMissingFeatureDep{lnwire.PaymentAddrOptional}, - }, -} - -// TestValidateDeps tests that ValidateDeps correctly asserts whether or not the -// set features constitute a valid feature chain when accounting for transititve -// dependencies. 
-func TestValidateDeps(t *testing.T) { - for _, test := range depTests { - test := test - t.Run(test.name, func(t *testing.T) { - testValidateDeps(t, test) - }) - } -} - -func testValidateDeps(t *testing.T, test depTest) { - fv := lnwire.NewFeatureVector(test.raw, lnwire.Features) - err := ValidateDeps(fv) - if !reflect.DeepEqual(er.Wrapped(err), test.expErr) { - t.Fatalf("validation mismatch, want: %v, got: %v", - test.expErr, err) - - } -} diff --git a/lnd/feature/manager.go b/lnd/feature/manager.go deleted file mode 100644 index cc754710..00000000 --- a/lnd/feature/manager.go +++ /dev/null @@ -1,134 +0,0 @@ -package feature - -import ( - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -// Config houses any runtime modifications to the default set descriptors. For -// our purposes, this typically means disabling certain features to test legacy -// protocol interoperability or functionality. -type Config struct { - // NoTLVOnion unsets any optional or required TLVOnionPaylod bits from - // all feature sets. - NoTLVOnion bool - - // NoStaticRemoteKey unsets any optional or required StaticRemoteKey - // bits from all feature sets. - NoStaticRemoteKey bool - - // NoAnchors unsets any bits signaling support for anchor outputs. - NoAnchors bool - - // NoWumbo unsets any bits signalling support for wumbo channels. - NoWumbo bool -} - -// Manager is responsible for generating feature vectors for different requested -// feature sets. -type Manager struct { - // fsets is a static map of feature set to raw feature vectors. Requests - // are fulfilled by cloning these interal feature vectors. - fsets map[Set]*lnwire.RawFeatureVector -} - -// NewManager creates a new feature Manager, applying any custom modifications -// to its feature sets before returning. 
-func NewManager(cfg Config) (*Manager, er.R) { - return newManager(cfg, defaultSetDesc) -} - -// newManager creates a new feature Manager, applying any custom modifications -// to its feature sets before returning. This method accepts the setDesc as its -// own parameter so that it can be unit tested. -func newManager(cfg Config, desc setDesc) (*Manager, er.R) { - // First build the default feature vector for all known sets. - fsets := make(map[Set]*lnwire.RawFeatureVector) - for bit, sets := range desc { - for set := range sets { - // Fetch the feature vector for this set, allocating a - // new one if it doesn't exist. - fv, ok := fsets[set] - if !ok { - fv = lnwire.NewRawFeatureVector() - } - - // Set the configured bit on the feature vector, - // ensuring that we don't set two feature bits for the - // same pair. - err := fv.SafeSet(bit) - if err != nil { - return nil, er.Errorf("unable to set "+ - "%v in %v: %v", bit, set, err) - } - - // Write the updated feature vector under its set. - fsets[set] = fv - } - } - - // Now, remove any features as directed by the config. - for set, raw := range fsets { - if cfg.NoTLVOnion { - raw.Unset(lnwire.TLVOnionPayloadOptional) - raw.Unset(lnwire.TLVOnionPayloadRequired) - raw.Unset(lnwire.PaymentAddrOptional) - raw.Unset(lnwire.PaymentAddrRequired) - raw.Unset(lnwire.MPPOptional) - raw.Unset(lnwire.MPPRequired) - } - if cfg.NoStaticRemoteKey { - raw.Unset(lnwire.StaticRemoteKeyOptional) - raw.Unset(lnwire.StaticRemoteKeyRequired) - } - if cfg.NoAnchors { - raw.Unset(lnwire.AnchorsOptional) - raw.Unset(lnwire.AnchorsRequired) - } - if cfg.NoWumbo { - raw.Unset(lnwire.WumboChannelsOptional) - raw.Unset(lnwire.WumboChannelsRequired) - } - - // Ensure that all of our feature sets properly set any - // dependent features. 
- fv := lnwire.NewFeatureVector(raw, lnwire.Features) - err := ValidateDeps(fv) - if err != nil { - return nil, er.Errorf("invalid feature set %v: %v", - set, err) - } - } - - return &Manager{ - fsets: fsets, - }, nil -} - -// GetRaw returns a raw feature vector for the passed set. If no set is known, -// an empty raw feature vector is returned. -func (m *Manager) GetRaw(set Set) *lnwire.RawFeatureVector { - if fv, ok := m.fsets[set]; ok { - return fv.Clone() - } - - return lnwire.NewRawFeatureVector() -} - -// Get returns a feature vector for the passed set. If no set is known, an empty -// feature vector is returned. -func (m *Manager) Get(set Set) *lnwire.FeatureVector { - raw := m.GetRaw(set) - return lnwire.NewFeatureVector(raw, lnwire.Features) -} - -// ListSets returns a list of the feature sets that our node supports. -func (m *Manager) ListSets() []Set { - var sets []Set - - for set := range m.fsets { - sets = append(sets, set) - } - - return sets -} diff --git a/lnd/feature/manager_internal_test.go b/lnd/feature/manager_internal_test.go deleted file mode 100644 index 0a581f2b..00000000 --- a/lnd/feature/manager_internal_test.go +++ /dev/null @@ -1,128 +0,0 @@ -package feature - -import ( - "reflect" - "testing" - - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -type managerTest struct { - name string - cfg Config -} - -const unknownFeature lnwire.FeatureBit = 30 - -var testSetDesc = setDesc{ - lnwire.DataLossProtectRequired: { - SetNodeAnn: {}, // I - }, - lnwire.TLVOnionPayloadOptional: { - SetInit: {}, // I - SetNodeAnn: {}, // N - }, - lnwire.StaticRemoteKeyOptional: { - SetInit: {}, // I - SetNodeAnn: {}, // N - }, -} - -var managerTests = []managerTest{ - { - name: "default", - cfg: Config{}, - }, - { - name: "no tlv", - cfg: Config{ - NoTLVOnion: true, - }, - }, - { - name: "no static remote key", - cfg: Config{ - NoStaticRemoteKey: true, - }, - }, - { - name: "no tlv or static remote key", - cfg: Config{ - NoTLVOnion: true, - NoStaticRemoteKey: true, 
- }, - }, -} - -// TestManager asserts basic initialazation and operation of a feature manager, -// including that the proper features are removed in response to config changes. -func TestManager(t *testing.T) { - for _, test := range managerTests { - test := test - t.Run(test.name, func(t *testing.T) { - testManager(t, test) - }) - } -} - -func testManager(t *testing.T, test managerTest) { - m, err := newManager(test.cfg, testSetDesc) - if err != nil { - t.Fatalf("unable to create feature manager: %v", err) - } - - sets := []Set{ - SetInit, - SetLegacyGlobal, - SetNodeAnn, - SetInvoice, - } - - for _, set := range sets { - raw := m.GetRaw(set) - fv := m.Get(set) - - fv2 := lnwire.NewFeatureVector(raw, lnwire.Features) - - if !reflect.DeepEqual(fv, fv2) { - t.Fatalf("mismatch Get vs GetRaw, raw: %v vs fv: %v", - fv2, fv) - } - - assertUnset := func(bit lnwire.FeatureBit) { - hasBit := fv.HasFeature(bit) || fv.HasFeature(bit^1) - if hasBit { - t.Fatalf("bit %v or %v is set", bit, bit^1) - } - } - - // Assert that the manager properly unset the configured feature - // bits from all sets. - if test.cfg.NoTLVOnion { - assertUnset(lnwire.TLVOnionPayloadOptional) - } - if test.cfg.NoStaticRemoteKey { - assertUnset(lnwire.StaticRemoteKeyOptional) - } - - assertUnset(unknownFeature) - } - - // Do same basic sanity checks on features that are always present. 
- nodeFeatures := m.Get(SetNodeAnn) - - assertSet := func(bit lnwire.FeatureBit) { - has := nodeFeatures.HasFeature(bit) - if !has { - t.Fatalf("node features don't advertised %v", bit) - } - } - - assertSet(lnwire.DataLossProtectOptional) - if !test.cfg.NoTLVOnion { - assertSet(lnwire.TLVOnionPayloadRequired) - } - if !test.cfg.NoStaticRemoteKey { - assertSet(lnwire.StaticRemoteKeyOptional) - } -} diff --git a/lnd/feature/required.go b/lnd/feature/required.go deleted file mode 100644 index 654b1a2e..00000000 --- a/lnd/feature/required.go +++ /dev/null @@ -1,23 +0,0 @@ -package feature - -import ( - "fmt" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -// ErrUnknownRequired signals that a feature vector requires certain features -// that our node is unaware of or does not implement. -var ErrUnknownRequired = er.GenericErrorType.CodeWithDetail("ErrUnknownRequired", - "feature vector contains unknown required features") - -// ValidateRequired returns an error if the feature vector contains a non-zero -// number of unknown, required feature bits. -func ValidateRequired(fv *lnwire.FeatureVector) er.R { - unknown := fv.UnknownRequiredFeatures() - if len(unknown) > 0 { - return ErrUnknownRequired.New(fmt.Sprintf("%v", unknown), nil) - } - return nil -} diff --git a/lnd/feature/set.go b/lnd/feature/set.go deleted file mode 100644 index 2ac2ce52..00000000 --- a/lnd/feature/set.go +++ /dev/null @@ -1,41 +0,0 @@ -package feature - -// Set is an enum identifying various feature sets, which separates the single -// feature namespace into distinct categories depending what context a feature -// vector is being used. -type Set uint8 - -const ( - // SetInit identifies features that should be sent in an Init message to - // a remote peer. 
- SetInit Set = iota - - // SetLegacyGlobal identifies features that should be set in the legacy - // GlobalFeatures field of an Init message, which maintains backwards - // compatibility with nodes that haven't implemented flat features. - SetLegacyGlobal - - // SetNodeAnn identifies features that should be advertised on node - // announcements. - SetNodeAnn - - // SetInvoice identifies features that should be advertised on invoices - // generated by the daemon. - SetInvoice -) - -// String returns a human-readable description of a Set. -func (s Set) String() string { - switch s { - case SetInit: - return "SetInit" - case SetLegacyGlobal: - return "SetLegacyGlobal" - case SetNodeAnn: - return "SetNodeAnn" - case SetInvoice: - return "SetInvoice" - default: - return "SetUnknown" - } -} diff --git a/lnd/fmgr/interfaces.go b/lnd/fmgr/interfaces.go deleted file mode 100644 index 47238a8d..00000000 --- a/lnd/fmgr/interfaces.go +++ /dev/null @@ -1,20 +0,0 @@ -package fmgr - -import ( - "github.com/pkt-cash/pktd/lnd/lnpeer" - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -// Manager is an interface that describes the basic operation of a funding -// manager. It should at a minimum process a subset of lnwire messages that -// are denoted as funding messages. -type Manager interface { - // ProcessFundingMsg processes a funding message represented by the - // lnwire.Message parameter along with the Peer object representing a - // connection to the counterparty. - ProcessFundingMsg(lnwire.Message, lnpeer.Peer) - - // IsPendingChannel is used to determine whether to send an Error message - // to the funding manager or not. 
- IsPendingChannel([32]byte, lnpeer.Peer) bool -} diff --git a/lnd/fundingmanager.go b/lnd/fundingmanager.go deleted file mode 100644 index b467537a..00000000 --- a/lnd/fundingmanager.go +++ /dev/null @@ -1,3583 +0,0 @@ -package lnd - -import ( - "bytes" - "encoding/binary" - "sync" - "time" - - "github.com/davecgh/go-spew/spew" - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/lnd/chainntnfs" - "github.com/pkt-cash/pktd/lnd/chainreg" - "github.com/pkt-cash/pktd/lnd/chanacceptor" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/lnd/discovery" - "github.com/pkt-cash/pktd/lnd/htlcswitch" - "github.com/pkt-cash/pktd/lnd/input" - "github.com/pkt-cash/pktd/lnd/keychain" - "github.com/pkt-cash/pktd/lnd/labels" - "github.com/pkt-cash/pktd/lnd/lnpeer" - "github.com/pkt-cash/pktd/lnd/lnrpc" - "github.com/pkt-cash/pktd/lnd/lnwallet" - "github.com/pkt-cash/pktd/lnd/lnwallet/chainfee" - "github.com/pkt-cash/pktd/lnd/lnwallet/chanfunding" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/routing" - "github.com/pkt-cash/pktd/pktlog/log" - "github.com/pkt-cash/pktd/txscript" - "github.com/pkt-cash/pktd/wire" - "golang.org/x/crypto/salsa20" -) - -const ( - // TODO(roasbeef): tune - msgBufferSize = 50 - - // minBtcRemoteDelay and maxBtcRemoteDelay is the extremes of the - // Bitcoin CSV delay we will require the remote to use for its - // commitment transaction. The actual delay we will require will be - // somewhere between these values, depending on channel size. - minBtcRemoteDelay uint16 = 144 - maxBtcRemoteDelay uint16 = 2016 - - // minLtcRemoteDelay and maxLtcRemoteDelay is the extremes of the - // Litecoin CSV delay we will require the remote to use for its - // commitment transaction. 
The actual delay we will require will be - // somewhere between these values, depending on channel size. - minLtcRemoteDelay uint16 = 576 - maxLtcRemoteDelay uint16 = 8064 - - // maxWaitNumBlocksFundingConf is the maximum number of blocks to wait - // for the funding transaction to be confirmed before forgetting - // channels that aren't initiated by us. 2016 blocks is ~2 weeks. - maxWaitNumBlocksFundingConf = 2016 - - // minChanFundingSize is the smallest channel that we'll allow to be - // created over the RPC interface. - minChanFundingSize = btcutil.Amount(20000) - - // MaxBtcFundingAmount is a soft-limit of the maximum channel size - // currently accepted on the Bitcoin chain within the Lightning - // Protocol. This limit is defined in BOLT-0002, and serves as an - // initial precautionary limit while implementations are battle tested - // in the real world. - MaxBtcFundingAmount = btcutil.Amount(1<<24) - 1 - - // MaxBtcFundingAmountWumbo is a soft-limit on the maximum size of wumbo - // channels. This limit is 10 BTC and is the only thing standing between - // you and limitless channel size (apart from 21 million cap) - MaxBtcFundingAmountWumbo = btcutil.Amount(1000000000) - - // maxLtcFundingAmount is a soft-limit of the maximum channel size - // currently accepted on the Litecoin chain within the Lightning - // Protocol. - maxLtcFundingAmount = MaxBtcFundingAmount * chainreg.BtcToLtcConversionRate - - // 10mn PKT chan limit - maxPktFundingAmount = btcutil.Amount(1 << 30 * 10000000) -) - -var ( - // MaxFundingAmount is a soft-limit of the maximum channel size - // currently accepted within the Lightning Protocol. This limit is - // defined in BOLT-0002, and serves as an initial precautionary limit - // while implementations are battle tested in the real world. - // - // At the moment, this value depends on which chain is active. It is set - // to the value under the Bitcoin chain as default. 
- // - // TODO(roasbeef): add command line param to modify - MaxFundingAmount = MaxBtcFundingAmount - - // ErrFundingManagerShuttingDown is an error returned when attempting to - // process a funding request/message but the funding manager has already - // been signaled to shut down. - ErrFundingManagerShuttingDown = Err.CodeWithDetail("ErrFundingManagerShuttingDown", - "funding manager shutting down") - - // ErrConfirmationTimeout is an error returned when we as a responder - // are waiting for a funding transaction to confirm, but too many - // blocks pass without confirmation. - ErrConfirmationTimeout = Err.CodeWithDetail("ErrConfirmationTimeout", - "timeout waiting for funding confirmation") - - // errUpfrontShutdownScriptNotSupported is returned if an upfront shutdown - // script is set for a peer that does not support the feature bit. - errUpfrontShutdownScriptNotSupported = Err.CodeWithDetail( - "errUpfrontShutdownScriptNotSupported", - "peer does not support option upfront shutdown script") - - zeroID [32]byte -) - -// reservationWithCtx encapsulates a pending channel reservation. This wrapper -// struct is used internally within the funding manager to track and progress -// the funding workflow initiated by incoming/outgoing methods from the target -// peer. Additionally, this struct houses a response and error channel which is -// used to respond to the caller in the case a channel workflow is initiated -// via a local signal such as RPC. -// -// TODO(roasbeef): actually use the context package -// * deadlines, etc. -type reservationWithCtx struct { - reservation *lnwallet.ChannelReservation - peer lnpeer.Peer - - chanAmt btcutil.Amount - - // Constraints we require for the remote. - remoteCsvDelay uint16 - remoteMinHtlc lnwire.MilliSatoshi - remoteMaxValue lnwire.MilliSatoshi - remoteMaxHtlcs uint16 - - // maxLocalCsv is the maximum csv we will accept from the remote. 
- maxLocalCsv uint16 - - updateMtx sync.RWMutex - lastUpdated time.Time - - updates chan *lnrpc.OpenStatusUpdate - err chan er.R -} - -// isLocked checks the reservation's timestamp to determine whether it is locked. -func (r *reservationWithCtx) isLocked() bool { - r.updateMtx.RLock() - defer r.updateMtx.RUnlock() - - // The time zero value represents a locked reservation. - return r.lastUpdated.IsZero() -} - -// updateTimestamp updates the reservation's timestamp with the current time. -func (r *reservationWithCtx) updateTimestamp() { - r.updateMtx.Lock() - defer r.updateMtx.Unlock() - - r.lastUpdated = time.Now() -} - -// initFundingMsg is sent by an outside subsystem to the funding manager in -// order to kick off a funding workflow with a specified target peer. The -// original request which defines the parameters of the funding workflow are -// embedded within this message giving the funding manager full context w.r.t -// the workflow. -type initFundingMsg struct { - peer lnpeer.Peer - *openChanReq -} - -// fundingMsg is sent by the ProcessFundingMsg function and packages a -// funding-specific lnwire.Message along with the lnpeer.Peer that sent it. -type fundingMsg struct { - msg lnwire.Message - peer lnpeer.Peer -} - -// pendingChannels is a map instantiated per-peer which tracks all active -// pending single funded channels indexed by their pending channel identifier, -// which is a set of 32-bytes generated via a CSPRNG. -type pendingChannels map[[32]byte]*reservationWithCtx - -// serializedPubKey is used within the FundingManager's activeReservations list -// to identify the nodes with which the FundingManager is actively working to -// initiate new channels. -type serializedPubKey [33]byte - -// newSerializedKey creates a new serialized public key from an instance of a -// live pubkey object. 
-func newSerializedKey(pubKey *btcec.PublicKey) serializedPubKey { - var s serializedPubKey - copy(s[:], pubKey.SerializeCompressed()) - return s -} - -// fundingConfig defines the configuration for the FundingManager. All elements -// within the configuration MUST be non-nil for the FundingManager to carry out -// its duties. -type fundingConfig struct { - // NoWumboChans indicates if we're to reject all incoming wumbo channel - // requests, and also reject all outgoing wumbo channel requests. - NoWumboChans bool - - // IDKey is the PublicKey that is used to identify this node within the - // Lightning Network. - IDKey *btcec.PublicKey - - // Wallet handles the parts of the funding process that involves moving - // funds from on-chain transaction outputs into Lightning channels. - Wallet *lnwallet.LightningWallet - - // PublishTransaction facilitates the process of broadcasting a - // transaction to the network. - PublishTransaction func(*wire.MsgTx, string) er.R - - // UpdateLabel updates the label that a transaction has in our wallet, - // overwriting any existing labels. - UpdateLabel func(chainhash.Hash, string) er.R - - // FeeEstimator calculates appropriate fee rates based on historical - // transaction information. - FeeEstimator chainfee.Estimator - - // Notifier is used by the FundingManager to determine when the - // channel's funding transaction has been confirmed on the blockchain - // so that the channel creation process can be completed. - Notifier chainntnfs.ChainNotifier - - // SignMessage signs an arbitrary message with a given public key. The - // actual digest signed is the double sha-256 of the message. In the - // case that the private key corresponding to the passed public key - // cannot be located, then an error is returned. - // - // TODO(roasbeef): should instead pass on this responsibility to a - // distinct sub-system? 
- SignMessage func(pubKey *btcec.PublicKey, - msg []byte) (input.Signature, er.R) - - // CurrentNodeAnnouncement should return the latest, fully signed node - // announcement from the backing Lightning Network node. - CurrentNodeAnnouncement func() (lnwire.NodeAnnouncement, er.R) - - // SendAnnouncement is used by the FundingManager to send announcement - // messages to the Gossiper to possibly broadcast to the greater - // network. A set of optional message fields can be provided to populate - // any information within the graph that is not included in the gossip - // message. - SendAnnouncement func(msg lnwire.Message, - optionalFields ...discovery.OptionalMsgField) chan er.R - - // NotifyWhenOnline allows the FundingManager to register with a - // subsystem that will notify it when the peer comes online. This is - // used when sending the fundingLocked message, since it MUST be - // delivered after the funding transaction is confirmed. - // - // NOTE: The peerChan channel must be buffered. - NotifyWhenOnline func(peer [33]byte, peerChan chan<- lnpeer.Peer) - - // FindChannel queries the database for the channel with the given - // channel ID. - FindChannel func(chanID lnwire.ChannelID) (*channeldb.OpenChannel, er.R) - - // TempChanIDSeed is a cryptographically random string of bytes that's - // used as a seed to generate pending channel ID's. - TempChanIDSeed [32]byte - - // DefaultRoutingPolicy is the default routing policy used when - // initially announcing channels. - DefaultRoutingPolicy htlcswitch.ForwardingPolicy - - // DefaultMinHtlcIn is the default minimum incoming htlc value that is - // set as a channel parameter. - DefaultMinHtlcIn lnwire.MilliSatoshi - - // NumRequiredConfs is a function closure that helps the funding - // manager decide how many confirmations it should require for a - // channel extended to it. 
The function is able to take into account - // the amount of the channel, and any funds we'll be pushed in the - // process to determine how many confirmations we'll require. - NumRequiredConfs func(btcutil.Amount, lnwire.MilliSatoshi) uint16 - - // RequiredRemoteDelay is a function that maps the total amount in a - // proposed channel to the CSV delay that we'll require for the remote - // party. Naturally a larger channel should require a higher CSV delay - // in order to give us more time to claim funds in the case of a - // contract breach. - RequiredRemoteDelay func(btcutil.Amount) uint16 - - // RequiredRemoteChanReserve is a function closure that, given the - // channel capacity and dust limit, will return an appropriate amount - // for the remote peer's required channel reserve that is to be adhered - // to at all times. - RequiredRemoteChanReserve func(capacity, dustLimit btcutil.Amount) btcutil.Amount - - // RequiredRemoteMaxValue is a function closure that, given the channel - // capacity, returns the amount of MilliSatoshis that our remote peer - // can have in total outstanding HTLCs with us. - RequiredRemoteMaxValue func(btcutil.Amount) lnwire.MilliSatoshi - - // RequiredRemoteMaxHTLCs is a function closure that, given the channel - // capacity, returns the number of maximum HTLCs the remote peer can - // offer us. - RequiredRemoteMaxHTLCs func(btcutil.Amount) uint16 - - // WatchNewChannel is to be called once a new channel enters the final - // funding stage: waiting for on-chain confirmation. This method sends - // the channel to the ChainArbitrator so it can watch for any on-chain - // events related to the channel. We also provide the public key of the - // node we're establishing a channel with for reconnection purposes. 
- WatchNewChannel func(*channeldb.OpenChannel, *btcec.PublicKey) er.R - - // ReportShortChanID allows the funding manager to report the newly - // discovered short channel ID of a formerly pending channel to outside - // sub-systems. - ReportShortChanID func(wire.OutPoint) er.R - - // ZombieSweeperInterval is the periodic time interval in which the - // zombie sweeper is run. - ZombieSweeperInterval time.Duration - - // ReservationTimeout is the length of idle time that must pass before - // a reservation is considered a zombie. - ReservationTimeout time.Duration - - // MinChanSize is the smallest channel size that we'll accept as an - // inbound channel. We have such a parameter, as otherwise, nodes could - // flood us with very small channels that would never really be usable - // due to fees. - MinChanSize btcutil.Amount - - // MaxChanSize is the largest channel size that we'll accept as an - // inbound channel. We have such a parameter, so that you may decide how - // WUMBO you would like your channel. - MaxChanSize btcutil.Amount - - // MaxPendingChannels is the maximum number of pending channels we - // allow for each peer. - MaxPendingChannels int - - // RejectPush is set true if the fundingmanager should reject any - // incoming channels having a non-zero push amount. - RejectPush bool - - // MaxLocalCSVDelay is the maximum csv delay we will allow for our - // commit output. Channels that exceed this value will be failed. - MaxLocalCSVDelay uint16 - - // NotifyOpenChannelEvent informs the ChannelNotifier when channels - // transition from pending open to open. - NotifyOpenChannelEvent func(wire.OutPoint) - - // OpenChannelPredicate is a predicate on the lnwire.OpenChannel message - // and on the requesting node's public key that returns a bool which tells - // the funding manager whether or not to accept the channel. 
- OpenChannelPredicate chanacceptor.ChannelAcceptor - - // NotifyPendingOpenChannelEvent informs the ChannelNotifier when channels - // enter a pending state. - NotifyPendingOpenChannelEvent func(wire.OutPoint, *channeldb.OpenChannel) - - // EnableUpfrontShutdown specifies whether the upfront shutdown script - // is enabled. - EnableUpfrontShutdown bool - - // RegisteredChains keeps track of all chains that have been registered - // with the daemon. - RegisteredChains *chainreg.ChainRegistry -} - -// fundingManager acts as an orchestrator/bridge between the wallet's -// 'ChannelReservation' workflow, and the wire protocol's funding initiation -// messages. Any requests to initiate the funding workflow for a channel, -// either kicked-off locally or remotely are handled by the funding manager. -// Once a channel's funding workflow has been completed, any local callers, the -// local peer, and possibly the remote peer are notified of the completion of -// the channel workflow. Additionally, any temporary or permanent access -// controls between the wallet and remote peers are enforced via the funding -// manager. -type fundingManager struct { - started sync.Once - stopped sync.Once - - // cfg is a copy of the configuration struct that the FundingManager - // was initialized with. - cfg *fundingConfig - - // chanIDKey is a cryptographically random key that's used to generate - // temporary channel ID's. - chanIDKey [32]byte - - // chanIDNonce is a nonce that's incremented for each new funding - // reservation created. - nonceMtx sync.RWMutex - chanIDNonce uint64 - - // activeReservations is a map which houses the state of all pending - // funding workflows. - activeReservations map[serializedPubKey]pendingChannels - - // signedReservations is a utility map that maps the permanent channel - // ID of a funding reservation to its temporary channel ID. 
This is - // required as mid funding flow, we switch to referencing the channel - // by its full channel ID once the commitment transactions have been - // signed by both parties. - signedReservations map[lnwire.ChannelID][32]byte - - // resMtx guards both of the maps above to ensure that all access is - // goroutine safe. - resMtx sync.RWMutex - - // fundingMsgs is a channel that relays fundingMsg structs from - // external sub-systems using the ProcessFundingMsg call. - fundingMsgs chan *fundingMsg - - // queries is a channel which receives requests to query the internal - // state of the funding manager. - queries chan interface{} - - // fundingRequests is a channel used to receive channel initiation - // requests from a local subsystem within the daemon. - fundingRequests chan *initFundingMsg - - // newChanBarriers is a map from a channel ID to a 'barrier' which will - // be signalled once the channel is fully open. This barrier acts as a - // synchronization point for any incoming/outgoing HTLCs before the - // channel has been fully opened. - barrierMtx sync.RWMutex - newChanBarriers map[lnwire.ChannelID]chan struct{} - - localDiscoveryMtx sync.Mutex - localDiscoverySignals map[lnwire.ChannelID]chan struct{} - - handleFundingLockedMtx sync.RWMutex - handleFundingLockedBarriers map[lnwire.ChannelID]struct{} - - quit chan struct{} - wg sync.WaitGroup -} - -// channelOpeningState represents the different states a channel can be in -// between the funding transaction has been confirmed and the channel is -// announced to the network and ready to be used. -type channelOpeningState uint8 - -const ( - // markedOpen is the opening state of a channel if the funding - // transaction is confirmed on-chain, but fundingLocked is not yet - // successfully sent to the other peer. 
- markedOpen channelOpeningState = iota - - // fundingLockedSent is the opening state of a channel if the - // fundingLocked message has successfully been sent to the other peer, - // but we still haven't announced the channel to the network. - fundingLockedSent - - // addedToRouterGraph is the opening state of a channel if the - // channel has been successfully added to the router graph - // immediately after the fundingLocked message has been sent, but - // we still haven't announced the channel to the network. - addedToRouterGraph -) - -var ( - // channelOpeningStateBucket is the database bucket used to store the - // channelOpeningState for each channel that is currently in the process - // of being opened. - channelOpeningStateBucket = []byte("channelOpeningState") - - // ErrChannelNotFound is an error returned when a channel is not known - // to us. In this case of the fundingManager, this error is returned - // when the channel in question is not considered being in an opening - // state. - ErrChannelNotFound = Err.CodeWithDetail("ErrChannelNotFound", - "channel not found") -) - -// newFundingManager creates and initializes a new instance of the -// fundingManager. -func newFundingManager(cfg fundingConfig) (*fundingManager, er.R) { - return &fundingManager{ - cfg: &cfg, - chanIDKey: cfg.TempChanIDSeed, - activeReservations: make(map[serializedPubKey]pendingChannels), - signedReservations: make(map[lnwire.ChannelID][32]byte), - newChanBarriers: make(map[lnwire.ChannelID]chan struct{}), - fundingMsgs: make(chan *fundingMsg, msgBufferSize), - fundingRequests: make(chan *initFundingMsg, msgBufferSize), - localDiscoverySignals: make(map[lnwire.ChannelID]chan struct{}), - handleFundingLockedBarriers: make(map[lnwire.ChannelID]struct{}), - queries: make(chan interface{}, 1), - quit: make(chan struct{}), - }, nil -} - -// Start launches all helper goroutines required for handling requests sent -// to the funding manager. 
-func (f *fundingManager) Start() er.R { - var err er.R - f.started.Do(func() { - err = f.start() - }) - return err -} - -func (f *fundingManager) start() er.R { - log.Tracef("Funding manager running") - - // Upon restart, the Funding Manager will check the database to load any - // channels that were waiting for their funding transactions to be - // confirmed on the blockchain at the time when the daemon last went - // down. - // TODO(roasbeef): store height that funding finished? - // * would then replace call below - allChannels, err := f.cfg.Wallet.Cfg.Database.FetchAllChannels() - if err != nil { - return err - } - - for _, channel := range allChannels { - chanID := lnwire.NewChanIDFromOutPoint(&channel.FundingOutpoint) - - // For any channels that were in a pending state when the - // daemon was last connected, the Funding Manager will - // re-initialize the channel barriers, and republish the - // funding transaction if we're the initiator. - if channel.IsPending { - f.barrierMtx.Lock() - log.Tracef("Loading pending ChannelPoint(%v), "+ - "creating chan barrier", - channel.FundingOutpoint) - - f.newChanBarriers[chanID] = make(chan struct{}) - f.barrierMtx.Unlock() - - f.localDiscoverySignals[chanID] = make(chan struct{}) - - // Rebroadcast the funding transaction for any pending - // channel that we initiated. No error will be returned - // if the transaction already has been broadcast. - chanType := channel.ChanType - if chanType.IsSingleFunder() && chanType.HasFundingTx() && - channel.IsInitiator { - - var fundingTxBuf bytes.Buffer - err := channel.FundingTxn.Serialize(&fundingTxBuf) - if err != nil { - log.Errorf("Unable to serialize "+ - "funding transaction %v: %v", - channel.FundingTxn.TxHash(), err) - - // Clear the buffer of any bytes that - // were written before the serialization - // error to prevent logging an - // incomplete transaction. 
- fundingTxBuf.Reset() - } - - log.Debugf("Rebroadcasting funding tx for "+ - "ChannelPoint(%v): %x", - channel.FundingOutpoint, - fundingTxBuf.Bytes()) - - // Set a nil short channel ID at this stage - // because we do not know it until our funding - // tx confirms. - label := labels.MakeLabel( - labels.LabelTypeChannelOpen, nil, - ) - - errr := f.cfg.PublishTransaction( - channel.FundingTxn, label, - ) - if errr != nil { - log.Errorf("Unable to rebroadcast "+ - "funding tx %x for "+ - "ChannelPoint(%v): %v", - fundingTxBuf.Bytes(), - channel.FundingOutpoint, errr) - } - } - } - - // We will restart the funding state machine for all channels, - // which will wait for the channel's funding transaction to be - // confirmed on the blockchain, and transmit the messages - // necessary for the channel to be operational. - f.wg.Add(1) - go f.advanceFundingState(channel, chanID, nil) - } - - f.wg.Add(1) // TODO(roasbeef): tune - go f.reservationCoordinator() - - return nil -} - -// Stop signals all helper goroutines to execute a graceful shutdown. This -// method will block until all goroutines have exited. -func (f *fundingManager) Stop() er.R { - var err er.R - f.stopped.Do(func() { - err = f.stop() - }) - return err -} - -func (f *fundingManager) stop() er.R { - log.Infof("Funding manager shutting down") - - close(f.quit) - f.wg.Wait() - - return nil -} - -// nextPendingChanID returns the next free pending channel ID to be used to -// identify a particular future channel funding workflow. -func (f *fundingManager) nextPendingChanID() [32]byte { - // Obtain a fresh nonce. We do this by encoding the current nonce - // counter, then incrementing it by one. - f.nonceMtx.Lock() - var nonce [8]byte - binary.LittleEndian.PutUint64(nonce[:], f.chanIDNonce) - f.chanIDNonce++ - f.nonceMtx.Unlock() - - // We'll generate the next pending channelID by "encrypting" 32-bytes - // of zeroes which'll extract 32 random bytes from our stream cipher. 
- var ( - nextChanID [32]byte - zeroes [32]byte - ) - salsa20.XORKeyStream(nextChanID[:], zeroes[:], nonce[:], &f.chanIDKey) - - return nextChanID -} - -type pendingChannel struct { - identityPub *btcec.PublicKey - channelPoint *wire.OutPoint - capacity btcutil.Amount - localBalance btcutil.Amount - remoteBalance btcutil.Amount -} - -type pendingChansReq struct { - resp chan []*pendingChannel - err chan er.R -} - -// PendingChannels returns a slice describing all the channels which are -// currently pending at the last state of the funding workflow. -func (f *fundingManager) PendingChannels() ([]*pendingChannel, er.R) { - respChan := make(chan []*pendingChannel, 1) - errChan := make(chan er.R, 1) - - req := &pendingChansReq{ - resp: respChan, - err: errChan, - } - - select { - case f.queries <- req: - case <-f.quit: - return nil, ErrFundingManagerShuttingDown.Default() - } - - select { - case resp := <-respChan: - return resp, nil - case err := <-errChan: - return nil, err - case <-f.quit: - return nil, ErrFundingManagerShuttingDown.Default() - } -} - -// CancelPeerReservations cancels all active reservations associated with the -// passed node. This will ensure any outputs which have been pre committed, -// (and thus locked from coin selection), are properly freed. -func (f *fundingManager) CancelPeerReservations(nodePub [33]byte) { - - log.Debugf("Cancelling all reservations for peer %x", nodePub[:]) - - f.resMtx.Lock() - defer f.resMtx.Unlock() - - // We'll attempt to look up this node in the set of active - // reservations. If they don't have any, then there's no further work - // to be done. - nodeReservations, ok := f.activeReservations[nodePub] - if !ok { - log.Debugf("No active reservations for node: %x", nodePub[:]) - return - } - - // If they do have any active reservations, then we'll cancel all of - // them (which releases any locked UTXO's), and also delete it from the - // reservation map. 
- for pendingID, resCtx := range nodeReservations { - if err := resCtx.reservation.Cancel(); err != nil { - log.Errorf("unable to cancel reservation for "+ - "node=%x: %v", nodePub[:], err) - } - - resCtx.err <- er.Errorf("peer disconnected") - delete(nodeReservations, pendingID) - } - - // Finally, we'll delete the node itself from the set of reservations. - delete(f.activeReservations, nodePub) -} - -// failFundingFlow will fail the active funding flow with the target peer, -// identified by its unique temporary channel ID. This method will send an -// error to the remote peer, and also remove the reservation from our set of -// pending reservations. -// -// TODO(roasbeef): if peer disconnects, and haven't yet broadcast funding -// transaction, then all reservations should be cleared. -func (f *fundingManager) failFundingFlow(peer lnpeer.Peer, tempChanID [32]byte, - fundingErr er.R) { - - log.Debugf("Failing funding flow for pending_id=%x: %v", - tempChanID, fundingErr) - - ctx, err := f.cancelReservationCtx(peer.IdentityKey(), tempChanID, false) - if err != nil { - log.Errorf("unable to cancel reservation: %v", err) - } - - // In case the case where the reservation existed, send the funding - // error on the error channel. - if ctx != nil { - ctx.err <- fundingErr - } - - // We only send the exact error if it is part of out whitelisted set of - // errors (lnwire.FundingError or lnwallet.ReservationError). - errMsg := &lnwire.Error{ - ChanID: tempChanID, - Data: lnwire.ErrorData(fundingErr.Message()), - } - - log.Debugf("Sending funding error to peer (%x): %v", - peer.IdentityKey().SerializeCompressed(), spew.Sdump(errMsg)) - if err := peer.SendMessage(false, errMsg); err != nil { - log.Errorf("unable to send error message to peer %v", err) - } -} - -// reservationCoordinator is the primary goroutine tasked with progressing the -// funding workflow between the wallet, and any outside peers or local callers. -// -// NOTE: This MUST be run as a goroutine. 
-func (f *fundingManager) reservationCoordinator() { - defer f.wg.Done() - - zombieSweepTicker := time.NewTicker(f.cfg.ZombieSweeperInterval) - defer zombieSweepTicker.Stop() - - for { - select { - - case fmsg := <-f.fundingMsgs: - switch msg := fmsg.msg.(type) { - case *lnwire.OpenChannel: - f.handleFundingOpen(fmsg.peer, msg) - case *lnwire.AcceptChannel: - f.handleFundingAccept(fmsg.peer, msg) - case *lnwire.FundingCreated: - f.handleFundingCreated(fmsg.peer, msg) - case *lnwire.FundingSigned: - f.handleFundingSigned(fmsg.peer, msg) - case *lnwire.FundingLocked: - f.wg.Add(1) - go f.handleFundingLocked(fmsg.peer, msg) - case *lnwire.Error: - f.handleErrorMsg(fmsg.peer, msg) - } - case req := <-f.fundingRequests: - f.handleInitFundingMsg(req) - - case <-zombieSweepTicker.C: - f.pruneZombieReservations() - - case req := <-f.queries: - switch msg := req.(type) { - case *pendingChansReq: - f.handlePendingChannels(msg) - } - case <-f.quit: - return - } - } -} - -// advanceFundingState will advance the channel through the steps after the -// funding transaction is broadcasted, up until the point where the channel is -// ready for operation. This includes waiting for the funding transaction to -// confirm, sending funding locked to the peer, adding the channel to the -// router graph, and announcing the channel. The updateChan can be set non-nil -// to get OpenStatusUpdates. -// -// NOTE: This MUST be run as a goroutine. -func (f *fundingManager) advanceFundingState(channel *channeldb.OpenChannel, - pendingChanID [32]byte, updateChan chan<- *lnrpc.OpenStatusUpdate) { - - defer f.wg.Done() - - // If the channel is still pending we must wait for the funding - // transaction to confirm. 
- if channel.IsPending { - err := f.advancePendingChannelState(channel, pendingChanID) - if err != nil { - log.Errorf("Unable to advance pending state of "+ - "ChannelPoint(%v): %v", - channel.FundingOutpoint, err) - return - } - } - - // We create the state-machine object which wraps the database state. - lnChannel, err := lnwallet.NewLightningChannel( - nil, channel, nil, - ) - if err != nil { - log.Errorf("Unable to create LightningChannel(%v): %v", - channel.FundingOutpoint, err) - return - } - - for { - channelState, shortChanID, err := f.getChannelOpeningState( - &channel.FundingOutpoint, - ) - if ErrChannelNotFound.Is(err) { - // Channel not in fundingManager's opening database, - // meaning it was successfully announced to the - // network. - // TODO(halseth): could do graph consistency check - // here, and re-add the edge if missing. - log.Debugf("ChannelPoint(%v) with chan_id=%x not "+ - "found in opening database, assuming already "+ - "announced to the network", - channel.FundingOutpoint, pendingChanID) - return - } else if err != nil { - log.Errorf("Unable to query database for "+ - "channel opening state(%v): %v", - channel.FundingOutpoint, err) - return - } - - // If we did find the channel in the opening state database, we - // have seen the funding transaction being confirmed, but there - // are still steps left of the setup procedure. We continue the - // procedure where we left off. - err = f.stateStep( - channel, lnChannel, shortChanID, pendingChanID, - channelState, updateChan, - ) - if err != nil { - log.Errorf("Unable to advance state(%v): %v", - channel.FundingOutpoint, err) - return - } - } -} - -// stateStep advances the confirmed channel one step in the funding state -// machine. This method is synchronous and the new channel opening state will -// have been written to the database when it successfully returns. The -// updateChan can be set non-nil to get OpenStatusUpdates. 
-func (f *fundingManager) stateStep(channel *channeldb.OpenChannel, - lnChannel *lnwallet.LightningChannel, - shortChanID *lnwire.ShortChannelID, pendingChanID [32]byte, - channelState channelOpeningState, - updateChan chan<- *lnrpc.OpenStatusUpdate) er.R { - - chanID := lnwire.NewChanIDFromOutPoint(&channel.FundingOutpoint) - log.Debugf("Channel(%v) with ShortChanID %v has opening state %v", - chanID, shortChanID, channelState) - - switch channelState { - - // The funding transaction was confirmed, but we did not successfully - // send the fundingLocked message to the peer, so let's do that now. - case markedOpen: - err := f.sendFundingLocked(channel, lnChannel, shortChanID) - if err != nil { - return er.Errorf("failed sending fundingLocked: %v", - err) - } - - // As the fundingLocked message is now sent to the peer, the - // channel is moved to the next state of the state machine. It - // will be moved to the last state (actually deleted from the - // database) after the channel is finally announced. - err = f.saveChannelOpeningState( - &channel.FundingOutpoint, fundingLockedSent, - shortChanID, - ) - if err != nil { - return er.Errorf("error setting channel state to"+ - " fundingLockedSent: %v", err) - } - - log.Debugf("Channel(%v) with ShortChanID %v: successfully "+ - "sent FundingLocked", chanID, shortChanID) - - return nil - - // fundingLocked was sent to peer, but the channel was not added to the - // router graph and the channel announcement was not sent. - case fundingLockedSent: - err := f.addToRouterGraph(channel, shortChanID) - if err != nil { - return er.Errorf("failed adding to "+ - "router graph: %v", err) - } - - // As the channel is now added to the ChannelRouter's topology, - // the channel is moved to the next state of the state machine. - // It will be moved to the last state (actually deleted from - // the database) after the channel is finally announced. 
- err = f.saveChannelOpeningState( - &channel.FundingOutpoint, addedToRouterGraph, - shortChanID, - ) - if err != nil { - return er.Errorf("error setting channel state to"+ - " addedToRouterGraph: %v", err) - } - - log.Debugf("Channel(%v) with ShortChanID %v: successfully "+ - "added to router graph", chanID, shortChanID) - - // Give the caller a final update notifying them that - // the channel is now open. - // TODO(roasbeef): only notify after recv of funding locked? - fundingPoint := channel.FundingOutpoint - cp := &lnrpc.ChannelPoint{ - FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{ - FundingTxidBytes: fundingPoint.Hash[:], - }, - OutputIndex: fundingPoint.Index, - } - - if updateChan != nil { - upd := &lnrpc.OpenStatusUpdate{ - Update: &lnrpc.OpenStatusUpdate_ChanOpen{ - ChanOpen: &lnrpc.ChannelOpenUpdate{ - ChannelPoint: cp, - }, - }, - PendingChanId: pendingChanID[:], - } - - select { - case updateChan <- upd: - case <-f.quit: - return ErrFundingManagerShuttingDown.Default() - } - } - - return nil - - // The channel was added to the Router's topology, but the channel - // announcement was not sent. - case addedToRouterGraph: - err := f.annAfterSixConfs(channel, shortChanID) - if err != nil { - return er.Errorf("error sending channel "+ - "announcement: %v", err) - } - - // We delete the channel opening state from our internal - // database as the opening process has succeeded. We can do - // this because we assume the AuthenticatedGossiper queues the - // announcement messages, and persists them in case of a daemon - // shutdown. 
- err = f.deleteChannelOpeningState(&channel.FundingOutpoint) - if err != nil { - return er.Errorf("error deleting channel state: %v", - err) - } - - log.Debugf("Channel(%v) with ShortChanID %v: successfully "+ - "announced", chanID, shortChanID) - - return nil - } - - return er.Errorf("undefined channelState: %v", channelState) -} - -// advancePendingChannelState waits for a pending channel's funding tx to -// confirm, and marks it open in the database when that happens. -func (f *fundingManager) advancePendingChannelState( - channel *channeldb.OpenChannel, pendingChanID [32]byte) er.R { - - confChannel, err := f.waitForFundingWithTimeout(channel) - if ErrConfirmationTimeout.Is(err) { - // We'll get a timeout if the number of blocks mined - // since the channel was initiated reaches - // maxWaitNumBlocksFundingConf and we are not the - // channel initiator. - ch := channel - localBalance := ch.LocalCommitment.LocalBalance.ToSatoshis() - closeInfo := &channeldb.ChannelCloseSummary{ - ChainHash: ch.ChainHash, - ChanPoint: ch.FundingOutpoint, - RemotePub: ch.IdentityPub, - Capacity: ch.Capacity, - SettledBalance: localBalance, - CloseType: channeldb.FundingCanceled, - RemoteCurrentRevocation: ch.RemoteCurrentRevocation, - RemoteNextRevocation: ch.RemoteNextRevocation, - LocalChanConfig: ch.LocalChanCfg, - } - - // Close the channel with us as the initiator because we are - // timing the channel out. - if err := ch.CloseChannel( - closeInfo, channeldb.ChanStatusLocalCloseInitiator, - ); err != nil { - return er.Errorf("failed closing channel "+ - "%v: %v", ch.FundingOutpoint, err) - } - - timeoutErr := er.Errorf("timeout waiting for funding tx "+ - "(%v) to confirm", channel.FundingOutpoint) - - // When the peer comes online, we'll notify it that we - // are now considering the channel flow canceled. 
- f.wg.Add(1) - go func() { - defer f.wg.Done() - - peerChan := make(chan lnpeer.Peer, 1) - var peerKey [33]byte - copy(peerKey[:], ch.IdentityPub.SerializeCompressed()) - - f.cfg.NotifyWhenOnline(peerKey, peerChan) - - var peer lnpeer.Peer - select { - case peer = <-peerChan: - case <-f.quit: - return - } - // TODO(halseth): should this send be made - // reliable? - f.failFundingFlow(peer, pendingChanID, timeoutErr) - }() - - return timeoutErr - - } else if err != nil { - return er.Errorf("error waiting for funding "+ - "confirmation for ChannelPoint(%v): %v", - channel.FundingOutpoint, err) - } - - // Success, funding transaction was confirmed. - chanID := lnwire.NewChanIDFromOutPoint(&channel.FundingOutpoint) - log.Debugf("ChannelID(%v) is now fully confirmed! "+ - "(shortChanID=%v)", chanID, confChannel.shortChanID) - - err = f.handleFundingConfirmation(channel, confChannel) - if err != nil { - return er.Errorf("unable to handle funding "+ - "confirmation for ChannelPoint(%v): %v", - channel.FundingOutpoint, err) - } - - return nil -} - -// handlePendingChannels responds to a request for details concerning all -// currently pending channels waiting for the final phase of the funding -// workflow (funding txn confirmation). 
-func (f *fundingManager) handlePendingChannels(msg *pendingChansReq) { - var pendingChannels []*pendingChannel - - dbPendingChannels, err := f.cfg.Wallet.Cfg.Database.FetchPendingChannels() - if err != nil { - msg.err <- err - return - } - - for _, dbPendingChan := range dbPendingChannels { - pendingChan := &pendingChannel{ - identityPub: dbPendingChan.IdentityPub, - channelPoint: &dbPendingChan.FundingOutpoint, - capacity: dbPendingChan.Capacity, - localBalance: dbPendingChan.LocalCommitment.LocalBalance.ToSatoshis(), - remoteBalance: dbPendingChan.LocalCommitment.RemoteBalance.ToSatoshis(), - } - - pendingChannels = append(pendingChannels, pendingChan) - } - - msg.resp <- pendingChannels -} - -// ProcessFundingMsg sends a message to the internal fundingManager goroutine, -// allowing it to handle the lnwire.Message. -func (f *fundingManager) ProcessFundingMsg(msg lnwire.Message, peer lnpeer.Peer) { - select { - case f.fundingMsgs <- &fundingMsg{msg, peer}: - case <-f.quit: - return - } -} - -// commitmentType returns the commitment type to use for the channel, based on -// the features the two peers have available. -func commitmentType(localFeatures, - remoteFeatures *lnwire.FeatureVector) lnwallet.CommitmentType { - - // If both peers are signalling support for anchor commitments, this - // implicitly mean we'll create the channel of this type. Note that - // this also enables tweakless commitments, as anchor commitments are - // always tweakless. - localAnchors := localFeatures.HasFeature( - lnwire.AnchorsOptional, - ) - remoteAnchors := remoteFeatures.HasFeature( - lnwire.AnchorsOptional, - ) - if localAnchors && remoteAnchors { - return lnwallet.CommitmentTypeAnchors - } - - localTweakless := localFeatures.HasFeature( - lnwire.StaticRemoteKeyOptional, - ) - remoteTweakless := remoteFeatures.HasFeature( - lnwire.StaticRemoteKeyOptional, - ) - - // If both nodes are signaling the proper feature bit for tweakless - // copmmitments, we'll use that. 
- if localTweakless && remoteTweakless { - return lnwallet.CommitmentTypeTweakless - } - - // Otherwise we'll fall back to the legacy type. - return lnwallet.CommitmentTypeLegacy -} - -// handleFundingOpen creates an initial 'ChannelReservation' within the wallet, -// then responds to the source peer with an accept channel message progressing -// the funding workflow. -// -// TODO(roasbeef): add error chan to all, let channelManager handle -// error+propagate -func (f *fundingManager) handleFundingOpen(peer lnpeer.Peer, - msg *lnwire.OpenChannel) { - - // Check number of pending channels to be smaller than maximum allowed - // number and send ErrorGeneric to remote peer if condition is - // violated. - peerPubKey := peer.IdentityKey() - peerIDKey := newSerializedKey(peerPubKey) - - amt := msg.FundingAmount - - // We get all pending channels for this peer. This is the list of the - // active reservations and the channels pending open in the database. - f.resMtx.RLock() - reservations := f.activeReservations[peerIDKey] - - // We don't count reservations that were created from a canned funding - // shim. The user has registered the shim and therefore expects this - // channel to arrive. - numPending := 0 - for _, res := range reservations { - if !res.reservation.IsCannedShim() { - numPending++ - } - } - f.resMtx.RUnlock() - - // Also count the channels that are already pending. There we don't know - // the underlying intent anymore, unfortunately. - channels, err := f.cfg.Wallet.Cfg.Database.FetchOpenChannels(peerPubKey) - if err != nil { - f.failFundingFlow( - peer, msg.PendingChannelID, err, - ) - return - } - - for _, c := range channels { - // Pending channels that have a non-zero thaw height were also - // created through a canned funding shim. Those also don't - // count towards the DoS protection limit. - // - // TODO(guggero): Properly store the funding type (wallet, shim, - // PSBT) on the channel so we don't need to use the thaw height. 
- if c.IsPending && c.ThawHeight == 0 { - numPending++ - } - } - - // TODO(roasbeef): modify to only accept a _single_ pending channel per - // block unless white listed - if numPending >= f.cfg.MaxPendingChannels { - f.failFundingFlow( - peer, msg.PendingChannelID, - lnwire.ErrMaxPendingChannels.Default(), - ) - return - } - - // We'll also reject any requests to create channels until we're fully - // synced to the network as we won't be able to properly validate the - // confirmation of the funding transaction. - isSynced, _, errr := f.cfg.Wallet.IsSynced() - if errr != nil || !isSynced { - if errr != nil { - log.Errorf("unable to query wallet: %v", errr) - } - f.failFundingFlow( - peer, msg.PendingChannelID, - lnwire.ErrSynchronizingChain.Default(), - ) - return - } - - // Ensure that the remote party respects our maximum channel size. - if amt > f.cfg.MaxChanSize { - f.failFundingFlow( - peer, msg.PendingChannelID, - lnwallet.ErrChanTooLarge(amt, f.cfg.MaxChanSize), - ) - return - } - - // We'll, also ensure that the remote party isn't attempting to propose - // a channel that's below our current min channel size. - if amt < f.cfg.MinChanSize { - f.failFundingFlow( - peer, msg.PendingChannelID, - lnwallet.ErrChanTooSmall(amt, btcutil.Amount(f.cfg.MinChanSize)), - ) - return - } - - // If request specifies non-zero push amount and 'rejectpush' is set, - // signal an error. - if f.cfg.RejectPush && msg.PushAmount > 0 { - f.failFundingFlow( - peer, msg.PendingChannelID, - lnwallet.ErrNonZeroPushAmount(), - ) - return - } - - // Send the OpenChannel request to the ChannelAcceptor to determine whether - // this node will accept the channel. - chanReq := &chanacceptor.ChannelAcceptRequest{ - Node: peer.IdentityKey(), - OpenChanMsg: msg, - } - - // Query our channel acceptor to determine whether we should reject - // the channel. 
- acceptorResp := f.cfg.OpenChannelPredicate.Accept(chanReq) - if acceptorResp.RejectChannel() { - f.failFundingFlow( - peer, msg.PendingChannelID, - acceptorResp.ChanAcceptError, - ) - return - } - - log.Infof("Recv'd fundingRequest(amt=%v, push=%v, delay=%v, "+ - "pendingId=%x) from peer(%x)", amt, msg.PushAmount, - msg.CsvDelay, msg.PendingChannelID, - peer.IdentityKey().SerializeCompressed()) - - // Attempt to initialize a reservation within the wallet. If the wallet - // has insufficient resources to create the channel, then the - // reservation attempt may be rejected. Note that since we're on the - // responding side of a single funder workflow, we don't commit any - // funds to the channel ourselves. - // - // Before we init the channel, we'll also check to see if we've - // negotiated the new tweakless commitment format. This is only the - // case if *both* us and the remote peer are signaling the proper - // feature bit. - commitType := commitmentType( - peer.LocalFeatures(), peer.RemoteFeatures(), - ) - chainHash := chainhash.Hash(msg.ChainHash) - req := &lnwallet.InitFundingReserveMsg{ - ChainHash: &chainHash, - PendingChanID: msg.PendingChannelID, - NodeID: peer.IdentityKey(), - NodeAddr: peer.Address(), - LocalFundingAmt: 0, - RemoteFundingAmt: amt, - CommitFeePerKw: chainfee.SatPerKWeight(msg.FeePerKiloWeight), - FundingFeePerKw: 0, - PushMSat: msg.PushAmount, - Flags: msg.ChannelFlags, - MinConfs: 1, - CommitType: commitType, - } - - reservation, errr := f.cfg.Wallet.InitChannelReservation(req) - if errr != nil { - log.Errorf("Unable to initialize reservation: %v", errr) - f.failFundingFlow(peer, msg.PendingChannelID, errr) - return - } - - // As we're the responder, we get to specify the number of confirmations - // that we require before both of us consider the channel open. We'll - // use our mapping to derive the proper number of confirmations based on - // the amount of the channel, and also if any funds are being pushed to - // us. 
If a depth value was set by our channel acceptor, we will use - // that value instead. - numConfsReq := f.cfg.NumRequiredConfs(msg.FundingAmount, msg.PushAmount) - if acceptorResp.MinAcceptDepth != 0 { - numConfsReq = acceptorResp.MinAcceptDepth - } - reservation.SetNumConfsRequired(numConfsReq) - - // We'll also validate and apply all the constraints the initiating - // party is attempting to dictate for our commitment transaction. - channelConstraints := &channeldb.ChannelConstraints{ - DustLimit: msg.DustLimit, - ChanReserve: msg.ChannelReserve, - MaxPendingAmount: msg.MaxValueInFlight, - MinHTLC: msg.HtlcMinimum, - MaxAcceptedHtlcs: msg.MaxAcceptedHTLCs, - CsvDelay: msg.CsvDelay, - } - errr = reservation.CommitConstraints( - channelConstraints, f.cfg.MaxLocalCSVDelay, - ) - if errr != nil { - log.Errorf("Unacceptable channel constraints: %v", errr) - f.failFundingFlow(peer, msg.PendingChannelID, errr) - return - } - - // Check whether the peer supports upfront shutdown, and get a new wallet - // address if our node is configured to set shutdown addresses by default. - // We use the upfront shutdown script provided by our channel acceptor - // (if any) in lieu of user input. 
- shutdown, errr := getUpfrontShutdownScript( - f.cfg.EnableUpfrontShutdown, peer, acceptorResp.UpfrontShutdown, - func() (lnwire.DeliveryAddress, er.R) { - addr, errr := f.cfg.Wallet.NewAddress(lnwallet.WitnessPubKey, false) - if errr != nil { - return nil, errr - } - return txscript.PayToAddrScript(addr) - }, - ) - if err != nil { - f.failFundingFlow( - peer, msg.PendingChannelID, - er.Errorf("getUpfrontShutdownScript error: %v", err), - ) - return - } - reservation.SetOurUpfrontShutdown(shutdown) - - log.Infof("Requiring %v confirmations for pendingChan(%x): "+ - "amt=%v, push_amt=%v, committype=%v, upfrontShutdown=%x", numConfsReq, - msg.PendingChannelID, amt, msg.PushAmount, - commitType, msg.UpfrontShutdownScript) - - // Generate our required constraints for the remote party, using the - // values provided by the channel acceptor if they are non-zero. - remoteCsvDelay := f.cfg.RequiredRemoteDelay(amt) - if acceptorResp.CSVDelay != 0 { - remoteCsvDelay = acceptorResp.CSVDelay - } - - chanReserve := f.cfg.RequiredRemoteChanReserve(amt, msg.DustLimit) - if acceptorResp.Reserve != 0 { - chanReserve = acceptorResp.Reserve - } - - remoteMaxValue := f.cfg.RequiredRemoteMaxValue(amt) - if acceptorResp.InFlightTotal != 0 { - remoteMaxValue = acceptorResp.InFlightTotal - } - - maxHtlcs := f.cfg.RequiredRemoteMaxHTLCs(amt) - if acceptorResp.HtlcLimit != 0 { - maxHtlcs = acceptorResp.HtlcLimit - } - - // Default to our default minimum hltc value, replacing it with the - // channel acceptor's value if it is set. - minHtlc := f.cfg.DefaultMinHtlcIn - if acceptorResp.MinHtlcIn != 0 { - minHtlc = acceptorResp.MinHtlcIn - } - - // Once the reservation has been created successfully, we add it to - // this peer's map of pending reservations to track this particular - // reservation until either abort or completion. 
- f.resMtx.Lock() - if _, ok := f.activeReservations[peerIDKey]; !ok { - f.activeReservations[peerIDKey] = make(pendingChannels) - } - resCtx := &reservationWithCtx{ - reservation: reservation, - chanAmt: amt, - remoteCsvDelay: remoteCsvDelay, - remoteMinHtlc: minHtlc, - remoteMaxValue: remoteMaxValue, - remoteMaxHtlcs: maxHtlcs, - maxLocalCsv: f.cfg.MaxLocalCSVDelay, - err: make(chan er.R, 1), - peer: peer, - } - f.activeReservations[peerIDKey][msg.PendingChannelID] = resCtx - f.resMtx.Unlock() - - // Update the timestamp once the fundingOpenMsg has been handled. - defer resCtx.updateTimestamp() - - // With our parameters set, we'll now process their contribution so we - // can move the funding workflow ahead. - remoteContribution := &lnwallet.ChannelContribution{ - FundingAmount: amt, - FirstCommitmentPoint: msg.FirstCommitmentPoint, - ChannelConfig: &channeldb.ChannelConfig{ - ChannelConstraints: channeldb.ChannelConstraints{ - DustLimit: msg.DustLimit, - MaxPendingAmount: remoteMaxValue, - ChanReserve: chanReserve, - MinHTLC: minHtlc, - MaxAcceptedHtlcs: maxHtlcs, - CsvDelay: remoteCsvDelay, - }, - MultiSigKey: keychain.KeyDescriptor{ - PubKey: copyPubKey(msg.FundingKey), - }, - RevocationBasePoint: keychain.KeyDescriptor{ - PubKey: copyPubKey(msg.RevocationPoint), - }, - PaymentBasePoint: keychain.KeyDescriptor{ - PubKey: copyPubKey(msg.PaymentPoint), - }, - DelayBasePoint: keychain.KeyDescriptor{ - PubKey: copyPubKey(msg.DelayedPaymentPoint), - }, - HtlcBasePoint: keychain.KeyDescriptor{ - PubKey: copyPubKey(msg.HtlcPoint), - }, - }, - UpfrontShutdown: msg.UpfrontShutdownScript, - } - errr = reservation.ProcessSingleContribution(remoteContribution) - if errr != nil { - log.Errorf("unable to add contribution reservation: %v", errr) - f.failFundingFlow(peer, msg.PendingChannelID, errr) - return - } - - log.Infof("Sending fundingResp for pending_id(%x)", - msg.PendingChannelID) - log.Debugf("Remote party accepted commitment constraints: %v", - 
spew.Sdump(remoteContribution.ChannelConfig.ChannelConstraints)) - - // With the initiator's contribution recorded, respond with our - // contribution in the next message of the workflow. - ourContribution := reservation.OurContribution() - fundingAccept := lnwire.AcceptChannel{ - PendingChannelID: msg.PendingChannelID, - DustLimit: ourContribution.DustLimit, - MaxValueInFlight: remoteMaxValue, - ChannelReserve: chanReserve, - MinAcceptDepth: uint32(numConfsReq), - HtlcMinimum: minHtlc, - CsvDelay: remoteCsvDelay, - MaxAcceptedHTLCs: maxHtlcs, - FundingKey: ourContribution.MultiSigKey.PubKey, - RevocationPoint: ourContribution.RevocationBasePoint.PubKey, - PaymentPoint: ourContribution.PaymentBasePoint.PubKey, - DelayedPaymentPoint: ourContribution.DelayBasePoint.PubKey, - HtlcPoint: ourContribution.HtlcBasePoint.PubKey, - FirstCommitmentPoint: ourContribution.FirstCommitmentPoint, - UpfrontShutdownScript: ourContribution.UpfrontShutdown, - } - - if err := peer.SendMessage(true, &fundingAccept); err != nil { - log.Errorf("unable to send funding response to peer: %v", err) - f.failFundingFlow(peer, msg.PendingChannelID, err) - return - } -} - -// handleFundingAccept processes a response to the workflow initiation sent by -// the remote peer. This message then queues a message with the funding -// outpoint, and a commitment signature to the remote peer. -func (f *fundingManager) handleFundingAccept(peer lnpeer.Peer, - msg *lnwire.AcceptChannel) { - - pendingChanID := msg.PendingChannelID - peerKey := peer.IdentityKey() - - resCtx, err := f.getReservationCtx(peerKey, pendingChanID) - if err != nil { - log.Warnf("Can't find reservation (peerKey:%v, chan_id:%v)", - peerKey, pendingChanID) - return - } - - // Update the timestamp once the fundingAcceptMsg has been handled. 
- defer resCtx.updateTimestamp() - - log.Infof("Recv'd fundingResponse for pending_id(%x)", - pendingChanID[:]) - - // The required number of confirmations should not be greater than the - // maximum number of confirmations required by the ChainNotifier to - // properly dispatch confirmations. - if msg.MinAcceptDepth > chainntnfs.MaxNumConfs { - err := lnwallet.ErrNumConfsTooLarge( - msg.MinAcceptDepth, chainntnfs.MaxNumConfs, - ) - log.Warnf("Unacceptable channel constraints: %v", err) - f.failFundingFlow(peer, msg.PendingChannelID, err) - return - } - - // We'll also specify the responder's preference for the number of - // required confirmations, and also the set of channel constraints - // they've specified for commitment states we can create. - resCtx.reservation.SetNumConfsRequired(uint16(msg.MinAcceptDepth)) - channelConstraints := &channeldb.ChannelConstraints{ - DustLimit: msg.DustLimit, - ChanReserve: msg.ChannelReserve, - MaxPendingAmount: msg.MaxValueInFlight, - MinHTLC: msg.HtlcMinimum, - MaxAcceptedHtlcs: msg.MaxAcceptedHTLCs, - CsvDelay: msg.CsvDelay, - } - err = resCtx.reservation.CommitConstraints( - channelConstraints, resCtx.maxLocalCsv, - ) - if err != nil { - log.Warnf("Unacceptable channel constraints: %v", err) - f.failFundingFlow(peer, msg.PendingChannelID, err) - return - } - - // As they've accepted our channel constraints, we'll regenerate them - // here so we can properly commit their accepted constraints to the - // reservation. - chanReserve := f.cfg.RequiredRemoteChanReserve(resCtx.chanAmt, msg.DustLimit) - - // The remote node has responded with their portion of the channel - // contribution. At this point, we can process their contribution which - // allows us to construct and sign both the commitment transaction, and - // the funding transaction. 
- remoteContribution := &lnwallet.ChannelContribution{ - FirstCommitmentPoint: msg.FirstCommitmentPoint, - ChannelConfig: &channeldb.ChannelConfig{ - ChannelConstraints: channeldb.ChannelConstraints{ - DustLimit: msg.DustLimit, - MaxPendingAmount: resCtx.remoteMaxValue, - ChanReserve: chanReserve, - MinHTLC: resCtx.remoteMinHtlc, - MaxAcceptedHtlcs: resCtx.remoteMaxHtlcs, - CsvDelay: resCtx.remoteCsvDelay, - }, - MultiSigKey: keychain.KeyDescriptor{ - PubKey: copyPubKey(msg.FundingKey), - }, - RevocationBasePoint: keychain.KeyDescriptor{ - PubKey: copyPubKey(msg.RevocationPoint), - }, - PaymentBasePoint: keychain.KeyDescriptor{ - PubKey: copyPubKey(msg.PaymentPoint), - }, - DelayBasePoint: keychain.KeyDescriptor{ - PubKey: copyPubKey(msg.DelayedPaymentPoint), - }, - HtlcBasePoint: keychain.KeyDescriptor{ - PubKey: copyPubKey(msg.HtlcPoint), - }, - }, - UpfrontShutdown: msg.UpfrontShutdownScript, - } - err = resCtx.reservation.ProcessContribution(remoteContribution) - - // The wallet has detected that a PSBT funding process was requested by - // the user and has halted the funding process after negotiating the - // multisig keys. We now have everything that is needed for the user to - // start constructing a PSBT that sends to the multisig funding address. - var psbtIntent *chanfunding.PsbtIntent - errr := er.Wrapped(err) - if psbtErr, ok := errr.(*lnwallet.PsbtFundingRequired); ok { - // Return the information that is needed by the user to - // construct the PSBT back to the caller. 
- addr, amt, packet, err := psbtErr.Intent.FundingParams() - if err != nil { - log.Errorf("Unable to process PSBT funding params "+ - "for contribution from %v: %v", peerKey, err) - f.failFundingFlow(peer, msg.PendingChannelID, err) - return - } - var buf bytes.Buffer - err = packet.Serialize(&buf) - if err != nil { - log.Errorf("Unable to serialize PSBT for "+ - "contribution from %v: %v", peerKey, err) - f.failFundingFlow(peer, msg.PendingChannelID, err) - return - } - resCtx.updates <- &lnrpc.OpenStatusUpdate{ - PendingChanId: pendingChanID[:], - Update: &lnrpc.OpenStatusUpdate_PsbtFund{ - PsbtFund: &lnrpc.ReadyForPsbtFunding{ - FundingAddress: addr.EncodeAddress(), - FundingAmount: amt, - Psbt: buf.Bytes(), - }, - }, - } - psbtIntent = psbtErr.Intent - } else if err != nil { - log.Errorf("Unable to process contribution from %v: %v", - peerKey, err) - f.failFundingFlow(peer, msg.PendingChannelID, err) - return - } - - log.Infof("pendingChan(%x): remote party proposes num_confs=%v, "+ - "csv_delay=%v", pendingChanID[:], msg.MinAcceptDepth, msg.CsvDelay) - log.Debugf("Remote party accepted commitment constraints: %v", - spew.Sdump(remoteContribution.ChannelConfig.ChannelConstraints)) - - // If the user requested funding through a PSBT, we cannot directly - // continue now and need to wait for the fully funded and signed PSBT - // to arrive. To not block any other channels from opening, we wait in - // a separate goroutine. - if psbtIntent != nil { - f.wg.Add(1) - go func() { - defer f.wg.Done() - f.waitForPsbt(psbtIntent, resCtx, pendingChanID) - }() - - // With the new goroutine spawned, we can now exit to unblock - // the main event loop. - return - } - - // In a normal, non-PSBT funding flow, we can jump directly to the next - // step where we expect our contribution to be finalized. - f.continueFundingAccept(resCtx, pendingChanID) -} - -// waitForPsbt blocks until either a signed PSBT arrives, an error occurs or -// the funding manager shuts down. 
In the case of a valid PSBT, the funding flow -// is continued. -// -// NOTE: This method must be called as a goroutine. -func (f *fundingManager) waitForPsbt(intent *chanfunding.PsbtIntent, - resCtx *reservationWithCtx, pendingChanID [32]byte) { - - // failFlow is a helper that logs an error message with the current - // context and then fails the funding flow. - peerKey := resCtx.peer.IdentityKey() - failFlow := func(errMsg string, cause er.R) { - log.Errorf("Unable to handle funding accept message "+ - "for peer_key=%x, pending_chan_id=%x: %s: %v", - peerKey.SerializeCompressed(), pendingChanID, errMsg, - cause) - f.failFundingFlow(resCtx.peer, pendingChanID, cause) - } - - // We'll now wait until the intent has received the final and complete - // funding transaction. If the channel is closed without any error being - // sent, we know everything's going as expected. - select { - case err := <-intent.PsbtReady: - switch { - // If the user canceled the funding reservation, we need to - // inform the other peer about us canceling the reservation. - case chanfunding.ErrUserCanceled.Is(err): - failFlow("aborting PSBT flow", err) - return - - // If the remote canceled the funding reservation, we don't need - // to send another fail message. But we want to inform the user - // about what happened. - case chanfunding.ErrRemoteCanceled.Is(err): - log.Infof("Remote canceled, aborting PSBT flow "+ - "for peer_key=%x, pending_chan_id=%x", - peerKey.SerializeCompressed(), pendingChanID) - return - - // Nil error means the flow continues normally now. - case nil == err: - - // For any other error, we'll fail the funding flow. - default: - failFlow("error waiting for PSBT flow", err) - return - } - - // A non-nil error means we can continue the funding flow. - // Notify the wallet so it can prepare everything we need to - // continue. 
- err = resCtx.reservation.ProcessPsbt() - if err != nil { - failFlow("error continuing PSBT flow", err) - return - } - - // We are now ready to continue the funding flow. - f.continueFundingAccept(resCtx, pendingChanID) - - // Handle a server shutdown as well because the reservation won't - // survive a restart as it's in memory only. - case <-f.quit: - log.Errorf("Unable to handle funding accept message "+ - "for peer_key=%x, pending_chan_id=%x: funding manager "+ - "shutting down", peerKey.SerializeCompressed(), - pendingChanID) - return - } -} - -// continueFundingAccept continues the channel funding flow once our -// contribution is finalized, the channel output is known and the funding -// transaction is signed. -func (f *fundingManager) continueFundingAccept(resCtx *reservationWithCtx, - pendingChanID [32]byte) { - - // Now that we have their contribution, we can extract, then send over - // both the funding out point and our signature for their version of - // the commitment transaction to the remote peer. - outPoint := resCtx.reservation.FundingOutpoint() - _, sig := resCtx.reservation.OurSignatures() - - // A new channel has almost finished the funding process. In order to - // properly synchronize with the writeHandler goroutine, we add a new - // channel to the barriers map which will be closed once the channel is - // fully open. - f.barrierMtx.Lock() - channelID := lnwire.NewChanIDFromOutPoint(outPoint) - log.Debugf("Creating chan barrier for ChanID(%v)", channelID) - f.newChanBarriers[channelID] = make(chan struct{}) - f.barrierMtx.Unlock() - - // The next message that advances the funding flow will reference the - // channel via its permanent channel ID, so we'll set up this mapping - // so we can retrieve the reservation context once we get the - // FundingSigned message. 
- f.resMtx.Lock() - f.signedReservations[channelID] = pendingChanID - f.resMtx.Unlock() - - log.Infof("Generated ChannelPoint(%v) for pending_id(%x)", outPoint, - pendingChanID[:]) - - var err er.R - fundingCreated := &lnwire.FundingCreated{ - PendingChannelID: pendingChanID, - FundingPoint: *outPoint, - } - fundingCreated.CommitSig, err = lnwire.NewSigFromSignature(sig) - if err != nil { - log.Errorf("Unable to parse signature: %v", err) - f.failFundingFlow(resCtx.peer, pendingChanID, err) - return - } - if err := resCtx.peer.SendMessage(true, fundingCreated); err != nil { - log.Errorf("Unable to send funding complete message: %v", err) - f.failFundingFlow(resCtx.peer, pendingChanID, err) - return - } -} - -// handleFundingCreated progresses the funding workflow when the daemon is on -// the responding side of a single funder workflow. Once this message has been -// processed, a signature is sent to the remote peer allowing it to broadcast -// the funding transaction, progressing the workflow into the final stage. -func (f *fundingManager) handleFundingCreated(peer lnpeer.Peer, - msg *lnwire.FundingCreated) { - - peerKey := peer.IdentityKey() - pendingChanID := msg.PendingChannelID - - resCtx, err := f.getReservationCtx(peerKey, pendingChanID) - if err != nil { - log.Warnf("can't find reservation (peer_id:%v, chan_id:%x)", - peerKey, pendingChanID[:]) - return - } - - // The channel initiator has responded with the funding outpoint of the - // final funding transaction, as well as a signature for our version of - // the commitment transaction. So at this point, we can validate the - // initiator's commitment transaction, then send our own if it's valid. 
- // TODO(roasbeef): make case (p vs P) consistent throughout - fundingOut := msg.FundingPoint - log.Infof("completing pending_id(%x) with ChannelPoint(%v)", - pendingChanID[:], fundingOut) - - commitSig, err := msg.CommitSig.ToSignature() - if err != nil { - log.Errorf("unable to parse signature: %v", err) - f.failFundingFlow(peer, pendingChanID, err) - return - } - - // With all the necessary data available, attempt to advance the - // funding workflow to the next stage. If this succeeds then the - // funding transaction will broadcast after our next message. - // CompleteReservationSingle will also mark the channel as 'IsPending' - // in the database. - completeChan, err := resCtx.reservation.CompleteReservationSingle( - &fundingOut, commitSig, - ) - if err != nil { - // TODO(roasbeef): better error logging: peerID, channelID, etc. - log.Errorf("unable to complete single reservation: %v", err) - f.failFundingFlow(peer, pendingChanID, err) - return - } - - // The channel is marked IsPending in the database, and can be removed - // from the set of active reservations. - f.deleteReservationCtx(peerKey, msg.PendingChannelID) - - // If something goes wrong before the funding transaction is confirmed, - // we use this convenience method to delete the pending OpenChannel - // from the database. - deleteFromDatabase := func() { - localBalance := completeChan.LocalCommitment.LocalBalance.ToSatoshis() - closeInfo := &channeldb.ChannelCloseSummary{ - ChanPoint: completeChan.FundingOutpoint, - ChainHash: completeChan.ChainHash, - RemotePub: completeChan.IdentityPub, - CloseType: channeldb.FundingCanceled, - Capacity: completeChan.Capacity, - SettledBalance: localBalance, - RemoteCurrentRevocation: completeChan.RemoteCurrentRevocation, - RemoteNextRevocation: completeChan.RemoteNextRevocation, - LocalChanConfig: completeChan.LocalChanCfg, - } - - // Close the channel with us as the initiator because we are - // deciding to exit the funding flow due to an internal error. 
- if err := completeChan.CloseChannel( - closeInfo, channeldb.ChanStatusLocalCloseInitiator, - ); err != nil { - log.Errorf("Failed closing channel %v: %v", - completeChan.FundingOutpoint, err) - } - } - - // A new channel has almost finished the funding process. In order to - // properly synchronize with the writeHandler goroutine, we add a new - // channel to the barriers map which will be closed once the channel is - // fully open. - f.barrierMtx.Lock() - channelID := lnwire.NewChanIDFromOutPoint(&fundingOut) - log.Debugf("Creating chan barrier for ChanID(%v)", channelID) - f.newChanBarriers[channelID] = make(chan struct{}) - f.barrierMtx.Unlock() - - log.Infof("sending FundingSigned for pending_id(%x) over "+ - "ChannelPoint(%v)", pendingChanID[:], fundingOut) - - // With their signature for our version of the commitment transaction - // verified, we can now send over our signature to the remote peer. - _, sig := resCtx.reservation.OurSignatures() - ourCommitSig, err := lnwire.NewSigFromSignature(sig) - if err != nil { - log.Errorf("unable to parse signature: %v", err) - f.failFundingFlow(peer, pendingChanID, err) - deleteFromDatabase() - return - } - - fundingSigned := &lnwire.FundingSigned{ - ChanID: channelID, - CommitSig: ourCommitSig, - } - if err := peer.SendMessage(true, fundingSigned); err != nil { - log.Errorf("unable to send FundingSigned message: %v", err) - f.failFundingFlow(peer, pendingChanID, err) - deleteFromDatabase() - return - } - - // Now that we've sent over our final signature for this channel, we'll - // send it to the ChainArbitrator so it can watch for any on-chain - // actions during this final confirmation stage. 
- if err := f.cfg.WatchNewChannel(completeChan, peerKey); err != nil { - log.Errorf("Unable to send new ChannelPoint(%v) for "+ - "arbitration: %v", fundingOut, err) - } - - // Create an entry in the local discovery map so we can ensure that we - // process the channel confirmation fully before we receive a funding - // locked message. - f.localDiscoveryMtx.Lock() - f.localDiscoverySignals[channelID] = make(chan struct{}) - f.localDiscoveryMtx.Unlock() - - // Inform the ChannelNotifier that the channel has entered - // pending open state. - f.cfg.NotifyPendingOpenChannelEvent(fundingOut, completeChan) - - // At this point we have sent our last funding message to the - // initiating peer before the funding transaction will be broadcast. - // With this last message, our job as the responder is now complete. - // We'll wait for the funding transaction to reach the specified number - // of confirmations, then start normal operations. - // - // When we get to this point we have sent the signComplete message to - // the channel funder, and BOLT#2 specifies that we MUST remember the - // channel for reconnection. The channel is already marked - // as pending in the database, so in case of a disconnect or restart, - // we will continue waiting for the confirmation the next time we start - // the funding manager. In case the funding transaction never appears - // on the blockchain, we must forget this channel. We therefore - // completely forget about this channel if we haven't seen the funding - // transaction in 288 blocks (~ 48 hrs), by canceling the reservation - // and canceling the wait for the funding confirmation. - f.wg.Add(1) - go f.advanceFundingState(completeChan, pendingChanID, nil) -} - -// handleFundingSigned processes the final message received in a single funder -// workflow. Once this message is processed, the funding transaction is -// broadcast. 
Once the funding transaction reaches a sufficient number of -// confirmations, a message is sent to the responding peer along with a compact -// encoding of the location of the channel within the blockchain. -func (f *fundingManager) handleFundingSigned(peer lnpeer.Peer, - msg *lnwire.FundingSigned) { - - // As the funding signed message will reference the reservation by its - // permanent channel ID, we'll need to perform an intermediate look up - // before we can obtain the reservation. - f.resMtx.Lock() - pendingChanID, ok := f.signedReservations[msg.ChanID] - delete(f.signedReservations, msg.ChanID) - f.resMtx.Unlock() - if !ok { - err := er.Errorf("unable to find signed reservation for "+ - "chan_id=%x", msg.ChanID) - log.Warnf(err.String()) - f.failFundingFlow(peer, msg.ChanID, err) - return - } - - peerKey := peer.IdentityKey() - resCtx, err := f.getReservationCtx(peerKey, pendingChanID) - if err != nil { - log.Warnf("Unable to find reservation (peer_id:%v, "+ - "chan_id:%x)", peerKey, pendingChanID[:]) - // TODO: add ErrChanNotFound? - f.failFundingFlow(peer, pendingChanID, err) - return - } - - // Create an entry in the local discovery map so we can ensure that we - // process the channel confirmation fully before we receive a funding - // locked message. - fundingPoint := resCtx.reservation.FundingOutpoint() - permChanID := lnwire.NewChanIDFromOutPoint(fundingPoint) - f.localDiscoveryMtx.Lock() - f.localDiscoverySignals[permChanID] = make(chan struct{}) - f.localDiscoveryMtx.Unlock() - - // The remote peer has responded with a signature for our commitment - // transaction. We'll verify the signature for validity, then commit - // the state to disk as we can now open the channel. 
- commitSig, err := msg.CommitSig.ToSignature() - if err != nil { - log.Errorf("Unable to parse signature: %v", err) - f.failFundingFlow(peer, pendingChanID, err) - return - } - - completeChan, err := resCtx.reservation.CompleteReservation( - nil, commitSig, - ) - if err != nil { - log.Errorf("Unable to complete reservation sign "+ - "complete: %v", err) - f.failFundingFlow(peer, pendingChanID, err) - return - } - - // The channel is now marked IsPending in the database, and we can - // delete it from our set of active reservations. - f.deleteReservationCtx(peerKey, pendingChanID) - - // Broadcast the finalized funding transaction to the network, but only - // if we actually have the funding transaction. - if completeChan.ChanType.HasFundingTx() { - fundingTx := completeChan.FundingTxn - var fundingTxBuf bytes.Buffer - if err := fundingTx.Serialize(&fundingTxBuf); err != nil { - log.Errorf("Unable to serialize funding "+ - "transaction %v: %v", fundingTx.TxHash(), err) - - // Clear the buffer of any bytes that were written - // before the serialization error to prevent logging an - // incomplete transaction. - fundingTxBuf.Reset() - } - - log.Infof("Broadcasting funding tx for ChannelPoint(%v): %x", - completeChan.FundingOutpoint, fundingTxBuf.Bytes()) - - // Set a nil short channel ID at this stage because we do not - // know it until our funding tx confirms. - label := labels.MakeLabel( - labels.LabelTypeChannelOpen, nil, - ) - - err = f.cfg.PublishTransaction(fundingTx, label) - if err != nil { - log.Errorf("Unable to broadcast funding tx %x for "+ - "ChannelPoint(%v): %v", fundingTxBuf.Bytes(), - completeChan.FundingOutpoint, err) - - // We failed to broadcast the funding transaction, but - // watch the channel regardless, in case the - // transaction made it to the network. We will retry - // broadcast at startup. - // - // TODO(halseth): retry more often? Handle with CPFP? - // Just delete from the DB? 
- } - } - - // Now that we have a finalized reservation for this funding flow, - // we'll send the to be active channel to the ChainArbitrator so it can - // watch for any on-chain actions before the channel has fully - // confirmed. - if err := f.cfg.WatchNewChannel(completeChan, peerKey); err != nil { - log.Errorf("Unable to send new ChannelPoint(%v) for "+ - "arbitration: %v", fundingPoint, err) - } - - log.Infof("Finalizing pending_id(%x) over ChannelPoint(%v), "+ - "waiting for channel open on-chain", pendingChanID[:], - fundingPoint) - - // Send an update to the upstream client that the negotiation process - // is over. - // - // TODO(roasbeef): add abstraction over updates to accommodate - // long-polling, or SSE, etc. - upd := &lnrpc.OpenStatusUpdate{ - Update: &lnrpc.OpenStatusUpdate_ChanPending{ - ChanPending: &lnrpc.PendingUpdate{ - Txid: fundingPoint.Hash[:], - OutputIndex: fundingPoint.Index, - }, - }, - PendingChanId: pendingChanID[:], - } - - select { - case resCtx.updates <- upd: - // Inform the ChannelNotifier that the channel has entered - // pending open state. - f.cfg.NotifyPendingOpenChannelEvent(*fundingPoint, completeChan) - case <-f.quit: - return - } - - // At this point we have broadcast the funding transaction and done all - // necessary processing. - f.wg.Add(1) - go f.advanceFundingState(completeChan, pendingChanID, resCtx.updates) -} - -// confirmedChannel wraps a confirmed funding transaction, as well as the short -// channel ID which identifies that channel into a single struct. We'll use -// this to pass around the final state of a channel after it has been -// confirmed. -type confirmedChannel struct { - // shortChanID expresses where in the block the funding transaction was - // located. - shortChanID lnwire.ShortChannelID - - // fundingTx is the funding transaction that created the channel. 
- fundingTx *wire.MsgTx -} - -// waitForFundingWithTimeout is a wrapper around waitForFundingConfirmation and -// waitForTimeout that will return ErrConfirmationTimeout if we are not the -// channel initiator and the maxWaitNumBlocksFundingConf has passed from the -// funding broadcast height. In case of confirmation, the short channel ID of -// the channel and the funding transaction will be returned. -func (f *fundingManager) waitForFundingWithTimeout( - ch *channeldb.OpenChannel) (*confirmedChannel, er.R) { - - confChan := make(chan *confirmedChannel) - timeoutChan := make(chan er.R, 1) - cancelChan := make(chan struct{}) - - f.wg.Add(1) - go f.waitForFundingConfirmation(ch, cancelChan, confChan) - - // If we are not the initiator, we have no money at stake and will - // timeout waiting for the funding transaction to confirm after a - // while. - if !ch.IsInitiator { - f.wg.Add(1) - go f.waitForTimeout(ch, cancelChan, timeoutChan) - } - defer close(cancelChan) - - select { - case err := <-timeoutChan: - if err != nil { - return nil, err - } - return nil, ErrConfirmationTimeout.Default() - - case <-f.quit: - // The fundingManager is shutting down, and will resume wait on - // startup. - return nil, ErrFundingManagerShuttingDown.Default() - - case confirmedChannel, ok := <-confChan: - if !ok { - return nil, er.Errorf("waiting for funding" + - "confirmation failed") - } - return confirmedChannel, nil - } -} - -// makeFundingScript re-creates the funding script for the funding transaction -// of the target channel. 
-func makeFundingScript(channel *channeldb.OpenChannel) ([]byte, er.R) { - localKey := channel.LocalChanCfg.MultiSigKey.PubKey.SerializeCompressed() - remoteKey := channel.RemoteChanCfg.MultiSigKey.PubKey.SerializeCompressed() - - multiSigScript, err := input.GenMultiSigScript(localKey, remoteKey) - if err != nil { - return nil, err - } - - return input.WitnessScriptHash(multiSigScript) -} - -// waitForFundingConfirmation handles the final stages of the channel funding -// process once the funding transaction has been broadcast. The primary -// function of waitForFundingConfirmation is to wait for blockchain -// confirmation, and then to notify the other systems that must be notified -// when a channel has become active for lightning transactions. -// The wait can be canceled by closing the cancelChan. In case of success, -// a *lnwire.ShortChannelID will be passed to confChan. -// -// NOTE: This MUST be run as a goroutine. -func (f *fundingManager) waitForFundingConfirmation( - completeChan *channeldb.OpenChannel, cancelChan <-chan struct{}, - confChan chan<- *confirmedChannel) { - - defer f.wg.Done() - defer close(confChan) - - // Register with the ChainNotifier for a notification once the funding - // transaction reaches `numConfs` confirmations. 
- txid := completeChan.FundingOutpoint.Hash - fundingScript, err := makeFundingScript(completeChan) - if err != nil { - log.Errorf("unable to create funding script for "+ - "ChannelPoint(%v): %v", completeChan.FundingOutpoint, - err) - return - } - numConfs := uint32(completeChan.NumConfsRequired) - confNtfn, err := f.cfg.Notifier.RegisterConfirmationsNtfn( - &txid, fundingScript, numConfs, - completeChan.FundingBroadcastHeight, - ) - if err != nil { - log.Errorf("Unable to register for confirmation of "+ - "ChannelPoint(%v): %v", completeChan.FundingOutpoint, - err) - return - } - - log.Infof("Waiting for funding tx (%v) to reach %v confirmations", - txid, numConfs) - - var confDetails *chainntnfs.TxConfirmation - var ok bool - - // Wait until the specified number of confirmations has been reached, - // we get a cancel signal, or the wallet signals a shutdown. - select { - case confDetails, ok = <-confNtfn.Confirmed: - // fallthrough - - case <-cancelChan: - log.Warnf("canceled waiting for funding confirmation, "+ - "stopping funding flow for ChannelPoint(%v)", - completeChan.FundingOutpoint) - return - - case <-f.quit: - log.Warnf("fundingManager shutting down, stopping funding "+ - "flow for ChannelPoint(%v)", - completeChan.FundingOutpoint) - return - } - - if !ok { - log.Warnf("ChainNotifier shutting down, cannot complete "+ - "funding flow for ChannelPoint(%v)", - completeChan.FundingOutpoint) - return - } - - fundingPoint := completeChan.FundingOutpoint - log.Infof("ChannelPoint(%v) is now active: ChannelID(%v)", - fundingPoint, lnwire.NewChanIDFromOutPoint(&fundingPoint)) - - // With the block height and the transaction index known, we can - // construct the compact chanID which is used on the network to unique - // identify channels. 
- shortChanID := lnwire.ShortChannelID{ - BlockHeight: confDetails.BlockHeight, - TxIndex: confDetails.TxIndex, - TxPosition: uint16(fundingPoint.Index), - } - - select { - case confChan <- &confirmedChannel{ - shortChanID: shortChanID, - fundingTx: confDetails.Tx, - }: - case <-f.quit: - return - } -} - -// waitForTimeout will close the timeout channel if maxWaitNumBlocksFundingConf -// has passed from the broadcast height of the given channel. In case of error, -// the error is sent on timeoutChan. The wait can be canceled by closing the -// cancelChan. -// -// NOTE: timeoutChan MUST be buffered. -// NOTE: This MUST be run as a goroutine. -func (f *fundingManager) waitForTimeout(completeChan *channeldb.OpenChannel, - cancelChan <-chan struct{}, timeoutChan chan<- er.R) { - defer f.wg.Done() - - epochClient, err := f.cfg.Notifier.RegisterBlockEpochNtfn(nil) - if err != nil { - timeoutChan <- er.Errorf("unable to register for epoch "+ - "notification: %v", err) - return - } - - defer epochClient.Cancel() - - // On block maxHeight we will cancel the funding confirmation wait. - maxHeight := completeChan.FundingBroadcastHeight + maxWaitNumBlocksFundingConf - for { - select { - case epoch, ok := <-epochClient.Epochs: - if !ok { - timeoutChan <- er.Errorf("epoch client " + - "shutting down") - return - } - - // Close the timeout channel and exit if the block is - // aboce the max height. - if uint32(epoch.Height) >= maxHeight { - log.Warnf("Waited for %v blocks without "+ - "seeing funding transaction confirmed,"+ - " cancelling.", - maxWaitNumBlocksFundingConf) - - // Notify the caller of the timeout. - close(timeoutChan) - return - } - - // TODO: If we are the channel initiator implement - // a method for recovering the funds from the funding - // transaction - - case <-cancelChan: - return - - case <-f.quit: - // The fundingManager is shutting down, will resume - // waiting for the funding transaction on startup. 
- return - } - } -} - -// handleFundingConfirmation marks a channel as open in the database, and set -// the channelOpeningState markedOpen. In addition it will report the now -// decided short channel ID to the switch, and close the local discovery signal -// for this channel. -func (f *fundingManager) handleFundingConfirmation( - completeChan *channeldb.OpenChannel, - confChannel *confirmedChannel) er.R { - - fundingPoint := completeChan.FundingOutpoint - chanID := lnwire.NewChanIDFromOutPoint(&fundingPoint) - - // TODO(roasbeef): ideally persistent state update for chan above - // should be abstracted - - // Now that that the channel has been fully confirmed, we'll request - // that the wallet fully verify this channel to ensure that it can be - // used. - err := f.cfg.Wallet.ValidateChannel(completeChan, confChannel.fundingTx) - if err != nil { - // TODO(roasbeef): delete chan state? - return er.Errorf("unable to validate channel: %v", err) - } - - // The funding transaction now being confirmed, we add this channel to - // the fundingManager's internal persistent state machine that we use - // to track the remaining process of the channel opening. This is - // useful to resume the opening process in case of restarts. We set the - // opening state before we mark the channel opened in the database, - // such that we can receover from one of the db writes failing. - err = f.saveChannelOpeningState( - &fundingPoint, markedOpen, &confChannel.shortChanID, - ) - if err != nil { - return er.Errorf("error setting channel state to markedOpen: %v", - err) - } - - // Now that the channel has been fully confirmed and we successfully - // saved the opening state, we'll mark it as open within the database. - err = completeChan.MarkAsOpen(confChannel.shortChanID) - if err != nil { - return er.Errorf("error setting channel pending flag to false: "+ - "%v", err) - } - - // Inform the ChannelNotifier that the channel has transitioned from - // pending open to open. 
- f.cfg.NotifyOpenChannelEvent(completeChan.FundingOutpoint) - - // As there might already be an active link in the switch with an - // outdated short chan ID, we'll instruct the switch to load the updated - // short chan id from disk. - err = f.cfg.ReportShortChanID(fundingPoint) - if err != nil { - log.Errorf("unable to report short chan id: %v", err) - } - - // If we opened the channel, and lnd's wallet published our funding tx - // (which is not the case for some channels) then we update our - // transaction label with our short channel ID, which is known now that - // our funding transaction has confirmed. We do not label transactions - // we did not publish, because our wallet has no knowledge of them. - if completeChan.IsInitiator && completeChan.ChanType.HasFundingTx() { - shortChanID := completeChan.ShortChanID() - label := labels.MakeLabel( - labels.LabelTypeChannelOpen, &shortChanID, - ) - - err = f.cfg.UpdateLabel( - completeChan.FundingOutpoint.Hash, label, - ) - if err != nil { - log.Errorf("unable to update label: %v", err) - } - } - - // Close the discoverySignal channel, indicating to a separate - // goroutine that the channel now is marked as open in the database - // and that it is acceptable to process funding locked messages - // from the peer. - f.localDiscoveryMtx.Lock() - if discoverySignal, ok := f.localDiscoverySignals[chanID]; ok { - close(discoverySignal) - } - f.localDiscoveryMtx.Unlock() - - return nil -} - -// sendFundingLocked creates and sends the fundingLocked message. -// This should be called after the funding transaction has been confirmed, -// and the channelState is 'markedOpen'. 
-func (f *fundingManager) sendFundingLocked( - completeChan *channeldb.OpenChannel, channel *lnwallet.LightningChannel, - shortChanID *lnwire.ShortChannelID) er.R { - - chanID := lnwire.NewChanIDFromOutPoint(&completeChan.FundingOutpoint) - - var peerKey [33]byte - copy(peerKey[:], completeChan.IdentityPub.SerializeCompressed()) - - // Next, we'll send over the funding locked message which marks that we - // consider the channel open by presenting the remote party with our - // next revocation key. Without the revocation key, the remote party - // will be unable to propose state transitions. - nextRevocation, err := channel.NextRevocationKey() - if err != nil { - return er.Errorf("unable to create next revocation: %v", err) - } - fundingLockedMsg := lnwire.NewFundingLocked(chanID, nextRevocation) - - // If the peer has disconnected before we reach this point, we will need - // to wait for him to come back online before sending the fundingLocked - // message. This is special for fundingLocked, since failing to send any - // of the previous messages in the funding flow just cancels the flow. - // But now the funding transaction is confirmed, the channel is open - // and we have to make sure the peer gets the fundingLocked message when - // it comes back online. This is also crucial during restart of lnd, - // where we might try to resend the fundingLocked message before the - // server has had the time to connect to the peer. We keep trying to - // send fundingLocked until we succeed, or the fundingManager is shut - // down. 
- for { - connected := make(chan lnpeer.Peer, 1) - f.cfg.NotifyWhenOnline(peerKey, connected) - - var peer lnpeer.Peer - select { - case peer = <-connected: - case <-f.quit: - return ErrFundingManagerShuttingDown.Default() - } - - log.Infof("Peer(%x) is online, sending FundingLocked "+ - "for ChannelID(%v)", peerKey, chanID) - - if err := peer.SendMessage(true, fundingLockedMsg); err == nil { - // Sending succeeded, we can break out and continue the - // funding flow. - break - } - - log.Warnf("Unable to send fundingLocked to peer %x: %v. "+ - "Will retry when online", peerKey, err) - } - - return nil -} - -// addToRouterGraph sends a ChannelAnnouncement and a ChannelUpdate to the -// gossiper so that the channel is added to the Router's internal graph. -// These announcement messages are NOT broadcasted to the greater network, -// only to the channel counter party. The proofs required to announce the -// channel to the greater network will be created and sent in annAfterSixConfs. -func (f *fundingManager) addToRouterGraph(completeChan *channeldb.OpenChannel, - shortChanID *lnwire.ShortChannelID) er.R { - - chanID := lnwire.NewChanIDFromOutPoint(&completeChan.FundingOutpoint) - - // We'll obtain the min HTLC value we can forward in our direction, as - // we'll use this value within our ChannelUpdate. This constraint is - // originally set by the remote node, as it will be the one that will - // need to determine the smallest HTLC it deems economically relevant. - fwdMinHTLC := completeChan.LocalChanCfg.MinHTLC - - // We don't necessarily want to go as low as the remote party - // allows. Check it against our default forwarding policy. - if fwdMinHTLC < f.cfg.DefaultRoutingPolicy.MinHTLCOut { - fwdMinHTLC = f.cfg.DefaultRoutingPolicy.MinHTLCOut - } - - // We'll obtain the max HTLC value we can forward in our direction, as - // we'll use this value within our ChannelUpdate. 
This value must be <= - // channel capacity and <= the maximum in-flight msats set by the peer. - fwdMaxHTLC := completeChan.LocalChanCfg.MaxPendingAmount - capacityMSat := lnwire.NewMSatFromSatoshis(completeChan.Capacity) - if fwdMaxHTLC > capacityMSat { - fwdMaxHTLC = capacityMSat - } - - ann, err := f.newChanAnnouncement( - f.cfg.IDKey, completeChan.IdentityPub, - completeChan.LocalChanCfg.MultiSigKey.PubKey, - completeChan.RemoteChanCfg.MultiSigKey.PubKey, *shortChanID, - chanID, fwdMinHTLC, fwdMaxHTLC, - ) - if err != nil { - return er.Errorf("error generating channel "+ - "announcement: %v", err) - } - - // Send ChannelAnnouncement and ChannelUpdate to the gossiper to add - // to the Router's topology. - errChan := f.cfg.SendAnnouncement( - ann.chanAnn, discovery.ChannelCapacity(completeChan.Capacity), - discovery.ChannelPoint(completeChan.FundingOutpoint), - ) - select { - case err := <-errChan: - if err != nil { - if routing.IsError(er.Wrapped(err), routing.ErrOutdated, - routing.ErrIgnored) { - log.Debugf("Router rejected "+ - "ChannelAnnouncement: %v", err) - } else { - return er.Errorf("error sending channel "+ - "announcement: %v", err) - } - } - case <-f.quit: - return ErrFundingManagerShuttingDown.Default() - } - - errChan = f.cfg.SendAnnouncement(ann.chanUpdateAnn) - select { - case err := <-errChan: - if err != nil { - if routing.IsError(er.Wrapped(err), routing.ErrOutdated, - routing.ErrIgnored) { - log.Debugf("Router rejected "+ - "ChannelUpdate: %v", err) - } else { - return er.Errorf("error sending channel "+ - "update: %v", err) - } - } - case <-f.quit: - return ErrFundingManagerShuttingDown.Default() - } - - return nil -} - -// annAfterSixConfs broadcasts the necessary channel announcement messages to -// the network after 6 confs. Should be called after the fundingLocked message -// is sent and the channel is added to the router graph (channelState is -// 'addedToRouterGraph') and the channel is ready to be used. 
This is the last -// step in the channel opening process, and the opening state will be deleted -// from the database if successful. -func (f *fundingManager) annAfterSixConfs(completeChan *channeldb.OpenChannel, - shortChanID *lnwire.ShortChannelID) er.R { - - // If this channel is not meant to be announced to the greater network, - // we'll only send our NodeAnnouncement to our counterparty to ensure we - // don't leak any of our information. - announceChan := completeChan.ChannelFlags&lnwire.FFAnnounceChannel != 0 - if !announceChan { - log.Debugf("Will not announce private channel %v.", - shortChanID.ToUint64()) - - peerChan := make(chan lnpeer.Peer, 1) - - var peerKey [33]byte - copy(peerKey[:], completeChan.IdentityPub.SerializeCompressed()) - - f.cfg.NotifyWhenOnline(peerKey, peerChan) - - var peer lnpeer.Peer - select { - case peer = <-peerChan: - case <-f.quit: - return ErrFundingManagerShuttingDown.Default() - } - - nodeAnn, err := f.cfg.CurrentNodeAnnouncement() - if err != nil { - return er.Errorf("unable to retrieve current node "+ - "announcement: %v", err) - } - - chanID := lnwire.NewChanIDFromOutPoint( - &completeChan.FundingOutpoint, - ) - pubKey := peer.PubKey() - log.Debugf("Sending our NodeAnnouncement for "+ - "ChannelID(%v) to %x", chanID, pubKey) - - // TODO(halseth): make reliable. If the peer is not online this - // will fail, and the opening process will stop. Should instead - // block here, waiting for the peer to come online. - if err := peer.SendMessage(true, &nodeAnn); err != nil { - return er.Errorf("unable to send node announcement "+ - "to peer %x: %v", pubKey, err) - } - } else { - // Otherwise, we'll wait until the funding transaction has - // reached 6 confirmations before announcing it. 
- numConfs := uint32(completeChan.NumConfsRequired) - if numConfs < 6 { - numConfs = 6 - } - txid := completeChan.FundingOutpoint.Hash - log.Debugf("Will announce channel %v after ChannelPoint"+ - "(%v) has gotten %d confirmations", - shortChanID.ToUint64(), completeChan.FundingOutpoint, - numConfs) - - fundingScript, err := makeFundingScript(completeChan) - if err != nil { - return er.Errorf("unable to create funding script for "+ - "ChannelPoint(%v): %v", - completeChan.FundingOutpoint, err) - } - - // Register with the ChainNotifier for a notification once the - // funding transaction reaches at least 6 confirmations. - confNtfn, err := f.cfg.Notifier.RegisterConfirmationsNtfn( - &txid, fundingScript, numConfs, - completeChan.FundingBroadcastHeight, - ) - if err != nil { - return er.Errorf("unable to register for "+ - "confirmation of ChannelPoint(%v): %v", - completeChan.FundingOutpoint, err) - } - - // Wait until 6 confirmations has been reached or the wallet - // signals a shutdown. - select { - case _, ok := <-confNtfn.Confirmed: - if !ok { - return er.Errorf("ChainNotifier shutting "+ - "down, cannot complete funding flow "+ - "for ChannelPoint(%v)", - completeChan.FundingOutpoint) - } - // Fallthrough. - - case <-f.quit: - return er.Errorf("%v, stopping funding flow for "+ - "ChannelPoint(%v)", - ErrFundingManagerShuttingDown, - completeChan.FundingOutpoint) - } - - fundingPoint := completeChan.FundingOutpoint - chanID := lnwire.NewChanIDFromOutPoint(&fundingPoint) - - log.Infof("Announcing ChannelPoint(%v), short_chan_id=%v", - &fundingPoint, shortChanID) - - // Create and broadcast the proofs required to make this channel - // public and usable for other nodes for routing. 
- err = f.announceChannel( - f.cfg.IDKey, completeChan.IdentityPub, - completeChan.LocalChanCfg.MultiSigKey.PubKey, - completeChan.RemoteChanCfg.MultiSigKey.PubKey, - *shortChanID, chanID, - ) - if err != nil { - return er.Errorf("channel announcement failed: %v", err) - } - - log.Debugf("Channel with ChannelPoint(%v), short_chan_id=%v "+ - "announced", &fundingPoint, shortChanID) - } - - return nil -} - -// handleFundingLocked finalizes the channel funding process and enables the -// channel to enter normal operating mode. -func (f *fundingManager) handleFundingLocked(peer lnpeer.Peer, - msg *lnwire.FundingLocked) { - - defer f.wg.Done() - log.Debugf("Received FundingLocked for ChannelID(%v) from "+ - "peer %x", msg.ChanID, - peer.IdentityKey().SerializeCompressed()) - - // If we are currently in the process of handling a funding locked - // message for this channel, ignore. - f.handleFundingLockedMtx.Lock() - _, ok := f.handleFundingLockedBarriers[msg.ChanID] - if ok { - log.Infof("Already handling fundingLocked for "+ - "ChannelID(%v), ignoring.", msg.ChanID) - f.handleFundingLockedMtx.Unlock() - return - } - - // If not already handling fundingLocked for this channel, set up - // barrier, and move on. - f.handleFundingLockedBarriers[msg.ChanID] = struct{}{} - f.handleFundingLockedMtx.Unlock() - - defer func() { - f.handleFundingLockedMtx.Lock() - delete(f.handleFundingLockedBarriers, msg.ChanID) - f.handleFundingLockedMtx.Unlock() - }() - - f.localDiscoveryMtx.Lock() - localDiscoverySignal, ok := f.localDiscoverySignals[msg.ChanID] - f.localDiscoveryMtx.Unlock() - - if ok { - // Before we proceed with processing the funding locked - // message, we'll wait for the local waitForFundingConfirmation - // goroutine to signal that it has the necessary state in - // place. Otherwise, we may be missing critical information - // required to handle forwarded HTLC's. 
- select { - case <-localDiscoverySignal: - // Fallthrough - case <-f.quit: - return - } - - // With the signal received, we can now safely delete the entry - // from the map. - f.localDiscoveryMtx.Lock() - delete(f.localDiscoverySignals, msg.ChanID) - f.localDiscoveryMtx.Unlock() - } - - // First, we'll attempt to locate the channel whose funding workflow is - // being finalized by this message. We go to the database rather than - // our reservation map as we may have restarted, mid funding flow. - chanID := msg.ChanID - channel, err := f.cfg.FindChannel(chanID) - if err != nil { - log.Errorf("Unable to locate ChannelID(%v), cannot complete "+ - "funding", chanID) - return - } - - // If the RemoteNextRevocation is non-nil, it means that we have - // already processed fundingLocked for this channel, so ignore. - if channel.RemoteNextRevocation != nil { - log.Infof("Received duplicate fundingLocked for "+ - "ChannelID(%v), ignoring.", chanID) - return - } - - // The funding locked message contains the next commitment point we'll - // need to create the next commitment state for the remote party. So - // we'll insert that into the channel now before passing it along to - // other sub-systems. - err = channel.InsertNextRevocation(msg.NextPerCommitmentPoint) - if err != nil { - log.Errorf("unable to insert next commitment point: %v", err) - return - } - - // Launch a defer so we _ensure_ that the channel barrier is properly - // closed even if the target peer is no longer online at this point. - defer func() { - // Close the active channel barrier signaling the readHandler - // that commitment related modifications to this channel can - // now proceed. 
- f.barrierMtx.Lock() - chanBarrier, ok := f.newChanBarriers[chanID] - if ok { - log.Tracef("Closing chan barrier for ChanID(%v)", - chanID) - close(chanBarrier) - delete(f.newChanBarriers, chanID) - } - f.barrierMtx.Unlock() - }() - - if err := peer.AddNewChannel(channel, f.quit); err != nil { - log.Errorf("Unable to add new channel %v with peer %x: %v", - channel.FundingOutpoint, - peer.IdentityKey().SerializeCompressed(), err, - ) - } -} - -// chanAnnouncement encapsulates the two authenticated announcements that we -// send out to the network after a new channel has been created locally. -type chanAnnouncement struct { - chanAnn *lnwire.ChannelAnnouncement - chanUpdateAnn *lnwire.ChannelUpdate - chanProof *lnwire.AnnounceSignatures -} - -// newChanAnnouncement creates the authenticated channel announcement messages -// required to broadcast a newly created channel to the network. The -// announcement is two part: the first part authenticates the existence of the -// channel and contains four signatures binding the funding pub keys and -// identity pub keys of both parties to the channel, and the second segment is -// authenticated only by us and contains our directional routing policy for the -// channel. -func (f *fundingManager) newChanAnnouncement(localPubKey, remotePubKey, - localFundingKey, remoteFundingKey *btcec.PublicKey, - shortChanID lnwire.ShortChannelID, chanID lnwire.ChannelID, - fwdMinHTLC, fwdMaxHTLC lnwire.MilliSatoshi) (*chanAnnouncement, er.R) { - - chainHash := *f.cfg.Wallet.Cfg.NetParams.GenesisHash - - // The unconditional section of the announcement is the ShortChannelID - // itself which compactly encodes the location of the funding output - // within the blockchain. 
- chanAnn := &lnwire.ChannelAnnouncement{ - ShortChannelID: shortChanID, - Features: lnwire.NewRawFeatureVector(), - ChainHash: chainHash, - } - - // The chanFlags field indicates which directed edge of the channel is - // being updated within the ChannelUpdateAnnouncement announcement - // below. A value of zero means it's the edge of the "first" node and 1 - // being the other node. - var chanFlags lnwire.ChanUpdateChanFlags - - // The lexicographical ordering of the two identity public keys of the - // nodes indicates which of the nodes is "first". If our serialized - // identity key is lower than theirs then we're the "first" node and - // second otherwise. - selfBytes := localPubKey.SerializeCompressed() - remoteBytes := remotePubKey.SerializeCompressed() - if bytes.Compare(selfBytes, remoteBytes) == -1 { - copy(chanAnn.NodeID1[:], localPubKey.SerializeCompressed()) - copy(chanAnn.NodeID2[:], remotePubKey.SerializeCompressed()) - copy(chanAnn.BitcoinKey1[:], localFundingKey.SerializeCompressed()) - copy(chanAnn.BitcoinKey2[:], remoteFundingKey.SerializeCompressed()) - - // If we're the first node then update the chanFlags to - // indicate the "direction" of the update. - chanFlags = 0 - } else { - copy(chanAnn.NodeID1[:], remotePubKey.SerializeCompressed()) - copy(chanAnn.NodeID2[:], localPubKey.SerializeCompressed()) - copy(chanAnn.BitcoinKey1[:], remoteFundingKey.SerializeCompressed()) - copy(chanAnn.BitcoinKey2[:], localFundingKey.SerializeCompressed()) - - // If we're the second node then update the chanFlags to - // indicate the "direction" of the update. - chanFlags = 1 - } - - // Our channel update message flags will signal that we support the - // max_htlc field. - msgFlags := lnwire.ChanUpdateOptionMaxHtlc - - // We announce the channel with the default values. Some of - // these values can later be changed by crafting a new ChannelUpdate. 
- chanUpdateAnn := &lnwire.ChannelUpdate{ - ShortChannelID: shortChanID, - ChainHash: chainHash, - Timestamp: uint32(time.Now().Unix()), - MessageFlags: msgFlags, - ChannelFlags: chanFlags, - TimeLockDelta: uint16(f.cfg.DefaultRoutingPolicy.TimeLockDelta), - - // We use the HtlcMinimumMsat that the remote party required us - // to use, as our ChannelUpdate will be used to carry HTLCs - // towards them. - HtlcMinimumMsat: fwdMinHTLC, - HtlcMaximumMsat: fwdMaxHTLC, - - BaseFee: uint32(f.cfg.DefaultRoutingPolicy.BaseFee), - FeeRate: uint32(f.cfg.DefaultRoutingPolicy.FeeRate), - } - - // With the channel update announcement constructed, we'll generate a - // signature that signs a double-sha digest of the announcement. - // This'll serve to authenticate this announcement and any other future - // updates we may send. - chanUpdateMsg, err := chanUpdateAnn.DataToSign() - if err != nil { - return nil, err - } - sig, err := f.cfg.SignMessage(f.cfg.IDKey, chanUpdateMsg) - if err != nil { - return nil, er.Errorf("unable to generate channel "+ - "update announcement signature: %v", err) - } - chanUpdateAnn.Signature, err = lnwire.NewSigFromSignature(sig) - if err != nil { - return nil, er.Errorf("unable to generate channel "+ - "update announcement signature: %v", err) - } - - // The channel existence proofs itself is currently announced in - // distinct message. In order to properly authenticate this message, we - // need two signatures: one under the identity public key used which - // signs the message itself and another signature of the identity - // public key under the funding key itself. - // - // TODO(roasbeef): use SignAnnouncement here instead? 
- chanAnnMsg, err := chanAnn.DataToSign() - if err != nil { - return nil, err - } - nodeSig, err := f.cfg.SignMessage(f.cfg.IDKey, chanAnnMsg) - if err != nil { - return nil, er.Errorf("unable to generate node "+ - "signature for channel announcement: %v", err) - } - bitcoinSig, err := f.cfg.SignMessage(localFundingKey, chanAnnMsg) - if err != nil { - return nil, er.Errorf("unable to generate bitcoin "+ - "signature for node public key: %v", err) - } - - // Finally, we'll generate the announcement proof which we'll use to - // provide the other side with the necessary signatures required to - // allow them to reconstruct the full channel announcement. - proof := &lnwire.AnnounceSignatures{ - ChannelID: chanID, - ShortChannelID: shortChanID, - } - proof.NodeSignature, err = lnwire.NewSigFromSignature(nodeSig) - if err != nil { - return nil, err - } - proof.BitcoinSignature, err = lnwire.NewSigFromSignature(bitcoinSig) - if err != nil { - return nil, err - } - - return &chanAnnouncement{ - chanAnn: chanAnn, - chanUpdateAnn: chanUpdateAnn, - chanProof: proof, - }, nil -} - -// announceChannel announces a newly created channel to the rest of the network -// by crafting the two authenticated announcements required for the peers on -// the network to recognize the legitimacy of the channel. The crafted -// announcements are then sent to the channel router to handle broadcasting to -// the network during its next trickle. -// This method is synchronous and will return when all the network requests -// finish, either successfully or with an error. -func (f *fundingManager) announceChannel(localIDKey, remoteIDKey, localFundingKey, - remoteFundingKey *btcec.PublicKey, shortChanID lnwire.ShortChannelID, - chanID lnwire.ChannelID) er.R { - - // First, we'll create the batch of announcements to be sent upon - // initial channel creation. 
This includes the channel announcement - // itself, the channel update announcement, and our half of the channel - // proof needed to fully authenticate the channel. - // - // We can pass in zeroes for the min and max htlc policy, because we - // only use the channel announcement message from the returned struct. - ann, err := f.newChanAnnouncement(localIDKey, remoteIDKey, - localFundingKey, remoteFundingKey, shortChanID, chanID, - 0, 0, - ) - if err != nil { - log.Errorf("can't generate channel announcement: %v", err) - return err - } - - // We only send the channel proof announcement and the node announcement - // because addToRouterGraph previously sent the ChannelAnnouncement and - // the ChannelUpdate announcement messages. The channel proof and node - // announcements are broadcast to the greater network. - errChan := f.cfg.SendAnnouncement(ann.chanProof) - select { - case err := <-errChan: - if err != nil { - if routing.IsError(er.Wrapped(err), routing.ErrOutdated, - routing.ErrIgnored) { - log.Debugf("Router rejected "+ - "AnnounceSignatures: %v", err) - } else { - log.Errorf("Unable to send channel "+ - "proof: %v", err) - return err - } - } - - case <-f.quit: - return ErrFundingManagerShuttingDown.Default() - } - - // Now that the channel is announced to the network, we will also - // obtain and send a node announcement. This is done since a node - // announcement is only accepted after a channel is known for that - // particular node, and this might be our first channel. 
- nodeAnn, err := f.cfg.CurrentNodeAnnouncement() - if err != nil { - log.Errorf("can't generate node announcement: %v", err) - return err - } - - errChan = f.cfg.SendAnnouncement(&nodeAnn) - select { - case err := <-errChan: - if err != nil { - if routing.IsError(er.Wrapped(err), routing.ErrOutdated, - routing.ErrIgnored) { - log.Debugf("Router rejected "+ - "NodeAnnouncement: %v", err) - } else { - log.Errorf("Unable to send node "+ - "announcement: %v", err) - return err - } - } - - case <-f.quit: - return ErrFundingManagerShuttingDown.Default() - } - - return nil -} - -// initFundingWorkflow sends a message to the funding manager instructing it -// to initiate a single funder workflow with the source peer. -// TODO(roasbeef): re-visit blocking nature.. -func (f *fundingManager) initFundingWorkflow(peer lnpeer.Peer, req *openChanReq) { - f.fundingRequests <- &initFundingMsg{ - peer: peer, - openChanReq: req, - } -} - -// getUpfrontShutdownScript takes a user provided script and a getScript -// function which can be used to generate an upfront shutdown script. If our -// peer does not support the feature, this function will error if a non-zero -// script was provided by the user, and return an empty script otherwise. If -// our peer does support the feature, we will return the user provided script -// if non-zero, or a freshly generated script if our node is configured to set -// upfront shutdown scripts automatically. -func getUpfrontShutdownScript(enableUpfrontShutdown bool, peer lnpeer.Peer, - script lnwire.DeliveryAddress, - getScript func() (lnwire.DeliveryAddress, er.R)) (lnwire.DeliveryAddress, - er.R) { - - // Check whether the remote peer supports upfront shutdown scripts. - remoteUpfrontShutdown := peer.RemoteFeatures().HasFeature( - lnwire.UpfrontShutdownScriptOptional, - ) - - // If the peer does not support upfront shutdown scripts, and one has been - // provided, return an error because the feature is not supported. 
- if !remoteUpfrontShutdown && len(script) != 0 { - return nil, errUpfrontShutdownScriptNotSupported.Default() - } - - // If the peer does not support upfront shutdown, return an empty address. - if !remoteUpfrontShutdown { - return nil, nil - } - - // If the user has provided an script and the peer supports the feature, - // return it. Note that user set scripts override the enable upfront - // shutdown flag. - if len(script) > 0 { - return script, nil - } - - // If we do not have setting of upfront shutdown script enabled, return - // an empty script. - if !enableUpfrontShutdown { - return nil, nil - } - - return getScript() -} - -// handleInitFundingMsg creates a channel reservation within the daemon's -// wallet, then sends a funding request to the remote peer kicking off the -// funding workflow. -func (f *fundingManager) handleInitFundingMsg(msg *initFundingMsg) { - var ( - peerKey = msg.peer.IdentityKey() - localAmt = msg.localFundingAmt - minHtlcIn = msg.minHtlcIn - remoteCsvDelay = msg.remoteCsvDelay - maxValue = msg.maxValueInFlight - maxHtlcs = msg.maxHtlcs - maxCSV = msg.maxLocalCsv - ) - - // If no maximum CSV delay was set for this channel, we use our default - // value. - if maxCSV == 0 { - maxCSV = f.cfg.MaxLocalCSVDelay - } - - // We'll determine our dust limit depending on which chain is active. 
- var ourDustLimit btcutil.Amount - switch f.cfg.RegisteredChains.PrimaryChain() { - case chainreg.BitcoinChain: - ourDustLimit = lnwallet.DefaultDustLimit() - case chainreg.LitecoinChain: - ourDustLimit = chainreg.DefaultLitecoinDustLimit - } - - log.Infof("Initiating fundingRequest(local_amt=%v "+ - "(subtract_fees=%v), push_amt=%v, chain_hash=%v, peer=%x, "+ - "dust_limit=%v, min_confs=%v)", localAmt, msg.subtractFees, - msg.pushAmt, msg.chainHash, peerKey.SerializeCompressed(), - ourDustLimit, msg.minConfs) - - // First, we'll query the fee estimator for a fee that should get the - // commitment transaction confirmed by the next few blocks (conf target - // of 3). We target the near blocks here to ensure that we'll be able - // to execute a timely unilateral channel closure if needed. - commitFeePerKw, err := f.cfg.FeeEstimator.EstimateFeePerKW(3) - if err != nil { - msg.err <- err - return - } - - // We set the channel flags to indicate whether we want this channel to - // be announced to the network. - var channelFlags lnwire.FundingFlag - if !msg.openChanReq.private { - // This channel will be announced. - channelFlags = lnwire.FFAnnounceChannel - } - - // If the caller specified their own channel ID, then we'll use that. - // Otherwise we'll generate a fresh one as normal. This will be used - // to track this reservation throughout its lifetime. - var chanID [32]byte - if msg.pendingChanID == zeroID { - chanID = f.nextPendingChanID() - } else { - // If the user specified their own pending channel ID, then - // we'll ensure it doesn't collide with any existing pending - // channel ID. 
- chanID = msg.pendingChanID - if _, err := f.getReservationCtx(peerKey, chanID); err == nil { - msg.err <- er.Errorf("pendingChannelID(%x) "+ - "already present", chanID[:]) - return - } - } - - // Check whether the peer supports upfront shutdown, and get an address - // which should be used (either a user specified address or a new - // address from the wallet if our node is configured to set shutdown - // address by default). - shutdown, err := getUpfrontShutdownScript( - f.cfg.EnableUpfrontShutdown, msg.peer, - msg.openChanReq.shutdownScript, - func() (lnwire.DeliveryAddress, er.R) { - addr, err := f.cfg.Wallet.NewAddress( - lnwallet.WitnessPubKey, false, - ) - if err != nil { - return nil, err - } - return txscript.PayToAddrScript(addr) - }, - ) - if err != nil { - msg.err <- err - return - } - - // Initialize a funding reservation with the local wallet. If the - // wallet doesn't have enough funds to commit to this channel, then the - // request will fail, and be aborted. - // - // Before we init the channel, we'll also check to see if we've - // negotiated the new tweakless commitment format. This is only the - // case if *both* us and the remote peer are signaling the proper - // feature bit. - commitType := commitmentType( - msg.peer.LocalFeatures(), msg.peer.RemoteFeatures(), - ) - req := &lnwallet.InitFundingReserveMsg{ - ChainHash: &msg.chainHash, - PendingChanID: chanID, - NodeID: peerKey, - NodeAddr: msg.peer.Address(), - SubtractFees: msg.subtractFees, - LocalFundingAmt: localAmt, - RemoteFundingAmt: 0, - CommitFeePerKw: commitFeePerKw, - FundingFeePerKw: msg.fundingFeePerKw, - PushMSat: msg.pushAmt, - Flags: channelFlags, - MinConfs: msg.minConfs, - CommitType: commitType, - ChanFunder: msg.chanFunder, - } - - reservation, err := f.cfg.Wallet.InitChannelReservation(req) - if err != nil { - msg.err <- err - return - } - - // Set our upfront shutdown address in the existing reservation. 
- reservation.SetOurUpfrontShutdown(shutdown) - - // Now that we have successfully reserved funds for this channel in the - // wallet, we can fetch the final channel capacity. This is done at - // this point since the final capacity might change in case of - // SubtractFees=true. - capacity := reservation.Capacity() - - log.Infof("Target commit tx sat/kw for pendingID(%x): %v", chanID, - int64(commitFeePerKw)) - - // If the remote CSV delay was not set in the open channel request, - // we'll use the RequiredRemoteDelay closure to compute the delay we - // require given the total amount of funds within the channel. - if remoteCsvDelay == 0 { - remoteCsvDelay = f.cfg.RequiredRemoteDelay(capacity) - } - - // If no minimum HTLC value was specified, use the default one. - if minHtlcIn == 0 { - minHtlcIn = f.cfg.DefaultMinHtlcIn - } - - // If no max value was specified, use the default one. - if maxValue == 0 { - maxValue = f.cfg.RequiredRemoteMaxValue(capacity) - } - - if maxHtlcs == 0 { - maxHtlcs = f.cfg.RequiredRemoteMaxHTLCs(capacity) - } - - // If a pending channel map for this peer isn't already created, then - // we create one, ultimately allowing us to track this pending - // reservation within the target peer. - peerIDKey := newSerializedKey(peerKey) - f.resMtx.Lock() - if _, ok := f.activeReservations[peerIDKey]; !ok { - f.activeReservations[peerIDKey] = make(pendingChannels) - } - - resCtx := &reservationWithCtx{ - chanAmt: capacity, - remoteCsvDelay: remoteCsvDelay, - remoteMinHtlc: minHtlcIn, - remoteMaxValue: maxValue, - remoteMaxHtlcs: maxHtlcs, - maxLocalCsv: maxCSV, - reservation: reservation, - peer: msg.peer, - updates: msg.updates, - err: msg.err, - } - f.activeReservations[peerIDKey][chanID] = resCtx - f.resMtx.Unlock() - - // Update the timestamp once the initFundingMsg has been handled. 
- defer resCtx.updateTimestamp() - - // Once the reservation has been created, and indexed, queue a funding - // request to the remote peer, kicking off the funding workflow. - ourContribution := reservation.OurContribution() - - // Finally, we'll use the current value of the channels and our default - // policy to determine of required commitment constraints for the - // remote party. - chanReserve := f.cfg.RequiredRemoteChanReserve(capacity, ourDustLimit) - - log.Infof("Starting funding workflow with %v for pending_id(%x), "+ - "committype=%v", msg.peer.Address(), chanID, commitType) - - fundingOpen := lnwire.OpenChannel{ - ChainHash: *f.cfg.Wallet.Cfg.NetParams.GenesisHash, - PendingChannelID: chanID, - FundingAmount: capacity, - PushAmount: msg.pushAmt, - DustLimit: ourContribution.DustLimit, - MaxValueInFlight: maxValue, - ChannelReserve: chanReserve, - HtlcMinimum: minHtlcIn, - FeePerKiloWeight: uint32(commitFeePerKw), - CsvDelay: remoteCsvDelay, - MaxAcceptedHTLCs: maxHtlcs, - FundingKey: ourContribution.MultiSigKey.PubKey, - RevocationPoint: ourContribution.RevocationBasePoint.PubKey, - PaymentPoint: ourContribution.PaymentBasePoint.PubKey, - HtlcPoint: ourContribution.HtlcBasePoint.PubKey, - DelayedPaymentPoint: ourContribution.DelayBasePoint.PubKey, - FirstCommitmentPoint: ourContribution.FirstCommitmentPoint, - ChannelFlags: channelFlags, - UpfrontShutdownScript: shutdown, - } - if err := msg.peer.SendMessage(true, &fundingOpen); err != nil { - e := er.Errorf("unable to send funding request message: %v", - err) - log.Errorf(e.String()) - - // Since we were unable to send the initial message to the peer - // and start the funding flow, we'll cancel this reservation. 
- _, err := f.cancelReservationCtx(peerKey, chanID, false) - if err != nil { - log.Errorf("unable to cancel reservation: %v", err) - } - - msg.err <- e - return - } -} - -// handleErrorMsg processes the error which was received from remote peer, -// depending on the type of error we should do different clean up steps and -// inform the user about it. -func (f *fundingManager) handleErrorMsg(peer lnpeer.Peer, - msg *lnwire.Error) { - - chanID := msg.ChanID - peerKey := peer.IdentityKey() - - // First, we'll attempt to retrieve and cancel the funding workflow - // that this error was tied to. If we're unable to do so, then we'll - // exit early as this was an unwarranted error. - resCtx, err := f.cancelReservationCtx(peerKey, chanID, true) - if err != nil { - log.Warnf("Received error for non-existent funding "+ - "flow: %v (%v)", err, msg.Error()) - return - } - - // If we did indeed find the funding workflow, then we'll return the - // error back to the caller (if any), and cancel the workflow itself. - fundingErr := er.Errorf("received funding error from %x: %v", - peerKey.SerializeCompressed(), msg.Error(), - ) - log.Errorf(fundingErr.String()) - - // If this was a PSBT funding flow, the remote likely timed out because - // we waited too long. Return a nice error message to the user in that - // case so the user knows what's the problem. - if resCtx.reservation.IsPsbt() { - fundingErr = chanfunding.ErrRemoteCanceled.New("", fundingErr) - } - - resCtx.err <- fundingErr -} - -// pruneZombieReservations loops through all pending reservations and fails the -// funding flow for any reservations that have not been updated since the -// ReservationTimeout and are not locked waiting for the funding transaction. 
-func (f *fundingManager) pruneZombieReservations() { - zombieReservations := make(pendingChannels) - - f.resMtx.RLock() - for _, pendingReservations := range f.activeReservations { - for pendingChanID, resCtx := range pendingReservations { - if resCtx.isLocked() { - continue - } - - // We don't want to expire PSBT funding reservations. - // These reservations are always initiated by us and the - // remote peer is likely going to cancel them after some - // idle time anyway. So no need for us to also prune - // them. - sinceLastUpdate := time.Since(resCtx.lastUpdated) - isExpired := sinceLastUpdate > f.cfg.ReservationTimeout - if !resCtx.reservation.IsPsbt() && isExpired { - zombieReservations[pendingChanID] = resCtx - } - } - } - f.resMtx.RUnlock() - - for pendingChanID, resCtx := range zombieReservations { - err := er.Errorf("reservation timed out waiting for peer "+ - "(peer_id:%x, chan_id:%x)", resCtx.peer.IdentityKey(), - pendingChanID[:]) - log.Warnf(err.String()) - f.failFundingFlow(resCtx.peer, pendingChanID, err) - } -} - -// cancelReservationCtx does all needed work in order to securely cancel the -// reservation. -func (f *fundingManager) cancelReservationCtx(peerKey *btcec.PublicKey, - pendingChanID [32]byte, byRemote bool) (*reservationWithCtx, er.R) { - - log.Infof("Cancelling funding reservation for node_key=%x, "+ - "chan_id=%x", peerKey.SerializeCompressed(), pendingChanID[:]) - - peerIDKey := newSerializedKey(peerKey) - f.resMtx.Lock() - defer f.resMtx.Unlock() - - nodeReservations, ok := f.activeReservations[peerIDKey] - if !ok { - // No reservations for this node. 
- return nil, er.Errorf("no active reservations for peer(%x)", - peerIDKey[:]) - } - - ctx, ok := nodeReservations[pendingChanID] - if !ok { - return nil, er.Errorf("unknown channel (id: %x) for "+ - "peer(%x)", pendingChanID[:], peerIDKey[:]) - } - - // If the reservation was a PSBT funding flow and it was canceled by the - // remote peer, then we need to thread through a different error message - // to the subroutine that's waiting for the user input so it can return - // a nice error message to the user. - if ctx.reservation.IsPsbt() && byRemote { - ctx.reservation.RemoteCanceled() - } - - if err := ctx.reservation.Cancel(); err != nil { - return nil, er.Errorf("unable to cancel reservation: %v", - err) - } - - delete(nodeReservations, pendingChanID) - - // If this was the last active reservation for this peer, delete the - // peer's entry altogether. - if len(nodeReservations) == 0 { - delete(f.activeReservations, peerIDKey) - } - return ctx, nil -} - -// deleteReservationCtx deletes the reservation uniquely identified by the -// target public key of the peer, and the specified pending channel ID. -func (f *fundingManager) deleteReservationCtx(peerKey *btcec.PublicKey, - pendingChanID [32]byte) { - - // TODO(roasbeef): possibly cancel funding barrier in peer's - // channelManager? - peerIDKey := newSerializedKey(peerKey) - f.resMtx.Lock() - defer f.resMtx.Unlock() - - nodeReservations, ok := f.activeReservations[peerIDKey] - if !ok { - // No reservations for this node. - return - } - delete(nodeReservations, pendingChanID) - - // If this was the last active reservation for this peer, delete the - // peer's entry altogether. - if len(nodeReservations) == 0 { - delete(f.activeReservations, peerIDKey) - } -} - -// getReservationCtx returns the reservation context for a particular pending -// channel ID for a target peer. 
-func (f *fundingManager) getReservationCtx(peerKey *btcec.PublicKey, - pendingChanID [32]byte) (*reservationWithCtx, er.R) { - - peerIDKey := newSerializedKey(peerKey) - f.resMtx.RLock() - resCtx, ok := f.activeReservations[peerIDKey][pendingChanID] - f.resMtx.RUnlock() - - if !ok { - return nil, er.Errorf("unknown channel (id: %x) for "+ - "peer(%x)", pendingChanID[:], peerIDKey[:]) - } - - return resCtx, nil -} - -// IsPendingChannel returns a boolean indicating whether the channel identified -// by the pendingChanID and given peer is pending, meaning it is in the process -// of being funded. After the funding transaction has been confirmed, the -// channel will receive a new, permanent channel ID, and will no longer be -// considered pending. -func (f *fundingManager) IsPendingChannel(pendingChanID [32]byte, - peer lnpeer.Peer) bool { - - peerIDKey := newSerializedKey(peer.IdentityKey()) - f.resMtx.RLock() - _, ok := f.activeReservations[peerIDKey][pendingChanID] - f.resMtx.RUnlock() - - return ok -} - -func copyPubKey(pub *btcec.PublicKey) *btcec.PublicKey { - return &btcec.PublicKey{ - Curve: btcec.S256(), - X: pub.X, - Y: pub.Y, - } -} - -// saveChannelOpeningState saves the channelOpeningState for the provided -// chanPoint to the channelOpeningStateBucket. -func (f *fundingManager) saveChannelOpeningState(chanPoint *wire.OutPoint, - state channelOpeningState, shortChanID *lnwire.ShortChannelID) er.R { - return kvdb.Update(f.cfg.Wallet.Cfg.Database, func(tx kvdb.RwTx) er.R { - - bucket, err := tx.CreateTopLevelBucket(channelOpeningStateBucket) - if err != nil { - return err - } - - var outpointBytes bytes.Buffer - if errr := writeOutpoint(&outpointBytes, chanPoint); errr != nil { - return errr - } - - // Save state and the uint64 representation of the shortChanID - // for later use. 
- scratch := make([]byte, 10) - byteOrder.PutUint16(scratch[:2], uint16(state)) - byteOrder.PutUint64(scratch[2:], shortChanID.ToUint64()) - - return bucket.Put(outpointBytes.Bytes(), scratch) - }, func() {}) -} - -// getChannelOpeningState fetches the channelOpeningState for the provided -// chanPoint from the database, or returns ErrChannelNotFound if the channel -// is not found. -func (f *fundingManager) getChannelOpeningState(chanPoint *wire.OutPoint) ( - channelOpeningState, *lnwire.ShortChannelID, er.R) { - - var state channelOpeningState - var shortChanID lnwire.ShortChannelID - err := kvdb.View(f.cfg.Wallet.Cfg.Database, func(tx kvdb.RTx) er.R { - - bucket := tx.ReadBucket(channelOpeningStateBucket) - if bucket == nil { - // If the bucket does not exist, it means we never added - // a channel to the db, so return ErrChannelNotFound. - return ErrChannelNotFound.Default() - } - - var outpointBytes bytes.Buffer - if err := writeOutpoint(&outpointBytes, chanPoint); err != nil { - return err - } - - value := bucket.Get(outpointBytes.Bytes()) - if value == nil { - return ErrChannelNotFound.Default() - } - - state = channelOpeningState(byteOrder.Uint16(value[:2])) - shortChanID = lnwire.NewShortChanIDFromInt(byteOrder.Uint64(value[2:])) - return nil - }, func() {}) - if err != nil { - return 0, nil, err - } - - return state, &shortChanID, nil -} - -// deleteChannelOpeningState removes any state for chanPoint from the database. 
-func (f *fundingManager) deleteChannelOpeningState(chanPoint *wire.OutPoint) er.R { - return kvdb.Update(f.cfg.Wallet.Cfg.Database, func(tx kvdb.RwTx) er.R { - bucket := tx.ReadWriteBucket(channelOpeningStateBucket) - if bucket == nil { - return er.Errorf("bucket not found") - } - - var outpointBytes bytes.Buffer - if err := writeOutpoint(&outpointBytes, chanPoint); err != nil { - return err - } - - return bucket.Delete(outpointBytes.Bytes()) - }, func() {}) -} diff --git a/lnd/fundingmanager_test.go b/lnd/fundingmanager_test.go deleted file mode 100644 index aaabc751..00000000 --- a/lnd/fundingmanager_test.go +++ /dev/null @@ -1,3497 +0,0 @@ -// +build !rpctest - -package lnd - -import ( - "bytes" - "io/ioutil" - "math/big" - "net" - "os" - "path/filepath" - "runtime" - "strings" - "testing" - "time" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/chaincfg" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/lnd/chainntnfs" - "github.com/pkt-cash/pktd/lnd/chainreg" - "github.com/pkt-cash/pktd/lnd/chanacceptor" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/channelnotifier" - "github.com/pkt-cash/pktd/lnd/discovery" - "github.com/pkt-cash/pktd/lnd/htlcswitch" - "github.com/pkt-cash/pktd/lnd/input" - "github.com/pkt-cash/pktd/lnd/keychain" - "github.com/pkt-cash/pktd/lnd/lncfg" - "github.com/pkt-cash/pktd/lnd/lnpeer" - "github.com/pkt-cash/pktd/lnd/lnrpc" - "github.com/pkt-cash/pktd/lnd/lntest/mock" - "github.com/pkt-cash/pktd/lnd/lnwallet" - "github.com/pkt-cash/pktd/lnd/lnwallet/chainfee" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/wire" - "github.com/stretchr/testify/require" -) - -const ( - // testPollNumTries is the number of times we attempt to query - // for a certain expected database state before we give up and - // consider the test failed. 
Since it sometimes can take a - // while to update the database, we poll a certain amount of - // times, until it gets into the state we expect, or we are out - // of tries. - testPollNumTries = 10 - - // testPollSleepMs is the number of milliseconds to sleep between - // each attempt to access the database to check its state. - testPollSleepMs = 500 - - // maxPending is the maximum number of channels we allow opening to the - // same peer in the max pending channels test. - maxPending = 4 - - // A dummy value to use for the funding broadcast height. - fundingBroadcastHeight = 123 -) - -var ( - // Use hard-coded keys for Alice and Bob, the two FundingManagers that - // we will test the interaction between. - alicePrivKeyBytes = [32]byte{ - 0xb7, 0x94, 0x38, 0x5f, 0x2d, 0x1e, 0xf7, 0xab, - 0x4d, 0x92, 0x73, 0xd1, 0x90, 0x63, 0x81, 0xb4, - 0x4f, 0x2f, 0x6f, 0x25, 0x88, 0xa3, 0xef, 0xb9, - 0x6a, 0x49, 0x18, 0x83, 0x31, 0x98, 0x47, 0x53, - } - - alicePrivKey, alicePubKey = btcec.PrivKeyFromBytes(btcec.S256(), - alicePrivKeyBytes[:]) - - aliceTCPAddr, _ = net.ResolveTCPAddr("tcp", "10.0.0.2:9001") - - aliceAddr = &lnwire.NetAddress{ - IdentityKey: alicePubKey, - Address: aliceTCPAddr, - } - - bobPrivKeyBytes = [32]byte{ - 0x81, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda, - 0x63, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17, - 0xd, 0xe7, 0x95, 0xe4, 0xb7, 0x25, 0xb8, 0x4d, - 0x1e, 0xb, 0x4c, 0xfd, 0x9e, 0xc5, 0x8c, 0xe9, - } - - bobPrivKey, bobPubKey = btcec.PrivKeyFromBytes(btcec.S256(), - bobPrivKeyBytes[:]) - - bobTCPAddr, _ = net.ResolveTCPAddr("tcp", "10.0.0.2:9000") - - bobAddr = &lnwire.NetAddress{ - IdentityKey: bobPubKey, - Address: bobTCPAddr, - } - - testSig = &btcec.Signature{ - R: new(big.Int), - S: new(big.Int), - } - _, _ = testSig.R.SetString("63724406601629180062774974542967536251589935445068131219452686511677818569431", 10) - _, _ = testSig.S.SetString("18801056069249825825291287104931333862866033135609736119018462340006816851118", 10) - - fundingNetParams = 
chainreg.BitcoinTestNetParams -) - -type mockNotifier struct { - oneConfChannel chan *chainntnfs.TxConfirmation - sixConfChannel chan *chainntnfs.TxConfirmation - epochChan chan *chainntnfs.BlockEpoch -} - -func (m *mockNotifier) RegisterConfirmationsNtfn(txid *chainhash.Hash, - _ []byte, numConfs, heightHint uint32) (*chainntnfs.ConfirmationEvent, er.R) { - - if numConfs == 6 { - return &chainntnfs.ConfirmationEvent{ - Confirmed: m.sixConfChannel, - }, nil - } - return &chainntnfs.ConfirmationEvent{ - Confirmed: m.oneConfChannel, - }, nil -} - -func (m *mockNotifier) RegisterBlockEpochNtfn( - bestBlock *chainntnfs.BlockEpoch) (*chainntnfs.BlockEpochEvent, er.R) { - return &chainntnfs.BlockEpochEvent{ - Epochs: m.epochChan, - Cancel: func() {}, - }, nil -} - -func (m *mockNotifier) Start() er.R { - return nil -} - -func (m *mockNotifier) Started() bool { - return true -} - -func (m *mockNotifier) Stop() er.R { - return nil -} - -func (m *mockNotifier) RegisterSpendNtfn(outpoint *wire.OutPoint, _ []byte, - heightHint uint32) (*chainntnfs.SpendEvent, er.R) { - return &chainntnfs.SpendEvent{ - Spend: make(chan *chainntnfs.SpendDetail), - Cancel: func() {}, - }, nil -} - -type mockChanEvent struct { - openEvent chan wire.OutPoint - pendingOpenEvent chan channelnotifier.PendingOpenChannelEvent -} - -func (m *mockChanEvent) NotifyOpenChannelEvent(outpoint wire.OutPoint) { - m.openEvent <- outpoint -} - -func (m *mockChanEvent) NotifyPendingOpenChannelEvent(outpoint wire.OutPoint, - pendingChannel *channeldb.OpenChannel) { - - m.pendingOpenEvent <- channelnotifier.PendingOpenChannelEvent{ - ChannelPoint: &outpoint, - PendingChannel: pendingChannel, - } -} - -type newChannelMsg struct { - channel *channeldb.OpenChannel - err chan er.R -} - -type testNode struct { - privKey *btcec.PrivateKey - addr *lnwire.NetAddress - msgChan chan lnwire.Message - announceChan chan lnwire.Message - publTxChan chan *wire.MsgTx - fundingMgr *fundingManager - newChannels chan *newChannelMsg - 
mockNotifier *mockNotifier - mockChanEvent *mockChanEvent - testDir string - shutdownChannel chan struct{} - remoteFeatures []lnwire.FeatureBit - - remotePeer *testNode - sendMessage func(lnwire.Message) er.R -} - -var _ lnpeer.Peer = (*testNode)(nil) - -func (n *testNode) IdentityKey() *btcec.PublicKey { - return n.addr.IdentityKey -} - -func (n *testNode) Address() net.Addr { - return n.addr.Address -} - -func (n *testNode) PubKey() [33]byte { - return newSerializedKey(n.addr.IdentityKey) -} - -func (n *testNode) SendMessage(_ bool, msg ...lnwire.Message) er.R { - return n.sendMessage(msg[0]) -} - -func (n *testNode) SendMessageLazy(sync bool, msgs ...lnwire.Message) er.R { - return n.SendMessage(sync, msgs...) -} - -func (n *testNode) WipeChannel(_ *wire.OutPoint) {} - -func (n *testNode) QuitSignal() <-chan struct{} { - return n.shutdownChannel -} - -func (n *testNode) LocalFeatures() *lnwire.FeatureVector { - return lnwire.NewFeatureVector(nil, nil) -} - -func (n *testNode) RemoteFeatures() *lnwire.FeatureVector { - return lnwire.NewFeatureVector( - lnwire.NewRawFeatureVector(n.remoteFeatures...), nil, - ) -} - -func (n *testNode) AddNewChannel(channel *channeldb.OpenChannel, - quit <-chan struct{}) er.R { - - errChan := make(chan er.R) - msg := &newChannelMsg{ - channel: channel, - err: errChan, - } - - select { - case n.newChannels <- msg: - case <-quit: - return ErrFundingManagerShuttingDown.Default() - } - - select { - case err := <-errChan: - return err - case <-quit: - return ErrFundingManagerShuttingDown.Default() - } -} - -func createTestWallet(cdb *channeldb.DB, netParams *chaincfg.Params, - notifier chainntnfs.ChainNotifier, wc lnwallet.WalletController, - signer input.Signer, keyRing keychain.SecretKeyRing, - bio lnwallet.BlockChainIO, - estimator chainfee.Estimator) (*lnwallet.LightningWallet, er.R) { - - wallet, err := lnwallet.NewLightningWallet(lnwallet.Config{ - Database: cdb, - Notifier: notifier, - SecretKeyRing: keyRing, - WalletController: 
wc, - Signer: signer, - ChainIO: bio, - FeeEstimator: estimator, - NetParams: *netParams, - DefaultConstraints: chainreg.DefaultBtcChannelConstraints, - }) - if err != nil { - return nil, err - } - - if err := wallet.Startup(); err != nil { - return nil, err - } - - return wallet, nil -} - -func createTestFundingManager(t *testing.T, privKey *btcec.PrivateKey, - addr *lnwire.NetAddress, tempTestDir string, - options ...cfgOption) (*testNode, er.R) { - - netParams := fundingNetParams.Params - estimator := chainfee.NewStaticEstimator(62500, 0) - - chainNotifier := &mockNotifier{ - oneConfChannel: make(chan *chainntnfs.TxConfirmation, 1), - sixConfChannel: make(chan *chainntnfs.TxConfirmation, 1), - epochChan: make(chan *chainntnfs.BlockEpoch, 2), - } - - sentMessages := make(chan lnwire.Message) - sentAnnouncements := make(chan lnwire.Message) - publTxChan := make(chan *wire.MsgTx, 1) - shutdownChan := make(chan struct{}) - - wc := &mock.WalletController{ - RootKey: alicePrivKey, - } - signer := &mock.SingleSigner{ - Privkey: alicePrivKey, - } - bio := &mock.ChainIO{ - BestHeight: fundingBroadcastHeight, - } - - // The mock channel event notifier will receive events for each pending - // open and open channel. Because some tests will create multiple - // channels in a row before advancing to the next step, these channels - // need to be buffered. 
- evt := &mockChanEvent{ - openEvent: make(chan wire.OutPoint, maxPending), - pendingOpenEvent: make( - chan channelnotifier.PendingOpenChannelEvent, maxPending, - ), - } - - dbDir := filepath.Join(tempTestDir, "cdb") - cdb, err := channeldb.Open(dbDir) - if err != nil { - return nil, err - } - - keyRing := &mock.SecretKeyRing{ - RootKey: alicePrivKey, - } - - lnw, err := createTestWallet( - cdb, netParams, chainNotifier, wc, signer, keyRing, bio, - estimator, - ) - if err != nil { - t.Fatalf("unable to create test ln wallet: %v", err) - } - - var chanIDSeed [32]byte - - chainedAcceptor := chanacceptor.NewChainedAcceptor() - - fundingCfg := fundingConfig{ - IDKey: privKey.PubKey(), - Wallet: lnw, - Notifier: chainNotifier, - FeeEstimator: estimator, - SignMessage: func(pubKey *btcec.PublicKey, - msg []byte) (input.Signature, er.R) { - - return testSig, nil - }, - SendAnnouncement: func(msg lnwire.Message, - _ ...discovery.OptionalMsgField) chan er.R { - - errChan := make(chan er.R, 1) - select { - case sentAnnouncements <- msg: - errChan <- nil - case <-shutdownChan: - errChan <- er.Errorf("shutting down") - } - return errChan - }, - CurrentNodeAnnouncement: func() (lnwire.NodeAnnouncement, er.R) { - return lnwire.NodeAnnouncement{}, nil - }, - TempChanIDSeed: chanIDSeed, - FindChannel: func(chanID lnwire.ChannelID) ( - *channeldb.OpenChannel, er.R) { - dbChannels, err := cdb.FetchAllChannels() - if err != nil { - return nil, err - } - - for _, channel := range dbChannels { - if chanID.IsChanPoint(&channel.FundingOutpoint) { - return channel, nil - } - } - - return nil, er.Errorf("unable to find channel") - }, - DefaultRoutingPolicy: htlcswitch.ForwardingPolicy{ - MinHTLCOut: 5, - BaseFee: 100, - FeeRate: 1000, - TimeLockDelta: 10, - }, - DefaultMinHtlcIn: 5, - NumRequiredConfs: func(chanAmt btcutil.Amount, - pushAmt lnwire.MilliSatoshi) uint16 { - return 3 - }, - RequiredRemoteDelay: func(amt btcutil.Amount) uint16 { - return 4 - }, - RequiredRemoteChanReserve: 
func(chanAmt, - dustLimit btcutil.Amount) btcutil.Amount { - - reserve := chanAmt / 100 - if reserve < dustLimit { - reserve = dustLimit - } - - return reserve - }, - RequiredRemoteMaxValue: func(chanAmt btcutil.Amount) lnwire.MilliSatoshi { - reserve := lnwire.NewMSatFromSatoshis(chanAmt / 100) - return lnwire.NewMSatFromSatoshis(chanAmt) - reserve - }, - RequiredRemoteMaxHTLCs: func(chanAmt btcutil.Amount) uint16 { - return uint16(input.MaxHTLCNumber / 2) - }, - WatchNewChannel: func(*channeldb.OpenChannel, *btcec.PublicKey) er.R { - return nil - }, - ReportShortChanID: func(wire.OutPoint) er.R { - return nil - }, - PublishTransaction: func(txn *wire.MsgTx, _ string) er.R { - publTxChan <- txn - return nil - }, - UpdateLabel: func(chainhash.Hash, string) er.R { - return nil - }, - ZombieSweeperInterval: 1 * time.Hour, - ReservationTimeout: 1 * time.Nanosecond, - MaxChanSize: MaxFundingAmount, - MaxLocalCSVDelay: defaultMaxLocalCSVDelay, - MaxPendingChannels: lncfg.DefaultMaxPendingChannels, - NotifyOpenChannelEvent: evt.NotifyOpenChannelEvent, - OpenChannelPredicate: chainedAcceptor, - NotifyPendingOpenChannelEvent: evt.NotifyPendingOpenChannelEvent, - RegisteredChains: chainreg.NewChainRegistry(), - } - - for _, op := range options { - op(&fundingCfg) - } - - f, err := newFundingManager(fundingCfg) - if err != nil { - t.Fatalf("failed creating fundingManager: %v", err) - } - if err = f.Start(); err != nil { - t.Fatalf("failed starting fundingManager: %v", err) - } - - testNode := &testNode{ - privKey: privKey, - msgChan: sentMessages, - newChannels: make(chan *newChannelMsg), - announceChan: sentAnnouncements, - publTxChan: publTxChan, - fundingMgr: f, - mockNotifier: chainNotifier, - mockChanEvent: evt, - testDir: tempTestDir, - shutdownChannel: shutdownChan, - addr: addr, - } - - f.cfg.NotifyWhenOnline = func(peer [33]byte, - connectedChan chan<- lnpeer.Peer) { - - connectedChan <- testNode.remotePeer - } - - return testNode, nil -} - -func 
recreateAliceFundingManager(t *testing.T, alice *testNode) { - // Stop the old fundingManager before creating a new one. - close(alice.shutdownChannel) - if err := alice.fundingMgr.Stop(); err != nil { - t.Fatalf("unable to stop old fundingManager: %v", err) - } - - aliceMsgChan := make(chan lnwire.Message) - aliceAnnounceChan := make(chan lnwire.Message) - shutdownChan := make(chan struct{}) - publishChan := make(chan *wire.MsgTx, 10) - - oldCfg := alice.fundingMgr.cfg - - chainedAcceptor := chanacceptor.NewChainedAcceptor() - - f, err := newFundingManager(fundingConfig{ - IDKey: oldCfg.IDKey, - Wallet: oldCfg.Wallet, - Notifier: oldCfg.Notifier, - FeeEstimator: oldCfg.FeeEstimator, - SignMessage: func(pubKey *btcec.PublicKey, - msg []byte) (input.Signature, er.R) { - return testSig, nil - }, - SendAnnouncement: func(msg lnwire.Message, - _ ...discovery.OptionalMsgField) chan er.R { - - errChan := make(chan er.R, 1) - select { - case aliceAnnounceChan <- msg: - errChan <- nil - case <-shutdownChan: - errChan <- er.Errorf("shutting down") - } - return errChan - }, - CurrentNodeAnnouncement: func() (lnwire.NodeAnnouncement, er.R) { - return lnwire.NodeAnnouncement{}, nil - }, - NotifyWhenOnline: func(peer [33]byte, - connectedChan chan<- lnpeer.Peer) { - - connectedChan <- alice.remotePeer - }, - TempChanIDSeed: oldCfg.TempChanIDSeed, - FindChannel: oldCfg.FindChannel, - DefaultRoutingPolicy: htlcswitch.ForwardingPolicy{ - MinHTLCOut: 5, - BaseFee: 100, - FeeRate: 1000, - TimeLockDelta: 10, - }, - DefaultMinHtlcIn: 5, - RequiredRemoteMaxValue: oldCfg.RequiredRemoteMaxValue, - PublishTransaction: func(txn *wire.MsgTx, _ string) er.R { - publishChan <- txn - return nil - }, - UpdateLabel: func(chainhash.Hash, string) er.R { - return nil - }, - ZombieSweeperInterval: oldCfg.ZombieSweeperInterval, - ReservationTimeout: oldCfg.ReservationTimeout, - OpenChannelPredicate: chainedAcceptor, - }) - if err != nil { - t.Fatalf("failed recreating aliceFundingManager: %v", err) - 
} - - alice.fundingMgr = f - alice.msgChan = aliceMsgChan - alice.announceChan = aliceAnnounceChan - alice.publTxChan = publishChan - alice.shutdownChannel = shutdownChan - - if err = f.Start(); err != nil { - t.Fatalf("failed starting fundingManager: %v", err) - } -} - -type cfgOption func(*fundingConfig) - -func setupFundingManagers(t *testing.T, - options ...cfgOption) (*testNode, *testNode) { - - aliceTestDir, errr := ioutil.TempDir("", "alicelnwallet") - if errr != nil { - t.Fatalf("unable to create temp directory: %v", errr) - } - - alice, err := createTestFundingManager( - t, alicePrivKey, aliceAddr, aliceTestDir, options..., - ) - if err != nil { - t.Fatalf("failed creating fundingManager: %v", err) - } - - bobTestDir, errr := ioutil.TempDir("", "boblnwallet") - if errr != nil { - t.Fatalf("unable to create temp directory: %v", errr) - } - - bob, err := createTestFundingManager( - t, bobPrivKey, bobAddr, bobTestDir, options..., - ) - if err != nil { - t.Fatalf("failed creating fundingManager: %v", err) - } - - // With the funding manager's created, we'll now attempt to mimic a - // connection pipe between them. In order to intercept the messages - // within it, we'll redirect all messages back to the msgChan of the - // sender. Since the fundingManager now has a reference to peers itself, - // alice.sendMessage will be triggered when Bob's funding manager - // attempts to send a message to Alice and vice versa. 
- alice.remotePeer = bob - alice.sendMessage = func(msg lnwire.Message) er.R { - select { - case alice.remotePeer.msgChan <- msg: - case <-alice.shutdownChannel: - return er.New("shutting down") - } - return nil - } - - bob.remotePeer = alice - bob.sendMessage = func(msg lnwire.Message) er.R { - select { - case bob.remotePeer.msgChan <- msg: - case <-bob.shutdownChannel: - return er.New("shutting down") - } - return nil - } - - return alice, bob -} - -func tearDownFundingManagers(t *testing.T, a, b *testNode) { - close(a.shutdownChannel) - close(b.shutdownChannel) - - if err := a.fundingMgr.Stop(); err != nil { - t.Fatalf("unable to stop fundingManager: %v", err) - } - if err := b.fundingMgr.Stop(); err != nil { - t.Fatalf("unable to stop fundingManager: %v", err) - } - os.RemoveAll(a.testDir) - os.RemoveAll(b.testDir) -} - -// openChannel takes the funding process to the point where the funding -// transaction is confirmed on-chain. Returns the funding out point. -func openChannel(t *testing.T, alice, bob *testNode, localFundingAmt, - pushAmt btcutil.Amount, numConfs uint32, - updateChan chan *lnrpc.OpenStatusUpdate, announceChan bool) ( - *wire.OutPoint, *wire.MsgTx) { - - publ := fundChannel( - t, alice, bob, localFundingAmt, pushAmt, false, numConfs, - updateChan, announceChan, - ) - fundingOutPoint := &wire.OutPoint{ - Hash: publ.TxHash(), - Index: 0, - } - return fundingOutPoint, publ -} - -// fundChannel takes the funding process to the point where the funding -// transaction is confirmed on-chain. Returns the funding tx. -func fundChannel(t *testing.T, alice, bob *testNode, localFundingAmt, - pushAmt btcutil.Amount, subtractFees bool, numConfs uint32, - updateChan chan *lnrpc.OpenStatusUpdate, announceChan bool) *wire.MsgTx { - - // Create a funding request and start the workflow. 
- errChan := make(chan er.R, 1) - initReq := &openChanReq{ - targetPubkey: bob.privKey.PubKey(), - chainHash: *fundingNetParams.GenesisHash, - subtractFees: subtractFees, - localFundingAmt: localFundingAmt, - pushAmt: lnwire.NewMSatFromSatoshis(pushAmt), - fundingFeePerKw: 1000, - private: !announceChan, - updates: updateChan, - err: errChan, - } - - alice.fundingMgr.initFundingWorkflow(bob, initReq) - - // Alice should have sent the OpenChannel message to Bob. - var aliceMsg lnwire.Message - select { - case aliceMsg = <-alice.msgChan: - case err := <-initReq.err: - t.Fatalf("error init funding workflow: %v", err) - case <-time.After(time.Second * 5): - t.Fatalf("alice did not send OpenChannel message") - } - - openChannelReq, ok := aliceMsg.(*lnwire.OpenChannel) - if !ok { - errorMsg, gotError := aliceMsg.(*lnwire.Error) - if gotError { - t.Fatalf("expected OpenChannel to be sent "+ - "from bob, instead got error: %v", - errorMsg.Error()) - } - t.Fatalf("expected OpenChannel to be sent from "+ - "alice, instead got %T", aliceMsg) - } - - // Let Bob handle the init message. - bob.fundingMgr.ProcessFundingMsg(openChannelReq, alice) - - // Bob should answer with an AcceptChannel message. - acceptChannelResponse := assertFundingMsgSent( - t, bob.msgChan, "AcceptChannel", - ).(*lnwire.AcceptChannel) - - // They now should both have pending reservations for this channel - // active. - assertNumPendingReservations(t, alice, bobPubKey, 1) - assertNumPendingReservations(t, bob, alicePubKey, 1) - - // Forward the response to Alice. - alice.fundingMgr.ProcessFundingMsg(acceptChannelResponse, bob) - - // Alice responds with a FundingCreated message. - fundingCreated := assertFundingMsgSent( - t, alice.msgChan, "FundingCreated", - ).(*lnwire.FundingCreated) - - // Give the message to Bob. - bob.fundingMgr.ProcessFundingMsg(fundingCreated, alice) - - // Finally, Bob should send the FundingSigned message. 
- fundingSigned := assertFundingMsgSent( - t, bob.msgChan, "FundingSigned", - ).(*lnwire.FundingSigned) - - // Forward the signature to Alice. - alice.fundingMgr.ProcessFundingMsg(fundingSigned, bob) - - // After Alice processes the singleFundingSignComplete message, she will - // broadcast the funding transaction to the network. We expect to get a - // channel update saying the channel is pending. - var pendingUpdate *lnrpc.OpenStatusUpdate - select { - case pendingUpdate = <-updateChan: - case <-time.After(time.Second * 5): - t.Fatalf("alice did not send OpenStatusUpdate_ChanPending") - } - - _, ok = pendingUpdate.Update.(*lnrpc.OpenStatusUpdate_ChanPending) - if !ok { - t.Fatal("OpenStatusUpdate was not OpenStatusUpdate_ChanPending") - } - - // Get and return the transaction Alice published to the network. - var publ *wire.MsgTx - select { - case publ = <-alice.publTxChan: - case <-time.After(time.Second * 5): - t.Fatalf("alice did not publish funding tx") - } - - // Make sure the notification about the pending channel was sent out. - select { - case <-alice.mockChanEvent.pendingOpenEvent: - case <-time.After(time.Second * 5): - t.Fatalf("alice did not send pending channel event") - } - select { - case <-bob.mockChanEvent.pendingOpenEvent: - case <-time.After(time.Second * 5): - t.Fatalf("bob did not send pending channel event") - } - - // Finally, make sure neither have active reservation for the channel - // now pending open in the database. - assertNumPendingReservations(t, alice, bobPubKey, 0) - assertNumPendingReservations(t, bob, alicePubKey, 0) - - return publ -} - -func assertErrorNotSent(t *testing.T, msgChan chan lnwire.Message) { - t.Helper() - - select { - case <-msgChan: - t.Fatalf("error sent unexpectedly") - case <-time.After(100 * time.Millisecond): - // Expected, return. 
- } -} - -func assertErrorSent(t *testing.T, msgChan chan lnwire.Message) { - t.Helper() - - var msg lnwire.Message - select { - case msg = <-msgChan: - case <-time.After(time.Second * 5): - t.Fatalf("node did not send Error message") - } - _, ok := msg.(*lnwire.Error) - if !ok { - t.Fatalf("expected Error to be sent from "+ - "node, instead got %T", msg) - } -} - -func assertFundingMsgSent(t *testing.T, msgChan chan lnwire.Message, - msgType string) lnwire.Message { - t.Helper() - - var msg lnwire.Message - select { - case msg = <-msgChan: - case <-time.After(time.Second * 5): - t.Fatalf("peer did not send %s message", msgType) - } - - var ( - sentMsg lnwire.Message - ok bool - ) - switch msgType { - case "AcceptChannel": - sentMsg, ok = msg.(*lnwire.AcceptChannel) - case "FundingCreated": - sentMsg, ok = msg.(*lnwire.FundingCreated) - case "FundingSigned": - sentMsg, ok = msg.(*lnwire.FundingSigned) - case "FundingLocked": - sentMsg, ok = msg.(*lnwire.FundingLocked) - case "Error": - sentMsg, ok = msg.(*lnwire.Error) - default: - t.Fatalf("unknown message type: %s", msgType) - } - - if !ok { - errorMsg, gotError := msg.(*lnwire.Error) - if gotError { - t.Fatalf("expected %s to be sent, instead got error: %v", - msgType, errorMsg.Error()) - } - - _, _, line, _ := runtime.Caller(1) - t.Fatalf("expected %s to be sent, instead got %T at %v", - msgType, msg, line) - } - - return sentMsg -} - -func assertNumPendingReservations(t *testing.T, node *testNode, - peerPubKey *btcec.PublicKey, expectedNum int) { - t.Helper() - - serializedPubKey := newSerializedKey(peerPubKey) - actualNum := len(node.fundingMgr.activeReservations[serializedPubKey]) - if actualNum == expectedNum { - // Success, return. 
- return - } - - t.Fatalf("Expected node to have %d pending reservations, had %v", - expectedNum, actualNum) -} - -func assertNumPendingChannelsBecomes(t *testing.T, node *testNode, expectedNum int) { - t.Helper() - - var numPendingChans int - for i := 0; i < testPollNumTries; i++ { - // If this is not the first try, sleep before retrying. - if i > 0 { - time.Sleep(testPollSleepMs * time.Millisecond) - } - pendingChannels, err := node.fundingMgr. - cfg.Wallet.Cfg.Database.FetchPendingChannels() - if err != nil { - t.Fatalf("unable to fetch pending channels: %v", err) - } - - numPendingChans = len(pendingChannels) - if numPendingChans == expectedNum { - // Success, return. - return - } - } - - t.Fatalf("Expected node to have %d pending channels, had %v", - expectedNum, numPendingChans) -} - -func assertNumPendingChannelsRemains(t *testing.T, node *testNode, expectedNum int) { - t.Helper() - - var numPendingChans int - for i := 0; i < 5; i++ { - // If this is not the first try, sleep before retrying. - if i > 0 { - time.Sleep(200 * time.Millisecond) - } - pendingChannels, err := node.fundingMgr. - cfg.Wallet.Cfg.Database.FetchPendingChannels() - if err != nil { - t.Fatalf("unable to fetch pending channels: %v", err) - } - - numPendingChans = len(pendingChannels) - if numPendingChans != expectedNum { - - t.Fatalf("Expected node to have %d pending channels, had %v", - expectedNum, numPendingChans) - } - } -} - -func assertDatabaseState(t *testing.T, node *testNode, - fundingOutPoint *wire.OutPoint, expectedState channelOpeningState) { - t.Helper() - - var state channelOpeningState - var err er.R - for i := 0; i < testPollNumTries; i++ { - // If this is not the first try, sleep before retrying. 
- if i > 0 { - time.Sleep(testPollSleepMs * time.Millisecond) - } - state, _, err = node.fundingMgr.getChannelOpeningState( - fundingOutPoint) - if err != nil && !ErrChannelNotFound.Is(err) { - t.Fatalf("unable to get channel state: %v", err) - } - - // If we found the channel, check if it had the expected state. - if !ErrChannelNotFound.Is(err) && state == expectedState { - // Got expected state, return with success. - return - } - } - - // 10 tries without success. - if err != nil { - t.Fatalf("error getting channelOpeningState: %v", err) - } else { - t.Fatalf("expected state to be %v, was %v", expectedState, - state) - } -} - -func assertMarkedOpen(t *testing.T, alice, bob *testNode, - fundingOutPoint *wire.OutPoint) { - t.Helper() - - // Make sure the notification about the pending channel was sent out. - select { - case <-alice.mockChanEvent.openEvent: - case <-time.After(time.Second * 5): - t.Fatalf("alice did not send open channel event") - } - select { - case <-bob.mockChanEvent.openEvent: - case <-time.After(time.Second * 5): - t.Fatalf("bob did not send open channel event") - } - - assertDatabaseState(t, alice, fundingOutPoint, markedOpen) - assertDatabaseState(t, bob, fundingOutPoint, markedOpen) -} - -func assertFundingLockedSent(t *testing.T, alice, bob *testNode, - fundingOutPoint *wire.OutPoint) { - t.Helper() - - assertDatabaseState(t, alice, fundingOutPoint, fundingLockedSent) - assertDatabaseState(t, bob, fundingOutPoint, fundingLockedSent) -} - -func assertAddedToRouterGraph(t *testing.T, alice, bob *testNode, - fundingOutPoint *wire.OutPoint) { - t.Helper() - - assertDatabaseState(t, alice, fundingOutPoint, addedToRouterGraph) - assertDatabaseState(t, bob, fundingOutPoint, addedToRouterGraph) -} - -// assertChannelAnnouncements checks that alice and bob both sends the expected -// announcements (ChannelAnnouncement, ChannelUpdate) after the funding tx has -// confirmed. 
The last arguments can be set if we expect the nodes to advertise -// custom min_htlc values as part of their ChannelUpdate. We expect Alice to -// advertise the value required by Bob and vice versa. If they are not set the -// advertised value will be checked against the other node's default min_htlc -// value. -func assertChannelAnnouncements(t *testing.T, alice, bob *testNode, - capacity btcutil.Amount, customMinHtlc []lnwire.MilliSatoshi, - customMaxHtlc []lnwire.MilliSatoshi) { - t.Helper() - - // After the FundingLocked message is sent, Alice and Bob will each - // send the following messages to their gossiper: - // 1) ChannelAnnouncement - // 2) ChannelUpdate - // The ChannelAnnouncement is kept locally, while the ChannelUpdate - // is sent directly to the other peer, so the edge policies are - // known to both peers. - nodes := []*testNode{alice, bob} - for j, node := range nodes { - announcements := make([]lnwire.Message, 2) - for i := 0; i < len(announcements); i++ { - select { - case announcements[i] = <-node.announceChan: - case <-time.After(time.Second * 5): - t.Fatalf("node did not send announcement: %v", i) - } - } - - gotChannelAnnouncement := false - gotChannelUpdate := false - for _, msg := range announcements { - switch m := msg.(type) { - case *lnwire.ChannelAnnouncement: - gotChannelAnnouncement = true - case *lnwire.ChannelUpdate: - - // The channel update sent by the node should - // advertise the MinHTLC value required by the - // _other_ node. - other := (j + 1) % 2 - minHtlc := nodes[other].fundingMgr.cfg. - DefaultMinHtlcIn - - // We might expect a custom MinHTLC value. 
- if len(customMinHtlc) > 0 { - if len(customMinHtlc) != 2 { - t.Fatalf("only 0 or 2 custom " + - "min htlc values " + - "currently supported") - } - - minHtlc = customMinHtlc[j] - } - - if m.HtlcMinimumMsat != minHtlc { - t.Fatalf("expected ChannelUpdate to "+ - "advertise min HTLC %v, had %v", - minHtlc, m.HtlcMinimumMsat) - } - - maxHtlc := alice.fundingMgr.cfg.RequiredRemoteMaxValue( - capacity, - ) - // We might expect a custom MaxHltc value. - if len(customMaxHtlc) > 0 { - if len(customMaxHtlc) != 2 { - t.Fatalf("only 0 or 2 custom " + - "min htlc values " + - "currently supported") - } - - maxHtlc = customMaxHtlc[j] - } - if m.MessageFlags != 1 { - t.Fatalf("expected message flags to "+ - "be 1, was %v", m.MessageFlags) - } - - if maxHtlc != m.HtlcMaximumMsat { - t.Fatalf("expected ChannelUpdate to "+ - "advertise max HTLC %v, had %v", - maxHtlc, - m.HtlcMaximumMsat) - } - - gotChannelUpdate = true - } - } - - if !gotChannelAnnouncement { - t.Fatalf("did not get ChannelAnnouncement from node %d", - j) - } - if !gotChannelUpdate { - t.Fatalf("did not get ChannelUpdate from node %d", j) - } - - // Make sure no other message is sent. - select { - case <-node.announceChan: - t.Fatalf("received unexpected announcement") - case <-time.After(300 * time.Millisecond): - // Expected - } - } -} - -func assertAnnouncementSignatures(t *testing.T, alice, bob *testNode) { - t.Helper() - - // After the FundingLocked message is sent and six confirmations have - // been reached, the channel will be announced to the greater network - // by having the nodes exchange announcement signatures. - // Two distinct messages will be sent: - // 1) AnnouncementSignatures - // 2) NodeAnnouncement - // These may arrive in no particular order. - // Note that sending the NodeAnnouncement at this point is an - // implementation detail, and not something required by the LN spec. 
- for j, node := range []*testNode{alice, bob} { - announcements := make([]lnwire.Message, 2) - for i := 0; i < len(announcements); i++ { - select { - case announcements[i] = <-node.announceChan: - case <-time.After(time.Second * 5): - t.Fatalf("node did not send announcement %v", i) - } - } - - gotAnnounceSignatures := false - gotNodeAnnouncement := false - for _, msg := range announcements { - switch msg.(type) { - case *lnwire.AnnounceSignatures: - gotAnnounceSignatures = true - case *lnwire.NodeAnnouncement: - gotNodeAnnouncement = true - } - } - - if !gotAnnounceSignatures { - t.Fatalf("did not get AnnounceSignatures from node %d", - j) - } - if !gotNodeAnnouncement { - t.Fatalf("did not get NodeAnnouncement from node %d", j) - } - } -} - -func waitForOpenUpdate(t *testing.T, updateChan chan *lnrpc.OpenStatusUpdate) { - var openUpdate *lnrpc.OpenStatusUpdate - select { - case openUpdate = <-updateChan: - case <-time.After(time.Second * 5): - t.Fatalf("alice did not send OpenStatusUpdate") - } - - _, ok := openUpdate.Update.(*lnrpc.OpenStatusUpdate_ChanOpen) - if !ok { - t.Fatal("OpenStatusUpdate was not OpenStatusUpdate_ChanOpen") - } -} - -func assertNoChannelState(t *testing.T, alice, bob *testNode, - fundingOutPoint *wire.OutPoint) { - t.Helper() - - assertErrChannelNotFound(t, alice, fundingOutPoint) - assertErrChannelNotFound(t, bob, fundingOutPoint) -} - -func assertErrChannelNotFound(t *testing.T, node *testNode, - fundingOutPoint *wire.OutPoint) { - t.Helper() - - var state channelOpeningState - var err er.R - for i := 0; i < testPollNumTries; i++ { - // If this is not the first try, sleep before retrying. - if i > 0 { - time.Sleep(testPollSleepMs * time.Millisecond) - } - state, _, err = node.fundingMgr.getChannelOpeningState( - fundingOutPoint) - if ErrChannelNotFound.Is(err) { - // Got expected state, return with success. - return - } else if err != nil { - t.Fatalf("unable to get channel state: %v", err) - } - } - - // 10 tries without success. 
- t.Fatalf("expected to not find state, found state %v", state) -} - -func assertHandleFundingLocked(t *testing.T, alice, bob *testNode) { - t.Helper() - - // They should both send the new channel state to their peer. - select { - case c := <-alice.newChannels: - close(c.err) - case <-time.After(time.Second * 15): - t.Fatalf("alice did not send new channel to peer") - } - - select { - case c := <-bob.newChannels: - close(c.err) - case <-time.After(time.Second * 15): - t.Fatalf("bob did not send new channel to peer") - } -} - -func TestFundingManagerNormalWorkflow(t *testing.T) { - t.Parallel() - - alice, bob := setupFundingManagers(t) - defer tearDownFundingManagers(t, alice, bob) - - // We will consume the channel updates as we go, so no buffering is needed. - updateChan := make(chan *lnrpc.OpenStatusUpdate) - - // Run through the process of opening the channel, up until the funding - // transaction is broadcasted. - localAmt := btcutil.Amount(500000) - pushAmt := btcutil.Amount(0) - capacity := localAmt + pushAmt - fundingOutPoint, fundingTx := openChannel( - t, alice, bob, localAmt, pushAmt, 1, updateChan, true, - ) - - // Check that neither Alice nor Bob sent an error message. - assertErrorNotSent(t, alice.msgChan) - assertErrorNotSent(t, bob.msgChan) - - // Notify that transaction was mined. - alice.mockNotifier.oneConfChannel <- &chainntnfs.TxConfirmation{ - Tx: fundingTx, - } - bob.mockNotifier.oneConfChannel <- &chainntnfs.TxConfirmation{ - Tx: fundingTx, - } - - // The funding transaction was mined, so assert that both funding - // managers now have the state of this channel 'markedOpen' in their - // internal state machine. - assertMarkedOpen(t, alice, bob, fundingOutPoint) - - // After the funding transaction is mined, Alice will send - // fundingLocked to Bob. - fundingLockedAlice := assertFundingMsgSent( - t, alice.msgChan, "FundingLocked", - ).(*lnwire.FundingLocked) - - // And similarly Bob will send funding locked to Alice. 
- fundingLockedBob := assertFundingMsgSent( - t, bob.msgChan, "FundingLocked", - ).(*lnwire.FundingLocked) - - // Check that the state machine is updated accordingly - assertFundingLockedSent(t, alice, bob, fundingOutPoint) - - // Make sure both fundingManagers send the expected channel - // announcements. - assertChannelAnnouncements(t, alice, bob, capacity, nil, nil) - - // Check that the state machine is updated accordingly - assertAddedToRouterGraph(t, alice, bob, fundingOutPoint) - - // The funding transaction is now confirmed, wait for the - // OpenStatusUpdate_ChanOpen update - waitForOpenUpdate(t, updateChan) - - // Exchange the fundingLocked messages. - alice.fundingMgr.ProcessFundingMsg(fundingLockedBob, bob) - bob.fundingMgr.ProcessFundingMsg(fundingLockedAlice, alice) - - // Check that they notify the breach arbiter and peer about the new - // channel. - assertHandleFundingLocked(t, alice, bob) - - // Notify that six confirmations has been reached on funding transaction. - alice.mockNotifier.sixConfChannel <- &chainntnfs.TxConfirmation{ - Tx: fundingTx, - } - bob.mockNotifier.sixConfChannel <- &chainntnfs.TxConfirmation{ - Tx: fundingTx, - } - - // Make sure the fundingManagers exchange announcement signatures. - assertAnnouncementSignatures(t, alice, bob) - - // The internal state-machine should now have deleted the channelStates - // from the database, as the channel is announced. - assertNoChannelState(t, alice, bob, fundingOutPoint) -} - -// TestFundingManagerRejectCSV tests checking of local CSV values against our -// local CSV limit for incoming and outgoing channels. -func TestFundingManagerRejectCSV(t *testing.T) { - t.Run("csv too high", func(t *testing.T) { - testLocalCSVLimit(t, 400, 500) - }) - t.Run("csv within limit", func(t *testing.T) { - testLocalCSVLimit(t, 600, 500) - }) -} - -// testLocalCSVLimit creates two funding managers, alice and bob, where alice -// has a limit on her maximum local CSV and bob sets his required CSV for alice. 
-// We test an incoming and outgoing channel, ensuring that alice accepts csvs -// below her maximum, and rejects those above it. -func testLocalCSVLimit(t *testing.T, aliceMaxCSV, bobRequiredCSV uint16) { - t.Parallel() - - alice, bob := setupFundingManagers(t) - defer tearDownFundingManagers(t, alice, bob) - - // Set a maximum local delay in alice's config to aliceMaxCSV and overwrite - // bob's required remote delay function to return bobRequiredCSV. - alice.fundingMgr.cfg.MaxLocalCSVDelay = aliceMaxCSV - bob.fundingMgr.cfg.RequiredRemoteDelay = func(_ btcutil.Amount) uint16 { - return bobRequiredCSV - } - - // For convenience, we bump our max pending channels to 2 so that we - // can test incoming and outgoing channels without needing to step - // through the full funding process. - alice.fundingMgr.cfg.MaxPendingChannels = 2 - bob.fundingMgr.cfg.MaxPendingChannels = 2 - - // If our maximum is less than the value bob sets, we expect this test - // to fail. - expectFail := aliceMaxCSV < bobRequiredCSV - - // First, we will initiate an outgoing channel from Alice -> Bob. - errChan := make(chan er.R, 1) - updateChan := make(chan *lnrpc.OpenStatusUpdate) - initReq := &openChanReq{ - targetPubkey: bob.privKey.PubKey(), - chainHash: *fundingNetParams.GenesisHash, - localFundingAmt: 200000, - fundingFeePerKw: 1000, - updates: updateChan, - err: errChan, - } - - // Alice should have sent the OpenChannel message to Bob. - alice.fundingMgr.initFundingWorkflow(bob, initReq) - var aliceMsg lnwire.Message - select { - case aliceMsg = <-alice.msgChan: - - case err := <-initReq.err: - t.Fatalf("error init funding workflow: %v", err) - - case <-time.After(time.Second * 5): - t.Fatalf("alice did not send OpenChannel message") - } - - openChannelReq, ok := aliceMsg.(*lnwire.OpenChannel) - require.True(t, ok) - - // Let Bob handle the init message. - bob.fundingMgr.ProcessFundingMsg(openChannelReq, alice) - - // Bob should answer with an AcceptChannel message. 
- acceptChannelResponse := assertFundingMsgSent( - t, bob.msgChan, "AcceptChannel", - ).(*lnwire.AcceptChannel) - - // They now should both have pending reservations for this channel - // active. - assertNumPendingReservations(t, alice, bobPubKey, 1) - assertNumPendingReservations(t, bob, alicePubKey, 1) - - // Forward the response to Alice. - alice.fundingMgr.ProcessFundingMsg(acceptChannelResponse, bob) - - // At this point, Alice has received an AcceptChannel message from - // bob with the CSV value that he has set for her, and has to evaluate - // whether she wants to accept this channel. If we get an error, we - // assert that we expected the channel to fail, otherwise we assert that - // she proceeded with the channel open as usual. - select { - case err := <-errChan: - util.RequireErr(t, err) - require.True(t, expectFail) - - case msg := <-alice.msgChan: - _, ok := msg.(*lnwire.FundingCreated) - require.True(t, ok) - require.False(t, expectFail) - - case <-time.After(time.Second): - t.Fatal("funding flow was not failed") - } - - // We do not need to complete the rest of the funding flow (it is - // covered in other tests). So now we test that Alice will appropriately - // handle incoming channels, opening a channel from Bob->Alice. - errChan = make(chan er.R, 1) - updateChan = make(chan *lnrpc.OpenStatusUpdate) - initReq = &openChanReq{ - targetPubkey: alice.privKey.PubKey(), - chainHash: *fundingNetParams.GenesisHash, - localFundingAmt: 200000, - fundingFeePerKw: 1000, - updates: updateChan, - err: errChan, - } - - bob.fundingMgr.initFundingWorkflow(alice, initReq) - - // Bob should have sent the OpenChannel message to Alice. 
- var bobMsg lnwire.Message - select { - case bobMsg = <-bob.msgChan: - - case err := <-initReq.err: - t.Fatalf("bob OpenChannel message failed: %v", err) - - case <-time.After(time.Second * 5): - t.Fatalf("bob did not send OpenChannel message") - } - - openChannelReq, ok = bobMsg.(*lnwire.OpenChannel) - require.True(t, ok) - - // Let Alice handle the init message. - alice.fundingMgr.ProcessFundingMsg(openChannelReq, bob) - - // We expect a error message from Alice if we're expecting the channel - // to fail, otherwise we expect her to proceed with the channel as - // usual. - select { - case msg := <-alice.msgChan: - var ok bool - if expectFail { - _, ok = msg.(*lnwire.Error) - } else { - _, ok = msg.(*lnwire.AcceptChannel) - } - require.True(t, ok) - - case <-time.After(time.Second * 5): - t.Fatal("funding flow was not failed") - } -} - -func TestFundingManagerRestartBehavior(t *testing.T) { - t.Parallel() - - alice, bob := setupFundingManagers(t) - defer tearDownFundingManagers(t, alice, bob) - - // Run through the process of opening the channel, up until the funding - // transaction is broadcasted. - localAmt := btcutil.Amount(500000) - pushAmt := btcutil.Amount(0) - capacity := localAmt + pushAmt - updateChan := make(chan *lnrpc.OpenStatusUpdate) - fundingOutPoint, fundingTx := openChannel( - t, alice, bob, localAmt, pushAmt, 1, updateChan, true, - ) - - // After the funding transaction gets mined, both nodes will send the - // fundingLocked message to the other peer. If the funding node fails - // before this message has been successfully sent, it should retry - // sending it on restart. We mimic this behavior by letting the - // SendToPeer method return an error, as if the message was not - // successfully sent. We then recreate the fundingManager and make sure - // it continues the process as expected. We'll save the current - // implementation of sendMessage to restore the original behavior later - // on. 
- workingSendMessage := bob.sendMessage - bob.sendMessage = func(msg lnwire.Message) er.R { - return er.Errorf("intentional error in SendToPeer") - } - alice.fundingMgr.cfg.NotifyWhenOnline = func(peer [33]byte, - con chan<- lnpeer.Peer) { - // Intentionally empty. - } - - // Notify that transaction was mined - alice.mockNotifier.oneConfChannel <- &chainntnfs.TxConfirmation{ - Tx: fundingTx, - } - bob.mockNotifier.oneConfChannel <- &chainntnfs.TxConfirmation{ - Tx: fundingTx, - } - - // The funding transaction was mined, so assert that both funding - // managers now have the state of this channel 'markedOpen' in their - // internal state machine. - assertMarkedOpen(t, alice, bob, fundingOutPoint) - - // After the funding transaction was mined, Bob should have successfully - // sent the fundingLocked message, while Alice failed sending it. In - // Alice's case this means that there should be no messages for Bob, and - // the channel should still be in state 'markedOpen' - select { - case msg := <-alice.msgChan: - t.Fatalf("did not expect any message from Alice: %v", msg) - default: - // Expected. - } - - // Bob will send funding locked to Alice. - fundingLockedBob := assertFundingMsgSent( - t, bob.msgChan, "FundingLocked", - ).(*lnwire.FundingLocked) - - // Alice should still be markedOpen - assertDatabaseState(t, alice, fundingOutPoint, markedOpen) - - // While Bob successfully sent fundingLocked. - assertDatabaseState(t, bob, fundingOutPoint, fundingLockedSent) - - // We now recreate Alice's fundingManager with the correct sendMessage - // implementation, and expect it to retry sending the fundingLocked - // message. We'll explicitly shut down Alice's funding manager to - // prevent a race when overriding the sendMessage implementation. 
- if err := alice.fundingMgr.Stop(); err != nil { - t.Fatalf("unable to stop alice's funding manager: %v", err) - } - bob.sendMessage = workingSendMessage - recreateAliceFundingManager(t, alice) - - // Intentionally make the channel announcements fail - alice.fundingMgr.cfg.SendAnnouncement = func(msg lnwire.Message, - _ ...discovery.OptionalMsgField) chan er.R { - - errChan := make(chan er.R, 1) - errChan <- er.Errorf("intentional error in SendAnnouncement") - return errChan - } - - fundingLockedAlice := assertFundingMsgSent( - t, alice.msgChan, "FundingLocked", - ).(*lnwire.FundingLocked) - - // The state should now be fundingLockedSent - assertDatabaseState(t, alice, fundingOutPoint, fundingLockedSent) - - // Check that the channel announcements were never sent - select { - case ann := <-alice.announceChan: - t.Fatalf("unexpectedly got channel announcement message: %v", - ann) - default: - // Expected - } - - // Exchange the fundingLocked messages. - alice.fundingMgr.ProcessFundingMsg(fundingLockedBob, bob) - bob.fundingMgr.ProcessFundingMsg(fundingLockedAlice, alice) - - // Check that they notify the breach arbiter and peer about the new - // channel. - assertHandleFundingLocked(t, alice, bob) - - // Next up, we check that Alice rebroadcasts the announcement - // messages on restart. Bob should as expected send announcements. - recreateAliceFundingManager(t, alice) - time.Sleep(300 * time.Millisecond) - - // Make sure both fundingManagers send the expected channel - // announcements. - assertChannelAnnouncements(t, alice, bob, capacity, nil, nil) - - // Check that the state machine is updated accordingly - assertAddedToRouterGraph(t, alice, bob, fundingOutPoint) - - // Next, we check that Alice sends the announcement signatures - // on restart after six confirmations. Bob should as expected send - // them as well. 
- recreateAliceFundingManager(t, alice) - time.Sleep(300 * time.Millisecond) - - // Notify that six confirmations has been reached on funding transaction. - alice.mockNotifier.sixConfChannel <- &chainntnfs.TxConfirmation{ - Tx: fundingTx, - } - bob.mockNotifier.sixConfChannel <- &chainntnfs.TxConfirmation{ - Tx: fundingTx, - } - - // Make sure the fundingManagers exchange announcement signatures. - assertAnnouncementSignatures(t, alice, bob) - - // The internal state-machine should now have deleted the channelStates - // from the database, as the channel is announced. - assertNoChannelState(t, alice, bob, fundingOutPoint) -} - -// TestFundingManagerOfflinePeer checks that the fundingManager waits for the -// server to notify when the peer comes online, in case sending the -// fundingLocked message fails the first time. -func TestFundingManagerOfflinePeer(t *testing.T) { - t.Parallel() - - alice, bob := setupFundingManagers(t) - defer tearDownFundingManagers(t, alice, bob) - - // Run through the process of opening the channel, up until the funding - // transaction is broadcasted. - localAmt := btcutil.Amount(500000) - pushAmt := btcutil.Amount(0) - capacity := localAmt + pushAmt - updateChan := make(chan *lnrpc.OpenStatusUpdate) - fundingOutPoint, fundingTx := openChannel( - t, alice, bob, localAmt, pushAmt, 1, updateChan, true, - ) - - // After the funding transaction gets mined, both nodes will send the - // fundingLocked message to the other peer. If the funding node fails - // to send the fundingLocked message to the peer, it should wait for - // the server to notify it that the peer is back online, and try again. - // We'll save the current implementation of sendMessage to restore the - // original behavior later on. 
- workingSendMessage := bob.sendMessage - bob.sendMessage = func(msg lnwire.Message) er.R { - return er.Errorf("intentional error in SendToPeer") - } - peerChan := make(chan [33]byte, 1) - conChan := make(chan chan<- lnpeer.Peer, 1) - alice.fundingMgr.cfg.NotifyWhenOnline = func(peer [33]byte, - connected chan<- lnpeer.Peer) { - - peerChan <- peer - conChan <- connected - } - - // Notify that transaction was mined - alice.mockNotifier.oneConfChannel <- &chainntnfs.TxConfirmation{ - Tx: fundingTx, - } - bob.mockNotifier.oneConfChannel <- &chainntnfs.TxConfirmation{ - Tx: fundingTx, - } - - // The funding transaction was mined, so assert that both funding - // managers now have the state of this channel 'markedOpen' in their - // internal state machine. - assertMarkedOpen(t, alice, bob, fundingOutPoint) - - // After the funding transaction was mined, Bob should have successfully - // sent the fundingLocked message, while Alice failed sending it. In - // Alice's case this means that there should be no messages for Bob, and - // the channel should still be in state 'markedOpen' - select { - case msg := <-alice.msgChan: - t.Fatalf("did not expect any message from Alice: %v", msg) - default: - // Expected. - } - - // Bob will send funding locked to Alice - fundingLockedBob := assertFundingMsgSent( - t, bob.msgChan, "FundingLocked", - ).(*lnwire.FundingLocked) - - // Alice should still be markedOpen - assertDatabaseState(t, alice, fundingOutPoint, markedOpen) - - // While Bob successfully sent fundingLocked. - assertDatabaseState(t, bob, fundingOutPoint, fundingLockedSent) - - // Alice should be waiting for the server to notify when Bob comes back - // online. 
- var peer [33]byte - var con chan<- lnpeer.Peer - select { - case peer = <-peerChan: - // Expected - case <-time.After(time.Second * 3): - t.Fatalf("alice did not register peer with server") - } - - select { - case con = <-conChan: - // Expected - case <-time.After(time.Second * 3): - t.Fatalf("alice did not register connectedChan with server") - } - - if !bytes.Equal(peer[:], bobPubKey.SerializeCompressed()) { - t.Fatalf("expected to receive Bob's pubkey (%v), instead got %v", - bobPubKey, peer) - } - - // Restore the correct sendMessage implementation, and notify that Bob - // is back online. - bob.sendMessage = workingSendMessage - con <- bob - - // This should make Alice send the fundingLocked. - fundingLockedAlice := assertFundingMsgSent( - t, alice.msgChan, "FundingLocked", - ).(*lnwire.FundingLocked) - - // The state should now be fundingLockedSent - assertDatabaseState(t, alice, fundingOutPoint, fundingLockedSent) - - // Make sure both fundingManagers send the expected channel - // announcements. - assertChannelAnnouncements(t, alice, bob, capacity, nil, nil) - - // Check that the state machine is updated accordingly - assertAddedToRouterGraph(t, alice, bob, fundingOutPoint) - - // The funding transaction is now confirmed, wait for the - // OpenStatusUpdate_ChanOpen update - waitForOpenUpdate(t, updateChan) - - // Exchange the fundingLocked messages. - alice.fundingMgr.ProcessFundingMsg(fundingLockedBob, bob) - bob.fundingMgr.ProcessFundingMsg(fundingLockedAlice, alice) - - // Check that they notify the breach arbiter and peer about the new - // channel. - assertHandleFundingLocked(t, alice, bob) - - // Notify that six confirmations has been reached on funding transaction. - alice.mockNotifier.sixConfChannel <- &chainntnfs.TxConfirmation{ - Tx: fundingTx, - } - bob.mockNotifier.sixConfChannel <- &chainntnfs.TxConfirmation{ - Tx: fundingTx, - } - - // Make sure both fundingManagers send the expected announcement - // signatures. 
- assertAnnouncementSignatures(t, alice, bob) - - // The internal state-machine should now have deleted the channelStates - // from the database, as the channel is announced. - assertNoChannelState(t, alice, bob, fundingOutPoint) -} - -// TestFundingManagerPeerTimeoutAfterInitFunding checks that the zombie sweeper -// will properly clean up a zombie reservation that times out after the -// initFundingMsg has been handled. -func TestFundingManagerPeerTimeoutAfterInitFunding(t *testing.T) { - t.Parallel() - - alice, bob := setupFundingManagers(t) - defer tearDownFundingManagers(t, alice, bob) - - // We will consume the channel updates as we go, so no buffering is needed. - updateChan := make(chan *lnrpc.OpenStatusUpdate) - - // Create a funding request and start the workflow. - errChan := make(chan er.R, 1) - initReq := &openChanReq{ - targetPubkey: bob.privKey.PubKey(), - chainHash: *fundingNetParams.GenesisHash, - localFundingAmt: 500000, - pushAmt: lnwire.NewMSatFromSatoshis(0), - private: false, - updates: updateChan, - err: errChan, - } - - alice.fundingMgr.initFundingWorkflow(bob, initReq) - - // Alice should have sent the OpenChannel message to Bob. - var aliceMsg lnwire.Message - select { - case aliceMsg = <-alice.msgChan: - case err := <-initReq.err: - t.Fatalf("error init funding workflow: %v", err) - case <-time.After(time.Second * 5): - t.Fatalf("alice did not send OpenChannel message") - } - - _, ok := aliceMsg.(*lnwire.OpenChannel) - if !ok { - errorMsg, gotError := aliceMsg.(*lnwire.Error) - if gotError { - t.Fatalf("expected OpenChannel to be sent "+ - "from bob, instead got error: %v", - errorMsg.Error()) - } - t.Fatalf("expected OpenChannel to be sent from "+ - "alice, instead got %T", aliceMsg) - } - - // Alice should have a new pending reservation. - assertNumPendingReservations(t, alice, bobPubKey, 1) - - // Make sure Alice's reservation times out and then run her zombie sweeper. 
- time.Sleep(1 * time.Millisecond) - go alice.fundingMgr.pruneZombieReservations() - - // Alice should have sent an Error message to Bob. - assertErrorSent(t, alice.msgChan) - - // Alice's zombie reservation should have been pruned. - assertNumPendingReservations(t, alice, bobPubKey, 0) -} - -// TestFundingManagerPeerTimeoutAfterFundingOpen checks that the zombie sweeper -// will properly clean up a zombie reservation that times out after the -// fundingOpenMsg has been handled. -func TestFundingManagerPeerTimeoutAfterFundingOpen(t *testing.T) { - t.Parallel() - - alice, bob := setupFundingManagers(t) - defer tearDownFundingManagers(t, alice, bob) - - // We will consume the channel updates as we go, so no buffering is needed. - updateChan := make(chan *lnrpc.OpenStatusUpdate) - - // Create a funding request and start the workflow. - errChan := make(chan er.R, 1) - initReq := &openChanReq{ - targetPubkey: bob.privKey.PubKey(), - chainHash: *fundingNetParams.GenesisHash, - localFundingAmt: 500000, - pushAmt: lnwire.NewMSatFromSatoshis(0), - private: false, - updates: updateChan, - err: errChan, - } - - alice.fundingMgr.initFundingWorkflow(bob, initReq) - - // Alice should have sent the OpenChannel message to Bob. - var aliceMsg lnwire.Message - select { - case aliceMsg = <-alice.msgChan: - case err := <-initReq.err: - t.Fatalf("error init funding workflow: %v", err) - case <-time.After(time.Second * 5): - t.Fatalf("alice did not send OpenChannel message") - } - - openChannelReq, ok := aliceMsg.(*lnwire.OpenChannel) - if !ok { - errorMsg, gotError := aliceMsg.(*lnwire.Error) - if gotError { - t.Fatalf("expected OpenChannel to be sent "+ - "from bob, instead got error: %v", - errorMsg.Error()) - } - t.Fatalf("expected OpenChannel to be sent from "+ - "alice, instead got %T", aliceMsg) - } - - // Alice should have a new pending reservation. - assertNumPendingReservations(t, alice, bobPubKey, 1) - - // Let Bob handle the init message. 
- bob.fundingMgr.ProcessFundingMsg(openChannelReq, alice) - - // Bob should answer with an AcceptChannel. - assertFundingMsgSent(t, bob.msgChan, "AcceptChannel") - - // Bob should have a new pending reservation. - assertNumPendingReservations(t, bob, alicePubKey, 1) - - // Make sure Bob's reservation times out and then run his zombie sweeper. - time.Sleep(1 * time.Millisecond) - go bob.fundingMgr.pruneZombieReservations() - - // Bob should have sent an Error message to Alice. - assertErrorSent(t, bob.msgChan) - - // Bob's zombie reservation should have been pruned. - assertNumPendingReservations(t, bob, alicePubKey, 0) -} - -// TestFundingManagerPeerTimeoutAfterFundingAccept checks that the zombie sweeper -// will properly clean up a zombie reservation that times out after the -// fundingAcceptMsg has been handled. -func TestFundingManagerPeerTimeoutAfterFundingAccept(t *testing.T) { - t.Parallel() - - alice, bob := setupFundingManagers(t) - defer tearDownFundingManagers(t, alice, bob) - - // We will consume the channel updates as we go, so no buffering is needed. - updateChan := make(chan *lnrpc.OpenStatusUpdate) - - // Create a funding request and start the workflow. - errChan := make(chan er.R, 1) - initReq := &openChanReq{ - targetPubkey: bob.privKey.PubKey(), - chainHash: *fundingNetParams.GenesisHash, - localFundingAmt: 500000, - pushAmt: lnwire.NewMSatFromSatoshis(0), - private: false, - updates: updateChan, - err: errChan, - } - - alice.fundingMgr.initFundingWorkflow(bob, initReq) - - // Alice should have sent the OpenChannel message to Bob. 
- var aliceMsg lnwire.Message - select { - case aliceMsg = <-alice.msgChan: - case err := <-initReq.err: - t.Fatalf("error init funding workflow: %v", err) - case <-time.After(time.Second * 5): - t.Fatalf("alice did not send OpenChannel message") - } - - openChannelReq, ok := aliceMsg.(*lnwire.OpenChannel) - if !ok { - errorMsg, gotError := aliceMsg.(*lnwire.Error) - if gotError { - t.Fatalf("expected OpenChannel to be sent "+ - "from bob, instead got error: %v", - errorMsg.Error()) - } - t.Fatalf("expected OpenChannel to be sent from "+ - "alice, instead got %T", aliceMsg) - } - - // Alice should have a new pending reservation. - assertNumPendingReservations(t, alice, bobPubKey, 1) - - // Let Bob handle the init message. - bob.fundingMgr.ProcessFundingMsg(openChannelReq, alice) - - // Bob should answer with an AcceptChannel. - acceptChannelResponse := assertFundingMsgSent( - t, bob.msgChan, "AcceptChannel", - ).(*lnwire.AcceptChannel) - - // Bob should have a new pending reservation. - assertNumPendingReservations(t, bob, alicePubKey, 1) - - // Forward the response to Alice. - alice.fundingMgr.ProcessFundingMsg(acceptChannelResponse, bob) - - // Alice responds with a FundingCreated messages. - assertFundingMsgSent(t, alice.msgChan, "FundingCreated") - - // Make sure Alice's reservation times out and then run her zombie sweeper. - time.Sleep(1 * time.Millisecond) - go alice.fundingMgr.pruneZombieReservations() - - // Alice should have sent an Error message to Bob. - assertErrorSent(t, alice.msgChan) - - // Alice's zombie reservation should have been pruned. - assertNumPendingReservations(t, alice, bobPubKey, 0) -} - -func TestFundingManagerFundingTimeout(t *testing.T) { - t.Parallel() - - alice, bob := setupFundingManagers(t) - defer tearDownFundingManagers(t, alice, bob) - - // We will consume the channel updates as we go, so no buffering is needed. 
- updateChan := make(chan *lnrpc.OpenStatusUpdate) - - // Run through the process of opening the channel, up until the funding - // transaction is broadcasted. - _, _ = openChannel(t, alice, bob, 500000, 0, 1, updateChan, true) - - // Bob will at this point be waiting for the funding transaction to be - // confirmed, so the channel should be considered pending. - pendingChannels, err := bob.fundingMgr.cfg.Wallet.Cfg.Database.FetchPendingChannels() - if err != nil { - t.Fatalf("unable to fetch pending channels: %v", err) - } - if len(pendingChannels) != 1 { - t.Fatalf("Expected Bob to have 1 pending channel, had %v", - len(pendingChannels)) - } - - // We expect Bob to forget the channel after 2016 blocks (2 weeks), so - // mine 2016-1, and check that it is still pending. - bob.mockNotifier.epochChan <- &chainntnfs.BlockEpoch{ - Height: fundingBroadcastHeight + maxWaitNumBlocksFundingConf - 1, - } - - // Bob should still be waiting for the channel to open. - assertNumPendingChannelsRemains(t, bob, 1) - - bob.mockNotifier.epochChan <- &chainntnfs.BlockEpoch{ - Height: fundingBroadcastHeight + maxWaitNumBlocksFundingConf, - } - - // Bob should have sent an Error message to Alice. - assertErrorSent(t, bob.msgChan) - - // Should not be pending anymore. - assertNumPendingChannelsBecomes(t, bob, 0) -} - -// TestFundingManagerFundingNotTimeoutInitiator checks that if the user was -// the channel initiator, that it does not timeout when the lnd restarts. -func TestFundingManagerFundingNotTimeoutInitiator(t *testing.T) { - t.Parallel() - - alice, bob := setupFundingManagers(t) - defer tearDownFundingManagers(t, alice, bob) - - // We will consume the channel updates as we go, so no buffering is needed. - updateChan := make(chan *lnrpc.OpenStatusUpdate) - - // Run through the process of opening the channel, up until the funding - // transaction is broadcasted. 
- _, _ = openChannel(t, alice, bob, 500000, 0, 1, updateChan, true) - - // Alice will at this point be waiting for the funding transaction to be - // confirmed, so the channel should be considered pending. - pendingChannels, err := alice.fundingMgr.cfg.Wallet.Cfg.Database.FetchPendingChannels() - if err != nil { - t.Fatalf("unable to fetch pending channels: %v", err) - } - if len(pendingChannels) != 1 { - t.Fatalf("Expected Alice to have 1 pending channel, had %v", - len(pendingChannels)) - } - - recreateAliceFundingManager(t, alice) - - // We should receive the rebroadcasted funding txn. - select { - case <-alice.publTxChan: - case <-time.After(time.Second * 5): - t.Fatalf("alice did not publish funding tx") - } - - // Increase the height to 1 minus the maxWaitNumBlocksFundingConf height. - alice.mockNotifier.epochChan <- &chainntnfs.BlockEpoch{ - Height: fundingBroadcastHeight + maxWaitNumBlocksFundingConf - 1, - } - - bob.mockNotifier.epochChan <- &chainntnfs.BlockEpoch{ - Height: fundingBroadcastHeight + maxWaitNumBlocksFundingConf - 1, - } - - // Assert both and Alice and Bob still have 1 pending channels. - assertNumPendingChannelsRemains(t, alice, 1) - - assertNumPendingChannelsRemains(t, bob, 1) - - // Increase both Alice and Bob to maxWaitNumBlocksFundingConf height. - alice.mockNotifier.epochChan <- &chainntnfs.BlockEpoch{ - Height: fundingBroadcastHeight + maxWaitNumBlocksFundingConf, - } - - bob.mockNotifier.epochChan <- &chainntnfs.BlockEpoch{ - Height: fundingBroadcastHeight + maxWaitNumBlocksFundingConf, - } - - // Since Alice was the initiator, the channel should not have timed out. - assertNumPendingChannelsRemains(t, alice, 1) - - // Bob should have sent an Error message to Alice. - assertErrorSent(t, bob.msgChan) - - // Since Bob was not the initiator, the channel should timeout. 
- assertNumPendingChannelsBecomes(t, bob, 0) -} - -// TestFundingManagerReceiveFundingLockedTwice checks that the fundingManager -// continues to operate as expected in case we receive a duplicate fundingLocked -// message. -func TestFundingManagerReceiveFundingLockedTwice(t *testing.T) { - t.Parallel() - - alice, bob := setupFundingManagers(t) - defer tearDownFundingManagers(t, alice, bob) - - // We will consume the channel updates as we go, so no buffering is needed. - updateChan := make(chan *lnrpc.OpenStatusUpdate) - - // Run through the process of opening the channel, up until the funding - // transaction is broadcasted. - localAmt := btcutil.Amount(500000) - pushAmt := btcutil.Amount(0) - capacity := localAmt + pushAmt - fundingOutPoint, fundingTx := openChannel( - t, alice, bob, localAmt, pushAmt, 1, updateChan, true, - ) - - // Notify that transaction was mined - alice.mockNotifier.oneConfChannel <- &chainntnfs.TxConfirmation{ - Tx: fundingTx, - } - bob.mockNotifier.oneConfChannel <- &chainntnfs.TxConfirmation{ - Tx: fundingTx, - } - - // The funding transaction was mined, so assert that both funding - // managers now have the state of this channel 'markedOpen' in their - // internal state machine. - assertMarkedOpen(t, alice, bob, fundingOutPoint) - - // After the funding transaction is mined, Alice will send - // fundingLocked to Bob. - fundingLockedAlice := assertFundingMsgSent( - t, alice.msgChan, "FundingLocked", - ).(*lnwire.FundingLocked) - - // And similarly Bob will send funding locked to Alice. - fundingLockedBob := assertFundingMsgSent( - t, bob.msgChan, "FundingLocked", - ).(*lnwire.FundingLocked) - - // Check that the state machine is updated accordingly - assertFundingLockedSent(t, alice, bob, fundingOutPoint) - - // Make sure both fundingManagers send the expected channel - // announcements. 
- assertChannelAnnouncements(t, alice, bob, capacity, nil, nil) - - // Check that the state machine is updated accordingly - assertAddedToRouterGraph(t, alice, bob, fundingOutPoint) - - // The funding transaction is now confirmed, wait for the - // OpenStatusUpdate_ChanOpen update - waitForOpenUpdate(t, updateChan) - - // Send the fundingLocked message twice to Alice, and once to Bob. - alice.fundingMgr.ProcessFundingMsg(fundingLockedBob, bob) - alice.fundingMgr.ProcessFundingMsg(fundingLockedBob, bob) - bob.fundingMgr.ProcessFundingMsg(fundingLockedAlice, alice) - - // Check that they notify the breach arbiter and peer about the new - // channel. - assertHandleFundingLocked(t, alice, bob) - - // Alice should not send the channel state the second time, as the - // second funding locked should just be ignored. - select { - case <-alice.newChannels: - t.Fatalf("alice sent new channel to peer a second time") - case <-time.After(time.Millisecond * 300): - // Expected - } - - // Another fundingLocked should also be ignored, since Alice should - // have updated her database at this point. - alice.fundingMgr.ProcessFundingMsg(fundingLockedBob, bob) - select { - case <-alice.newChannels: - t.Fatalf("alice sent new channel to peer a second time") - case <-time.After(time.Millisecond * 300): - // Expected - } - - // Notify that six confirmations has been reached on funding transaction. - alice.mockNotifier.sixConfChannel <- &chainntnfs.TxConfirmation{ - Tx: fundingTx, - } - bob.mockNotifier.sixConfChannel <- &chainntnfs.TxConfirmation{ - Tx: fundingTx, - } - - // Make sure the fundingManagers exchange announcement signatures. - assertAnnouncementSignatures(t, alice, bob) - - // The internal state-machine should now have deleted the channelStates - // from the database, as the channel is announced. 
- assertNoChannelState(t, alice, bob, fundingOutPoint) -} - -// TestFundingManagerRestartAfterChanAnn checks that the fundingManager properly -// handles receiving a fundingLocked after the its own fundingLocked and channel -// announcement is sent and gets restarted. -func TestFundingManagerRestartAfterChanAnn(t *testing.T) { - t.Parallel() - - alice, bob := setupFundingManagers(t) - defer tearDownFundingManagers(t, alice, bob) - - // We will consume the channel updates as we go, so no buffering is needed. - updateChan := make(chan *lnrpc.OpenStatusUpdate) - - // Run through the process of opening the channel, up until the funding - // transaction is broadcasted. - localAmt := btcutil.Amount(500000) - pushAmt := btcutil.Amount(0) - capacity := localAmt + pushAmt - fundingOutPoint, fundingTx := openChannel( - t, alice, bob, localAmt, pushAmt, 1, updateChan, true, - ) - - // Notify that transaction was mined - alice.mockNotifier.oneConfChannel <- &chainntnfs.TxConfirmation{ - Tx: fundingTx, - } - bob.mockNotifier.oneConfChannel <- &chainntnfs.TxConfirmation{ - Tx: fundingTx, - } - - // The funding transaction was mined, so assert that both funding - // managers now have the state of this channel 'markedOpen' in their - // internal state machine. - assertMarkedOpen(t, alice, bob, fundingOutPoint) - - // After the funding transaction is mined, Alice will send - // fundingLocked to Bob. - fundingLockedAlice := assertFundingMsgSent( - t, alice.msgChan, "FundingLocked", - ).(*lnwire.FundingLocked) - - // And similarly Bob will send funding locked to Alice. - fundingLockedBob := assertFundingMsgSent( - t, bob.msgChan, "FundingLocked", - ).(*lnwire.FundingLocked) - - // Check that the state machine is updated accordingly - assertFundingLockedSent(t, alice, bob, fundingOutPoint) - - // Make sure both fundingManagers send the expected channel - // announcements. 
- assertChannelAnnouncements(t, alice, bob, capacity, nil, nil) - - // Check that the state machine is updated accordingly - assertAddedToRouterGraph(t, alice, bob, fundingOutPoint) - - // The funding transaction is now confirmed, wait for the - // OpenStatusUpdate_ChanOpen update - waitForOpenUpdate(t, updateChan) - - // At this point we restart Alice's fundingManager, before she receives - // the fundingLocked message. After restart, she will receive it, and - // we expect her to be able to handle it correctly. - recreateAliceFundingManager(t, alice) - - // Exchange the fundingLocked messages. - alice.fundingMgr.ProcessFundingMsg(fundingLockedBob, bob) - bob.fundingMgr.ProcessFundingMsg(fundingLockedAlice, alice) - - // Check that they notify the breach arbiter and peer about the new - // channel. - assertHandleFundingLocked(t, alice, bob) - - // Notify that six confirmations has been reached on funding transaction. - alice.mockNotifier.sixConfChannel <- &chainntnfs.TxConfirmation{ - Tx: fundingTx, - } - bob.mockNotifier.sixConfChannel <- &chainntnfs.TxConfirmation{ - Tx: fundingTx, - } - - // Make sure both fundingManagers send the expected channel announcements. - assertAnnouncementSignatures(t, alice, bob) - - // The internal state-machine should now have deleted the channelStates - // from the database, as the channel is announced. - assertNoChannelState(t, alice, bob, fundingOutPoint) -} - -// TestFundingManagerRestartAfterReceivingFundingLocked checks that the -// fundingManager continues to operate as expected after it has received -// fundingLocked and then gets restarted. -func TestFundingManagerRestartAfterReceivingFundingLocked(t *testing.T) { - t.Parallel() - - alice, bob := setupFundingManagers(t) - defer tearDownFundingManagers(t, alice, bob) - - // We will consume the channel updates as we go, so no buffering is needed. 
- updateChan := make(chan *lnrpc.OpenStatusUpdate) - - // Run through the process of opening the channel, up until the funding - // transaction is broadcasted. - localAmt := btcutil.Amount(500000) - pushAmt := btcutil.Amount(0) - capacity := localAmt + pushAmt - fundingOutPoint, fundingTx := openChannel( - t, alice, bob, localAmt, pushAmt, 1, updateChan, true, - ) - - // Notify that transaction was mined - alice.mockNotifier.oneConfChannel <- &chainntnfs.TxConfirmation{ - Tx: fundingTx, - } - bob.mockNotifier.oneConfChannel <- &chainntnfs.TxConfirmation{ - Tx: fundingTx, - } - - // The funding transaction was mined, so assert that both funding - // managers now have the state of this channel 'markedOpen' in their - // internal state machine. - assertMarkedOpen(t, alice, bob, fundingOutPoint) - - // After the funding transaction is mined, Alice will send - // fundingLocked to Bob. - fundingLockedAlice := assertFundingMsgSent( - t, alice.msgChan, "FundingLocked", - ).(*lnwire.FundingLocked) - - // And similarly Bob will send funding locked to Alice. - fundingLockedBob := assertFundingMsgSent( - t, bob.msgChan, "FundingLocked", - ).(*lnwire.FundingLocked) - - // Check that the state machine is updated accordingly - assertFundingLockedSent(t, alice, bob, fundingOutPoint) - - // Let Alice immediately get the fundingLocked message. - alice.fundingMgr.ProcessFundingMsg(fundingLockedBob, bob) - - // Also let Bob get the fundingLocked message. - bob.fundingMgr.ProcessFundingMsg(fundingLockedAlice, alice) - - // Check that they notify the breach arbiter and peer about the new - // channel. - assertHandleFundingLocked(t, alice, bob) - - // At this point we restart Alice's fundingManager. - recreateAliceFundingManager(t, alice) - - // Make sure both fundingManagers send the expected channel - // announcements. 
- assertChannelAnnouncements(t, alice, bob, capacity, nil, nil) - - // Check that the state machine is updated accordingly - assertAddedToRouterGraph(t, alice, bob, fundingOutPoint) - - // Notify that six confirmations has been reached on funding transaction. - alice.mockNotifier.sixConfChannel <- &chainntnfs.TxConfirmation{ - Tx: fundingTx, - } - bob.mockNotifier.sixConfChannel <- &chainntnfs.TxConfirmation{ - Tx: fundingTx, - } - - // Make sure both fundingManagers send the expected channel announcements. - assertAnnouncementSignatures(t, alice, bob) - - // The internal state-machine should now have deleted the channelStates - // from the database, as the channel is announced. - assertNoChannelState(t, alice, bob, fundingOutPoint) -} - -// TestFundingManagerPrivateChannel tests that if we open a private channel -// (a channel not supposed to be announced to the rest of the network), -// the announcementSignatures nor the nodeAnnouncement messages are sent. -func TestFundingManagerPrivateChannel(t *testing.T) { - t.Parallel() - - alice, bob := setupFundingManagers(t) - defer tearDownFundingManagers(t, alice, bob) - - // We will consume the channel updates as we go, so no buffering is needed. - updateChan := make(chan *lnrpc.OpenStatusUpdate) - - // Run through the process of opening the channel, up until the funding - // transaction is broadcasted. - localAmt := btcutil.Amount(500000) - pushAmt := btcutil.Amount(0) - capacity := localAmt + pushAmt - fundingOutPoint, fundingTx := openChannel( - t, alice, bob, localAmt, pushAmt, 1, updateChan, false, - ) - - // Notify that transaction was mined - alice.mockNotifier.oneConfChannel <- &chainntnfs.TxConfirmation{ - Tx: fundingTx, - } - bob.mockNotifier.oneConfChannel <- &chainntnfs.TxConfirmation{ - Tx: fundingTx, - } - - // The funding transaction was mined, so assert that both funding - // managers now have the state of this channel 'markedOpen' in their - // internal state machine. 
- assertMarkedOpen(t, alice, bob, fundingOutPoint) - - // After the funding transaction is mined, Alice will send - // fundingLocked to Bob. - fundingLockedAlice := assertFundingMsgSent( - t, alice.msgChan, "FundingLocked", - ).(*lnwire.FundingLocked) - - // And similarly Bob will send funding locked to Alice. - fundingLockedBob := assertFundingMsgSent( - t, bob.msgChan, "FundingLocked", - ).(*lnwire.FundingLocked) - - // Check that the state machine is updated accordingly - assertFundingLockedSent(t, alice, bob, fundingOutPoint) - - // Make sure both fundingManagers send the expected channel - // announcements. - assertChannelAnnouncements(t, alice, bob, capacity, nil, nil) - - // The funding transaction is now confirmed, wait for the - // OpenStatusUpdate_ChanOpen update - waitForOpenUpdate(t, updateChan) - - // Exchange the fundingLocked messages. - alice.fundingMgr.ProcessFundingMsg(fundingLockedBob, bob) - bob.fundingMgr.ProcessFundingMsg(fundingLockedAlice, alice) - - // Check that they notify the breach arbiter and peer about the new - // channel. - assertHandleFundingLocked(t, alice, bob) - - // Notify that six confirmations has been reached on funding transaction. - alice.mockNotifier.sixConfChannel <- &chainntnfs.TxConfirmation{ - Tx: fundingTx, - } - bob.mockNotifier.sixConfChannel <- &chainntnfs.TxConfirmation{ - Tx: fundingTx, - } - - // Since this is a private channel, we shouldn't receive the - // announcement signatures. - select { - case ann := <-alice.announceChan: - t.Fatalf("unexpectedly got channel announcement message: %v", ann) - case <-time.After(300 * time.Millisecond): - // Expected - } - - select { - case ann := <-bob.announceChan: - t.Fatalf("unexpectedly got channel announcement message: %v", ann) - case <-time.After(300 * time.Millisecond): - // Expected - } - - // We should however receive each side's node announcement. 
- select { - case msg := <-alice.msgChan: - if _, ok := msg.(*lnwire.NodeAnnouncement); !ok { - t.Fatalf("expected to receive node announcement") - } - case <-time.After(time.Second): - t.Fatalf("expected to receive node announcement") - } - - select { - case msg := <-bob.msgChan: - if _, ok := msg.(*lnwire.NodeAnnouncement); !ok { - t.Fatalf("expected to receive node announcement") - } - case <-time.After(time.Second): - t.Fatalf("expected to receive node announcement") - } - - // The internal state-machine should now have deleted the channelStates - // from the database, as the channel is announced. - assertNoChannelState(t, alice, bob, fundingOutPoint) -} - -// TestFundingManagerPrivateRestart tests that the privacy guarantees granted -// by the private channel persist even on restart. This means that the -// announcement signatures nor the node announcement messages are sent upon -// restart. -func TestFundingManagerPrivateRestart(t *testing.T) { - t.Parallel() - - alice, bob := setupFundingManagers(t) - defer tearDownFundingManagers(t, alice, bob) - - // We will consume the channel updates as we go, so no buffering is needed. - updateChan := make(chan *lnrpc.OpenStatusUpdate) - - // Run through the process of opening the channel, up until the funding - // transaction is broadcasted. - localAmt := btcutil.Amount(500000) - pushAmt := btcutil.Amount(0) - capacity := localAmt + pushAmt - fundingOutPoint, fundingTx := openChannel( - t, alice, bob, localAmt, pushAmt, 1, updateChan, false, - ) - - // Notify that transaction was mined - alice.mockNotifier.oneConfChannel <- &chainntnfs.TxConfirmation{ - Tx: fundingTx, - } - bob.mockNotifier.oneConfChannel <- &chainntnfs.TxConfirmation{ - Tx: fundingTx, - } - - // The funding transaction was mined, so assert that both funding - // managers now have the state of this channel 'markedOpen' in their - // internal state machine. 
- assertMarkedOpen(t, alice, bob, fundingOutPoint) - - // After the funding transaction is mined, Alice will send - // fundingLocked to Bob. - fundingLockedAlice := assertFundingMsgSent( - t, alice.msgChan, "FundingLocked", - ).(*lnwire.FundingLocked) - - // And similarly Bob will send funding locked to Alice. - fundingLockedBob := assertFundingMsgSent( - t, bob.msgChan, "FundingLocked", - ).(*lnwire.FundingLocked) - - // Check that the state machine is updated accordingly - assertFundingLockedSent(t, alice, bob, fundingOutPoint) - - // Make sure both fundingManagers send the expected channel - // announcements. - assertChannelAnnouncements(t, alice, bob, capacity, nil, nil) - - // Note: We don't check for the addedToRouterGraph state because in - // the private channel mode, the state is quickly changed from - // addedToRouterGraph to deleted from the database since the public - // announcement phase is skipped. - - // The funding transaction is now confirmed, wait for the - // OpenStatusUpdate_ChanOpen update - waitForOpenUpdate(t, updateChan) - - // Exchange the fundingLocked messages. - alice.fundingMgr.ProcessFundingMsg(fundingLockedBob, bob) - bob.fundingMgr.ProcessFundingMsg(fundingLockedAlice, alice) - - // Check that they notify the breach arbiter and peer about the new - // channel. - assertHandleFundingLocked(t, alice, bob) - - // Notify that six confirmations has been reached on funding transaction. - alice.mockNotifier.sixConfChannel <- &chainntnfs.TxConfirmation{ - Tx: fundingTx, - } - bob.mockNotifier.sixConfChannel <- &chainntnfs.TxConfirmation{ - Tx: fundingTx, - } - - // Since this is a private channel, we shouldn't receive the public - // channel announcement messages. 
- select { - case ann := <-alice.announceChan: - t.Fatalf("unexpectedly got channel announcement message: %v", ann) - case <-time.After(300 * time.Millisecond): - } - - select { - case ann := <-bob.announceChan: - t.Fatalf("unexpectedly got channel announcement message: %v", ann) - case <-time.After(300 * time.Millisecond): - } - - // We should however receive each side's node announcement. - select { - case msg := <-alice.msgChan: - if _, ok := msg.(*lnwire.NodeAnnouncement); !ok { - t.Fatalf("expected to receive node announcement") - } - case <-time.After(time.Second): - t.Fatalf("expected to receive node announcement") - } - - select { - case msg := <-bob.msgChan: - if _, ok := msg.(*lnwire.NodeAnnouncement); !ok { - t.Fatalf("expected to receive node announcement") - } - case <-time.After(time.Second): - t.Fatalf("expected to receive node announcement") - } - - // Restart Alice's fundingManager so we can prove that the public - // channel announcements are not sent upon restart and that the private - // setting persists upon restart. - recreateAliceFundingManager(t, alice) - - select { - case ann := <-alice.announceChan: - t.Fatalf("unexpectedly got channel announcement message: %v", ann) - case <-time.After(300 * time.Millisecond): - // Expected - } - - select { - case ann := <-bob.announceChan: - t.Fatalf("unexpectedly got channel announcement message: %v", ann) - case <-time.After(300 * time.Millisecond): - // Expected - } - - // The internal state-machine should now have deleted the channelStates - // from the database, as the channel is announced. - assertNoChannelState(t, alice, bob, fundingOutPoint) -} - -// TestFundingManagerCustomChannelParameters checks that custom requirements we -// specify during the channel funding flow is preserved correcly on both sides. 
-func TestFundingManagerCustomChannelParameters(t *testing.T) { - t.Parallel() - - alice, bob := setupFundingManagers(t) - defer tearDownFundingManagers(t, alice, bob) - - // This is the custom parameters we'll use. - const csvDelay = 67 - const minHtlcIn = 1234 - const maxValueInFlight = 50000 - const fundingAmt = 5000000 - - // We will consume the channel updates as we go, so no buffering is - // needed. - updateChan := make(chan *lnrpc.OpenStatusUpdate) - - localAmt := btcutil.Amount(5000000) - pushAmt := btcutil.Amount(0) - capacity := localAmt + pushAmt - - // Create a funding request with the custom parameters and start the - // workflow. - errChan := make(chan er.R, 1) - initReq := &openChanReq{ - targetPubkey: bob.privKey.PubKey(), - chainHash: *fundingNetParams.GenesisHash, - localFundingAmt: localAmt, - pushAmt: lnwire.NewMSatFromSatoshis(pushAmt), - private: false, - maxValueInFlight: maxValueInFlight, - minHtlcIn: minHtlcIn, - remoteCsvDelay: csvDelay, - updates: updateChan, - err: errChan, - } - - alice.fundingMgr.initFundingWorkflow(bob, initReq) - - // Alice should have sent the OpenChannel message to Bob. - var aliceMsg lnwire.Message - select { - case aliceMsg = <-alice.msgChan: - case err := <-initReq.err: - t.Fatalf("error init funding workflow: %v", err) - case <-time.After(time.Second * 5): - t.Fatalf("alice did not send OpenChannel message") - } - - openChannelReq, ok := aliceMsg.(*lnwire.OpenChannel) - if !ok { - errorMsg, gotError := aliceMsg.(*lnwire.Error) - if gotError { - t.Fatalf("expected OpenChannel to be sent "+ - "from bob, instead got error: %v", - errorMsg.Error()) - } - t.Fatalf("expected OpenChannel to be sent from "+ - "alice, instead got %T", aliceMsg) - } - - // Check that the custom CSV delay is sent as part of OpenChannel. - if openChannelReq.CsvDelay != csvDelay { - t.Fatalf("expected OpenChannel to have CSV delay %v, got %v", - csvDelay, openChannelReq.CsvDelay) - } - - // Check that the custom minHTLC value is sent. 
- if openChannelReq.HtlcMinimum != minHtlcIn { - t.Fatalf("expected OpenChannel to have minHtlc %v, got %v", - minHtlcIn, openChannelReq.HtlcMinimum) - } - - // Check that the max value in flight is sent as part of OpenChannel. - if openChannelReq.MaxValueInFlight != maxValueInFlight { - t.Fatalf("expected OpenChannel to have MaxValueInFlight %v, got %v", - maxValueInFlight, openChannelReq.MaxValueInFlight) - } - - chanID := openChannelReq.PendingChannelID - - // Let Bob handle the init message. - bob.fundingMgr.ProcessFundingMsg(openChannelReq, alice) - - // Bob should answer with an AcceptChannel message. - acceptChannelResponse := assertFundingMsgSent( - t, bob.msgChan, "AcceptChannel", - ).(*lnwire.AcceptChannel) - - // Bob should require the default delay of 4. - if acceptChannelResponse.CsvDelay != 4 { - t.Fatalf("expected AcceptChannel to have CSV delay %v, got %v", - 4, acceptChannelResponse.CsvDelay) - } - - // And the default MinHTLC value of 5. - if acceptChannelResponse.HtlcMinimum != 5 { - t.Fatalf("expected AcceptChannel to have minHtlc %v, got %v", - 5, acceptChannelResponse.HtlcMinimum) - } - - reserve := lnwire.NewMSatFromSatoshis(fundingAmt / 100) - maxValueAcceptChannel := lnwire.NewMSatFromSatoshis(fundingAmt) - reserve - - if acceptChannelResponse.MaxValueInFlight != maxValueAcceptChannel { - t.Fatalf("expected AcceptChannel to have MaxValueInFlight %v, got %v", - maxValueAcceptChannel, acceptChannelResponse.MaxValueInFlight) - } - - // Forward the response to Alice. - alice.fundingMgr.ProcessFundingMsg(acceptChannelResponse, bob) - - // Alice responds with a FundingCreated message. - fundingCreated := assertFundingMsgSent( - t, alice.msgChan, "FundingCreated", - ).(*lnwire.FundingCreated) - - // Helper method for checking the CSV delay stored for a reservation. 
- assertDelay := func(resCtx *reservationWithCtx, - ourDelay, theirDelay uint16) er.R { - - ourCsvDelay := resCtx.reservation.OurContribution().CsvDelay - if ourCsvDelay != ourDelay { - return er.Errorf("expected our CSV delay to be %v, "+ - "was %v", ourDelay, ourCsvDelay) - } - - theirCsvDelay := resCtx.reservation.TheirContribution().CsvDelay - if theirCsvDelay != theirDelay { - return er.Errorf("expected their CSV delay to be %v, "+ - "was %v", theirDelay, theirCsvDelay) - } - return nil - } - - // Helper method for checking the MinHtlc value stored for a - // reservation. - assertMinHtlc := func(resCtx *reservationWithCtx, - expOurMinHtlc, expTheirMinHtlc lnwire.MilliSatoshi) er.R { - - ourMinHtlc := resCtx.reservation.OurContribution().MinHTLC - if ourMinHtlc != expOurMinHtlc { - return er.Errorf("expected our minHtlc to be %v, "+ - "was %v", expOurMinHtlc, ourMinHtlc) - } - - theirMinHtlc := resCtx.reservation.TheirContribution().MinHTLC - if theirMinHtlc != expTheirMinHtlc { - return er.Errorf("expected their minHtlc to be %v, "+ - "was %v", expTheirMinHtlc, theirMinHtlc) - } - return nil - } - - // Helper method for checking the MaxValueInFlight stored for a - // reservation. - assertMaxHtlc := func(resCtx *reservationWithCtx, - expOurMaxValue, expTheirMaxValue lnwire.MilliSatoshi) er.R { - - ourMaxValue := - resCtx.reservation.OurContribution().MaxPendingAmount - if ourMaxValue != expOurMaxValue { - return er.Errorf("expected our maxValue to be %v, "+ - "was %v", expOurMaxValue, ourMaxValue) - } - - theirMaxValue := - resCtx.reservation.TheirContribution().MaxPendingAmount - if theirMaxValue != expTheirMaxValue { - return er.Errorf("expected their MaxPendingAmount to be %v, "+ - "was %v", expTheirMaxValue, theirMaxValue) - } - return nil - } - - // Check that the custom channel parameters were properly set in the - // channel reservation. 
- resCtx, err := alice.fundingMgr.getReservationCtx(bobPubKey, chanID) - if err != nil { - t.Fatalf("unable to find ctx: %v", err) - } - - // Alice's CSV delay should be 4 since Bob sent the default value, and - // Bob's should be 67 since Alice sent the custom value. - if err := assertDelay(resCtx, 4, csvDelay); err != nil { - t.Fatal(err) - } - - // The minimum HTLC value Alice can offer should be 5, and the minimum - // Bob can offer should be 1234. - if err := assertMinHtlc(resCtx, 5, minHtlcIn); err != nil { - t.Fatal(err) - } - - // The max value in flight Alice can have should be maxValueAcceptChannel, - // which is the default value and the maxium Bob can offer should be - // maxValueInFlight. - if err := assertMaxHtlc(resCtx, - maxValueAcceptChannel, maxValueInFlight); err != nil { - t.Fatal(err) - } - - // Also make sure the parameters are properly set on Bob's end. - resCtx, err = bob.fundingMgr.getReservationCtx(alicePubKey, chanID) - if err != nil { - t.Fatalf("unable to find ctx: %v", err) - } - - if err := assertDelay(resCtx, csvDelay, 4); err != nil { - t.Fatal(err) - } - - if err := assertMinHtlc(resCtx, minHtlcIn, 5); err != nil { - t.Fatal(err) - } - - if err := assertMaxHtlc(resCtx, - maxValueInFlight, maxValueAcceptChannel); err != nil { - t.Fatal(err) - } - // Give the message to Bob. - bob.fundingMgr.ProcessFundingMsg(fundingCreated, alice) - - // Finally, Bob should send the FundingSigned message. - fundingSigned := assertFundingMsgSent( - t, bob.msgChan, "FundingSigned", - ).(*lnwire.FundingSigned) - - // Forward the signature to Alice. - alice.fundingMgr.ProcessFundingMsg(fundingSigned, bob) - - // After Alice processes the singleFundingSignComplete message, she will - // broadcast the funding transaction to the network. We expect to get a - // channel update saying the channel is pending. 
- var pendingUpdate *lnrpc.OpenStatusUpdate - select { - case pendingUpdate = <-updateChan: - case <-time.After(time.Second * 5): - t.Fatalf("alice did not send OpenStatusUpdate_ChanPending") - } - - _, ok = pendingUpdate.Update.(*lnrpc.OpenStatusUpdate_ChanPending) - if !ok { - t.Fatal("OpenStatusUpdate was not OpenStatusUpdate_ChanPending") - } - - // Wait for Alice to published the funding tx to the network. - var fundingTx *wire.MsgTx - select { - case fundingTx = <-alice.publTxChan: - case <-time.After(time.Second * 5): - t.Fatalf("alice did not publish funding tx") - } - - // Notify that transaction was mined. - alice.mockNotifier.oneConfChannel <- &chainntnfs.TxConfirmation{ - Tx: fundingTx, - } - bob.mockNotifier.oneConfChannel <- &chainntnfs.TxConfirmation{ - Tx: fundingTx, - } - - // After the funding transaction is mined, Alice will send - // fundingLocked to Bob. - _ = assertFundingMsgSent( - t, alice.msgChan, "FundingLocked", - ).(*lnwire.FundingLocked) - - // And similarly Bob will send funding locked to Alice. - _ = assertFundingMsgSent( - t, bob.msgChan, "FundingLocked", - ).(*lnwire.FundingLocked) - - // Make sure both fundingManagers send the expected channel - // announcements. - // Alice should advertise the default MinHTLC value of - // 5, while bob should advertise the value minHtlc, since Alice - // required him to use it. - minHtlcArr := []lnwire.MilliSatoshi{5, minHtlcIn} - - // For maxHltc Alice should advertise the default MaxHtlc value of - // maxValueAcceptChannel, while bob should advertise the value - // maxValueInFlight since Alice required him to use it. 
- maxHtlcArr := []lnwire.MilliSatoshi{maxValueAcceptChannel, maxValueInFlight} - - assertChannelAnnouncements(t, alice, bob, capacity, minHtlcArr, maxHtlcArr) - - // The funding transaction is now confirmed, wait for the - // OpenStatusUpdate_ChanOpen update - waitForOpenUpdate(t, updateChan) -} - -// TestFundingManagerMaxPendingChannels checks that trying to open another -// channel with the same peer when MaxPending channels are pending fails. -func TestFundingManagerMaxPendingChannels(t *testing.T) { - t.Parallel() - - alice, bob := setupFundingManagers( - t, func(cfg *fundingConfig) { - cfg.MaxPendingChannels = maxPending - }, - ) - defer tearDownFundingManagers(t, alice, bob) - - // Create openChanReqs for maxPending+1 channels. - var initReqs []*openChanReq - for i := 0; i < maxPending+1; i++ { - updateChan := make(chan *lnrpc.OpenStatusUpdate) - errChan := make(chan er.R, 1) - initReq := &openChanReq{ - targetPubkey: bob.privKey.PubKey(), - chainHash: *fundingNetParams.GenesisHash, - localFundingAmt: 5000000, - pushAmt: lnwire.NewMSatFromSatoshis(0), - private: false, - updates: updateChan, - err: errChan, - } - initReqs = append(initReqs, initReq) - } - - // Kick of maxPending+1 funding workflows. - var accepts []*lnwire.AcceptChannel - var lastOpen *lnwire.OpenChannel - for i, initReq := range initReqs { - alice.fundingMgr.initFundingWorkflow(bob, initReq) - - // Alice should have sent the OpenChannel message to Bob. 
- var aliceMsg lnwire.Message - select { - case aliceMsg = <-alice.msgChan: - case err := <-initReq.err: - t.Fatalf("error init funding workflow: %v", err) - case <-time.After(time.Second * 5): - t.Fatalf("alice did not send OpenChannel message") - } - - openChannelReq, ok := aliceMsg.(*lnwire.OpenChannel) - if !ok { - errorMsg, gotError := aliceMsg.(*lnwire.Error) - if gotError { - t.Fatalf("expected OpenChannel to be sent "+ - "from bob, instead got error: %v", - errorMsg.Error()) - } - t.Fatalf("expected OpenChannel to be sent from "+ - "alice, instead got %T", aliceMsg) - } - - // Let Bob handle the init message. - bob.fundingMgr.ProcessFundingMsg(openChannelReq, alice) - - // Bob should answer with an AcceptChannel message for the - // first maxPending channels. - if i < maxPending { - acceptChannelResponse := assertFundingMsgSent( - t, bob.msgChan, "AcceptChannel", - ).(*lnwire.AcceptChannel) - accepts = append(accepts, acceptChannelResponse) - continue - } - - // For the last channel, Bob should answer with an error. - lastOpen = openChannelReq - _ = assertFundingMsgSent( - t, bob.msgChan, "Error", - ).(*lnwire.Error) - - } - - // Forward the responses to Alice. - var signs []*lnwire.FundingSigned - for _, accept := range accepts { - alice.fundingMgr.ProcessFundingMsg(accept, bob) - - // Alice responds with a FundingCreated message. - fundingCreated := assertFundingMsgSent( - t, alice.msgChan, "FundingCreated", - ).(*lnwire.FundingCreated) - - // Give the message to Bob. - bob.fundingMgr.ProcessFundingMsg(fundingCreated, alice) - - // Finally, Bob should send the FundingSigned message. - fundingSigned := assertFundingMsgSent( - t, bob.msgChan, "FundingSigned", - ).(*lnwire.FundingSigned) - - signs = append(signs, fundingSigned) - } - - // Sending another init request from Alice should still make Bob - // respond with an error. 
- bob.fundingMgr.ProcessFundingMsg(lastOpen, alice) - _ = assertFundingMsgSent( - t, bob.msgChan, "Error", - ).(*lnwire.Error) - - // Give the FundingSigned messages to Alice. - var txs []*wire.MsgTx - for i, sign := range signs { - alice.fundingMgr.ProcessFundingMsg(sign, bob) - - // Alice should send a status update for each channel, and - // publish a funding tx to the network. - var pendingUpdate *lnrpc.OpenStatusUpdate - select { - case pendingUpdate = <-initReqs[i].updates: - case <-time.After(time.Second * 5): - t.Fatalf("alice did not send OpenStatusUpdate_ChanPending") - } - - _, ok := pendingUpdate.Update.(*lnrpc.OpenStatusUpdate_ChanPending) - if !ok { - t.Fatal("OpenStatusUpdate was not OpenStatusUpdate_ChanPending") - } - - select { - case tx := <-alice.publTxChan: - txs = append(txs, tx) - case <-time.After(time.Second * 5): - t.Fatalf("alice did not publish funding tx") - } - - } - - // Sending another init request from Alice should still make Bob - // respond with an error, since the funding transactions are not - // confirmed yet, - bob.fundingMgr.ProcessFundingMsg(lastOpen, alice) - _ = assertFundingMsgSent( - t, bob.msgChan, "Error", - ).(*lnwire.Error) - - // Notify that the transactions were mined. - for i := 0; i < maxPending; i++ { - alice.mockNotifier.oneConfChannel <- &chainntnfs.TxConfirmation{ - Tx: txs[i], - } - bob.mockNotifier.oneConfChannel <- &chainntnfs.TxConfirmation{ - Tx: txs[i], - } - - // Expect both to be sending FundingLocked. - _ = assertFundingMsgSent( - t, alice.msgChan, "FundingLocked", - ).(*lnwire.FundingLocked) - - _ = assertFundingMsgSent( - t, bob.msgChan, "FundingLocked", - ).(*lnwire.FundingLocked) - - } - - // Now opening another channel should work. - bob.fundingMgr.ProcessFundingMsg(lastOpen, alice) - - // Bob should answer with an AcceptChannel message. 
- _ = assertFundingMsgSent( - t, bob.msgChan, "AcceptChannel", - ).(*lnwire.AcceptChannel) -} - -// TestFundingManagerRejectPush checks behaviour of 'rejectpush' -// option, namely that non-zero incoming push amounts are disabled. -func TestFundingManagerRejectPush(t *testing.T) { - t.Parallel() - - // Enable 'rejectpush' option and initialize funding managers. - alice, bob := setupFundingManagers( - t, func(cfg *fundingConfig) { - cfg.RejectPush = true - }, - ) - defer tearDownFundingManagers(t, alice, bob) - - // Create a funding request and start the workflow. - updateChan := make(chan *lnrpc.OpenStatusUpdate) - errChan := make(chan er.R, 1) - initReq := &openChanReq{ - targetPubkey: bob.privKey.PubKey(), - chainHash: *fundingNetParams.GenesisHash, - localFundingAmt: 500000, - pushAmt: lnwire.NewMSatFromSatoshis(10), - private: true, - updates: updateChan, - err: errChan, - } - - alice.fundingMgr.initFundingWorkflow(bob, initReq) - - // Alice should have sent the OpenChannel message to Bob. - var aliceMsg lnwire.Message - select { - case aliceMsg = <-alice.msgChan: - case err := <-initReq.err: - t.Fatalf("error init funding workflow: %v", err) - case <-time.After(time.Second * 5): - t.Fatalf("alice did not send OpenChannel message") - } - - openChannelReq, ok := aliceMsg.(*lnwire.OpenChannel) - if !ok { - errorMsg, gotError := aliceMsg.(*lnwire.Error) - if gotError { - t.Fatalf("expected OpenChannel to be sent "+ - "from bob, instead got error: %v", - errorMsg.Error()) - } - t.Fatalf("expected OpenChannel to be sent from "+ - "alice, instead got %T", aliceMsg) - } - - // Let Bob handle the init message. - bob.fundingMgr.ProcessFundingMsg(openChannelReq, alice) - - // Assert Bob responded with an ErrNonZeroPushAmount error. 
- err := assertFundingMsgSent(t, bob.msgChan, "Error").(*lnwire.Error) - if !strings.Contains(err.Error(), "non-zero push amounts are disabled") { - t.Fatalf("expected ErrNonZeroPushAmount error, got \"%v\"", - err.Error()) - } -} - -// TestFundingManagerMaxConfs ensures that we don't accept a funding proposal -// that proposes a MinAcceptDepth greater than the maximum number of -// confirmations we're willing to accept. -func TestFundingManagerMaxConfs(t *testing.T) { - t.Parallel() - - alice, bob := setupFundingManagers(t) - defer tearDownFundingManagers(t, alice, bob) - - // Create a funding request and start the workflow. - updateChan := make(chan *lnrpc.OpenStatusUpdate) - errChan := make(chan er.R, 1) - initReq := &openChanReq{ - targetPubkey: bob.privKey.PubKey(), - chainHash: *fundingNetParams.GenesisHash, - localFundingAmt: 500000, - pushAmt: lnwire.NewMSatFromSatoshis(10), - private: false, - updates: updateChan, - err: errChan, - } - - alice.fundingMgr.initFundingWorkflow(bob, initReq) - - // Alice should have sent the OpenChannel message to Bob. - var aliceMsg lnwire.Message - select { - case aliceMsg = <-alice.msgChan: - case err := <-initReq.err: - t.Fatalf("error init funding workflow: %v", err) - case <-time.After(time.Second * 5): - t.Fatalf("alice did not send OpenChannel message") - } - - openChannelReq, ok := aliceMsg.(*lnwire.OpenChannel) - if !ok { - errorMsg, gotError := aliceMsg.(*lnwire.Error) - if gotError { - t.Fatalf("expected OpenChannel to be sent "+ - "from bob, instead got error: %v", - errorMsg.Error()) - } - t.Fatalf("expected OpenChannel to be sent from "+ - "alice, instead got %T", aliceMsg) - } - - // Let Bob handle the init message. - bob.fundingMgr.ProcessFundingMsg(openChannelReq, alice) - - // Bob should answer with an AcceptChannel message. 
- acceptChannelResponse := assertFundingMsgSent( - t, bob.msgChan, "AcceptChannel", - ).(*lnwire.AcceptChannel) - - // Modify the AcceptChannel message Bob is proposing to including a - // MinAcceptDepth Alice won't be willing to accept. - acceptChannelResponse.MinAcceptDepth = chainntnfs.MaxNumConfs + 1 - - alice.fundingMgr.ProcessFundingMsg(acceptChannelResponse, bob) - - // Alice should respond back with an error indicating MinAcceptDepth is - // too large. - err := assertFundingMsgSent(t, alice.msgChan, "Error").(*lnwire.Error) - if !strings.Contains(err.Error(), "minimum depth") { - t.Fatalf("expected ErrNumConfsTooLarge, got \"%v\"", - err.Error()) - } -} - -// TestFundingManagerFundAll tests that we can initiate a funding request to -// use the funds remaining in the wallet. This should produce a funding tx with -// no change output. -func TestFundingManagerFundAll(t *testing.T) { - t.Parallel() - - // We set up our mock wallet to control a list of UTXOs that sum to - // less than the max channel size. - allCoins := []*lnwallet.Utxo{ - { - AddressType: lnwallet.WitnessPubKey, - Value: btcutil.Amount( - 0.05 * btcutil.UnitsPerCoinF(), - ), - PkScript: mock.CoinPkScript, - OutPoint: wire.OutPoint{ - Hash: chainhash.Hash{}, - Index: 0, - }, - }, - { - AddressType: lnwallet.WitnessPubKey, - Value: btcutil.Amount( - 0.06 * btcutil.UnitsPerCoinF(), - ), - PkScript: mock.CoinPkScript, - OutPoint: wire.OutPoint{ - Hash: chainhash.Hash{}, - Index: 1, - }, - }, - } - - tests := []struct { - spendAmt btcutil.Amount - change bool - }{ - { - // We will spend all the funds in the wallet, and - // expects no change output. - spendAmt: btcutil.Amount( - 0.11 * btcutil.UnitsPerCoinF(), - ), - change: false, - }, - { - // We spend a little less than the funds in the wallet, - // so a change output should be created. 
- spendAmt: btcutil.Amount( - 0.10 * btcutil.UnitsPerCoinF(), - ), - change: true, - }, - } - - for _, test := range tests { - alice, bob := setupFundingManagers(t) - defer tearDownFundingManagers(t, alice, bob) - - alice.fundingMgr.cfg.Wallet.WalletController.(*mock.WalletController).Utxos = allCoins - - // We will consume the channel updates as we go, so no - // buffering is needed. - updateChan := make(chan *lnrpc.OpenStatusUpdate) - - // Initiate a fund channel, and inspect the funding tx. - pushAmt := btcutil.Amount(0) - fundingTx := fundChannel( - t, alice, bob, test.spendAmt, pushAmt, true, 1, - updateChan, true, - ) - - // Check whether the expected change output is present. - if test.change && len(fundingTx.TxOut) != 2 { - t.Fatalf("expected 2 outputs, had %v", - len(fundingTx.TxOut)) - } - - if !test.change && len(fundingTx.TxOut) != 1 { - t.Fatalf("expected 1 output, had %v", - len(fundingTx.TxOut)) - } - - // Inputs should be all funds in the wallet. - if len(fundingTx.TxIn) != len(allCoins) { - t.Fatalf("Had %d inputs, expected %d", - len(fundingTx.TxIn), len(allCoins)) - } - - for i, txIn := range fundingTx.TxIn { - if txIn.PreviousOutPoint != allCoins[i].OutPoint { - t.Fatalf("expected outpoint to be %v, was %v", - allCoins[i].OutPoint, - txIn.PreviousOutPoint) - } - } - } -} - -// TestGetUpfrontShutdown tests different combinations of inputs for getting a -// shutdown script. It varies whether the peer has the feature set, whether -// the user has provided a script and our local configuration to test that -// GetUpfrontShutdownScript returns the expected outcome. 
-func TestGetUpfrontShutdownScript(t *testing.T) { - upfrontScript := []byte("upfront script") - generatedScript := []byte("generated script") - - getScript := func() (lnwire.DeliveryAddress, er.R) { - return generatedScript, nil - } - - tests := []struct { - name string - getScript func() (lnwire.DeliveryAddress, er.R) - upfrontScript lnwire.DeliveryAddress - peerEnabled bool - localEnabled bool - expectedScript lnwire.DeliveryAddress - expectedErr *er.ErrorCode - }{ - { - name: "peer disabled, no shutdown", - getScript: getScript, - }, - { - name: "peer disabled, upfront provided", - upfrontScript: upfrontScript, - expectedErr: errUpfrontShutdownScriptNotSupported, - }, - { - name: "peer enabled, upfront provided", - upfrontScript: upfrontScript, - peerEnabled: true, - expectedScript: upfrontScript, - }, - { - name: "peer enabled, local disabled", - peerEnabled: true, - }, - { - name: "local enabled, no upfront script", - getScript: getScript, - peerEnabled: true, - localEnabled: true, - expectedScript: generatedScript, - }, - { - name: "local enabled, upfront script", - peerEnabled: true, - upfrontScript: upfrontScript, - localEnabled: true, - expectedScript: upfrontScript, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - var mockPeer testNode - - // If the remote peer in the test should support upfront shutdown, - // add the feature bit. 
- if test.peerEnabled { - mockPeer.remoteFeatures = []lnwire.FeatureBit{ - lnwire.UpfrontShutdownScriptOptional, - } - } - - addr, err := getUpfrontShutdownScript( - test.localEnabled, &mockPeer, test.upfrontScript, - test.getScript, - ) - if !er.Cis(test.expectedErr, err) { - t.Fatalf("got: %v, expected error: %v", err, test.expectedErr) - } - - if !bytes.Equal(addr, test.expectedScript) { - t.Fatalf("expected address: %x, got: %x", - test.expectedScript, addr) - } - - }) - } -} - -func expectOpenChannelMsg(t *testing.T, msgChan chan lnwire.Message) *lnwire.OpenChannel { - var msg lnwire.Message - select { - case msg = <-msgChan: - case <-time.After(time.Second * 5): - t.Fatalf("node did not send OpenChannel message") - } - - openChannelReq, ok := msg.(*lnwire.OpenChannel) - if !ok { - errorMsg, gotError := msg.(*lnwire.Error) - if gotError { - t.Fatalf("expected OpenChannel to be sent "+ - "from bob, instead got error: %v", - errorMsg.Error()) - } - t.Fatalf("expected OpenChannel to be sent, instead got %T", - msg) - } - - return openChannelReq -} - -func TestMaxChannelSizeConfig(t *testing.T) { - t.Parallel() - - // Create a set of funding managers that will reject wumbo - // channels but set --maxchansize explicitly lower than soft-limit. - // Verify that wumbo rejecting funding managers will respect --maxchansize - // below 16777215 satoshi (MaxFundingAmount) limit. - alice, bob := setupFundingManagers(t, func(cfg *fundingConfig) { - cfg.NoWumboChans = true - cfg.MaxChanSize = MaxFundingAmount - 1 - }) - - // Attempt to create a channel above the limit - // imposed by --maxchansize, which should be rejected. 
- updateChan := make(chan *lnrpc.OpenStatusUpdate) - errChan := make(chan er.R, 1) - initReq := &openChanReq{ - targetPubkey: bob.privKey.PubKey(), - chainHash: *fundingNetParams.GenesisHash, - localFundingAmt: MaxFundingAmount, - pushAmt: lnwire.NewMSatFromSatoshis(0), - private: false, - updates: updateChan, - err: errChan, - } - - // After processing the funding open message, bob should respond with - // an error rejecting the channel that exceeds size limit. - alice.fundingMgr.initFundingWorkflow(bob, initReq) - openChanMsg := expectOpenChannelMsg(t, alice.msgChan) - bob.fundingMgr.ProcessFundingMsg(openChanMsg, alice) - assertErrorSent(t, bob.msgChan) - - // Create a set of funding managers that will reject wumbo - // channels but set --maxchansize explicitly higher than soft-limit - // A --maxchansize greater than this limit should have no effect. - tearDownFundingManagers(t, alice, bob) - alice, bob = setupFundingManagers(t, func(cfg *fundingConfig) { - cfg.NoWumboChans = true - cfg.MaxChanSize = MaxFundingAmount + 1 - }) - - // We expect Bob to respond with an Accept channel message. - alice.fundingMgr.initFundingWorkflow(bob, initReq) - openChanMsg = expectOpenChannelMsg(t, alice.msgChan) - bob.fundingMgr.ProcessFundingMsg(openChanMsg, alice) - assertFundingMsgSent(t, bob.msgChan, "AcceptChannel") - - // Verify that wumbo accepting funding managers will respect --maxchansize - // Create the funding managers, this time allowing - // wumbo channels but setting --maxchansize explicitly. - tearDownFundingManagers(t, alice, bob) - alice, bob = setupFundingManagers(t, func(cfg *fundingConfig) { - cfg.NoWumboChans = false - cfg.MaxChanSize = btcutil.Amount(100000000) - }) - - // Attempt to create a channel above the limit - // imposed by --maxchansize, which should be rejected. 
- initReq.localFundingAmt = btcutil.UnitsPerCoin() + 1 - - // After processing the funding open message, bob should respond with - // an error rejecting the channel that exceeds size limit. - alice.fundingMgr.initFundingWorkflow(bob, initReq) - openChanMsg = expectOpenChannelMsg(t, alice.msgChan) - bob.fundingMgr.ProcessFundingMsg(openChanMsg, alice) - assertErrorSent(t, bob.msgChan) -} - -// TestWumboChannelConfig tests that the funding manager will respect the wumbo -// channel config param when creating or accepting new channels. -func TestWumboChannelConfig(t *testing.T) { - t.Parallel() - - // First we'll create a set of funding managers that will reject wumbo - // channels. - alice, bob := setupFundingManagers(t, func(cfg *fundingConfig) { - cfg.NoWumboChans = true - }) - - // If we attempt to initiate a new funding open request to Alice, - // that's below the wumbo channel mark, we should be able to start the - // funding process w/o issue. - updateChan := make(chan *lnrpc.OpenStatusUpdate) - errChan := make(chan er.R, 1) - initReq := &openChanReq{ - targetPubkey: bob.privKey.PubKey(), - chainHash: *fundingNetParams.GenesisHash, - localFundingAmt: MaxFundingAmount, - pushAmt: lnwire.NewMSatFromSatoshis(0), - private: false, - updates: updateChan, - err: errChan, - } - - // We expect Bob to respond with an Accept channel message. - alice.fundingMgr.initFundingWorkflow(bob, initReq) - openChanMsg := expectOpenChannelMsg(t, alice.msgChan) - bob.fundingMgr.ProcessFundingMsg(openChanMsg, alice) - assertFundingMsgSent(t, bob.msgChan, "AcceptChannel") - - // We'll now attempt to create a channel above the wumbo mark, which - // should be rejected. - initReq.localFundingAmt = btcutil.UnitsPerCoin() - - // After processing the funding open message, bob should respond with - // an error rejecting the channel. 
- alice.fundingMgr.initFundingWorkflow(bob, initReq) - openChanMsg = expectOpenChannelMsg(t, alice.msgChan) - bob.fundingMgr.ProcessFundingMsg(openChanMsg, alice) - assertErrorSent(t, bob.msgChan) - - // Next, we'll re-create the funding managers, but this time allowing - // wumbo channels explicitly. - tearDownFundingManagers(t, alice, bob) - alice, bob = setupFundingManagers(t, func(cfg *fundingConfig) { - cfg.NoWumboChans = false - cfg.MaxChanSize = MaxBtcFundingAmountWumbo - }) - - // We should now be able to initiate a wumbo channel funding w/o any - // issues. - alice.fundingMgr.initFundingWorkflow(bob, initReq) - openChanMsg = expectOpenChannelMsg(t, alice.msgChan) - bob.fundingMgr.ProcessFundingMsg(openChanMsg, alice) - assertFundingMsgSent(t, bob.msgChan, "AcceptChannel") -} diff --git a/lnd/fuzz/brontide/fuzz_utils.go b/lnd/fuzz/brontide/fuzz_utils.go deleted file mode 100644 index 870a0a4c..00000000 --- a/lnd/fuzz/brontide/fuzz_utils.go +++ /dev/null @@ -1,144 +0,0 @@ -// +build gofuzz - -package brontidefuzz - -import ( - "encoding/hex" - "fmt" - - "github.com/pkt-cash/pktd/btcec" - "github.com/davecgh/go-spew/spew" - "github.com/pkt-cash/pktd/lnd/brontide" - "github.com/pkt-cash/pktd/lnd/keychain" -) - -var ( - initBytes = []byte{ - 0x81, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda, - 0x63, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17, - 0xd, 0xe7, 0x95, 0xe4, 0xb7, 0x25, 0xb8, 0x4d, - 0x1e, 0xb, 0x4c, 0xfd, 0x9e, 0xc5, 0x8c, 0xe9, - } - - respBytes = []byte{ - 0xaa, 0xb6, 0x37, 0xd9, 0xfc, 0xd2, 0xc6, 0xda, - 0x63, 0x59, 0xe6, 0x99, 0x31, 0x13, 0xa1, 0x17, - 0xd, 0xe7, 0x95, 0xe9, 0xb7, 0x25, 0xb8, 0x4d, - 0x1e, 0xb, 0x4c, 0xf9, 0x9e, 0xc5, 0x8c, 0xe9, - } - - // Returns the initiator's ephemeral private key. 
- initEphemeral = brontide.EphemeralGenerator(func() (*btcec.PrivateKey, er.R) { - e := "121212121212121212121212121212121212121212121212121212" + - "1212121212" - eBytes, err := util.DecodeHex(e) - if err != nil { - return nil, err - } - - priv, _ := btcec.PrivKeyFromBytes(btcec.S256(), eBytes) - return priv, nil - }) - - // Returns the responder's ephemeral private key. - respEphemeral = brontide.EphemeralGenerator(func() (*btcec.PrivateKey, er.R) { - e := "222222222222222222222222222222222222222222222222222" + - "2222222222222" - eBytes, err := util.DecodeHex(e) - if err != nil { - return nil, err - } - - priv, _ := btcec.PrivKeyFromBytes(btcec.S256(), eBytes) - return priv, nil - }) -) - -// completeHandshake takes two brontide machines (initiator, responder) -// and completes the brontide handshake between them. If any part of the -// handshake fails, this function will panic. -func completeHandshake(initiator, responder *brontide.Machine) { - if err := handshake(initiator, responder); err != nil { - nilAndPanic(initiator, responder, err) - } -} - -// handshake actually completes the brontide handshake and bubbles up -// an error to the calling function. -func handshake(initiator, responder *brontide.Machine) er.R { - // Generate ActOne and send to the responder. - actOne, err := initiator.GenActOne() - if err != nil { - return err - } - - if err := responder.RecvActOne(actOne); err != nil { - return err - } - - // Generate ActTwo and send to initiator. - actTwo, err := responder.GenActTwo() - if err != nil { - return err - } - - if err := initiator.RecvActTwo(actTwo); err != nil { - return err - } - - // Generate ActThree and send to responder. - actThree, err := initiator.GenActThree() - if err != nil { - return err - } - - return responder.RecvActThree(actThree) -} - -// nilAndPanic first nils the initiator and responder's Curve fields and then -// panics. 
-func nilAndPanic(initiator, responder *brontide.Machine, err error) { - if initiator != nil { - initiator.SetCurveToNil() - } - if responder != nil { - responder.SetCurveToNil() - } - panic(er.Errorf("error: %v, initiator: %v, responder: %v", err, - spew.Sdump(initiator), spew.Sdump(responder))) -} - -// getBrontideMachines returns two brontide machines that use random keys -// everywhere. -func getBrontideMachines() (*brontide.Machine, *brontide.Machine) { - initPriv, _ := btcec.NewPrivateKey(btcec.S256()) - respPriv, _ := btcec.NewPrivateKey(btcec.S256()) - respPub := (*btcec.PublicKey)(&respPriv.PublicKey) - - initPrivECDH := &keychain.PrivKeyECDH{PrivKey: initPriv} - respPrivECDH := &keychain.PrivKeyECDH{PrivKey: respPriv} - - initiator := brontide.NewBrontideMachine(true, initPrivECDH, respPub) - responder := brontide.NewBrontideMachine(false, respPrivECDH, nil) - - return initiator, responder -} - -// getStaticBrontideMachines returns two brontide machines that use static keys -// everywhere. -func getStaticBrontideMachines() (*brontide.Machine, *brontide.Machine) { - initPriv, _ := btcec.PrivKeyFromBytes(btcec.S256(), initBytes) - respPriv, respPub := btcec.PrivKeyFromBytes(btcec.S256(), respBytes) - - initPrivECDH := &keychain.PrivKeyECDH{PrivKey: initPriv} - respPrivECDH := &keychain.PrivKeyECDH{PrivKey: respPriv} - - initiator := brontide.NewBrontideMachine( - true, initPrivECDH, respPub, initEphemeral, - ) - responder := brontide.NewBrontideMachine( - false, respPrivECDH, nil, respEphemeral, - ) - - return initiator, responder -} diff --git a/lnd/fuzz/brontide/random_actone.go b/lnd/fuzz/brontide/random_actone.go deleted file mode 100644 index 9a84367d..00000000 --- a/lnd/fuzz/brontide/random_actone.go +++ /dev/null @@ -1,30 +0,0 @@ -// +build gofuzz - -package brontidefuzz - -import ( - "github.com/pkt-cash/pktd/lnd/brontide" -) - -// Fuzz_random_actone is a go-fuzz harness for ActOne in the brontide -// handshake. 
-func Fuzz_random_actone(data []byte) int { - // Check if data is large enough. - if len(data) < brontide.ActOneSize { - return 1 - } - - // This will return brontide machines with random keys. - _, responder := getBrontideMachines() - - // Copy data into [ActOneSize]byte. - var actOne [brontide.ActOneSize]byte - copy(actOne[:], data) - - // Responder receives ActOne, should fail on the MAC check. - if err := responder.RecvActOne(actOne); err == nil { - nilAndPanic(nil, responder, nil) - } - - return 1 -} diff --git a/lnd/fuzz/brontide/random_actthree.go b/lnd/fuzz/brontide/random_actthree.go deleted file mode 100644 index 11d1597b..00000000 --- a/lnd/fuzz/brontide/random_actthree.go +++ /dev/null @@ -1,50 +0,0 @@ -// +build gofuzz - -package brontidefuzz - -import ( - "github.com/pkt-cash/pktd/lnd/brontide" -) - -// Fuzz_random_actthree is a go-fuzz harness for ActThree in the brontide -// handshake. -func Fuzz_random_actthree(data []byte) int { - // Check if data is large enough. - if len(data) < brontide.ActThreeSize { - return 1 - } - - // This will return brontide machines with random keys. - initiator, responder := getBrontideMachines() - - // Generate ActOne and send to the responder. - actOne, err := initiator.GenActOne() - if err != nil { - nilAndPanic(initiator, responder, err) - } - - // Receiving ActOne should succeed, so we panic on error. - if err := responder.RecvActOne(actOne); err != nil { - nilAndPanic(initiator, responder, err) - } - - // Generate ActTwo - this is not sent to the initiator because nothing is - // done with the initiator after this point and it would slow down fuzzing. - // GenActTwo needs to be called to set the appropriate state in the - // responder machine. - _, err = responder.GenActTwo() - if err != nil { - nilAndPanic(initiator, responder, err) - } - - // Copy data into [ActThreeSize]byte. - var actThree [brontide.ActThreeSize]byte - copy(actThree[:], data) - - // Responder receives ActThree, should fail on the MAC check. 
- if err := responder.RecvActThree(actThree); err == nil { - nilAndPanic(initiator, responder, nil) - } - - return 1 -} diff --git a/lnd/fuzz/brontide/random_acttwo.go b/lnd/fuzz/brontide/random_acttwo.go deleted file mode 100644 index 664aa5f2..00000000 --- a/lnd/fuzz/brontide/random_acttwo.go +++ /dev/null @@ -1,39 +0,0 @@ -// +build gofuzz - -package brontidefuzz - -import ( - "github.com/pkt-cash/pktd/lnd/brontide" -) - -// Fuzz_random_acttwo is a go-fuzz harness for ActTwo in the brontide -// handshake. -func Fuzz_random_acttwo(data []byte) int { - // Check if data is large enough. - if len(data) < brontide.ActTwoSize { - return 1 - } - - // This will return brontide machines with random keys. - initiator, _ := getBrontideMachines() - - // Generate ActOne - this isn't sent to the responder because nothing is - // done with the responder machine and this would slow down fuzzing. - // GenActOne needs to be called to set the appropriate state in the - // initiator machine. - _, err := initiator.GenActOne() - if err != nil { - nilAndPanic(initiator, nil, err) - } - - // Copy data into [ActTwoSize]byte. - var actTwo [brontide.ActTwoSize]byte - copy(actTwo[:], data) - - // Initiator receives ActTwo, should fail. - if err := initiator.RecvActTwo(actTwo); err == nil { - nilAndPanic(initiator, nil, nil) - } - - return 1 -} diff --git a/lnd/fuzz/brontide/random_init_decrypt.go b/lnd/fuzz/brontide/random_init_decrypt.go deleted file mode 100644 index 3328a2b6..00000000 --- a/lnd/fuzz/brontide/random_init_decrypt.go +++ /dev/null @@ -1,27 +0,0 @@ -// +build gofuzz - -package brontidefuzz - -import ( - "bytes" -) - -// Fuzz_random_init_decrypt is a go-fuzz harness that decrypts arbitrary data -// with the initiator. -func Fuzz_random_init_decrypt(data []byte) int { - // This will return brontide machines with random keys. - initiator, responder := getBrontideMachines() - - // Complete the brontide handshake. 
- completeHandshake(initiator, responder) - - // Create a reader with the byte array. - r := bytes.NewReader(data) - - // Decrypt the encrypted message using ReadMessage w/ initiator machine. - if _, err := initiator.ReadMessage(r); err == nil { - nilAndPanic(initiator, responder, nil) - } - - return 1 -} diff --git a/lnd/fuzz/brontide/random_init_enc_dec.go b/lnd/fuzz/brontide/random_init_enc_dec.go deleted file mode 100644 index 6f1a7312..00000000 --- a/lnd/fuzz/brontide/random_init_enc_dec.go +++ /dev/null @@ -1,48 +0,0 @@ -// +build gofuzz - -package brontidefuzz - -import ( - "bytes" - "math" -) - -// Fuzz_random_init_enc_dec is a go-fuzz harness that tests round-trip -// encryption and decryption between the initiator and the responder. -func Fuzz_random_init_enc_dec(data []byte) int { - // Ensure that length of message is not greater than max allowed size. - if len(data) > math.MaxUint16 { - return 1 - } - - // This will return brontide machines with random keys. - initiator, responder := getBrontideMachines() - - // Complete the brontide handshake. - completeHandshake(initiator, responder) - - var b bytes.Buffer - - // Encrypt the message using WriteMessage w/ initiator machine. - if err := initiator.WriteMessage(data); err != nil { - nilAndPanic(initiator, responder, err) - } - - // Flush the encrypted message w/ initiator machine. - if _, err := initiator.Flush(&b); err != nil { - nilAndPanic(initiator, responder, err) - } - - // Decrypt the ciphertext using ReadMessage w/ responder machine. - plaintext, err := responder.ReadMessage(&b) - if err != nil { - nilAndPanic(initiator, responder, err) - } - - // Check that the decrypted message and the original message are equal. 
- if !bytes.Equal(data, plaintext) { - nilAndPanic(initiator, responder, nil) - } - - return 1 -} diff --git a/lnd/fuzz/brontide/random_init_encrypt.go b/lnd/fuzz/brontide/random_init_encrypt.go deleted file mode 100644 index 76f5dacd..00000000 --- a/lnd/fuzz/brontide/random_init_encrypt.go +++ /dev/null @@ -1,37 +0,0 @@ -// +build gofuzz - -package brontidefuzz - -import ( - "bytes" - "math" -) - -// Fuzz_random_init_encrypt is a go-fuzz harness that encrypts arbitrary data -// with the initiator. -func Fuzz_random_init_encrypt(data []byte) int { - // Ensure that length of message is not greater than max allowed size. - if len(data) > math.MaxUint16 { - return 1 - } - - // This will return brontide machines with random keys. - initiator, responder := getBrontideMachines() - - // Complete the brontide handshake. - completeHandshake(initiator, responder) - - var b bytes.Buffer - - // Encrypt the message using WriteMessage w/ initiator machine. - if err := initiator.WriteMessage(data); err != nil { - nilAndPanic(initiator, responder, err) - } - - // Flush the encrypted message w/ initiator machine. - if _, err := initiator.Flush(&b); err != nil { - nilAndPanic(initiator, responder, err) - } - - return 1 -} diff --git a/lnd/fuzz/brontide/random_resp_decrypt.go b/lnd/fuzz/brontide/random_resp_decrypt.go deleted file mode 100644 index 1ae40bd2..00000000 --- a/lnd/fuzz/brontide/random_resp_decrypt.go +++ /dev/null @@ -1,27 +0,0 @@ -// +build gofuzz - -package brontidefuzz - -import ( - "bytes" -) - -// Fuzz_random_resp_decrypt is a go-fuzz harness that decrypts arbitrary data -// with the responder. -func Fuzz_random_resp_decrypt(data []byte) int { - // This will return brontide machines with random keys. - initiator, responder := getBrontideMachines() - - // Complete the brontide handshake. - completeHandshake(initiator, responder) - - // Create a reader with the byte array. 
- r := bytes.NewReader(data) - - // Decrypt the encrypted message using ReadMessage w/ responder machine. - if _, err := responder.ReadMessage(r); err == nil { - nilAndPanic(initiator, responder, nil) - } - - return 1 -} diff --git a/lnd/fuzz/brontide/random_resp_enc_dec.go b/lnd/fuzz/brontide/random_resp_enc_dec.go deleted file mode 100644 index f84e7c47..00000000 --- a/lnd/fuzz/brontide/random_resp_enc_dec.go +++ /dev/null @@ -1,48 +0,0 @@ -// +build gofuzz - -package brontidefuzz - -import ( - "bytes" - "math" -) - -// Fuzz_random_resp_enc_dec is a go-fuzz harness that tests round-trip -// encryption and decryption between the responder and the initiator. -func Fuzz_random_resp_enc_dec(data []byte) int { - // Ensure that length of message is not greater than max allowed size. - if len(data) > math.MaxUint16 { - return 1 - } - - // This will return brontide machines with random keys. - initiator, responder := getBrontideMachines() - - // Complete the brontide handshake. - completeHandshake(initiator, responder) - - var b bytes.Buffer - - // Encrypt the message using WriteMessage w/ responder machine. - if err := responder.WriteMessage(data); err != nil { - nilAndPanic(initiator, responder, err) - } - - // Flush the encrypted message w/ responder machine. - if _, err := responder.Flush(&b); err != nil { - nilAndPanic(initiator, responder, err) - } - - // Decrypt the ciphertext using ReadMessage w/ initiator machine. - plaintext, err := initiator.ReadMessage(&b) - if err != nil { - nilAndPanic(initiator, responder, err) - } - - // Check that the decrypted message and the original message are equal. 
- if !bytes.Equal(data, plaintext) { - nilAndPanic(initiator, responder, nil) - } - - return 1 -} diff --git a/lnd/fuzz/brontide/random_resp_encrypt.go b/lnd/fuzz/brontide/random_resp_encrypt.go deleted file mode 100644 index 5ac9abad..00000000 --- a/lnd/fuzz/brontide/random_resp_encrypt.go +++ /dev/null @@ -1,37 +0,0 @@ -// +build gofuzz - -package brontidefuzz - -import ( - "bytes" - "math" -) - -// Fuzz_random_resp_encrypt is a go-fuzz harness that encrypts arbitrary data -// with the responder. -func Fuzz_random_resp_encrypt(data []byte) int { - // Ensure that length of message is not greater than max allowed size. - if len(data) > math.MaxUint16 { - return 1 - } - - // This will return brontide machines with random keys. - initiator, responder := getBrontideMachines() - - // Complete the brontide handshake. - completeHandshake(initiator, responder) - - var b bytes.Buffer - - // Encrypt the message using WriteMessage w/ responder machine. - if err := responder.WriteMessage(data); err != nil { - nilAndPanic(initiator, responder, err) - } - - // Flush the encrypted message w/ responder machine. - if _, err := responder.Flush(&b); err != nil { - nilAndPanic(initiator, responder, err) - } - - return 1 -} diff --git a/lnd/fuzz/brontide/static_actone.go b/lnd/fuzz/brontide/static_actone.go deleted file mode 100644 index 89eb2434..00000000 --- a/lnd/fuzz/brontide/static_actone.go +++ /dev/null @@ -1,30 +0,0 @@ -// +build gofuzz - -package brontidefuzz - -import ( - "github.com/pkt-cash/pktd/lnd/brontide" -) - -// Fuzz_static_actone is a go-fuzz harness for ActOne in the brontide -// handshake. -func Fuzz_static_actone(data []byte) int { - // Check if data is large enough. - if len(data) < brontide.ActOneSize { - return 1 - } - - // This will return brontide machines with static keys. - _, responder := getStaticBrontideMachines() - - // Copy data into [ActOneSize]byte. 
- var actOne [brontide.ActOneSize]byte - copy(actOne[:], data) - - // Responder receives ActOne, should fail. - if err := responder.RecvActOne(actOne); err == nil { - nilAndPanic(nil, responder, nil) - } - - return 1 -} diff --git a/lnd/fuzz/brontide/static_actthree.go b/lnd/fuzz/brontide/static_actthree.go deleted file mode 100644 index e4e34de1..00000000 --- a/lnd/fuzz/brontide/static_actthree.go +++ /dev/null @@ -1,50 +0,0 @@ -// +build gofuzz - -package brontidefuzz - -import ( - "github.com/pkt-cash/pktd/lnd/brontide" -) - -// Fuzz_static_actthree is a go-fuzz harness for ActThree in the brontide -// handshake. -func Fuzz_static_actthree(data []byte) int { - // Check if data is large enough. - if len(data) < brontide.ActThreeSize { - return 1 - } - - // This will return brontide machines with static keys. - initiator, responder := getStaticBrontideMachines() - - // Generate ActOne and send to the responder. - actOne, err := initiator.GenActOne() - if err != nil { - nilAndPanic(initiator, responder, err) - } - - // Receiving ActOne should succeed, so we panic on error. - if err := responder.RecvActOne(actOne); err != nil { - nilAndPanic(initiator, responder, err) - } - - // Generate ActTwo - this is not sent to the initiator because nothing is - // done with the initiator after this point and it would slow down fuzzing. - // GenActTwo needs to be called to set the appropriate state in the responder - // machine. - _, err = responder.GenActTwo() - if err != nil { - nilAndPanic(initiator, responder, err) - } - - // Copy data into [ActThreeSize]byte. - var actThree [brontide.ActThreeSize]byte - copy(actThree[:], data) - - // Responder receives ActThree, should fail. 
- if err := responder.RecvActThree(actThree); err == nil { - nilAndPanic(initiator, responder, nil) - } - - return 1 -} diff --git a/lnd/fuzz/brontide/static_acttwo.go b/lnd/fuzz/brontide/static_acttwo.go deleted file mode 100644 index 51da4fc2..00000000 --- a/lnd/fuzz/brontide/static_acttwo.go +++ /dev/null @@ -1,39 +0,0 @@ -// +build gofuzz - -package brontidefuzz - -import ( - "github.com/pkt-cash/pktd/lnd/brontide" -) - -// Fuzz_static_acttwo is a go-fuzz harness for ActTwo in the brontide -// handshake. -func Fuzz_static_acttwo(data []byte) int { - // Check if data is large enough. - if len(data) < brontide.ActTwoSize { - return 1 - } - - // This will return brontide machines with static keys. - initiator, _ := getStaticBrontideMachines() - - // Generate ActOne - this isn't sent to the responder because nothing is - // done with the responder machine and this would slow down fuzzing. - // GenActOne needs to be called to set the appropriate state in the initiator - // machine. - _, err := initiator.GenActOne() - if err != nil { - nilAndPanic(initiator, nil, err) - } - - // Copy data into [ActTwoSize]byte. - var actTwo [brontide.ActTwoSize]byte - copy(actTwo[:], data) - - // Initiator receives ActTwo, should fail. - if err := initiator.RecvActTwo(actTwo); err == nil { - nilAndPanic(initiator, nil, nil) - } - - return 1 -} diff --git a/lnd/fuzz/brontide/static_init_decrypt.go b/lnd/fuzz/brontide/static_init_decrypt.go deleted file mode 100644 index 35525d20..00000000 --- a/lnd/fuzz/brontide/static_init_decrypt.go +++ /dev/null @@ -1,27 +0,0 @@ -// +build gofuzz - -package brontidefuzz - -import ( - "bytes" -) - -// Fuzz_static_init_decrypt is a go-fuzz harness that decrypts arbitrary data -// with the initiator. -func Fuzz_static_init_decrypt(data []byte) int { - // This will return brontide machines with static keys. - initiator, responder := getStaticBrontideMachines() - - // Complete the brontide handshake. 
- completeHandshake(initiator, responder) - - // Create a reader with the byte array. - r := bytes.NewReader(data) - - // Decrypt the encrypted message using ReadMessage w/ initiator machine. - if _, err := initiator.ReadMessage(r); err == nil { - nilAndPanic(initiator, responder, nil) - } - - return 1 -} diff --git a/lnd/fuzz/brontide/static_init_enc_dec.go b/lnd/fuzz/brontide/static_init_enc_dec.go deleted file mode 100644 index 81669db2..00000000 --- a/lnd/fuzz/brontide/static_init_enc_dec.go +++ /dev/null @@ -1,49 +0,0 @@ -// +build gofuzz - -package brontidefuzz - -import ( - "bytes" - "math" -) - -// Fuzz_static_init_enc_dec is a go-fuzz harness that tests round-trip -// encryption and decryption -// between the initiator and the responder. -func Fuzz_static_init_enc_dec(data []byte) int { - // Ensure that length of message is not greater than max allowed size. - if len(data) > math.MaxUint16 { - return 1 - } - - // This will return brontide machines with static keys. - initiator, responder := getStaticBrontideMachines() - - // Complete the brontide handshake. - completeHandshake(initiator, responder) - - var b bytes.Buffer - - // Encrypt the message using WriteMessage w/ initiator machine. - if err := initiator.WriteMessage(data); err != nil { - nilAndPanic(initiator, responder, err) - } - - // Flush the encrypted message w/ initiator machine. - if _, err := initiator.Flush(&b); err != nil { - nilAndPanic(initiator, responder, err) - } - - // Decrypt the ciphertext using ReadMessage w/ responder machine. - plaintext, err := responder.ReadMessage(&b) - if err != nil { - nilAndPanic(initiator, responder, err) - } - - // Check that the decrypted message and the original message are equal. 
- if !bytes.Equal(data, plaintext) { - nilAndPanic(initiator, responder, nil) - } - - return 1 -} diff --git a/lnd/fuzz/brontide/static_init_encrypt.go b/lnd/fuzz/brontide/static_init_encrypt.go deleted file mode 100644 index 6c45a0b2..00000000 --- a/lnd/fuzz/brontide/static_init_encrypt.go +++ /dev/null @@ -1,37 +0,0 @@ -// +build gofuzz - -package brontidefuzz - -import ( - "bytes" - "math" -) - -// Fuzz_static_init_encrypt is a go-fuzz harness that encrypts arbitrary data -// with the initiator. -func Fuzz_static_init_encrypt(data []byte) int { - // Ensure that length of message is not greater than max allowed size. - if len(data) > math.MaxUint16 { - return 1 - } - - // This will return brontide machines with static keys. - initiator, responder := getStaticBrontideMachines() - - // Complete the brontide handshake. - completeHandshake(initiator, responder) - - var b bytes.Buffer - - // Encrypt the message using WriteMessage w/ initiator machine. - if err := initiator.WriteMessage(data); err != nil { - nilAndPanic(initiator, responder, err) - } - - // Flush the encrypted message w/ initiator machine. - if _, err := initiator.Flush(&b); err != nil { - nilAndPanic(initiator, responder, err) - } - - return 1 -} diff --git a/lnd/fuzz/brontide/static_resp_decrypt.go b/lnd/fuzz/brontide/static_resp_decrypt.go deleted file mode 100644 index fee4500b..00000000 --- a/lnd/fuzz/brontide/static_resp_decrypt.go +++ /dev/null @@ -1,27 +0,0 @@ -// +build gofuzz - -package brontidefuzz - -import ( - "bytes" -) - -// Fuzz_static_resp_decrypt is a go-fuzz harness that decrypts arbitrary data -// with the responder. -func Fuzz_static_resp_decrypt(data []byte) int { - // This will return brontide machines with static keys. - initiator, responder := getStaticBrontideMachines() - - // Complete the brontide handshake. - completeHandshake(initiator, responder) - - // Create a reader with the byte array. 
- r := bytes.NewReader(data) - - // Decrypt the encrypted message using ReadMessage w/ responder machine. - if _, err := responder.ReadMessage(r); err == nil { - nilAndPanic(initiator, responder, nil) - } - - return 1 -} diff --git a/lnd/fuzz/brontide/static_resp_enc_dec.go b/lnd/fuzz/brontide/static_resp_enc_dec.go deleted file mode 100644 index aaa7c3e8..00000000 --- a/lnd/fuzz/brontide/static_resp_enc_dec.go +++ /dev/null @@ -1,48 +0,0 @@ -// +build gofuzz - -package brontidefuzz - -import ( - "bytes" - "math" -) - -// Fuzz_static_resp_enc_dec is a go-fuzz harness that tests round-trip -// encryption and decryption between the responder and the initiator. -func Fuzz_static_resp_enc_dec(data []byte) int { - // Ensure that length of message is not greater than max allowed size. - if len(data) > math.MaxUint16 { - return 1 - } - - // This will return brontide machines with static keys. - initiator, responder := getStaticBrontideMachines() - - // Complete the brontide handshake. - completeHandshake(initiator, responder) - - var b bytes.Buffer - - // Encrypt the message using WriteMessage w/ responder machine. - if err := responder.WriteMessage(data); err != nil { - nilAndPanic(initiator, responder, err) - } - - // Flush the encrypted message w/ responder machine. - if _, err := responder.Flush(&b); err != nil { - nilAndPanic(initiator, responder, err) - } - - // Decrypt the ciphertext using ReadMessage w/ initiator machine. - plaintext, err := initiator.ReadMessage(&b) - if err != nil { - nilAndPanic(initiator, responder, err) - } - - // Check that the decrypted message and the original message are equal. 
- if !bytes.Equal(data, plaintext) { - nilAndPanic(initiator, responder, nil) - } - - return 1 -} diff --git a/lnd/fuzz/brontide/static_resp_encrypt.go b/lnd/fuzz/brontide/static_resp_encrypt.go deleted file mode 100644 index 5fdc9036..00000000 --- a/lnd/fuzz/brontide/static_resp_encrypt.go +++ /dev/null @@ -1,37 +0,0 @@ -// +build gofuzz - -package brontidefuzz - -import ( - "bytes" - "math" -) - -// Fuzz_static_resp_encrypt is a go-fuzz harness that encrypts arbitrary data -// with the responder. -func Fuzz_static_resp_encrypt(data []byte) int { - // Ensure that length of message is not greater than max allowed size. - if len(data) > math.MaxUint16 { - return 1 - } - - // This will return brontide machines with static keys. - initiator, responder := getStaticBrontideMachines() - - // Complete the brontide handshake. - completeHandshake(initiator, responder) - - var b bytes.Buffer - - // Encrypt the message using WriteMessage w/ responder machine. - if err := responder.WriteMessage(data); err != nil { - nilAndPanic(initiator, responder, err) - } - - // Flush the encrypted message w/ responder machine. - if _, err := responder.Flush(&b); err != nil { - nilAndPanic(initiator, responder, err) - } - - return 1 -} diff --git a/lnd/fuzz/lnwire/accept_channel.go b/lnd/fuzz/lnwire/accept_channel.go deleted file mode 100644 index 248b6b4b..00000000 --- a/lnd/fuzz/lnwire/accept_channel.go +++ /dev/null @@ -1,135 +0,0 @@ -// +build gofuzz - -package lnwirefuzz - -import ( - "bytes" - - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -// Fuzz_accept_channel is used by go-fuzz. -func Fuzz_accept_channel(data []byte) int { - // Prefix with MsgAcceptChannel. - data = prefixWithMsgType(data, lnwire.MsgAcceptChannel) - - // Create an empty message so that the FuzzHarness func can check - // if the max payload constraint is violated. - emptyMsg := lnwire.AcceptChannel{} - - // We have to do this here instead of in fuzz.Harness so that - // reflect.DeepEqual isn't called. 
Because of the UpfrontShutdownScript - // encoding, the first message and second message aren't deeply equal since - // the first has a nil slice and the other has an empty slice. - - // Create a reader with the byte array. - r := bytes.NewReader(data) - - // Make sure byte array length (excluding 2 bytes for message type) is - // less than max payload size for the wire message. We check this because - // otherwise `go-fuzz` will keep creating inputs that crash on ReadMessage - // due to a large message size. - payloadLen := uint32(len(data)) - 2 - if payloadLen > emptyMsg.MaxPayloadLength(0) { - // Ignore this input - max payload constraint violated. - return 1 - } - - msg, err := lnwire.ReadMessage(r, 0) - if err != nil { - // go-fuzz generated []byte that cannot be represented as a - // wire message but we will return 0 so go-fuzz can modify the - // input. - return 1 - } - - // We will serialize the message into a new bytes buffer. - var b bytes.Buffer - if _, err := lnwire.WriteMessage(&b, msg, 0); err != nil { - // Could not serialize message into bytes buffer, panic - panic(err) - } - - // Deserialize the message from the serialized bytes buffer, and then - // assert that the original message is equal to the newly deserialized - // message. - newMsg, err := lnwire.ReadMessage(&b, 0) - if err != nil { - // Could not deserialize message from bytes buffer, panic - panic(err) - } - - // Now compare every field instead of using reflect.DeepEqual. - // For UpfrontShutdownScript, we only compare bytes. This probably takes - // up more branches than necessary, but that's fine for now. 
- var shouldPanic bool - first := msg.(*lnwire.AcceptChannel) - second := newMsg.(*lnwire.AcceptChannel) - - if !bytes.Equal(first.PendingChannelID[:], second.PendingChannelID[:]) { - shouldPanic = true - } - - if first.DustLimit != second.DustLimit { - shouldPanic = true - } - - if first.MaxValueInFlight != second.MaxValueInFlight { - shouldPanic = true - } - - if first.ChannelReserve != second.ChannelReserve { - shouldPanic = true - } - - if first.HtlcMinimum != second.HtlcMinimum { - shouldPanic = true - } - - if first.MinAcceptDepth != second.MinAcceptDepth { - shouldPanic = true - } - - if first.CsvDelay != second.CsvDelay { - shouldPanic = true - } - - if first.MaxAcceptedHTLCs != second.MaxAcceptedHTLCs { - shouldPanic = true - } - - if !first.FundingKey.IsEqual(second.FundingKey) { - shouldPanic = true - } - - if !first.RevocationPoint.IsEqual(second.RevocationPoint) { - shouldPanic = true - } - - if !first.PaymentPoint.IsEqual(second.PaymentPoint) { - shouldPanic = true - } - - if !first.DelayedPaymentPoint.IsEqual(second.DelayedPaymentPoint) { - shouldPanic = true - } - - if !first.HtlcPoint.IsEqual(second.HtlcPoint) { - shouldPanic = true - } - - if !first.FirstCommitmentPoint.IsEqual(second.FirstCommitmentPoint) { - shouldPanic = true - } - - if !bytes.Equal(first.UpfrontShutdownScript, second.UpfrontShutdownScript) { - shouldPanic = true - } - - if shouldPanic { - panic("original message and deserialized message are not equal") - } - - // Add this input to the corpus. - return 1 -} diff --git a/lnd/fuzz/lnwire/announce_signatures.go b/lnd/fuzz/lnwire/announce_signatures.go deleted file mode 100644 index bb72dccc..00000000 --- a/lnd/fuzz/lnwire/announce_signatures.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build gofuzz - -package lnwirefuzz - -import ( - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -// Fuzz_announce_signatures is used by go-fuzz. -func Fuzz_announce_signatures(data []byte) int { - // Prefix with MsgAnnounceSignatures. 
- data = prefixWithMsgType(data, lnwire.MsgAnnounceSignatures) - - // Create an empty message so that the FuzzHarness func can check - // if the max payload constraint is violated. - emptyMsg := lnwire.AnnounceSignatures{} - - // Pass the message into our general fuzz harness for wire messages! - return harness(data, &emptyMsg) -} diff --git a/lnd/fuzz/lnwire/channel_announcement.go b/lnd/fuzz/lnwire/channel_announcement.go deleted file mode 100644 index 6d30d8bb..00000000 --- a/lnd/fuzz/lnwire/channel_announcement.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build gofuzz - -package lnwirefuzz - -import ( - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -// Fuzz_channel_announcement is used by go-fuzz. -func Fuzz_channel_announcement(data []byte) int { - // Prefix with MsgChannelAnnouncement. - data = prefixWithMsgType(data, lnwire.MsgChannelAnnouncement) - - // Create an empty message so that the FuzzHarness func can check - // if the max payload constraint is violated. - emptyMsg := lnwire.ChannelAnnouncement{} - - // Pass the message into our general fuzz harness for wire messages! - return harness(data, &emptyMsg) -} diff --git a/lnd/fuzz/lnwire/channel_reestablish.go b/lnd/fuzz/lnwire/channel_reestablish.go deleted file mode 100644 index 54d9104c..00000000 --- a/lnd/fuzz/lnwire/channel_reestablish.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build gofuzz - -package lnwirefuzz - -import ( - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -// Fuzz_channel_reestablish is used by go-fuzz. -func Fuzz_channel_reestablish(data []byte) int { - // Prefix with MsgChannelReestablish. - data = prefixWithMsgType(data, lnwire.MsgChannelReestablish) - - // Create an empty message so that the FuzzHarness func can check - // if the max payload constraint is violated. - emptyMsg := lnwire.ChannelReestablish{} - - // Pass the message into our general fuzz harness for wire messages! 
- return harness(data, &emptyMsg) -} diff --git a/lnd/fuzz/lnwire/channel_update.go b/lnd/fuzz/lnwire/channel_update.go deleted file mode 100644 index 97a54f4b..00000000 --- a/lnd/fuzz/lnwire/channel_update.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build gofuzz - -package lnwirefuzz - -import ( - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -// Fuzz_channel_update is used by go-fuzz. -func Fuzz_channel_update(data []byte) int { - // Prefix with MsgChannelUpdate. - data = prefixWithMsgType(data, lnwire.MsgChannelUpdate) - - // Create an empty message so that the FuzzHarness func can check - // if the max payload constraint is violated. - emptyMsg := lnwire.ChannelUpdate{} - - // Pass the message into our general fuzz harness for wire messages! - return harness(data, &emptyMsg) -} diff --git a/lnd/fuzz/lnwire/closing_signed.go b/lnd/fuzz/lnwire/closing_signed.go deleted file mode 100644 index 920f2242..00000000 --- a/lnd/fuzz/lnwire/closing_signed.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build gofuzz - -package lnwirefuzz - -import ( - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -// Fuzz_closing_signed is used by go-fuzz. -func Fuzz_closing_signed(data []byte) int { - // Prefix with MsgClosingSigned. - data = prefixWithMsgType(data, lnwire.MsgClosingSigned) - - // Create an empty message so that the FuzzHarness func can check - // if the max payload constraint is violated. - emptyMsg := lnwire.ClosingSigned{} - - // Pass the message into our general fuzz harness for wire messages! - return harness(data, &emptyMsg) -} diff --git a/lnd/fuzz/lnwire/commit_sig.go b/lnd/fuzz/lnwire/commit_sig.go deleted file mode 100644 index d157de9e..00000000 --- a/lnd/fuzz/lnwire/commit_sig.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build gofuzz - -package lnwirefuzz - -import ( - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -// Fuzz_commit_sig is used by go-fuzz. -func Fuzz_commit_sig(data []byte) int { - // Prefix with MsgCommitSig. 
- data = prefixWithMsgType(data, lnwire.MsgCommitSig) - - // Create an empty message so that the FuzzHarness func can check - // if the max payload constraint is violated. - emptyMsg := lnwire.CommitSig{} - - // Pass the message into our general fuzz harness for wire messages! - return harness(data, &emptyMsg) -} diff --git a/lnd/fuzz/lnwire/error.go b/lnd/fuzz/lnwire/error.go deleted file mode 100644 index 1293cb0d..00000000 --- a/lnd/fuzz/lnwire/error.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build gofuzz - -package lnwirefuzz - -import ( - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -// Fuzz_error is used by go-fuzz. -func Fuzz_error(data []byte) int { - // Prefix with MsgError. - data = prefixWithMsgType(data, lnwire.MsgError) - - // Create an empty message so that the FuzzHarness func can check - // if the max payload constraint is violated. - emptyMsg := lnwire.Error{} - - // Pass the message into our general fuzz harness for wire messages! - return harness(data, &emptyMsg) -} diff --git a/lnd/fuzz/lnwire/funding_created.go b/lnd/fuzz/lnwire/funding_created.go deleted file mode 100644 index e170a1b7..00000000 --- a/lnd/fuzz/lnwire/funding_created.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build gofuzz - -package lnwirefuzz - -import ( - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -// Fuzz_funding_created is used by go-fuzz. -func Fuzz_funding_created(data []byte) int { - // Prefix with MsgFundingCreated. - data = prefixWithMsgType(data, lnwire.MsgFundingCreated) - - // Create an empty message so that the FuzzHarness func can check - // if the max payload constraint is violated. - emptyMsg := lnwire.FundingCreated{} - - // Pass the message into our general fuzz harness for wire messages! 
- return harness(data, &emptyMsg) -} diff --git a/lnd/fuzz/lnwire/funding_locked.go b/lnd/fuzz/lnwire/funding_locked.go deleted file mode 100644 index b3f2b2dd..00000000 --- a/lnd/fuzz/lnwire/funding_locked.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build gofuzz - -package lnwirefuzz - -import ( - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -// Fuzz_funding_locked is used by go-fuzz. -func Fuzz_funding_locked(data []byte) int { - // Prefix with MsgFundingLocked. - data = prefixWithMsgType(data, lnwire.MsgFundingLocked) - - // Create an empty message so that the FuzzHarness func can check - // if the max payload constraint is violated. - emptyMsg := lnwire.FundingLocked{} - - // Pass the message into our general fuzz harness for wire messages! - return harness(data, &emptyMsg) -} diff --git a/lnd/fuzz/lnwire/funding_signed.go b/lnd/fuzz/lnwire/funding_signed.go deleted file mode 100644 index 9cd19cb8..00000000 --- a/lnd/fuzz/lnwire/funding_signed.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build gofuzz - -package lnwirefuzz - -import ( - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -// Fuzz_funding_signed is used by go-fuzz. -func Fuzz_funding_signed(data []byte) int { - // Prefix with MsgFundingSigned. - prefixWithMsgType(data, lnwire.MsgFundingSigned) - - // Create an empty message so that the FuzzHarness func can check - // if the max payload constraint is violated. - emptyMsg := lnwire.FundingSigned{} - - // Pass the message into our general fuzz harness for wire messages! - return harness(data, &emptyMsg) -} diff --git a/lnd/fuzz/lnwire/fuzz_utils.go b/lnd/fuzz/lnwire/fuzz_utils.go deleted file mode 100644 index 9cd69a04..00000000 --- a/lnd/fuzz/lnwire/fuzz_utils.go +++ /dev/null @@ -1,72 +0,0 @@ -// +build gofuzz - -package lnwirefuzz - -import ( - "bytes" - "encoding/binary" - "reflect" - - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -// prefixWithMsgType takes []byte and adds a wire protocol prefix -// to make the []byte into an actual message to be used in fuzzing. 
-func prefixWithMsgType(data []byte, prefix lnwire.MessageType) []byte { - var prefixBytes [2]byte - binary.BigEndian.PutUint16(prefixBytes[:], uint16(prefix)) - data = append(prefixBytes[:], data...) - return data -} - -// harness performs the actual fuzz testing of the appropriate wire message. -// This function will check that the passed-in message passes wire length checks, -// is a valid message once deserialized, and passes a sequence of serialization -// and deserialization checks. Returns an int that determines whether the input -// is unique or not. -func harness(data []byte, emptyMsg lnwire.Message) int { - // Create a reader with the byte array. - r := bytes.NewReader(data) - - // Make sure byte array length (excluding 2 bytes for message type) is - // less than max payload size for the wire message. We check this because - // otherwise `go-fuzz` will keep creating inputs that crash on ReadMessage - // due to a large message size. - payloadLen := uint32(len(data)) - 2 - if payloadLen > emptyMsg.MaxPayloadLength(0) { - // Ignore this input - max payload constraint violated. - return 1 - } - - msg, err := lnwire.ReadMessage(r, 0) - if err != nil { - // go-fuzz generated []byte that cannot be represented as a - // wire message but we will return 0 so go-fuzz can modify the - // input. - return 1 - } - - // We will serialize the message into a new bytes buffer. - var b bytes.Buffer - if _, err := lnwire.WriteMessage(&b, msg, 0); err != nil { - // Could not serialize message into bytes buffer, panic - panic(err) - } - - // Deserialize the message from the serialized bytes buffer, and then - // assert that the original message is equal to the newly deserialized - // message. - newMsg, err := lnwire.ReadMessage(&b, 0) - if err != nil { - // Could not deserialize message from bytes buffer, panic - panic(err) - } - - if !reflect.DeepEqual(msg, newMsg) { - // Deserialized message and original message are not deeply equal. 
- panic("original message and deserialized message are not deeply equal") - } - - // Add this input to the corpus. - return 1 -} diff --git a/lnd/fuzz/lnwire/gossip_timestamp_range.go b/lnd/fuzz/lnwire/gossip_timestamp_range.go deleted file mode 100644 index 36ba6dce..00000000 --- a/lnd/fuzz/lnwire/gossip_timestamp_range.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build gofuzz - -package lnwirefuzz - -import ( - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -// Fuzz_gossip_timestamp_range is used by go-fuzz. -func Fuzz_gossip_timestamp_range(data []byte) int { - // Prefix with MsgGossipTimestampRange. - data = prefixWithMsgType(data, lnwire.MsgGossipTimestampRange) - - // Create an empty message so that the FuzzHarness func can check - // if the max payload constraint is violated. - emptyMsg := lnwire.GossipTimestampRange{} - - // Pass the message into our general fuzz harness for wire messages! - return harness(data, &emptyMsg) -} diff --git a/lnd/fuzz/lnwire/init.go b/lnd/fuzz/lnwire/init.go deleted file mode 100644 index 0362cb42..00000000 --- a/lnd/fuzz/lnwire/init.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build gofuzz - -package lnwirefuzz - -import ( - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -// Fuzz_init is used by go-fuzz. -func Fuzz_init(data []byte) int { - // Prefix with MsgInit. - data = prefixWithMsgType(data, lnwire.MsgInit) - - // Create an empty message so that the FuzzHarness func can check - // if the max payload constraint is violated. - emptyMsg := lnwire.Init{} - - // Pass the message into our general fuzz harness for wire messages! - return harness(data, &emptyMsg) -} diff --git a/lnd/fuzz/lnwire/node_announcement.go b/lnd/fuzz/lnwire/node_announcement.go deleted file mode 100644 index 4c15e627..00000000 --- a/lnd/fuzz/lnwire/node_announcement.go +++ /dev/null @@ -1,112 +0,0 @@ -// +build gofuzz - -package lnwirefuzz - -import ( - "bytes" - "reflect" - - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -// Fuzz_node_announcement is used by go-fuzz. 
-func Fuzz_node_announcement(data []byte) int { - // Prefix with MsgNodeAnnouncement. - data = prefixWithMsgType(data, lnwire.MsgNodeAnnouncement) - - // Create an empty message so that the FuzzHarness func can check - // if the max payload constraint is violated. - emptyMsg := lnwire.NodeAnnouncement{} - - // We have to do this here instead of in fuzz.Harness so that - // reflect.DeepEqual isn't called. Address (de)serialization messes up - // the fuzzing assertions. - - // Create a reader with the byte array. - r := bytes.NewReader(data) - - // Make sure byte array length (excluding 2 bytes for message type) is - // less than max payload size for the wire message. We check this because - // otherwise `go-fuzz` will keep creating inputs that crash on ReadMessage - // due to a large message size. - payloadLen := uint32(len(data)) - 2 - if payloadLen > emptyMsg.MaxPayloadLength(0) { - // Ignore this input - max payload constraint violated. - return 1 - } - - msg, err := lnwire.ReadMessage(r, 0) - if err != nil { - // go-fuzz generated []byte that cannot be represented as a - // wire message but we will return 0 so go-fuzz can modify the - // input. - return 1 - } - - // We will serialize the message into a new bytes buffer. - var b bytes.Buffer - if _, err := lnwire.WriteMessage(&b, msg, 0); err != nil { - // Could not serialize message into bytes buffer, panic - panic(err) - } - - // Deserialize the message from the serialized bytes buffer, and then - // assert that the original message is equal to the newly deserialized - // message. - newMsg, err := lnwire.ReadMessage(&b, 0) - if err != nil { - // Could not deserialize message from bytes buffer, panic - panic(err) - } - - // Now compare every field instead of using reflect.DeepEqual for the - // Addresses field. 
- var shouldPanic bool - first := msg.(*lnwire.NodeAnnouncement) - second := newMsg.(*lnwire.NodeAnnouncement) - if !bytes.Equal(first.Signature[:], second.Signature[:]) { - shouldPanic = true - } - - if !reflect.DeepEqual(first.Features, second.Features) { - shouldPanic = true - } - - if first.Timestamp != second.Timestamp { - shouldPanic = true - } - - if !bytes.Equal(first.NodeID[:], second.NodeID[:]) { - shouldPanic = true - } - - if !reflect.DeepEqual(first.RGBColor, second.RGBColor) { - shouldPanic = true - } - - if !bytes.Equal(first.Alias[:], second.Alias[:]) { - shouldPanic = true - } - - if len(first.Addresses) != len(second.Addresses) { - shouldPanic = true - } - - for i := range first.Addresses { - if first.Addresses[i].String() != second.Addresses[i].String() { - shouldPanic = true - break - } - } - - if !reflect.DeepEqual(first.ExtraOpaqueData, second.ExtraOpaqueData) { - shouldPanic = true - } - - if shouldPanic { - panic("original message and deserialized message are not equal") - } - - // Add this input to the corpus. - return 1 -} diff --git a/lnd/fuzz/lnwire/open_channel.go b/lnd/fuzz/lnwire/open_channel.go deleted file mode 100644 index d3f2d0e6..00000000 --- a/lnd/fuzz/lnwire/open_channel.go +++ /dev/null @@ -1,151 +0,0 @@ -// +build gofuzz - -package lnwirefuzz - -import ( - "bytes" - - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -// Fuzz_open_channel is used by go-fuzz. -func Fuzz_open_channel(data []byte) int { - // Prefix with MsgOpenChannel. - data = prefixWithMsgType(data, lnwire.MsgOpenChannel) - - // Create an empty message so that the FuzzHarness func can check - // if the max payload constraint is violated. - emptyMsg := lnwire.OpenChannel{} - - // We have to do this here instead of in fuzz.Harness so that - // reflect.DeepEqual isn't called. Because of the UpfrontShutdownScript - // encoding, the first message and second message aren't deeply equal since - // the first has a nil slice and the other has an empty slice. 
- - // Create a reader with the byte array. - r := bytes.NewReader(data) - - // Make sure byte array length (excluding 2 bytes for message type) is - // less than max payload size for the wire message. We check this because - // otherwise `go-fuzz` will keep creating inputs that crash on ReadMessage - // due to a large message size. - payloadLen := uint32(len(data)) - 2 - if payloadLen > emptyMsg.MaxPayloadLength(0) { - // Ignore this input - max payload constraint violated. - return 1 - } - - msg, err := lnwire.ReadMessage(r, 0) - if err != nil { - // go-fuzz generated []byte that cannot be represented as a - // wire message but we will return 0 so go-fuzz can modify the - // input. - return 1 - } - - // We will serialize the message into a new bytes buffer. - var b bytes.Buffer - if _, err := lnwire.WriteMessage(&b, msg, 0); err != nil { - // Could not serialize message into bytes buffer, panic - panic(err) - } - - // Deserialize the message from the serialized bytes buffer, and then - // assert that the original message is equal to the newly deserialized - // message. - newMsg, err := lnwire.ReadMessage(&b, 0) - if err != nil { - // Could not deserialize message from bytes buffer, panic - panic(err) - } - - // Now compare every field instead of using reflect.DeepEqual. - // For UpfrontShutdownScript, we only compare bytes. This probably takes - // up more branches than necessary, but that's fine for now. 
- var shouldPanic bool - first := msg.(*lnwire.OpenChannel) - second := newMsg.(*lnwire.OpenChannel) - - if !first.ChainHash.IsEqual(&second.ChainHash) { - shouldPanic = true - } - - if !bytes.Equal(first.PendingChannelID[:], second.PendingChannelID[:]) { - shouldPanic = true - } - - if first.FundingAmount != second.FundingAmount { - shouldPanic = true - } - - if first.PushAmount != second.PushAmount { - shouldPanic = true - } - - if first.DustLimit != second.DustLimit { - shouldPanic = true - } - - if first.MaxValueInFlight != second.MaxValueInFlight { - shouldPanic = true - } - - if first.ChannelReserve != second.ChannelReserve { - shouldPanic = true - } - - if first.HtlcMinimum != second.HtlcMinimum { - shouldPanic = true - } - - if first.FeePerKiloWeight != second.FeePerKiloWeight { - shouldPanic = true - } - - if first.CsvDelay != second.CsvDelay { - shouldPanic = true - } - - if first.MaxAcceptedHTLCs != second.MaxAcceptedHTLCs { - shouldPanic = true - } - - if !first.FundingKey.IsEqual(second.FundingKey) { - shouldPanic = true - } - - if !first.RevocationPoint.IsEqual(second.RevocationPoint) { - shouldPanic = true - } - - if !first.PaymentPoint.IsEqual(second.PaymentPoint) { - shouldPanic = true - } - - if !first.DelayedPaymentPoint.IsEqual(second.DelayedPaymentPoint) { - shouldPanic = true - } - - if !first.HtlcPoint.IsEqual(second.HtlcPoint) { - shouldPanic = true - } - - if !first.FirstCommitmentPoint.IsEqual(second.FirstCommitmentPoint) { - shouldPanic = true - } - - if first.ChannelFlags != second.ChannelFlags { - shouldPanic = true - } - - if !bytes.Equal(first.UpfrontShutdownScript, second.UpfrontShutdownScript) { - shouldPanic = true - } - - if shouldPanic { - panic("original message and deserialized message are not equal") - } - - // Add this input to the corpus. 
- return 1 -} diff --git a/lnd/fuzz/lnwire/ping.go b/lnd/fuzz/lnwire/ping.go deleted file mode 100644 index 7936d8b8..00000000 --- a/lnd/fuzz/lnwire/ping.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build gofuzz - -package lnwirefuzz - -import ( - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -// Fuzz_ping is used by go-fuzz. -func Fuzz_ping(data []byte) int { - // Prefix with MsgPing. - data = prefixWithMsgType(data, lnwire.MsgPing) - - // Create an empty message so that the FuzzHarness func can check - // if the max payload constraint is violated. - emptyMsg := lnwire.Ping{} - - // Pass the message into our general fuzz harness for wire messages! - return harness(data, &emptyMsg) -} diff --git a/lnd/fuzz/lnwire/pong.go b/lnd/fuzz/lnwire/pong.go deleted file mode 100644 index 93e0b7f2..00000000 --- a/lnd/fuzz/lnwire/pong.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build gofuzz - -package lnwirefuzz - -import ( - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -// Fuzz_pong is used by go-fuzz. -func Fuzz_pong(data []byte) int { - // Prefix with MsgPong. - data = prefixWithMsgType(data, lnwire.MsgPong) - - // Create an empty message so that the FuzzHarness func can check - // if the max payload constraint is violated. - emptyMsg := lnwire.Pong{} - - // Pass the message into our general fuzz harness for wire messages! - return harness(data, &emptyMsg) -} diff --git a/lnd/fuzz/lnwire/query_channel_range.go b/lnd/fuzz/lnwire/query_channel_range.go deleted file mode 100644 index 8c24fe99..00000000 --- a/lnd/fuzz/lnwire/query_channel_range.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build gofuzz - -package lnwirefuzz - -import ( - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -// Fuzz_query_channel_range is used by go-fuzz. -func Fuzz_query_channel_range(data []byte) int { - // Prefix with MsgQueryChannelRange. - data = prefixWithMsgType(data, lnwire.MsgQueryChannelRange) - - // Create an empty message so that the FuzzHarness func can check - // if the max payload constraint is violated. 
- emptyMsg := lnwire.QueryChannelRange{} - - // Pass the message into our general fuzz harness for wire messages! - return harness(data, &emptyMsg) -} diff --git a/lnd/fuzz/lnwire/query_short_chan_ids.go b/lnd/fuzz/lnwire/query_short_chan_ids.go deleted file mode 100644 index 4e10cf84..00000000 --- a/lnd/fuzz/lnwire/query_short_chan_ids.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build gofuzz - -package lnwirefuzz - -import ( - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -// Fuzz_query_short_chan_ids is used by go-fuzz. -func Fuzz_query_short_chan_ids(data []byte) int { - // Prefix with MsgQueryShortChanIDs. - data = prefixWithMsgType(data, lnwire.MsgQueryShortChanIDs) - - // Create an empty message so that the FuzzHarness func can check - // if the max payload constraint is violated. - emptyMsg := lnwire.QueryShortChanIDs{} - - // Pass the message into our general fuzz harness for wire messages! - return harness(data, &emptyMsg) -} diff --git a/lnd/fuzz/lnwire/query_short_chan_ids_zlib.go b/lnd/fuzz/lnwire/query_short_chan_ids_zlib.go deleted file mode 100644 index 7304ec9d..00000000 --- a/lnd/fuzz/lnwire/query_short_chan_ids_zlib.go +++ /dev/null @@ -1,51 +0,0 @@ -// +build gofuzz - -package lnwirefuzz - -import ( - "bytes" - "compress/zlib" - "encoding/binary" - - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -// Fuzz_query_short_chan_ids_zlib is used by go-fuzz. -func Fuzz_query_short_chan_ids_zlib(data []byte) int { - - var buf bytes.Buffer - zlibWriter := zlib.NewWriter(&buf) - _, err := zlibWriter.Write(data) - if err != nil { - // Zlib bug? - panic(err) - } - - if err := zlibWriter.Close(); err != nil { - // Zlib bug? - panic(err) - } - - compressedPayload := buf.Bytes() - - chainhash := []byte("00000000000000000000000000000000") - numBytesInBody := len(compressedPayload) + 1 - zlibByte := []byte("\x01") - - bodyBytes := make([]byte, 2) - binary.BigEndian.PutUint16(bodyBytes, uint16(numBytesInBody)) - - payload := append(chainhash, bodyBytes...) 
- payload = append(payload, zlibByte...) - payload = append(payload, compressedPayload...) - - // Prefix with MsgQueryShortChanIDs. - payload = prefixWithMsgType(payload, lnwire.MsgQueryShortChanIDs) - - // Create an empty message so that the FuzzHarness func can check - // if the max payload constraint is violated. - emptyMsg := lnwire.QueryShortChanIDs{} - - // Pass the message into our general fuzz harness for wire messages! - return harness(payload, &emptyMsg) -} diff --git a/lnd/fuzz/lnwire/reply_channel_range.go b/lnd/fuzz/lnwire/reply_channel_range.go deleted file mode 100644 index 8e2165f8..00000000 --- a/lnd/fuzz/lnwire/reply_channel_range.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build gofuzz - -package lnwirefuzz - -import ( - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -// Fuzz_reply_channel_range is used by go-fuzz. -func Fuzz_reply_channel_range(data []byte) int { - // Prefix with MsgReplyChannelRange. - data = prefixWithMsgType(data, lnwire.MsgReplyChannelRange) - - // Create an empty message so that the FuzzHarness func can check - // if the max payload constraint is violated. - emptyMsg := lnwire.ReplyChannelRange{} - - // Pass the message into our general fuzz harness for wire messages! - return harness(data, &emptyMsg) -} diff --git a/lnd/fuzz/lnwire/reply_channel_range_zlib.go b/lnd/fuzz/lnwire/reply_channel_range_zlib.go deleted file mode 100644 index 59bb3bea..00000000 --- a/lnd/fuzz/lnwire/reply_channel_range_zlib.go +++ /dev/null @@ -1,59 +0,0 @@ -// +build gofuzz - -package lnwirefuzz - -import ( - "bytes" - "compress/zlib" - "encoding/binary" - - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -// Fuzz_reply_channel_range_zlib is used by go-fuzz. -func Fuzz_reply_channel_range_zlib(data []byte) int { - - var buf bytes.Buffer - zlibWriter := zlib.NewWriter(&buf) - _, err := zlibWriter.Write(data) - if err != nil { - // Zlib bug? - panic(err) - } - - if err := zlibWriter.Close(); err != nil { - // Zlib bug? 
- panic(err) - } - - compressedPayload := buf.Bytes() - - // Initialize some []byte vars which will prefix our payload - chainhash := []byte("00000000000000000000000000000000") - firstBlockHeight := []byte("\x00\x00\x00\x00") - numBlocks := []byte("\x00\x00\x00\x00") - completeByte := []byte("\x00") - - numBytesInBody := len(compressedPayload) + 1 - zlibByte := []byte("\x01") - - bodyBytes := make([]byte, 2) - binary.BigEndian.PutUint16(bodyBytes, uint16(numBytesInBody)) - - payload := append(chainhash, firstBlockHeight...) - payload = append(payload, numBlocks...) - payload = append(payload, completeByte...) - payload = append(payload, bodyBytes...) - payload = append(payload, zlibByte...) - payload = append(payload, compressedPayload...) - - // Prefix with MsgReplyChannelRange. - payload = prefixWithMsgType(payload, lnwire.MsgReplyChannelRange) - - // Create an empty message so that the FuzzHarness func can check - // if the max payload constraint is violated. - emptyMsg := lnwire.ReplyChannelRange{} - - // Pass the message into our general fuzz harness for wire messages! - return harness(payload, &emptyMsg) -} diff --git a/lnd/fuzz/lnwire/reply_short_chan_ids_end.go b/lnd/fuzz/lnwire/reply_short_chan_ids_end.go deleted file mode 100644 index 130f1303..00000000 --- a/lnd/fuzz/lnwire/reply_short_chan_ids_end.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build gofuzz - -package lnwirefuzz - -import ( - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -// Fuzz_reply_short_chan_ids_end is used by go-fuzz. -func Fuzz_reply_short_chan_ids_end(data []byte) int { - // Prefix with MsgReplyShortChanIDsEnd. - data = prefixWithMsgType(data, lnwire.MsgReplyShortChanIDsEnd) - - // Create an empty message so that the FuzzHarness func can check - // if the max payload constraint is violated. - emptyMsg := lnwire.ReplyShortChanIDsEnd{} - - // Pass the message into our general fuzz harness for wire messages! 
- return harness(data, &emptyMsg) -} diff --git a/lnd/fuzz/lnwire/revoke_and_ack.go b/lnd/fuzz/lnwire/revoke_and_ack.go deleted file mode 100644 index 60668d89..00000000 --- a/lnd/fuzz/lnwire/revoke_and_ack.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build gofuzz - -package lnwirefuzz - -import ( - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -// Fuzz_revoke_and_ack is used by go-fuzz. -func Fuzz_revoke_and_ack(data []byte) int { - // Prefix with MsgRevokeAndAck. - data = prefixWithMsgType(data, lnwire.MsgRevokeAndAck) - - // Create an empty message so that the FuzzHarness func can check - // if the max payload constraint is violated. - emptyMsg := lnwire.RevokeAndAck{} - - // Pass the message into our general fuzz harness for wire messages! - return harness(data, &emptyMsg) -} diff --git a/lnd/fuzz/lnwire/shutdown.go b/lnd/fuzz/lnwire/shutdown.go deleted file mode 100644 index a51ee8db..00000000 --- a/lnd/fuzz/lnwire/shutdown.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build gofuzz - -package lnwirefuzz - -import ( - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -// Fuzz_shutdown is used by go-fuzz. -func Fuzz_shutdown(data []byte) int { - // Prefix with MsgShutdown. - data = prefixWithMsgType(data, lnwire.MsgShutdown) - - // Create an empty message so that the FuzzHarness func can check - // if the max payload constraint is violated. - emptyMsg := lnwire.Shutdown{} - - // Pass the message into our general fuzz harness for wire messages! - return harness(data, &emptyMsg) -} diff --git a/lnd/fuzz/lnwire/update_add_htlc.go b/lnd/fuzz/lnwire/update_add_htlc.go deleted file mode 100644 index deabe79a..00000000 --- a/lnd/fuzz/lnwire/update_add_htlc.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build gofuzz - -package lnwirefuzz - -import ( - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -// Fuzz_update_add_htlc is used by go-fuzz. -func Fuzz_update_add_htlc(data []byte) int { - // Prefix with MsgUpdateAddHTLC. 
- data = prefixWithMsgType(data, lnwire.MsgUpdateAddHTLC) - - // Create an empty message so that the FuzzHarness func can check - // if the max payload constraint is violated. - emptyMsg := lnwire.UpdateAddHTLC{} - - // Pass the message into our general fuzz harness for wire messages! - return harness(data, &emptyMsg) -} diff --git a/lnd/fuzz/lnwire/update_fail_htlc.go b/lnd/fuzz/lnwire/update_fail_htlc.go deleted file mode 100644 index 256b6345..00000000 --- a/lnd/fuzz/lnwire/update_fail_htlc.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build gofuzz - -package lnwirefuzz - -import ( - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -// Fuzz_update_fail_htlc is used by go-fuzz. -func Fuzz_update_fail_htlc(data []byte) int { - // Prefix with MsgUpdateFailHTLC. - data = prefixWithMsgType(data, lnwire.MsgUpdateFailHTLC) - - // Create an empty message so that the FuzzHarness func can check - // if the max payload constraint is violated. - emptyMsg := lnwire.UpdateFailHTLC{} - - // Pass the message into our general fuzz harness for wire messages! - return harness(data, &emptyMsg) -} diff --git a/lnd/fuzz/lnwire/update_fail_malformed_htlc.go b/lnd/fuzz/lnwire/update_fail_malformed_htlc.go deleted file mode 100644 index 99bc15a0..00000000 --- a/lnd/fuzz/lnwire/update_fail_malformed_htlc.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build gofuzz - -package lnwirefuzz - -import ( - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -// Fuzz_update_fail_malformed_htlc is used by go-fuzz. -func Fuzz_update_fail_malformed_htlc(data []byte) int { - // Prefix with MsgUpdateFailMalformedHTLC. - data = prefixWithMsgType(data, lnwire.MsgUpdateFailMalformedHTLC) - - // Create an empty message so that the FuzzHarness func can check - // if the max payload constraint is violated. - emptyMsg := lnwire.UpdateFailMalformedHTLC{} - - // Pass the message into our general fuzz harness for wire messages! 
- return harness(data, &emptyMsg) -} diff --git a/lnd/fuzz/lnwire/update_fee.go b/lnd/fuzz/lnwire/update_fee.go deleted file mode 100644 index dbfbcc64..00000000 --- a/lnd/fuzz/lnwire/update_fee.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build gofuzz - -package lnwirefuzz - -import ( - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -// Fuzz_update_fee is used by go-fuzz. -func Fuzz_update_fee(data []byte) int { - // Prefix with MsgUpdateFee. - data = prefixWithMsgType(data, lnwire.MsgUpdateFee) - - // Create an empty message so that the FuzzHarness func can check - // if the max payload constraint is violated. - emptyMsg := lnwire.UpdateFee{} - - // Pass the message into our general fuzz harness for wire messages! - return harness(data, &emptyMsg) -} diff --git a/lnd/fuzz/lnwire/update_fulfill_htlc.go b/lnd/fuzz/lnwire/update_fulfill_htlc.go deleted file mode 100644 index db166d2f..00000000 --- a/lnd/fuzz/lnwire/update_fulfill_htlc.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build gofuzz - -package lnwirefuzz - -import ( - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -// Fuzz_update_fulfill_htlc is used by go-fuzz. -func Fuzz_update_fulfill_htlc(data []byte) int { - // Prefix with MsgUpdateFulfillHTLC. - data = prefixWithMsgType(data, lnwire.MsgUpdateFulfillHTLC) - - // Create an empty message so that the FuzzHarness func can check - // if the max payload constraint is violated. - emptyMsg := lnwire.UpdateFulfillHTLC{} - - // Pass the message into our general fuzz harness for wire messages! - return harness(data, &emptyMsg) -} diff --git a/lnd/fuzz/wtwire/create_session.go b/lnd/fuzz/wtwire/create_session.go deleted file mode 100644 index 99ea907f..00000000 --- a/lnd/fuzz/wtwire/create_session.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build gofuzz - -package wtwirefuzz - -import ( - "github.com/pkt-cash/pktd/lnd/watchtower/wtwire" -) - -// Fuzz_create_session is used by go-fuzz. -func Fuzz_create_session(data []byte) int { - // Prefix with MsgCreateSession. 
- data = prefixWithMsgType(data, wtwire.MsgCreateSession) - - // Create an empty message so that the FuzzHarness func can check if the - // max payload constraint is violated. - emptyMsg := wtwire.CreateSession{} - - // Pass the message into our general fuzz harness for wire messages! - return harness(data, &emptyMsg) -} diff --git a/lnd/fuzz/wtwire/create_session_reply.go b/lnd/fuzz/wtwire/create_session_reply.go deleted file mode 100644 index 673fa91b..00000000 --- a/lnd/fuzz/wtwire/create_session_reply.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build gofuzz - -package wtwirefuzz - -import ( - "github.com/pkt-cash/pktd/lnd/watchtower/wtwire" -) - -// Fuzz_create_session_reply is used by go-fuzz. -func Fuzz_create_session_reply(data []byte) int { - // Prefix with MsgCreateSessionReply. - data = prefixWithMsgType(data, wtwire.MsgCreateSessionReply) - - // Create an empty message so that the FuzzHarness func can check if the - // max payload constraint is violated. - emptyMsg := wtwire.CreateSessionReply{} - - // Pass the message into our general fuzz harness for wire messages! - return harness(data, &emptyMsg) -} diff --git a/lnd/fuzz/wtwire/delete_session.go b/lnd/fuzz/wtwire/delete_session.go deleted file mode 100644 index a47b437c..00000000 --- a/lnd/fuzz/wtwire/delete_session.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build gofuzz - -package wtwirefuzz - -import ( - "github.com/pkt-cash/pktd/lnd/watchtower/wtwire" -) - -// Fuzz_delete_session is used by go-fuzz. -func Fuzz_delete_session(data []byte) int { - // Prefix with MsgDeleteSession. - data = prefixWithMsgType(data, wtwire.MsgDeleteSession) - - // Create an empty message so that the FuzzHarness func can check if the - // max payload constraint is violated. - emptyMsg := wtwire.DeleteSession{} - - // Pass the message into our general fuzz harness for wire messages! 
- return harness(data, &emptyMsg) -} diff --git a/lnd/fuzz/wtwire/delete_session_reply.go b/lnd/fuzz/wtwire/delete_session_reply.go deleted file mode 100644 index 1ecb10f0..00000000 --- a/lnd/fuzz/wtwire/delete_session_reply.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build gofuzz - -package wtwirefuzz - -import ( - "github.com/pkt-cash/pktd/lnd/watchtower/wtwire" -) - -// Fuzz_delete_session_reply is used by go-fuzz. -func Fuzz_delete_session_reply(data []byte) int { - // Prefix with MsgDeleteSessionReply. - data = prefixWithMsgType(data, wtwire.MsgDeleteSessionReply) - - // Create an empty message so that the FuzzHarness func can check if the - // max payload constraint is violated. - emptyMsg := wtwire.DeleteSessionReply{} - - // Pass the message into our general fuzz harness for wire messages! - return harness(data, &emptyMsg) -} diff --git a/lnd/fuzz/wtwire/error.go b/lnd/fuzz/wtwire/error.go deleted file mode 100644 index ffb95723..00000000 --- a/lnd/fuzz/wtwire/error.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build gofuzz - -package wtwirefuzz - -import ( - "github.com/pkt-cash/pktd/lnd/watchtower/wtwire" -) - -// Fuzz_error is used by go-fuzz. -func Fuzz_error(data []byte) int { - // Prefix with MsgError. - data = prefixWithMsgType(data, wtwire.MsgError) - - // Create an empty message so that the FuzzHarness func can check if the - // max payload constraint is violated. - emptyMsg := wtwire.Error{} - - // Pass the message into our general fuzz harness for wire messages! 
- return harness(data, &emptyMsg) -} diff --git a/lnd/fuzz/wtwire/fuzz_utils.go b/lnd/fuzz/wtwire/fuzz_utils.go deleted file mode 100644 index 36f31a34..00000000 --- a/lnd/fuzz/wtwire/fuzz_utils.go +++ /dev/null @@ -1,75 +0,0 @@ -// +build gofuzz - -package wtwirefuzz - -import ( - "bytes" - "encoding/binary" - "fmt" - "reflect" - - "github.com/pkt-cash/pktd/lnd/watchtower/wtwire" -) - -// prefixWithMsgType takes []byte and adds a wire protocol prefix -// to make the []byte into an actual message to be used in fuzzing. -func prefixWithMsgType(data []byte, prefix wtwire.MessageType) []byte { - var prefixBytes [2]byte - binary.BigEndian.PutUint16(prefixBytes[:], uint16(prefix)) - data = append(prefixBytes[:], data...) - return data -} - -// harness performs the actual fuzz testing of the appropriate wire message. -// This function will check that the passed-in message passes wire length checks, -// is a valid message once deserialized, and passes a sequence of serialization -// and deserialization checks. Returns an int that determines whether the input -// is unique or not. -func harness(data []byte, emptyMsg wtwire.Message) int { - // Create a reader with the byte array. - r := bytes.NewReader(data) - - // Make sure byte array length (excluding 2 bytes for message type) is - // less than max payload size for the wire message. We check this because - // otherwise `go-fuzz` will keep creating inputs that crash on ReadMessage - // due to a large message size. - payloadLen := uint32(len(data)) - 2 - if payloadLen > emptyMsg.MaxPayloadLength(0) { - // Ignore this input - max payload constraint violated. - return 1 - } - - msg, err := wtwire.ReadMessage(r, 0) - if err != nil { - // go-fuzz generated []byte that cannot be represented as a - // wire message but we will return 0 so go-fuzz can modify the - // input. - return 1 - } - - // We will serialize the message into a new bytes buffer. 
- var b bytes.Buffer - if _, err := wtwire.WriteMessage(&b, msg, 0); err != nil { - // Could not serialize message into bytes buffer, panic. - panic(err) - } - - // Deserialize the message from the serialized bytes buffer, and then - // assert that the original message is equal to the newly deserialized - // message. - newMsg, err := wtwire.ReadMessage(&b, 0) - if err != nil { - // Could not deserialize message from bytes buffer, panic. - panic(err) - } - - if !reflect.DeepEqual(msg, newMsg) { - // Deserialized message and original message are not - // deeply equal. - panic(er.Errorf("deserialized message and original message " + - "are not deeply equal.")) - } - - // Add this input to the corpus. - return 1 -} diff --git a/lnd/fuzz/wtwire/init.go b/lnd/fuzz/wtwire/init.go deleted file mode 100644 index ca302e8f..00000000 --- a/lnd/fuzz/wtwire/init.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build gofuzz - -package wtwirefuzz - -import ( - "github.com/pkt-cash/pktd/lnd/watchtower/wtwire" -) - -// Fuzz_init is used by go-fuzz. -func Fuzz_init(data []byte) int { - // Prefix with MsgInit. - data = prefixWithMsgType(data, wtwire.MsgInit) - - // Create an empty message so that the FuzzHarness func can check if the - // max payload constraint is violated. - emptyMsg := wtwire.Init{} - - // Pass the message into our general fuzz harness for wire messages! - return harness(data, &emptyMsg) -} diff --git a/lnd/fuzz/wtwire/state_update.go b/lnd/fuzz/wtwire/state_update.go deleted file mode 100644 index c1d3bd9b..00000000 --- a/lnd/fuzz/wtwire/state_update.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build gofuzz - -package wtwirefuzz - -import ( - "github.com/pkt-cash/pktd/lnd/watchtower/wtwire" -) - -// Fuzz_state_update is used by go-fuzz. -func Fuzz_state_update(data []byte) int { - // Prefix with MsgStateUpdate. 
- data = prefixWithMsgType(data, wtwire.MsgStateUpdate) - - // Create an empty message so that the FuzzHarness func can check if the - // max payload constraint is violated. - emptyMsg := wtwire.StateUpdate{} - - // Pass the message into our general fuzz harness for wire messages! - return harness(data, &emptyMsg) -} diff --git a/lnd/fuzz/wtwire/state_update_reply.go b/lnd/fuzz/wtwire/state_update_reply.go deleted file mode 100644 index 1621325a..00000000 --- a/lnd/fuzz/wtwire/state_update_reply.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build gofuzz - -package wtwirefuzz - -import ( - "github.com/pkt-cash/pktd/lnd/watchtower/wtwire" -) - -// Fuzz_state_update_reply is used by go-fuzz. -func Fuzz_state_update_reply(data []byte) int { - // Prefix with MsgStateUpdateReply. - data = prefixWithMsgType(data, wtwire.MsgStateUpdateReply) - - // Create an empty message so that the FuzzHarness func can check if the - // max payload constraint is violated. - emptyMsg := wtwire.StateUpdateReply{} - - // Pass the message into our general fuzz harness for wire messages! - return harness(data, &emptyMsg) -} diff --git a/lnd/fuzz/zpay32/decode.go b/lnd/fuzz/zpay32/decode.go deleted file mode 100644 index 553acd4c..00000000 --- a/lnd/fuzz/zpay32/decode.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build gofuzz - -package zpay32fuzz - -import ( - "github.com/pkt-cash/pktd/chaincfg" - "github.com/pkt-cash/pktd/lnd/zpay32" -) - -// Fuzz_decode is used by go-fuzz. -func Fuzz_decode(data []byte) int { - inv, err := zpay32.Decode(string(data), &chaincfg.TestNet3Params) - if err != nil { - return 1 - } - - // Call these functions as a sanity check to make sure the invoice - // is well-formed. 
- _ = inv.MinFinalCLTVExpiry() - _ = inv.Expiry() - return 1 -} diff --git a/lnd/fuzz/zpay32/encode.go b/lnd/fuzz/zpay32/encode.go deleted file mode 100644 index 944c6de8..00000000 --- a/lnd/fuzz/zpay32/encode.go +++ /dev/null @@ -1,49 +0,0 @@ -// +build gofuzz - -package zpay32fuzz - -import ( - "encoding/hex" - "fmt" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/chaincfg" - "github.com/pkt-cash/pktd/lnd/zpay32" -) - -// Fuzz_encode is used by go-fuzz. -func Fuzz_encode(data []byte) int { - inv, err := zpay32.Decode(string(data), &chaincfg.TestNet3Params) - if err != nil { - return 1 - } - - // Call these functions as a sanity check to make sure the invoice - // is well-formed. - _ = inv.MinFinalCLTVExpiry() - _ = inv.Expiry() - - // Initialize the static key we will be using for this fuzz test. - testPrivKeyBytes, _ := util.DecodeHex("e126f68f7eafcc8b74f54d269fe206be715000f94dac067d1c04a8ca3b2db734") - testPrivKey, _ := btcec.PrivKeyFromBytes(btcec.S256(), testPrivKeyBytes) - - // Then, initialize the testMessageSigner so we can encode out - // invoices with this private key. - testMessageSigner := zpay32.MessageSigner{ - SignCompact: func(hash []byte) ([]byte, er.R) { - sig, err := btcec.SignCompact(btcec.S256(), - testPrivKey, hash, true) - if err != nil { - return nil, er.Errorf("can't sign the "+ - "message: %v", err) - } - return sig, nil - }, - } - _, err = inv.Encode(testMessageSigner) - if err != nil { - return 1 - } - - return 1 -} diff --git a/lnd/healthcheck/diskcheck.go b/lnd/healthcheck/diskcheck.go deleted file mode 100644 index b57ceccb..00000000 --- a/lnd/healthcheck/diskcheck.go +++ /dev/null @@ -1,37 +0,0 @@ -// +build !windows,!solaris,!netbsd,!openbsd - -package healthcheck - -import ( - "syscall" - - "github.com/pkt-cash/pktd/btcutil/er" -) - -// AvailableDiskSpaceRatio returns ratio of available disk space to total -// capacity. 
-func AvailableDiskSpaceRatio(path string) (float64, er.R) { - s := syscall.Statfs_t{} - err := syscall.Statfs(path, &s) - if err != nil { - return 0, er.E(err) - } - - // Calculate our free blocks/total blocks to get our total ratio of - // free blocks. - return float64(s.Bfree) / float64(s.Blocks), nil -} - -// AvailableDiskSpace returns the available disk space in bytes of the given -// file system. -func AvailableDiskSpace(path string) (uint64, er.R) { - s := syscall.Statfs_t{} - err := syscall.Statfs(path, &s) - if err != nil { - return 0, er.E(err) - } - - // Some OSes have s.Bavail defined as int64, others as uint64, so we - // need the explicit type conversion here. - return uint64(s.Bavail) * uint64(s.Bsize), nil // nolint:unconvert -} diff --git a/lnd/healthcheck/diskcheck_netbsd.go b/lnd/healthcheck/diskcheck_netbsd.go deleted file mode 100644 index ef4dab2d..00000000 --- a/lnd/healthcheck/diskcheck_netbsd.go +++ /dev/null @@ -1,29 +0,0 @@ -package healthcheck - -import "golang.org/x/sys/unix" - -// AvailableDiskSpaceRatio returns ratio of available disk space to total -// capacity for netbsd. -func AvailableDiskSpaceRatio(path string) (float64, er.R) { - s := unix.Statvfs_t{} - err := unix.Statvfs(path, &s) - if err != nil { - return 0, err - } - - // Calculate our free blocks/total blocks to get our total ratio of - // free blocks. - return float64(s.Bfree) / float64(s.Blocks), nil -} - -// AvailableDiskSpace returns the available disk space in bytes of the given -// file system for netbsd. 
-func AvailableDiskSpace(path string) (uint64, er.R) { - s := unix.Statvfs_t{} - err := unix.Statvfs(path, &s) - if err != nil { - return 0, err - } - - return s.Bavail * uint64(s.Bsize), nil -} diff --git a/lnd/healthcheck/diskcheck_openbsd.go b/lnd/healthcheck/diskcheck_openbsd.go deleted file mode 100644 index ee5c7636..00000000 --- a/lnd/healthcheck/diskcheck_openbsd.go +++ /dev/null @@ -1,29 +0,0 @@ -package healthcheck - -import "golang.org/x/sys/unix" - -// AvailableDiskSpaceRatio returns ratio of available disk space to total -// capacity for openbsd. -func AvailableDiskSpaceRatio(path string) (float64, er.R) { - s := unix.Statfs_t{} - err := unix.Statfs(path, &s) - if err != nil { - return 0, err - } - - // Calculate our free blocks/total blocks to get our total ratio of - // free blocks. - return float64(s.F_bfree) / float64(s.F_blocks), nil -} - -// AvailableDiskSpace returns the available disk space in bytes of the given -// file system for openbsd. -func AvailableDiskSpace(path string) (uint64, er.R) { - s := unix.Statfs_t{} - err := unix.Statfs(path, &s) - if err != nil { - return 0, err - } - - return uint64(s.F_bavail) * uint64(s.F_bsize), nil -} diff --git a/lnd/healthcheck/diskcheck_solaris.go b/lnd/healthcheck/diskcheck_solaris.go deleted file mode 100644 index 34fd2251..00000000 --- a/lnd/healthcheck/diskcheck_solaris.go +++ /dev/null @@ -1,29 +0,0 @@ -package healthcheck - -import "golang.org/x/sys/unix" - -// AvailableDiskSpaceRatio returns ratio of available disk space to total -// capacity for solaris. -func AvailableDiskSpaceRatio(path string) (float64, er.R) { - s := unix.Statvfs_t{} - err := unix.Statvfs(path, &s) - if err != nil { - return 0, err - } - - // Calculate our free blocks/total blocks to get our total ratio of - // free blocks. - return float64(s.Bfree) / float64(s.Blocks), nil -} - -// AvailableDiskSpace returns the available disk space in bytes of the given -// file system for solaris. 
-func AvailableDiskSpace(path string) (uint64, er.R) { - s := unix.Statvfs_t{} - err := unix.Statvfs(path, &s) - if err != nil { - return 0, err - } - - return s.Bavail * uint64(s.Bsize), nil -} diff --git a/lnd/healthcheck/diskcheck_windows.go b/lnd/healthcheck/diskcheck_windows.go deleted file mode 100644 index c4fbe4ed..00000000 --- a/lnd/healthcheck/diskcheck_windows.go +++ /dev/null @@ -1,34 +0,0 @@ -package healthcheck - -import ( - "github.com/pkt-cash/pktd/btcutil/er" - "golang.org/x/sys/windows" -) - -// AvailableDiskSpaceRatio returns ratio of available disk space to total -// capacity for windows. -func AvailableDiskSpaceRatio(path string) (float64, er.R) { - var free, total, avail uint64 - - pathPtr, err := windows.UTF16PtrFromString(path) - if err != nil { - return 0, er.E(err) - } - err = windows.GetDiskFreeSpaceEx(pathPtr, &free, &total, &avail) - - return float64(avail) / float64(total), nil -} - -// AvailableDiskSpace returns the available disk space in bytes of the given -// file system for windows. -func AvailableDiskSpace(path string) (uint64, er.R) { - var free, total, avail uint64 - - pathPtr, err := windows.UTF16PtrFromString(path) - if err != nil { - return 0, er.E(err) - } - err = windows.GetDiskFreeSpaceEx(pathPtr, &free, &total, &avail) - - return avail, nil -} diff --git a/lnd/healthcheck/healthcheck.go b/lnd/healthcheck/healthcheck.go deleted file mode 100644 index bc437e85..00000000 --- a/lnd/healthcheck/healthcheck.go +++ /dev/null @@ -1,231 +0,0 @@ -// Package healthcheck contains a monitor which takes a set of liveliness checks -// which it periodically checks. If a check fails after its configured number -// of allowed call attempts, the monitor will send a request to shutdown using -// the function is is provided in its config. Checks are dispatched in their own -// goroutines so that they do not block each other. 
-package healthcheck - -import ( - "sync" - "sync/atomic" - "time" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/ticker" - "github.com/pkt-cash/pktd/pktlog/log" -) - -// Config contains configuration settings for our monitor. -type Config struct { - // Checks is a set of health checks that assert that lnd has access to - // critical resources. - Checks []*Observation - - // Shutdown should be called to request safe shutdown on failure of a - // health check. - Shutdown shutdownFunc -} - -// shutdownFunc is the signature we use for a shutdown function which allows us -// to print our reason for shutdown. -type shutdownFunc func(format string, params ...interface{}) - -// Monitor periodically checks a series of configured liveliness checks to -// ensure that lnd has access to all critical resources. -type Monitor struct { - started int32 // To be used atomically. - stopped int32 // To be used atomically. - - cfg *Config - - quit chan struct{} - wg sync.WaitGroup -} - -// NewMonitor returns a monitor with the provided config. -func NewMonitor(cfg *Config) *Monitor { - return &Monitor{ - cfg: cfg, - quit: make(chan struct{}), - } -} - -// Start launches the goroutines required to run our monitor. -func (m *Monitor) Start() er.R { - if !atomic.CompareAndSwapInt32(&m.started, 0, 1) { - return er.New("monitor already started") - } - - // Run through all of the health checks that we have configured and - // start a goroutine for each check. - for _, check := range m.cfg.Checks { - check := check - - // Skip over health checks that are disabled by setting zero - // attempts. - if check.Attempts == 0 { - log.Warnf("check: %v configured with 0 attempts, "+ - "skipping it", check.Name) - - continue - } - - m.wg.Add(1) - go func() { - defer m.wg.Done() - check.monitor(m.cfg.Shutdown, m.quit) - }() - } - - return nil -} - -// Stop sends all goroutines the signal to exit and waits for them to exit. 
-func (m *Monitor) Stop() er.R { - if !atomic.CompareAndSwapInt32(&m.stopped, 0, 1) { - return er.Errorf("monitor already stopped") - } - - close(m.quit) - m.wg.Wait() - - return nil -} - -// CreateCheck is a helper function that takes a function that produces an error -// and wraps it in a function that returns its result on an error channel. -// We do not wait group the goroutine running our checkFunc because we expect -// to be dealing with health checks that may block; if we wait group them, we -// may wait forever. Ideally future health checks will allow callers to cancel -// them early, and we can wait group this. -func CreateCheck(checkFunc func() er.R) func() chan er.R { - return func() chan er.R { - errChan := make(chan er.R, 1) - go func() { - errChan <- checkFunc() - }() - - return errChan - } -} - -// Observation represents a liveliness check that we periodically check. -type Observation struct { - // Name describes the health check. - Name string - - // Check runs the health check itself, returning an error channel that - // is expected to receive nil or an error. - Check func() chan er.R - - // Interval is a ticker which triggers running our check function. This - // ticker must be started and stopped by the observation. - Interval ticker.Ticker - - // Attempts is the number of calls we make for a single check before - // failing. - Attempts int - - // Timeout is the amount of time we allow our check function to take - // before we time it out. - Timeout time.Duration - - // Backoff is the amount of time we back off between retries for failed - // checks. - Backoff time.Duration -} - -// NewObservation creates an observation. 
-func NewObservation(name string, check func() er.R, interval, - timeout, backoff time.Duration, attempts int) *Observation { - - return &Observation{ - Name: name, - Check: CreateCheck(check), - Interval: ticker.New(interval), - Attempts: attempts, - Timeout: timeout, - Backoff: backoff, - } -} - -// String returns a string representation of an observation. -func (o *Observation) String() string { - return o.Name -} - -// monitor executes a health check every time its interval ticks until the quit -// channel signals that we should shutdown. This function is also responsible -// for starting and stopping our ticker. -func (o *Observation) monitor(shutdown shutdownFunc, quit chan struct{}) { - log.Debugf("Monitoring: %v", o) - - o.Interval.Resume() - defer o.Interval.Stop() - - for { - select { - case <-o.Interval.Ticks(): - o.retryCheck(quit, shutdown) - - // Exit if we receive the instruction to shutdown. - case <-quit: - return - } - } -} - -// retryCheck calls a check function until it succeeds, or we reach our -// configured number of attempts, waiting for our back off period between failed -// calls. If we fail to obtain a passing health check after the allowed number -// of calls, we will request shutdown. -func (o *Observation) retryCheck(quit chan struct{}, shutdown shutdownFunc) { - var count int - - for count < o.Attempts { - // Increment our call count and call the health check endpoint. - count++ - - // Wait for our check to return, timeout to elapse, or quit - // signal to be received. - var err er.R - select { - case err = <-o.Check(): - - case <-time.After(o.Timeout): - err = er.Errorf("health check: %v timed out after: "+ - "%v", o, o.Timeout) - - case <-quit: - return - } - - // If our error is nil, we have passed our health check, so we - // can exit. - if err == nil { - return - } - - // If we have reached our allowed number of attempts, this - // check has failed so we request shutdown. 
- if count == o.Attempts { - shutdown("Health check: %v failed after %v "+ - "calls", o, o.Attempts) - - return - } - - log.Infof("Health check: %v, call: %v failed with: %v, "+ - "backing off for: %v", o, count, err, o.Backoff) - - // If we are still within the number of calls allowed for this - // check, we wait for our back off period to elapse, or exit if - // we get the signal to shutdown. - select { - case <-time.After(o.Backoff): - - case <-quit: - return - } - } -} diff --git a/lnd/healthcheck/healthcheck_test.go b/lnd/healthcheck/healthcheck_test.go deleted file mode 100644 index 48514e88..00000000 --- a/lnd/healthcheck/healthcheck_test.go +++ /dev/null @@ -1,226 +0,0 @@ -package healthcheck - -import ( - "testing" - "time" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/lnd/ticker" - "github.com/stretchr/testify/require" -) - -var ( - errNonNil = er.GenericErrorType.CodeWithDetail("errNonNil", "non-nil test error") - timeout = time.Second - testTime = time.Unix(1, 2) -) - -type mockedCheck struct { - t *testing.T - errChan chan er.R -} - -// newMockCheck creates a new mock. -func newMockCheck(t *testing.T) *mockedCheck { - return &mockedCheck{ - t: t, - errChan: make(chan er.R), - } -} - -// call returns our mock's error channel, which we can send responses on. -func (m *mockedCheck) call() chan er.R { - return m.errChan -} - -// sendError sends an error into our mock's error channel, mocking the sending -// of a response from our check function. -func (m *mockedCheck) sendError(err er.R) { - select { - case m.errChan <- err: - case <-time.After(timeout): - m.t.Fatalf("could not send error: %v", err) - } -} - -// TestMonitor tests creation and triggering of a monitor with a health check. -func TestMonitor(t *testing.T) { - intervalTicker := ticker.NewForce(time.Hour) - - mock := newMockCheck(t) - shutdown := make(chan struct{}) - - // Create our config for monitoring. 
We will use a 0 back off so that - // out test does not need to wait. - cfg := &Config{ - Checks: []*Observation{ - { - Check: mock.call, - Interval: intervalTicker, - Attempts: 2, - Backoff: 0, - Timeout: time.Hour, - }, - }, - Shutdown: func(string, ...interface{}) { - shutdown <- struct{}{} - }, - } - monitor := NewMonitor(cfg) - - util.RequireNoErr(t, monitor.Start(), "could not start monitor") - - // Tick is a helper we will use to tick our interval. - tick := func() { - select { - case intervalTicker.Force <- testTime: - case <-time.After(timeout): - t.Fatal("could not tick timer") - } - } - - // Tick our timer and provide our error channel with a nil error. This - // mocks our check function succeeding on the first call. - tick() - mock.sendError(nil) - - // Now we tick our timer again. This time send a non-nil error, followed - // by a nil error. This tests our retry logic, because we allow 2 - // retries, so should recover without needing to shutdown. - tick() - mock.sendError(errNonNil.Default()) - mock.sendError(nil) - - // Finally, we tick our timer once more, and send two non-nil errors - // into our error channel. This mocks our check function failing twice. - tick() - mock.sendError(errNonNil.Default()) - mock.sendError(errNonNil.Default()) - - // Since we have failed within our allowed number of retries, we now - // expect a call to our shutdown function. - select { - case <-shutdown: - case <-time.After(timeout): - t.Fatal("expected shutdown") - } - - util.RequireNoErr(t, monitor.Stop(), "could not stop monitor") -} - -// TestRetryCheck tests our retry logic. It does not include a test for exiting -// during the back off period. -func TestRetryCheck(t *testing.T) { - tests := []struct { - name string - - // errors provides an in-order list of errors that we expect our - // health check to respond with. 
The number of errors in this - // list indicates the number of times we expect our check to - // be called, because our test will fail if we do not consume - // every error. - errors []er.R - - // attempts is the number of times we call a check before - // failing. - attempts int - - // timeout is the time we allow our check to take before we - // fail them. - timeout time.Duration - - // expectedShutdown is true if we expect a shutdown to be - // triggered because all of our calls failed. - expectedShutdown bool - }{ - { - name: "first call succeeds", - errors: []er.R{nil}, - attempts: 2, - timeout: time.Hour, - expectedShutdown: false, - }, - { - name: "first call fails", - errors: []er.R{errNonNil.Default()}, - attempts: 1, - timeout: time.Hour, - expectedShutdown: true, - }, - { - name: "fail then recover", - errors: []er.R{errNonNil.Default(), nil}, - attempts: 2, - timeout: time.Hour, - expectedShutdown: false, - }, - { - name: "always fail", - errors: []er.R{errNonNil.Default(), errNonNil.Default()}, - attempts: 2, - timeout: time.Hour, - expectedShutdown: true, - }, - { - name: "no calls", - errors: nil, - attempts: 0, - timeout: time.Hour, - expectedShutdown: false, - }, - { - name: "call times out", - errors: nil, - attempts: 1, - timeout: 1, - expectedShutdown: true, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - var shutdown bool - shutdownFunc := func(string, ...interface{}) { - shutdown = true - } - - mock := newMockCheck(t) - - // Create an observation that calls our call counting - // function. We set a zero back off so that the test - // will not wait. - observation := &Observation{ - Check: mock.call, - Attempts: test.attempts, - Timeout: test.timeout, - Backoff: 0, - } - quit := make(chan struct{}) - - // Run our retry check in a goroutine because it blocks - // on us sending errors into the mocked caller's error - // channel. 
- done := make(chan struct{}) - go func() { - observation.retryCheck(quit, shutdownFunc) - close(done) - }() - - // Prompt our mock caller to send responses for calls - // to our call function. - for _, err := range test.errors { - mock.sendError(err) - } - - // Make sure that we have finished running our retry - // check function before we start checking results. - <-done - - require.Equal(t, test.expectedShutdown, shutdown, - "unexpected shutdown state") - }) - } -} diff --git a/lnd/htlcswitch/circuit.go b/lnd/htlcswitch/circuit.go deleted file mode 100644 index c48a11f6..00000000 --- a/lnd/htlcswitch/circuit.go +++ /dev/null @@ -1,232 +0,0 @@ -package htlcswitch - -import ( - "encoding/binary" - "io" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/htlcswitch/hop" - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -// EmptyCircuitKey is a default value for an outgoing circuit key returned when -// a circuit's keystone has not been set. Note that this value is invalid for -// use as a keystone, since the outgoing channel id can never be equal to -// sourceHop. -var EmptyCircuitKey CircuitKey - -// CircuitKey is a tuple of channel ID and HTLC ID, used to uniquely identify -// HTLCs in a circuit. Circuits are identified primarily by the circuit key of -// the incoming HTLC. However, a circuit may also be referenced by its outgoing -// circuit key after the HTLC has been forwarded via the outgoing link. -type CircuitKey = channeldb.CircuitKey - -// PaymentCircuit is used by the switch as placeholder between when the -// switch makes a forwarding decision and the outgoing link determines the -// proper HTLC ID for the local log. After the outgoing HTLC ID has been -// determined, the half circuit will be converted into a full PaymentCircuit. 
-type PaymentCircuit struct { - // AddRef is the forward reference of the Add update in the incoming - // link's forwarding package. This value is set on the htlcPacket of the - // returned settle/fail so that it can be removed from disk. - AddRef channeldb.AddRef - - // Incoming is the circuit key identifying the incoming channel and htlc - // index from which this ADD originates. - Incoming CircuitKey - - // Outgoing is the circuit key identifying the outgoing channel, and the - // HTLC index that was used to forward the ADD. It will be nil if this - // circuit's keystone has not been set. - Outgoing *CircuitKey - - // PaymentHash used as unique identifier of payment. - PaymentHash [32]byte - - // IncomingAmount is the value of the HTLC from the incoming link. - IncomingAmount lnwire.MilliSatoshi - - // OutgoingAmount specifies the value of the HTLC leaving the switch, - // either as a payment or forwarded amount. - OutgoingAmount lnwire.MilliSatoshi - - // ErrorEncrypter is used to re-encrypt the onion failure before - // sending it back to the originator of the payment. - ErrorEncrypter hop.ErrorEncrypter - - // LoadedFromDisk is set true for any circuits loaded after the circuit - // map is reloaded from disk. - // - // NOTE: This value is determined implicitly during a restart. It is not - // persisted, and should never be set outside the circuit map. - LoadedFromDisk bool -} - -// HasKeystone returns true if an outgoing link has assigned this circuit's -// outgoing circuit key. -func (c *PaymentCircuit) HasKeystone() bool { - return c.Outgoing != nil -} - -// newPaymentCircuit initializes a payment circuit on the heap using the payment -// hash and an in-memory htlc packet. 
-func newPaymentCircuit(hash *[32]byte, pkt *htlcPacket) *PaymentCircuit { - var addRef channeldb.AddRef - if pkt.sourceRef != nil { - addRef = *pkt.sourceRef - } - - return &PaymentCircuit{ - AddRef: addRef, - Incoming: CircuitKey{ - ChanID: pkt.incomingChanID, - HtlcID: pkt.incomingHTLCID, - }, - PaymentHash: *hash, - IncomingAmount: pkt.incomingAmount, - OutgoingAmount: pkt.amount, - ErrorEncrypter: pkt.obfuscator, - } -} - -// makePaymentCircuit initializes a payment circuit on the stack using the -// payment hash and an in-memory htlc packet. -func makePaymentCircuit(hash *[32]byte, pkt *htlcPacket) PaymentCircuit { - var addRef channeldb.AddRef - if pkt.sourceRef != nil { - addRef = *pkt.sourceRef - } - - return PaymentCircuit{ - AddRef: addRef, - Incoming: CircuitKey{ - ChanID: pkt.incomingChanID, - HtlcID: pkt.incomingHTLCID, - }, - PaymentHash: *hash, - IncomingAmount: pkt.incomingAmount, - OutgoingAmount: pkt.amount, - ErrorEncrypter: pkt.obfuscator, - } -} - -// Encode writes a PaymentCircuit to the provided io.Writer. -func (c *PaymentCircuit) Encode(w io.Writer) er.R { - if err := c.AddRef.Encode(w); err != nil { - return err - } - - if err := c.Incoming.Encode(w); err != nil { - return err - } - - if _, err := util.Write(w, c.PaymentHash[:]); err != nil { - return err - } - - var scratch [8]byte - - binary.BigEndian.PutUint64(scratch[:], uint64(c.IncomingAmount)) - if _, err := util.Write(w, scratch[:]); err != nil { - return err - } - - binary.BigEndian.PutUint64(scratch[:], uint64(c.OutgoingAmount)) - if _, err := util.Write(w, scratch[:]); err != nil { - return err - } - - // Defaults to EncrypterTypeNone. - var encrypterType hop.EncrypterType - if c.ErrorEncrypter != nil { - encrypterType = c.ErrorEncrypter.Type() - } - - err := util.WriteBin(w, binary.BigEndian, encrypterType) - if err != nil { - return err - } - - // Skip encoding of error encrypter if this half add does not have one. 
- if encrypterType == hop.EncrypterTypeNone { - return nil - } - - return c.ErrorEncrypter.Encode(w) -} - -// Decode reads a PaymentCircuit from the provided io.Reader. -func (c *PaymentCircuit) Decode(r io.Reader) er.R { - if err := c.AddRef.Decode(r); err != nil { - return err - } - - if err := c.Incoming.Decode(r); err != nil { - return err - } - - if _, err := util.ReadFull(r, c.PaymentHash[:]); err != nil { - return err - } - - var scratch [8]byte - - if _, err := util.ReadFull(r, scratch[:]); err != nil { - return err - } - c.IncomingAmount = lnwire.MilliSatoshi( - binary.BigEndian.Uint64(scratch[:])) - - if _, err := util.ReadFull(r, scratch[:]); err != nil { - return err - } - c.OutgoingAmount = lnwire.MilliSatoshi( - binary.BigEndian.Uint64(scratch[:])) - - // Read the encrypter type used for this circuit. - var encrypterType hop.EncrypterType - err := util.ReadBin(r, binary.BigEndian, &encrypterType) - if err != nil { - return err - } - - switch encrypterType { - case hop.EncrypterTypeNone: - // No encrypter was provided, such as when the payment is - // locally initiated. - return nil - - case hop.EncrypterTypeSphinx: - // Sphinx encrypter was used as this is a forwarded HTLC. - c.ErrorEncrypter = hop.NewSphinxErrorEncrypter() - - case hop.EncrypterTypeMock: - // Test encrypter. - c.ErrorEncrypter = NewMockObfuscator() - - default: - return ErrUnknownEncrypterType.Default() - } - - return c.ErrorEncrypter.Decode(r) -} - -// InKey returns the primary identifier for the circuit corresponding to the -// incoming HTLC. -func (c *PaymentCircuit) InKey() CircuitKey { - return c.Incoming -} - -// OutKey returns the keystone identifying the outgoing link and HTLC ID. If the -// circuit hasn't been completed, this method returns an EmptyKeystone, which is -// an invalid outgoing circuit key. Only call this method if HasKeystone returns -// true. 
-func (c *PaymentCircuit) OutKey() CircuitKey { - if c.Outgoing != nil { - return *c.Outgoing - } - - return EmptyCircuitKey -} diff --git a/lnd/htlcswitch/circuit_map.go b/lnd/htlcswitch/circuit_map.go deleted file mode 100644 index 8cdb4f9c..00000000 --- a/lnd/htlcswitch/circuit_map.go +++ /dev/null @@ -1,956 +0,0 @@ -package htlcswitch - -import ( - "bytes" - "fmt" - "sync" - - "github.com/davecgh/go-spew/spew" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/lnd/htlcswitch/hop" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/pktlog/log" -) - -var ( - // ErrCorruptedCircuitMap indicates that the on-disk bucketing structure - // has altered since the circuit map instance was initialized. - ErrCorruptedCircuitMap = Err.CodeWithDetail("ErrCorruptedCircuitMap", "circuit map has been corrupted") - - // ErrCircuitNotInHashIndex indicates that a particular circuit did not - // appear in the in-memory hash index. - ErrCircuitNotInHashIndex = Err.CodeWithDetail("ErrCircuitNotInHashIndex", "payment circuit not found in "+ - "hash index") - - // ErrUnknownCircuit signals that circuit could not be removed from the - // map because it was not found. - ErrUnknownCircuit = Err.CodeWithDetail("ErrUnknownCircuit", "unknown payment circuit") - - // ErrCircuitClosing signals that an htlc has already closed this - // circuit in-memory. - ErrCircuitClosing = Err.CodeWithDetail("ErrCircuitClosing", "circuit has already been closed") - - // ErrDuplicateCircuit signals that this circuit was previously - // added. - ErrDuplicateCircuit = Err.CodeWithDetail("ErrDuplicateCircuit", "duplicate circuit add") - - // ErrUnknownKeystone signals that no circuit was found using the - // outgoing circuit key. 
- ErrUnknownKeystone = Err.CodeWithDetail("ErrUnknownKeystone", "unknown circuit keystone") - - // ErrDuplicateKeystone signals that this circuit was previously - // assigned a keystone. - ErrDuplicateKeystone = Err.CodeWithDetail("ErrDuplicateKeystone", "cannot add duplicate keystone") -) - -// CircuitModifier is a common interface used by channel links to modify the -// contents of the circuit map maintained by the switch. -type CircuitModifier interface { - // OpenCircuits preemptively records a batch keystones that will mark - // currently pending circuits as open. These changes can be rolled back - // on restart if the outgoing Adds do not make it into a commitment - // txn. - OpenCircuits(...Keystone) er.R - - // TrimOpenCircuits removes a channel's open channels with htlc indexes - // above `start`. - TrimOpenCircuits(chanID lnwire.ShortChannelID, start uint64) er.R - - // DeleteCircuits removes the incoming circuit key to remove all - // persistent references to a circuit. Returns a ErrUnknownCircuit if - // any of the incoming keys are not known. - DeleteCircuits(inKeys ...CircuitKey) er.R -} - -// CircuitLookup is a common interface used to lookup information that is stored -// in the circuit map. -type CircuitLookup interface { - // LookupCircuit queries the circuit map for the circuit identified by - // inKey. - LookupCircuit(inKey CircuitKey) *PaymentCircuit - - // LookupOpenCircuit queries the circuit map for a circuit identified - // by its outgoing circuit key. - LookupOpenCircuit(outKey CircuitKey) *PaymentCircuit -} - -// CircuitFwdActions represents the forwarding decision made by the circuit -// map, and is returned from CommitCircuits. The sequence of circuits provided -// to CommitCircuits is split into three sub-sequences, allowing the caller to -// do an in-order scan, comparing the head of each subsequence, to determine -// the decision made by the circuit map. 
-type CircuitFwdActions struct { - // Adds is the subsequence of circuits that were successfully committed - // in the circuit map. - Adds []*PaymentCircuit - - // Drops is the subsequence of circuits for which no action should be - // done. - Drops []*PaymentCircuit - - // Fails is the subsequence of circuits that should be failed back by - // the calling link. - Fails []*PaymentCircuit -} - -// CircuitMap is an interface for managing the construction and teardown of -// payment circuits used by the switch. -type CircuitMap interface { - CircuitModifier - - CircuitLookup - - // CommitCircuits attempts to add the given circuits to the circuit - // map. The list of circuits is split into three distinct - // sub-sequences, corresponding to adds, drops, and fails. Adds should - // be forwarded to the switch, while fails should be failed back - // locally within the calling link. - CommitCircuits(circuit ...*PaymentCircuit) (*CircuitFwdActions, er.R) - - // CloseCircuit marks the circuit identified by `outKey` as closing - // in-memory, which prevents duplicate settles/fails from completing an - // open circuit twice. - CloseCircuit(outKey CircuitKey) (*PaymentCircuit, er.R) - - // FailCircuit is used by locally failed HTLCs to mark the circuit - // identified by `inKey` as closing in-memory, which prevents duplicate - // settles/fails from being accepted for the same circuit. - FailCircuit(inKey CircuitKey) (*PaymentCircuit, er.R) - - // LookupByPaymentHash queries the circuit map and returns all open - // circuits that use the given payment hash. - LookupByPaymentHash(hash [32]byte) []*PaymentCircuit - - // NumPending returns the total number of active circuits added by - // CommitCircuits. - NumPending() int - - // NumOpen returns the number of circuits with HTLCs that have been - // forwarded via an outgoing link. - NumOpen() int -} - -var ( - // circuitAddKey is the key used to retrieve the bucket containing - // payment circuits. 
A circuit records information about how to return - // a packet to the source link, potentially including an error - // encrypter for applying this hop's encryption to the payload in the - // reverse direction. - circuitAddKey = []byte("circuit-adds") - - // circuitKeystoneKey is used to retrieve the bucket containing circuit - // keystones, which are set in place once a forwarded packet is - // assigned an index on an outgoing commitment txn. - circuitKeystoneKey = []byte("circuit-keystones") -) - -// circuitMap is a data structure that implements thread safe, persistent -// storage of circuit routing information. The switch consults a circuit map to -// determine where to forward returning HTLC update messages. Circuits are -// always identifiable by their incoming CircuitKey, in addition to their -// outgoing CircuitKey if the circuit is fully-opened. -type circuitMap struct { - cfg *CircuitMapConfig - - mtx sync.RWMutex - - // pending is an in-memory mapping of all half payment circuits, and is - // kept in sync with the on-disk contents of the circuit map. - pending map[CircuitKey]*PaymentCircuit - - // opened is an in-memory mapping of all full payment circuits, which - // is also synchronized with the persistent state of the circuit map. - opened map[CircuitKey]*PaymentCircuit - - // closed is an in-memory set of circuits for which the switch has - // received a settle or fail. This precedes the actual deletion of a - // circuit from disk. - closed map[CircuitKey]struct{} - - // hashIndex is a volatile index that facilitates fast queries by - // payment hash against the contents of circuits. This index can be - // reconstructed entirely from the set of persisted full circuits on - // startup. - hashIndex map[[32]byte]map[CircuitKey]struct{} -} - -// CircuitMapConfig houses the critical interfaces and references necessary to -// parameterize an instance of circuitMap. 
-type CircuitMapConfig struct { - // DB provides the persistent storage engine for the circuit map. - // TODO(conner): create abstraction to allow for the substitution of - // other persistence engines. - DB *channeldb.DB - - // ExtractErrorEncrypter derives the shared secret used to encrypt - // errors from the obfuscator's ephemeral public key. - ExtractErrorEncrypter hop.ErrorEncrypterExtracter -} - -// NewCircuitMap creates a new instance of the circuitMap. -func NewCircuitMap(cfg *CircuitMapConfig) (CircuitMap, er.R) { - cm := &circuitMap{ - cfg: cfg, - } - - // Initialize the on-disk buckets used by the circuit map. - if err := cm.initBuckets(); err != nil { - return nil, err - } - - // Load any previously persisted circuit into back into memory. - if err := cm.restoreMemState(); err != nil { - return nil, err - } - - // Trim any keystones that were not committed in an outgoing commit txn. - // - // NOTE: This operation will be applied to the persistent state of all - // active channels. Therefore, it must be called before any links are - // created to avoid interfering with normal operation. - if err := cm.trimAllOpenCircuits(); err != nil { - return nil, err - } - - return cm, nil -} - -// initBuckets ensures that the primary buckets used by the circuit are -// initialized so that we can assume their existence after startup. -func (cm *circuitMap) initBuckets() er.R { - return kvdb.Update(cm.cfg.DB, func(tx kvdb.RwTx) er.R { - _, err := tx.CreateTopLevelBucket(circuitKeystoneKey) - if err != nil { - return err - } - - _, err = tx.CreateTopLevelBucket(circuitAddKey) - return err - }, func() {}) -} - -// restoreMemState loads the contents of the half circuit and full circuit -// buckets from disk and reconstructs the in-memory representation of the -// circuit map. Afterwards, the state of the hash index is reconstructed using -// the recovered set of full circuits. 
This method will also remove any stray -// keystones, which are those that appear fully-opened, but have no pending -// circuit related to the intended incoming link. -func (cm *circuitMap) restoreMemState() er.R { - log.Infof("Restoring in-memory circuit state from disk") - - var ( - opened map[CircuitKey]*PaymentCircuit - pending map[CircuitKey]*PaymentCircuit - ) - - if err := kvdb.Update(cm.cfg.DB, func(tx kvdb.RwTx) er.R { - // Restore any of the circuits persisted in the circuit bucket - // back into memory. - circuitBkt := tx.ReadWriteBucket(circuitAddKey) - if circuitBkt == nil { - return ErrCorruptedCircuitMap.Default() - } - - if err := circuitBkt.ForEach(func(_, v []byte) er.R { - circuit, err := cm.decodeCircuit(v) - if err != nil { - return err - } - - circuit.LoadedFromDisk = true - pending[circuit.Incoming] = circuit - - return nil - }); err != nil { - return err - } - - // Furthermore, load the keystone bucket and resurrect the - // keystones used in any open circuits. - keystoneBkt := tx.ReadWriteBucket(circuitKeystoneKey) - if keystoneBkt == nil { - return ErrCorruptedCircuitMap.Default() - } - - var strayKeystones []Keystone - if err := keystoneBkt.ForEach(func(k, v []byte) er.R { - var ( - inKey CircuitKey - outKey = &CircuitKey{} - ) - - // Decode the incoming and outgoing circuit keys. - if err := inKey.SetBytes(v); err != nil { - return err - } - if err := outKey.SetBytes(k); err != nil { - return err - } - - // Retrieve the pending circuit, set its keystone, then - // add it to the opened map. - circuit, ok := pending[inKey] - if ok { - circuit.Outgoing = outKey - opened[*outKey] = circuit - } else { - strayKeystones = append(strayKeystones, Keystone{ - InKey: inKey, - OutKey: *outKey, - }) - } - - return nil - }); err != nil { - return err - } - - // If any stray keystones were found, we'll proceed to prune - // them from the circuit map's persistent storage. 
This may - // manifest on older nodes that had updated channels before - // their short channel id was set properly. We believe this - // issue has been fixed, though this will allow older nodes to - // recover without additional intervention. - for _, strayKeystone := range strayKeystones { - // As a precaution, we will only cleanup keystones - // related to locally-initiated payments. If a - // documented case of stray keystones emerges for - // forwarded payments, this check should be removed, but - // with extreme caution. - if strayKeystone.OutKey.ChanID != hop.Source { - continue - } - - log.Infof("Removing stray keystone: %v", strayKeystone) - err := keystoneBkt.Delete(strayKeystone.OutKey.Bytes()) - if err != nil { - return err - } - } - - return nil - - }, func() { - opened = make(map[CircuitKey]*PaymentCircuit) - pending = make(map[CircuitKey]*PaymentCircuit) - }); err != nil { - return err - } - - cm.pending = pending - cm.opened = opened - cm.closed = make(map[CircuitKey]struct{}) - - log.Infof("Payment circuits loaded: num_pending=%v, num_open=%v", - len(pending), len(opened)) - - // Finally, reconstruct the hash index by running through our set of - // open circuits. - cm.hashIndex = make(map[[32]byte]map[CircuitKey]struct{}) - for _, circuit := range opened { - cm.addCircuitToHashIndex(circuit) - } - - return nil -} - -// decodeCircuit reconstructs an in-memory payment circuit from a byte slice. -// The byte slice is assumed to have been generated by the circuit's Encode -// method. If the decoding is successful, the onion obfuscator will be -// reextracted, since it is not stored in plaintext on disk. -func (cm *circuitMap) decodeCircuit(v []byte) (*PaymentCircuit, er.R) { - var circuit = &PaymentCircuit{} - - circuitReader := bytes.NewReader(v) - if err := circuit.Decode(circuitReader); err != nil { - return nil, err - } - - // If the error encrypter is nil, this is locally-source payment so - // there is no encrypter. 
- if circuit.ErrorEncrypter == nil { - return circuit, nil - } - - // Otherwise, we need to reextract the encrypter, so that the shared - // secret is rederived from what was decoded. - err := circuit.ErrorEncrypter.Reextract( - cm.cfg.ExtractErrorEncrypter, - ) - if err != nil { - return nil, err - } - - return circuit, nil -} - -// trimAllOpenCircuits reads the set of active channels from disk and trims -// keystones for any non-pending channels using the next unallocated htlc index. -// This method is intended to be called on startup. Each link will also trim -// it's own circuits upon startup. -// -// NOTE: This operation will be applied to the persistent state of all active -// channels. Therefore, it must be called before any links are created to avoid -// interfering with normal operation. -func (cm *circuitMap) trimAllOpenCircuits() er.R { - activeChannels, err := cm.cfg.DB.FetchAllOpenChannels() - if err != nil { - return err - } - - for _, activeChannel := range activeChannels { - if activeChannel.IsPending { - continue - } - - // First, skip any channels that have not been assigned their - // final channel identifier, otherwise we would try to trim - // htlcs belonging to the all-zero, hop.Source ID. - chanID := activeChannel.ShortChanID() - if chanID == hop.Source { - continue - } - - // Next, retrieve the next unallocated htlc index, which bounds - // the cutoff of confirmed htlc indexes. - start, err := activeChannel.NextLocalHtlcIndex() - if err != nil { - return err - } - - // Finally, remove all pending circuits above at or above the - // next unallocated local htlc indexes. This has the effect of - // reverting any circuits that have either not been locked in, - // or had not been included in a pending commitment. - err = cm.TrimOpenCircuits(chanID, start) - if err != nil { - return err - } - } - - return nil -} - -// TrimOpenCircuits removes a channel's keystones above the short chan id's -// highest committed htlc index. 
This has the effect of returning those -// circuits to a half-open state. Since opening of circuits is done in advance -// of actually committing the Add htlcs into a commitment txn, this allows -// circuits to be opened preemptively, since we can roll them back after any -// failures. -func (cm *circuitMap) TrimOpenCircuits(chanID lnwire.ShortChannelID, - start uint64) er.R { - - log.Infof("Trimming open circuits for chan_id=%v, start_htlc_id=%v", - chanID, start) - - var trimmedOutKeys []CircuitKey - - // Scan forward from the last unacked htlc id, stopping as soon as we - // don't find any more. Outgoing htlc id's must be assigned in order, - // so there should never be disjoint segments of keystones to trim. - cm.mtx.Lock() - for i := start; ; i++ { - outKey := CircuitKey{ - ChanID: chanID, - HtlcID: i, - } - - circuit, ok := cm.opened[outKey] - if !ok { - break - } - - circuit.Outgoing = nil - delete(cm.opened, outKey) - trimmedOutKeys = append(trimmedOutKeys, outKey) - cm.removeCircuitFromHashIndex(circuit) - } - cm.mtx.Unlock() - - if len(trimmedOutKeys) == 0 { - return nil - } - - return kvdb.Update(cm.cfg.DB, func(tx kvdb.RwTx) er.R { - keystoneBkt := tx.ReadWriteBucket(circuitKeystoneKey) - if keystoneBkt == nil { - return ErrCorruptedCircuitMap.Default() - } - - for _, outKey := range trimmedOutKeys { - err := keystoneBkt.Delete(outKey.Bytes()) - if err != nil { - return err - } - } - - return nil - }, func() {}) -} - -// LookupByHTLC looks up the payment circuit by the outgoing channel and HTLC -// IDs. Returns nil if there is no such circuit. -func (cm *circuitMap) LookupCircuit(inKey CircuitKey) *PaymentCircuit { - cm.mtx.RLock() - defer cm.mtx.RUnlock() - - return cm.pending[inKey] -} - -// LookupOpenCircuit searches for the circuit identified by its outgoing circuit -// key. 
-func (cm *circuitMap) LookupOpenCircuit(outKey CircuitKey) *PaymentCircuit { - cm.mtx.RLock() - defer cm.mtx.RUnlock() - - return cm.opened[outKey] -} - -// LookupByPaymentHash looks up and returns any payment circuits with a given -// payment hash. -func (cm *circuitMap) LookupByPaymentHash(hash [32]byte) []*PaymentCircuit { - cm.mtx.RLock() - defer cm.mtx.RUnlock() - - var circuits []*PaymentCircuit - if circuitSet, ok := cm.hashIndex[hash]; ok { - // Iterate over the outgoing circuit keys found with this hash, - // and retrieve the circuit from the opened map. - circuits = make([]*PaymentCircuit, 0, len(circuitSet)) - for key := range circuitSet { - if circuit, ok := cm.opened[key]; ok { - circuits = append(circuits, circuit) - } - } - } - - return circuits -} - -// CommitCircuits accepts any number of circuits and persistently adds them to -// the switch's circuit map. The method returns a list of circuits that had not -// been seen prior by the switch. A link should only forward HTLCs corresponding -// to the returned circuits to the switch. -// -// NOTE: This method uses batched writes to improve performance, gains will only -// be realized if it is called concurrently from separate goroutines. -func (cm *circuitMap) CommitCircuits(circuits ...*PaymentCircuit) ( - *CircuitFwdActions, er.R) { - - inKeys := make([]CircuitKey, 0, len(circuits)) - for _, circuit := range circuits { - inKeys = append(inKeys, circuit.Incoming) - } - - log.Tracef("Committing fresh circuits: %v", log.C(func() string { - return spew.Sdump(inKeys) - })) - - actions := &CircuitFwdActions{} - - // If an empty list was passed, return early to avoid grabbing the lock. - if len(circuits) == 0 { - return actions, nil - } - - // First, we reconcile the provided circuits with our set of pending - // circuits to construct a set of new circuits that need to be written - // to disk. The circuit's pointer is stored so that we only permit this - // exact circuit to be forwarded through the switch. 
If a circuit is - // already pending, the htlc will be reforwarded by the switch. - // - // NOTE: We track an additional addFails subsequence, which permits us - // to fail back all packets that weren't dropped if we encounter an - // error when committing the circuits. - cm.mtx.Lock() - var adds, drops, fails, addFails []*PaymentCircuit - for _, circuit := range circuits { - inKey := circuit.InKey() - if foundCircuit, ok := cm.pending[inKey]; ok { - switch { - - // This circuit has a keystone, it's waiting for a - // response from the remote peer on the outgoing link. - // Drop it like it's hot, ensure duplicates get caught. - case foundCircuit.HasKeystone(): - drops = append(drops, circuit) - - // If no keystone is set and the switch has not been - // restarted, the corresponding packet should still be - // in the outgoing link's mailbox. It will be delivered - // if it comes online before the switch goes down. - // - // NOTE: Dropping here prevents a flapping, incoming - // link from failing a duplicate add while it is still - // in the server's memory mailboxes. - case !foundCircuit.LoadedFromDisk: - drops = append(drops, circuit) - - // Otherwise, the in-mem packet has been lost due to a - // restart. It is now safe to send back a failure along - // the incoming link. The incoming link should be able - // detect and ignore duplicate packets of this type. - default: - fails = append(fails, circuit) - addFails = append(addFails, circuit) - } - - continue - } - - cm.pending[inKey] = circuit - adds = append(adds, circuit) - addFails = append(addFails, circuit) - } - cm.mtx.Unlock() - - // If all circuits are dropped or failed, we are done. - if len(adds) == 0 { - actions.Drops = drops - actions.Fails = fails - return actions, nil - } - - // Now, optimistically serialize the circuits to add. 
- var bs = make([]bytes.Buffer, len(adds)) - for i, circuit := range adds { - if err := circuit.Encode(&bs[i]); err != nil { - actions.Drops = drops - actions.Fails = addFails - return actions, err - } - } - - // Write the entire batch of circuits to the persistent circuit bucket - // using bolt's Batch write. This method must be called from multiple, - // distinct goroutines to have any impact on performance. - err := kvdb.Batch(cm.cfg.DB.Backend, func(tx kvdb.RwTx) er.R { - circuitBkt := tx.ReadWriteBucket(circuitAddKey) - if circuitBkt == nil { - return ErrCorruptedCircuitMap.Default() - } - - for i, circuit := range adds { - inKeyBytes := circuit.InKey().Bytes() - circuitBytes := bs[i].Bytes() - - err := circuitBkt.Put(inKeyBytes, circuitBytes) - if err != nil { - return err - } - } - - return nil - }) - - // Return if the write succeeded. - if err == nil { - actions.Adds = adds - actions.Drops = drops - actions.Fails = fails - return actions, nil - } - - // Otherwise, rollback the circuits added to the pending set if the - // write failed. - cm.mtx.Lock() - for _, circuit := range adds { - delete(cm.pending, circuit.InKey()) - } - cm.mtx.Unlock() - - // Since our write failed, we will return the dropped packets and mark - // all other circuits as failed. - actions.Drops = drops - actions.Fails = addFails - - return actions, err -} - -// Keystone is a tuple binding an incoming and outgoing CircuitKey. Keystones -// are preemptively written by an outgoing link before signing a new commitment -// state, and cements which HTLCs we are awaiting a response from a remote -// peer. -type Keystone struct { - InKey CircuitKey - OutKey CircuitKey -} - -// String returns a human readable description of the Keystone. -func (k *Keystone) String() string { - return fmt.Sprintf("%s --> %s", k.InKey, k.OutKey) -} - -// OpenCircuits sets the outgoing circuit key for the circuit identified by -// inKey, persistently marking the circuit as opened. 
After the changes have -// been persisted, the circuit map's in-memory indexes are updated so that this -// circuit can be queried using LookupByKeystone or LookupByPaymentHash. -func (cm *circuitMap) OpenCircuits(keystones ...Keystone) er.R { - if len(keystones) == 0 { - return nil - } - - log.Tracef("Opening finalized circuits: %v", log.C(func() string { - return spew.Sdump(keystones) - })) - - // Check that all keystones correspond to committed-but-unopened - // circuits. - cm.mtx.RLock() - openedCircuits := make([]*PaymentCircuit, 0, len(keystones)) - for _, ks := range keystones { - if _, ok := cm.opened[ks.OutKey]; ok { - cm.mtx.RUnlock() - return ErrDuplicateKeystone.Default() - } - - circuit, ok := cm.pending[ks.InKey] - if !ok { - cm.mtx.RUnlock() - return ErrUnknownCircuit.Default() - } - - openedCircuits = append(openedCircuits, circuit) - } - cm.mtx.RUnlock() - - err := kvdb.Update(cm.cfg.DB, func(tx kvdb.RwTx) er.R { - // Now, load the circuit bucket to which we will write the - // already serialized circuit. - keystoneBkt := tx.ReadWriteBucket(circuitKeystoneKey) - if keystoneBkt == nil { - return ErrCorruptedCircuitMap.Default() - } - - for _, ks := range keystones { - outBytes := ks.OutKey.Bytes() - inBytes := ks.InKey.Bytes() - err := keystoneBkt.Put(outBytes, inBytes) - if err != nil { - return err - } - } - - return nil - }, func() {}) - - if err != nil { - return err - } - - cm.mtx.Lock() - for i, circuit := range openedCircuits { - ks := keystones[i] - - // Since our persistent operation was successful, we can now - // modify the in memory representations. Set the outgoing - // circuit key on our pending circuit, add the same circuit to - // set of opened circuits, and add this circuit to the hash - // index. 
- circuit.Outgoing = &CircuitKey{} - *circuit.Outgoing = ks.OutKey - - cm.opened[ks.OutKey] = circuit - cm.addCircuitToHashIndex(circuit) - } - cm.mtx.Unlock() - - return nil -} - -// addCirciutToHashIndex inserts a circuit into the circuit map's hash index, so -// that it can be queried using LookupByPaymentHash. -func (cm *circuitMap) addCircuitToHashIndex(c *PaymentCircuit) { - if _, ok := cm.hashIndex[c.PaymentHash]; !ok { - cm.hashIndex[c.PaymentHash] = make(map[CircuitKey]struct{}) - } - cm.hashIndex[c.PaymentHash][c.OutKey()] = struct{}{} -} - -// FailCircuit marks the circuit identified by `inKey` as closing in-memory, -// which prevents duplicate settles/fails from completing an open circuit twice. -func (cm *circuitMap) FailCircuit(inKey CircuitKey) (*PaymentCircuit, er.R) { - - cm.mtx.Lock() - defer cm.mtx.Unlock() - - circuit, ok := cm.pending[inKey] - if !ok { - return nil, ErrUnknownCircuit.Default() - } - - _, ok = cm.closed[inKey] - if ok { - return nil, ErrCircuitClosing.Default() - } - - cm.closed[inKey] = struct{}{} - - return circuit, nil -} - -// CloseCircuit marks the circuit identified by `outKey` as closing in-memory, -// which prevents duplicate settles/fails from completing an open -// circuit twice. -func (cm *circuitMap) CloseCircuit(outKey CircuitKey) (*PaymentCircuit, er.R) { - - cm.mtx.Lock() - defer cm.mtx.Unlock() - - circuit, ok := cm.opened[outKey] - if !ok { - return nil, ErrUnknownCircuit.Default() - } - - _, ok = cm.closed[circuit.Incoming] - if ok { - return nil, ErrCircuitClosing.Default() - } - - cm.closed[circuit.Incoming] = struct{}{} - - return circuit, nil -} - -// DeleteCircuits destroys the target circuits by removing them from the circuit -// map, additionally removing the circuits' keystones if any HTLCs were -// forwarded through an outgoing link. The circuits should be identified by its -// incoming circuit key. If a given circuit is not found in the circuit map, it -// will be ignored from the query. 
This would typically indicate that the -// circuit was already cleaned up at a different point in time. -func (cm *circuitMap) DeleteCircuits(inKeys ...CircuitKey) er.R { - - log.Tracef("Deleting resolved circuits: %v", log.C(func() string { - return spew.Sdump(inKeys) - })) - - var ( - closingCircuits = make(map[CircuitKey]struct{}) - removedCircuits = make(map[CircuitKey]*PaymentCircuit) - ) - - cm.mtx.Lock() - // Remove any references to the circuits from memory, keeping track of - // which circuits were removed, and which ones had been marked closed. - // This can be used to restore these entries later if the persistent - // removal fails. - for _, inKey := range inKeys { - circuit, ok := cm.pending[inKey] - if !ok { - continue - } - delete(cm.pending, inKey) - - if _, ok := cm.closed[inKey]; ok { - closingCircuits[inKey] = struct{}{} - delete(cm.closed, inKey) - } - - if circuit.HasKeystone() { - delete(cm.opened, circuit.OutKey()) - cm.removeCircuitFromHashIndex(circuit) - } - - removedCircuits[inKey] = circuit - } - cm.mtx.Unlock() - - err := kvdb.Batch(cm.cfg.DB.Backend, func(tx kvdb.RwTx) er.R { - for _, circuit := range removedCircuits { - // If this htlc made it to an outgoing link, load the - // keystone bucket from which we will remove the - // outgoing circuit key. - if circuit.HasKeystone() { - keystoneBkt := tx.ReadWriteBucket(circuitKeystoneKey) - if keystoneBkt == nil { - return ErrCorruptedCircuitMap.Default() - } - - outKey := circuit.OutKey() - - err := keystoneBkt.Delete(outKey.Bytes()) - if err != nil { - return err - } - } - - // Remove the circuit itself based on the incoming - // circuit key. - circuitBkt := tx.ReadWriteBucket(circuitAddKey) - if circuitBkt == nil { - return ErrCorruptedCircuitMap.Default() - } - - inKey := circuit.InKey() - if err := circuitBkt.Delete(inKey.Bytes()); err != nil { - return err - } - } - - return nil - }) - - // Return if the write succeeded. 
- if err == nil { - return nil - } - - // If the persistent changes failed, restore the circuit map to it's - // previous state. - cm.mtx.Lock() - for inKey, circuit := range removedCircuits { - cm.pending[inKey] = circuit - - if _, ok := closingCircuits[inKey]; ok { - cm.closed[inKey] = struct{}{} - } - - if circuit.HasKeystone() { - cm.opened[circuit.OutKey()] = circuit - cm.addCircuitToHashIndex(circuit) - } - } - cm.mtx.Unlock() - - return err -} - -// removeCircuitFromHashIndex removes the given circuit from the hash index, -// pruning any unnecessary memory optimistically. -func (cm *circuitMap) removeCircuitFromHashIndex(c *PaymentCircuit) { - // Locate bucket containing this circuit's payment hashes. - circuitsWithHash, ok := cm.hashIndex[c.PaymentHash] - if !ok { - return - } - - outKey := c.OutKey() - - // Remove this circuit from the set of circuitsWithHash. - delete(circuitsWithHash, outKey) - - // Prune the payment hash bucket if no other entries remain. - if len(circuitsWithHash) == 0 { - delete(cm.hashIndex, c.PaymentHash) - } -} - -// NumPending returns the number of active circuits added to the circuit map. -func (cm *circuitMap) NumPending() int { - cm.mtx.RLock() - defer cm.mtx.RUnlock() - - return len(cm.pending) -} - -// NumOpen returns the number of circuits that have been opened by way of -// setting their keystones. This is the number of HTLCs that are waiting for a -// settle/fail response from a remote peer. 
-func (cm *circuitMap) NumOpen() int { - cm.mtx.RLock() - defer cm.mtx.RUnlock() - - return len(cm.opened) -} diff --git a/lnd/htlcswitch/circuit_test.go b/lnd/htlcswitch/circuit_test.go deleted file mode 100644 index fb20808c..00000000 --- a/lnd/htlcswitch/circuit_test.go +++ /dev/null @@ -1,1387 +0,0 @@ -package htlcswitch_test - -import ( - "bytes" - "io/ioutil" - "reflect" - "testing" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - bitcoinCfg "github.com/pkt-cash/pktd/chaincfg" - sphinx "github.com/pkt-cash/pktd/lightning-onion" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/htlcswitch" - "github.com/pkt-cash/pktd/lnd/htlcswitch/hop" - "github.com/pkt-cash/pktd/lnd/keychain" - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -var ( - hash1 = [32]byte{0x01} - hash2 = [32]byte{0x02} - hash3 = [32]byte{0x03} - - // sphinxPrivKey is the private key given to freshly created sphinx - // routers. - sphinxPrivKey *btcec.PrivateKey - - // testEphemeralKey is the ephemeral key that will be extracted to - // create onion obfuscators. - testEphemeralKey *btcec.PublicKey - - // testExtracter is a precomputed extraction of testEphemeralKey, using - // the sphinxPrivKey. - testExtracter *hop.SphinxErrorEncrypter -) - -func init() { - // Generate a fresh key for our sphinx router. - var err er.R - sphinxPrivKey, err = btcec.NewPrivateKey(btcec.S256()) - if err != nil { - panic(err) - } - - // And another, whose public key will serve as the test ephemeral key. 
- testEphemeralPriv, err := btcec.NewPrivateKey(btcec.S256()) - if err != nil { - panic(err) - } - testEphemeralKey = testEphemeralPriv.PubKey() - - // Finally, properly initialize the test extracter - initTestExtracter() -} - -// initTestExtracter spins up a new onion processor specifically for the purpose -// of generating our testExtracter, which should be derived from the -// testEphemeralKey, and which randomly-generated key is used to init the sphinx -// router. -// -// NOTE: This should be called in init(), after testEphemeralKey has been -// properly initialized. -func initTestExtracter() { - onionProcessor := newOnionProcessor(nil) - defer onionProcessor.Stop() - - obfuscator, _ := onionProcessor.ExtractErrorEncrypter( - testEphemeralKey, - ) - - sphinxExtracter, ok := obfuscator.(*hop.SphinxErrorEncrypter) - if !ok { - panic("did not extract sphinx error encrypter") - } - - testExtracter = sphinxExtracter - - // We also set this error extracter on startup, otherwise it will be nil - // at compile-time. - halfCircuitTests[2].encrypter = testExtracter -} - -// newOnionProcessor creates starts a new htlcswitch.OnionProcessor using a temp -// db and no garbage collection. -func newOnionProcessor(t *testing.T) *hop.OnionProcessor { - sphinxRouter := sphinx.NewRouter( - &keychain.PrivKeyECDH{PrivKey: sphinxPrivKey}, - &bitcoinCfg.SimNetParams, sphinx.NewMemoryReplayLog(), - ) - - if err := sphinxRouter.Start(); err != nil { - t.Fatalf("unable to start sphinx router: %v", err) - } - - return hop.NewOnionProcessor(sphinxRouter) -} - -// newCircuitMap creates a new htlcswitch.CircuitMap using a temp db and a -// fresh sphinx router. 
-func newCircuitMap(t *testing.T) (*htlcswitch.CircuitMapConfig, - htlcswitch.CircuitMap) { - - onionProcessor := newOnionProcessor(t) - - circuitMapCfg := &htlcswitch.CircuitMapConfig{ - DB: makeCircuitDB(t, ""), - ExtractErrorEncrypter: onionProcessor.ExtractErrorEncrypter, - } - - circuitMap, err := htlcswitch.NewCircuitMap(circuitMapCfg) - if err != nil { - t.Fatalf("unable to create persistent circuit map: %v", err) - } - - return circuitMapCfg, circuitMap -} - -// TestCircuitMapInit is a quick check to ensure that we can start and restore -// the circuit map, as this will be used extensively in this suite. -func TestCircuitMapInit(t *testing.T) { - t.Parallel() - - cfg, _ := newCircuitMap(t) - restartCircuitMap(t, cfg) -} - -var halfCircuitTests = []struct { - hash [32]byte - inValue btcutil.Amount - outValue btcutil.Amount - chanID lnwire.ShortChannelID - htlcID uint64 - encrypter hop.ErrorEncrypter -}{ - { - hash: hash1, - inValue: 0, - outValue: 1000, - chanID: lnwire.NewShortChanIDFromInt(1), - htlcID: 1, - encrypter: nil, - }, - { - hash: hash2, - inValue: 2100, - outValue: 2000, - chanID: lnwire.NewShortChanIDFromInt(2), - htlcID: 2, - encrypter: htlcswitch.NewMockObfuscator(), - }, - { - hash: hash3, - inValue: 10000, - outValue: 9000, - chanID: lnwire.NewShortChanIDFromInt(3), - htlcID: 3, - // NOTE: The value of testExtracter is nil at compile-time, it - // is fully-initialized in initTestExtracter, which should - // repopulate this encrypter. - encrypter: testExtracter, - }, -} - -// TestHalfCircuitSerialization checks that the half circuits can be properly -// encoded and decoded properly. A critical responsibility of this test is to -// verify that the various ErrorEncrypter implementations can be properly -// reconstructed from a serialized half circuit. 
-func TestHalfCircuitSerialization(t *testing.T) { - t.Parallel() - - onionProcessor := newOnionProcessor(t) - - for i, test := range halfCircuitTests { - circuit := &htlcswitch.PaymentCircuit{ - PaymentHash: test.hash, - IncomingAmount: lnwire.NewMSatFromSatoshis(test.inValue), - OutgoingAmount: lnwire.NewMSatFromSatoshis(test.outValue), - Incoming: htlcswitch.CircuitKey{ - ChanID: test.chanID, - HtlcID: test.htlcID, - }, - ErrorEncrypter: test.encrypter, - } - - // Write the half circuit to our buffer. - var b bytes.Buffer - if err := circuit.Encode(&b); err != nil { - t.Fatalf("unable to encode half payment circuit test=%d: %v", i, err) - } - - // Then try to decode the serialized bytes. - var circuit2 htlcswitch.PaymentCircuit - circuitReader := bytes.NewReader(b.Bytes()) - if err := circuit2.Decode(circuitReader); err != nil { - t.Fatalf("unable to decode half payment circuit test=%d: %v", i, err) - } - - // If the error encrypter is initialized, we will need to - // reextract it from it's decoded state, as this requires an - // ECDH with the onion processor's private key. For mock error - // encrypters, this will be a NOP. - if circuit2.ErrorEncrypter != nil { - err := circuit2.ErrorEncrypter.Reextract( - onionProcessor.ExtractErrorEncrypter, - ) - if err != nil { - t.Fatalf("unable to reextract sphinx error "+ - "encrypter: %v", err) - } - } - - // Reconstructed half circuit should match the original. 
- if !equalIgnoreLFD(circuit, &circuit2) { - t.Fatalf("unexpected half circuit test=%d, want %v, got %v", - i, circuit, circuit2) - } - } -} - -func TestCircuitMapPersistence(t *testing.T) { - t.Parallel() - - var ( - chan1 = lnwire.NewShortChanIDFromInt(1) - chan2 = lnwire.NewShortChanIDFromInt(2) - circuitMap htlcswitch.CircuitMap - err er.R - ) - - cfg, circuitMap := newCircuitMap(t) - - circuit := circuitMap.LookupCircuit(htlcswitch.CircuitKey{ - ChanID: chan1, - HtlcID: 0, - }) - if circuit != nil { - t.Fatalf("LookupByHTLC returned a circuit before any were added: %v", - circuit) - } - - circuit1 := &htlcswitch.PaymentCircuit{ - Incoming: htlcswitch.CircuitKey{ - ChanID: chan2, - HtlcID: 1, - }, - PaymentHash: hash1, - ErrorEncrypter: htlcswitch.NewMockObfuscator(), - } - if _, err := circuitMap.CommitCircuits(circuit1); err != nil { - t.Fatalf("unable to add half circuit: %v", err) - } - - // Circuit map should have one circuit that has not been fully opened. - assertNumCircuitsWithHash(t, circuitMap, hash1, 0) - assertHasCircuit(t, circuitMap, circuit1) - - cfg, circuitMap = restartCircuitMap(t, cfg) - - assertNumCircuitsWithHash(t, circuitMap, hash1, 0) - assertHasCircuit(t, circuitMap, circuit1) - - // Add multiple circuits with same destination channel but different HTLC - // IDs and payment hashes. - keystone1 := htlcswitch.Keystone{ - InKey: circuit1.Incoming, - OutKey: htlcswitch.CircuitKey{ - ChanID: chan1, - HtlcID: 0, - }, - } - circuit1.Outgoing = &keystone1.OutKey - if err := circuitMap.OpenCircuits(keystone1); err != nil { - t.Fatalf("unable to add full circuit: %v", err) - } - - // Circuit map should reflect addition of circuit1, and the change - // should survive a restart. 
- assertNumCircuitsWithHash(t, circuitMap, hash1, 1) - assertHasCircuit(t, circuitMap, circuit1) - assertHasKeystone(t, circuitMap, keystone1.OutKey, circuit1) - - cfg, circuitMap = restartCircuitMap(t, cfg) - - assertNumCircuitsWithHash(t, circuitMap, hash1, 1) - assertHasCircuit(t, circuitMap, circuit1) - assertHasKeystone(t, circuitMap, keystone1.OutKey, circuit1) - - circuit2 := &htlcswitch.PaymentCircuit{ - Incoming: htlcswitch.CircuitKey{ - ChanID: chan2, - HtlcID: 2, - }, - PaymentHash: hash2, - ErrorEncrypter: htlcswitch.NewMockObfuscator(), - } - if _, err := circuitMap.CommitCircuits(circuit2); err != nil { - t.Fatalf("unable to add half circuit: %v", err) - } - - assertHasCircuit(t, circuitMap, circuit2) - - keystone2 := htlcswitch.Keystone{ - InKey: circuit2.Incoming, - OutKey: htlcswitch.CircuitKey{ - ChanID: chan1, - HtlcID: 1, - }, - } - circuit2.Outgoing = &keystone2.OutKey - if err := circuitMap.OpenCircuits(keystone2); err != nil { - t.Fatalf("unable to add full circuit: %v", err) - } - - // Should have two full circuits, one under hash1 and another under - // hash2. Both half payment circuits should have been removed when the - // full circuits were added. 
- assertNumCircuitsWithHash(t, circuitMap, hash1, 1) - assertHasCircuit(t, circuitMap, circuit1) - assertHasKeystone(t, circuitMap, keystone1.OutKey, circuit1) - - assertNumCircuitsWithHash(t, circuitMap, hash2, 1) - assertHasCircuit(t, circuitMap, circuit2) - assertHasKeystone(t, circuitMap, keystone2.OutKey, circuit2) - - assertNumCircuitsWithHash(t, circuitMap, hash3, 0) - - cfg, circuitMap = restartCircuitMap(t, cfg) - - assertNumCircuitsWithHash(t, circuitMap, hash1, 1) - assertHasCircuit(t, circuitMap, circuit1) - assertHasKeystone(t, circuitMap, keystone1.OutKey, circuit1) - - assertNumCircuitsWithHash(t, circuitMap, hash2, 1) - assertHasCircuit(t, circuitMap, circuit2) - assertHasKeystone(t, circuitMap, keystone2.OutKey, circuit2) - - assertNumCircuitsWithHash(t, circuitMap, hash3, 0) - - circuit3 := &htlcswitch.PaymentCircuit{ - Incoming: htlcswitch.CircuitKey{ - ChanID: chan1, - HtlcID: 2, - }, - PaymentHash: hash3, - ErrorEncrypter: htlcswitch.NewMockObfuscator(), - } - if _, err := circuitMap.CommitCircuits(circuit3); err != nil { - t.Fatalf("unable to add half circuit: %v", err) - } - - assertHasCircuit(t, circuitMap, circuit3) - cfg, circuitMap = restartCircuitMap(t, cfg) - assertHasCircuit(t, circuitMap, circuit3) - - // Add another circuit with an already-used HTLC ID but different - // destination channel. - keystone3 := htlcswitch.Keystone{ - InKey: circuit3.Incoming, - OutKey: htlcswitch.CircuitKey{ - ChanID: chan2, - HtlcID: 0, - }, - } - circuit3.Outgoing = &keystone3.OutKey - if err := circuitMap.OpenCircuits(keystone3); err != nil { - t.Fatalf("unable to add full circuit: %v", err) - } - - // Check that all have been marked as full circuits, and that no half - // circuits are currently being tracked. 
- assertHasKeystone(t, circuitMap, keystone1.OutKey, circuit1) - assertHasKeystone(t, circuitMap, keystone2.OutKey, circuit2) - assertHasKeystone(t, circuitMap, keystone3.OutKey, circuit3) - cfg, circuitMap = restartCircuitMap(t, cfg) - assertHasKeystone(t, circuitMap, keystone1.OutKey, circuit1) - assertHasKeystone(t, circuitMap, keystone2.OutKey, circuit2) - assertHasKeystone(t, circuitMap, keystone3.OutKey, circuit3) - - // Even though a circuit was added with chan1, HTLC ID 2 as the source, - // the lookup should go by destination channel, HTLC ID. - invalidKeystone := htlcswitch.CircuitKey{ - ChanID: chan1, - HtlcID: 2, - } - circuit = circuitMap.LookupOpenCircuit(invalidKeystone) - if circuit != nil { - t.Fatalf("LookupByHTLC returned a circuit without being added: %v", - circuit) - } - - circuit4 := &htlcswitch.PaymentCircuit{ - Incoming: htlcswitch.CircuitKey{ - ChanID: chan2, - HtlcID: 3, - }, - PaymentHash: hash1, - ErrorEncrypter: htlcswitch.NewMockObfuscator(), - } - if _, err := circuitMap.CommitCircuits(circuit4); err != nil { - t.Fatalf("unable to add half circuit: %v", err) - } - - // Circuit map should still only show one circuit with hash1, since we - // have not set the keystone for circuit4. - assertNumCircuitsWithHash(t, circuitMap, hash1, 1) - assertHasCircuit(t, circuitMap, circuit4) - - cfg, circuitMap = restartCircuitMap(t, cfg) - - assertNumCircuitsWithHash(t, circuitMap, hash1, 1) - assertHasCircuit(t, circuitMap, circuit4) - - // Add a circuit with a destination channel and payment hash that are - // already added but a different HTLC ID. - keystone4 := htlcswitch.Keystone{ - InKey: circuit4.Incoming, - OutKey: htlcswitch.CircuitKey{ - ChanID: chan1, - HtlcID: 3, - }, - } - circuit4.Outgoing = &keystone4.OutKey - if err := circuitMap.OpenCircuits(keystone4); err != nil { - t.Fatalf("unable to add full circuit: %v", err) - } - - // Verify that all circuits have been fully added. 
- assertHasCircuit(t, circuitMap, circuit1) - assertHasKeystone(t, circuitMap, keystone1.OutKey, circuit1) - assertHasCircuit(t, circuitMap, circuit2) - assertHasKeystone(t, circuitMap, keystone2.OutKey, circuit2) - assertHasCircuit(t, circuitMap, circuit3) - assertHasKeystone(t, circuitMap, keystone3.OutKey, circuit3) - assertHasCircuit(t, circuitMap, circuit4) - assertHasKeystone(t, circuitMap, keystone4.OutKey, circuit4) - - // Verify that each circuit is exposed via the proper hash bucketing. - assertNumCircuitsWithHash(t, circuitMap, hash1, 2) - assertHasCircuitForHash(t, circuitMap, hash1, circuit1) - assertHasCircuitForHash(t, circuitMap, hash1, circuit4) - - assertNumCircuitsWithHash(t, circuitMap, hash2, 1) - assertHasCircuitForHash(t, circuitMap, hash2, circuit2) - - assertNumCircuitsWithHash(t, circuitMap, hash3, 1) - assertHasCircuitForHash(t, circuitMap, hash3, circuit3) - - // Restart, then run checks again. - cfg, circuitMap = restartCircuitMap(t, cfg) - - // Verify that all circuits have been fully added. - assertHasCircuit(t, circuitMap, circuit1) - assertHasKeystone(t, circuitMap, keystone1.OutKey, circuit1) - assertHasCircuit(t, circuitMap, circuit2) - assertHasKeystone(t, circuitMap, keystone2.OutKey, circuit2) - assertHasCircuit(t, circuitMap, circuit3) - assertHasKeystone(t, circuitMap, keystone3.OutKey, circuit3) - assertHasCircuit(t, circuitMap, circuit4) - assertHasKeystone(t, circuitMap, keystone4.OutKey, circuit4) - - // Verify that each circuit is exposed via the proper hash bucketing. - assertNumCircuitsWithHash(t, circuitMap, hash1, 2) - assertHasCircuitForHash(t, circuitMap, hash1, circuit1) - assertHasCircuitForHash(t, circuitMap, hash1, circuit4) - - assertNumCircuitsWithHash(t, circuitMap, hash2, 1) - assertHasCircuitForHash(t, circuitMap, hash2, circuit2) - - assertNumCircuitsWithHash(t, circuitMap, hash3, 1) - assertHasCircuitForHash(t, circuitMap, hash3, circuit3) - - // Test removing circuits and the subsequent lookups. 
- err = circuitMap.DeleteCircuits(circuit1.Incoming) - if err != nil { - t.Fatalf("Remove returned unexpected error: %v", err) - } - - // There should be exactly one remaining circuit with hash1, and it - // should be circuit4. - assertNumCircuitsWithHash(t, circuitMap, hash1, 1) - assertHasCircuitForHash(t, circuitMap, hash1, circuit4) - cfg, circuitMap = restartCircuitMap(t, cfg) - assertNumCircuitsWithHash(t, circuitMap, hash1, 1) - assertHasCircuitForHash(t, circuitMap, hash1, circuit4) - - // Removing already-removed circuit should return an error. - err = circuitMap.DeleteCircuits(circuit1.Incoming) - if err != nil { - t.Fatal("Unexpected failure when deleting already "+ - "deleted circuit: %v", err) - } - - // Verify that nothing related to hash1 has changed - assertNumCircuitsWithHash(t, circuitMap, hash1, 1) - assertHasCircuitForHash(t, circuitMap, hash1, circuit4) - - // Remove last remaining circuit with payment hash hash1. - err = circuitMap.DeleteCircuits(circuit4.Incoming) - if err != nil { - t.Fatalf("Remove returned unexpected error: %v", err) - } - - assertNumCircuitsWithHash(t, circuitMap, hash1, 0) - assertNumCircuitsWithHash(t, circuitMap, hash2, 1) - assertNumCircuitsWithHash(t, circuitMap, hash3, 1) - cfg, circuitMap = restartCircuitMap(t, cfg) - assertNumCircuitsWithHash(t, circuitMap, hash1, 0) - assertNumCircuitsWithHash(t, circuitMap, hash2, 1) - assertNumCircuitsWithHash(t, circuitMap, hash3, 1) - - // Remove last remaining circuit with payment hash hash2. - err = circuitMap.DeleteCircuits(circuit2.Incoming) - if err != nil { - t.Fatalf("Remove returned unexpected error: %v", err) - } - - // There should now only be one remaining circuit, with hash3. 
- assertNumCircuitsWithHash(t, circuitMap, hash2, 0) - assertNumCircuitsWithHash(t, circuitMap, hash3, 1) - cfg, circuitMap = restartCircuitMap(t, cfg) - assertNumCircuitsWithHash(t, circuitMap, hash2, 0) - assertNumCircuitsWithHash(t, circuitMap, hash3, 1) - - // In removing the final circuit, we will try and remove all other known - // circuits as well. Any circuits that are unknown to the circuit map - // will be ignored, and only circuit 3 should be cause any change in the - // state. - err = circuitMap.DeleteCircuits( - circuit1.Incoming, circuit2.Incoming, - circuit3.Incoming, circuit4.Incoming, - ) - if err != nil { - t.Fatalf("Unexpected failure when removing circuit while also "+ - "deleting already deleted circuits: %v", err) - } - - // Check that the circuit map is empty, even after restarting. - assertNumCircuitsWithHash(t, circuitMap, hash3, 0) - _, circuitMap = restartCircuitMap(t, cfg) - assertNumCircuitsWithHash(t, circuitMap, hash3, 0) -} - -// assertHasKeystone tests that the circuit map contains the provided payment -// circuit. -func assertHasKeystone(t *testing.T, cm htlcswitch.CircuitMap, - outKey htlcswitch.CircuitKey, c *htlcswitch.PaymentCircuit) { - - circuit := cm.LookupOpenCircuit(outKey) - if !equalIgnoreLFD(circuit, c) { - t.Fatalf("unexpected circuit, want: %v, got %v", c, circuit) - } -} - -// assertHasCircuitForHash tests that the provided circuit appears in the list -// of circuits for the given hash. -func assertHasCircuitForHash(t *testing.T, cm htlcswitch.CircuitMap, hash [32]byte, - circuit *htlcswitch.PaymentCircuit) { - - circuits := cm.LookupByPaymentHash(hash) - for _, c := range circuits { - if equalIgnoreLFD(c, circuit) { - return - } - } - - t.Fatalf("unable to find circuit: %v by hash: %v", circuit, hash) -} - -// assertNumCircuitsWithHash tests that the circuit has the right number of full -// circuits, indexed by the given hash. 
-func assertNumCircuitsWithHash(t *testing.T, cm htlcswitch.CircuitMap, - hash [32]byte, expectedNum int) { - - circuits := cm.LookupByPaymentHash(hash) - if len(circuits) != expectedNum { - t.Fatalf("LookupByPaymentHash returned wrong number of circuits for "+ - "hash=%v: expecected %d, got %d", hash, expectedNum, - len(circuits)) - } -} - -// assertHasCircuit queries the circuit map using the half-circuit's half -// key, and fails if the returned half-circuit differs from the provided one. -func assertHasCircuit(t *testing.T, cm htlcswitch.CircuitMap, - c *htlcswitch.PaymentCircuit) { - - c2 := cm.LookupCircuit(c.Incoming) - if !equalIgnoreLFD(c, c2) { - t.Fatalf("expected circuit: %v, got %v", c, c2) - } -} - -// equalIgnoreLFD compares two payment circuits, but ignores the current value -// of LoadedFromDisk. The value is temporarily set to false for the comparison -// and then restored. -func equalIgnoreLFD(c, c2 *htlcswitch.PaymentCircuit) bool { - ogLFD := c.LoadedFromDisk - ogLFD2 := c2.LoadedFromDisk - - c.LoadedFromDisk = false - c2.LoadedFromDisk = false - - isEqual := reflect.DeepEqual(c, c2) - - c.LoadedFromDisk = ogLFD - c2.LoadedFromDisk = ogLFD2 - - return isEqual -} - -// makeCircuitDB initializes a new test channeldb for testing the persistence of -// the circuit map. If an empty string is provided as a path, a temp directory -// will be created. -func makeCircuitDB(t *testing.T, path string) *channeldb.DB { - if path == "" { - var err error - path, err = ioutil.TempDir("", "circuitdb") - if err != nil { - t.Fatalf("unable to create temp path: %v", err) - } - } - - db, err := channeldb.Open(path) - if err != nil { - t.Fatalf("unable to open channel db: %v", err) - } - - return db -} - -// Creates a new circuit map, backed by a freshly opened channeldb. The existing -// channeldb is closed in order to simulate a complete restart. 
-func restartCircuitMap(t *testing.T, cfg *htlcswitch.CircuitMapConfig) ( - *htlcswitch.CircuitMapConfig, htlcswitch.CircuitMap) { - - // Record the current temp path and close current db. - dbPath := cfg.DB.Path() - cfg.DB.Close() - - // Reinitialize circuit map with same db path. - cfg2 := &htlcswitch.CircuitMapConfig{ - DB: makeCircuitDB(t, dbPath), - ExtractErrorEncrypter: cfg.ExtractErrorEncrypter, - } - cm2, err := htlcswitch.NewCircuitMap(cfg2) - if err != nil { - t.Fatalf("unable to recreate persistent circuit map: %v", err) - } - - return cfg2, cm2 -} - -// TestCircuitMapCommitCircuits tests the following behavior of CommitCircuits: -// 1. New circuits are successfully added. -// 2. Duplicate circuits are dropped anytime before circuit map shutsdown. -// 3. Duplicate circuits are failed anytime after circuit map restarts. -func TestCircuitMapCommitCircuits(t *testing.T) { - t.Parallel() - - var ( - chan1 = lnwire.NewShortChanIDFromInt(1) - circuitMap htlcswitch.CircuitMap - err er.R - ) - - cfg, circuitMap := newCircuitMap(t) - - circuit := &htlcswitch.PaymentCircuit{ - Incoming: htlcswitch.CircuitKey{ - ChanID: chan1, - HtlcID: 3, - }, - ErrorEncrypter: testExtracter, - } - - // First we will try to add an new circuit to the circuit map, this - // should succeed. 
- actions, err := circuitMap.CommitCircuits(circuit) - if err != nil { - t.Fatalf("failed to commit circuits: %v", err) - } - if len(actions.Drops) > 0 { - t.Fatalf("new circuit should not have been dropped") - } - if len(actions.Fails) > 0 { - t.Fatalf("new circuit should not have failed") - } - if len(actions.Adds) != 1 { - t.Fatalf("only one circuit should have been added, found %d", - len(actions.Adds)) - } - - circuit2 := circuitMap.LookupCircuit(circuit.Incoming) - if !reflect.DeepEqual(circuit, circuit2) { - t.Fatalf("unexpected committed circuit: got %v, want %v", - circuit2, circuit) - } - - // Then we will try to readd the same circuit again, this should result - // in the circuit being dropped. This can happen if the incoming link - // flaps. - actions, err = circuitMap.CommitCircuits(circuit) - if err != nil { - t.Fatalf("failed to commit circuits: %v", err) - } - if len(actions.Adds) > 0 { - t.Fatalf("duplicate circuit should not have been added to circuit map") - } - if len(actions.Fails) > 0 { - t.Fatalf("duplicate circuit should not have failed") - } - if len(actions.Drops) != 1 { - t.Fatalf("only one circuit should have been dropped, found %d", - len(actions.Drops)) - } - - // Finally, restart the circuit map, which will cause the added circuit - // to be loaded from disk. Since the keystone was never set, subsequent - // attempts to commit the circuit should cause the circuit map to - // indicate that the HTLC should be failed back. 
- _, circuitMap = restartCircuitMap(t, cfg) - - actions, err = circuitMap.CommitCircuits(circuit) - if err != nil { - t.Fatalf("failed to commit circuits: %v", err) - } - if len(actions.Adds) > 0 { - t.Fatalf("duplicate circuit with incomplete forwarding " + - "decision should not have been added to circuit map") - } - if len(actions.Drops) > 0 { - t.Fatalf("duplicate circuit with incomplete forwarding " + - "decision should not have been dropped by circuit map") - } - if len(actions.Fails) != 1 { - t.Fatalf("only one duplicate circuit with incomplete "+ - "forwarding decision should have been failed, found: "+ - "%d", len(actions.Fails)) - } - - // Lookup the committed circuit again, it should be identical apart from - // the loaded from disk flag. - circuit2 = circuitMap.LookupCircuit(circuit.Incoming) - if !equalIgnoreLFD(circuit, circuit2) { - t.Fatalf("unexpected committed circuit: got %v, want %v", - circuit2, circuit) - } -} - -// TestCircuitMapOpenCircuits checks that circuits are properly opened, and that -// duplicate attempts to open a circuit will result in an error. -func TestCircuitMapOpenCircuits(t *testing.T) { - t.Parallel() - - var ( - chan1 = lnwire.NewShortChanIDFromInt(1) - chan2 = lnwire.NewShortChanIDFromInt(2) - circuitMap htlcswitch.CircuitMap - err er.R - ) - - cfg, circuitMap := newCircuitMap(t) - - circuit := &htlcswitch.PaymentCircuit{ - Incoming: htlcswitch.CircuitKey{ - ChanID: chan1, - HtlcID: 3, - }, - ErrorEncrypter: testExtracter, - } - - // First we will try to add an new circuit to the circuit map, this - // should succeed. - _, err = circuitMap.CommitCircuits(circuit) - if err != nil { - t.Fatalf("failed to commit circuits: %v", err) - } - - keystone := htlcswitch.Keystone{ - InKey: circuit.Incoming, - OutKey: htlcswitch.CircuitKey{ - ChanID: chan2, - HtlcID: 2, - }, - } - - // Open the circuit for the first time. 
- err = circuitMap.OpenCircuits(keystone) - if err != nil { - t.Fatalf("failed to open circuits: %v", err) - } - - // Check that we can retrieve the open circuit if the circuit map before - // the circuit map is restarted. - circuit2 := circuitMap.LookupOpenCircuit(keystone.OutKey) - if !reflect.DeepEqual(circuit, circuit2) { - t.Fatalf("unexpected open circuit: got %v, want %v", - circuit2, circuit) - } - - if !circuit2.HasKeystone() { - t.Fatalf("open circuit should have keystone") - } - if !reflect.DeepEqual(&keystone.OutKey, circuit2.Outgoing) { - t.Fatalf("expected open circuit to have outgoing key: %v, found %v", - &keystone.OutKey, circuit2.Outgoing) - } - - // Open the circuit for a second time, which should fail due to a - // duplicate keystone - err = circuitMap.OpenCircuits(keystone) - if !htlcswitch.ErrDuplicateKeystone.Is(err) { - t.Fatalf("failed to open circuits: %v", err) - } - - // Then we will try to readd the same circuit again, this should result - // in the circuit being dropped. This can happen if the incoming link - // flaps OR the switch is entirely restarted and the outgoing link has - // not received a response. - actions, err := circuitMap.CommitCircuits(circuit) - if err != nil { - t.Fatalf("failed to commit circuits: %v", err) - } - if len(actions.Adds) > 0 { - t.Fatalf("duplicate circuit should not have been added to circuit map") - } - if len(actions.Fails) > 0 { - t.Fatalf("duplicate circuit should not have failed") - } - if len(actions.Drops) != 1 { - t.Fatalf("only one circuit should have been dropped, found %d", - len(actions.Drops)) - } - - // Now, restart the circuit map, which will cause the opened circuit to - // be loaded from disk. Since we set the keystone on this circuit, it - // should be restored as such in memory. - // - // NOTE: The channel db doesn't have any channel data, so no keystones - // will be trimmed. - _, circuitMap = restartCircuitMap(t, cfg) - - // Check that we can still query for the open circuit. 
- circuit2 = circuitMap.LookupOpenCircuit(keystone.OutKey) - if !equalIgnoreLFD(circuit, circuit2) { - t.Fatalf("unexpected open circuit: got %v, want %v", - circuit2, circuit) - } - - // Try to open the circuit again, we expect this to fail since the open - // circuit was restored. - err = circuitMap.OpenCircuits(keystone) - if !htlcswitch.ErrDuplicateKeystone.Is(err) { - t.Fatalf("failed to open circuits: %v", err) - } - - // Lastly, with the circuit map restarted, try one more time to recommit - // the open circuit. This should be dropped, and is expected to happen - // if the incoming link flaps OR the switch is entirely restarted and - // the outgoing link has not received a response. - actions, err = circuitMap.CommitCircuits(circuit) - if err != nil { - t.Fatalf("failed to commit circuits: %v", err) - } - if len(actions.Adds) > 0 { - t.Fatalf("duplicate circuit should not have been added to circuit map") - } - if len(actions.Fails) > 0 { - t.Fatalf("duplicate circuit should not have failed") - } - if len(actions.Drops) != 1 { - t.Fatalf("only one circuit should have been dropped, found %d", - len(actions.Drops)) - } -} - -func assertCircuitsOpenedPreRestart(t *testing.T, - circuitMap htlcswitch.CircuitMap, - circuits []*htlcswitch.PaymentCircuit, - keystones []htlcswitch.Keystone) { - - for i, circuit := range circuits { - keystone := keystones[i] - - openCircuit := circuitMap.LookupOpenCircuit(keystone.OutKey) - if !reflect.DeepEqual(circuit, openCircuit) { - t.Fatalf("unexpected open circuit %d: got %v, want %v", - i, openCircuit, circuit) - } - - if !openCircuit.HasKeystone() { - t.Fatalf("open circuit %d should have keystone", i) - } - if !reflect.DeepEqual(&keystone.OutKey, openCircuit.Outgoing) { - t.Fatalf("expected open circuit %d to have outgoing "+ - "key: %v, found %v", i, - &keystone.OutKey, openCircuit.Outgoing) - } - } -} - -func assertCircuitsOpenedPostRestart(t *testing.T, - circuitMap htlcswitch.CircuitMap, - circuits 
[]*htlcswitch.PaymentCircuit, - keystones []htlcswitch.Keystone) { - - for i, circuit := range circuits { - keystone := keystones[i] - - openCircuit := circuitMap.LookupOpenCircuit(keystone.OutKey) - if !equalIgnoreLFD(circuit, openCircuit) { - t.Fatalf("unexpected open circuit %d: got %v, want %v", - i, openCircuit, circuit) - } - - if !openCircuit.HasKeystone() { - t.Fatalf("open circuit %d should have keystone", i) - } - if !reflect.DeepEqual(&keystone.OutKey, openCircuit.Outgoing) { - t.Fatalf("expected open circuit %d to have outgoing "+ - "key: %v, found %v", i, - &keystone.OutKey, openCircuit.Outgoing) - } - } -} - -func assertCircuitsNotOpenedPreRestart(t *testing.T, - circuitMap htlcswitch.CircuitMap, - circuits []*htlcswitch.PaymentCircuit, - keystones []htlcswitch.Keystone, - offset int) { - - for i := range circuits { - keystone := keystones[i] - - openCircuit := circuitMap.LookupOpenCircuit(keystone.OutKey) - if openCircuit != nil { - t.Fatalf("expected circuit %d not to be open", - offset+i) - } - - circuit := circuitMap.LookupCircuit(keystone.InKey) - if circuit == nil { - t.Fatalf("expected to find unopened circuit %d", - offset+i) - } - if circuit.HasKeystone() { - t.Fatalf("circuit %d should not have keystone", - offset+i) - } - } -} - -// TestCircuitMapTrimOpenCircuits verifies that the circuit map properly removes -// circuits from disk and the in-memory state when TrimOpenCircuits is used. -// This test checks that a successful trim survives a restart, and that circuits -// added before the restart can also be trimmed. -func TestCircuitMapTrimOpenCircuits(t *testing.T) { - t.Parallel() - - var ( - chan1 = lnwire.NewShortChanIDFromInt(1) - chan2 = lnwire.NewShortChanIDFromInt(2) - circuitMap htlcswitch.CircuitMap - err er.R - ) - - cfg, circuitMap := newCircuitMap(t) - - const nCircuits = 10 - const firstTrimIndex = 7 - const secondTrimIndex = 3 - - // Create a list of all circuits that will be committed in the circuit - // map. 
The incoming HtlcIDs are chosen so that there is overlap with - // the outgoing HtlcIDs, but ensures that the test is not dependent on - // them being equal. - circuits := make([]*htlcswitch.PaymentCircuit, nCircuits) - for i := range circuits { - circuits[i] = &htlcswitch.PaymentCircuit{ - Incoming: htlcswitch.CircuitKey{ - ChanID: chan1, - HtlcID: uint64(i + 3), - }, - ErrorEncrypter: htlcswitch.NewMockObfuscator(), - } - } - - // First we will try to add an new circuit to the circuit map, this - // should succeed. - _, err = circuitMap.CommitCircuits(circuits...) - if err != nil { - t.Fatalf("failed to commit circuits: %v", err) - } - - // Now create a list of the keystones that we will use to preemptively - // open the circuits. We set the index as the outgoing HtlcID to i - // simplify the indexing logic of the test. - keystones := make([]htlcswitch.Keystone, nCircuits) - for i := range keystones { - keystones[i] = htlcswitch.Keystone{ - InKey: circuits[i].Incoming, - OutKey: htlcswitch.CircuitKey{ - ChanID: chan2, - HtlcID: uint64(i), - }, - } - } - - // Open the circuits for the first time. - err = circuitMap.OpenCircuits(keystones...) - if err != nil { - t.Fatalf("failed to open circuits: %v", err) - } - - // Check that all circuits are marked open. - assertCircuitsOpenedPreRestart(t, circuitMap, circuits, keystones) - - // Now trim up above outgoing htlcid `firstTrimIndex` (7). This should - // leave the first 7 circuits open, and the rest should be reverted to - // an unopened state. - err = circuitMap.TrimOpenCircuits(chan2, firstTrimIndex) - if err != nil { - t.Fatalf("unable to trim circuits") - } - - assertCircuitsOpenedPreRestart(t, - circuitMap, - circuits[:firstTrimIndex], - keystones[:firstTrimIndex], - ) - - assertCircuitsNotOpenedPreRestart( - t, - circuitMap, - circuits[firstTrimIndex:], - keystones[firstTrimIndex:], - firstTrimIndex, - ) - - // Restart the circuit map, verify that the trim is reflected on - // startup. 
- cfg, circuitMap = restartCircuitMap(t, cfg) - - assertCircuitsOpenedPostRestart( - t, - circuitMap, - circuits[:firstTrimIndex], - keystones[:firstTrimIndex], - ) - - assertCircuitsNotOpenedPreRestart( - t, - circuitMap, - circuits[firstTrimIndex:], - keystones[firstTrimIndex:], - firstTrimIndex, - ) - - // Now, trim above outgoing htlcid `secondTrimIndex` (3). Only the first - // three circuits should be open, with any others being reverted back to - // unopened. - err = circuitMap.TrimOpenCircuits(chan2, secondTrimIndex) - if err != nil { - t.Fatalf("unable to trim circuits") - } - - assertCircuitsOpenedPostRestart( - t, - circuitMap, - circuits[:secondTrimIndex], - keystones[:secondTrimIndex], - ) - - assertCircuitsNotOpenedPreRestart( - t, - circuitMap, - circuits[secondTrimIndex:], - keystones[secondTrimIndex:], - secondTrimIndex, - ) - - // Restart the circuit map one last time to make sure the changes are - // persisted. - _, circuitMap = restartCircuitMap(t, cfg) - - assertCircuitsOpenedPostRestart( - t, - circuitMap, - circuits[:secondTrimIndex], - keystones[:secondTrimIndex], - ) - - assertCircuitsNotOpenedPreRestart( - t, - circuitMap, - circuits[secondTrimIndex:], - keystones[secondTrimIndex:], - secondTrimIndex, - ) -} - -// TestCircuitMapCloseOpenCircuits asserts that the circuit map can properly -// close open circuits, and that it allows at most one response to do so -// successfully. It also checks that a circuit is reopened if the close was not -// persisted via DeleteCircuits, and can again be closed. 
-func TestCircuitMapCloseOpenCircuits(t *testing.T) { - t.Parallel() - - var ( - chan1 = lnwire.NewShortChanIDFromInt(1) - chan2 = lnwire.NewShortChanIDFromInt(2) - circuitMap htlcswitch.CircuitMap - err er.R - ) - - cfg, circuitMap := newCircuitMap(t) - - circuit := &htlcswitch.PaymentCircuit{ - Incoming: htlcswitch.CircuitKey{ - ChanID: chan1, - HtlcID: 3, - }, - ErrorEncrypter: &hop.SphinxErrorEncrypter{ - EphemeralKey: testEphemeralKey, - }, - } - - // First we will try to add an new circuit to the circuit map, this - // should succeed. - _, err = circuitMap.CommitCircuits(circuit) - if err != nil { - t.Fatalf("failed to commit circuits: %v", err) - } - - keystone := htlcswitch.Keystone{ - InKey: circuit.Incoming, - OutKey: htlcswitch.CircuitKey{ - ChanID: chan2, - HtlcID: 2, - }, - } - - // Open the circuit for the first time. - err = circuitMap.OpenCircuits(keystone) - if err != nil { - t.Fatalf("failed to open circuits: %v", err) - } - - // Check that we can retrieve the open circuit if the circuit map before - // the circuit map is restarted. - circuit2 := circuitMap.LookupOpenCircuit(keystone.OutKey) - if !reflect.DeepEqual(circuit, circuit2) { - t.Fatalf("unexpected open circuit: got %v, want %v", - circuit2, circuit) - } - - // Open the circuit for a second time, which should fail due to a - // duplicate keystone - err = circuitMap.OpenCircuits(keystone) - if !htlcswitch.ErrDuplicateKeystone.Is(err) { - t.Fatalf("failed to open circuits: %v", err) - } - - // Close the open circuit for the first time, which should succeed. - _, err = circuitMap.FailCircuit(circuit.Incoming) - if err != nil { - t.Fatalf("unable to close unopened circuit") - } - - // Closing the circuit a second time should result in a failure. - _, err = circuitMap.FailCircuit(circuit.Incoming) - if !htlcswitch.ErrCircuitClosing.Is(err) { - t.Fatalf("unable to close unopened circuit") - } - - // Now, restart the circuit map, which will cause the opened circuit to - // be loaded from disk. 
Since we set the keystone on this circuit, it - // should be restored as such in memory. - // - // NOTE: The channel db doesn't have any channel data, so no keystones - // will be trimmed. - _, circuitMap = restartCircuitMap(t, cfg) - - // Close the open circuit for the first time, which should succeed. - _, err = circuitMap.FailCircuit(circuit.Incoming) - if err != nil { - t.Fatalf("unable to close unopened circuit") - } - - // Closing the circuit a second time should result in a failure. - _, err = circuitMap.FailCircuit(circuit.Incoming) - if !htlcswitch.ErrCircuitClosing.Is(err) { - t.Fatalf("unable to close unopened circuit") - } -} - -// TestCircuitMapCloseUnopenedCircuit tests that closing an unopened circuit -// allows at most semantics, and that the close is not persisted across -// restarts. -func TestCircuitMapCloseUnopenedCircuit(t *testing.T) { - t.Parallel() - - var ( - chan1 = lnwire.NewShortChanIDFromInt(1) - circuitMap htlcswitch.CircuitMap - err er.R - ) - - cfg, circuitMap := newCircuitMap(t) - - circuit := &htlcswitch.PaymentCircuit{ - Incoming: htlcswitch.CircuitKey{ - ChanID: chan1, - HtlcID: 3, - }, - ErrorEncrypter: testExtracter, - } - - // First we will try to add an new circuit to the circuit map, this - // should succeed. - _, err = circuitMap.CommitCircuits(circuit) - if err != nil { - t.Fatalf("failed to commit circuits: %v", err) - } - - // Close the open circuit for the first time, which should succeed. - _, err = circuitMap.FailCircuit(circuit.Incoming) - if err != nil { - t.Fatalf("unable to close unopened circuit") - } - - // Closing the circuit a second time should result in a failure. - _, err = circuitMap.FailCircuit(circuit.Incoming) - if !htlcswitch.ErrCircuitClosing.Is(err) { - t.Fatalf("unable to close unopened circuit") - } - - // Now, restart the circuit map, which will result in the circuit being - // reopened, since no attempt to delete the circuit was made. 
- _, circuitMap = restartCircuitMap(t, cfg) - - // Close the open circuit for the first time, which should succeed. - _, err = circuitMap.FailCircuit(circuit.Incoming) - if err != nil { - t.Fatalf("unable to close unopened circuit") - } - - // Closing the circuit a second time should result in a failure. - _, err = circuitMap.FailCircuit(circuit.Incoming) - if !htlcswitch.ErrCircuitClosing.Is(err) { - t.Fatalf("unable to close unopened circuit") - } -} - -// TestCircuitMapDeleteUnopenedCircuit checks that an unopened circuit can be -// removed persistently from the circuit map. -func TestCircuitMapDeleteUnopenedCircuit(t *testing.T) { - t.Parallel() - - var ( - chan1 = lnwire.NewShortChanIDFromInt(1) - circuitMap htlcswitch.CircuitMap - err er.R - ) - - cfg, circuitMap := newCircuitMap(t) - - circuit := &htlcswitch.PaymentCircuit{ - Incoming: htlcswitch.CircuitKey{ - ChanID: chan1, - HtlcID: 3, - }, - ErrorEncrypter: testExtracter, - } - - // First we will try to add an new circuit to the circuit map, this - // should succeed. - _, err = circuitMap.CommitCircuits(circuit) - if err != nil { - t.Fatalf("failed to commit circuits: %v", err) - } - - // Close the open circuit for the first time, which should succeed. - _, err = circuitMap.FailCircuit(circuit.Incoming) - if err != nil { - t.Fatalf("unable to close unopened circuit") - } - - err = circuitMap.DeleteCircuits(circuit.Incoming) - if err != nil { - t.Fatalf("unable to close unopened circuit") - } - - // Check that we can retrieve the open circuit if the circuit map before - // the circuit map is restarted. - circuit2 := circuitMap.LookupCircuit(circuit.Incoming) - if circuit2 != nil { - t.Fatalf("unexpected open circuit: got %v, want %v", - circuit2, nil) - } - - // Now, restart the circuit map, and check that the deletion survived - // the restart. 
- _, circuitMap = restartCircuitMap(t, cfg) - - circuit2 = circuitMap.LookupCircuit(circuit.Incoming) - if circuit2 != nil { - t.Fatalf("unexpected open circuit: got %v, want %v", - circuit2, nil) - } -} - -// TestCircuitMapDeleteUnopenedCircuit checks that an open circuit can be -// removed persistently from the circuit map. -func TestCircuitMapDeleteOpenCircuit(t *testing.T) { - t.Parallel() - - var ( - chan1 = lnwire.NewShortChanIDFromInt(1) - chan2 = lnwire.NewShortChanIDFromInt(2) - circuitMap htlcswitch.CircuitMap - err er.R - ) - - cfg, circuitMap := newCircuitMap(t) - - circuit := &htlcswitch.PaymentCircuit{ - Incoming: htlcswitch.CircuitKey{ - ChanID: chan1, - HtlcID: 3, - }, - ErrorEncrypter: testExtracter, - } - - // First we will try to add an new circuit to the circuit map, this - // should succeed. - _, err = circuitMap.CommitCircuits(circuit) - if err != nil { - t.Fatalf("failed to commit circuits: %v", err) - } - - keystone := htlcswitch.Keystone{ - InKey: circuit.Incoming, - OutKey: htlcswitch.CircuitKey{ - ChanID: chan2, - HtlcID: 2, - }, - } - - // Open the circuit for the first time. - err = circuitMap.OpenCircuits(keystone) - if err != nil { - t.Fatalf("failed to open circuits: %v", err) - } - - // Close the open circuit for the first time, which should succeed. - _, err = circuitMap.FailCircuit(circuit.Incoming) - if err != nil { - t.Fatalf("unable to close unopened circuit") - } - - // Persistently remove the circuit identified by incoming chan id. - err = circuitMap.DeleteCircuits(circuit.Incoming) - if err != nil { - t.Fatalf("unable to close unopened circuit") - } - - // Check that we can no longer retrieve the open circuit. - circuit2 := circuitMap.LookupOpenCircuit(keystone.OutKey) - if circuit2 != nil { - t.Fatalf("unexpected open circuit: got %v, want %v", - circuit2, nil) - } - - // Now, restart the circuit map, and check that the deletion survived - // the restart. 
- _, circuitMap = restartCircuitMap(t, cfg) - - circuit2 = circuitMap.LookupOpenCircuit(keystone.OutKey) - if circuit2 != nil { - t.Fatalf("unexpected open circuit: got %v, want %v", - circuit2, nil) - } -} diff --git a/lnd/htlcswitch/decayedlog.go b/lnd/htlcswitch/decayedlog.go deleted file mode 100644 index 45cd6faf..00000000 --- a/lnd/htlcswitch/decayedlog.go +++ /dev/null @@ -1,413 +0,0 @@ -package htlcswitch - -import ( - "bytes" - "encoding/binary" - "sync" - "sync/atomic" - - "github.com/pkt-cash/pktd/btcutil/er" - sphinx "github.com/pkt-cash/pktd/lightning-onion" - "github.com/pkt-cash/pktd/lnd/chainntnfs" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/pktlog/log" -) - -const ( - // defaultDbDirectory is the default directory where our decayed log - // will store our (sharedHash, CLTV) key-value pairs. - defaultDbDirectory = "sharedhashes" -) - -var ( - // sharedHashBucket is a bucket which houses the first HashPrefixSize - // bytes of a received HTLC's hashed shared secret as the key and the HTLC's - // CLTV expiry as the value. - sharedHashBucket = []byte("shared-hash") - - // batchReplayBucket is a bucket that maps batch identifiers to - // serialized ReplaySets. This is used to give idempotency in the event - // that a batch is processed more than once. - batchReplayBucket = []byte("batch-replay") -) - -var ( - // ErrDecayedLogInit is used to indicate a decayed log failed to create - // the proper bucketing structure on startup. - ErrDecayedLogInit = Err.CodeWithDetail("ErrDecayedLogInit", "unable to initialize decayed log") - - // ErrDecayedLogCorrupted signals that the anticipated bucketing - // structure has diverged since initialization. - ErrDecayedLogCorrupted = Err.CodeWithDetail("ErrDecayedLogCorrupted", "decayed log structure corrupted") -) - -// DecayedLog implements the PersistLog interface. It stores the first -// HashPrefixSize bytes of a sha256-hashed shared secret along with a node's -// CLTV value. 
It is a decaying log meaning there will be a garbage collector -// to collect entries which are expired according to their stored CLTV value -// and the current block height. DecayedLog wraps boltdb for simplicity and -// batches writes to the database to decrease write contention. -type DecayedLog struct { - started int32 // To be used atomically. - stopped int32 // To be used atomically. - - cfg *kvdb.BoltBackendConfig - - db kvdb.Backend - - notifier chainntnfs.ChainNotifier - - wg sync.WaitGroup - quit chan struct{} -} - -// NewDecayedLog creates a new DecayedLog, which caches recently seen hash -// shared secrets. Entries are evicted as their cltv expires using block epochs -// from the given notifier. -func NewDecayedLog(dbPath, dbFileName string, boltCfg *kvdb.BoltConfig, - notifier chainntnfs.ChainNotifier) *DecayedLog { - - cfg := &kvdb.BoltBackendConfig{ - DBPath: dbPath, - DBFileName: dbFileName, - NoFreelistSync: true, - AutoCompact: boltCfg.AutoCompact, - AutoCompactMinAge: boltCfg.AutoCompactMinAge, - } - - // Use default path for log database - if dbPath == "" { - cfg.DBPath = defaultDbDirectory - } - - return &DecayedLog{ - cfg: cfg, - notifier: notifier, - quit: make(chan struct{}), - } -} - -// Start opens the database we will be using to store hashed shared secrets. -// It also starts the garbage collector in a goroutine to remove stale -// database entries. -func (d *DecayedLog) Start() er.R { - if !atomic.CompareAndSwapInt32(&d.started, 0, 1) { - return nil - } - - // Open the boltdb for use. - var err er.R - d.db, err = kvdb.GetBoltBackend(d.cfg) - if err != nil { - return er.Errorf("could not open boltdb: %v", err) - } - - // Initialize the primary buckets used by the decayed log. - if err := d.initBuckets(); err != nil { - return err - } - - // Start garbage collector. 
- if d.notifier != nil { - epochClient, err := d.notifier.RegisterBlockEpochNtfn(nil) - if err != nil { - return er.Errorf("unable to register for epoch "+ - "notifications: %v", err) - } - - d.wg.Add(1) - go d.garbageCollector(epochClient) - } - - return nil -} - -// initBuckets initializes the primary buckets used by the decayed log, namely -// the shared hash bucket, and batch replay -func (d *DecayedLog) initBuckets() er.R { - return kvdb.Update(d.db, func(tx kvdb.RwTx) er.R { - _, err := tx.CreateTopLevelBucket(sharedHashBucket) - if err != nil { - return ErrDecayedLogInit.Default() - } - - _, err = tx.CreateTopLevelBucket(batchReplayBucket) - if err != nil { - return ErrDecayedLogInit.Default() - } - - return nil - }, func() {}) -} - -// Stop halts the garbage collector and closes boltdb. -func (d *DecayedLog) Stop() er.R { - if !atomic.CompareAndSwapInt32(&d.stopped, 0, 1) { - return nil - } - - // Stop garbage collector. - close(d.quit) - - d.wg.Wait() - - // Close boltdb. - d.db.Close() - - return nil -} - -// garbageCollector deletes entries from sharedHashBucket whose expiry height -// has already past. This function MUST be run as a goroutine. -func (d *DecayedLog) garbageCollector(epochClient *chainntnfs.BlockEpochEvent) { - defer d.wg.Done() - defer epochClient.Cancel() - - for { - select { - case epoch, ok := <-epochClient.Epochs: - if !ok { - // Block epoch was canceled, shutting down. - log.Infof("Block epoch canceled, " + - "decaying hash log shutting down") - return - } - - // Perform a bout of garbage collection using the - // epoch's block height. - height := uint32(epoch.Height) - numExpired, err := d.gcExpiredHashes(height) - if err != nil { - log.Errorf("unable to expire hashes at "+ - "height=%d", height) - } - - if numExpired > 0 { - log.Infof("Garbage collected %v shared "+ - "secret hashes at height=%v", - numExpired, height) - } - - case <-d.quit: - // Received shutdown request. 
- log.Infof("Decaying hash log received " + - "shutdown request") - return - } - } -} - -// gcExpiredHashes purges the decaying log of all entries whose CLTV expires -// below the provided height. -func (d *DecayedLog) gcExpiredHashes(height uint32) (uint32, er.R) { - var numExpiredHashes uint32 - - err := kvdb.Batch(d.db, func(tx kvdb.RwTx) er.R { - numExpiredHashes = 0 - - // Grab the shared hash bucket - sharedHashes := tx.ReadWriteBucket(sharedHashBucket) - if sharedHashes == nil { - return er.Errorf("sharedHashBucket " + - "is nil") - } - - var expiredCltv [][]byte - if err := sharedHashes.ForEach(func(k, v []byte) er.R { - // Deserialize the CLTV value for this entry. - cltv := uint32(binary.BigEndian.Uint32(v)) - - if cltv < height { - // This CLTV is expired. We must add it to an - // array which we'll loop over and delete every - // hash contained from the db. - expiredCltv = append(expiredCltv, k) - numExpiredHashes++ - } - - return nil - }); err != nil { - return err - } - - // Delete every item in the array. This must - // be done explicitly outside of the ForEach - // function for safety reasons. - for _, hash := range expiredCltv { - err := sharedHashes.Delete(hash) - if err != nil { - return err - } - } - - return nil - }) - if err != nil { - return 0, err - } - - return numExpiredHashes, nil -} - -// Delete removes a key-pair from the -// sharedHashBucket. -func (d *DecayedLog) Delete(hash *sphinx.HashPrefix) er.R { - return kvdb.Batch(d.db, func(tx kvdb.RwTx) er.R { - sharedHashes := tx.ReadWriteBucket(sharedHashBucket) - if sharedHashes == nil { - return ErrDecayedLogCorrupted.Default() - } - - return sharedHashes.Delete(hash[:]) - }) -} - -// Get retrieves the CLTV of a processed HTLC given the first 20 bytes of the -// Sha-256 hash of the shared secret. 
-func (d *DecayedLog) Get(hash *sphinx.HashPrefix) (uint32, er.R) { - var value uint32 - - err := kvdb.View(d.db, func(tx kvdb.RTx) er.R { - // Grab the shared hash bucket which stores the mapping from - // truncated sha-256 hashes of shared secrets to CLTV's. - sharedHashes := tx.ReadBucket(sharedHashBucket) - if sharedHashes == nil { - return er.Errorf("sharedHashes is nil, could " + - "not retrieve CLTV value") - } - - // Retrieve the bytes which represents the CLTV - valueBytes := sharedHashes.Get(hash[:]) - if valueBytes == nil { - return sphinx.ErrLogEntryNotFound.Default() - } - - // The first 4 bytes represent the CLTV, store it in value. - value = uint32(binary.BigEndian.Uint32(valueBytes)) - - return nil - }, func() { - value = 0 - }) - if err != nil { - return value, err - } - - return value, nil -} - -// Put stores a shared secret hash as the key and the CLTV as the value. -func (d *DecayedLog) Put(hash *sphinx.HashPrefix, cltv uint32) er.R { - // Optimisitically serialize the cltv value into the scratch buffer. - var scratch [4]byte - binary.BigEndian.PutUint32(scratch[:], cltv) - - return kvdb.Batch(d.db, func(tx kvdb.RwTx) er.R { - sharedHashes := tx.ReadWriteBucket(sharedHashBucket) - if sharedHashes == nil { - return ErrDecayedLogCorrupted.Default() - } - - // Check to see if this hash prefix has been recorded before. If - // a value is found, this packet is being replayed. - valueBytes := sharedHashes.Get(hash[:]) - if valueBytes != nil { - return sphinx.ErrReplayedPacket.Default() - } - - return sharedHashes.Put(hash[:], scratch[:]) - }) -} - -// PutBatch accepts a pending batch of hashed secret entries to write to disk. -// Each hashed secret is inserted with a corresponding time value, dictating -// when the entry will be evicted from the log. -// NOTE: This method enforces idempotency by writing the replay set obtained -// from the first attempt for a particular batch ID, and decoding the return -// value to subsequent calls. 
For the indices of the replay set to be aligned -// properly, the batch MUST be constructed identically to the first attempt, -// pruning will cause the indices to become invalid. -func (d *DecayedLog) PutBatch(b *sphinx.Batch) (*sphinx.ReplaySet, er.R) { - // Since batched boltdb txns may be executed multiple times before - // succeeding, we will create a new replay set for each invocation to - // avoid any side-effects. If the txn is successful, this replay set - // will be merged with the replay set computed during batch construction - // to generate the complete replay set. If this batch was previously - // processed, the replay set will be deserialized from disk. - var replays *sphinx.ReplaySet - if err := kvdb.Batch(d.db, func(tx kvdb.RwTx) er.R { - sharedHashes := tx.ReadWriteBucket(sharedHashBucket) - if sharedHashes == nil { - return ErrDecayedLogCorrupted.Default() - } - - // Load the batch replay bucket, which will be used to either - // retrieve the result of previously processing this batch, or - // to write the result of this operation. - batchReplayBkt := tx.ReadWriteBucket(batchReplayBucket) - if batchReplayBkt == nil { - return ErrDecayedLogCorrupted.Default() - } - - // Check for the existence of this batch's id in the replay - // bucket. If a non-nil value is found, this indicates that we - // have already processed this batch before. We deserialize the - // resulting and return it to ensure calls to put batch are - // idempotent. - replayBytes := batchReplayBkt.Get(b.ID) - if replayBytes != nil { - replays = sphinx.NewReplaySet() - return replays.Decode(bytes.NewReader(replayBytes)) - } - - // The CLTV will be stored into scratch and then stored into the - // sharedHashBucket. 
- var scratch [4]byte - - replays = sphinx.NewReplaySet() - err := b.ForEach(func(seqNum uint16, hashPrefix *sphinx.HashPrefix, cltv uint32) er.R { - // Retrieve the bytes which represents the CLTV - valueBytes := sharedHashes.Get(hashPrefix[:]) - if valueBytes != nil { - replays.Add(seqNum) - return nil - } - - // Serialize the cltv value and write an entry keyed by - // the hash prefix. - binary.BigEndian.PutUint32(scratch[:], cltv) - return sharedHashes.Put(hashPrefix[:], scratch[:]) - }) - if err != nil { - return err - } - - // Merge the replay set computed from checking the on-disk - // entries with the in-batch replays computed during this - // batch's construction. - replays.Merge(b.ReplaySet) - - // Write the replay set under the batch identifier to the batch - // replays bucket. This can be used during recovery to test (1) - // that a particular batch was successfully processed and (2) - // recover the indexes of the adds that were rejected as - // replays. - var replayBuf bytes.Buffer - if err := replays.Encode(&replayBuf); err != nil { - return err - } - - return batchReplayBkt.Put(b.ID, replayBuf.Bytes()) - }); err != nil { - return nil, err - } - - b.ReplaySet = replays - b.IsCommitted = true - - return replays, nil -} - -// A compile time check to see if DecayedLog adheres to the PersistLog -// interface. 
-var _ sphinx.ReplayLog = (*DecayedLog)(nil) diff --git a/lnd/htlcswitch/decayedlog_test.go b/lnd/htlcswitch/decayedlog_test.go deleted file mode 100644 index 3a91e097..00000000 --- a/lnd/htlcswitch/decayedlog_test.go +++ /dev/null @@ -1,333 +0,0 @@ -package htlcswitch - -import ( - "crypto/rand" - "io/ioutil" - "os" - "testing" - "time" - - "github.com/pkt-cash/pktd/btcutil/er" - sphinx "github.com/pkt-cash/pktd/lightning-onion" - "github.com/pkt-cash/pktd/lnd/chainntnfs" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/lnd/lntest/mock" -) - -const ( - cltv uint32 = 100000 -) - -// tempDecayedLogPath creates a new temporary database path to back a single -// deccayed log instance. -func tempDecayedLogPath(t *testing.T) (string, string) { - dir, err := ioutil.TempDir("", "decayedlog") - if err != nil { - t.Fatalf("unable to create temporary decayed log dir: %v", err) - } - - return dir, "sphinxreplay.db" -} - -// startup sets up the DecayedLog and possibly the garbage collector. -func startup(dbPath, dbFileName string, notifier bool) (sphinx.ReplayLog, - *mock.ChainNotifier, *sphinx.HashPrefix, er.R) { - - var log sphinx.ReplayLog - var chainNotifier *mock.ChainNotifier - if notifier { - - // Create the MockNotifier which triggers the garbage collector - chainNotifier = &mock.ChainNotifier{ - SpendChan: make(chan *chainntnfs.SpendDetail), - EpochChan: make(chan *chainntnfs.BlockEpoch, 1), - ConfChan: make(chan *chainntnfs.TxConfirmation), - } - - // Initialize the DecayedLog object - log = NewDecayedLog( - dbPath, dbFileName, &kvdb.BoltConfig{}, chainNotifier, - ) - } else { - // Initialize the DecayedLog object - log = NewDecayedLog(dbPath, dbFileName, &kvdb.BoltConfig{}, nil) - } - - // Open the channeldb (start the garbage collector) - err := log.Start() - if err != nil { - return nil, nil, nil, err - } - - // Create a HashPrefix identifier for a packet. 
Instead of actually - // generating an ECDH secret and hashing it, simulate with random bytes. - // This is used as a key to retrieve the cltv value. - var hashedSecret sphinx.HashPrefix - _, errr := rand.Read(hashedSecret[:]) - if errr != nil { - return nil, nil, nil, er.E(errr) - } - - return log, chainNotifier, &hashedSecret, nil -} - -// shutdown deletes the temporary directory that the test database uses -// and handles closing the database. -func shutdown(dir string, d sphinx.ReplayLog) { - d.Stop() - os.RemoveAll(dir) -} - -// TestDecayedLogGarbageCollector tests the ability of the garbage collector -// to delete expired cltv values every time a block is received. Expired cltv -// values are cltv values that are < current block height. -func TestDecayedLogGarbageCollector(t *testing.T) { - t.Parallel() - - dbPath, dbFileName := tempDecayedLogPath(t) - - d, notifier, hashedSecret, err := startup(dbPath, dbFileName, true) - if err != nil { - t.Fatalf("Unable to start up DecayedLog: %v", err) - } - defer shutdown(dbPath, d) - - // Store in the sharedHashBucket. - err = d.Put(hashedSecret, cltv) - if err != nil { - t.Fatalf("Unable to store in channeldb: %v", err) - } - - // Wait for database write (GC is in a goroutine) - time.Sleep(500 * time.Millisecond) - - // Send block notifications to garbage collector. The garbage collector - // should remove the entry by block 100001. 
- - // Send block 100000 - notifier.EpochChan <- &chainntnfs.BlockEpoch{ - Height: 100000, - } - - // Assert that hashedSecret is still in the sharedHashBucket - val, err := d.Get(hashedSecret) - if err != nil { - t.Fatalf("Get failed - received an error upon Get: %v", err) - } - - if val != cltv { - t.Fatalf("GC incorrectly deleted CLTV") - } - - // Send block 100001 (expiry block) - notifier.EpochChan <- &chainntnfs.BlockEpoch{ - Height: 100001, - } - - // Wait for database write (GC is in a goroutine) - time.Sleep(500 * time.Millisecond) - - // Assert that hashedSecret is not in the sharedHashBucket - _, err = d.Get(hashedSecret) - if err == nil { - t.Fatalf("CLTV was not deleted") - } - if !sphinx.ErrLogEntryNotFound.Is(err) { - t.Fatalf("Get failed - received unexpected error upon Get: %v", err) - } -} - -// TestDecayedLogPersistentGarbageCollector tests the persistence property of -// the garbage collector. The garbage collector will be restarted immediately and -// a block that expires the stored CLTV value will be sent to the ChainNotifier. -// We test that this causes the pair to be deleted even -// on GC restarts. -func TestDecayedLogPersistentGarbageCollector(t *testing.T) { - t.Parallel() - - dbPath, dbFileName := tempDecayedLogPath(t) - - d, _, hashedSecret, err := startup(dbPath, dbFileName, true) - if err != nil { - t.Fatalf("Unable to start up DecayedLog: %v", err) - } - defer shutdown(dbPath, d) - - // Store in the sharedHashBucket - if err = d.Put(hashedSecret, cltv); err != nil { - t.Fatalf("Unable to store in channeldb: %v", err) - } - - // The hash prefix should be retrievable from the decayed log. - _, err = d.Get(hashedSecret) - if err != nil { - t.Fatalf("Get failed - received unexpected error upon Get: %v", err) - } - - // Shut down DecayedLog and the garbage collector along with it. 
- d.Stop() - - d2, notifier2, _, err := startup(dbPath, dbFileName, true) - if err != nil { - t.Fatalf("Unable to restart DecayedLog: %v", err) - } - defer shutdown(dbPath, d2) - - // Check that the hash prefix still exists in the new db instance. - _, err = d2.Get(hashedSecret) - if err != nil { - t.Fatalf("Get failed - received unexpected error upon Get: %v", err) - } - - // Send a block notification to the garbage collector that expires - // the stored CLTV. - notifier2.EpochChan <- &chainntnfs.BlockEpoch{ - Height: int32(100001), - } - - // Wait for database write (GC is in a goroutine) - time.Sleep(500 * time.Millisecond) - - // Assert that hashedSecret is not in the sharedHashBucket - _, err = d2.Get(hashedSecret) - if !sphinx.ErrLogEntryNotFound.Is(err) { - t.Fatalf("Get failed - received unexpected error upon Get: %v", err) - } -} - -// TestDecayedLogInsertionAndRetrieval inserts a cltv value into the -// sharedHashBucket and then deletes it and finally asserts that we can no -// longer retrieve it. -func TestDecayedLogInsertionAndDeletion(t *testing.T) { - t.Parallel() - - dbPath, dbFileName := tempDecayedLogPath(t) - - d, _, hashedSecret, err := startup(dbPath, dbFileName, false) - if err != nil { - t.Fatalf("Unable to start up DecayedLog: %v", err) - } - defer shutdown(dbPath, d) - - // Store in the sharedHashBucket. - err = d.Put(hashedSecret, cltv) - if err != nil { - t.Fatalf("Unable to store in channeldb: %v", err) - } - - // Delete hashedSecret from the sharedHashBucket. - err = d.Delete(hashedSecret) - if err != nil { - t.Fatalf("Unable to delete from channeldb: %v", err) - } - - // Assert that hashedSecret is not in the sharedHashBucket - _, err = d.Get(hashedSecret) - if err == nil { - t.Fatalf("CLTV was not deleted") - } - if !sphinx.ErrLogEntryNotFound.Is(err) { - t.Fatalf("Get failed - received unexpected error upon Get: %v", err) - } -} - -// TestDecayedLogStartAndStop tests for persistence. 
The DecayedLog is started, -// a cltv value is stored in the sharedHashBucket, and then it the DecayedLog -// is stopped. The DecayedLog is then started up again and we test that the -// cltv value is indeed still stored in the sharedHashBucket. We then delete -// the cltv value and check that it persists upon startup. -func TestDecayedLogStartAndStop(t *testing.T) { - t.Parallel() - - dbPath, dbFileName := tempDecayedLogPath(t) - - d, _, hashedSecret, err := startup(dbPath, dbFileName, false) - if err != nil { - t.Fatalf("Unable to start up DecayedLog: %v", err) - } - defer shutdown(dbPath, d) - - // Store in the sharedHashBucket. - err = d.Put(hashedSecret, cltv) - if err != nil { - t.Fatalf("Unable to store in channeldb: %v", err) - } - - // Shutdown the DecayedLog's channeldb - d.Stop() - - d2, _, hashedSecret2, err := startup(dbPath, dbFileName, false) - if err != nil { - t.Fatalf("Unable to restart DecayedLog: %v", err) - } - defer shutdown(dbPath, d2) - - // Retrieve the stored cltv value given the hashedSecret key. - value, err := d2.Get(hashedSecret) - if err != nil { - t.Fatalf("Unable to retrieve from channeldb: %v", err) - } - - // Check that the original cltv value matches the retrieved cltv - // value. 
- if cltv != value { - t.Fatalf("Value retrieved doesn't match value stored") - } - - // Delete hashedSecret from sharedHashBucket - err = d2.Delete(hashedSecret2) - if err != nil { - t.Fatalf("Unable to delete from channeldb: %v", err) - } - - // Shutdown the DecayedLog's channeldb - d2.Stop() - - d3, _, hashedSecret3, err := startup(dbPath, dbFileName, false) - if err != nil { - t.Fatalf("Unable to restart DecayedLog: %v", err) - } - defer shutdown(dbPath, d3) - - // Assert that hashedSecret is not in the sharedHashBucket - _, err = d3.Get(hashedSecret3) - if err == nil { - t.Fatalf("CLTV was not deleted") - } - if !sphinx.ErrLogEntryNotFound.Is(err) { - t.Fatalf("Get failed - received unexpected error upon Get: %v", err) - } -} - -// TestDecayedLogStorageAndRetrieval stores a cltv value and then retrieves it -// via the nested sharedHashBucket and finally asserts that the original stored -// and retrieved cltv values are equal. -func TestDecayedLogStorageAndRetrieval(t *testing.T) { - t.Parallel() - - dbPath, dbFileName := tempDecayedLogPath(t) - - d, _, hashedSecret, err := startup(dbPath, dbFileName, false) - if err != nil { - t.Fatalf("Unable to start up DecayedLog: %v", err) - } - defer shutdown(dbPath, d) - - // Store in the sharedHashBucket - err = d.Put(hashedSecret, cltv) - if err != nil { - t.Fatalf("Unable to store in channeldb: %v", err) - } - - // Retrieve the stored cltv value given the hashedSecret key. - value, err := d.Get(hashedSecret) - if err != nil { - t.Fatalf("Unable to retrieve from channeldb: %v", err) - } - - // If the original cltv value does not match the value retrieved, - // then the test failed. 
- if cltv != value { - t.Fatalf("Value retrieved doesn't match value stored") - } -} diff --git a/lnd/htlcswitch/failure.go b/lnd/htlcswitch/failure.go deleted file mode 100644 index f98ba2f9..00000000 --- a/lnd/htlcswitch/failure.go +++ /dev/null @@ -1,193 +0,0 @@ -package htlcswitch - -import ( - "bytes" - "fmt" - - "github.com/pkt-cash/pktd/btcutil/er" - sphinx "github.com/pkt-cash/pktd/lightning-onion" - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -var ( - ErrUnknownEncrypterType = Err.Code("ErrUnknownEncrypterType") -) - -// ClearTextError is an interface which is implemented by errors that occur -// when we know the underlying wire failure message. These errors are the -// opposite to opaque errors which are onion-encrypted blobs only understandable -// to the initiating node. ClearTextErrors are used when we fail a htlc at our -// node, or one of our initiated payments failed and we can decrypt the onion -// encrypted error fully. -type ClearTextError interface { - error - - // WireMessage extracts a valid wire failure message from an internal - // error which may contain additional metadata (which should not be - // exposed to the network). This value may be nil in the case where - // an unknown wire error is returned by one of our peers. - WireMessage() lnwire.FailureMessage -} - -// LinkError is an implementation of the ClearTextError interface which -// represents failures that occur on our incoming or outgoing link. -type LinkError struct { - // msg returns the wire failure associated with the error. - // This value should *not* be nil, because we should always - // know the failure type for failures which occur at our own - // node. - msg lnwire.FailureMessage - - // FailureDetail enriches the wire error with additional information. - FailureDetail -} - -// NewLinkError returns a LinkError with the failure message provided. 
-// The failure message provided should *not* be nil, because we should -// always know the failure type for failures which occur at our own node. -func NewLinkError(msg lnwire.FailureMessage) *LinkError { - return &LinkError{msg: msg} -} - -// NewDetailedLinkError returns a link error that enriches a wire message with -// a failure detail. -func NewDetailedLinkError(msg lnwire.FailureMessage, - detail FailureDetail) *LinkError { - - return &LinkError{ - msg: msg, - FailureDetail: detail, - } -} - -// WireMessage extracts a valid wire failure message from an internal -// error which may contain additional metadata (which should not be -// exposed to the network). This value should never be nil for LinkErrors, -// because we are the ones failing the htlc. -// -// Note this is part of the ClearTextError interface. -func (l *LinkError) WireMessage() lnwire.FailureMessage { - return l.msg -} - -// Error returns the string representation of a link error. -// -// Note this is part of the ClearTextError interface. -func (l *LinkError) Error() string { - // If the link error has no failure detail, return the wire message's - // error. - if l.FailureDetail == nil { - return l.msg.Error() - } - - return l.FailureDetail.FailureString() -} - -// ForwardingError wraps an lnwire.FailureMessage in a struct that also -// includes the source of the error. -type ForwardingError struct { - // FailureSourceIdx is the index of the node that sent the failure. With - // this information, the dispatcher of a payment can modify their set of - // candidate routes in response to the type of failure extracted. Index - // zero is the self node. - FailureSourceIdx int - - // msg is the wire message associated with the error. This value may - // be nil in the case where we fail to decode failure message sent by - // a peer. 
- msg lnwire.FailureMessage -} - -// WireMessage extracts a valid wire failure message from an internal -// error which may contain additional metadata (which should not be -// exposed to the network). This value may be nil in the case where -// an unknown wire error is returned by one of our peers. -// -// Note this is part of the ClearTextError interface. -func (f *ForwardingError) WireMessage() lnwire.FailureMessage { - return f.msg -} - -// Error implements the built-in error interface. We use this method to allow -// the switch or any callers to insert additional context to the error message -// returned. -func (f *ForwardingError) Error() string { - return fmt.Sprintf( - "%v@%v", f.msg, f.FailureSourceIdx, - ) -} - -// NewForwardingError creates a new payment error which wraps a wire error -// with additional metadata. -func NewForwardingError(failure lnwire.FailureMessage, - index int) *ForwardingError { - - return &ForwardingError{ - FailureSourceIdx: index, - msg: failure, - } -} - -// NewUnknownForwardingError returns a forwarding error which has a nil failure -// message. This constructor should only be used in the case where we cannot -// decode the failure we have received from a peer. -func NewUnknownForwardingError(index int) *ForwardingError { - return &ForwardingError{ - FailureSourceIdx: index, - } -} - -// ErrorDecrypter is an interface that is used to decrypt the onion encrypted -// failure reason an extra out a well formed error. -type ErrorDecrypter interface { - // DecryptError peels off each layer of onion encryption from the first - // hop, to the source of the error. A fully populated - // lnwire.FailureMessage is returned along with the source of the - // error. - DecryptError(lnwire.OpaqueReason) (*ForwardingError, er.R) -} - -// OnionErrorDecrypter is the interface that provides onion level error -// decryption. -type OnionErrorDecrypter interface { - // DecryptError attempts to decrypt the passed encrypted error response. 
- // The onion failure is encrypted in backward manner, starting from the - // node where error have occurred. As a result, in order to decrypt the - // error we need get all shared secret and apply decryption in the - // reverse order. - DecryptError(encryptedData []byte) (*sphinx.DecryptedError, er.R) -} - -// SphinxErrorDecrypter wraps the sphinx data SphinxErrorDecrypter and maps the -// returned errors to concrete lnwire.FailureMessage instances. -type SphinxErrorDecrypter struct { - OnionErrorDecrypter -} - -// DecryptError peels off each layer of onion encryption from the first hop, to -// the source of the error. A fully populated lnwire.FailureMessage is returned -// along with the source of the error. -// -// NOTE: Part of the ErrorDecrypter interface. -func (s *SphinxErrorDecrypter) DecryptError(reason lnwire.OpaqueReason) ( - *ForwardingError, er.R) { - - failure, err := s.OnionErrorDecrypter.DecryptError(reason) - if err != nil { - return nil, err - } - - // Decode the failure. If an error occurs, we leave the failure message - // field nil. - r := bytes.NewReader(failure.Message) - failureMsg, err := lnwire.DecodeFailure(r, 0) - if err != nil { - return NewUnknownForwardingError(failure.SenderIdx), nil - } - - return NewForwardingError(failureMsg, failure.SenderIdx), nil -} - -// A compile time check to ensure ErrorDecrypter implements the Deobfuscator -// interface. -var _ ErrorDecrypter = (*SphinxErrorDecrypter)(nil) diff --git a/lnd/htlcswitch/failure_detail.go b/lnd/htlcswitch/failure_detail.go deleted file mode 100644 index 341688d1..00000000 --- a/lnd/htlcswitch/failure_detail.go +++ /dev/null @@ -1,97 +0,0 @@ -package htlcswitch - -// FailureDetail is an interface implemented by failures that occur on -// our incoming or outgoing link, or within the switch itself. -type FailureDetail interface { - // FailureString returns the string representation of a failure - // detail. 
- FailureString() string -} - -// OutgoingFailure is an enum which is used to enrich failures which occur in -// the switch or on our outgoing link with additional metadata. -type OutgoingFailure int - -const ( - // OutgoingFailureNone is returned when the wire message contains - // sufficient information. - OutgoingFailureNone OutgoingFailure = iota - - // OutgoingFailureDecodeError indicates that we could not decode the - // failure reason provided for a failed payment. - OutgoingFailureDecodeError - - // OutgoingFailureLinkNotEligible indicates that a routing attempt was - // made over a link that is not eligible for routing. - OutgoingFailureLinkNotEligible - - // OutgoingFailureOnChainTimeout indicates that a payment had to be - // timed out on chain before it got past the first hop by us or the - // remote party. - OutgoingFailureOnChainTimeout - - // OutgoingFailureHTLCExceedsMax is returned when a htlc exceeds our - // policy's maximum htlc amount. - OutgoingFailureHTLCExceedsMax - - // OutgoingFailureInsufficientBalance is returned when we cannot route a - // htlc due to insufficient outgoing capacity. - OutgoingFailureInsufficientBalance - - // OutgoingFailureCircularRoute is returned when an attempt is made - // to forward a htlc through our node which arrives and leaves on the - // same channel. - OutgoingFailureCircularRoute - - // OutgoingFailureIncompleteForward is returned when we cancel an incomplete - // forward. - OutgoingFailureIncompleteForward - - // OutgoingFailureDownstreamHtlcAdd is returned when we fail to add a - // downstream htlc to our outgoing link. - OutgoingFailureDownstreamHtlcAdd - - // OutgoingFailureForwardsDisabled is returned when the switch is - // configured to disallow forwards. - OutgoingFailureForwardsDisabled -) - -// FailureString returns the string representation of a failure detail. -// -// Note: it is part of the FailureDetail interface. 
-func (fd OutgoingFailure) FailureString() string { - switch fd { - case OutgoingFailureNone: - return "no failure detail" - - case OutgoingFailureDecodeError: - return "could not decode wire failure" - - case OutgoingFailureLinkNotEligible: - return "link not eligible" - - case OutgoingFailureOnChainTimeout: - return "payment was resolved on-chain, then canceled back" - - case OutgoingFailureHTLCExceedsMax: - return "htlc exceeds maximum policy amount" - - case OutgoingFailureInsufficientBalance: - return "insufficient bandwidth to route htlc" - - case OutgoingFailureCircularRoute: - return "same incoming and outgoing channel" - - case OutgoingFailureIncompleteForward: - return "failed after detecting incomplete forward" - - case OutgoingFailureDownstreamHtlcAdd: - return "could not add downstream htlc" - - case OutgoingFailureForwardsDisabled: - return "node configured to disallow forwards" - - default: - return "unknown failure detail" - } -} diff --git a/lnd/htlcswitch/hodl/config_dev.go b/lnd/htlcswitch/hodl/config_dev.go deleted file mode 100644 index ef8389ad..00000000 --- a/lnd/htlcswitch/hodl/config_dev.go +++ /dev/null @@ -1,67 +0,0 @@ -// +build dev - -package hodl - -// Config is a struct enumerating the possible command line flags that are used -// to activate specific hodl modes. -// -// NOTE: THESE FLAGS ARE INTENDED FOR TESTING PURPOSES ONLY. ACTIVATING THESE -// FLAGS IN PRODUCTION WILL VIOLATE CRITICAL ASSUMPTIONS MADE BY THIS SOFTWARE. 
-type Config struct { - ExitSettle bool `long:"exit-settle" description:"Instructs the node to drop ADDs for which it is the exit node, and to not settle back to the sender"` - - AddIncoming bool `long:"add-incoming" description:"Instructs the node to drop incoming ADDs before processing them in the incoming link"` - - SettleIncoming bool `long:"settle-incoming" description:"Instructs the node to drop incoming SETTLEs before processing them in the incoming link"` - - FailIncoming bool `long:"fail-incoming" description:"Instructs the node to drop incoming FAILs before processing them in the incoming link"` - - AddOutgoing bool `long:"add-outgoing" description:"Instructs the node to drop outgoing ADDs before applying them to the channel state"` - - SettleOutgoing bool `long:"settle-outgoing" description:"Instructs the node to drop outgoing SETTLEs before applying them to the channel state"` - - FailOutgoing bool `long:"fail-outgoing" description:"Instructs the node to drop outgoing FAILs before applying them to the channel state"` - - Commit bool `long:"commit" description:"Instructs the node to add HTLCs to its local commitment state and to open circuits for any ADDs, but abort before committing the changes"` - - BogusSettle bool `long:"bogus-settle" description:"Instructs the node to settle back any incoming HTLC with a bogus preimage"` -} - -// Mask extracts the flags specified in the configuration, composing a Mask from -// the active flags. 
-func (c *Config) Mask() Mask { - var flags []Flag - - if c.ExitSettle { - flags = append(flags, ExitSettle) - } - if c.AddIncoming { - flags = append(flags, AddIncoming) - } - if c.SettleIncoming { - flags = append(flags, SettleIncoming) - } - if c.FailIncoming { - flags = append(flags, FailIncoming) - } - if c.AddOutgoing { - flags = append(flags, AddOutgoing) - } - if c.SettleOutgoing { - flags = append(flags, SettleOutgoing) - } - if c.FailOutgoing { - flags = append(flags, FailOutgoing) - } - if c.Commit { - flags = append(flags, Commit) - } - if c.BogusSettle { - flags = append(flags, BogusSettle) - } - - // NOTE: The value returned here will only honor the configuration if - // the dev build flag is present. In production, this method always - // returns hodl.MaskNone and Active(*) always returns false. - return MaskFromFlags(flags...) -} diff --git a/lnd/htlcswitch/hodl/config_prod.go b/lnd/htlcswitch/hodl/config_prod.go deleted file mode 100644 index c5e9e934..00000000 --- a/lnd/htlcswitch/hodl/config_prod.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !dev - -package hodl - -// Config is an empty struct disabling command line hodl flags in production. -type Config struct{} - -// Mask in production always returns MaskNone. -func (c *Config) Mask() Mask { - return MaskNone -} diff --git a/lnd/htlcswitch/hodl/flags.go b/lnd/htlcswitch/hodl/flags.go deleted file mode 100644 index 7fed7d09..00000000 --- a/lnd/htlcswitch/hodl/flags.go +++ /dev/null @@ -1,119 +0,0 @@ -package hodl - -import "fmt" - -// MaskNone represents the empty Mask, in which no breakpoints are -// active. -const MaskNone = Mask(0) - -type ( - // Flag represents a single breakpoint where an HTLC should be dropped - // during forwarding. Flags can be composed into a Mask to express more - // complex combinations. - Flag uint32 - - // Mask is a bitvector combining multiple Flags that can be queried to - // see which breakpoints are active. 
- Mask uint32 -) - -const ( - // ExitSettle drops an incoming ADD for which we are the exit node, - // before processing in the link. - ExitSettle Flag = 1 << iota - - // AddIncoming drops an incoming ADD before processing if we are not - // the exit node. - AddIncoming - - // SettleIncoming drops an incoming SETTLE before processing if we - // are not the exit node. - SettleIncoming - - // FailIncoming drops an incoming FAIL before processing if we are - // not the exit node. - FailIncoming - - // TODO(conner): add modes for switch breakpoints - - // AddOutgoing drops an outgoing ADD before it is added to the - // in-memory commitment state of the link. - AddOutgoing - - // SettleOutgoing drops an SETTLE before it is added to the - // in-memory commitment state of the link. - SettleOutgoing - - // FailOutgoing drops an outgoing FAIL before is is added to the - // in-memory commitment state of the link. - FailOutgoing - - // Commit drops all HTLC after any outgoing circuits have been - // opened, but before the in-memory commitment state is persisted. - Commit - - // BogusSettle attempts to settle back any incoming HTLC for which we - // are the exit node with a bogus preimage. - BogusSettle -) - -// String returns a human-readable identifier for a given Flag. -func (f Flag) String() string { - switch f { - case ExitSettle: - return "ExitSettle" - case AddIncoming: - return "AddIncoming" - case SettleIncoming: - return "SettleIncoming" - case FailIncoming: - return "FailIncoming" - case AddOutgoing: - return "AddOutgoing" - case SettleOutgoing: - return "SettleOutgoing" - case FailOutgoing: - return "FailOutgoing" - case Commit: - return "Commit" - case BogusSettle: - return "BogusSettle" - default: - return "UnknownHodlFlag" - } -} - -// Warning generates a warning message to log if a particular breakpoint is -// triggered during execution. 
-func (f Flag) Warning() string { - var msg string - switch f { - case ExitSettle: - msg = "will not attempt to settle ADD with sender" - case AddIncoming: - msg = "will not attempt to forward ADD to switch" - case SettleIncoming: - msg = "will not attempt to forward SETTLE to switch" - case FailIncoming: - msg = "will not attempt to forward FAIL to switch" - case AddOutgoing: - msg = "will not update channel state with downstream ADD" - case SettleOutgoing: - msg = "will not update channel state with downstream SETTLE" - case FailOutgoing: - msg = "will not update channel state with downstream FAIL" - case Commit: - msg = "will not commit pending channel updates" - case BogusSettle: - msg = "will settle HTLC with bogus preimage" - default: - msg = "incorrect hodl flag usage" - } - - return fmt.Sprintf("%s mode enabled -- %s", f, msg) -} - -// Mask returns the Mask consisting solely of this Flag. -func (f Flag) Mask() Mask { - return Mask(f) -} diff --git a/lnd/htlcswitch/hodl/mask_dev.go b/lnd/htlcswitch/hodl/mask_dev.go deleted file mode 100644 index a1d50ff0..00000000 --- a/lnd/htlcswitch/hodl/mask_dev.go +++ /dev/null @@ -1,41 +0,0 @@ -// +build dev - -package hodl - -import ( - "fmt" - "strings" -) - -// MaskFromFlags merges a variadic set of Flags into a single Mask. -func MaskFromFlags(flags ...Flag) Mask { - var mask Mask - for _, flag := range flags { - mask |= Mask(flag) - } - - return mask -} - -// Active returns true if the bit corresponding to the flag is set within the -// mask. -func (m Mask) Active(flag Flag) bool { - return (Flag(m) & flag) > 0 -} - -// String returns a human-readable description of all active Flags. 
-func (m Mask) String() string { - if m == MaskNone { - return "hodl.Mask(NONE)" - } - - var activeFlags []string - for i := uint(0); i < 32; i++ { - flag := Flag(1 << i) - if m.Active(flag) { - activeFlags = append(activeFlags, flag.String()) - } - } - - return fmt.Sprintf("hodl.Mask(%s)", strings.Join(activeFlags, "|")) -} diff --git a/lnd/htlcswitch/hodl/mask_prod.go b/lnd/htlcswitch/hodl/mask_prod.go deleted file mode 100644 index 1b8a4b70..00000000 --- a/lnd/htlcswitch/hodl/mask_prod.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build !dev - -package hodl - -// MaskFromFlags in production always returns MaskNone. -func MaskFromFlags(_ ...Flag) Mask { - return MaskNone -} - -// Active in production always returns false for all Flags. -func (m Mask) Active(_ Flag) bool { - return false -} - -// String returns the human-readable identifier for MaskNone. -func (m Mask) String() string { - return "hodl.Mask(NONE)" -} diff --git a/lnd/htlcswitch/hodl/mask_test.go b/lnd/htlcswitch/hodl/mask_test.go deleted file mode 100644 index 40730d99..00000000 --- a/lnd/htlcswitch/hodl/mask_test.go +++ /dev/null @@ -1,113 +0,0 @@ -package hodl_test - -import ( - "testing" - - "github.com/pkt-cash/pktd/lnd/build" - "github.com/pkt-cash/pktd/lnd/htlcswitch/hodl" -) - -var hodlMaskTests = []struct { - mask hodl.Mask - flags map[hodl.Flag]struct{} -}{ - { - // Check that the empty mask has no active flags. - mask: hodl.MaskNone, - flags: map[hodl.Flag]struct{}{}, - }, - { - // Check that passing no arguments to MaskFromFlags is - // equivalent to MaskNone. - mask: hodl.MaskFromFlags(), - flags: map[hodl.Flag]struct{}{}, - }, - - { - // Check using Mask to convert a single flag into a Mask only - // reports that flag active. - mask: hodl.ExitSettle.Mask(), - flags: map[hodl.Flag]struct{}{ - hodl.ExitSettle: {}, - }, - }, - { - // Check that using MaskFromFlags on a single flag only reports - // that flag active. 
- mask: hodl.MaskFromFlags(hodl.Commit), - flags: map[hodl.Flag]struct{}{ - hodl.Commit: {}, - }, - }, - - { - // Check that using MaskFromFlags on some-but-not-all flags - // reports the correct subset of flags as active. - mask: hodl.MaskFromFlags( - hodl.ExitSettle, - hodl.Commit, - hodl.AddIncoming, - hodl.SettleOutgoing, - ), - flags: map[hodl.Flag]struct{}{ - hodl.ExitSettle: {}, - hodl.Commit: {}, - hodl.AddIncoming: {}, - hodl.SettleOutgoing: {}, - }, - }, - { - // Check that using MaskFromFlags on all known flags reports - // those an no other flags. - mask: hodl.MaskFromFlags( - hodl.ExitSettle, - hodl.AddIncoming, - hodl.SettleIncoming, - hodl.FailIncoming, - hodl.AddOutgoing, - hodl.SettleOutgoing, - hodl.FailOutgoing, - hodl.Commit, - hodl.BogusSettle, - ), - flags: map[hodl.Flag]struct{}{ - hodl.ExitSettle: {}, - hodl.AddIncoming: {}, - hodl.SettleIncoming: {}, - hodl.FailIncoming: {}, - hodl.AddOutgoing: {}, - hodl.SettleOutgoing: {}, - hodl.FailOutgoing: {}, - hodl.Commit: {}, - hodl.BogusSettle: {}, - }, - }, -} - -// TestMask iterates through all of the hodlMaskTests, checking that the mask -// correctly reports active for flags in the tests' expected flags, and inactive -// for all others. 
-func TestMask(t *testing.T) { - if !build.IsDevBuild() { - t.Fatalf("htlcswitch tests must be run with '-tags=dev'") - } - - for i, test := range hodlMaskTests { - for j := uint32(0); i < 32; i++ { - flag := hodl.Flag(1 << j) - _, shouldBeActive := test.flags[flag] - - switch { - case shouldBeActive && !test.mask.Active(flag): - t.Fatalf("hodl mask test #%d -- "+ - "expected flag %s to be active", - i, flag) - - case !shouldBeActive && test.mask.Active(flag): - t.Fatalf("hodl mask test #%d -- "+ - "expected flag %s to be inactive", - i, flag) - } - } - } -} diff --git a/lnd/htlcswitch/hop/error_encryptor.go b/lnd/htlcswitch/hop/error_encryptor.go deleted file mode 100644 index f3d2f090..00000000 --- a/lnd/htlcswitch/hop/error_encryptor.go +++ /dev/null @@ -1,206 +0,0 @@ -package hop - -import ( - "bytes" - "io" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - sphinx "github.com/pkt-cash/pktd/lightning-onion" - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -// EncrypterType establishes an enum used in serialization to indicate how to -// decode a concrete instance of the ErrorEncrypter interface. -type EncrypterType byte - -const ( - // EncrypterTypeNone signals that no error encyrpter is present, this - // can happen if the htlc is originates in the switch. - EncrypterTypeNone EncrypterType = 0 - - // EncrypterTypeSphinx is used to identify a sphinx onion error - // encrypter instance. - EncrypterTypeSphinx = 1 - - // EncrypterTypeMock is used to identify a mock obfuscator instance. - EncrypterTypeMock = 2 -) - -// ErrorEncrypterExtracter defines a function signature that extracts an -// ErrorEncrypter from an sphinx OnionPacket. 
-type ErrorEncrypterExtracter func(*btcec.PublicKey) (ErrorEncrypter, - lnwire.FailCode) - -// ErrorEncrypter is an interface that is used to encrypt HTLC related errors -// at the source of the error, and also at each intermediate hop all the way -// back to the source of the payment. -type ErrorEncrypter interface { - // EncryptFirstHop transforms a concrete failure message into an - // encrypted opaque failure reason. This method will be used at the - // source that the error occurs. It differs from IntermediateEncrypt - // slightly, in that it computes a proper MAC over the error. - EncryptFirstHop(lnwire.FailureMessage) (lnwire.OpaqueReason, er.R) - - // EncryptMalformedError is similar to EncryptFirstHop (it adds the - // MAC), but it accepts an opaque failure reason rather than a failure - // message. This method is used when we receive an - // UpdateFailMalformedHTLC from the remote peer and then need to - // convert that into a proper error from only the raw bytes. - EncryptMalformedError(lnwire.OpaqueReason) lnwire.OpaqueReason - - // IntermediateEncrypt wraps an already encrypted opaque reason error - // in an additional layer of onion encryption. This process repeats - // until the error arrives at the source of the payment. - IntermediateEncrypt(lnwire.OpaqueReason) lnwire.OpaqueReason - - // Type returns an enum indicating the underlying concrete instance - // backing this interface. - Type() EncrypterType - - // Encode serializes the encrypter's ephemeral public key to the given - // io.Writer. - Encode(io.Writer) er.R - - // Decode deserializes the encrypter' ephemeral public key from the - // given io.Reader. - Decode(io.Reader) er.R - - // Reextract rederives the encrypter using the extracter, performing an - // ECDH with the sphinx router's key and the ephemeral public key. - // - // NOTE: This should be called shortly after Decode to properly - // reinitialize the error encrypter. 
- Reextract(ErrorEncrypterExtracter) er.R -} - -// SphinxErrorEncrypter is a concrete implementation of both the ErrorEncrypter -// interface backed by an implementation of the Sphinx packet format. As a -// result, all errors handled are themselves wrapped in layers of onion -// encryption and must be treated as such accordingly. -type SphinxErrorEncrypter struct { - *sphinx.OnionErrorEncrypter - - EphemeralKey *btcec.PublicKey -} - -// NewSphinxErrorEncrypter initializes a blank sphinx error encrypter, that -// should be used to deserialize an encoded SphinxErrorEncrypter. Since the -// actual encrypter is not stored in plaintext while at rest, reconstructing the -// error encrypter requires: -// 1) Decode: to deserialize the ephemeral public key. -// 2) Reextract: to "unlock" the actual error encrypter using an active -// OnionProcessor. -func NewSphinxErrorEncrypter() *SphinxErrorEncrypter { - return &SphinxErrorEncrypter{ - OnionErrorEncrypter: nil, - EphemeralKey: &btcec.PublicKey{}, - } -} - -// EncryptFirstHop transforms a concrete failure message into an encrypted -// opaque failure reason. This method will be used at the source that the error -// occurs. It differs from BackwardObfuscate slightly, in that it computes a -// proper MAC over the error. -// -// NOTE: Part of the ErrorEncrypter interface. -func (s *SphinxErrorEncrypter) EncryptFirstHop( - failure lnwire.FailureMessage) (lnwire.OpaqueReason, er.R) { - - var b bytes.Buffer - if err := lnwire.EncodeFailure(&b, failure, 0); err != nil { - return nil, err - } - - // We pass a true as the first parameter to indicate that a MAC should - // be added. - return s.EncryptError(true, b.Bytes()), nil -} - -// EncryptMalformedError is similar to EncryptFirstHop (it adds the MAC), but -// it accepts an opaque failure reason rather than a failure message. 
This -// method is used when we receive an UpdateFailMalformedHTLC from the remote -// peer and then need to convert that into an proper error from only the raw -// bytes. -// -// NOTE: Part of the ErrorEncrypter interface. -func (s *SphinxErrorEncrypter) EncryptMalformedError( - reason lnwire.OpaqueReason) lnwire.OpaqueReason { - - return s.EncryptError(true, reason) -} - -// IntermediateEncrypt wraps an already encrypted opaque reason error in an -// additional layer of onion encryption. This process repeats until the error -// arrives at the source of the payment. We re-encrypt the message on the -// backwards path to ensure that the error is indistinguishable from any other -// error seen. -// -// NOTE: Part of the ErrorEncrypter interface. -func (s *SphinxErrorEncrypter) IntermediateEncrypt( - reason lnwire.OpaqueReason) lnwire.OpaqueReason { - - return s.EncryptError(false, reason) -} - -// Type returns the identifier for a sphinx error encrypter. -func (s *SphinxErrorEncrypter) Type() EncrypterType { - return EncrypterTypeSphinx -} - -// Encode serializes the error encrypter' ephemeral public key to the provided -// io.Writer. -func (s *SphinxErrorEncrypter) Encode(w io.Writer) er.R { - ephemeral := s.EphemeralKey.SerializeCompressed() - _, err := util.Write(w, ephemeral) - return err -} - -// Decode reconstructs the error encrypter's ephemeral public key from the -// provided io.Reader. -func (s *SphinxErrorEncrypter) Decode(r io.Reader) er.R { - var ephemeral [33]byte - if _, err := util.ReadFull(r, ephemeral[:]); err != nil { - return err - } - - var err er.R - s.EphemeralKey, err = btcec.ParsePubKey(ephemeral[:], btcec.S256()) - if err != nil { - return err - } - - return nil -} - -// Reextract rederives the error encrypter from the currently held EphemeralKey. -// This intended to be used shortly after Decode, to fully initialize a -// SphinxErrorEncrypter. 
-func (s *SphinxErrorEncrypter) Reextract( - extract ErrorEncrypterExtracter) er.R { - - obfuscator, failcode := extract(s.EphemeralKey) - if failcode != lnwire.CodeNone { - // This should never happen, since we already validated that - // this obfuscator can be extracted when it was received in the - // link. - return er.Errorf("unable to reconstruct onion "+ - "obfuscator, got failcode: %d", failcode) - } - - sphinxEncrypter, ok := obfuscator.(*SphinxErrorEncrypter) - if !ok { - return er.Errorf("incorrect onion error extracter") - } - - // Copy the freshly extracted encrypter. - s.OnionErrorEncrypter = sphinxEncrypter.OnionErrorEncrypter - - return nil - -} - -// A compile time check to ensure SphinxErrorEncrypter implements the -// ErrorEncrypter interface. -var _ ErrorEncrypter = (*SphinxErrorEncrypter)(nil) diff --git a/lnd/htlcswitch/hop/forwarding_info.go b/lnd/htlcswitch/hop/forwarding_info.go deleted file mode 100644 index 73b85872..00000000 --- a/lnd/htlcswitch/hop/forwarding_info.go +++ /dev/null @@ -1,29 +0,0 @@ -package hop - -import ( - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -// ForwardingInfo contains all the information that is necessary to forward and -// incoming HTLC to the next hop encoded within a valid HopIterator instance. -// Forwarding links are to use this information to authenticate the information -// received within the incoming HTLC, to ensure that the prior hop didn't -// tamper with the end-to-end routing information at all. -type ForwardingInfo struct { - // Network is the target blockchain network that the HTLC will travel - // over next. - Network Network - - // NextHop is the channel ID of the next hop. The received HTLC should - // be forwarded to this particular channel in order to continue the - // end-to-end route. - NextHop lnwire.ShortChannelID - - // AmountToForward is the amount of milli-satoshis that the receiving - // node should forward to the next hop. 
- AmountToForward lnwire.MilliSatoshi - - // OutgoingCTLV is the specified value of the CTLV timelock to be used - // in the outgoing HTLC. - OutgoingCTLV uint32 -} diff --git a/lnd/htlcswitch/hop/iterator.go b/lnd/htlcswitch/hop/iterator.go deleted file mode 100644 index 08d3e865..00000000 --- a/lnd/htlcswitch/hop/iterator.go +++ /dev/null @@ -1,394 +0,0 @@ -package hop - -import ( - "bytes" - "io" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil/er" - sphinx "github.com/pkt-cash/pktd/lightning-onion" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/pktlog/log" -) - -// Iterator is an interface that abstracts away the routing information -// included in HTLC's which includes the entirety of the payment path of an -// HTLC. This interface provides two basic method which carry out: how to -// interpret the forwarding information encoded within the HTLC packet, and hop -// to encode the forwarding information for the _next_ hop. -type Iterator interface { - // HopPayload returns the set of fields that detail exactly _how_ this - // hop should forward the HTLC to the next hop. Additionally, the - // information encoded within the returned ForwardingInfo is to be used - // by each hop to authenticate the information given to it by the prior - // hop. The payload will also contain any additional TLV fields provided - // by the sender. - HopPayload() (*Payload, er.R) - - // EncodeNextHop encodes the onion packet destined for the next hop - // into the passed io.Writer. - EncodeNextHop(w io.Writer) er.R - - // ExtractErrorEncrypter returns the ErrorEncrypter needed for this hop, - // along with a failure code to signal if the decoding was successful. 
- ExtractErrorEncrypter(ErrorEncrypterExtracter) (ErrorEncrypter, - lnwire.FailCode) -} - -// sphinxHopIterator is the Sphinx implementation of hop iterator which uses -// onion routing to encode the payment route in such a way so that node might -// see only the next hop in the route.. -type sphinxHopIterator struct { - // ogPacket is the original packet from which the processed packet is - // derived. - ogPacket *sphinx.OnionPacket - - // processedPacket is the outcome of processing an onion packet. It - // includes the information required to properly forward the packet to - // the next hop. - processedPacket *sphinx.ProcessedPacket -} - -// makeSphinxHopIterator converts a processed packet returned from a sphinx -// router and converts it into an hop iterator for usage in the link. -func makeSphinxHopIterator(ogPacket *sphinx.OnionPacket, - packet *sphinx.ProcessedPacket) *sphinxHopIterator { - - return &sphinxHopIterator{ - ogPacket: ogPacket, - processedPacket: packet, - } -} - -// A compile time check to ensure sphinxHopIterator implements the HopIterator -// interface. -var _ Iterator = (*sphinxHopIterator)(nil) - -// Encode encodes iterator and writes it to the writer. -// -// NOTE: Part of the HopIterator interface. -func (r *sphinxHopIterator) EncodeNextHop(w io.Writer) er.R { - return r.processedPacket.NextPacket.Encode(w) -} - -// HopPayload returns the set of fields that detail exactly _how_ this hop -// should forward the HTLC to the next hop. Additionally, the information -// encoded within the returned ForwardingInfo is to be used by each hop to -// authenticate the information given to it by the prior hop. The payload will -// also contain any additional TLV fields provided by the sender. -// -// NOTE: Part of the HopIterator interface. 
-func (r *sphinxHopIterator) HopPayload() (*Payload, er.R) { - switch r.processedPacket.Payload.Type { - - // If this is the legacy payload, then we'll extract the information - // directly from the pre-populated ForwardingInstructions field. - case sphinx.PayloadLegacy: - fwdInst := r.processedPacket.ForwardingInstructions - return NewLegacyPayload(fwdInst), nil - - // Otherwise, if this is the TLV payload, then we'll make a new stream - // to decode only what we need to make routing decisions. - case sphinx.PayloadTLV: - return NewPayloadFromReader(bytes.NewReader( - r.processedPacket.Payload.Payload, - )) - - default: - return nil, er.Errorf("unknown sphinx payload type: %v", - r.processedPacket.Payload.Type) - } -} - -// ExtractErrorEncrypter decodes and returns the ErrorEncrypter for this hop, -// along with a failure code to signal if the decoding was successful. The -// ErrorEncrypter is used to encrypt errors back to the sender in the event that -// a payment fails. -// -// NOTE: Part of the HopIterator interface. -func (r *sphinxHopIterator) ExtractErrorEncrypter( - extracter ErrorEncrypterExtracter) (ErrorEncrypter, lnwire.FailCode) { - - return extracter(r.ogPacket.EphemeralKey) -} - -// OnionProcessor is responsible for keeping all sphinx dependent parts inside -// and expose only decoding function. With such approach we give freedom for -// subsystems which wants to decode sphinx path to not be dependable from -// sphinx at all. -// -// NOTE: The reason for keeping decoder separated from hop iterator is too -// maintain the hop iterator abstraction. Without it the structures which using -// the hop iterator should contain sphinx router which makes their creations in -// tests dependent from the sphinx internal parts. -type OnionProcessor struct { - router *sphinx.Router -} - -// NewOnionProcessor creates new instance of decoder. 
-func NewOnionProcessor(router *sphinx.Router) *OnionProcessor { - return &OnionProcessor{router} -} - -// Start spins up the onion processor's sphinx router. -func (p *OnionProcessor) Start() er.R { - return p.router.Start() -} - -// Stop shutsdown the onion processor's sphinx router. -func (p *OnionProcessor) Stop() er.R { - p.router.Stop() - return nil -} - -// DecodeHopIterator attempts to decode a valid sphinx packet from the passed io.Reader -// instance using the rHash as the associated data when checking the relevant -// MACs during the decoding process. -func (p *OnionProcessor) DecodeHopIterator(r io.Reader, rHash []byte, - incomingCltv uint32) (Iterator, lnwire.FailCode) { - - onionPkt := &sphinx.OnionPacket{} - if err := onionPkt.Decode(r); err != nil { - switch { - case sphinx.ErrInvalidOnionVersion.Is(err): - return nil, lnwire.CodeInvalidOnionVersion - case sphinx.ErrInvalidOnionKey.Is(err): - return nil, lnwire.CodeInvalidOnionKey - default: - log.Errorf("unable to decode onion packet: %v", err) - return nil, lnwire.CodeInvalidOnionKey - } - } - - // Attempt to process the Sphinx packet. We include the payment hash of - // the HTLC as it's authenticated within the Sphinx packet itself as - // associated data in order to thwart attempts a replay attacks. In the - // case of a replay, an attacker is *forced* to use the same payment - // hash twice, thereby losing their money entirely. 
- sphinxPacket, err := p.router.ProcessOnionPacket( - onionPkt, rHash, incomingCltv, - ) - if err != nil { - switch { - case sphinx.ErrInvalidOnionVersion.Is(err): - return nil, lnwire.CodeInvalidOnionVersion - case sphinx.ErrInvalidOnionHMAC.Is(err): - return nil, lnwire.CodeInvalidOnionHmac - case sphinx.ErrInvalidOnionKey.Is(err): - return nil, lnwire.CodeInvalidOnionKey - default: - log.Errorf("unable to process onion packet: %v", err) - return nil, lnwire.CodeInvalidOnionKey - } - } - - return makeSphinxHopIterator(onionPkt, sphinxPacket), lnwire.CodeNone -} - -// ReconstructHopIterator attempts to decode a valid sphinx packet from the passed io.Reader -// instance using the rHash as the associated data when checking the relevant -// MACs during the decoding process. -func (p *OnionProcessor) ReconstructHopIterator(r io.Reader, rHash []byte) ( - Iterator, er.R) { - - onionPkt := &sphinx.OnionPacket{} - if err := onionPkt.Decode(r); err != nil { - return nil, err - } - - // Attempt to process the Sphinx packet. We include the payment hash of - // the HTLC as it's authenticated within the Sphinx packet itself as - // associated data in order to thwart attempts a replay attacks. In the - // case of a replay, an attacker is *forced* to use the same payment - // hash twice, thereby losing their money entirely. - sphinxPacket, err := p.router.ReconstructOnionPacket(onionPkt, rHash) - if err != nil { - return nil, err - } - - return makeSphinxHopIterator(onionPkt, sphinxPacket), nil -} - -// DecodeHopIteratorRequest encapsulates all date necessary to process an onion -// packet, perform sphinx replay detection, and schedule the entry for garbage -// collection. -type DecodeHopIteratorRequest struct { - OnionReader io.Reader - RHash []byte - IncomingCltv uint32 -} - -// DecodeHopIteratorResponse encapsulates the outcome of a batched sphinx onion -// processing. 
-type DecodeHopIteratorResponse struct { - HopIterator Iterator - FailCode lnwire.FailCode -} - -// Result returns the (HopIterator, lnwire.FailCode) tuple, which should -// correspond to the index of a particular DecodeHopIteratorRequest. -// -// NOTE: The HopIterator should be considered invalid if the fail code is -// anything but lnwire.CodeNone. -func (r *DecodeHopIteratorResponse) Result() (Iterator, lnwire.FailCode) { - return r.HopIterator, r.FailCode -} - -// DecodeHopIterators performs batched decoding and validation of incoming -// sphinx packets. For the same `id`, this method will return the same iterators -// and failcodes upon subsequent invocations. -// -// NOTE: In order for the responses to be valid, the caller must guarantee that -// the presented readers and rhashes *NEVER* deviate across invocations for the -// same id. -func (p *OnionProcessor) DecodeHopIterators(id []byte, - reqs []DecodeHopIteratorRequest) ([]DecodeHopIteratorResponse, er.R) { - - var ( - batchSize = len(reqs) - onionPkts = make([]sphinx.OnionPacket, batchSize) - resps = make([]DecodeHopIteratorResponse, batchSize) - ) - - tx := p.router.BeginTxn(id, batchSize) - - for i, req := range reqs { - onionPkt := &onionPkts[i] - resp := &resps[i] - - err := onionPkt.Decode(req.OnionReader) - switch { - case nil == err: - // success - - case sphinx.ErrInvalidOnionVersion.Is(err): - resp.FailCode = lnwire.CodeInvalidOnionVersion - continue - - case sphinx.ErrInvalidOnionKey.Is(err): - resp.FailCode = lnwire.CodeInvalidOnionKey - continue - - default: - log.Errorf("unable to decode onion packet: %v", err) - resp.FailCode = lnwire.CodeInvalidOnionKey - continue - } - - err = tx.ProcessOnionPacket( - uint16(i), onionPkt, req.RHash, req.IncomingCltv, - ) - switch { - case err == nil: - // success - - case sphinx.ErrInvalidOnionVersion.Is(err): - resp.FailCode = lnwire.CodeInvalidOnionVersion - continue - - case sphinx.ErrInvalidOnionHMAC.Is(err): - resp.FailCode = 
lnwire.CodeInvalidOnionHmac - continue - - case sphinx.ErrInvalidOnionKey.Is(err): - resp.FailCode = lnwire.CodeInvalidOnionKey - continue - - default: - log.Errorf("unable to process onion packet: %v", err) - resp.FailCode = lnwire.CodeInvalidOnionKey - continue - } - } - - // With that batch created, we will now attempt to write the shared - // secrets to disk. This operation will returns the set of indices that - // were detected as replays, and the computed sphinx packets for all - // indices that did not fail the above loop. Only indices that are not - // in the replay set should be considered valid, as they are - // opportunistically computed. - packets, replays, err := tx.Commit() - if err != nil { - log.Errorf("unable to process onion packet batch %x: %v", - id, err) - - // If we failed to commit the batch to the secret share log, we - // will mark all not-yet-failed channels with a temporary - // channel failure and exit since we cannot proceed. - for i := range resps { - resp := &resps[i] - - // Skip any indexes that already failed onion decoding. - if resp.FailCode != lnwire.CodeNone { - continue - } - - log.Errorf("unable to process onion packet %x-%v", - id, i) - resp.FailCode = lnwire.CodeTemporaryChannelFailure - } - - // TODO(conner): return real errors to caller so link can fail? - return resps, err - } - - // Otherwise, the commit was successful. Now we will post process any - // remaining packets, additionally failing any that were included in the - // replay set. - for i := range resps { - resp := &resps[i] - - // Skip any indexes that already failed onion decoding. - if resp.FailCode != lnwire.CodeNone { - continue - } - - // If this index is contained in the replay set, mark it with a - // temporary channel failure error code. We infer that the - // offending error was due to a replayed packet because this - // index was found in the replay set. 
- if replays.Contains(uint16(i)) { - log.Errorf("unable to process onion packet: %v", - sphinx.ErrReplayedPacket) - resp.FailCode = lnwire.CodeTemporaryChannelFailure - continue - } - - // Finally, construct a hop iterator from our processed sphinx - // packet, simultaneously caching the original onion packet. - resp.HopIterator = makeSphinxHopIterator(&onionPkts[i], &packets[i]) - } - - return resps, nil -} - -// ExtractErrorEncrypter takes an io.Reader which should contain the onion -// packet as original received by a forwarding node and creates an -// ErrorEncrypter instance using the derived shared secret. In the case that en -// error occurs, a lnwire failure code detailing the parsing failure will be -// returned. -func (p *OnionProcessor) ExtractErrorEncrypter(ephemeralKey *btcec.PublicKey) ( - ErrorEncrypter, lnwire.FailCode) { - - onionObfuscator, err := sphinx.NewOnionErrorEncrypter( - p.router, ephemeralKey, - ) - if err != nil { - switch { - case sphinx.ErrInvalidOnionVersion.Is(err): - return nil, lnwire.CodeInvalidOnionVersion - case sphinx.ErrInvalidOnionHMAC.Is(err): - return nil, lnwire.CodeInvalidOnionHmac - case sphinx.ErrInvalidOnionKey.Is(err): - return nil, lnwire.CodeInvalidOnionKey - default: - log.Errorf("unable to process onion packet: %v", err) - return nil, lnwire.CodeInvalidOnionKey - } - } - - return &SphinxErrorEncrypter{ - OnionErrorEncrypter: onionObfuscator, - EphemeralKey: ephemeralKey, - }, lnwire.CodeNone -} diff --git a/lnd/htlcswitch/hop/iterator_test.go b/lnd/htlcswitch/hop/iterator_test.go deleted file mode 100644 index 8b84895e..00000000 --- a/lnd/htlcswitch/hop/iterator_test.go +++ /dev/null @@ -1,101 +0,0 @@ -package hop - -import ( - "bytes" - "encoding/binary" - "testing" - - "github.com/davecgh/go-spew/spew" - sphinx "github.com/pkt-cash/pktd/lightning-onion" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/record" - "github.com/pkt-cash/pktd/lnd/tlv" -) - -// 
TestSphinxHopIteratorForwardingInstructions tests that we're able to -// properly decode an onion payload, no matter the payload type, into the -// original set of forwarding instructions. -func TestSphinxHopIteratorForwardingInstructions(t *testing.T) { - t.Parallel() - - // First, we'll make the hop data that the sender would create to send - // an HTLC through our imaginary route. - hopData := sphinx.HopData{ - ForwardAmount: 100000, - OutgoingCltv: 4343, - } - copy(hopData.NextAddress[:], bytes.Repeat([]byte("a"), 8)) - - // Next, we'll make the hop forwarding information that we should - // extract each type, no matter the payload type. - nextAddrInt := binary.BigEndian.Uint64(hopData.NextAddress[:]) - expectedFwdInfo := ForwardingInfo{ - NextHop: lnwire.NewShortChanIDFromInt(nextAddrInt), - AmountToForward: lnwire.MilliSatoshi(hopData.ForwardAmount), - OutgoingCTLV: hopData.OutgoingCltv, - } - - // For our TLV payload, we'll serialize the hop into into a TLV stream - // as we would normally in the routing network. - var b bytes.Buffer - tlvRecords := []tlv.Record{ - record.NewAmtToFwdRecord(&hopData.ForwardAmount), - record.NewLockTimeRecord(&hopData.OutgoingCltv), - record.NewNextHopIDRecord(&nextAddrInt), - } - tlvStream, err := tlv.NewStream(tlvRecords...) - if err != nil { - t.Fatalf("unable to create stream: %v", err) - } - if err := tlvStream.Encode(&b); err != nil { - t.Fatalf("unable to encode stream: %v", err) - } - - var testCases = []struct { - sphinxPacket *sphinx.ProcessedPacket - expectedFwdInfo ForwardingInfo - }{ - // A regular legacy payload that signals more hops. - { - sphinxPacket: &sphinx.ProcessedPacket{ - Payload: sphinx.HopPayload{ - Type: sphinx.PayloadLegacy, - }, - Action: sphinx.MoreHops, - ForwardingInstructions: &hopData, - }, - expectedFwdInfo: expectedFwdInfo, - }, - // A TLV payload, we can leave off the action as we'll always - // read the cid encoded. 
- { - sphinxPacket: &sphinx.ProcessedPacket{ - Payload: sphinx.HopPayload{ - Type: sphinx.PayloadTLV, - Payload: b.Bytes(), - }, - }, - expectedFwdInfo: expectedFwdInfo, - }, - } - - // Finally, we'll test that we get the same set of - // ForwardingInstructions for each payload type. - iterator := sphinxHopIterator{} - for i, testCase := range testCases { - iterator.processedPacket = testCase.sphinxPacket - - pld, err := iterator.HopPayload() - if err != nil { - t.Fatalf("#%v: unable to extract forwarding "+ - "instructions: %v", i, err) - } - - fwdInfo := pld.ForwardingInfo() - if fwdInfo != testCase.expectedFwdInfo { - t.Fatalf("#%v: wrong fwding info: expected %v, got %v", - i, spew.Sdump(testCase.expectedFwdInfo), - spew.Sdump(fwdInfo)) - } - } -} diff --git a/lnd/htlcswitch/hop/network.go b/lnd/htlcswitch/hop/network.go deleted file mode 100644 index 6f121642..00000000 --- a/lnd/htlcswitch/hop/network.go +++ /dev/null @@ -1,28 +0,0 @@ -package hop - -// Network indicates the blockchain network that is intended to be the next hop -// for a forwarded HTLC. The existence of this field within the ForwardingInfo -// struct enables the ability for HTLC to cross chain-boundaries at will. -type Network uint8 - -const ( - // BitcoinNetwork denotes that an HTLC is to be forwarded along the - // Bitcoin link with the specified short channel ID. - BitcoinNetwork Network = iota - - // LitecoinNetwork denotes that an HTLC is to be forwarded along the - // Litecoin link with the specified short channel ID. - LitecoinNetwork -) - -// String returns the string representation of the target Network. 
-func (c Network) String() string { - switch c { - case BitcoinNetwork: - return "Bitcoin" - case LitecoinNetwork: - return "Litecoin" - default: - return "Kekcoin" - } -} diff --git a/lnd/htlcswitch/hop/payload.go b/lnd/htlcswitch/hop/payload.go deleted file mode 100644 index 2a3e9567..00000000 --- a/lnd/htlcswitch/hop/payload.go +++ /dev/null @@ -1,292 +0,0 @@ -package hop - -import ( - "encoding/binary" - "fmt" - "io" - - "github.com/pkt-cash/pktd/btcutil/er" - sphinx "github.com/pkt-cash/pktd/lightning-onion" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/record" - "github.com/pkt-cash/pktd/lnd/tlv" -) - -// PayloadViolation is an enum encapsulating the possible invalid payload -// violations that can occur when processing or validating a payload. -type PayloadViolation byte - -const ( - // OmittedViolation indicates that a type was expected to be found the - // payload but was absent. - OmittedViolation PayloadViolation = iota - - // IncludedViolation indicates that a type was expected to be omitted - // from the payload but was present. - IncludedViolation - - // RequiredViolation indicates that an unknown even type was found in - // the payload that we could not process. - RequiredViolation -) - -// String returns a human-readable description of the violation as a verb. -func (v PayloadViolation) String() string { - switch v { - case OmittedViolation: - return "omitted" - - case IncludedViolation: - return "included" - - case RequiredViolation: - return "required" - - default: - return "unknown violation" - } -} - -// ErrInvalidPayload is an error returned when a parsed onion payload either -// included or omitted incorrect records for a particular hop type. -type ErrInvalidPayload struct { - // Type the record's type that cause the violation. - Type tlv.Type - - // Violation is an enum indicating the type of violation detected in - // processing Type. 
- Violation PayloadViolation - - // FinalHop if true, indicates that the violation is for the final hop - // in the route (identified by next hop id), otherwise the violation is - // for an intermediate hop. - FinalHop bool -} - -// Error returns a human-readable description of the invalid payload error. -func (e ErrInvalidPayload) Error() string { - hopType := "intermediate" - if e.FinalHop { - hopType = "final" - } - - return fmt.Sprintf("onion payload for %s hop %v record with type %d", - hopType, e.Violation, e.Type) -} - -// Payload encapsulates all information delivered to a hop in an onion payload. -// A Hop can represent either a TLV or legacy payload. The primary forwarding -// instruction can be accessed via ForwardingInfo, and additional records can be -// accessed by other member functions. -type Payload struct { - // FwdInfo holds the basic parameters required for HTLC forwarding, e.g. - // amount, cltv, and next hop. - FwdInfo ForwardingInfo - - // MPP holds the info provided in an option_mpp record when parsed from - // a TLV onion payload. - MPP *record.MPP - - // customRecords are user-defined records in the custom type range that - // were included in the payload. - customRecords record.CustomSet -} - -// NewLegacyPayload builds a Payload from the amount, cltv, and next hop -// parameters provided by leegacy onion payloads. -func NewLegacyPayload(f *sphinx.HopData) *Payload { - nextHop := binary.BigEndian.Uint64(f.NextAddress[:]) - - return &Payload{ - FwdInfo: ForwardingInfo{ - Network: BitcoinNetwork, - NextHop: lnwire.NewShortChanIDFromInt(nextHop), - AmountToForward: lnwire.MilliSatoshi(f.ForwardAmount), - OutgoingCTLV: f.OutgoingCltv, - }, - customRecords: make(record.CustomSet), - } -} - -// NewPayloadFromReader builds a new Hop from the passed io.Reader. The reader -// should correspond to the bytes encapsulated in a TLV onion payload. 
-func NewPayloadFromReader(r io.Reader) (*Payload, er.R) { - var ( - cid uint64 - amt uint64 - cltv uint32 - mpp = &record.MPP{} - ) - - tlvStream, err := tlv.NewStream( - record.NewAmtToFwdRecord(&amt), - record.NewLockTimeRecord(&cltv), - record.NewNextHopIDRecord(&cid), - mpp.Record(), - ) - if err != nil { - return nil, err - } - - parsedTypes, err := tlvStream.DecodeWithParsedTypes(r) - if err != nil { - return nil, err - } - - // Validate whether the sender properly included or omitted tlv records - // in accordance with BOLT 04. - nextHop := lnwire.NewShortChanIDFromInt(cid) - err = ValidateParsedPayloadTypes(parsedTypes, nextHop) - if err != nil { - return nil, err - } - - // Check for violation of the rules for mandatory fields. - violatingType := getMinRequiredViolation(parsedTypes) - if violatingType != nil { - return nil, er.E(ErrInvalidPayload{ - Type: *violatingType, - Violation: RequiredViolation, - FinalHop: nextHop == Exit, - }) - } - - // If no MPP field was parsed, set the MPP field on the resulting - // payload to nil. - if _, ok := parsedTypes[record.MPPOnionType]; !ok { - mpp = nil - } - - // Filter out the custom records. - customRecords := NewCustomRecords(parsedTypes) - - return &Payload{ - FwdInfo: ForwardingInfo{ - Network: BitcoinNetwork, - NextHop: nextHop, - AmountToForward: lnwire.MilliSatoshi(amt), - OutgoingCTLV: cltv, - }, - MPP: mpp, - customRecords: customRecords, - }, nil -} - -// ForwardingInfo returns the basic parameters required for HTLC forwarding, -// e.g. amount, cltv, and next hop. -func (h *Payload) ForwardingInfo() ForwardingInfo { - return h.FwdInfo -} - -// NewCustomRecords filters the types parsed from the tlv stream for custom -// records. 
-func NewCustomRecords(parsedTypes tlv.TypeMap) record.CustomSet { - customRecords := make(record.CustomSet) - for t, parseResult := range parsedTypes { - if parseResult == nil || t < record.CustomTypeStart { - continue - } - customRecords[uint64(t)] = parseResult - } - return customRecords -} - -// ValidateParsedPayloadTypes checks the types parsed from a hop payload to -// ensure that the proper fields are either included or omitted. The finalHop -// boolean should be true if the payload was parsed for an exit hop. The -// requirements for this method are described in BOLT 04. -func ValidateParsedPayloadTypes(parsedTypes tlv.TypeMap, - nextHop lnwire.ShortChannelID) er.R { - - isFinalHop := nextHop == Exit - - _, hasAmt := parsedTypes[record.AmtOnionType] - _, hasLockTime := parsedTypes[record.LockTimeOnionType] - _, hasNextHop := parsedTypes[record.NextHopOnionType] - _, hasMPP := parsedTypes[record.MPPOnionType] - - switch { - - // All hops must include an amount to forward. - case !hasAmt: - return er.E(ErrInvalidPayload{ - Type: record.AmtOnionType, - Violation: OmittedViolation, - FinalHop: isFinalHop, - }) - - // All hops must include a cltv expiry. - case !hasLockTime: - return er.E(ErrInvalidPayload{ - Type: record.LockTimeOnionType, - Violation: OmittedViolation, - FinalHop: isFinalHop, - }) - - // The exit hop should omit the next hop id. If nextHop != Exit, the - // sender must have included a record, so we don't need to test for its - // inclusion at intermediate hops directly. - case isFinalHop && hasNextHop: - return er.E(ErrInvalidPayload{ - Type: record.NextHopOnionType, - Violation: IncludedViolation, - FinalHop: true, - }) - - // Intermediate nodes should never receive MPP fields. 
- case !isFinalHop && hasMPP: - return er.E(ErrInvalidPayload{ - Type: record.MPPOnionType, - Violation: IncludedViolation, - FinalHop: isFinalHop, - }) - } - - return nil -} - -// MultiPath returns the record corresponding the option_mpp parsed from the -// onion payload. -func (h *Payload) MultiPath() *record.MPP { - return h.MPP -} - -// CustomRecords returns the custom tlv type records that were parsed from the -// payload. -func (h *Payload) CustomRecords() record.CustomSet { - return h.customRecords -} - -// getMinRequiredViolation checks for unrecognized required (even) fields in the -// standard range and returns the lowest required type. Always returning the -// lowest required type allows a failure message to be deterministic. -func getMinRequiredViolation(set tlv.TypeMap) *tlv.Type { - var ( - requiredViolation bool - minRequiredViolationType tlv.Type - ) - for t, parseResult := range set { - // If a type is even but not known to us, we cannot process the - // payload. We are required to understand a field that we don't - // support. - // - // We always accept custom fields, because a higher level - // application may understand them. 
- if parseResult == nil || t%2 != 0 || - t >= record.CustomTypeStart { - - continue - } - - if !requiredViolation || t < minRequiredViolationType { - minRequiredViolationType = t - } - requiredViolation = true - } - - if requiredViolation { - return &minRequiredViolationType - } - - return nil -} diff --git a/lnd/htlcswitch/hop/payload_test.go b/lnd/htlcswitch/hop/payload_test.go deleted file mode 100644 index 478ee44a..00000000 --- a/lnd/htlcswitch/hop/payload_test.go +++ /dev/null @@ -1,256 +0,0 @@ -package hop_test - -import ( - "bytes" - "reflect" - "testing" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/htlcswitch/hop" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/record" -) - -type decodePayloadTest struct { - name string - payload []byte - expErr error - expCustomRecords map[uint64][]byte - shouldHaveMPP bool -} - -var decodePayloadTests = []decodePayloadTest{ - { - name: "final hop valid", - payload: []byte{0x02, 0x00, 0x04, 0x00}, - }, - { - name: "intermediate hop valid", - payload: []byte{0x02, 0x00, 0x04, 0x00, 0x06, 0x08, 0x01, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - }, - }, - { - name: "final hop no amount", - payload: []byte{0x04, 0x00}, - expErr: hop.ErrInvalidPayload{ - Type: record.AmtOnionType, - Violation: hop.OmittedViolation, - FinalHop: true, - }, - }, - { - name: "intermediate hop no amount", - payload: []byte{0x04, 0x00, 0x06, 0x08, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - }, - expErr: hop.ErrInvalidPayload{ - Type: record.AmtOnionType, - Violation: hop.OmittedViolation, - FinalHop: false, - }, - }, - { - name: "final hop no expiry", - payload: []byte{0x02, 0x00}, - expErr: hop.ErrInvalidPayload{ - Type: record.LockTimeOnionType, - Violation: hop.OmittedViolation, - FinalHop: true, - }, - }, - { - name: "intermediate hop no expiry", - payload: []byte{0x02, 0x00, 0x06, 0x08, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - }, - expErr: hop.ErrInvalidPayload{ - Type: 
record.LockTimeOnionType, - Violation: hop.OmittedViolation, - FinalHop: false, - }, - }, - { - name: "final hop next sid present", - payload: []byte{0x02, 0x00, 0x04, 0x00, 0x06, 0x08, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - }, - expErr: hop.ErrInvalidPayload{ - Type: record.NextHopOnionType, - Violation: hop.IncludedViolation, - FinalHop: true, - }, - }, - { - name: "required type after omitted hop id", - payload: []byte{0x02, 0x00, 0x04, 0x00, 0x0a, 0x00}, - expErr: hop.ErrInvalidPayload{ - Type: 10, - Violation: hop.RequiredViolation, - FinalHop: true, - }, - }, - { - name: "required type after included hop id", - payload: []byte{0x02, 0x00, 0x04, 0x00, 0x06, 0x08, 0x01, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00, - }, - expErr: hop.ErrInvalidPayload{ - Type: 10, - Violation: hop.RequiredViolation, - FinalHop: false, - }, - }, - { - name: "required type zero final hop", - payload: []byte{0x00, 0x00, 0x02, 0x00, 0x04, 0x00}, - expErr: hop.ErrInvalidPayload{ - Type: 0, - Violation: hop.RequiredViolation, - FinalHop: true, - }, - }, - { - name: "required type zero final hop zero sid", - payload: []byte{0x00, 0x00, 0x02, 0x00, 0x04, 0x00, 0x06, 0x08, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - }, - expErr: hop.ErrInvalidPayload{ - Type: record.NextHopOnionType, - Violation: hop.IncludedViolation, - FinalHop: true, - }, - }, - { - name: "required type zero intermediate hop", - payload: []byte{0x00, 0x00, 0x02, 0x00, 0x04, 0x00, 0x06, 0x08, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - }, - expErr: hop.ErrInvalidPayload{ - Type: 0, - Violation: hop.RequiredViolation, - FinalHop: false, - }, - }, - { - name: "required type in custom range", - payload: []byte{0x02, 0x00, 0x04, 0x00, - 0xfe, 0x00, 0x01, 0x00, 0x00, 0x02, 0x10, 0x11, - }, - expCustomRecords: map[uint64][]byte{ - 65536: {0x10, 0x11}, - }, - }, - { - name: "valid intermediate hop", - payload: []byte{0x02, 0x00, 0x04, 0x00, 0x06, 0x08, 0x01, 0x00, - 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, - }, - expErr: nil, - }, - { - name: "valid final hop", - payload: []byte{0x02, 0x00, 0x04, 0x00}, - expErr: nil, - }, - { - name: "intermediate hop with mpp", - payload: []byte{ - // amount - 0x02, 0x00, - // cltv - 0x04, 0x00, - // next hop id - 0x06, 0x08, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - // mpp - 0x08, 0x21, - 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, - 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, - 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, - 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, - 0x08, - }, - expErr: hop.ErrInvalidPayload{ - Type: record.MPPOnionType, - Violation: hop.IncludedViolation, - FinalHop: false, - }, - }, - { - name: "final hop with mpp", - payload: []byte{ - // amount - 0x02, 0x00, - // cltv - 0x04, 0x00, - // mpp - 0x08, 0x21, - 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, - 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, - 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, - 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, - 0x08, - }, - expErr: nil, - shouldHaveMPP: true, - }, -} - -// TestDecodeHopPayloadRecordValidation asserts that parsing the payloads in the -// tests yields the expected errors depending on whether the proper fields were -// included or omitted. 
-func TestDecodeHopPayloadRecordValidation(t *testing.T) { - for _, test := range decodePayloadTests { - t.Run(test.name, func(t *testing.T) { - testDecodeHopPayloadValidation(t, test) - }) - } -} - -func testDecodeHopPayloadValidation(t *testing.T, test decodePayloadTest) { - var ( - testTotalMsat = lnwire.MilliSatoshi(8) - testAddr = [32]byte{ - 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, - 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, - 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, - 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, - } - ) - - p, err := hop.NewPayloadFromReader(bytes.NewReader(test.payload)) - errr := er.Wrapped(err) - if !reflect.DeepEqual(test.expErr, errr) { - t.Fatalf("expected error mismatch, want: %v, got: %v", - test.expErr, err) - } - if err != nil { - return - } - - // Assert MPP fields if we expect them. - if test.shouldHaveMPP { - if p.MPP == nil { - t.Fatalf("payload should have MPP record") - } - if p.MPP.TotalMsat() != testTotalMsat { - t.Fatalf("invalid total msat") - } - if p.MPP.PaymentAddr() != testAddr { - t.Fatalf("invalid payment addr") - } - } else if p.MPP != nil { - t.Fatalf("unexpected MPP payload") - } - - // Convert expected nil map to empty map, because we always expect an - // initiated map from the payload. - expCustomRecords := make(record.CustomSet) - if test.expCustomRecords != nil { - expCustomRecords = test.expCustomRecords - } - if !reflect.DeepEqual(expCustomRecords, p.CustomRecords()) { - t.Fatalf("invalid custom records") - } -} diff --git a/lnd/htlcswitch/hop/type.go b/lnd/htlcswitch/hop/type.go deleted file mode 100644 index b99f73b9..00000000 --- a/lnd/htlcswitch/hop/type.go +++ /dev/null @@ -1,13 +0,0 @@ -package hop - -import "github.com/pkt-cash/pktd/lnd/lnwire" - -var ( - // Exit is a special "hop" denoting that an incoming HTLC is meant to - // pay finally to the receiving node. 
- Exit lnwire.ShortChannelID - - // Source is a sentinel "hop" denoting that an incoming HTLC is - // initiated by our own switch. - Source lnwire.ShortChannelID -) diff --git a/lnd/htlcswitch/htlcnotifier.go b/lnd/htlcswitch/htlcnotifier.go deleted file mode 100644 index 4bf17e26..00000000 --- a/lnd/htlcswitch/htlcnotifier.go +++ /dev/null @@ -1,431 +0,0 @@ -package htlcswitch - -import ( - "fmt" - "strings" - "sync" - "time" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/htlcswitch/hop" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/subscribe" - "github.com/pkt-cash/pktd/pktlog/log" -) - -// HtlcNotifier notifies clients of htlc forwards, failures and settles for -// htlcs that the switch handles. It takes subscriptions for its events and -// notifies them when htlc events occur. These are served on a best-effort -// basis; events are not persisted, delivery is not guaranteed (in the event -// of a crash in the switch, forward events may be lost) and some events may -// be replayed upon restart. Events consumed from this package should be -// de-duplicated by the htlc's unique combination of incoming+outgoing circuit -// and not relied upon for critical operations. -// -// The htlc notifier sends the following kinds of events: -// Forwarding Event: -// - Represents a htlc which is forwarded onward from our node. -// - Present for htlc forwards through our node and local sends. -// -// Link Failure Event: -// - Indicates that a htlc has failed on our incoming or outgoing link, -// with an incoming boolean which indicates where the failure occurred. -// - Incoming link failures are present for failed attempts to pay one of -// our invoices (insufficient amount or mpp timeout, for example) and for -// forwards that we cannot decode to forward onwards. 
-// - Outgoing link failures are present for forwards or local payments that -// do not meet our outgoing link's policy (insufficient fees, for example) -// and when we fail to forward the payment on (insufficient outgoing -// capacity, or an unknown outgoing link). -// -// Forwarding Failure Event: -// - Forwarding failures indicate that a htlc we forwarded has failed at -// another node down the route. -// - Present for local sends and htlc forwards which fail after they left -// our node. -// -// Settle event: -// - Settle events are present when a htlc which we added is settled through -// the release of a preimage. -// - Present for local receives, and successful local sends or forwards. -// -// Each htlc is identified by its incoming and outgoing circuit key. Htlcs, -// and their subsequent settles or fails, can be identified by the combination -// of incoming and outgoing circuits. Note that receives to our node will -// have a zero outgoing circuit key because the htlc terminates at our -// node, and sends from our node will have a zero incoming circuit key because -// the send originates at our node. -type HtlcNotifier struct { - started sync.Once - stopped sync.Once - - // now returns the current time, it is set in the htlcnotifier to allow - // for timestamp mocking in tests. - now func() time.Time - - ntfnServer *subscribe.Server -} - -// NewHtlcNotifier creates a new HtlcNotifier which gets htlc forwarded, -// failed and settled events from links our node has established with peers -// and sends notifications to subscribing clients. -func NewHtlcNotifier(now func() time.Time) *HtlcNotifier { - return &HtlcNotifier{ - now: now, - ntfnServer: subscribe.NewServer(), - } -} - -// Start starts the HtlcNotifier and all goroutines it needs to consume -// events and provide subscriptions to clients. 
-func (h *HtlcNotifier) Start() er.R { - var err er.R - h.started.Do(func() { - log.Trace("HtlcNotifier starting") - err = h.ntfnServer.Start() - }) - return err -} - -// Stop signals the notifier for a graceful shutdown. -func (h *HtlcNotifier) Stop() { - h.stopped.Do(func() { - if err := h.ntfnServer.Stop(); err != nil { - log.Warnf("error stopping htlc notifier: %v", err) - } - }) -} - -// SubscribeHtlcEvents returns a subscribe.Client that will receive updates -// any time the server is made aware of a new event. -func (h *HtlcNotifier) SubscribeHtlcEvents() (*subscribe.Client, er.R) { - return h.ntfnServer.Subscribe() -} - -// HtlcKey uniquely identifies the htlc. -type HtlcKey struct { - // IncomingCircuit is the channel an htlc id of the incoming htlc. - IncomingCircuit channeldb.CircuitKey - - // OutgoingCircuit is the channel and htlc id of the outgoing htlc. - OutgoingCircuit channeldb.CircuitKey -} - -// String returns a string representation of a htlc key. -func (k HtlcKey) String() string { - switch { - case k.IncomingCircuit.ChanID == hop.Source: - return k.OutgoingCircuit.String() - - case k.OutgoingCircuit.ChanID == hop.Exit: - return k.IncomingCircuit.String() - - default: - return fmt.Sprintf("%v -> %v", k.IncomingCircuit, - k.OutgoingCircuit) - } -} - -// HtlcInfo provides the details of a htlc that our node has processed. For -// forwards, incoming and outgoing values are set, whereas sends and receives -// will only have outgoing or incoming details set. -type HtlcInfo struct { - // IncomingTimelock is the time lock of the htlc on our incoming - // channel. - IncomingTimeLock uint32 - - // OutgoingTimelock is the time lock the htlc on our outgoing channel. - OutgoingTimeLock uint32 - - // IncomingAmt is the amount of the htlc on our incoming channel. - IncomingAmt lnwire.MilliSatoshi - - // OutgoingAmt is the amount of the htlc on our outgoing channel. - OutgoingAmt lnwire.MilliSatoshi -} - -// String returns a string representation of a htlc. 
-func (h HtlcInfo) String() string { - var details []string - - // If the incoming information is not zero, as is the case for a send, - // we include the incoming amount and timelock. - if h.IncomingAmt != 0 || h.IncomingTimeLock != 0 { - str := fmt.Sprintf("incoming amount: %v, "+ - "incoming timelock: %v", h.IncomingAmt, - h.IncomingTimeLock) - - details = append(details, str) - } - - // If the outgoing information is not zero, as is the case for a - // receive, we include the outgoing amount and timelock. - if h.OutgoingAmt != 0 || h.OutgoingTimeLock != 0 { - str := fmt.Sprintf("outgoing amount: %v, "+ - "outgoing timelock: %v", h.OutgoingAmt, - h.OutgoingTimeLock) - - details = append(details, str) - } - - return strings.Join(details, ", ") -} - -// HtlcEventType represents the type of event that a htlc was part of. -type HtlcEventType int - -const ( - // HtlcEventTypeSend represents a htlc that was part of a send from - // our node. - HtlcEventTypeSend HtlcEventType = iota - - // HtlcEventTypeReceive represents a htlc that was part of a receive - // to our node. - HtlcEventTypeReceive - - // HtlcEventTypeForward represents a htlc that was forwarded through - // our node. - HtlcEventTypeForward -) - -// String returns a string representation of a htlc event type. -func (h HtlcEventType) String() string { - switch h { - case HtlcEventTypeSend: - return "send" - - case HtlcEventTypeReceive: - return "receive" - - case HtlcEventTypeForward: - return "forward" - - default: - return "unknown" - } -} - -// ForwardingEvent represents a htlc that was forwarded onwards from our node. -// Sends which originate from our node will report forward events with zero -// incoming circuits in their htlc key. -type ForwardingEvent struct { - // HtlcKey uniquely identifies the htlc, and can be used to match the - // forwarding event with subsequent settle/fail events. - HtlcKey - - // HtlcInfo contains details about the htlc. 
- HtlcInfo - - // HtlcEventType classifies the event as part of a local send or - // receive, or as part of a forward. - HtlcEventType - - // Timestamp is the time when this htlc was forwarded. - Timestamp time.Time -} - -// LinkFailEvent describes a htlc that failed on our incoming or outgoing -// link. The incoming bool is true for failures on incoming links, and false -// for failures on outgoing links. The failure reason is provided by a lnwire -// failure message which is enriched with a failure detail in the cases where -// the wire failure message does not contain full information about the -// failure. -type LinkFailEvent struct { - // HtlcKey uniquely identifies the htlc. - HtlcKey - - // HtlcInfo contains details about the htlc. - HtlcInfo - - // HtlcEventType classifies the event as part of a local send or - // receive, or as part of a forward. - HtlcEventType - - // LinkError is the reason that we failed the htlc. - LinkError *LinkError - - // Incoming is true if the htlc was failed on an incoming link. - // If it failed on the outgoing link, it is false. - Incoming bool - - // Timestamp is the time when the link failure occurred. - Timestamp time.Time -} - -// ForwardingFailEvent represents a htlc failure which occurred down the line -// after we forwarded a htlc onwards. An error is not included in this event -// because errors returned down the route are encrypted. HtlcInfo is not -// reliably available for forwarding failures, so it is omitted. These events -// should be matched with their corresponding forward event to obtain this -// information. -type ForwardingFailEvent struct { - // HtlcKey uniquely identifies the htlc, and can be used to match the - // htlc with its corresponding forwarding event. - HtlcKey - - // HtlcEventType classifies the event as part of a local send or - // receive, or as part of a forward. - HtlcEventType - - // Timestamp is the time when the forwarding failure was received. 
- Timestamp time.Time -} - -// SettleEvent represents a htlc that was settled. HtlcInfo is not reliably -// available for forwarding failures, so it is omitted. These events should -// be matched with corresponding forward events or invoices (for receives) -// to obtain additional information about the htlc. -type SettleEvent struct { - // HtlcKey uniquely identifies the htlc, and can be used to match - // forwards with their corresponding forwarding event. - HtlcKey - - // HtlcEventType classifies the event as part of a local send or - // receive, or as part of a forward. - HtlcEventType - - // Timestamp is the time when this htlc was settled. - Timestamp time.Time -} - -// NotifyForwardingEvent notifies the HtlcNotifier than a htlc has been -// forwarded. -// -// Note this is part of the htlcNotifier interface. -func (h *HtlcNotifier) NotifyForwardingEvent(key HtlcKey, info HtlcInfo, - eventType HtlcEventType) { - - event := &ForwardingEvent{ - HtlcKey: key, - HtlcInfo: info, - HtlcEventType: eventType, - Timestamp: h.now(), - } - - log.Tracef("Notifying forward event: %v over %v, %v", eventType, key, - info) - - if err := h.ntfnServer.SendUpdate(event); err != nil { - log.Warnf("Unable to send forwarding event: %v", err) - } -} - -// NotifyLinkFailEvent notifies that a htlc has failed on our incoming -// or outgoing link. -// -// Note this is part of the htlcNotifier interface. 
-func (h *HtlcNotifier) NotifyLinkFailEvent(key HtlcKey, info HtlcInfo, - eventType HtlcEventType, linkErr *LinkError, incoming bool) { - - event := &LinkFailEvent{ - HtlcKey: key, - HtlcInfo: info, - HtlcEventType: eventType, - LinkError: linkErr, - Incoming: incoming, - Timestamp: h.now(), - } - - log.Tracef("Notifying link failure event: %v over %v, %v", eventType, - key, info) - - if err := h.ntfnServer.SendUpdate(event); err != nil { - log.Warnf("Unable to send link fail event: %v", err) - } -} - -// NotifyForwardingFailEvent notifies the HtlcNotifier that a htlc we -// forwarded has failed down the line. -// -// Note this is part of the htlcNotifier interface. -func (h *HtlcNotifier) NotifyForwardingFailEvent(key HtlcKey, - eventType HtlcEventType) { - - event := &ForwardingFailEvent{ - HtlcKey: key, - HtlcEventType: eventType, - Timestamp: h.now(), - } - - log.Tracef("Notifying forwarding failure event: %v over %v", eventType, - key) - - if err := h.ntfnServer.SendUpdate(event); err != nil { - log.Warnf("Unable to send forwarding fail event: %v", err) - } -} - -// NotifySettleEvent notifies the HtlcNotifier that a htlc that we committed -// to as part of a forward or a receive to our node has been settled. -// -// Note this is part of the htlcNotifier interface. -func (h *HtlcNotifier) NotifySettleEvent(key HtlcKey, eventType HtlcEventType) { - event := &SettleEvent{ - HtlcKey: key, - HtlcEventType: eventType, - Timestamp: h.now(), - } - - log.Tracef("Notifying settle event: %v over %v", eventType, key) - - if err := h.ntfnServer.SendUpdate(event); err != nil { - log.Warnf("Unable to send settle event: %v", err) - } -} - -// newHtlc key returns a htlc key for the packet provided. If the packet -// has a zero incoming channel ID, the packet is for one of our own sends, -// which has the payment id stashed in the incoming htlc id. 
If this is the -// case, we replace the incoming htlc id with zero so that the notifier -// consistently reports zero circuit keys for events that terminate or -// originate at our node. -func newHtlcKey(pkt *htlcPacket) HtlcKey { - htlcKey := HtlcKey{ - IncomingCircuit: channeldb.CircuitKey{ - ChanID: pkt.incomingChanID, - HtlcID: pkt.incomingHTLCID, - }, - OutgoingCircuit: CircuitKey{ - ChanID: pkt.outgoingChanID, - HtlcID: pkt.outgoingHTLCID, - }, - } - - // If the packet has a zero incoming channel ID, it is a send that was - // initiated at our node. If this is the case, our internal pid is in - // the incoming htlc ID, so we overwrite it with 0 for notification - // purposes. - if pkt.incomingChanID == hop.Source { - htlcKey.IncomingCircuit.HtlcID = 0 - } - - return htlcKey -} - -// newHtlcInfo returns HtlcInfo for the packet provided. -func newHtlcInfo(pkt *htlcPacket) HtlcInfo { - return HtlcInfo{ - IncomingTimeLock: pkt.incomingTimeout, - OutgoingTimeLock: pkt.outgoingTimeout, - IncomingAmt: pkt.incomingAmount, - OutgoingAmt: pkt.amount, - } -} - -// getEventType returns the htlc type based on the fields set in the htlc -// packet. Sends that originate at our node have the source (zero) incoming -// channel ID. Receives to our node have the exit (zero) outgoing channel ID -// and forwards have both fields set. 
-func getEventType(pkt *htlcPacket) HtlcEventType { - switch { - case pkt.incomingChanID == hop.Source: - return HtlcEventTypeSend - - case pkt.outgoingChanID == hop.Exit: - return HtlcEventTypeReceive - - default: - return HtlcEventTypeForward - } -} diff --git a/lnd/htlcswitch/interceptable_switch.go b/lnd/htlcswitch/interceptable_switch.go deleted file mode 100644 index ce5553dd..00000000 --- a/lnd/htlcswitch/interceptable_switch.go +++ /dev/null @@ -1,174 +0,0 @@ -package htlcswitch - -import ( - "sync" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/htlcswitch/hop" - "github.com/pkt-cash/pktd/lnd/lntypes" - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -var ( - // ErrFwdNotExists is an error returned when the caller tries to resolve - // a forward that doesn't exist anymore. - ErrFwdNotExists = Err.CodeWithDetail("ErrFwdNotExists", "forward does not exist") -) - -// InterceptableSwitch is an implementation of ForwardingSwitch interface. -// This implementation is used like a proxy that wraps the switch and -// intercepts forward requests. A reference to the Switch is held in order -// to communicate back the interception result where the options are: -// Resume - forwards the original request to the switch as is. -// Settle - routes UpdateFulfillHTLC to the originating link. -// Fail - routes UpdateFailHTLC to the originating link. -type InterceptableSwitch struct { - sync.RWMutex - - // htlcSwitch is the underline switch - htlcSwitch *Switch - - // fwdInterceptor is the callback that is called for each forward of - // an incoming htlc. It should return true if it is interested in handling - // it. - fwdInterceptor ForwardInterceptor -} - -// NewInterceptableSwitch returns an instance of InterceptableSwitch. -func NewInterceptableSwitch(s *Switch) *InterceptableSwitch { - return &InterceptableSwitch{htlcSwitch: s} -} - -// SetInterceptor sets the ForwardInterceptor to be used. 
-func (s *InterceptableSwitch) SetInterceptor( - interceptor ForwardInterceptor) { - - s.Lock() - defer s.Unlock() - s.fwdInterceptor = interceptor -} - -// ForwardPackets attempts to forward the batch of htlcs through the -// switch, any failed packets will be returned to the provided -// ChannelLink. The link's quit signal should be provided to allow -// cancellation of forwarding during link shutdown. -func (s *InterceptableSwitch) ForwardPackets(linkQuit chan struct{}, - packets ...*htlcPacket) er.R { - - var interceptor ForwardInterceptor - s.Lock() - interceptor = s.fwdInterceptor - s.Unlock() - - // Optimize for the case we don't have an interceptor. - if interceptor == nil { - return s.htlcSwitch.ForwardPackets(linkQuit, packets...) - } - - var notIntercepted []*htlcPacket - for _, p := range packets { - if !s.interceptForward(p, interceptor, linkQuit) { - notIntercepted = append(notIntercepted, p) - } - } - return s.htlcSwitch.ForwardPackets(linkQuit, notIntercepted...) -} - -// interceptForward checks if there is any external interceptor interested in -// this packet. Currently only htlc type of UpdateAddHTLC that are forwarded -// are being checked for interception. It can be extended in the future given -// the right use case. -func (s *InterceptableSwitch) interceptForward(packet *htlcPacket, - interceptor ForwardInterceptor, linkQuit chan struct{}) bool { - - switch htlc := packet.htlc.(type) { - case *lnwire.UpdateAddHTLC: - // We are not interested in intercepting initated payments. - if packet.incomingChanID == hop.Source { - return false - } - - intercepted := &interceptedForward{ - linkQuit: linkQuit, - htlc: htlc, - packet: packet, - htlcSwitch: s.htlcSwitch, - } - - // If this htlc was intercepted, don't handle the forward. - return interceptor(intercepted) - default: - return false - } -} - -// interceptedForward implements the InterceptedForward interface. 
-// It is passed from the switch to external interceptors that are interested -// in holding forwards and resolve them manually. -type interceptedForward struct { - linkQuit chan struct{} - htlc *lnwire.UpdateAddHTLC - packet *htlcPacket - htlcSwitch *Switch -} - -// Packet returns the intercepted htlc packet. -func (f *interceptedForward) Packet() InterceptedPacket { - return InterceptedPacket{ - IncomingCircuit: channeldb.CircuitKey{ - ChanID: f.packet.incomingChanID, - HtlcID: f.packet.incomingHTLCID, - }, - OutgoingChanID: f.packet.outgoingChanID, - Hash: f.htlc.PaymentHash, - OutgoingExpiry: f.htlc.Expiry, - OutgoingAmount: f.htlc.Amount, - IncomingAmount: f.packet.incomingAmount, - IncomingExpiry: f.packet.incomingTimeout, - CustomRecords: f.packet.customRecords, - OnionBlob: f.htlc.OnionBlob, - } -} - -// Resume resumes the default behavior as if the packet was not intercepted. -func (f *interceptedForward) Resume() er.R { - return f.htlcSwitch.ForwardPackets(f.linkQuit, f.packet) -} - -// Fail forward a failed packet to the switch. -func (f *interceptedForward) Fail() er.R { - reason, err := f.packet.obfuscator.EncryptFirstHop(lnwire.NewTemporaryChannelFailure(nil)) - if err != nil { - return er.Errorf("failed to encrypt failure reason %v", err) - } - return f.resolve(&lnwire.UpdateFailHTLC{ - Reason: reason, - }) -} - -// Settle forwards a settled packet to the switch. -func (f *interceptedForward) Settle(preimage lntypes.Preimage) er.R { - if !preimage.Matches(f.htlc.PaymentHash) { - return er.New("preimage does not match hash") - } - return f.resolve(&lnwire.UpdateFulfillHTLC{ - PaymentPreimage: preimage, - }) -} - -// resolve is used for both Settle and Fail and forwards the message to the -// switch. 
-func (f *interceptedForward) resolve(message lnwire.Message) er.R { - pkt := &htlcPacket{ - incomingChanID: f.packet.incomingChanID, - incomingHTLCID: f.packet.incomingHTLCID, - outgoingChanID: f.packet.outgoingChanID, - outgoingHTLCID: f.packet.outgoingHTLCID, - isResolution: true, - circuit: f.packet.circuit, - htlc: message, - obfuscator: f.packet.obfuscator, - } - return f.htlcSwitch.mailOrchestrator.Deliver(pkt.incomingChanID, pkt) -} diff --git a/lnd/htlcswitch/interfaces.go b/lnd/htlcswitch/interfaces.go deleted file mode 100644 index 5c0a382b..00000000 --- a/lnd/htlcswitch/interfaces.go +++ /dev/null @@ -1,286 +0,0 @@ -package htlcswitch - -import ( - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/invoices" - "github.com/pkt-cash/pktd/lnd/lnpeer" - "github.com/pkt-cash/pktd/lnd/lntypes" - "github.com/pkt-cash/pktd/lnd/lnwallet" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/record" - "github.com/pkt-cash/pktd/wire" -) - -// InvoiceDatabase is an interface which represents the persistent subsystem -// which may search, lookup and settle invoices. -type InvoiceDatabase interface { - // LookupInvoice attempts to look up an invoice according to its 32 - // byte payment hash. - LookupInvoice(lntypes.Hash) (channeldb.Invoice, er.R) - - // NotifyExitHopHtlc attempts to mark an invoice as settled. If the - // invoice is a debug invoice, then this method is a noop as debug - // invoices are never fully settled. The return value describes how the - // htlc should be resolved. If the htlc cannot be resolved immediately, - // the resolution is sent on the passed in hodlChan later. The eob - // field passes the entire onion hop payload into the invoice registry - // for decoding purposes. 
- NotifyExitHopHtlc(payHash lntypes.Hash, paidAmount lnwire.MilliSatoshi, - expiry uint32, currentHeight int32, - circuitKey channeldb.CircuitKey, hodlChan chan<- interface{}, - payload invoices.Payload) (invoices.HtlcResolution, er.R) - - // CancelInvoice attempts to cancel the invoice corresponding to the - // passed payment hash. - CancelInvoice(payHash lntypes.Hash) er.R - - // SettleHodlInvoice settles a hold invoice. - SettleHodlInvoice(preimage lntypes.Preimage) er.R - - // HodlUnsubscribeAll unsubscribes from all htlc resolutions. - HodlUnsubscribeAll(subscriber chan<- interface{}) -} - -// ChannelLink is an interface which represents the subsystem for managing the -// incoming htlc requests, applying the changes to the channel, and also -// propagating/forwarding it to htlc switch. -// -// abstraction level -// ^ -// | -// | - - - - - - - - - - - - Lightning - - - - - - - - - - - - - -// | -// | (Switch) (Switch) (Switch) -// | Alice <-- channel link --> Bob <-- channel link --> Carol -// | -// | - - - - - - - - - - - - - TCP - - - - - - - - - - - - - - - -// | -// | (Peer) (Peer) (Peer) -// | Alice <----- tcp conn --> Bob <---- tcp conn -----> Carol -// | -// -type ChannelLink interface { - // TODO(roasbeef): modify interface to embed mail boxes? - - // HandleSwitchPacket handles the switch packets. This packets might be - // forwarded to us from another channel link in case the htlc update - // came from another peer or if the update was created by user - // initially. - // - // NOTE: This function MUST be non-blocking (or block as little as - // possible). - HandleSwitchPacket(*htlcPacket) er.R - - // HandleLocalAddPacket handles a locally-initiated UpdateAddHTLC - // packet. It will be processed synchronously. - HandleLocalAddPacket(*htlcPacket) er.R - - // HandleChannelUpdate handles the htlc requests as settle/add/fail - // which sent to us from remote peer we have a channel with. 
- // - // NOTE: This function MUST be non-blocking (or block as little as - // possible). - HandleChannelUpdate(lnwire.Message) - - // ChannelPoint returns the channel outpoint for the channel link. - ChannelPoint() *wire.OutPoint - - // ChanID returns the channel ID for the channel link. The channel ID - // is a more compact representation of a channel's full outpoint. - ChanID() lnwire.ChannelID - - // ShortChanID returns the short channel ID for the channel link. The - // short channel ID encodes the exact location in the main chain that - // the original funding output can be found. - ShortChanID() lnwire.ShortChannelID - - // UpdateShortChanID updates the short channel ID for a link. This may - // be required in the event that a link is created before the short - // chan ID for it is known, or a re-org occurs, and the funding - // transaction changes location within the chain. - UpdateShortChanID() (lnwire.ShortChannelID, er.R) - - // UpdateForwardingPolicy updates the forwarding policy for the target - // ChannelLink. Once updated, the link will use the new forwarding - // policy to govern if it an incoming HTLC should be forwarded or not. - UpdateForwardingPolicy(ForwardingPolicy) - - // CheckHtlcForward should return a nil error if the passed HTLC details - // satisfy the current forwarding policy fo the target link. Otherwise, - // a LinkError with a valid protocol failure message should be returned - // in order to signal to the source of the HTLC, the policy consistency - // issue. - CheckHtlcForward(payHash [32]byte, incomingAmt lnwire.MilliSatoshi, - amtToForward lnwire.MilliSatoshi, - incomingTimeout, outgoingTimeout uint32, - heightNow uint32) *LinkError - - // CheckHtlcTransit should return a nil error if the passed HTLC details - // satisfy the current channel policy. Otherwise, a LinkError with a - // valid protocol failure message should be returned in order to signal - // the violation. 
This call is intended to be used for locally initiated - // payments for which there is no corresponding incoming htlc. - CheckHtlcTransit(payHash [32]byte, amt lnwire.MilliSatoshi, - timeout uint32, heightNow uint32) *LinkError - - // Bandwidth returns the amount of milli-satoshis which current link - // might pass through channel link. The value returned from this method - // represents the up to date available flow through the channel. This - // takes into account any forwarded but un-cleared HTLC's, and any - // HTLC's which have been set to the over flow queue. - Bandwidth() lnwire.MilliSatoshi - - // Stats return the statistics of channel link. Number of updates, - // total sent/received milli-satoshis. - Stats() (uint64, lnwire.MilliSatoshi, lnwire.MilliSatoshi) - - // Peer returns the representation of remote peer with which we have - // the channel link opened. - Peer() lnpeer.Peer - - // EligibleToForward returns a bool indicating if the channel is able - // to actively accept requests to forward HTLC's. A channel may be - // active, but not able to forward HTLC's if it hasn't yet finalized - // the pre-channel operation protocol with the remote peer. The switch - // will use this function in forwarding decisions accordingly. - EligibleToForward() bool - - // AttachMailBox delivers an active MailBox to the link. The MailBox may - // have buffered messages. - AttachMailBox(MailBox) - - // Start/Stop are used to initiate the start/stop of the channel link - // functioning. - Start() er.R - Stop() -} - -// ForwardingLog is an interface that represents a time series database which -// keep track of all successfully completed payment circuits. Every few -// seconds, the switch will collate and flush out all the successful payment -// circuits during the last interval. -type ForwardingLog interface { - // AddForwardingEvents is a method that should write out the set of - // forwarding events in a batch to persistent storage. 
Outside - // sub-systems can then query the contents of the log for analysis, - // visualizations, etc. - AddForwardingEvents([]channeldb.ForwardingEvent) er.R -} - -// TowerClient is the primary interface used by the daemon to backup pre-signed -// justice transactions to watchtowers. -type TowerClient interface { - // RegisterChannel persistently initializes any channel-dependent - // parameters within the client. This should be called during link - // startup to ensure that the client is able to support the link during - // operation. - RegisterChannel(lnwire.ChannelID) er.R - - // BackupState initiates a request to back up a particular revoked - // state. If the method returns nil, the backup is guaranteed to be - // successful unless the tower is unavailable and client is force quit, - // or the justice transaction would create dust outputs when trying to - // abide by the negotiated policy. If the channel we're trying to back - // up doesn't have a tweak for the remote party's output, then - // isTweakless should be true. - BackupState(*lnwire.ChannelID, *lnwallet.BreachRetribution, bool) er.R -} - -// InterceptableHtlcForwarder is the interface to set the interceptor -// implementation that intercepts htlc forwards. -type InterceptableHtlcForwarder interface { - // SetInterceptor sets a ForwardInterceptor. - SetInterceptor(interceptor ForwardInterceptor) -} - -// ForwardInterceptor is a function that is invoked from the switch for every -// incoming htlc that is intended to be forwarded. It is passed with the -// InterceptedForward that contains the information about the packet and a way -// to resolve it manually later in case it is held. -// The return value indicates if this handler will take control of this forward -// and resolve it later or let the switch execute its default behavior. -type ForwardInterceptor func(InterceptedForward) bool - -// InterceptedPacket contains the relevant information for the interceptor about -// an htlc. 
-type InterceptedPacket struct { - // IncomingCircuit contains the incoming channel and htlc id of the - // packet. - IncomingCircuit channeldb.CircuitKey - - // OutgoingChanID is the destination channel for this packet. - OutgoingChanID lnwire.ShortChannelID - - // Hash is the payment hash of the htlc. - Hash lntypes.Hash - - // OutgoingExpiry is the absolute block height at which the outgoing - // htlc expires. - OutgoingExpiry uint32 - - // OutgoingAmount is the amount to forward. - OutgoingAmount lnwire.MilliSatoshi - - // IncomingExpiry is the absolute block height at which the incoming - // htlc expires. - IncomingExpiry uint32 - - // IncomingAmount is the amount of the accepted htlc. - IncomingAmount lnwire.MilliSatoshi - - // CustomRecords are user-defined records in the custom type range that - // were included in the payload. - CustomRecords record.CustomSet - - // OnionBlob is the onion packet for the next hop - OnionBlob [lnwire.OnionPacketSize]byte -} - -// InterceptedForward is passed to the ForwardInterceptor for every forwarded -// htlc. It contains all the information about the packet which accordingly -// the interceptor decides if to hold or not. -// In addition this interface allows a later resolution by calling either -// Resume, Settle or Fail. -type InterceptedForward interface { - // Packet returns the intercepted packet. - Packet() InterceptedPacket - - // Resume notifies the intention to resume an existing hold forward. This - // basically means the caller wants to resume with the default behavior for - // this htlc which usually means forward it. - Resume() er.R - - // Settle notifies the intention to settle an existing hold - // forward with a given preimage. - Settle(lntypes.Preimage) er.R - - // Fails notifies the intention to fail an existing hold forward - Fail() er.R -} - -// htlcNotifier is an interface which represents the input side of the -// HtlcNotifier which htlc events are piped through. 
This interface is intended -// to allow for mocking of the htlcNotifier in tests, so is unexported because -// it is not needed outside of the htlcSwitch package. -type htlcNotifier interface { - // NotifyForwardingEvent notifies the HtlcNotifier than a htlc has been - // forwarded. - NotifyForwardingEvent(key HtlcKey, info HtlcInfo, - eventType HtlcEventType) - - // NotifyIncomingLinkFailEvent notifies that a htlc has failed on our - // incoming link. It takes an isReceive bool to differentiate between - // our node's receives and forwards. - NotifyLinkFailEvent(key HtlcKey, info HtlcInfo, - eventType HtlcEventType, linkErr *LinkError, incoming bool) - - // NotifyForwardingFailEvent notifies the HtlcNotifier that a htlc we - // forwarded has failed down the line. - NotifyForwardingFailEvent(key HtlcKey, eventType HtlcEventType) - - // NotifySettleEvent notifies the HtlcNotifier that a htlc that we - // committed to as part of a forward or a receive to our node has been - // settled. - NotifySettleEvent(key HtlcKey, eventType HtlcEventType) -} diff --git a/lnd/htlcswitch/link.go b/lnd/htlcswitch/link.go deleted file mode 100644 index 274e52d2..00000000 --- a/lnd/htlcswitch/link.go +++ /dev/null @@ -1,3100 +0,0 @@ -package htlcswitch - -import ( - "bytes" - "crypto/sha256" - "fmt" - "math" - prand "math/rand" - "sync" - "sync/atomic" - "time" - - "github.com/davecgh/go-spew/spew" - "github.com/go-errors/errors" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/contractcourt" - "github.com/pkt-cash/pktd/lnd/htlcswitch/hodl" - "github.com/pkt-cash/pktd/lnd/htlcswitch/hop" - "github.com/pkt-cash/pktd/lnd/invoices" - "github.com/pkt-cash/pktd/lnd/lnpeer" - "github.com/pkt-cash/pktd/lnd/lntypes" - "github.com/pkt-cash/pktd/lnd/lnwallet" - "github.com/pkt-cash/pktd/lnd/lnwallet/chainfee" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/queue" - "github.com/pkt-cash/pktd/lnd/ticker" - 
"github.com/pkt-cash/pktd/pktlog/log" - "github.com/pkt-cash/pktd/wire" -) - -func init() { - prand.Seed(time.Now().UnixNano()) -} - -const ( - // DefaultMaxOutgoingCltvExpiry is the maximum outgoing time lock that - // the node accepts for forwarded payments. The value is relative to the - // current block height. The reason to have a maximum is to prevent - // funds getting locked up unreasonably long. Otherwise, an attacker - // willing to lock its own funds too, could force the funds of this node - // to be locked up for an indefinite (max int32) number of blocks. - // - // The value 2016 corresponds to on average two weeks worth of blocks - // and is based on the maximum number of hops (20), the default CLTV - // delta (40), and some extra margin to account for the other lightning - // implementations and past lnd versions which used to have a default - // CLTV delta of 144. - DefaultMaxOutgoingCltvExpiry = 2016 - - // DefaultMinLinkFeeUpdateTimeout represents the minimum interval in - // which a link should propose to update its commitment fee rate. - DefaultMinLinkFeeUpdateTimeout = 10 * time.Minute - - // DefaultMaxLinkFeeUpdateTimeout represents the maximum interval in - // which a link should propose to update its commitment fee rate. - DefaultMaxLinkFeeUpdateTimeout = 60 * time.Minute - - // DefaultMaxLinkFeeAllocation is the highest allocation we'll allow - // a channel's commitment fee to be of its balance. This only applies to - // the initiator of the channel. - DefaultMaxLinkFeeAllocation float64 = 0.5 -) - -// ForwardingPolicy describes the set of constraints that a given ChannelLink -// is to adhere to when forwarding HTLC's. For each incoming HTLC, this set of -// constraints will be consulted in order to ensure that adequate fees are -// paid, and our time-lock parameters are respected. 
In the event that an -// incoming HTLC violates any of these constraints, it is to be _rejected_ with -// the error possibly carrying along a ChannelUpdate message that includes the -// latest policy. -type ForwardingPolicy struct { - // MinHTLC is the smallest HTLC that is to be forwarded. - MinHTLCOut lnwire.MilliSatoshi - - // MaxHTLC is the largest HTLC that is to be forwarded. - MaxHTLC lnwire.MilliSatoshi - - // BaseFee is the base fee, expressed in milli-satoshi that must be - // paid for each incoming HTLC. This field, combined with FeeRate is - // used to compute the required fee for a given HTLC. - BaseFee lnwire.MilliSatoshi - - // FeeRate is the fee rate, expressed in milli-satoshi that must be - // paid for each incoming HTLC. This field combined with BaseFee is - // used to compute the required fee for a given HTLC. - FeeRate lnwire.MilliSatoshi - - // TimeLockDelta is the absolute time-lock value, expressed in blocks, - // that will be subtracted from an incoming HTLC's timelock value to - // create the time-lock value for the forwarded outgoing HTLC. The - // following constraint MUST hold for an HTLC to be forwarded: - // - // * incomingHtlc.timeLock - timeLockDelta = fwdInfo.OutgoingCTLV - // - // where fwdInfo is the forwarding information extracted from the - // per-hop payload of the incoming HTLC's onion packet. - TimeLockDelta uint32 - - // TODO(roasbeef): add fee module inside of switch -} - -// ExpectedFee computes the expected fee for a given htlc amount. The value -// returned from this function is to be used as a sanity check when forwarding -// HTLC's to ensure that an incoming HTLC properly adheres to our propagated -// forwarding policy. 
-// -// TODO(roasbeef): also add in current available channel bandwidth, inverse -// func -func ExpectedFee(f ForwardingPolicy, - htlcAmt lnwire.MilliSatoshi) lnwire.MilliSatoshi { - - return f.BaseFee + (htlcAmt*f.FeeRate)/1000000 -} - -// ChannelLinkConfig defines the configuration for the channel link. ALL -// elements within the configuration MUST be non-nil for channel link to carry -// out its duties. -type ChannelLinkConfig struct { - // FwrdingPolicy is the initial forwarding policy to be used when - // deciding whether to forwarding incoming HTLC's or not. This value - // can be updated with subsequent calls to UpdateForwardingPolicy - // targeted at a given ChannelLink concrete interface implementation. - FwrdingPolicy ForwardingPolicy - - // Circuits provides restricted access to the switch's circuit map, - // allowing the link to open and close circuits. - Circuits CircuitModifier - - // Switch provides a reference to the HTLC switch, we only use this in - // testing to access circuit operations not typically exposed by the - // CircuitModifier. - // - // TODO(conner): remove after refactoring htlcswitch testing framework. - Switch *Switch - - // ForwardPackets attempts to forward the batch of htlcs through the - // switch. The function returns and error in case it fails to send one or - // more packets. The link's quit signal should be provided to allow - // cancellation of forwarding during link shutdown. - ForwardPackets func(chan struct{}, ...*htlcPacket) er.R - - // DecodeHopIterators facilitates batched decoding of HTLC Sphinx onion - // blobs, which are then used to inform how to forward an HTLC. - // - // NOTE: This function assumes the same set of readers and preimages - // are always presented for the same identifier. 
- DecodeHopIterators func([]byte, []hop.DecodeHopIteratorRequest) ( - []hop.DecodeHopIteratorResponse, er.R) - - // ExtractErrorEncrypter function is responsible for decoding HTLC - // Sphinx onion blob, and creating onion failure obfuscator. - ExtractErrorEncrypter hop.ErrorEncrypterExtracter - - // FetchLastChannelUpdate retrieves the latest routing policy for a - // target channel. This channel will typically be the outgoing channel - // specified when we receive an incoming HTLC. This will be used to - // provide payment senders our latest policy when sending encrypted - // error messages. - FetchLastChannelUpdate func(lnwire.ShortChannelID) (*lnwire.ChannelUpdate, er.R) - - // Peer is a lightning network node with which we have the channel link - // opened. - Peer lnpeer.Peer - - // Registry is a sub-system which responsible for managing the invoices - // in thread-safe manner. - Registry InvoiceDatabase - - // PreimageCache is a global witness beacon that houses any new - // preimages discovered by other links. We'll use this to add new - // witnesses that we discover which will notify any sub-systems - // subscribed to new events. - PreimageCache contractcourt.WitnessBeacon - - // OnChannelFailure is a function closure that we'll call if the - // channel failed for some reason. Depending on the severity of the - // error, the closure potentially must force close this channel and - // disconnect the peer. - // - // NOTE: The method must return in order for the ChannelLink to be able - // to shut down properly. - OnChannelFailure func(lnwire.ChannelID, lnwire.ShortChannelID, - LinkFailureError) - - // UpdateContractSignals is a function closure that we'll use to update - // outside sub-systems with the latest signals for our inner Lightning - // channel. These signals will notify the caller when the channel has - // been closed, or when the set of active HTLC's is updated. 
- UpdateContractSignals func(*contractcourt.ContractSignals) er.R - - // ChainEvents is an active subscription to the chain watcher for this - // channel to be notified of any on-chain activity related to this - // channel. - ChainEvents *contractcourt.ChainEventSubscription - - // FeeEstimator is an instance of a live fee estimator which will be - // used to dynamically regulate the current fee of the commitment - // transaction to ensure timely confirmation. - FeeEstimator chainfee.Estimator - - // hodl.Mask is a bitvector composed of hodl.Flags, specifying breakpoints - // for HTLC forwarding internal to the switch. - // - // NOTE: This should only be used for testing. - HodlMask hodl.Mask - - // SyncStates is used to indicate that we need send the channel - // reestablishment message to the remote peer. It should be done if our - // clients have been restarted, or remote peer have been reconnected. - SyncStates bool - - // BatchTicker is the ticker that determines the interval that we'll - // use to check the batch to see if there're any updates we should - // flush out. By batching updates into a single commit, we attempt to - // increase throughput by maximizing the number of updates coalesced - // into a single commit. - BatchTicker ticker.Ticker - - // FwdPkgGCTicker is the ticker determining the frequency at which - // garbage collection of forwarding packages occurs. We use a - // time-based approach, as opposed to block epochs, as to not hinder - // syncing. - FwdPkgGCTicker ticker.Ticker - - // PendingCommitTicker is a ticker that allows the link to determine if - // a locally initiated commitment dance gets stuck waiting for the - // remote party to revoke. - PendingCommitTicker ticker.Ticker - - // BatchSize is the max size of a batch of updates done to the link - // before we do a state update. - BatchSize uint32 - - // UnsafeReplay will cause a link to replay the adds in its latest - // commitment txn after the link is restarted. 
This should only be used - // in testing, it is here to ensure the sphinx replay detection on the - // receiving node is persistent. - UnsafeReplay bool - - // MinFeeUpdateTimeout represents the minimum interval in which a link - // will propose to update its commitment fee rate. A random timeout will - // be selected between this and MaxFeeUpdateTimeout. - MinFeeUpdateTimeout time.Duration - - // MaxFeeUpdateTimeout represents the maximum interval in which a link - // will propose to update its commitment fee rate. A random timeout will - // be selected between this and MinFeeUpdateTimeout. - MaxFeeUpdateTimeout time.Duration - - // OutgoingCltvRejectDelta defines the number of blocks before expiry of - // an htlc where we don't offer an htlc anymore. This should be at least - // the outgoing broadcast delta, because in any case we don't want to - // risk offering an htlc that triggers channel closure. - OutgoingCltvRejectDelta uint32 - - // TowerClient is an optional engine that manages the signing, - // encrypting, and uploading of justice transactions to the daemon's - // configured set of watchtowers. - TowerClient TowerClient - - // MaxOutgoingCltvExpiry is the maximum outgoing timelock that the link - // should accept for a forwarded HTLC. The value is relative to the - // current block height. - MaxOutgoingCltvExpiry uint32 - - // MaxFeeAllocation is the highest allocation we'll allow a channel's - // commitment fee to be of its balance. This only applies to the - // initiator of the channel. - MaxFeeAllocation float64 - - // NotifyActiveLink allows the link to tell the ChannelNotifier when a - // link is first started. - NotifyActiveLink func(wire.OutPoint) - - // NotifyActiveChannel allows the link to tell the ChannelNotifier when - // channels becomes active. - NotifyActiveChannel func(wire.OutPoint) - - // NotifyInactiveChannel allows the switch to tell the ChannelNotifier - // when channels become inactive. 
- NotifyInactiveChannel func(wire.OutPoint) - - // HtlcNotifier is an instance of a htlcNotifier which we will pipe htlc - // events through. - HtlcNotifier htlcNotifier -} - -// localUpdateAddMsg contains a locally initiated htlc and a channel that will -// receive the outcome of the link processing. This channel must be buffered to -// prevent the link from blocking. -type localUpdateAddMsg struct { - pkt *htlcPacket - err chan er.R -} - -// channelLink is the service which drives a channel's commitment update -// state-machine. In the event that an HTLC needs to be propagated to another -// link, the forward handler from config is used which sends HTLC to the -// switch. Additionally, the link encapsulate logic of commitment protocol -// message ordering and updates. -type channelLink struct { - // The following fields are only meant to be used *atomically* - started int32 - reestablished int32 - shutdown int32 - - // failed should be set to true in case a link error happens, making - // sure we don't process any more updates. - failed bool - - // keystoneBatch represents a volatile list of keystones that must be - // written before attempting to sign the next commitment txn. These - // represent all the HTLC's forwarded to the link from the switch. Once - // we lock them into our outgoing commitment, then the circuit has a - // keystone, and is fully opened. - keystoneBatch []Keystone - - // openedCircuits is the set of all payment circuits that will be open - // once we make our next commitment. After making the commitment we'll - // ACK all these from our mailbox to ensure that they don't get - // re-delivered if we reconnect. - openedCircuits []CircuitKey - - // closedCircuits is the set of all payment circuits that will be - // closed once we make our next commitment. After taking the commitment - // we'll ACK all these to ensure that they don't get re-delivered if we - // reconnect. 
- closedCircuits []CircuitKey - - // channel is a lightning network channel to which we apply htlc - // updates. - channel *lnwallet.LightningChannel - - // shortChanID is the most up to date short channel ID for the link. - shortChanID lnwire.ShortChannelID - - // cfg is a structure which carries all dependable fields/handlers - // which may affect behaviour of the service. - cfg ChannelLinkConfig - - // mailBox is the main interface between the outside world and the - // link. All incoming messages will be sent over this mailBox. Messages - // include new updates from our connected peer, and new packets to be - // forwarded sent by the switch. - mailBox MailBox - - // upstream is a channel that new messages sent from the remote peer to - // the local peer will be sent across. - upstream chan lnwire.Message - - // downstream is a channel in which new multi-hop HTLC's to be - // forwarded will be sent across. Messages from this channel are sent - // by the HTLC switch. - downstream chan *htlcPacket - - // localUpdateAdd is a channel to which locally initiated HTLCs are - // sent across. - localUpdateAdd chan *localUpdateAddMsg - - // htlcUpdates is a channel that we'll use to update outside - // sub-systems with the latest set of active HTLC's on our channel. - htlcUpdates chan *contractcourt.ContractUpdate - - // updateFeeTimer is the timer responsible for updating the link's - // commitment fee every time it fires. - updateFeeTimer *time.Timer - - // uncommittedPreimages stores a list of all preimages that have been - // learned since receiving the last CommitSig from the remote peer. The - // batch will be flushed just before accepting the subsequent CommitSig - // or on shutdown to avoid doing a write for each preimage received. - uncommittedPreimages []lntypes.Preimage - - sync.RWMutex - - // hodlQueue is used to receive exit hop htlc resolutions from invoice - // registry. 
- hodlQueue *queue.ConcurrentQueue - - // hodlMap stores related htlc data for a circuit key. It allows - // resolving those htlcs when we receive a message on hodlQueue. - hodlMap map[channeldb.CircuitKey]hodlHtlc - - wg sync.WaitGroup - quit chan struct{} -} - -// hodlHtlc contains htlc data that is required for resolution. -type hodlHtlc struct { - pd *lnwallet.PaymentDescriptor - obfuscator hop.ErrorEncrypter -} - -// NewChannelLink creates a new instance of a ChannelLink given a configuration -// and active channel that will be used to verify/apply updates to. -func NewChannelLink(cfg ChannelLinkConfig, - channel *lnwallet.LightningChannel) ChannelLink { - - return &channelLink{ - cfg: cfg, - channel: channel, - shortChanID: channel.ShortChanID(), - // TODO(roasbeef): just do reserve here? - htlcUpdates: make(chan *contractcourt.ContractUpdate), - hodlMap: make(map[channeldb.CircuitKey]hodlHtlc), - hodlQueue: queue.NewConcurrentQueue(10), - quit: make(chan struct{}), - localUpdateAdd: make(chan *localUpdateAddMsg), - } -} - -// A compile time check to ensure channelLink implements the ChannelLink -// interface. -var _ ChannelLink = (*channelLink)(nil) - -// Start starts all helper goroutines required for the operation of the channel -// link. -// -// NOTE: Part of the ChannelLink interface. -func (l *channelLink) Start() er.R { - if !atomic.CompareAndSwapInt32(&l.started, 0, 1) { - err := er.Errorf("channel link(%v): already started", l) - log.Warn("already started") - return err - } - - log.Info("starting") - - // If the config supplied watchtower client, ensure the channel is - // registered before trying to use it during operation. - // TODO(halseth): support anchor types for watchtower. 
- state := l.channel.State() - if l.cfg.TowerClient != nil && state.ChanType.HasAnchors() { - log.Warnf("Skipping tower registration for anchor " + - "channel type") - } else if l.cfg.TowerClient != nil && !state.ChanType.HasAnchors() { - err := l.cfg.TowerClient.RegisterChannel(l.ChanID()) - if err != nil { - return err - } - } - - l.mailBox.ResetMessages() - l.hodlQueue.Start() - - // Before launching the htlcManager messages, revert any circuits that - // were marked open in the switch's circuit map, but did not make it - // into a commitment txn. We use the next local htlc index as the cut - // off point, since all indexes below that are committed. This action - // is only performed if the link's final short channel ID has been - // assigned, otherwise we would try to trim the htlcs belonging to the - // all-zero, hop.Source ID. - if l.ShortChanID() != hop.Source { - localHtlcIndex, err := l.channel.NextLocalHtlcIndex() - if err != nil { - return er.Errorf("unable to retrieve next local "+ - "htlc index: %v", err) - } - - // NOTE: This is automatically done by the switch when it - // starts up, but is necessary to prevent inconsistencies in - // the case that the link flaps. This is a result of a link's - // life-cycle being shorter than that of the switch. - chanID := l.ShortChanID() - err = l.cfg.Circuits.TrimOpenCircuits(chanID, localHtlcIndex) - if err != nil { - return er.Errorf("unable to trim circuits above "+ - "local htlc index %d: %v", localHtlcIndex, err) - } - - // Since the link is live, before we start the link we'll update - // the ChainArbitrator with the set of new channel signals for - // this channel. 
- // - // TODO(roasbeef): split goroutines within channel arb to avoid - go func() { - signals := &contractcourt.ContractSignals{ - HtlcUpdates: l.htlcUpdates, - ShortChanID: l.channel.ShortChanID(), - } - - err := l.cfg.UpdateContractSignals(signals) - if err != nil { - log.Errorf("unable to update signals") - } - }() - } - - l.updateFeeTimer = time.NewTimer(l.randomFeeUpdateTimeout()) - - l.wg.Add(1) - go l.htlcManager() - - return nil -} - -// Stop gracefully stops all active helper goroutines, then waits until they've -// exited. -// -// NOTE: Part of the ChannelLink interface. -func (l *channelLink) Stop() { - if !atomic.CompareAndSwapInt32(&l.shutdown, 0, 1) { - log.Warn("already stopped") - return - } - - log.Info("stopping") - - // As the link is stopping, we are no longer interested in htlc - // resolutions coming from the invoice registry. - l.cfg.Registry.HodlUnsubscribeAll(l.hodlQueue.ChanIn()) - - if l.cfg.ChainEvents.Cancel != nil { - l.cfg.ChainEvents.Cancel() - } - - l.updateFeeTimer.Stop() - l.hodlQueue.Stop() - - close(l.quit) - l.wg.Wait() - - // Now that the htlcManager has completely exited, reset the packet - // courier. This allows the mailbox to revaluate any lingering Adds that - // were delivered but didn't make it on a commitment to be failed back - // if the link is offline for an extended period of time. The error is - // ignored since it can only fail when the daemon is exiting. - _ = l.mailBox.ResetPackets() - - // As a final precaution, we will attempt to flush any uncommitted - // preimages to the preimage cache. The preimages should be re-delivered - // after channel reestablishment, however this adds an extra layer of - // protection in case the peer never returns. Without this, we will be - // unable to settle any contracts depending on the preimages even though - // we had learned them at some point. - err := l.cfg.PreimageCache.AddPreimages(l.uncommittedPreimages...) 
- if err != nil { - log.Errorf("unable to add preimages=%v to cache: %v", - l.uncommittedPreimages, err) - } -} - -// WaitForShutdown blocks until the link finishes shutting down, which includes -// termination of all dependent goroutines. -func (l *channelLink) WaitForShutdown() { - l.wg.Wait() -} - -// EligibleToForward returns a bool indicating if the channel is able to -// actively accept requests to forward HTLC's. We're able to forward HTLC's if -// we know the remote party's next revocation point. Otherwise, we can't -// initiate new channel state. We also require that the short channel ID not be -// the all-zero source ID, meaning that the channel has had its ID finalized. -func (l *channelLink) EligibleToForward() bool { - return l.channel.RemoteNextRevocation() != nil && - l.ShortChanID() != hop.Source && - l.isReestablished() -} - -// isReestablished returns true if the link has successfully completed the -// channel reestablishment dance. -func (l *channelLink) isReestablished() bool { - return atomic.LoadInt32(&l.reestablished) == 1 -} - -// markReestablished signals that the remote peer has successfully exchanged -// channel reestablish messages and that the channel is ready to process -// subsequent messages. -func (l *channelLink) markReestablished() { - atomic.StoreInt32(&l.reestablished, 1) -} - -// sampleNetworkFee samples the current fee rate on the network to get into the -// chain in a timely manner. The returned value is expressed in fee-per-kw, as -// this is the native rate used when computing the fee for commitment -// transactions, and the second-level HTLC transactions. -func (l *channelLink) sampleNetworkFee() (chainfee.SatPerKWeight, er.R) { - // We'll first query for the sat/kw recommended to be confirmed within 3 - // blocks. 
- feePerKw, err := l.cfg.FeeEstimator.EstimateFeePerKW(3) - if err != nil { - return 0, err - } - - log.Debugf("sampled fee rate for 3 block conf: %v sat/kw", - int64(feePerKw)) - - return feePerKw, nil -} - -// shouldAdjustCommitFee returns true if we should update our commitment fee to -// match that of the network fee. We'll only update our commitment fee if the -// network fee is +/- 10% to our network fee. -func shouldAdjustCommitFee(netFee, chanFee chainfee.SatPerKWeight) bool { - switch { - // If the network fee is greater than the commitment fee, then we'll - // switch to it if it's at least 10% greater than the commit fee. - case netFee > chanFee && netFee >= (chanFee+(chanFee*10)/100): - return true - - // If the network fee is less than our commitment fee, then we'll - // switch to it if it's at least 10% less than the commitment fee. - case netFee < chanFee && netFee <= (chanFee-(chanFee*10)/100): - return true - - // Otherwise, we won't modify our fee. - default: - return false - } -} - -// createFailureWithUpdate retrieves this link's last channel update message and -// passes it into the callback. It expects a fully populated failure message. -func (l *channelLink) createFailureWithUpdate( - cb func(update *lnwire.ChannelUpdate) lnwire.FailureMessage) lnwire.FailureMessage { - - update, err := l.cfg.FetchLastChannelUpdate(l.ShortChanID()) - if err != nil { - return &lnwire.FailTemporaryNodeFailure{} - } - - return cb(update) -} - -// syncChanState attempts to synchronize channel states with the remote party. -// This method is to be called upon reconnection after the initial funding -// flow. We'll compare out commitment chains with the remote party, and re-send -// either a danging commit signature, a revocation, or both. -func (l *channelLink) syncChanStates() er.R { - log.Info("attempting to re-resynchronize") - - // First, we'll generate our ChanSync message to send to the other - // side. 
Based on this message, the remote party will decide if they - // need to retransmit any data or not. - chanState := l.channel.State() - localChanSyncMsg, err := chanState.ChanSyncMsg() - if err != nil { - return er.Errorf("unable to generate chan sync message for "+ - "ChannelPoint(%v)", l.channel.ChannelPoint()) - } - - if err := l.cfg.Peer.SendMessage(true, localChanSyncMsg); err != nil { - return er.Errorf("unable to send chan sync message for "+ - "ChannelPoint(%v): %v", l.channel.ChannelPoint(), err) - } - - var msgsToReSend []lnwire.Message - - // Next, we'll wait indefinitely to receive the ChanSync message. The - // first message sent MUST be the ChanSync message. - select { - case msg := <-l.upstream: - remoteChanSyncMsg, ok := msg.(*lnwire.ChannelReestablish) - if !ok { - return er.Errorf("first message sent to sync "+ - "should be ChannelReestablish, instead "+ - "received: %T", msg) - } - - // If the remote party indicates that they think we haven't - // done any state updates yet, then we'll retransmit the - // funding locked message first. We do this, as at this point - // we can't be sure if they've really received the - // FundingLocked message. - if remoteChanSyncMsg.NextLocalCommitHeight == 1 && - localChanSyncMsg.NextLocalCommitHeight == 1 && - !l.channel.IsPending() { - - log.Infof("resending FundingLocked message to peer") - - nextRevocation, err := l.channel.NextRevocationKey() - if err != nil { - return er.Errorf("unable to create next "+ - "revocation: %v", err) - } - - fundingLockedMsg := lnwire.NewFundingLocked( - l.ChanID(), nextRevocation, - ) - err = l.cfg.Peer.SendMessage(false, fundingLockedMsg) - if err != nil { - return er.Errorf("unable to re-send "+ - "FundingLocked: %v", err) - } - } - - // In any case, we'll then process their ChanSync message. 
- log.Info("received re-establishment message from remote side") - - var ( - openedCircuits []CircuitKey - closedCircuits []CircuitKey - ) - - // We've just received a ChanSync message from the remote - // party, so we'll process the message in order to determine - // if we need to re-transmit any messages to the remote party. - msgsToReSend, openedCircuits, closedCircuits, err = - l.channel.ProcessChanSyncMsg(remoteChanSyncMsg) - if err != nil { - return err - } - - // Repopulate any identifiers for circuits that may have been - // opened or unclosed. This may happen if we needed to - // retransmit a commitment signature message. - l.openedCircuits = openedCircuits - l.closedCircuits = closedCircuits - - // Ensure that all packets have been have been removed from the - // link's mailbox. - if err := l.ackDownStreamPackets(); err != nil { - return err - } - - if len(msgsToReSend) > 0 { - log.Infof("sending %v updates to synchronize the "+ - "state", len(msgsToReSend)) - } - - // If we have any messages to retransmit, we'll do so - // immediately so we return to a synchronized state as soon as - // possible. - for _, msg := range msgsToReSend { - l.cfg.Peer.SendMessage(false, msg) - } - - case <-l.quit: - return ErrLinkShuttingDown.Default() - } - - return nil -} - -// resolveFwdPkgs loads any forwarding packages for this link from disk, and -// reprocesses them in order. The primary goal is to make sure that any HTLCs -// we previously received are reinstated in memory, and forwarded to the switch -// if necessary. After a restart, this will also delete any previously -// completed packages. 
-func (l *channelLink) resolveFwdPkgs() er.R { - fwdPkgs, err := l.channel.LoadFwdPkgs() - if err != nil { - return err - } - - log.Debugf("loaded %d fwd pks", len(fwdPkgs)) - - for _, fwdPkg := range fwdPkgs { - if err := l.resolveFwdPkg(fwdPkg); err != nil { - return err - } - } - - // If any of our reprocessing steps require an update to the commitment - // txn, we initiate a state transition to capture all relevant changes. - if l.channel.PendingLocalUpdateCount() > 0 { - return l.updateCommitTx() - } - - return nil -} - -// resolveFwdPkg interprets the FwdState of the provided package, either -// reprocesses any outstanding htlcs in the package, or performs garbage -// collection on the package. -func (l *channelLink) resolveFwdPkg(fwdPkg *channeldb.FwdPkg) er.R { - // Remove any completed packages to clear up space. - if fwdPkg.State == channeldb.FwdStateCompleted { - log.Debugf("removing completed fwd pkg for height=%d", - fwdPkg.Height) - - err := l.channel.RemoveFwdPkgs(fwdPkg.Height) - if err != nil { - log.Errorf("unable to remove fwd pkg for height=%d: "+ - "%v", fwdPkg.Height, err) - return err - } - } - - // Otherwise this is either a new package or one has gone through - // processing, but contains htlcs that need to be restored in memory. - // We replay this forwarding package to make sure our local mem state - // is resurrected, we mimic any original responses back to the remote - // party, and re-forward the relevant HTLCs to the switch. - - // If the package is fully acked but not completed, it must still have - // settles and fails to propagate. - if !fwdPkg.SettleFailFilter.IsFull() { - settleFails, err := lnwallet.PayDescsFromRemoteLogUpdates( - fwdPkg.Source, fwdPkg.Height, fwdPkg.SettleFails, - ) - if err != nil { - log.Errorf("unable to process remote log updates: %v", - err) - return err - } - l.processRemoteSettleFails(fwdPkg, settleFails) - } - - // Finally, replay *ALL ADDS* in this forwarding package. 
The - // downstream logic is able to filter out any duplicates, but we must - // shove the entire, original set of adds down the pipeline so that the - // batch of adds presented to the sphinx router does not ever change. - if !fwdPkg.AckFilter.IsFull() { - adds, err := lnwallet.PayDescsFromRemoteLogUpdates( - fwdPkg.Source, fwdPkg.Height, fwdPkg.Adds, - ) - if err != nil { - log.Errorf("unable to process remote log updates: %v", - err) - return err - } - l.processRemoteAdds(fwdPkg, adds) - - // If the link failed during processing the adds, we must - // return to ensure we won't attempted to update the state - // further. - if l.failed { - return er.Errorf("link failed while " + - "processing remote adds") - } - } - - return nil -} - -// fwdPkgGarbager periodically reads all forwarding packages from disk and -// removes those that can be discarded. It is safe to do this entirely in the -// background, since all state is coordinated on disk. This also ensures the -// link can continue to process messages and interleave database accesses. -// -// NOTE: This MUST be run as a goroutine. -func (l *channelLink) fwdPkgGarbager() { - defer l.wg.Done() - - l.cfg.FwdPkgGCTicker.Resume() - defer l.cfg.FwdPkgGCTicker.Stop() - - if err := l.loadAndRemove(); err != nil { - log.Warnf("unable to run initial fwd pkgs gc: %v", err) - } - - for { - select { - case <-l.cfg.FwdPkgGCTicker.Ticks(): - if err := l.loadAndRemove(); err != nil { - log.Warnf("unable to remove fwd pkgs: %v", - err) - continue - } - case <-l.quit: - return - } - } -} - -// loadAndRemove loads all the channels forwarding packages and determines if -// they can be removed. It is called once before the FwdPkgGCTicker ticks so that -// a longer tick interval can be used. 
-func (l *channelLink) loadAndRemove() er.R { - fwdPkgs, err := l.channel.LoadFwdPkgs() - if err != nil { - return err - } - - var removeHeights []uint64 - for _, fwdPkg := range fwdPkgs { - if fwdPkg.State != channeldb.FwdStateCompleted { - continue - } - - removeHeights = append(removeHeights, fwdPkg.Height) - } - - // If removeHeights is empty, return early so we don't use a db - // transaction. - if len(removeHeights) == 0 { - return nil - } - - return l.channel.RemoveFwdPkgs(removeHeights...) -} - -// htlcManager is the primary goroutine which drives a channel's commitment -// update state-machine in response to messages received via several channels. -// This goroutine reads messages from the upstream (remote) peer, and also from -// downstream channel managed by the channel link. In the event that an htlc -// needs to be forwarded, then send-only forward handler is used which sends -// htlc packets to the switch. Additionally, the this goroutine handles acting -// upon all timeouts for any active HTLCs, manages the channel's revocation -// window, and also the htlc trickle queue+timer for this active channels. -// -// NOTE: This MUST be run as a goroutine. -func (l *channelLink) htlcManager() { - defer func() { - l.cfg.BatchTicker.Stop() - l.wg.Done() - log.Infof("exited") - }() - - log.Infof("HTLC manager started, bandwidth=%v", l.Bandwidth()) - - // Notify any clients that the link is now in the switch via an - // ActiveLinkEvent. - l.cfg.NotifyActiveLink(*l.ChannelPoint()) - - // TODO(roasbeef): need to call wipe chan whenever D/C? - - // If this isn't the first time that this channel link has been - // created, then we'll need to check to see if we need to - // re-synchronize state with the remote peer. settledHtlcs is a map of - // HTLC's that we re-settled as part of the channel state sync. 
- if l.cfg.SyncStates { - err := l.syncChanStates() - if err != nil { - log.Warnf("error when syncing channel states: %v", err) - - errr := er.Wrapped(err) - errDataLoss, localDataLoss := - errr.(*lnwallet.ErrCommitSyncLocalDataLoss) - - switch { - case ErrLinkShuttingDown.Is(err): - log.Debugf("unable to sync channel states, " + - "link is shutting down") - return - - // We failed syncing the commit chains, probably - // because the remote has lost state. We should force - // close the channel. - case lnwallet.ErrCommitSyncRemoteDataLoss.Is(err): - fallthrough - - // The remote sent us an invalid last commit secret, we - // should force close the channel. - // TODO(halseth): and permanently ban the peer? - case lnwallet.ErrInvalidLastCommitSecret.Is(err): - fallthrough - - // The remote sent us a commit point different from - // what they sent us before. - // TODO(halseth): ban peer? - case lnwallet.ErrInvalidLocalUnrevokedCommitPoint.Is(err): - // We'll fail the link and tell the peer to - // force close the channel. Note that the - // database state is not updated here, but will - // be updated when the close transaction is - // ready to avoid that we go down before - // storing the transaction in the db. - l.fail( - LinkFailureError{ - code: ErrSyncError, - ForceClose: true, - }, - "unable to synchronize channel "+ - "states: %v", err, - ) - return - - // We have lost state and cannot safely force close the - // channel. Fail the channel and wait for the remote to - // hopefully force close it. The remote has sent us its - // latest unrevoked commitment point, and we'll store - // it in the database, such that we can attempt to - // recover the funds if the remote force closes the - // channel. - case localDataLoss: - err := l.channel.MarkDataLoss( - errDataLoss.CommitPoint, - ) - if err != nil { - log.Errorf("unable to mark channel "+ - "data loss: %v", err) - } - - // We determined the commit chains were not possible to - // sync. 
We cautiously fail the channel, but don't - // force close. - // TODO(halseth): can we safely force close in any - // cases where this error is returned? - case lnwallet.ErrCannotSyncCommitChains.Is(err): - if err := l.channel.MarkBorked(); err != nil { - log.Errorf("unable to mark channel "+ - "borked: %v", err) - } - - // Other, unspecified error. - default: - } - - l.fail( - LinkFailureError{ - code: ErrRecoveryError, - ForceClose: false, - }, - "unable to synchronize channel "+ - "states: %v", err, - ) - return - } - } - - // We've successfully reestablished the channel, mark it as such to - // allow the switch to forward HTLCs in the outbound direction. - l.markReestablished() - - // Now that we've received both funding locked and channel reestablish, - // we can go ahead and send the active channel notification. We'll also - // defer the inactive notification for when the link exits to ensure - // that every active notification is matched by an inactive one. - l.cfg.NotifyActiveChannel(*l.ChannelPoint()) - defer l.cfg.NotifyInactiveChannel(*l.ChannelPoint()) - - // With the channel states synced, we now reset the mailbox to ensure - // we start processing all unacked packets in order. This is done here - // to ensure that all acknowledgments that occur during channel - // resynchronization have taken affect, causing us only to pull unacked - // packets after starting to read from the downstream mailbox. - l.mailBox.ResetPackets() - - // After cleaning up any memory pertaining to incoming packets, we now - // replay our forwarding packages to handle any htlcs that can be - // processed locally, or need to be forwarded out to the switch. We will - // only attempt to resolve packages if our short chan id indicates that - // the channel is not pending, otherwise we should have no htlcs to - // reforward. 
- if l.ShortChanID() != hop.Source { - if err := l.resolveFwdPkgs(); err != nil { - l.fail(LinkFailureError{code: ErrInternalError}, - "unable to resolve fwd pkgs: %v", err) - return - } - - // With our link's in-memory state fully reconstructed, spawn a - // goroutine to manage the reclamation of disk space occupied by - // completed forwarding packages. - l.wg.Add(1) - go l.fwdPkgGarbager() - } - - for { - // We must always check if we failed at some point processing - // the last update before processing the next. - if l.failed { - log.Errorf("link failed, exiting htlcManager") - return - } - - // If the previous event resulted in a non-empty batch, resume - // the batch ticker so that it can be cleared. Otherwise pause - // the ticker to prevent waking up the htlcManager while the - // batch is empty. - if l.channel.PendingLocalUpdateCount() > 0 { - l.cfg.BatchTicker.Resume() - } else { - l.cfg.BatchTicker.Pause() - } - - select { - // Our update fee timer has fired, so we'll check the network - // fee to see if we should adjust our commitment fee. - case <-l.updateFeeTimer.C: - l.updateFeeTimer.Reset(l.randomFeeUpdateTimeout()) - - // If we're not the initiator of the channel, don't we - // don't control the fees, so we can ignore this. - if !l.channel.IsInitiator() { - continue - } - - // If we are the initiator, then we'll sample the - // current fee rate to get into the chain within 3 - // blocks. - netFee, err := l.sampleNetworkFee() - if err != nil { - log.Errorf("unable to sample network fee: %v", - err) - continue - } - - // We'll check to see if we should update the fee rate - // based on our current set fee rate. We'll cap the new - // fee rate to our max fee allocation. 
- commitFee := l.channel.CommitFeeRate() - maxFee := l.channel.MaxFeeRate(l.cfg.MaxFeeAllocation) - newCommitFee := chainfee.SatPerKWeight( - math.Min(float64(netFee), float64(maxFee)), - ) - if !shouldAdjustCommitFee(newCommitFee, commitFee) { - continue - } - - // If we do, then we'll send a new UpdateFee message to - // the remote party, to be locked in with a new update. - if err := l.updateChannelFee(newCommitFee); err != nil { - log.Errorf("unable to update fee rate: %v", - err) - continue - } - - // The underlying channel has notified us of a unilateral close - // carried out by the remote peer. In the case of such an - // event, we'll wipe the channel state from the peer, and mark - // the contract as fully settled. Afterwards we can exit. - // - // TODO(roasbeef): add force closure? also breach? - case <-l.cfg.ChainEvents.RemoteUnilateralClosure: - log.Warnf("remote peer has closed on-chain") - - // TODO(roasbeef): remove all together - go func() { - chanPoint := l.channel.ChannelPoint() - l.cfg.Peer.WipeChannel(chanPoint) - }() - - return - - case <-l.cfg.BatchTicker.Ticks(): - // Attempt to extend the remote commitment chain - // including all the currently pending entries. If the - // send was unsuccessful, then abandon the update, - // waiting for the revocation window to open up. - if !l.updateCommitTxOrFail() { - return - } - - case <-l.cfg.PendingCommitTicker.Ticks(): - l.fail(LinkFailureError{code: ErrRemoteUnresponsive}, - "unable to complete dance") - return - - // A message from the switch was just received. This indicates - // that the link is an intermediate hop in a multi-hop HTLC - // circuit. - case pkt := <-l.downstream: - l.handleDownstreamPkt(pkt) - - // A message containing a locally initiated add was received. - case msg := <-l.localUpdateAdd: - msg.err <- l.handleDownstreamUpdateAdd(msg.pkt) - - // A message from the connected peer was just received. 
This - // indicates that we have a new incoming HTLC, either directly - // for us, or part of a multi-hop HTLC circuit. - case msg := <-l.upstream: - l.handleUpstreamMsg(msg) - - // A htlc resolution is received. This means that we now have a - // resolution for a previously accepted htlc. - case hodlItem := <-l.hodlQueue.ChanOut(): - htlcResolution := hodlItem.(invoices.HtlcResolution) - err := l.processHodlQueue(htlcResolution) - if err != nil { - l.fail(LinkFailureError{code: ErrInternalError}, - fmt.Sprintf("process hodl queue: %v", - err.String()), - ) - return - } - - case <-l.quit: - return - } - } -} - -// processHodlQueue processes a received htlc resolution and continues reading -// from the hodl queue until no more resolutions remain. When this function -// returns without an error, the commit tx should be updated. -func (l *channelLink) processHodlQueue( - firstResolution invoices.HtlcResolution) er.R { - - // Try to read all waiting resolution messages, so that they can all be - // processed in a single commitment tx update. - htlcResolution := firstResolution -loop: - for { - // Lookup all hodl htlcs that can be failed or settled with this event. - // The hodl htlc must be present in the map. - circuitKey := htlcResolution.CircuitKey() - hodlHtlc, ok := l.hodlMap[circuitKey] - if !ok { - return er.Errorf("hodl htlc not found: %v", circuitKey) - } - - if err := l.processHtlcResolution(htlcResolution, hodlHtlc); err != nil { - return err - } - - // Clean up hodl map. - delete(l.hodlMap, circuitKey) - - select { - case item := <-l.hodlQueue.ChanOut(): - htlcResolution = item.(invoices.HtlcResolution) - default: - break loop - } - } - - // Update the commitment tx. - if err := l.updateCommitTx(); err != nil { - return er.Errorf("unable to update commitment: %v", err) - } - - return nil -} - -// processHtlcResolution applies a received htlc resolution to the provided -// htlc. 
When this function returns without an error, the commit tx should be -// updated. -func (l *channelLink) processHtlcResolution(resolution invoices.HtlcResolution, - htlc hodlHtlc) er.R { - - circuitKey := resolution.CircuitKey() - - // Determine required action for the resolution based on the type of - // resolution we have received. - switch res := resolution.(type) { - // Settle htlcs that returned a settle resolution using the preimage - // in the resolution. - case *invoices.HtlcSettleResolution: - log.Debugf("received settle resolution for %v "+ - "with outcome: %v", circuitKey, res.Outcome) - - return l.settleHTLC(res.Preimage, htlc.pd) - - // For htlc failures, we get the relevant failure message based - // on the failure resolution and then fail the htlc. - case *invoices.HtlcFailResolution: - log.Debugf("received cancel resolution for "+ - "%v with outcome: %v", circuitKey, res.Outcome) - - // Get the lnwire failure message based on the resolution - // result. - failure := getResolutionFailure(res, htlc.pd.Amount) - - l.sendHTLCError( - htlc.pd, failure, htlc.obfuscator, true, - ) - return nil - - // Fail if we do not get a settle of fail resolution, since we - // are only expecting to handle settles and fails. - default: - return er.Errorf("unknown htlc resolution type: %T", - resolution) - } -} - -// getResolutionFailure returns the wire message that a htlc resolution should -// be failed with. -func getResolutionFailure(resolution *invoices.HtlcFailResolution, - amount lnwire.MilliSatoshi) *LinkError { - - // If the resolution has been resolved as part of a MPP timeout, - // we need to fail the htlc with lnwire.FailMppTimeout. - if resolution.Outcome == invoices.ResultMppTimeout { - return NewDetailedLinkError( - &lnwire.FailMPPTimeout{}, resolution.Outcome, - ) - } - - // If the htlc is not a MPP timeout, we fail it with - // FailIncorrectDetails. 
This error is sent for invoice payment - // failures such as underpayment/ expiry too soon and hodl invoices - // (which return FailIncorrectDetails to avoid leaking information). - incorrectDetails := lnwire.NewFailIncorrectDetails( - amount, uint32(resolution.AcceptHeight), - ) - - return NewDetailedLinkError(incorrectDetails, resolution.Outcome) -} - -// randomFeeUpdateTimeout returns a random timeout between the bounds defined -// within the link's configuration that will be used to determine when the link -// should propose an update to its commitment fee rate. -func (l *channelLink) randomFeeUpdateTimeout() time.Duration { - lower := int64(l.cfg.MinFeeUpdateTimeout) - upper := int64(l.cfg.MaxFeeUpdateTimeout) - return time.Duration(prand.Int63n(upper-lower) + lower) -} - -// handleDownstreamUpdateAdd processes an UpdateAddHTLC packet sent from the -// downstream HTLC Switch. -func (l *channelLink) handleDownstreamUpdateAdd(pkt *htlcPacket) er.R { - htlc, ok := pkt.htlc.(*lnwire.UpdateAddHTLC) - if !ok { - return er.New("not an UpdateAddHTLC packet") - } - - // If hodl.AddOutgoing mode is active, we exit early to simulate - // arbitrary delays between the switch adding an ADD to the - // mailbox, and the HTLC being added to the commitment state. - if l.cfg.HodlMask.Active(hodl.AddOutgoing) { - log.Warnf(hodl.AddOutgoing.Warning()) - l.mailBox.AckPacket(pkt.inKey()) - return nil - } - - // A new payment has been initiated via the downstream channel, - // so we add the new HTLC to our local log, then update the - // commitment chains. - htlc.ChanID = l.ChanID() - openCircuitRef := pkt.inKey() - index, err := l.channel.AddHTLC(htlc, &openCircuitRef) - if err != nil { - // The HTLC was unable to be added to the state machine, - // as a result, we'll signal the switch to cancel the - // pending payment. 
- log.Warnf("Unable to handle downstream add HTLC: %v", - err) - - // Remove this packet from the link's mailbox, this - // prevents it from being reprocessed if the link - // restarts and resets it mailbox. If this response - // doesn't make it back to the originating link, it will - // be rejected upon attempting to reforward the Add to - // the switch, since the circuit was never fully opened, - // and the forwarding package shows it as - // unacknowledged. - l.mailBox.FailAdd(pkt) - - return er.E(NewDetailedLinkError( - lnwire.NewTemporaryChannelFailure(nil), - OutgoingFailureDownstreamHtlcAdd, - )) - } - - log.Tracef("received downstream htlc: payment_hash=%x, "+ - "local_log_index=%v, pend_updates=%v", - htlc.PaymentHash[:], index, - l.channel.PendingLocalUpdateCount()) - - pkt.outgoingChanID = l.ShortChanID() - pkt.outgoingHTLCID = index - htlc.ID = index - - log.Debugf("queueing keystone of ADD open circuit: %s->%s", - pkt.inKey(), pkt.outKey()) - - l.openedCircuits = append(l.openedCircuits, pkt.inKey()) - l.keystoneBatch = append(l.keystoneBatch, pkt.keystone()) - - _ = l.cfg.Peer.SendMessage(false, htlc) - - // Send a forward event notification to htlcNotifier. - l.cfg.HtlcNotifier.NotifyForwardingEvent( - newHtlcKey(pkt), - HtlcInfo{ - IncomingTimeLock: pkt.incomingTimeout, - IncomingAmt: pkt.incomingAmount, - OutgoingTimeLock: htlc.Expiry, - OutgoingAmt: htlc.Amount, - }, - getEventType(pkt), - ) - - l.tryBatchUpdateCommitTx() - - return nil -} - -// handleDownstreamPkt processes an HTLC packet sent from the downstream HTLC -// Switch. Possible messages sent by the switch include requests to forward new -// HTLCs, timeout previously cleared HTLCs, and finally to settle currently -// cleared HTLCs with the upstream peer. -// -// TODO(roasbeef): add sync ntfn to ensure switch always has consistent view? -func (l *channelLink) handleDownstreamPkt(pkt *htlcPacket) { - switch htlc := pkt.htlc.(type) { - case *lnwire.UpdateAddHTLC: - // Handle add message. 
The returned error can be ignored, - // because it is also sent through the mailbox. - _ = l.handleDownstreamUpdateAdd(pkt) - - case *lnwire.UpdateFulfillHTLC: - // If hodl.SettleOutgoing mode is active, we exit early to - // simulate arbitrary delays between the switch adding the - // SETTLE to the mailbox, and the HTLC being added to the - // commitment state. - if l.cfg.HodlMask.Active(hodl.SettleOutgoing) { - log.Warnf(hodl.SettleOutgoing.Warning()) - l.mailBox.AckPacket(pkt.inKey()) - return - } - - // An HTLC we forward to the switch has just settled somewhere - // upstream. Therefore we settle the HTLC within the our local - // state machine. - inKey := pkt.inKey() - err := l.channel.SettleHTLC( - htlc.PaymentPreimage, - pkt.incomingHTLCID, - pkt.sourceRef, - pkt.destRef, - &inKey, - ) - if err != nil { - log.Errorf("unable to settle incoming HTLC for "+ - "circuit-key=%v: %v", inKey, err) - - // If the HTLC index for Settle response was not known - // to our commitment state, it has already been - // cleaned up by a prior response. We'll thus try to - // clean up any lingering state to ensure we don't - // continue reforwarding. - errr := er.Wrapped(err) - if _, ok := errr.(lnwallet.ErrUnknownHtlcIndex); ok { - l.cleanupSpuriousResponse(pkt) - } - - // Remove the packet from the link's mailbox to ensure - // it doesn't get replayed after a reconnection. - l.mailBox.AckPacket(inKey) - - return - } - - log.Debugf("queueing removal of SETTLE closed circuit: "+ - "%s->%s", pkt.inKey(), pkt.outKey()) - - l.closedCircuits = append(l.closedCircuits, pkt.inKey()) - - // With the HTLC settled, we'll need to populate the wire - // message to target the specific channel and HTLC to be - // canceled. - htlc.ChanID = l.ChanID() - htlc.ID = pkt.incomingHTLCID - - // Then we send the HTLC settle message to the connected peer - // so we can continue the propagation of the settle message. 
- l.cfg.Peer.SendMessage(false, htlc) - - // Send a settle event notification to htlcNotifier. - l.cfg.HtlcNotifier.NotifySettleEvent( - newHtlcKey(pkt), - getEventType(pkt), - ) - - // Immediately update the commitment tx to minimize latency. - l.updateCommitTxOrFail() - - case *lnwire.UpdateFailHTLC: - // If hodl.FailOutgoing mode is active, we exit early to - // simulate arbitrary delays between the switch adding a FAIL to - // the mailbox, and the HTLC being added to the commitment - // state. - if l.cfg.HodlMask.Active(hodl.FailOutgoing) { - log.Warnf(hodl.FailOutgoing.Warning()) - l.mailBox.AckPacket(pkt.inKey()) - return - } - - // An HTLC cancellation has been triggered somewhere upstream, - // we'll remove then HTLC from our local state machine. - inKey := pkt.inKey() - err := l.channel.FailHTLC( - pkt.incomingHTLCID, - htlc.Reason, - pkt.sourceRef, - pkt.destRef, - &inKey, - ) - if err != nil { - log.Errorf("unable to cancel incoming HTLC for "+ - "circuit-key=%v: %v", inKey, err) - - // If the HTLC index for Fail response was not known to - // our commitment state, it has already been cleaned up - // by a prior response. We'll thus try to clean up any - // lingering state to ensure we don't continue - // reforwarding. - errr := er.Wrapped(err) - if _, ok := errr.(lnwallet.ErrUnknownHtlcIndex); ok { - l.cleanupSpuriousResponse(pkt) - } - - // Remove the packet from the link's mailbox to ensure - // it doesn't get replayed after a reconnection. - l.mailBox.AckPacket(inKey) - - return - } - - log.Debugf("queueing removal of FAIL closed circuit: %s->%s", - pkt.inKey(), pkt.outKey()) - - l.closedCircuits = append(l.closedCircuits, pkt.inKey()) - - // With the HTLC removed, we'll need to populate the wire - // message to target the specific channel and HTLC to be - // canceled. The "Reason" field will have already been set - // within the switch. 
- htlc.ChanID = l.ChanID() - htlc.ID = pkt.incomingHTLCID - - // We send the HTLC message to the peer which initially created - // the HTLC. - l.cfg.Peer.SendMessage(false, htlc) - - // If the packet does not have a link failure set, it failed - // further down the route so we notify a forwarding failure. - // Otherwise, we notify a link failure because it failed at our - // node. - if pkt.linkFailure != nil { - l.cfg.HtlcNotifier.NotifyLinkFailEvent( - newHtlcKey(pkt), - newHtlcInfo(pkt), - getEventType(pkt), - pkt.linkFailure, - false, - ) - } else { - l.cfg.HtlcNotifier.NotifyForwardingFailEvent( - newHtlcKey(pkt), getEventType(pkt), - ) - } - - // Immediately update the commitment tx to minimize latency. - l.updateCommitTxOrFail() - } -} - -// tryBatchUpdateCommitTx updates the commitment transaction if the batch is -// full. -func (l *channelLink) tryBatchUpdateCommitTx() { - if l.channel.PendingLocalUpdateCount() < uint64(l.cfg.BatchSize) { - return - } - - l.updateCommitTxOrFail() -} - -// cleanupSpuriousResponse attempts to ack any AddRef or SettleFailRef -// associated with this packet. If successful in doing so, it will also purge -// the open circuit from the circuit map and remove the packet from the link's -// mailbox. -func (l *channelLink) cleanupSpuriousResponse(pkt *htlcPacket) { - inKey := pkt.inKey() - - log.Debugf("cleaning up spurious response for incoming "+ - "circuit-key=%v", inKey) - - // If the htlc packet doesn't have a source reference, it is unsafe to - // proceed, as skipping this ack may cause the htlc to be reforwarded. - if pkt.sourceRef == nil { - log.Errorf("uanble to cleanup response for incoming "+ - "circuit-key=%v, does not contain source reference", - inKey) - return - } - - // If the source reference is present, we will try to prevent this link - // from resending the packet to the switch. To do so, we ack the AddRef - // of the incoming HTLC belonging to this link. 
- err := l.channel.AckAddHtlcs(*pkt.sourceRef) - if err != nil { - log.Errorf("unable to ack AddRef for incoming "+ - "circuit-key=%v: %v", inKey, err) - - // If this operation failed, it is unsafe to attempt removal of - // the destination reference or circuit, so we exit early. The - // cleanup may proceed with a different packet in the future - // that succeeds on this step. - return - } - - // Now that we know this link will stop retransmitting Adds to the - // switch, we can begin to teardown the response reference and circuit - // map. - // - // If the packet includes a destination reference, then a response for - // this HTLC was locked into the outgoing channel. Attempt to remove - // this reference, so we stop retransmitting the response internally. - // Even if this fails, we will proceed in trying to delete the circuit. - // When retransmitting responses, the destination references will be - // cleaned up if an open circuit is not found in the circuit map. - if pkt.destRef != nil { - err := l.channel.AckSettleFails(*pkt.destRef) - if err != nil { - log.Errorf("unable to ack SettleFailRef "+ - "for incoming circuit-key=%v: %v", - inKey, err) - } - } - - log.Debugf("deleting circuit for incoming circuit-key=%x", inKey) - - // With all known references acked, we can now safely delete the circuit - // from the switch's circuit map, as the state is no longer needed. - err = l.cfg.Circuits.DeleteCircuits(inKey) - if err != nil { - log.Errorf("unable to delete circuit for "+ - "circuit-key=%v: %v", inKey, err) - } -} - -// handleUpstreamMsg processes wire messages related to commitment state -// updates from the upstream peer. The upstream peer is the peer whom we have a -// direct channel with, updating our respective commitment chains. 
-func (l *channelLink) handleUpstreamMsg(msg lnwire.Message) { - switch msg := msg.(type) { - - case *lnwire.UpdateAddHTLC: - // We just received an add request from an upstream peer, so we - // add it to our state machine, then add the HTLC to our - // "settle" list in the event that we know the preimage. - index, err := l.channel.ReceiveHTLC(msg) - if err != nil { - l.fail(LinkFailureError{code: ErrInvalidUpdate}, - "unable to handle upstream add HTLC: %v", err) - return - } - - log.Tracef("receive upstream htlc with payment hash(%x), "+ - "assigning index: %v", msg.PaymentHash[:], index) - - case *lnwire.UpdateFulfillHTLC: - pre := msg.PaymentPreimage - idx := msg.ID - if err := l.channel.ReceiveHTLCSettle(pre, idx); err != nil { - l.fail( - LinkFailureError{ - code: ErrInvalidUpdate, - ForceClose: true, - }, - "unable to handle upstream settle HTLC: %v", err, - ) - return - } - - settlePacket := &htlcPacket{ - outgoingChanID: l.ShortChanID(), - outgoingHTLCID: idx, - htlc: &lnwire.UpdateFulfillHTLC{ - PaymentPreimage: pre, - }, - } - - // Add the newly discovered preimage to our growing list of - // uncommitted preimage. These will be written to the witness - // cache just before accepting the next commitment signature - // from the remote peer. - l.uncommittedPreimages = append(l.uncommittedPreimages, pre) - - // Pipeline this settle, send it to the switch. - go l.forwardBatch(settlePacket) - - case *lnwire.UpdateFailMalformedHTLC: - // Convert the failure type encoded within the HTLC fail - // message to the proper generic lnwire error code. 
- var failure lnwire.FailureMessage - switch msg.FailureCode { - case lnwire.CodeInvalidOnionVersion: - failure = &lnwire.FailInvalidOnionVersion{ - OnionSHA256: msg.ShaOnionBlob, - } - case lnwire.CodeInvalidOnionHmac: - failure = &lnwire.FailInvalidOnionHmac{ - OnionSHA256: msg.ShaOnionBlob, - } - - case lnwire.CodeInvalidOnionKey: - failure = &lnwire.FailInvalidOnionKey{ - OnionSHA256: msg.ShaOnionBlob, - } - default: - log.Warnf("unexpected failure code received in "+ - "UpdateFailMailformedHTLC: %v", msg.FailureCode) - - // We don't just pass back the error we received from - // our successor. Otherwise we might report a failure - // that penalizes us more than needed. If the onion that - // we forwarded was correct, the node should have been - // able to send back its own failure. The node did not - // send back its own failure, so we assume there was a - // problem with the onion and report that back. We reuse - // the invalid onion key failure because there is no - // specific error for this case. - failure = &lnwire.FailInvalidOnionKey{ - OnionSHA256: msg.ShaOnionBlob, - } - } - - // With the error parsed, we'll convert the into it's opaque - // form. - var b bytes.Buffer - if err := lnwire.EncodeFailure(&b, failure, 0); err != nil { - log.Errorf("unable to encode malformed error: %v", err) - return - } - - // If remote side have been unable to parse the onion blob we - // have sent to it, than we should transform the malformed HTLC - // message to the usual HTLC fail message. 
- err := l.channel.ReceiveFailHTLC(msg.ID, b.Bytes()) - if err != nil { - l.fail(LinkFailureError{code: ErrInvalidUpdate}, - "unable to handle upstream fail HTLC: %v", err) - return - } - - case *lnwire.UpdateFailHTLC: - idx := msg.ID - err := l.channel.ReceiveFailHTLC(idx, msg.Reason[:]) - if err != nil { - l.fail(LinkFailureError{code: ErrInvalidUpdate}, - "unable to handle upstream fail HTLC: %v", err) - return - } - - case *lnwire.CommitSig: - // Since we may have learned new preimages for the first time, - // we'll add them to our preimage cache. By doing this, we - // ensure any contested contracts watched by any on-chain - // arbitrators can now sweep this HTLC on-chain. We delay - // committing the preimages until just before accepting the new - // remote commitment, as afterwards the peer won't resend the - // Settle messages on the next channel reestablishment. Doing so - // allows us to more effectively batch this operation, instead - // of doing a single write per preimage. - err := l.cfg.PreimageCache.AddPreimages( - l.uncommittedPreimages..., - ) - if err != nil { - l.fail( - LinkFailureError{code: ErrInternalError}, - "unable to add preimages=%v to cache: %v", - l.uncommittedPreimages, err, - ) - return - } - - // Instead of truncating the slice to conserve memory - // allocations, we simply set the uncommitted preimage slice to - // nil so that a new one will be initialized if any more - // witnesses are discovered. We do this maximum size of the - // slice can occupy 15KB, and want to ensure we release that - // memory back to the runtime. - l.uncommittedPreimages = nil - - // We just received a new updates to our local commitment - // chain, validate this new commitment, closing the link if - // invalid. - err = l.channel.ReceiveNewCommitment(msg.CommitSig, msg.HtlcSigs) - if err != nil { - // If we were unable to reconstruct their proposed - // commitment, then we'll examine the type of error. 
If - // it's an InvalidCommitSigError, then we'll send a - // direct error. - var sendData []byte - errr := er.Wrapped(err) - switch errr.(type) { - case *lnwallet.InvalidCommitSigError: - sendData = []byte(err.String()) - case *lnwallet.InvalidHtlcSigError: - sendData = []byte(err.String()) - } - l.fail( - LinkFailureError{ - code: ErrInvalidCommitment, - ForceClose: true, - SendData: sendData, - }, - "ChannelPoint(%v): unable to accept new "+ - "commitment: %v", - l.channel.ChannelPoint(), err, - ) - return - } - - // As we've just accepted a new state, we'll now - // immediately send the remote peer a revocation for our prior - // state. - nextRevocation, currentHtlcs, err := l.channel.RevokeCurrentCommitment() - if err != nil { - log.Errorf("unable to revoke commitment: %v", err) - return - } - l.cfg.Peer.SendMessage(false, nextRevocation) - - // Since we just revoked our commitment, we may have a new set - // of HTLC's on our commitment, so we'll send them over our - // HTLC update channel so any callers can be notified. - select { - case l.htlcUpdates <- &contractcourt.ContractUpdate{ - HtlcKey: contractcourt.LocalHtlcSet, - Htlcs: currentHtlcs, - }: - case <-l.quit: - return - } - - // If both commitment chains are fully synced from our PoV, - // then we don't need to reply with a signature as both sides - // already have a commitment with the latest accepted. - if !l.channel.OweCommitment(true) { - return - } - - // Otherwise, the remote party initiated the state transition, - // so we'll reply with a signature to provide them with their - // version of the latest commitment. - if !l.updateCommitTxOrFail() { - return - } - - case *lnwire.RevokeAndAck: - // We've received a revocation from the remote chain, if valid, - // this moves the remote chain forward, and expands our - // revocation window. - fwdPkg, adds, settleFails, remoteHTLCs, err := l.channel.ReceiveRevocation( - msg, - ) - if err != nil { - // TODO(halseth): force close? 
- l.fail(LinkFailureError{code: ErrInvalidRevocation}, - "unable to accept revocation: %v", err) - return - } - - // The remote party now has a new primary commitment, so we'll - // update the contract court to be aware of this new set (the - // prior old remote pending). - select { - case l.htlcUpdates <- &contractcourt.ContractUpdate{ - HtlcKey: contractcourt.RemoteHtlcSet, - Htlcs: remoteHTLCs, - }: - case <-l.quit: - return - } - - // If we have a tower client, we'll proceed in backing up the - // state that was just revoked. - // TODO(halseth): support anchor types for watchtower. - state := l.channel.State() - if l.cfg.TowerClient != nil && state.ChanType.HasAnchors() { - log.Warnf("Skipping tower backup for anchor " + - "channel type") - } else if l.cfg.TowerClient != nil && !state.ChanType.HasAnchors() { - breachInfo, err := lnwallet.NewBreachRetribution( - state, state.RemoteCommitment.CommitHeight-1, 0, - ) - if err != nil { - l.fail(LinkFailureError{code: ErrInternalError}, - "failed to load breach info: %v", err) - return - } - - chanType := l.channel.State().ChanType - chanID := l.ChanID() - err = l.cfg.TowerClient.BackupState( - &chanID, breachInfo, chanType.IsTweakless(), - ) - if err != nil { - l.fail(LinkFailureError{code: ErrInternalError}, - "unable to queue breach backup: %v", err) - return - } - } - - l.processRemoteSettleFails(fwdPkg, settleFails) - l.processRemoteAdds(fwdPkg, adds) - - // If the link failed during processing the adds, we must - // return to ensure we won't attempted to update the state - // further. - if l.failed { - return - } - - // The revocation window opened up. If there are pending local - // updates, try to update the commit tx. Pending updates could - // already have been present because of a previously failed - // update to the commit tx or freshly added in by - // processRemoteAdds. 
Also in case there are no local updates, - // but there are still remote updates that are not in the remote - // commit tx yet, send out an update. - if l.channel.OweCommitment(true) { - if !l.updateCommitTxOrFail() { - return - } - } - - case *lnwire.UpdateFee: - // We received fee update from peer. If we are the initiator we - // will fail the channel, if not we will apply the update. - fee := chainfee.SatPerKWeight(msg.FeePerKw) - if err := l.channel.ReceiveUpdateFee(fee); err != nil { - l.fail(LinkFailureError{code: ErrInvalidUpdate}, - "error receiving fee update: %v", err) - return - } - case *lnwire.Error: - // Error received from remote, MUST fail channel, but should - // only print the contents of the error message if all - // characters are printable ASCII. - l.fail( - LinkFailureError{ - code: ErrRemoteError, - - // TODO(halseth): we currently don't fail the - // channel permanently, as there are some sync - // issues with other implementations that will - // lead to them sending an error message, but - // we can recover from on next connection. See - // https://github.com/ElementsProject/lightning/issues/4212 - PermanentFailure: false, - }, - "ChannelPoint(%v): received error from peer: %v", - l.channel.ChannelPoint(), msg.Error(), - ) - default: - log.Warnf("received unknown message of type %T", msg) - } - -} - -// ackDownStreamPackets is responsible for removing htlcs from a link's mailbox -// for packets delivered from server, and cleaning up any circuits closed by -// signing a previous commitment txn. This method ensures that the circuits are -// removed from the circuit map before removing them from the link's mailbox, -// otherwise it could be possible for some circuit to be missed if this link -// flaps. -func (l *channelLink) ackDownStreamPackets() er.R { - // First, remove the downstream Add packets that were included in the - // previous commitment signature. This will prevent the Adds from being - // replayed if this link disconnects. 
- for _, inKey := range l.openedCircuits { - // In order to test the sphinx replay logic of the remote - // party, unsafe replay does not acknowledge the packets from - // the mailbox. We can then force a replay of any Add packets - // held in memory by disconnecting and reconnecting the link. - if l.cfg.UnsafeReplay { - continue - } - - log.Debugf("removing Add packet %s from mailbox", inKey) - l.mailBox.AckPacket(inKey) - } - - // Now, we will delete all circuits closed by the previous commitment - // signature, which is the result of downstream Settle/Fail packets. We - // batch them here to ensure circuits are closed atomically and for - // performance. - err := l.cfg.Circuits.DeleteCircuits(l.closedCircuits...) - switch err { - case nil: - // Successful deletion. - - default: - log.Errorf("unable to delete %d circuits: %v", - len(l.closedCircuits), err) - return err - } - - // With the circuits removed from memory and disk, we now ack any - // Settle/Fails in the mailbox to ensure they do not get redelivered - // after startup. If forgive is enabled and we've reached this point, - // the circuits must have been removed at some point, so it is now safe - // to un-queue the corresponding Settle/Fails. - for _, inKey := range l.closedCircuits { - log.Debugf("removing Fail/Settle packet %s from mailbox", - inKey) - l.mailBox.AckPacket(inKey) - } - - // Lastly, reset our buffers to be empty while keeping any acquired - // growth in the backing array. - l.openedCircuits = l.openedCircuits[:0] - l.closedCircuits = l.closedCircuits[:0] - - return nil -} - -// updateCommitTxOrFail updates the commitment tx and if that fails, it fails -// the link. 
-func (l *channelLink) updateCommitTxOrFail() bool { - if err := l.updateCommitTx(); err != nil { - l.fail(LinkFailureError{code: ErrInternalError}, - "unable to update commitment: %v", err) - return false - } - - return true -} - -// updateCommitTx signs, then sends an update to the remote peer adding a new -// commitment to their commitment chain which includes all the latest updates -// we've received+processed up to this point. -func (l *channelLink) updateCommitTx() er.R { - // Preemptively write all pending keystones to disk, just in case the - // HTLCs we have in memory are included in the subsequent attempt to - // sign a commitment state. - err := l.cfg.Circuits.OpenCircuits(l.keystoneBatch...) - if err != nil { - return err - } - - // Reset the batch, but keep the backing buffer to avoid reallocating. - l.keystoneBatch = l.keystoneBatch[:0] - - // If hodl.Commit mode is active, we will refrain from attempting to - // commit any in-memory modifications to the channel state. Exiting here - // permits testing of either the switch or link's ability to trim - // circuits that have been opened, but unsuccessfully committed. 
- if l.cfg.HodlMask.Active(hodl.Commit) { - log.Warnf(hodl.Commit.Warning()) - return nil - } - - theirCommitSig, htlcSigs, pendingHTLCs, err := l.channel.SignNextCommitment() - if lnwallet.ErrNoWindow.Is(err) { - l.cfg.PendingCommitTicker.Resume() - - log.Tracef("revocation window exhausted, unable to send: "+ - "%v, pend_updates=%v, dangling_closes%v", - l.channel.PendingLocalUpdateCount(), - log.C(func() string { - return spew.Sdump(l.openedCircuits) - }), - log.C(func() string { - return spew.Sdump(l.closedCircuits) - }), - ) - return nil - } else if err != nil { - return err - } - - if err := l.ackDownStreamPackets(); err != nil { - return err - } - - l.cfg.PendingCommitTicker.Pause() - - // The remote party now has a new pending commitment, so we'll update - // the contract court to be aware of this new set (the prior old remote - // pending). - select { - case l.htlcUpdates <- &contractcourt.ContractUpdate{ - HtlcKey: contractcourt.RemotePendingHtlcSet, - Htlcs: pendingHTLCs, - }: - case <-l.quit: - return ErrLinkShuttingDown.Default() - } - - commitSig := &lnwire.CommitSig{ - ChanID: l.ChanID(), - CommitSig: theirCommitSig, - HtlcSigs: htlcSigs, - } - l.cfg.Peer.SendMessage(false, commitSig) - - return nil -} - -// Peer returns the representation of remote peer with which we have the -// channel link opened. -// -// NOTE: Part of the ChannelLink interface. -func (l *channelLink) Peer() lnpeer.Peer { - return l.cfg.Peer -} - -// ChannelPoint returns the channel outpoint for the channel link. -// NOTE: Part of the ChannelLink interface. -func (l *channelLink) ChannelPoint() *wire.OutPoint { - return l.channel.ChannelPoint() -} - -// ShortChanID returns the short channel ID for the channel link. The short -// channel ID encodes the exact location in the main chain that the original -// funding output can be found. -// -// NOTE: Part of the ChannelLink interface. 
-func (l *channelLink) ShortChanID() lnwire.ShortChannelID { - l.RLock() - defer l.RUnlock() - - return l.shortChanID -} - -// UpdateShortChanID updates the short channel ID for a link. This may be -// required in the event that a link is created before the short chan ID for it -// is known, or a re-org occurs, and the funding transaction changes location -// within the chain. -// -// NOTE: Part of the ChannelLink interface. -func (l *channelLink) UpdateShortChanID() (lnwire.ShortChannelID, er.R) { - chanID := l.ChanID() - - // Refresh the channel state's short channel ID by loading it from disk. - // This ensures that the channel state accurately reflects the updated - // short channel ID. - err := l.channel.State().RefreshShortChanID() - if err != nil { - log.Errorf("unable to refresh short_chan_id for chan_id=%v: "+ - "%v", chanID, err) - return hop.Source, err - } - - sid := l.channel.ShortChanID() - - log.Infof("updating to short_chan_id=%v for chan_id=%v", sid, chanID) - - l.Lock() - l.shortChanID = sid - l.Unlock() - - go func() { - err := l.cfg.UpdateContractSignals(&contractcourt.ContractSignals{ - HtlcUpdates: l.htlcUpdates, - ShortChanID: sid, - }) - if err != nil { - log.Errorf("unable to update signals") - } - }() - - // Now that the short channel ID has been properly updated, we can begin - // garbage collecting any forwarding packages we create. - l.wg.Add(1) - go l.fwdPkgGarbager() - - return sid, nil -} - -// ChanID returns the channel ID for the channel link. The channel ID is a more -// compact representation of a channel's full outpoint. -// -// NOTE: Part of the ChannelLink interface. -func (l *channelLink) ChanID() lnwire.ChannelID { - return lnwire.NewChanIDFromOutPoint(l.channel.ChannelPoint()) -} - -// Bandwidth returns the total amount that can flow through the channel link at -// this given instance. 
The value returned is expressed in millisatoshi and can -// be used by callers when making forwarding decisions to determine if a link -// can accept an HTLC. -// -// NOTE: Part of the ChannelLink interface. -func (l *channelLink) Bandwidth() lnwire.MilliSatoshi { - // Get the balance available on the channel for new HTLCs. This takes - // the channel reserve into account so HTLCs up to this value won't - // violate it. - return l.channel.AvailableBalance() -} - -// AttachMailBox updates the current mailbox used by this link, and hooks up -// the mailbox's message and packet outboxes to the link's upstream and -// downstream chans, respectively. -func (l *channelLink) AttachMailBox(mailbox MailBox) { - l.Lock() - l.mailBox = mailbox - l.upstream = mailbox.MessageOutBox() - l.downstream = mailbox.PacketOutBox() - l.Unlock() -} - -// UpdateForwardingPolicy updates the forwarding policy for the target -// ChannelLink. Once updated, the link will use the new forwarding policy to -// govern if it an incoming HTLC should be forwarded or not. We assume that -// fields that are zero are intentionally set to zero, so we'll use newPolicy to -// update all of the link's FwrdingPolicy's values. -// -// NOTE: Part of the ChannelLink interface. -func (l *channelLink) UpdateForwardingPolicy(newPolicy ForwardingPolicy) { - l.Lock() - defer l.Unlock() - - l.cfg.FwrdingPolicy = newPolicy -} - -// CheckHtlcForward should return a nil error if the passed HTLC details -// satisfy the current forwarding policy fo the target link. Otherwise, -// a LinkError with a valid protocol failure message should be returned -// in order to signal to the source of the HTLC, the policy consistency -// issue. -// -// NOTE: Part of the ChannelLink interface. 
-func (l *channelLink) CheckHtlcForward(payHash [32]byte, - incomingHtlcAmt, amtToForward lnwire.MilliSatoshi, - incomingTimeout, outgoingTimeout uint32, - heightNow uint32) *LinkError { - - l.RLock() - policy := l.cfg.FwrdingPolicy - l.RUnlock() - - // First check whether the outgoing htlc satisfies the channel policy. - err := l.canSendHtlc( - policy, payHash, amtToForward, outgoingTimeout, heightNow, - ) - if err != nil { - return err - } - - // Next, using the amount of the incoming HTLC, we'll calculate the - // expected fee this incoming HTLC must carry in order to satisfy the - // constraints of the outgoing link. - expectedFee := ExpectedFee(policy, amtToForward) - - // If the actual fee is less than our expected fee, then we'll reject - // this HTLC as it didn't provide a sufficient amount of fees, or the - // values have been tampered with, or the send used incorrect/dated - // information to construct the forwarding information for this hop. In - // any case, we'll cancel this HTLC. - actualFee := incomingHtlcAmt - amtToForward - if incomingHtlcAmt < amtToForward || actualFee < expectedFee { - log.Errorf("outgoing htlc(%x) has insufficient fee: "+ - "expected %v, got %v", - payHash[:], int64(expectedFee), int64(actualFee)) - - // As part of the returned error, we'll send our latest routing - // policy so the sending node obtains the most up to date data. - failure := l.createFailureWithUpdate( - func(upd *lnwire.ChannelUpdate) lnwire.FailureMessage { - return lnwire.NewFeeInsufficient( - amtToForward, *upd, - ) - }, - ) - return NewLinkError(failure) - } - - // Finally, we'll ensure that the time-lock on the outgoing HTLC meets - // the following constraint: the incoming time-lock minus our time-lock - // delta should equal the outgoing time lock. Otherwise, whether the - // sender messed up, or an intermediate node tampered with the HTLC. 
- timeDelta := policy.TimeLockDelta - if incomingTimeout < outgoingTimeout+timeDelta { - log.Errorf("incoming htlc(%x) has incorrect time-lock value: "+ - "expected at least %v block delta, got %v block delta", - payHash[:], timeDelta, incomingTimeout-outgoingTimeout) - - // Grab the latest routing policy so the sending node is up to - // date with our current policy. - failure := l.createFailureWithUpdate( - func(upd *lnwire.ChannelUpdate) lnwire.FailureMessage { - return lnwire.NewIncorrectCltvExpiry( - incomingTimeout, *upd, - ) - }, - ) - return NewLinkError(failure) - } - - return nil -} - -// CheckHtlcTransit should return a nil error if the passed HTLC details -// satisfy the current channel policy. Otherwise, a LinkError with a -// valid protocol failure message should be returned in order to signal -// the violation. This call is intended to be used for locally initiated -// payments for which there is no corresponding incoming htlc. -func (l *channelLink) CheckHtlcTransit(payHash [32]byte, - amt lnwire.MilliSatoshi, timeout uint32, - heightNow uint32) *LinkError { - - l.RLock() - policy := l.cfg.FwrdingPolicy - l.RUnlock() - - return l.canSendHtlc( - policy, payHash, amt, timeout, heightNow, - ) -} - -// htlcSatifiesPolicyOutgoing checks whether the given htlc parameters satisfy -// the channel's amount and time lock constraints. -func (l *channelLink) canSendHtlc(policy ForwardingPolicy, - payHash [32]byte, amt lnwire.MilliSatoshi, timeout uint32, - heightNow uint32) *LinkError { - - // As our first sanity check, we'll ensure that the passed HTLC isn't - // too small for the next hop. If so, then we'll cancel the HTLC - // directly. - if amt < policy.MinHTLCOut { - log.Errorf("outgoing htlc(%x) is too small: min_htlc=%v, "+ - "htlc_value=%v", payHash[:], policy.MinHTLCOut, - amt) - - // As part of the returned error, we'll send our latest routing - // policy so the sending node obtains the most up to date data. 
- failure := l.createFailureWithUpdate( - func(upd *lnwire.ChannelUpdate) lnwire.FailureMessage { - return lnwire.NewAmountBelowMinimum( - amt, *upd, - ) - }, - ) - return NewLinkError(failure) - } - - // Next, ensure that the passed HTLC isn't too large. If so, we'll - // cancel the HTLC directly. - if policy.MaxHTLC != 0 && amt > policy.MaxHTLC { - log.Errorf("outgoing htlc(%x) is too large: max_htlc=%v, "+ - "htlc_value=%v", payHash[:], policy.MaxHTLC, amt) - - // As part of the returned error, we'll send our latest routing - // policy so the sending node obtains the most up-to-date data. - failure := l.createFailureWithUpdate( - func(upd *lnwire.ChannelUpdate) lnwire.FailureMessage { - return lnwire.NewTemporaryChannelFailure(upd) - }, - ) - return NewDetailedLinkError(failure, OutgoingFailureHTLCExceedsMax) - } - - // We want to avoid offering an HTLC which will expire in the near - // future, so we'll reject an HTLC if the outgoing expiration time is - // too close to the current height. - if timeout <= heightNow+l.cfg.OutgoingCltvRejectDelta { - log.Errorf("htlc(%x) has an expiry that's too soon: "+ - "outgoing_expiry=%v, best_height=%v", payHash[:], - timeout, heightNow) - failure := l.createFailureWithUpdate( - func(upd *lnwire.ChannelUpdate) lnwire.FailureMessage { - return lnwire.NewExpiryTooSoon(*upd) - }, - ) - return NewLinkError(failure) - } - - // Check absolute max delta. - if timeout > l.cfg.MaxOutgoingCltvExpiry+heightNow { - log.Errorf("outgoing htlc(%x) has a time lock too far in "+ - "the future: got %v, but maximum is %v", payHash[:], - timeout-heightNow, l.cfg.MaxOutgoingCltvExpiry) - - return NewLinkError(&lnwire.FailExpiryTooFar{}) - } - - // Check to see if there is enough balance in this channel. 
- if amt > l.Bandwidth() { - failure := l.createFailureWithUpdate( - func(upd *lnwire.ChannelUpdate) lnwire.FailureMessage { - return lnwire.NewTemporaryChannelFailure(upd) - }, - ) - return NewDetailedLinkError( - failure, OutgoingFailureInsufficientBalance, - ) - } - - return nil -} - -// Stats returns the statistics of channel link. -// -// NOTE: Part of the ChannelLink interface. -func (l *channelLink) Stats() (uint64, lnwire.MilliSatoshi, lnwire.MilliSatoshi) { - snapshot := l.channel.StateSnapshot() - - return snapshot.ChannelCommitment.CommitHeight, - snapshot.TotalMSatSent, - snapshot.TotalMSatReceived -} - -// String returns the string representation of channel link. -// -// NOTE: Part of the ChannelLink interface. -func (l *channelLink) String() string { - return l.channel.ChannelPoint().String() -} - -// HandleSwitchPacket handles the switch packets. This packets which might be -// forwarded to us from another channel link in case the htlc update came from -// another peer or if the update was created by user -// -// NOTE: Part of the ChannelLink interface. -func (l *channelLink) HandleSwitchPacket(pkt *htlcPacket) er.R { - log.Tracef("received switch packet inkey=%v, outkey=%v", - pkt.inKey(), pkt.outKey()) - - return l.mailBox.AddPacket(pkt) -} - -// HandleLocalAddPacket handles a locally-initiated UpdateAddHTLC packet. It -// will be processed synchronously. -// -// NOTE: Part of the ChannelLink interface. -func (l *channelLink) HandleLocalAddPacket(pkt *htlcPacket) er.R { - log.Tracef("received switch packet outkey=%v", pkt.outKey()) - - // Create a buffered result channel to prevent the link from blocking. 
- errChan := make(chan er.R, 1) - - select { - case l.localUpdateAdd <- &localUpdateAddMsg{ - pkt: pkt, - err: errChan, - }: - case <-l.quit: - return ErrLinkShuttingDown.Default() - } - - select { - case err := <-errChan: - return err - case <-l.quit: - return ErrLinkShuttingDown.Default() - } -} - -// HandleChannelUpdate handles the htlc requests as settle/add/fail which sent -// to us from remote peer we have a channel with. -// -// NOTE: Part of the ChannelLink interface. -func (l *channelLink) HandleChannelUpdate(message lnwire.Message) { - l.mailBox.AddMessage(message) -} - -// updateChannelFee updates the commitment fee-per-kw on this channel by -// committing to an update_fee message. -func (l *channelLink) updateChannelFee(feePerKw chainfee.SatPerKWeight) er.R { - - log.Infof("updating commit fee to %v sat/kw", feePerKw) - - // We skip sending the UpdateFee message if the channel is not - // currently eligible to forward messages. - if !l.EligibleToForward() { - log.Debugf("skipping fee update for inactive channel") - return nil - } - - // First, we'll update the local fee on our commitment. - if err := l.channel.UpdateFee(feePerKw); err != nil { - return err - } - - // We'll then attempt to send a new UpdateFee message, and also lock it - // in immediately by triggering a commitment update. - msg := lnwire.NewUpdateFee(l.ChanID(), uint32(feePerKw)) - if err := l.cfg.Peer.SendMessage(false, msg); err != nil { - return err - } - return l.updateCommitTx() -} - -// processRemoteSettleFails accepts a batch of settle/fail payment descriptors -// after receiving a revocation from the remote party, and reprocesses them in -// the context of the provided forwarding package. Any settles or fails that -// have already been acknowledged in the forwarding package will not be sent to -// the switch. 
-func (l *channelLink) processRemoteSettleFails(fwdPkg *channeldb.FwdPkg, - settleFails []*lnwallet.PaymentDescriptor) { - - if len(settleFails) == 0 { - return - } - - log.Debugf("settle-fail-filter %v", fwdPkg.SettleFailFilter) - - var switchPackets []*htlcPacket - for i, pd := range settleFails { - // Skip any settles or fails that have already been - // acknowledged by the incoming link that originated the - // forwarded Add. - if fwdPkg.SettleFailFilter.Contains(uint16(i)) { - continue - } - - // TODO(roasbeef): rework log entries to a shared - // interface. - - switch pd.EntryType { - - // A settle for an HTLC we previously forwarded HTLC has been - // received. So we'll forward the HTLC to the switch which will - // handle propagating the settle to the prior hop. - case lnwallet.Settle: - // If hodl.SettleIncoming is requested, we will not - // forward the SETTLE to the switch and will not signal - // a free slot on the commitment transaction. - if l.cfg.HodlMask.Active(hodl.SettleIncoming) { - log.Warnf(hodl.SettleIncoming.Warning()) - continue - } - - settlePacket := &htlcPacket{ - outgoingChanID: l.ShortChanID(), - outgoingHTLCID: pd.ParentIndex, - destRef: pd.DestRef, - htlc: &lnwire.UpdateFulfillHTLC{ - PaymentPreimage: pd.RPreimage, - }, - } - - // Add the packet to the batch to be forwarded, and - // notify the overflow queue that a spare spot has been - // freed up within the commitment state. - switchPackets = append(switchPackets, settlePacket) - - // A failureCode message for a previously forwarded HTLC has - // been received. As a result a new slot will be freed up in - // our commitment state, so we'll forward this to the switch so - // the backwards undo can continue. - case lnwallet.Fail: - // If hodl.SettleIncoming is requested, we will not - // forward the FAIL to the switch and will not signal a - // free slot on the commitment transaction. 
- if l.cfg.HodlMask.Active(hodl.FailIncoming) { - log.Warnf(hodl.FailIncoming.Warning()) - continue - } - - // Fetch the reason the HTLC was canceled so we can - // continue to propagate it. This failure originated - // from another node, so the linkFailure field is not - // set on the packet. - failPacket := &htlcPacket{ - outgoingChanID: l.ShortChanID(), - outgoingHTLCID: pd.ParentIndex, - destRef: pd.DestRef, - htlc: &lnwire.UpdateFailHTLC{ - Reason: lnwire.OpaqueReason( - pd.FailReason, - ), - }, - } - - // If the failure message lacks an HMAC (but includes - // the 4 bytes for encoding the message and padding - // lengths, then this means that we received it as an - // UpdateFailMalformedHTLC. As a result, we'll signal - // that we need to convert this error within the switch - // to an actual error, by encrypting it as if we were - // the originating hop. - convertedErrorSize := lnwire.FailureMessageLength + 4 - if len(pd.FailReason) == convertedErrorSize { - failPacket.convertedError = true - } - - // Add the packet to the batch to be forwarded, and - // notify the overflow queue that a spare spot has been - // freed up within the commitment state. - switchPackets = append(switchPackets, failPacket) - } - } - - // Only spawn the task forward packets we have a non-zero number. - if len(switchPackets) > 0 { - go l.forwardBatch(switchPackets...) - } -} - -// processRemoteAdds serially processes each of the Add payment descriptors -// which have been "locked-in" by receiving a revocation from the remote party. -// The forwarding package provided instructs how to process this batch, -// indicating whether this is the first time these Adds are being processed, or -// whether we are reprocessing as a result of a failure or restart. Adds that -// have already been acknowledged in the forwarding package will be ignored. 
-func (l *channelLink) processRemoteAdds(fwdPkg *channeldb.FwdPkg, - lockedInHtlcs []*lnwallet.PaymentDescriptor) { - - log.Tracef("processing %d remote adds for height %d", - len(lockedInHtlcs), fwdPkg.Height) - - decodeReqs := make( - []hop.DecodeHopIteratorRequest, 0, len(lockedInHtlcs), - ) - for _, pd := range lockedInHtlcs { - switch pd.EntryType { - - // TODO(conner): remove type switch? - case lnwallet.Add: - // Before adding the new htlc to the state machine, - // parse the onion object in order to obtain the - // routing information with DecodeHopIterator function - // which process the Sphinx packet. - onionReader := bytes.NewReader(pd.OnionBlob) - - req := hop.DecodeHopIteratorRequest{ - OnionReader: onionReader, - RHash: pd.RHash[:], - IncomingCltv: pd.Timeout, - } - - decodeReqs = append(decodeReqs, req) - } - } - - // Atomically decode the incoming htlcs, simultaneously checking for - // replay attempts. A particular index in the returned, spare list of - // channel iterators should only be used if the failure code at the - // same index is lnwire.FailCodeNone. - decodeResps, sphinxErr := l.cfg.DecodeHopIterators( - fwdPkg.ID(), decodeReqs, - ) - if sphinxErr != nil { - l.fail(LinkFailureError{code: ErrInternalError}, - "unable to decode hop iterators: %v", sphinxErr) - return - } - - var switchPackets []*htlcPacket - - for i, pd := range lockedInHtlcs { - idx := uint16(i) - - if fwdPkg.State == channeldb.FwdStateProcessed && - fwdPkg.AckFilter.Contains(idx) { - - // If this index is already found in the ack filter, - // the response to this forwarding decision has already - // been committed by one of our commitment txns. ADDs - // in this state are waiting for the rest of the fwding - // package to get acked before being garbage collected. - continue - } - - // An incoming HTLC add has been full-locked in. 
As a result we - // can now examine the forwarding details of the HTLC, and the - // HTLC itself to decide if: we should forward it, cancel it, - // or are able to settle it (and it adheres to our fee related - // constraints). - - // Fetch the onion blob that was included within this processed - // payment descriptor. - var onionBlob [lnwire.OnionPacketSize]byte - copy(onionBlob[:], pd.OnionBlob) - - // Before adding the new htlc to the state machine, parse the - // onion object in order to obtain the routing information with - // DecodeHopIterator function which process the Sphinx packet. - chanIterator, failureCode := decodeResps[i].Result() - if failureCode != lnwire.CodeNone { - // If we're unable to process the onion blob than we - // should send the malformed htlc error to payment - // sender. - l.sendMalformedHTLCError(pd.HtlcIndex, failureCode, - onionBlob[:], pd.SourceRef) - - log.Errorf("unable to decode onion hop "+ - "iterator: %v", failureCode) - continue - } - - // Retrieve onion obfuscator from onion blob in order to - // produce initial obfuscation of the onion failureCode. - obfuscator, failureCode := chanIterator.ExtractErrorEncrypter( - l.cfg.ExtractErrorEncrypter, - ) - if failureCode != lnwire.CodeNone { - // If we're unable to process the onion blob than we - // should send the malformed htlc error to payment - // sender. - l.sendMalformedHTLCError( - pd.HtlcIndex, failureCode, onionBlob[:], pd.SourceRef, - ) - - log.Errorf("unable to decode onion "+ - "obfuscator: %v", failureCode) - continue - } - - heightNow := l.cfg.Switch.BestHeight() - - pld, err := chanIterator.HopPayload() - if err != nil { - // If we're unable to process the onion payload, or we - // received invalid onion payload failure, then we - // should send an error back to the caller so the HTLC - // can be canceled. 
- var failedType uint64 - if e, ok := er.Wrapped(err).(hop.ErrInvalidPayload); ok { - failedType = uint64(e.Type) - } - - // TODO: currently none of the test unit infrastructure - // is setup to handle TLV payloads, so testing this - // would require implementing a separate mock iterator - // for TLV payloads that also supports injecting invalid - // payloads. Deferring this non-trival effort till a - // later date - failure := lnwire.NewInvalidOnionPayload(failedType, 0) - l.sendHTLCError( - pd, NewLinkError(failure), obfuscator, false, - ) - - log.Errorf("unable to decode forwarding "+ - "instructions: %v", err) - continue - } - - fwdInfo := pld.ForwardingInfo() - - switch fwdInfo.NextHop { - case hop.Exit: - err := l.processExitHop( - pd, obfuscator, fwdInfo, heightNow, pld, - ) - if err != nil { - l.fail(LinkFailureError{code: ErrInternalError}, - err.String(), - ) - - return - } - - // There are additional channels left within this route. So - // we'll simply do some forwarding package book-keeping. - default: - // If hodl.AddIncoming is requested, we will not - // validate the forwarded ADD, nor will we send the - // packet to the htlc switch. - if l.cfg.HodlMask.Active(hodl.AddIncoming) { - log.Warnf(hodl.AddIncoming.Warning()) - continue - } - - switch fwdPkg.State { - case channeldb.FwdStateProcessed: - // This add was not forwarded on the previous - // processing phase, run it through our - // validation pipeline to reproduce an error. - // This may trigger a different error due to - // expiring timelocks, but we expect that an - // error will be reproduced. - if !fwdPkg.FwdFilter.Contains(idx) { - break - } - - // Otherwise, it was already processed, we can - // can collect it and continue. - addMsg := &lnwire.UpdateAddHTLC{ - Expiry: fwdInfo.OutgoingCTLV, - Amount: fwdInfo.AmountToForward, - PaymentHash: pd.RHash, - } - - // Finally, we'll encode the onion packet for - // the _next_ hop using the hop iterator - // decoded for the current hop. 
- buf := bytes.NewBuffer(addMsg.OnionBlob[0:0]) - - // We know this cannot fail, as this ADD - // was marked forwarded in a previous - // round of processing. - chanIterator.EncodeNextHop(buf) - - updatePacket := &htlcPacket{ - incomingChanID: l.ShortChanID(), - incomingHTLCID: pd.HtlcIndex, - outgoingChanID: fwdInfo.NextHop, - sourceRef: pd.SourceRef, - incomingAmount: pd.Amount, - amount: addMsg.Amount, - htlc: addMsg, - obfuscator: obfuscator, - incomingTimeout: pd.Timeout, - outgoingTimeout: fwdInfo.OutgoingCTLV, - customRecords: pld.CustomRecords(), - } - switchPackets = append( - switchPackets, updatePacket, - ) - - continue - } - - // TODO(roasbeef): ensure don't accept outrageous - // timeout for htlc - - // With all our forwarding constraints met, we'll - // create the outgoing HTLC using the parameters as - // specified in the forwarding info. - addMsg := &lnwire.UpdateAddHTLC{ - Expiry: fwdInfo.OutgoingCTLV, - Amount: fwdInfo.AmountToForward, - PaymentHash: pd.RHash, - } - - // Finally, we'll encode the onion packet for the - // _next_ hop using the hop iterator decoded for the - // current hop. - buf := bytes.NewBuffer(addMsg.OnionBlob[0:0]) - err := chanIterator.EncodeNextHop(buf) - if err != nil { - log.Errorf("unable to encode the "+ - "remaining route %v", err) - - failure := l.createFailureWithUpdate( - func(upd *lnwire.ChannelUpdate) lnwire.FailureMessage { - return lnwire.NewTemporaryChannelFailure( - upd, - ) - }, - ) - - l.sendHTLCError( - pd, NewLinkError(failure), obfuscator, false, - ) - continue - } - - // Now that this add has been reprocessed, only append - // it to our list of packets to forward to the switch - // this is the first time processing the add. If the - // fwd pkg has already been processed, then we entered - // the above section to recreate a previous error. If - // the packet had previously been forwarded, it would - // have been added to switchPackets at the top of this - // section. 
- if fwdPkg.State == channeldb.FwdStateLockedIn { - updatePacket := &htlcPacket{ - incomingChanID: l.ShortChanID(), - incomingHTLCID: pd.HtlcIndex, - outgoingChanID: fwdInfo.NextHop, - sourceRef: pd.SourceRef, - incomingAmount: pd.Amount, - amount: addMsg.Amount, - htlc: addMsg, - obfuscator: obfuscator, - incomingTimeout: pd.Timeout, - outgoingTimeout: fwdInfo.OutgoingCTLV, - customRecords: pld.CustomRecords(), - } - - fwdPkg.FwdFilter.Set(idx) - switchPackets = append(switchPackets, - updatePacket) - } - } - } - - // Commit the htlcs we are intending to forward if this package has not - // been fully processed. - if fwdPkg.State == channeldb.FwdStateLockedIn { - err := l.channel.SetFwdFilter(fwdPkg.Height, fwdPkg.FwdFilter) - if err != nil { - l.fail(LinkFailureError{code: ErrInternalError}, - "unable to set fwd filter: %v", err) - return - } - } - - if len(switchPackets) == 0 { - return - } - - log.Debugf("forwarding %d packets to switch", len(switchPackets)) - - // NOTE: This call is made synchronous so that we ensure all circuits - // are committed in the exact order that they are processed in the link. - // Failing to do this could cause reorderings/gaps in the range of - // opened circuits, which violates assumptions made by the circuit - // trimming. - l.forwardBatch(switchPackets...) -} - -// processExitHop handles an htlc for which this link is the exit hop. It -// returns a boolean indicating whether the commitment tx needs an update. -func (l *channelLink) processExitHop(pd *lnwallet.PaymentDescriptor, - obfuscator hop.ErrorEncrypter, fwdInfo hop.ForwardingInfo, - heightNow uint32, payload invoices.Payload) er.R { - - // If hodl.ExitSettle is requested, we will not validate the final hop's - // ADD, nor will we settle the corresponding invoice or respond with the - // preimage. 
- if l.cfg.HodlMask.Active(hodl.ExitSettle) { - log.Warnf(hodl.ExitSettle.Warning()) - - return nil - } - - // As we're the exit hop, we'll double check the hop-payload included in - // the HTLC to ensure that it was crafted correctly by the sender and - // matches the HTLC we were extended. - if pd.Amount != fwdInfo.AmountToForward { - - log.Errorf("onion payload of incoming htlc(%x) has incorrect "+ - "value: expected %v, got %v", pd.RHash, - pd.Amount, fwdInfo.AmountToForward) - - failure := NewLinkError( - lnwire.NewFinalIncorrectHtlcAmount(pd.Amount), - ) - l.sendHTLCError(pd, failure, obfuscator, true) - - return nil - } - - // We'll also ensure that our time-lock value has been computed - // correctly. - if pd.Timeout != fwdInfo.OutgoingCTLV { - log.Errorf("onion payload of incoming htlc(%x) has incorrect "+ - "time-lock: expected %v, got %v", - pd.RHash[:], pd.Timeout, fwdInfo.OutgoingCTLV) - - failure := NewLinkError( - lnwire.NewFinalIncorrectCltvExpiry(pd.Timeout), - ) - l.sendHTLCError(pd, failure, obfuscator, true) - - return nil - } - - // Notify the invoiceRegistry of the exit hop htlc. If we crash right - // after this, this code will be re-executed after restart. We will - // receive back a resolution event. - invoiceHash := lntypes.Hash(pd.RHash) - - circuitKey := channeldb.CircuitKey{ - ChanID: l.ShortChanID(), - HtlcID: pd.HtlcIndex, - } - - event, err := l.cfg.Registry.NotifyExitHopHtlc( - invoiceHash, pd.Amount, pd.Timeout, int32(heightNow), - circuitKey, l.hodlQueue.ChanIn(), payload, - ) - if err != nil { - return err - } - - // Create a hodlHtlc struct and decide either resolved now or later. - htlc := hodlHtlc{ - pd: pd, - obfuscator: obfuscator, - } - - // If the event is nil, the invoice is being held, so we save payment - // descriptor for future reference. - if event == nil { - l.hodlMap[circuitKey] = htlc - return nil - } - - // Process the received resolution. 
- return l.processHtlcResolution(event, htlc) -} - -// settleHTLC settles the HTLC on the channel. -func (l *channelLink) settleHTLC(preimage lntypes.Preimage, - pd *lnwallet.PaymentDescriptor) er.R { - - hash := preimage.Hash() - - log.Infof("settling htlc %v as exit hop", hash) - - err := l.channel.SettleHTLC( - preimage, pd.HtlcIndex, pd.SourceRef, nil, nil, - ) - if err != nil { - return er.Errorf("unable to settle htlc: %v", err) - } - - // If the link is in hodl.BogusSettle mode, replace the preimage with a - // fake one before sending it to the peer. - if l.cfg.HodlMask.Active(hodl.BogusSettle) { - log.Warnf(hodl.BogusSettle.Warning()) - preimage = [32]byte{} - copy(preimage[:], bytes.Repeat([]byte{2}, 32)) - } - - // HTLC was successfully settled locally send notification about it - // remote peer. - l.cfg.Peer.SendMessage(false, &lnwire.UpdateFulfillHTLC{ - ChanID: l.ChanID(), - ID: pd.HtlcIndex, - PaymentPreimage: preimage, - }) - - // Once we have successfully settled the htlc, notify a settle event. - l.cfg.HtlcNotifier.NotifySettleEvent( - HtlcKey{ - IncomingCircuit: channeldb.CircuitKey{ - ChanID: l.ShortChanID(), - HtlcID: pd.HtlcIndex, - }, - }, - HtlcEventTypeReceive, - ) - - return nil -} - -// forwardBatch forwards the given htlcPackets to the switch, and waits on the -// err chan for the individual responses. This method is intended to be spawned -// as a goroutine so the responses can be handled in the background. -func (l *channelLink) forwardBatch(packets ...*htlcPacket) { - // Don't forward packets for which we already have a response in our - // mailbox. This could happen if a packet fails and is buffered in the - // mailbox, and the incoming link flaps. 
- var filteredPkts = make([]*htlcPacket, 0, len(packets)) - for _, pkt := range packets { - if l.mailBox.HasPacket(pkt.inKey()) { - continue - } - - filteredPkts = append(filteredPkts, pkt) - } - - if err := l.cfg.ForwardPackets(l.quit, filteredPkts...); err != nil { - log.Errorf("Unhandled error while reforwarding htlc "+ - "settle/fail over htlcswitch: %v", err) - } -} - -// sendHTLCError functions cancels HTLC and send cancel message back to the -// peer from which HTLC was received. -func (l *channelLink) sendHTLCError(pd *lnwallet.PaymentDescriptor, - failure *LinkError, e hop.ErrorEncrypter, isReceive bool) { - - reason, err := e.EncryptFirstHop(failure.WireMessage()) - if err != nil { - log.Errorf("unable to obfuscate error: %v", err) - return - } - - err = l.channel.FailHTLC(pd.HtlcIndex, reason, pd.SourceRef, nil, nil) - if err != nil { - log.Errorf("unable cancel htlc: %v", err) - return - } - - l.cfg.Peer.SendMessage(false, &lnwire.UpdateFailHTLC{ - ChanID: l.ChanID(), - ID: pd.HtlcIndex, - Reason: reason, - }) - - // Notify a link failure on our incoming link. Outgoing htlc information - // is not available at this point, because we have not decrypted the - // onion, so it is excluded. - var eventType HtlcEventType - if isReceive { - eventType = HtlcEventTypeReceive - } else { - eventType = HtlcEventTypeForward - } - - l.cfg.HtlcNotifier.NotifyLinkFailEvent( - HtlcKey{ - IncomingCircuit: channeldb.CircuitKey{ - ChanID: l.ShortChanID(), - HtlcID: pd.HtlcIndex, - }, - }, - HtlcInfo{ - IncomingTimeLock: pd.Timeout, - IncomingAmt: pd.Amount, - }, - eventType, - failure, - true, - ) -} - -// sendMalformedHTLCError helper function which sends the malformed HTLC update -// to the payment sender. 
-func (l *channelLink) sendMalformedHTLCError(htlcIndex uint64, - code lnwire.FailCode, onionBlob []byte, sourceRef *channeldb.AddRef) { - - shaOnionBlob := sha256.Sum256(onionBlob) - err := l.channel.MalformedFailHTLC(htlcIndex, code, shaOnionBlob, sourceRef) - if err != nil { - log.Errorf("unable cancel htlc: %v", err) - return - } - - l.cfg.Peer.SendMessage(false, &lnwire.UpdateFailMalformedHTLC{ - ChanID: l.ChanID(), - ID: htlcIndex, - ShaOnionBlob: shaOnionBlob, - FailureCode: code, - }) -} - -// fail is a function which is used to encapsulate the action necessary for -// properly failing the link. It takes a LinkFailureError, which will be passed -// to the OnChannelFailure closure, in order for it to determine if we should -// force close the channel, and if we should send an error message to the -// remote peer. -func (l *channelLink) fail(linkErr LinkFailureError, - format string, a ...interface{}) { - reason := errors.Errorf(format, a...) - - // Return if we have already notified about a failure. - if l.failed { - log.Warnf("ignoring link failure (%v), as link already "+ - "failed", reason) - return - } - - log.Errorf("failing link: %s with error: %v", reason, linkErr) - - // Set failed, such that we won't process any more updates, and notify - // the peer about the failure. 
- l.failed = true - l.cfg.OnChannelFailure(l.ChanID(), l.ShortChanID(), linkErr) -} diff --git a/lnd/htlcswitch/link_isolated_test.go b/lnd/htlcswitch/link_isolated_test.go deleted file mode 100644 index 81dd85da..00000000 --- a/lnd/htlcswitch/link_isolated_test.go +++ /dev/null @@ -1,268 +0,0 @@ -package htlcswitch - -import ( - "crypto/sha256" - "testing" - "time" - - "github.com/pkt-cash/pktd/lnd/lntypes" - "github.com/pkt-cash/pktd/lnd/lnwallet" - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -type linkTestContext struct { - t *testing.T - - aliceLink ChannelLink - bobChannel *lnwallet.LightningChannel - aliceMsgs <-chan lnwire.Message -} - -// sendHtlcBobToAlice sends an HTLC from Bob to Alice, that pays to a preimage -// already in Alice's registry. -func (l *linkTestContext) sendHtlcBobToAlice(htlc *lnwire.UpdateAddHTLC) { - l.t.Helper() - - _, err := l.bobChannel.AddHTLC(htlc, nil) - if err != nil { - l.t.Fatalf("bob failed adding htlc: %v", err) - } - - l.aliceLink.HandleChannelUpdate(htlc) -} - -// sendHtlcAliceToBob sends an HTLC from Alice to Bob, by first committing the -// HTLC in the circuit map, then delivering the outgoing packet to Alice's link. -// The HTLC will be sent to Bob via Alice's message stream. 
-func (l *linkTestContext) sendHtlcAliceToBob(htlcID int, - htlc *lnwire.UpdateAddHTLC) { - - l.t.Helper() - - circuitMap := l.aliceLink.(*channelLink).cfg.Switch.circuits - fwdActions, err := circuitMap.CommitCircuits( - &PaymentCircuit{ - Incoming: CircuitKey{ - HtlcID: uint64(htlcID), - }, - PaymentHash: htlc.PaymentHash, - }, - ) - if err != nil { - l.t.Fatalf("unable to commit circuit: %v", err) - } - - if len(fwdActions.Adds) != 1 { - l.t.Fatalf("expected 1 adds, found %d", len(fwdActions.Adds)) - } - - err = l.aliceLink.HandleSwitchPacket(&htlcPacket{ - incomingHTLCID: uint64(htlcID), - htlc: htlc, - }) - if err != nil { - l.t.Fatal(err) - } -} - -// receiveHtlcAliceToBob pulls the next message from Alice's message stream, -// asserts that it is an UpdateAddHTLC, then applies it to Bob's state machine. -func (l *linkTestContext) receiveHtlcAliceToBob() { - l.t.Helper() - - var msg lnwire.Message - select { - case msg = <-l.aliceMsgs: - case <-time.After(15 * time.Second): - l.t.Fatalf("did not received htlc from alice") - } - - htlcAdd, ok := msg.(*lnwire.UpdateAddHTLC) - if !ok { - l.t.Fatalf("expected UpdateAddHTLC, got %T", msg) - } - - _, err := l.bobChannel.ReceiveHTLC(htlcAdd) - if err != nil { - l.t.Fatalf("bob failed receiving htlc: %v", err) - } -} - -// sendCommitSigBobToAlice makes Bob sign a new commitment and send it to -// Alice, asserting that it signs expHtlcs number of HTLCs. 
-func (l *linkTestContext) sendCommitSigBobToAlice(expHtlcs int) { - l.t.Helper() - - sig, htlcSigs, _, err := l.bobChannel.SignNextCommitment() - if err != nil { - l.t.Fatalf("error signing commitment: %v", err) - } - - commitSig := &lnwire.CommitSig{ - CommitSig: sig, - HtlcSigs: htlcSigs, - } - - if len(commitSig.HtlcSigs) != expHtlcs { - l.t.Fatalf("Expected %d htlc sigs, got %d", expHtlcs, - len(commitSig.HtlcSigs)) - } - - l.aliceLink.HandleChannelUpdate(commitSig) -} - -// receiveRevAndAckAliceToBob waits for Alice to send a RevAndAck to Bob, then -// hands this to Bob. -func (l *linkTestContext) receiveRevAndAckAliceToBob() { - l.t.Helper() - - var msg lnwire.Message - select { - case msg = <-l.aliceMsgs: - case <-time.After(15 * time.Second): - l.t.Fatalf("did not receive message") - } - - rev, ok := msg.(*lnwire.RevokeAndAck) - if !ok { - l.t.Fatalf("expected RevokeAndAck, got %T", msg) - } - - _, _, _, _, err := l.bobChannel.ReceiveRevocation(rev) - if err != nil { - l.t.Fatalf("bob failed receiving revocation: %v", err) - } -} - -// receiveCommitSigAliceToBob waits for Alice to send a CommitSig to Bob, -// signing expHtlcs numbers of HTLCs, then hands this to Bob. -func (l *linkTestContext) receiveCommitSigAliceToBob(expHtlcs int) { - l.t.Helper() - - comSig := l.receiveCommitSigAlice(expHtlcs) - - err := l.bobChannel.ReceiveNewCommitment( - comSig.CommitSig, comSig.HtlcSigs, - ) - if err != nil { - l.t.Fatalf("bob failed receiving commitment: %v", err) - } -} - -// receiveCommitSigAlice waits for Alice to send a CommitSig, signing expHtlcs -// numbers of HTLCs. 
-func (l *linkTestContext) receiveCommitSigAlice(expHtlcs int) *lnwire.CommitSig { - l.t.Helper() - - var msg lnwire.Message - select { - case msg = <-l.aliceMsgs: - case <-time.After(15 * time.Second): - l.t.Fatalf("did not receive message") - } - - comSig, ok := msg.(*lnwire.CommitSig) - if !ok { - l.t.Fatalf("expected CommitSig, got %T", msg) - } - - if len(comSig.HtlcSigs) != expHtlcs { - l.t.Fatalf("expected %d htlc sigs, got %d", expHtlcs, - len(comSig.HtlcSigs)) - } - - return comSig -} - -// sendRevAndAckBobToAlice make Bob revoke his current commitment, then hand -// the RevokeAndAck to Alice. -func (l *linkTestContext) sendRevAndAckBobToAlice() { - l.t.Helper() - - rev, _, err := l.bobChannel.RevokeCurrentCommitment() - if err != nil { - l.t.Fatalf("unable to revoke commitment: %v", err) - } - - l.aliceLink.HandleChannelUpdate(rev) -} - -// receiveSettleAliceToBob waits for Alice to send a HTLC settle message to -// Bob, then hands this to Bob. -func (l *linkTestContext) receiveSettleAliceToBob() { - l.t.Helper() - - var msg lnwire.Message - select { - case msg = <-l.aliceMsgs: - case <-time.After(15 * time.Second): - l.t.Fatalf("did not receive message") - } - - settleMsg, ok := msg.(*lnwire.UpdateFulfillHTLC) - if !ok { - l.t.Fatalf("expected UpdateFulfillHTLC, got %T", msg) - } - - err := l.bobChannel.ReceiveHTLCSettle(settleMsg.PaymentPreimage, - settleMsg.ID) - if err != nil { - l.t.Fatalf("failed settling htlc: %v", err) - } -} - -// sendSettleBobToAlice settles an HTLC on Bob's state machine, then sends an -// UpdateFulfillHTLC message to Alice's upstream inbox. 
-func (l *linkTestContext) sendSettleBobToAlice(htlcID uint64, - preimage lntypes.Preimage) { - - l.t.Helper() - - err := l.bobChannel.SettleHTLC(preimage, htlcID, nil, nil, nil) - if err != nil { - l.t.Fatalf("alice failed settling htlc id=%d hash=%x", - htlcID, sha256.Sum256(preimage[:])) - } - - settle := &lnwire.UpdateFulfillHTLC{ - ID: htlcID, - PaymentPreimage: preimage, - } - - l.aliceLink.HandleChannelUpdate(settle) -} - -// receiveSettleAliceToBob waits for Alice to send a HTLC settle message to -// Bob, then hands this to Bob. -func (l *linkTestContext) receiveFailAliceToBob() { - l.t.Helper() - - var msg lnwire.Message - select { - case msg = <-l.aliceMsgs: - case <-time.After(15 * time.Second): - l.t.Fatalf("did not receive message") - } - - failMsg, ok := msg.(*lnwire.UpdateFailHTLC) - if !ok { - l.t.Fatalf("expected UpdateFailHTLC, got %T", msg) - } - - err := l.bobChannel.ReceiveFailHTLC(failMsg.ID, failMsg.Reason) - if err != nil { - l.t.Fatalf("unable to apply received fail htlc: %v", err) - } -} - -// assertNoMsgFromAlice asserts that Alice hasn't sent a message. Before -// calling, make sure that Alice has had the opportunity to send the message. 
-func (l *linkTestContext) assertNoMsgFromAlice(timeout time.Duration) { - l.t.Helper() - - select { - case msg := <-l.aliceMsgs: - l.t.Fatalf("unexpected message from Alice: %v", msg) - case <-time.After(timeout): - } -} diff --git a/lnd/htlcswitch/link_test.go b/lnd/htlcswitch/link_test.go deleted file mode 100644 index ee13f74e..00000000 --- a/lnd/htlcswitch/link_test.go +++ /dev/null @@ -1,6293 +0,0 @@ -package htlcswitch - -import ( - "bytes" - "crypto/rand" - "crypto/sha256" - "encoding/binary" - "fmt" - "net" - "os" - "reflect" - "runtime" - "sync" - "testing" - "time" - - "github.com/davecgh/go-spew/spew" - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/chaincfg/globalcfg" - sphinx "github.com/pkt-cash/pktd/lightning-onion" - "github.com/pkt-cash/pktd/lnd/build" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/lnd/contractcourt" - "github.com/pkt-cash/pktd/lnd/htlcswitch/hodl" - "github.com/pkt-cash/pktd/lnd/htlcswitch/hop" - "github.com/pkt-cash/pktd/lnd/input" - "github.com/pkt-cash/pktd/lnd/lnpeer" - "github.com/pkt-cash/pktd/lnd/lntest/wait" - "github.com/pkt-cash/pktd/lnd/lntypes" - "github.com/pkt-cash/pktd/lnd/lnwallet" - "github.com/pkt-cash/pktd/lnd/lnwallet/chainfee" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/ticker" - "github.com/pkt-cash/pktd/wire" -) - -const ( - testStartingHeight = 100 - testDefaultDelta = 6 -) - -// concurrentTester is a thread-safe wrapper around the Fatalf method of a -// *testing.T instance. With this wrapper multiple goroutines can safely -// attempt to fail a test concurrently. 
-type concurrentTester struct { - mtx sync.Mutex - *testing.T -} - -func newConcurrentTester(t *testing.T) *concurrentTester { - return &concurrentTester{ - T: t, - } -} - -func (c *concurrentTester) Fatalf(format string, args ...interface{}) { - c.T.Helper() - - c.mtx.Lock() - defer c.mtx.Unlock() - - c.T.Fatalf(format, args...) -} - -// messageToString is used to produce less spammy log messages in trace mode by -// setting the 'Curve" parameter to nil. Doing this avoids printing out each of -// the field elements in the curve parameters for secp256k1. -func messageToString(msg lnwire.Message) string { - switch m := msg.(type) { - case *lnwire.RevokeAndAck: - m.NextRevocationKey.Curve = nil - case *lnwire.AcceptChannel: - m.FundingKey.Curve = nil - m.RevocationPoint.Curve = nil - m.PaymentPoint.Curve = nil - m.DelayedPaymentPoint.Curve = nil - m.FirstCommitmentPoint.Curve = nil - case *lnwire.OpenChannel: - m.FundingKey.Curve = nil - m.RevocationPoint.Curve = nil - m.PaymentPoint.Curve = nil - m.DelayedPaymentPoint.Curve = nil - m.FirstCommitmentPoint.Curve = nil - case *lnwire.FundingLocked: - m.NextPerCommitmentPoint.Curve = nil - } - - return spew.Sdump(msg) -} - -// expectedMessage struct holds the message which travels from one peer to -// another, and additional information like, should this message we skipped for -// handling. -type expectedMessage struct { - from string - to string - message lnwire.Message - skip bool -} - -// createLogFunc is a helper function which returns the function which will be -// used for logging message are received from another peer. 
-func createLogFunc(name string, channelID lnwire.ChannelID) messageInterceptor { - return func(m lnwire.Message) (bool, er.R) { - chanID, err := getChanID(m) - if err != nil { - return false, err - } - - if chanID == channelID { - fmt.Printf("---------------------- \n %v received: "+ - "%v", name, messageToString(m)) - } - return false, nil - } -} - -// createInterceptorFunc creates the function by the given set of messages -// which, checks the order of the messages and skip the ones which were -// indicated to be intercepted. -func createInterceptorFunc(prefix, receiver string, messages []expectedMessage, - chanID lnwire.ChannelID, debug bool) messageInterceptor { - - // Filter message which should be received with given peer name. - var expectToReceive []expectedMessage - for _, message := range messages { - if message.to == receiver { - expectToReceive = append(expectToReceive, message) - } - } - - // Return function which checks the message order and skip the - // messages. - return func(m lnwire.Message) (bool, er.R) { - messageChanID, err := getChanID(m) - if err != nil { - return false, err - } - - if messageChanID == chanID { - if len(expectToReceive) == 0 { - return false, er.Errorf("%v received "+ - "unexpected message out of range: %v", - receiver, m.MsgType()) - } - - expectedMessage := expectToReceive[0] - expectToReceive = expectToReceive[1:] - - if expectedMessage.message.MsgType() != m.MsgType() { - return false, er.Errorf("%v received wrong message: \n"+ - "real: %v\nexpected: %v", receiver, m.MsgType(), - expectedMessage.message.MsgType()) - } - - if debug { - var postfix string - if revocation, ok := m.(*lnwire.RevokeAndAck); ok { - var zeroHash chainhash.Hash - if bytes.Equal(zeroHash[:], revocation.Revocation[:]) { - postfix = "- empty revocation" - } - } - - if expectedMessage.skip { - fmt.Printf("skipped: %v: %v %v \n", prefix, - m.MsgType(), postfix) - } else { - fmt.Printf("%v: %v %v \n", prefix, m.MsgType(), postfix) - } - } - - return 
expectedMessage.skip, nil - } - return false, nil - } -} - -// TestChannelLinkSingleHopPayment in this test we checks the interaction -// between Alice and Bob within scope of one channel. -func TestChannelLinkSingleHopPayment(t *testing.T) { - t.Parallel() - - // Setup a alice-bob network. - alice, bob, cleanUp, err := createTwoClusterChannels( - btcutil.UnitsPerCoin()*3, - btcutil.UnitsPerCoin()*5) - if err != nil { - t.Fatalf("unable to create channel: %v", err) - } - defer cleanUp() - - n := newTwoHopNetwork( - t, alice.channel, bob.channel, testStartingHeight, - ) - if err := n.start(); err != nil { - t.Fatal(err) - } - defer n.stop() - - aliceBandwidthBefore := n.aliceChannelLink.Bandwidth() - bobBandwidthBefore := n.bobChannelLink.Bandwidth() - - debug := false - if debug { - // Log message that alice receives. - n.aliceServer.intersect(createLogFunc("alice", - n.aliceChannelLink.ChanID())) - - // Log message that bob receives. - n.bobServer.intersect(createLogFunc("bob", - n.bobChannelLink.ChanID())) - } - - amount := lnwire.NewMSatFromSatoshis(btcutil.UnitsPerCoin()) - htlcAmt, totalTimelock, hops := generateHops(amount, testStartingHeight, - n.bobChannelLink) - - // Wait for: - // * HTLC add request to be sent to bob. - // * alice<->bob commitment state to be updated. - // * settle request to be sent back from bob to alice. - // * alice<->bob commitment state to be updated. - // * user notification to be sent. - receiver := n.bobServer - firstHop := n.bobChannelLink.ShortChanID() - rhash, err := makePayment( - n.aliceServer, receiver, firstHop, hops, amount, htlcAmt, - totalTimelock, - ).Wait(30 * time.Second) - if err != nil { - t.Fatalf("unable to make the payment: %v", err) - } - - // Wait for Alice to receive the revocation. - // - // TODO(roasbeef); replace with select over returned err chan - time.Sleep(2 * time.Second) - - // Check that alice invoice was settled and bandwidth of HTLC - // links was changed. 
- invoice, err := receiver.registry.LookupInvoice(rhash) - if err != nil { - t.Fatalf("unable to get invoice: %v", err) - } - if invoice.State != channeldb.ContractSettled { - t.Fatal("alice invoice wasn't settled") - } - - if aliceBandwidthBefore-amount != n.aliceChannelLink.Bandwidth() { - t.Fatal("alice bandwidth should have decrease on payment " + - "amount") - } - - if bobBandwidthBefore+amount != n.bobChannelLink.Bandwidth() { - t.Fatalf("bob bandwidth isn't match: expected %v, got %v", - bobBandwidthBefore+amount, - n.bobChannelLink.Bandwidth()) - } -} - -// TestChannelLinkMultiHopPayment checks the ability to send payment over two -// hops. In this test we send the payment from Carol to Alice over Bob peer. -// (Carol -> Bob -> Alice) and checking that HTLC was settled properly and -// balances were changed in two channels. -// -// The test is executed with two different OutgoingCltvRejectDelta values for -// bob. In addition to a normal positive value, we also test the zero case -// because this is currently the configured value in lnd -// (defaultOutgoingCltvRejectDelta). 
-func TestChannelLinkMultiHopPayment(t *testing.T) { - t.Run( - "bobOutgoingCltvRejectDelta 3", - func(t *testing.T) { - testChannelLinkMultiHopPayment(t, 3) - }, - ) - t.Run( - "bobOutgoingCltvRejectDelta 0", - func(t *testing.T) { - testChannelLinkMultiHopPayment(t, 0) - }, - ) -} - -func testChannelLinkMultiHopPayment(t *testing.T, - bobOutgoingCltvRejectDelta uint32) { - - t.Parallel() - - channels, cleanUp, _, err := createClusterChannels( - btcutil.UnitsPerCoin()*3, - btcutil.UnitsPerCoin()*5) - if err != nil { - t.Fatalf("unable to create channel: %v", err) - } - defer cleanUp() - - n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice, - channels.bobToCarol, channels.carolToBob, testStartingHeight) - - n.firstBobChannelLink.cfg.OutgoingCltvRejectDelta = - bobOutgoingCltvRejectDelta - - n.secondBobChannelLink.cfg.OutgoingCltvRejectDelta = - bobOutgoingCltvRejectDelta - - if err := n.start(); err != nil { - t.Fatal(err) - } - defer n.stop() - - carolBandwidthBefore := n.carolChannelLink.Bandwidth() - firstBobBandwidthBefore := n.firstBobChannelLink.Bandwidth() - secondBobBandwidthBefore := n.secondBobChannelLink.Bandwidth() - aliceBandwidthBefore := n.aliceChannelLink.Bandwidth() - - debug := false - if debug { - // Log messages that alice receives from bob. - n.aliceServer.intersect(createLogFunc("[alice]<-bob<-carol: ", - n.aliceChannelLink.ChanID())) - - // Log messages that bob receives from alice. - n.bobServer.intersect(createLogFunc("alice->[bob]->carol: ", - n.firstBobChannelLink.ChanID())) - - // Log messages that bob receives from carol. - n.bobServer.intersect(createLogFunc("alice<-[bob]<-carol: ", - n.secondBobChannelLink.ChanID())) - - // Log messages that carol receives from bob. 
- n.carolServer.intersect(createLogFunc("alice->bob->[carol]", - n.carolChannelLink.ChanID())) - } - - amount := lnwire.NewMSatFromSatoshis(btcutil.UnitsPerCoin()) - htlcAmt, totalTimelock, hops := generateHops(amount, - testStartingHeight, - n.firstBobChannelLink, n.carolChannelLink) - - // Wait for: - // * HTLC add request to be sent from Alice to Bob. - // * Alice<->Bob commitment states to be updated. - // * HTLC add request to be propagated to Carol. - // * Bob<->Carol commitment state to be updated. - // * settle request to be sent back from Carol to Bob. - // * Alice<->Bob commitment state to be updated. - // * settle request to be sent back from Bob to Alice. - // * Alice<->Bob commitment states to be updated. - // * user notification to be sent. - receiver := n.carolServer - firstHop := n.firstBobChannelLink.ShortChanID() - rhash, err := makePayment( - n.aliceServer, n.carolServer, firstHop, hops, amount, htlcAmt, - totalTimelock, - ).Wait(30 * time.Second) - if err != nil { - t.Fatalf("unable to send payment: %v", err) - } - - // Wait for Alice and Bob's second link to receive the revocation. - time.Sleep(2 * time.Second) - - // Check that Carol invoice was settled and bandwidth of HTLC - // links were changed. 
- invoice, err := receiver.registry.LookupInvoice(rhash) - if err != nil { - t.Fatalf("unable to get invoice: %v", err) - } - if invoice.State != channeldb.ContractSettled { - t.Fatal("carol invoice haven't been settled") - } - - expectedAliceBandwidth := aliceBandwidthBefore - htlcAmt - if expectedAliceBandwidth != n.aliceChannelLink.Bandwidth() { - t.Fatalf("channel bandwidth incorrect: expected %v, got %v", - expectedAliceBandwidth, n.aliceChannelLink.Bandwidth()) - } - - expectedBobBandwidth1 := firstBobBandwidthBefore + htlcAmt - if expectedBobBandwidth1 != n.firstBobChannelLink.Bandwidth() { - t.Fatalf("channel bandwidth incorrect: expected %v, got %v", - expectedBobBandwidth1, n.firstBobChannelLink.Bandwidth()) - } - - expectedBobBandwidth2 := secondBobBandwidthBefore - amount - if expectedBobBandwidth2 != n.secondBobChannelLink.Bandwidth() { - t.Fatalf("channel bandwidth incorrect: expected %v, got %v", - expectedBobBandwidth2, n.secondBobChannelLink.Bandwidth()) - } - - expectedCarolBandwidth := carolBandwidthBefore + amount - if expectedCarolBandwidth != n.carolChannelLink.Bandwidth() { - t.Fatalf("channel bandwidth incorrect: expected %v, got %v", - expectedCarolBandwidth, n.carolChannelLink.Bandwidth()) - } -} - -// TestChannelLinkCancelFullCommitment tests the ability for links to cancel -// forwarded HTLCs once all of their commitment slots are full. -func TestChannelLinkCancelFullCommitment(t *testing.T) { - t.Parallel() - - channels, cleanUp, _, err := createClusterChannels( - btcutil.UnitsPerCoin()*3, - btcutil.UnitsPerCoin()*5) - if err != nil { - t.Fatalf("unable to create channel: %v", err) - } - defer cleanUp() - - n := newTwoHopNetwork( - t, channels.aliceToBob, channels.bobToAlice, testStartingHeight, - ) - if err := n.start(); err != nil { - t.Fatal(err) - } - defer n.stop() - - // Fill up the commitment from Alice's side with 20 sat payments. 
- count := (input.MaxHTLCNumber / 2) - amt := lnwire.NewMSatFromSatoshis(20000) - - htlcAmt, totalTimelock, hopsForwards := generateHops(amt, - testStartingHeight, n.bobChannelLink) - - firstHop := n.aliceChannelLink.ShortChanID() - - // Create channels to buffer the preimage and error channels used in - // making the preliminary payments. - preimages := make([]lntypes.Preimage, count) - aliceErrChan := make(chan chan er.R, count) - - var wg sync.WaitGroup - for i := 0; i < count; i++ { - // Deterministically generate preimages. Avoid the all-zeroes - // preimage because that will be rejected by the database. - preimages[i] = lntypes.Preimage{byte(i >> 8), byte(i), 1} - - wg.Add(1) - go func(i int) { - defer wg.Done() - - errChan := n.makeHoldPayment( - n.aliceServer, n.bobServer, firstHop, - hopsForwards, amt, htlcAmt, totalTimelock, - preimages[i], - ) - aliceErrChan <- errChan - }(i) - } - - // Wait for Alice to finish filling her commitment. - wg.Wait() - close(aliceErrChan) - - // Now make an additional payment from Alice to Bob, this should be - // canceled because the commitment in this direction is full. - err = <-makePayment( - n.aliceServer, n.bobServer, firstHop, hopsForwards, amt, - htlcAmt, totalTimelock, - ).err - if err == nil { - t.Fatalf("overflow payment should have failed") - } - errr := er.Wrapped(err) - lerr, ok := errr.(*LinkError) - if !ok { - t.Fatalf("expected LinkError, got: %T", err) - } - - msg := lerr.WireMessage() - if _, ok := msg.(*lnwire.FailTemporaryChannelFailure); !ok { - t.Fatalf("expected TemporaryChannelFailure, got: %T", msg) - } - - // Now, settle all htlcs held by bob and clear the commitment of htlcs. - for _, preimage := range preimages { - preimage := preimage - - // It's possible that the HTLCs have not been delivered to the - // invoice registry at this point, so we poll until we are able - // to settle. 
- err = wait.NoError(func() er.R { - return n.bobServer.registry.SettleHodlInvoice(preimage) - }, time.Minute) - if err != nil { - t.Fatal(err) - } - } - - // Ensure that all of the payments sent by alice eventually succeed. - for errChan := range aliceErrChan { - err := <-errChan - if err != nil { - t.Fatalf("alice payment failed: %v", err) - } - } -} - -// TestExitNodeTimelockPayloadMismatch tests that when an exit node receives an -// incoming HTLC, if the time lock encoded in the payload of the forwarded HTLC -// doesn't match the expected payment value, then the HTLC will be rejected -// with the appropriate error. -func TestExitNodeTimelockPayloadMismatch(t *testing.T) { - t.Parallel() - - channels, cleanUp, _, err := createClusterChannels( - btcutil.UnitsPerCoin()*5, - btcutil.UnitsPerCoin()*5) - if err != nil { - t.Fatalf("unable to create channel: %v", err) - } - defer cleanUp() - - n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice, - channels.bobToCarol, channels.carolToBob, testStartingHeight) - if err := n.start(); err != nil { - t.Fatal(err) - } - defer n.stop() - - amount := lnwire.NewMSatFromSatoshis(btcutil.UnitsPerCoin()) - htlcAmt, htlcExpiry, hops := generateHops(amount, - testStartingHeight, n.firstBobChannelLink) - - // In order to exercise this case, we'll now _manually_ modify the - // per-hop payload for outgoing time lock to be the incorrect value. - // The proper value of the outgoing CLTV should be the policy set by - // the receiving node, instead we set it to be a random value. 
- hops[0].FwdInfo.OutgoingCTLV = 500 - firstHop := n.firstBobChannelLink.ShortChanID() - _, err = makePayment( - n.aliceServer, n.bobServer, firstHop, hops, amount, htlcAmt, - htlcExpiry, - ).Wait(30 * time.Second) - if err == nil { - t.Fatalf("payment should have failed but didn't") - } - - errr := er.Wrapped(err) - rtErr, ok := errr.(ClearTextError) - if !ok { - t.Fatalf("expected a ClearTextError, instead got: %T", err) - } - - switch rtErr.WireMessage().(type) { - case *lnwire.FailFinalIncorrectCltvExpiry: - default: - t.Fatalf("incorrect error, expected incorrect cltv expiry, "+ - "instead have: %v", err) - } -} - -// TestExitNodeAmountPayloadMismatch tests that when an exit node receives an -// incoming HTLC, if the amount encoded in the onion payload of the forwarded -// HTLC doesn't match the expected payment value, then the HTLC will be -// rejected. -func TestExitNodeAmountPayloadMismatch(t *testing.T) { - t.Parallel() - - channels, cleanUp, _, err := createClusterChannels( - btcutil.UnitsPerCoin()*5, - btcutil.UnitsPerCoin()*5) - if err != nil { - t.Fatalf("unable to create channel: %v", err) - } - defer cleanUp() - - n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice, - channels.bobToCarol, channels.carolToBob, testStartingHeight) - if err := n.start(); err != nil { - t.Fatal(err) - } - defer n.stop() - - amount := lnwire.NewMSatFromSatoshis(btcutil.UnitsPerCoin()) - htlcAmt, htlcExpiry, hops := generateHops(amount, testStartingHeight, - n.firstBobChannelLink) - - // In order to exercise this case, we'll now _manually_ modify the - // per-hop payload for amount to be the incorrect value. The proper - // value of the amount to forward should be the amount that the - // receiving node expects to receive. 
- hops[0].FwdInfo.AmountToForward = 1 - firstHop := n.firstBobChannelLink.ShortChanID() - _, err = makePayment( - n.aliceServer, n.bobServer, firstHop, hops, amount, htlcAmt, - htlcExpiry, - ).Wait(30 * time.Second) - if err == nil { - t.Fatalf("payment should have failed but didn't") - } - assertFailureCode(t, err, lnwire.CodeFinalIncorrectHtlcAmount) -} - -// TestLinkForwardTimelockPolicyMismatch tests that if a node is an -// intermediate node in a multi-hop payment, and receives an HTLC which -// violates its specified multi-hop policy, then the HTLC is rejected. -func TestLinkForwardTimelockPolicyMismatch(t *testing.T) { - t.Parallel() - - channels, cleanUp, _, err := createClusterChannels( - btcutil.UnitsPerCoin()*5, - btcutil.UnitsPerCoin()*5) - if err != nil { - t.Fatalf("unable to create channel: %v", err) - } - defer cleanUp() - - n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice, - channels.bobToCarol, channels.carolToBob, testStartingHeight) - if err := n.start(); err != nil { - t.Fatal(err) - } - defer n.stop() - - // We'll be sending 1 BTC over a 2-hop (3 vertex) route. - amount := lnwire.NewMSatFromSatoshis(btcutil.UnitsPerCoin()) - - // Generate the route over two hops, ignoring the total time lock that - // we'll need to use for the first HTLC in order to have a sufficient - // time-lock value to account for the decrements over the entire route. - htlcAmt, htlcExpiry, hops := generateHops(amount, testStartingHeight, - n.firstBobChannelLink, n.carolChannelLink) - htlcExpiry -= 2 - - // Next, we'll make the payment which'll send an HTLC with our - // specified parameters to the first hop in the route. - firstHop := n.firstBobChannelLink.ShortChanID() - _, err = makePayment( - n.aliceServer, n.carolServer, firstHop, hops, amount, htlcAmt, - htlcExpiry, - ).Wait(30 * time.Second) - - // We should get an error, and that error should indicate that the HTLC - // should be rejected due to a policy violation. 
- if err == nil { - t.Fatalf("payment should have failed but didn't") - } - errr := er.Wrapped(err) - rtErr, ok := errr.(ClearTextError) - if !ok { - t.Fatalf("expected a ClearTextError, instead got: %T", err) - } - - switch rtErr.WireMessage().(type) { - case *lnwire.FailIncorrectCltvExpiry: - default: - t.Fatalf("incorrect error, expected incorrect cltv expiry, "+ - "instead have: %v", err) - } -} - -// TestLinkForwardFeePolicyMismatch tests that if a node is an intermediate -// node in a multi-hop payment and receives an HTLC that violates its current -// fee policy, then the HTLC is rejected with the proper error. -func TestLinkForwardFeePolicyMismatch(t *testing.T) { - t.Parallel() - - channels, cleanUp, _, err := createClusterChannels( - btcutil.UnitsPerCoin()*3, - btcutil.UnitsPerCoin()*5) - if err != nil { - t.Fatalf("unable to create channel: %v", err) - } - defer cleanUp() - - n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice, - channels.bobToCarol, channels.carolToBob, testStartingHeight) - if err := n.start(); err != nil { - t.Fatal(err) - } - defer n.stop() - - // We'll be sending 1 BTC over a 2-hop (3 vertex) route. Given the - // current default fee of 1 SAT, if we just send a single BTC over in - // an HTLC, it should be rejected. - amountNoFee := lnwire.NewMSatFromSatoshis(btcutil.UnitsPerCoin()) - - // Generate the route over two hops, ignoring the amount we _should_ - // actually send in order to be able to cover fees. - _, htlcExpiry, hops := generateHops(amountNoFee, testStartingHeight, - n.firstBobChannelLink, n.carolChannelLink) - - // Next, we'll make the payment which'll send an HTLC with our - // specified parameters to the first hop in the route. 
- firstHop := n.firstBobChannelLink.ShortChanID() - _, err = makePayment( - n.aliceServer, n.bobServer, firstHop, hops, amountNoFee, - amountNoFee, htlcExpiry, - ).Wait(30 * time.Second) - - // We should get an error, and that error should indicate that the HTLC - // should be rejected due to a policy violation. - if err == nil { - t.Fatalf("payment should have failed but didn't") - } - - errr := er.Wrapped(err) - rtErr, ok := errr.(ClearTextError) - if !ok { - t.Fatalf("expected a ClearTextError, instead got: %T", err) - } - - switch rtErr.WireMessage().(type) { - case *lnwire.FailFeeInsufficient: - default: - t.Fatalf("incorrect error, expected fee insufficient, "+ - "instead have: %T", err) - } -} - -// TestLinkForwardFeePolicyMismatch tests that if a node is an intermediate -// node and receives an HTLC which is _below_ its min HTLC policy, then the -// HTLC will be rejected. -func TestLinkForwardMinHTLCPolicyMismatch(t *testing.T) { - t.Parallel() - - channels, cleanUp, _, err := createClusterChannels( - btcutil.UnitsPerCoin()*5, - btcutil.UnitsPerCoin()*5) - if err != nil { - t.Fatalf("unable to create channel: %v", err) - } - defer cleanUp() - - n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice, - channels.bobToCarol, channels.carolToBob, testStartingHeight) - if err := n.start(); err != nil { - t.Fatal(err) - } - defer n.stop() - - // The current default global min HTLC policy set in the default config - // for the three-hop-network is 5 SAT. So in order to trigger this - // failure mode, we'll create an HTLC with 1 satoshi. - amountNoFee := lnwire.NewMSatFromSatoshis(1) - - // With the amount set, we'll generate a route over 2 hops within the - // network that attempts to pay out our specified amount. 
- htlcAmt, htlcExpiry, hops := generateHops(amountNoFee, testStartingHeight, - n.firstBobChannelLink, n.carolChannelLink) - - // Next, we'll make the payment which'll send an HTLC with our - // specified parameters to the first hop in the route. - firstHop := n.firstBobChannelLink.ShortChanID() - _, err = makePayment( - n.aliceServer, n.bobServer, firstHop, hops, amountNoFee, - htlcAmt, htlcExpiry, - ).Wait(30 * time.Second) - - // We should get an error, and that error should indicate that the HTLC - // should be rejected due to a policy violation (below min HTLC). - if err == nil { - t.Fatalf("payment should have failed but didn't") - } - - errr := er.Wrapped(err) - rtErr, ok := errr.(ClearTextError) - if !ok { - t.Fatalf("expected a ClearTextError, instead got: %T", err) - } - - switch rtErr.WireMessage().(type) { - case *lnwire.FailAmountBelowMinimum: - default: - t.Fatalf("incorrect error, expected amount below minimum, "+ - "instead have: %v", err) - } -} - -// TestLinkForwardMaxHTLCPolicyMismatch tests that if a node is an intermediate -// node and receives an HTLC which is _above_ its max HTLC policy then the -// HTLC will be rejected. -func TestLinkForwardMaxHTLCPolicyMismatch(t *testing.T) { - t.Parallel() - - channels, cleanUp, _, err := createClusterChannels( - btcutil.UnitsPerCoin()*5, btcutil.UnitsPerCoin()*5, - ) - if err != nil { - t.Fatalf("unable to create channel: %v", err) - } - defer cleanUp() - - n := newThreeHopNetwork( - t, channels.aliceToBob, channels.bobToAlice, channels.bobToCarol, - channels.carolToBob, testStartingHeight, - ) - if err := n.start(); err != nil { - t.Fatal(err) - } - defer n.stop() - - // In order to trigger this failure mode, we'll update our policy to have - // a new max HTLC of 10 satoshis. - maxHtlc := lnwire.NewMSatFromSatoshis(10) - - // First we'll generate a route over 2 hops within the network that - // attempts to pay out an amount greater than the max HTLC we're about to - // set. 
- amountNoFee := maxHtlc + 1 - htlcAmt, htlcExpiry, hops := generateHops( - amountNoFee, testStartingHeight, n.firstBobChannelLink, - n.carolChannelLink, - ) - - // We'll now update Bob's policy to set the max HTLC we chose earlier. - n.secondBobChannelLink.cfg.FwrdingPolicy.MaxHTLC = maxHtlc - - // Finally, we'll make the payment which'll send an HTLC with our - // specified parameters. - firstHop := n.firstBobChannelLink.ShortChanID() - _, err = makePayment( - n.aliceServer, n.carolServer, firstHop, hops, amountNoFee, - htlcAmt, htlcExpiry, - ).Wait(30 * time.Second) - - // We should get an error indicating a temporary channel failure, The - // failure is temporary because this payment would be allowed if Bob - // updated his policy to increase the max HTLC. - if err == nil { - t.Fatalf("payment should have failed but didn't") - } - - errr := er.Wrapped(err) - rtErr, ok := errr.(ClearTextError) - if !ok { - t.Fatalf("expected a ClearTextError, instead got: %T", err) - } - - switch rtErr.WireMessage().(type) { - case *lnwire.FailTemporaryChannelFailure: - default: - t.Fatalf("incorrect error, expected temporary channel failure, "+ - "instead have: %v", err) - } -} - -// TestUpdateForwardingPolicy tests that the forwarding policy for a link is -// able to be updated properly. We'll first create an HTLC that meets the -// specified policy, assert that it succeeds, update the policy (to invalidate -// the prior HTLC), and then ensure that the HTLC is rejected. 
-func TestUpdateForwardingPolicy(t *testing.T) { - t.Parallel() - - channels, cleanUp, _, err := createClusterChannels( - btcutil.UnitsPerCoin()*5, - btcutil.UnitsPerCoin()*5) - if err != nil { - t.Fatalf("unable to create channel: %v", err) - } - defer cleanUp() - - n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice, - channels.bobToCarol, channels.carolToBob, testStartingHeight) - if err := n.start(); err != nil { - t.Fatal(err) - } - defer n.stop() - - carolBandwidthBefore := n.carolChannelLink.Bandwidth() - firstBobBandwidthBefore := n.firstBobChannelLink.Bandwidth() - secondBobBandwidthBefore := n.secondBobChannelLink.Bandwidth() - aliceBandwidthBefore := n.aliceChannelLink.Bandwidth() - - amountNoFee := lnwire.NewMSatFromSatoshis(10) - htlcAmt, htlcExpiry, hops := generateHops(amountNoFee, - testStartingHeight, - n.firstBobChannelLink, n.carolChannelLink) - - // First, send this 10 mSAT payment over the three hops, the payment - // should succeed, and all balances should be updated accordingly. - firstHop := n.firstBobChannelLink.ShortChanID() - payResp, err := makePayment( - n.aliceServer, n.carolServer, firstHop, hops, amountNoFee, - htlcAmt, htlcExpiry, - ).Wait(30 * time.Second) - if err != nil { - t.Fatalf("unable to send payment: %v", err) - } - - // Carol's invoice should now be shown as settled as the payment - // succeeded. 
- invoice, err := n.carolServer.registry.LookupInvoice(payResp) - if err != nil { - t.Fatalf("unable to get invoice: %v", err) - } - if invoice.State != channeldb.ContractSettled { - t.Fatal("carol invoice haven't been settled") - } - - expectedAliceBandwidth := aliceBandwidthBefore - htlcAmt - if expectedAliceBandwidth != n.aliceChannelLink.Bandwidth() { - t.Fatalf("channel bandwidth incorrect: expected %v, got %v", - expectedAliceBandwidth, n.aliceChannelLink.Bandwidth()) - } - expectedBobBandwidth1 := firstBobBandwidthBefore + htlcAmt - if expectedBobBandwidth1 != n.firstBobChannelLink.Bandwidth() { - t.Fatalf("channel bandwidth incorrect: expected %v, got %v", - expectedBobBandwidth1, n.firstBobChannelLink.Bandwidth()) - } - expectedBobBandwidth2 := secondBobBandwidthBefore - amountNoFee - if expectedBobBandwidth2 != n.secondBobChannelLink.Bandwidth() { - t.Fatalf("channel bandwidth incorrect: expected %v, got %v", - expectedBobBandwidth2, n.secondBobChannelLink.Bandwidth()) - } - expectedCarolBandwidth := carolBandwidthBefore + amountNoFee - if expectedCarolBandwidth != n.carolChannelLink.Bandwidth() { - t.Fatalf("channel bandwidth incorrect: expected %v, got %v", - expectedCarolBandwidth, n.carolChannelLink.Bandwidth()) - } - - // Now we'll update Bob's policy to jack up his free rate to an extent - // that'll cause him to reject the same HTLC that we just sent. - // - // TODO(roasbeef): should implement grace period within link policy - // update logic - newPolicy := n.globalPolicy - newPolicy.BaseFee = lnwire.NewMSatFromSatoshis(1000) - n.secondBobChannelLink.UpdateForwardingPolicy(newPolicy) - - // Next, we'll send the payment again, using the exact same per-hop - // payload for each node. This payment should fail as it won't factor - // in Bob's new fee policy. 
- _, err = makePayment( - n.aliceServer, n.carolServer, firstHop, hops, amountNoFee, - htlcAmt, htlcExpiry, - ).Wait(30 * time.Second) - if err == nil { - t.Fatalf("payment should've been rejected") - } - - errr := er.Wrapped(err) - rtErr, ok := errr.(ClearTextError) - if !ok { - t.Fatalf("expected a ClearTextError, instead got (%T): %v", err, err) - } - - switch rtErr.WireMessage().(type) { - case *lnwire.FailFeeInsufficient: - default: - t.Fatalf("expected FailFeeInsufficient instead got: %v", err) - } - - // Reset the policy so we can then test updating the max HTLC policy. - n.secondBobChannelLink.UpdateForwardingPolicy(n.globalPolicy) - - // As a sanity check, ensure the original payment now succeeds again. - _, err = makePayment( - n.aliceServer, n.carolServer, firstHop, hops, amountNoFee, - htlcAmt, htlcExpiry, - ).Wait(30 * time.Second) - if err != nil { - t.Fatalf("unable to send payment: %v", err) - } - - // Now we'll update Bob's policy to lower his max HTLC to an extent - // that'll cause him to reject the same HTLC that we just sent. - newPolicy = n.globalPolicy - newPolicy.MaxHTLC = amountNoFee - 1 - n.secondBobChannelLink.UpdateForwardingPolicy(newPolicy) - - // Next, we'll send the payment again, using the exact same per-hop - // payload for each node. This payment should fail as it won't factor - // in Bob's new max HTLC policy. 
- _, err = makePayment( - n.aliceServer, n.carolServer, firstHop, hops, amountNoFee, - htlcAmt, htlcExpiry, - ).Wait(30 * time.Second) - if err == nil { - t.Fatalf("payment should've been rejected") - } - - errr = er.Wrapped(err) - rtErr, ok = errr.(ClearTextError) - if !ok { - t.Fatalf("expected a ClearTextError, instead got (%T): %v", - err, err) - } - - switch rtErr.WireMessage().(type) { - case *lnwire.FailTemporaryChannelFailure: - default: - t.Fatalf("expected TemporaryChannelFailure, instead got: %v", - err) - } -} - -// TestChannelLinkMultiHopInsufficientPayment checks that we receive error if -// bob<->alice channel has insufficient BTC capacity/bandwidth. In this test we -// send the payment from Carol to Alice over Bob peer. (Carol -> Bob -> Alice) -func TestChannelLinkMultiHopInsufficientPayment(t *testing.T) { - t.Parallel() - - channels, cleanUp, _, err := createClusterChannels( - btcutil.UnitsPerCoin()*3, - btcutil.UnitsPerCoin()*5) - if err != nil { - t.Fatalf("unable to create channel: %v", err) - } - defer cleanUp() - - n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice, - channels.bobToCarol, channels.carolToBob, testStartingHeight) - if err := n.start(); err != nil { - t.Fatalf("unable to start three hop network: %v", err) - } - defer n.stop() - - carolBandwidthBefore := n.carolChannelLink.Bandwidth() - firstBobBandwidthBefore := n.firstBobChannelLink.Bandwidth() - secondBobBandwidthBefore := n.secondBobChannelLink.Bandwidth() - aliceBandwidthBefore := n.aliceChannelLink.Bandwidth() - - // We'll attempt to send 4 BTC although the alice-to-bob channel only - // has 3 BTC total capacity. As a result, this payment should be - // rejected. - amount := lnwire.NewMSatFromSatoshis(4 * btcutil.UnitsPerCoin()) - htlcAmt, totalTimelock, hops := generateHops(amount, testStartingHeight, - n.firstBobChannelLink, n.carolChannelLink) - - // Wait for: - // * HTLC add request to be sent to from Alice to Bob. 
- // * Alice<->Bob commitment states to be updated. - // * Bob trying to add HTLC add request in Bob<->Carol channel. - // * Cancel HTLC request to be sent back from Bob to Alice. - // * user notification to be sent. - - receiver := n.carolServer - firstHop := n.firstBobChannelLink.ShortChanID() - rhash, err := makePayment( - n.aliceServer, n.carolServer, firstHop, hops, amount, htlcAmt, - totalTimelock, - ).Wait(30 * time.Second) - if err == nil { - t.Fatal("error haven't been received") - } - assertFailureCode(t, err, lnwire.CodeTemporaryChannelFailure) - - // Wait for Alice to receive the revocation. - // - // TODO(roasbeef): add in ntfn hook for state transition completion - time.Sleep(100 * time.Millisecond) - - // Check that alice invoice wasn't settled and bandwidth of htlc - // links hasn't been changed. - invoice, err := receiver.registry.LookupInvoice(rhash) - if err != nil { - t.Fatalf("unable to get invoice: %v", err) - } - if invoice.State == channeldb.ContractSettled { - t.Fatal("carol invoice have been settled") - } - - if n.aliceChannelLink.Bandwidth() != aliceBandwidthBefore { - t.Fatal("the bandwidth of alice channel link which handles " + - "alice->bob channel should be the same") - } - - if n.firstBobChannelLink.Bandwidth() != firstBobBandwidthBefore { - t.Fatal("the bandwidth of bob channel link which handles " + - "alice->bob channel should be the same") - } - - if n.secondBobChannelLink.Bandwidth() != secondBobBandwidthBefore { - t.Fatal("the bandwidth of bob channel link which handles " + - "bob->carol channel should be the same") - } - - if n.carolChannelLink.Bandwidth() != carolBandwidthBefore { - t.Fatal("the bandwidth of carol channel link which handles " + - "bob->carol channel should be the same") - } -} - -// TestChannelLinkMultiHopUnknownPaymentHash checks that we receive remote error -// from Alice if she received not suitable payment hash for htlc. 
-func TestChannelLinkMultiHopUnknownPaymentHash(t *testing.T) { - t.Parallel() - - channels, cleanUp, _, err := createClusterChannels( - btcutil.UnitsPerCoin()*5, - btcutil.UnitsPerCoin()*5) - if err != nil { - t.Fatalf("unable to create channel: %v", err) - } - defer cleanUp() - - n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice, - channels.bobToCarol, channels.carolToBob, testStartingHeight) - if err := n.start(); err != nil { - t.Fatalf("unable to start three hop network: %v", err) - } - defer n.stop() - - carolBandwidthBefore := n.carolChannelLink.Bandwidth() - firstBobBandwidthBefore := n.firstBobChannelLink.Bandwidth() - secondBobBandwidthBefore := n.secondBobChannelLink.Bandwidth() - aliceBandwidthBefore := n.aliceChannelLink.Bandwidth() - - amount := lnwire.NewMSatFromSatoshis(btcutil.UnitsPerCoin()) - - htlcAmt, totalTimelock, hops := generateHops(amount, testStartingHeight, - n.firstBobChannelLink, n.carolChannelLink) - blob, err := generateRoute(hops...) - if err != nil { - t.Fatal(err) - } - - // Generate payment invoice and htlc, but don't add this invoice to the - // receiver registry. This should trigger an unknown payment hash - // failure. - _, htlc, pid, err := generatePayment( - amount, htlcAmt, totalTimelock, blob, - ) - if err != nil { - t.Fatal(err) - } - - // Send payment and expose err channel. 
- err = n.aliceServer.htlcSwitch.SendHTLC( - n.firstBobChannelLink.ShortChanID(), pid, htlc, - ) - if err != nil { - t.Fatalf("unable to get send payment: %v", err) - } - - resultChan, err := n.aliceServer.htlcSwitch.GetPaymentResult( - pid, htlc.PaymentHash, newMockDeobfuscator(), - ) - if err != nil { - t.Fatalf("unable to get payment result: %v", err) - } - - var result *PaymentResult - var ok bool - select { - - case result, ok = <-resultChan: - if !ok { - t.Fatalf("unexpected shutdown") - } - case <-time.After(5 * time.Second): - t.Fatalf("no result arrive") - } - - assertFailureCode( - t, result.Error, lnwire.CodeIncorrectOrUnknownPaymentDetails, - ) - - // Wait for Alice to receive the revocation. - time.Sleep(100 * time.Millisecond) - - if n.aliceChannelLink.Bandwidth() != aliceBandwidthBefore { - t.Fatal("the bandwidth of alice channel link which handles " + - "alice->bob channel should be the same") - } - - if n.firstBobChannelLink.Bandwidth() != firstBobBandwidthBefore { - t.Fatal("the bandwidth of bob channel link which handles " + - "alice->bob channel should be the same") - } - - if n.secondBobChannelLink.Bandwidth() != secondBobBandwidthBefore { - t.Fatal("the bandwidth of bob channel link which handles " + - "bob->carol channel should be the same") - } - - if n.carolChannelLink.Bandwidth() != carolBandwidthBefore { - t.Fatal("the bandwidth of carol channel link which handles " + - "bob->carol channel should be the same") - } -} - -// TestChannelLinkMultiHopUnknownNextHop construct the chain of hops -// Carol<->Bob<->Alice and checks that we receive remote error from Bob if he -// has no idea about next hop (hop might goes down and routing info not updated -// yet). 
-func TestChannelLinkMultiHopUnknownNextHop(t *testing.T) { - t.Parallel() - - channels, cleanUp, _, err := createClusterChannels( - btcutil.UnitsPerCoin()*5, - btcutil.UnitsPerCoin()*5) - if err != nil { - t.Fatalf("unable to create channel: %v", err) - } - defer cleanUp() - - n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice, - channels.bobToCarol, channels.carolToBob, testStartingHeight) - if err := n.start(); err != nil { - t.Fatal(err) - } - defer n.stop() - - carolBandwidthBefore := n.carolChannelLink.Bandwidth() - firstBobBandwidthBefore := n.firstBobChannelLink.Bandwidth() - secondBobBandwidthBefore := n.secondBobChannelLink.Bandwidth() - aliceBandwidthBefore := n.aliceChannelLink.Bandwidth() - - amount := lnwire.NewMSatFromSatoshis(btcutil.UnitsPerCoin()) - htlcAmt, totalTimelock, hops := generateHops(amount, testStartingHeight, - n.firstBobChannelLink, n.carolChannelLink) - - // Remove bob's outgoing link with Carol. This will cause him to fail - // back the payment to Alice since he is unaware of Carol when the - // payment comes across. - bobChanID := lnwire.NewChanIDFromOutPoint( - &channels.bobToCarol.State().FundingOutpoint, - ) - n.bobServer.htlcSwitch.RemoveLink(bobChanID) - - firstHop := n.firstBobChannelLink.ShortChanID() - receiver := n.carolServer - rhash, err := makePayment( - n.aliceServer, receiver, firstHop, hops, amount, htlcAmt, - totalTimelock).Wait(30 * time.Second) - if err == nil { - t.Fatal("error haven't been received") - } - errr := er.Wrapped(err) - rtErr, ok := errr.(ClearTextError) - if !ok { - t.Fatalf("expected ClearTextError") - } - - if _, ok = rtErr.WireMessage().(*lnwire.FailUnknownNextPeer); !ok { - t.Fatalf("wrong error has been received: %T", - rtErr.WireMessage()) - } - - // Wait for Alice to receive the revocation. 
- // - // TODO(roasbeef): add in ntfn hook for state transition completion - time.Sleep(100 * time.Millisecond) - - // Check that alice invoice wasn't settled and bandwidth of htlc - // links hasn't been changed. - invoice, err := receiver.registry.LookupInvoice(rhash) - if err != nil { - t.Fatalf("unable to get invoice: %v", err) - } - if invoice.State == channeldb.ContractSettled { - t.Fatal("carol invoice have been settled") - } - - if n.aliceChannelLink.Bandwidth() != aliceBandwidthBefore { - t.Fatal("the bandwidth of alice channel link which handles " + - "alice->bob channel should be the same") - } - - if n.firstBobChannelLink.Bandwidth() != firstBobBandwidthBefore { - t.Fatal("the bandwidth of bob channel link which handles " + - "alice->bob channel should be the same") - } - - if n.secondBobChannelLink.Bandwidth() != secondBobBandwidthBefore { - t.Fatal("the bandwidth of bob channel link which handles " + - "bob->carol channel should be the same") - } - - if n.carolChannelLink.Bandwidth() != carolBandwidthBefore { - t.Fatal("the bandwidth of carol channel link which handles " + - "bob->carol channel should be the same") - } - - // Load the forwarding packages for Bob's incoming link. The payment - // should have been rejected by the switch, and the AddRef in this link - // should be acked by the failed payment. - bobInFwdPkgs, err := channels.bobToAlice.State().LoadFwdPkgs() - if err != nil { - t.Fatalf("unable to load bob's fwd pkgs: %v", err) - } - - // There should be exactly two forward packages, as a full state - // transition requires two commitment dances. - if len(bobInFwdPkgs) != 2 { - t.Fatalf("bob should have exactly 2 fwdpkgs, has %d", - len(bobInFwdPkgs)) - } - - // Only one of the forwarding package should have an Add in it, the - // other will be empty. Either way, both AckFilters should be fully - // acked. 
- for _, fwdPkg := range bobInFwdPkgs { - if !fwdPkg.AckFilter.IsFull() { - t.Fatalf("fwdpkg chanid=%v height=%d AckFilter is not "+ - "fully acked", fwdPkg.Source, fwdPkg.Height) - } - } -} - -// TestChannelLinkMultiHopDecodeError checks that we send HTLC cancel if -// decoding of onion blob failed. -func TestChannelLinkMultiHopDecodeError(t *testing.T) { - t.Parallel() - - channels, cleanUp, _, err := createClusterChannels( - btcutil.UnitsPerCoin()*3, - btcutil.UnitsPerCoin()*5) - if err != nil { - t.Fatalf("unable to create channel: %v", err) - } - defer cleanUp() - - n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice, - channels.bobToCarol, channels.carolToBob, testStartingHeight) - if err := n.start(); err != nil { - t.Fatalf("unable to start three hop network: %v", err) - } - defer n.stop() - - // Replace decode function with another which throws an error. - n.carolChannelLink.cfg.ExtractErrorEncrypter = func( - *btcec.PublicKey) (hop.ErrorEncrypter, lnwire.FailCode) { - return nil, lnwire.CodeInvalidOnionVersion - } - - carolBandwidthBefore := n.carolChannelLink.Bandwidth() - firstBobBandwidthBefore := n.firstBobChannelLink.Bandwidth() - secondBobBandwidthBefore := n.secondBobChannelLink.Bandwidth() - aliceBandwidthBefore := n.aliceChannelLink.Bandwidth() - - amount := lnwire.NewMSatFromSatoshis(btcutil.UnitsPerCoin()) - htlcAmt, totalTimelock, hops := generateHops(amount, testStartingHeight, - n.firstBobChannelLink, n.carolChannelLink) - - receiver := n.carolServer - firstHop := n.firstBobChannelLink.ShortChanID() - rhash, err := makePayment( - n.aliceServer, n.carolServer, firstHop, hops, amount, htlcAmt, - totalTimelock, - ).Wait(30 * time.Second) - if err == nil { - t.Fatal("error haven't been received") - } - - errr := er.Wrapped(err) - rtErr, ok := errr.(ClearTextError) - if !ok { - t.Fatalf("expected a ClearTextError, instead got: %T", err) - } - - switch rtErr.WireMessage().(type) { - case *lnwire.FailInvalidOnionVersion: - default: 
- t.Fatalf("wrong error have been received: %v", err) - } - - // Wait for Bob to receive the revocation. - time.Sleep(100 * time.Millisecond) - - // Check that alice invoice wasn't settled and bandwidth of htlc - // links hasn't been changed. - invoice, err := receiver.registry.LookupInvoice(rhash) - if err != nil { - t.Fatalf("unable to get invoice: %v", err) - } - if invoice.State == channeldb.ContractSettled { - t.Fatal("carol invoice have been settled") - } - - if n.aliceChannelLink.Bandwidth() != aliceBandwidthBefore { - t.Fatal("the bandwidth of alice channel link which handles " + - "alice->bob channel should be the same") - } - - if n.firstBobChannelLink.Bandwidth() != firstBobBandwidthBefore { - t.Fatal("the bandwidth of bob channel link which handles " + - "alice->bob channel should be the same") - } - - if n.secondBobChannelLink.Bandwidth() != secondBobBandwidthBefore { - t.Fatal("the bandwidth of bob channel link which handles " + - "bob->carol channel should be the same") - } - - if n.carolChannelLink.Bandwidth() != carolBandwidthBefore { - t.Fatal("the bandwidth of carol channel link which handles " + - "bob->carol channel should be the same") - } -} - -// TestChannelLinkExpiryTooSoonExitNode tests that if we send an HTLC to a node -// with an expiry that is already expired, or too close to the current block -// height, then it will cancel the HTLC. -func TestChannelLinkExpiryTooSoonExitNode(t *testing.T) { - t.Parallel() - - // The starting height for this test will be 200. So we'll base all - // HTLC starting points off of that. 
- channels, cleanUp, _, err := createClusterChannels( - btcutil.UnitsPerCoin()*3, - btcutil.UnitsPerCoin()*5) - if err != nil { - t.Fatalf("unable to create channel: %v", err) - } - defer cleanUp() - - const startingHeight = 200 - n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice, - channels.bobToCarol, channels.carolToBob, startingHeight) - if err := n.start(); err != nil { - t.Fatalf("unable to start three hop network: %v", err) - } - defer n.stop() - - amount := lnwire.NewMSatFromSatoshis(btcutil.UnitsPerCoin()) - - // We'll craft an HTLC packet, but set the final hop CLTV to 5 blocks - // after the current true height. This is less than the test invoice - // cltv delta of 6, so we expect the incoming htlc to be failed by the - // exit hop. - htlcAmt, totalTimelock, hops := generateHops(amount, - startingHeight-1, n.firstBobChannelLink) - - // Now we'll send out the payment from Alice to Bob. - firstHop := n.firstBobChannelLink.ShortChanID() - _, err = makePayment( - n.aliceServer, n.bobServer, firstHop, hops, amount, htlcAmt, - totalTimelock, - ).Wait(30 * time.Second) - - // The payment should've failed as the time lock value was in the - // _past_. - if err == nil { - t.Fatalf("payment should have failed due to a too early " + - "time lock value") - } - - errr := er.Wrapped(err) - rtErr, ok := errr.(ClearTextError) - if !ok { - t.Fatalf("expected a ClearTextError, instead got: %T %v", - rtErr, err) - } - - switch rtErr.WireMessage().(type) { - case *lnwire.FailIncorrectDetails: - default: - t.Fatalf("expected incorrect_or_unknown_payment_details, "+ - "instead have: %v", err) - } -} - -// TestChannelLinkExpiryTooSoonExitNode tests that if we send a multi-hop HTLC, -// and the time lock is too early for an intermediate node, then they cancel -// the HTLC back to the sender. -func TestChannelLinkExpiryTooSoonMidNode(t *testing.T) { - t.Parallel() - - // The starting height for this test will be 200. 
So we'll base all - // HTLC starting points off of that. - channels, cleanUp, _, err := createClusterChannels( - btcutil.UnitsPerCoin()*3, - btcutil.UnitsPerCoin()*5) - if err != nil { - t.Fatalf("unable to create channel: %v", err) - } - defer cleanUp() - - const startingHeight = 200 - n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice, - channels.bobToCarol, channels.carolToBob, startingHeight) - if err := n.start(); err != nil { - t.Fatalf("unable to start three hop network: %v", err) - } - defer n.stop() - - amount := lnwire.NewMSatFromSatoshis(btcutil.UnitsPerCoin()) - - // We'll craft an HTLC packet, but set the starting height to 3 blocks - // before the current true height. This means that the outgoing time - // lock of the middle hop will be at starting height + 3 blocks (channel - // policy time lock delta is 6 blocks). There is an expiry grace delta - // of 3 blocks relative to the current height, meaning that htlc will - // not be sent out by the middle hop. - htlcAmt, totalTimelock, hops := generateHops(amount, - startingHeight-3, n.firstBobChannelLink, n.carolChannelLink) - - // Now we'll send out the payment from Alice to Bob. - firstHop := n.firstBobChannelLink.ShortChanID() - _, err = makePayment( - n.aliceServer, n.bobServer, firstHop, hops, amount, htlcAmt, - totalTimelock, - ).Wait(30 * time.Second) - - // The payment should've failed as the time lock value was in the - // _past_. 
- if err == nil { - t.Fatalf("payment should have failed due to a too early " + - "time lock value") - } - - errr := er.Wrapped(err) - rtErr, ok := errr.(ClearTextError) - if !ok { - t.Fatalf("expected a ClearTextError, instead got: %T: %v", - rtErr, err) - } - - switch rtErr.WireMessage().(type) { - case *lnwire.FailExpiryTooSoon: - default: - t.Fatalf("incorrect error, expected final time lock too "+ - "early, instead have: %v", err) - } -} - -// TestChannelLinkSingleHopMessageOrdering test checks ordering of message which -// flying around between Alice and Bob are correct when Bob sends payments to -// Alice. -func TestChannelLinkSingleHopMessageOrdering(t *testing.T) { - t.Parallel() - - channels, cleanUp, _, err := createClusterChannels( - btcutil.UnitsPerCoin()*3, - btcutil.UnitsPerCoin()*5) - if err != nil { - t.Fatalf("unable to create channel: %v", err) - } - defer cleanUp() - - n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice, - channels.bobToCarol, channels.carolToBob, testStartingHeight) - - chanID := n.aliceChannelLink.ChanID() - - messages := []expectedMessage{ - {"alice", "bob", &lnwire.ChannelReestablish{}, false}, - {"bob", "alice", &lnwire.ChannelReestablish{}, false}, - - {"alice", "bob", &lnwire.FundingLocked{}, false}, - {"bob", "alice", &lnwire.FundingLocked{}, false}, - - {"alice", "bob", &lnwire.UpdateAddHTLC{}, false}, - {"alice", "bob", &lnwire.CommitSig{}, false}, - {"bob", "alice", &lnwire.RevokeAndAck{}, false}, - {"bob", "alice", &lnwire.CommitSig{}, false}, - {"alice", "bob", &lnwire.RevokeAndAck{}, false}, - - {"bob", "alice", &lnwire.UpdateFulfillHTLC{}, false}, - {"bob", "alice", &lnwire.CommitSig{}, false}, - {"alice", "bob", &lnwire.RevokeAndAck{}, false}, - {"alice", "bob", &lnwire.CommitSig{}, false}, - {"bob", "alice", &lnwire.RevokeAndAck{}, false}, - } - - debug := false - if debug { - // Log message that alice receives. 
- n.aliceServer.intersect(createLogFunc("alice", - n.aliceChannelLink.ChanID())) - - // Log message that bob receives. - n.bobServer.intersect(createLogFunc("bob", - n.firstBobChannelLink.ChanID())) - } - - // Check that alice receives messages in right order. - n.aliceServer.intersect(createInterceptorFunc("[alice] <-- [bob]", - "alice", messages, chanID, false)) - - // Check that bob receives messages in right order. - n.bobServer.intersect(createInterceptorFunc("[alice] --> [bob]", - "bob", messages, chanID, false)) - - if err := n.start(); err != nil { - t.Fatalf("unable to start three hop network: %v", err) - } - defer n.stop() - - amount := lnwire.NewMSatFromSatoshis(btcutil.UnitsPerCoin()) - htlcAmt, totalTimelock, hops := generateHops(amount, testStartingHeight, - n.firstBobChannelLink) - - // Wait for: - // * HTLC add request to be sent to bob. - // * alice<->bob commitment state to be updated. - // * settle request to be sent back from bob to alice. - // * alice<->bob commitment state to be updated. - // * user notification to be sent. - firstHop := n.firstBobChannelLink.ShortChanID() - _, err = makePayment( - n.aliceServer, n.bobServer, firstHop, hops, amount, htlcAmt, - totalTimelock, - ).Wait(30 * time.Second) - if err != nil { - t.Fatalf("unable to make the payment: %v", err) - } -} - -type mockPeer struct { - sync.Mutex - disconnected bool - sentMsgs chan lnwire.Message - quit chan struct{} -} - -func (m *mockPeer) QuitSignal() <-chan struct{} { - return m.quit -} - -var _ lnpeer.Peer = (*mockPeer)(nil) - -func (m *mockPeer) SendMessage(sync bool, msgs ...lnwire.Message) er.R { - if m.disconnected { - return er.Errorf("disconnected") - } - - select { - case m.sentMsgs <- msgs[0]: - case <-m.quit: - return er.Errorf("mockPeer shutting down") - } - return nil -} -func (m *mockPeer) SendMessageLazy(sync bool, msgs ...lnwire.Message) er.R { - return m.SendMessage(sync, msgs...) 
-} -func (m *mockPeer) AddNewChannel(_ *channeldb.OpenChannel, - _ <-chan struct{}) er.R { - return nil -} -func (m *mockPeer) WipeChannel(*wire.OutPoint) {} -func (m *mockPeer) PubKey() [33]byte { - return [33]byte{} -} -func (m *mockPeer) IdentityKey() *btcec.PublicKey { - return nil -} -func (m *mockPeer) Address() net.Addr { - return nil -} -func (m *mockPeer) LocalFeatures() *lnwire.FeatureVector { - return nil -} -func (m *mockPeer) RemoteFeatures() *lnwire.FeatureVector { - return nil -} - -func newSingleLinkTestHarness(chanAmt, chanReserve btcutil.Amount) ( - ChannelLink, *lnwallet.LightningChannel, chan time.Time, func() er.R, - func(), func() (*lnwallet.LightningChannel, er.R), er.R) { - - var chanIDBytes [8]byte - if _, err := util.ReadFull(rand.Reader, chanIDBytes[:]); err != nil { - return nil, nil, nil, nil, nil, nil, err - } - - chanID := lnwire.NewShortChanIDFromInt( - binary.BigEndian.Uint64(chanIDBytes[:])) - - aliceLc, bobLc, fCleanUp, err := createTestChannel( - alicePrivKey, bobPrivKey, chanAmt, chanAmt, - chanReserve, chanReserve, chanID, - ) - if err != nil { - return nil, nil, nil, nil, nil, nil, err - } - - var ( - decoder = newMockIteratorDecoder() - obfuscator = NewMockObfuscator() - alicePeer = &mockPeer{ - sentMsgs: make(chan lnwire.Message, 2000), - quit: make(chan struct{}), - } - globalPolicy = ForwardingPolicy{ - MinHTLCOut: lnwire.NewMSatFromSatoshis(5), - MaxHTLC: lnwire.NewMSatFromSatoshis(chanAmt), - BaseFee: lnwire.NewMSatFromSatoshis(1), - TimeLockDelta: 6, - } - invoiceRegistry = newMockRegistry(globalPolicy.TimeLockDelta) - ) - - pCache := newMockPreimageCache() - - aliceDb := aliceLc.channel.State().Db - aliceSwitch, err := initSwitchWithDB(testStartingHeight, aliceDb) - if err != nil { - return nil, nil, nil, nil, nil, nil, err - } - - // Instantiate with a long interval, so that we can precisely control - // the firing via force feeding. 
- bticker := ticker.NewForce(time.Hour) - aliceCfg := ChannelLinkConfig{ - FwrdingPolicy: globalPolicy, - Peer: alicePeer, - Switch: aliceSwitch, - Circuits: aliceSwitch.CircuitModifier(), - ForwardPackets: aliceSwitch.ForwardPackets, - DecodeHopIterators: decoder.DecodeHopIterators, - ExtractErrorEncrypter: func(*btcec.PublicKey) ( - hop.ErrorEncrypter, lnwire.FailCode) { - return obfuscator, lnwire.CodeNone - }, - FetchLastChannelUpdate: mockGetChanUpdateMessage, - PreimageCache: pCache, - OnChannelFailure: func(lnwire.ChannelID, - lnwire.ShortChannelID, LinkFailureError) { - }, - UpdateContractSignals: func(*contractcourt.ContractSignals) er.R { - return nil - }, - Registry: invoiceRegistry, - ChainEvents: &contractcourt.ChainEventSubscription{}, - BatchTicker: bticker, - FwdPkgGCTicker: ticker.NewForce(15 * time.Second), - PendingCommitTicker: ticker.New(time.Minute), - // Make the BatchSize and Min/MaxFeeUpdateTimeout large enough - // to not trigger commit updates automatically during tests. 
- BatchSize: 10000, - MinFeeUpdateTimeout: 30 * time.Minute, - MaxFeeUpdateTimeout: 40 * time.Minute, - MaxOutgoingCltvExpiry: DefaultMaxOutgoingCltvExpiry, - MaxFeeAllocation: DefaultMaxLinkFeeAllocation, - NotifyActiveLink: func(wire.OutPoint) {}, - NotifyActiveChannel: func(wire.OutPoint) {}, - NotifyInactiveChannel: func(wire.OutPoint) {}, - HtlcNotifier: aliceSwitch.cfg.HtlcNotifier, - } - - aliceLink := NewChannelLink(aliceCfg, aliceLc.channel) - start := func() er.R { - return aliceSwitch.AddLink(aliceLink) - } - go func() { - for { - select { - case <-aliceLink.(*channelLink).htlcUpdates: - case <-aliceLink.(*channelLink).quit: - return - } - } - }() - - cleanUp := func() { - close(alicePeer.quit) - defer fCleanUp() - } - - return aliceLink, bobLc.channel, bticker.Force, start, cleanUp, - aliceLc.restore, nil -} - -func assertLinkBandwidth(t *testing.T, link ChannelLink, - expected lnwire.MilliSatoshi) { - - currentBandwidth := link.Bandwidth() - _, _, line, _ := runtime.Caller(1) - if currentBandwidth != expected { - t.Fatalf("line %v: alice's link bandwidth is incorrect: "+ - "expected %v, got %v", line, expected, currentBandwidth) - } -} - -// handleStateUpdate handles the messages sent from the link after -// the batch ticker has triggered a state update. -func handleStateUpdate(link *channelLink, - remoteChannel *lnwallet.LightningChannel) er.R { - sentMsgs := link.cfg.Peer.(*mockPeer).sentMsgs - var msg lnwire.Message - select { - case msg = <-sentMsgs: - case <-time.After(60 * time.Second): - return er.Errorf("did not receive CommitSig from Alice") - } - - // The link should be sending a commit sig at this point. - commitSig, ok := msg.(*lnwire.CommitSig) - if !ok { - return er.Errorf("expected CommitSig, got %T", msg) - } - - // Let the remote channel receive the commit sig, and - // respond with a revocation + commitsig. 
- err := remoteChannel.ReceiveNewCommitment( - commitSig.CommitSig, commitSig.HtlcSigs) - if err != nil { - return err - } - - remoteRev, _, err := remoteChannel.RevokeCurrentCommitment() - if err != nil { - return err - } - link.HandleChannelUpdate(remoteRev) - - remoteSig, remoteHtlcSigs, _, err := remoteChannel.SignNextCommitment() - if err != nil { - return err - } - commitSig = &lnwire.CommitSig{ - CommitSig: remoteSig, - HtlcSigs: remoteHtlcSigs, - } - link.HandleChannelUpdate(commitSig) - - // This should make the link respond with a revocation. - select { - case msg = <-sentMsgs: - case <-time.After(60 * time.Second): - return er.Errorf("did not receive RevokeAndAck from Alice") - } - - revoke, ok := msg.(*lnwire.RevokeAndAck) - if !ok { - return er.Errorf("expected RevokeAndAck got %T", msg) - } - _, _, _, _, err = remoteChannel.ReceiveRevocation(revoke) - if err != nil { - return er.Errorf("unable to receive "+ - "revocation: %v", err) - } - - return nil -} - -// updateState is used exchange the messages necessary to do a full state -// transition. If initiateUpdate=true, then this call will make the link -// trigger an update by sending on the batchTick channel, if not, it will -// make the remoteChannel initiate the state update. -func updateState(batchTick chan time.Time, link *channelLink, - remoteChannel *lnwallet.LightningChannel, - initiateUpdate bool) er.R { - sentMsgs := link.cfg.Peer.(*mockPeer).sentMsgs - - if initiateUpdate { - // Trigger update by ticking the batchTicker. - select { - case batchTick <- time.Now(): - case <-link.quit: - return er.Errorf("link shutting down") - } - return handleStateUpdate(link, remoteChannel) - } - - // The remote is triggering the state update, emulate this by - // signing and sending CommitSig to the link. 
- remoteSig, remoteHtlcSigs, _, err := remoteChannel.SignNextCommitment() - if err != nil { - return err - } - - commitSig := &lnwire.CommitSig{ - CommitSig: remoteSig, - HtlcSigs: remoteHtlcSigs, - } - link.HandleChannelUpdate(commitSig) - - // The link should respond with a revocation + commit sig. - var msg lnwire.Message - select { - case msg = <-sentMsgs: - case <-time.After(60 * time.Second): - return er.Errorf("did not receive RevokeAndAck from Alice") - } - - revoke, ok := msg.(*lnwire.RevokeAndAck) - if !ok { - return er.Errorf("expected RevokeAndAck got %T", - msg) - } - _, _, _, _, err = remoteChannel.ReceiveRevocation(revoke) - if err != nil { - return er.Errorf("unable to receive "+ - "revocation: %v", err) - } - select { - case msg = <-sentMsgs: - case <-time.After(60 * time.Second): - return er.Errorf("did not receive CommitSig from Alice") - } - - commitSig, ok = msg.(*lnwire.CommitSig) - if !ok { - return er.Errorf("expected CommitSig, got %T", msg) - } - - err = remoteChannel.ReceiveNewCommitment( - commitSig.CommitSig, commitSig.HtlcSigs) - if err != nil { - return err - } - - // Lastly, send a revocation back to the link. - remoteRev, _, err := remoteChannel.RevokeCurrentCommitment() - if err != nil { - return err - } - link.HandleChannelUpdate(remoteRev) - - // Sleep to make sure Alice has handled the remote revocation. - time.Sleep(500 * time.Millisecond) - - return nil -} - -// TestChannelLinkBandwidthConsistency ensures that the reported bandwidth of a -// given ChannelLink is properly updated in response to downstream messages -// from the switch, and upstream messages from its channel peer. 
-// -// TODO(roasbeef): add sync hook into packet processing so can eliminate all -// sleep in this test and the one below -func TestChannelLinkBandwidthConsistency(t *testing.T) { - if !build.IsDevBuild() { - t.Fatalf("htlcswitch tests must be run with '-tags debug") - } - t.Parallel() - - // TODO(roasbeef): replace manual bit twiddling with concept of - // resource cost for packets? - // * or also able to consult link - - // We'll start the test by creating a single instance of - chanAmt := btcutil.UnitsPerCoin() * 5 - - aliceLink, bobChannel, tmr, start, cleanUp, _, err := - newSingleLinkTestHarness(chanAmt, 0) - if err != nil { - t.Fatalf("unable to create link: %v", err) - } - defer cleanUp() - - if err := start(); err != nil { - t.Fatalf("unable to start test harness: %v", err) - } - - var ( - carolChanID = lnwire.NewShortChanIDFromInt(3) - mockBlob [lnwire.OnionPacketSize]byte - coreChan = aliceLink.(*channelLink).channel - coreLink = aliceLink.(*channelLink) - defaultCommitFee = coreChan.StateSnapshot().CommitFee - aliceStartingBandwidth = aliceLink.Bandwidth() - aliceMsgs = coreLink.cfg.Peer.(*mockPeer).sentMsgs - ) - - // We put Alice into hodl.ExitSettle mode, such that she won't settle - // incoming HTLCs automatically. - coreLink.cfg.HodlMask = hodl.MaskFromFlags(hodl.ExitSettle) - - estimator := chainfee.NewStaticEstimator(6000, 0) - feePerKw, err := estimator.EstimateFeePerKW(1) - if err != nil { - t.Fatalf("unable to query fee estimator: %v", err) - } - htlcFee := lnwire.NewMSatFromSatoshis( - feePerKw.FeeForWeight(input.HTLCWeight), - ) - - // The starting bandwidth of the channel should be exactly the amount - // that we created the channel between her and Bob, minus the - // commitment fee and fee for adding an additional HTLC. 
- expectedBandwidth := lnwire.NewMSatFromSatoshis( - chanAmt-defaultCommitFee, - ) - htlcFee - assertLinkBandwidth(t, aliceLink, expectedBandwidth) - - // Next, we'll create an HTLC worth 1 BTC, and send it into the link as - // a switch initiated payment. The resulting bandwidth should - // now be decremented to reflect the new HTLC. - htlcAmt := lnwire.NewMSatFromSatoshis(btcutil.UnitsPerCoin()) - invoice, htlc, _, err := generatePayment( - htlcAmt, htlcAmt, 5, mockBlob, - ) - if err != nil { - t.Fatalf("unable to create payment: %v", err) - } - addPkt := htlcPacket{ - htlc: htlc, - incomingChanID: hop.Source, - incomingHTLCID: 0, - obfuscator: NewMockObfuscator(), - } - - circuit := makePaymentCircuit(&htlc.PaymentHash, &addPkt) - _, err = coreLink.cfg.Switch.commitCircuits(&circuit) - if err != nil { - t.Fatalf("unable to commit circuit: %v", err) - } - - addPkt.circuit = &circuit - if err := aliceLink.HandleSwitchPacket(&addPkt); err != nil { - t.Fatalf("unable to handle switch packet: %v", err) - } - time.Sleep(time.Millisecond * 500) - - // The resulting bandwidth should reflect that Alice is paying the - // htlc amount in addition to the htlc fee. - assertLinkBandwidth(t, aliceLink, aliceStartingBandwidth-htlcAmt-htlcFee) - - // Alice should send the HTLC to Bob. - var msg lnwire.Message - select { - case msg = <-aliceMsgs: - case <-time.After(15 * time.Second): - t.Fatalf("did not receive message") - } - - addHtlc, ok := msg.(*lnwire.UpdateAddHTLC) - if !ok { - t.Fatalf("expected UpdateAddHTLC, got %T", msg) - } - - bobIndex, err := bobChannel.ReceiveHTLC(addHtlc) - if err != nil { - t.Fatalf("bob failed receiving htlc: %v", err) - } - - // Lock in the HTLC. - if err := updateState(tmr, coreLink, bobChannel, true); err != nil { - t.Fatalf("unable to update state: %v", err) - } - // Locking in the HTLC should not change Alice's bandwidth. 
- assertLinkBandwidth(t, aliceLink, aliceStartingBandwidth-htlcAmt-htlcFee) - - // If we now send in a valid HTLC settle for the prior HTLC we added, - // then the bandwidth should remain unchanged as the remote party will - // gain additional channel balance. - err = bobChannel.SettleHTLC(*invoice.Terms.PaymentPreimage, bobIndex, nil, nil, nil) - if err != nil { - t.Fatalf("unable to settle htlc: %v", err) - } - htlcSettle := &lnwire.UpdateFulfillHTLC{ - ID: 0, - PaymentPreimage: *invoice.Terms.PaymentPreimage, - } - aliceLink.HandleChannelUpdate(htlcSettle) - time.Sleep(time.Millisecond * 500) - - // Since the settle is not locked in yet, Alice's bandwidth should still - // reflect that she has to pay the fee. - assertLinkBandwidth(t, aliceLink, aliceStartingBandwidth-htlcAmt-htlcFee) - - // Lock in the settle. - if err := updateState(tmr, coreLink, bobChannel, false); err != nil { - t.Fatalf("unable to update state: %v", err) - } - - // Now that it is settled, Alice should have gotten the htlc fee back. - assertLinkBandwidth(t, aliceLink, aliceStartingBandwidth-htlcAmt) - - // Next, we'll add another HTLC initiated by the switch (of the same - // amount as the prior one). - _, htlc, _, err = generatePayment(htlcAmt, htlcAmt, 5, mockBlob) - if err != nil { - t.Fatalf("unable to create payment: %v", err) - } - addPkt = htlcPacket{ - htlc: htlc, - incomingChanID: hop.Source, - incomingHTLCID: 1, - obfuscator: NewMockObfuscator(), - } - - circuit = makePaymentCircuit(&htlc.PaymentHash, &addPkt) - _, err = coreLink.cfg.Switch.commitCircuits(&circuit) - if err != nil { - t.Fatalf("unable to commit circuit: %v", err) - } - - addPkt.circuit = &circuit - if err := aliceLink.HandleSwitchPacket(&addPkt); err != nil { - t.Fatalf("unable to handle switch packet: %v", err) - } - time.Sleep(time.Millisecond * 500) - - // Again, Alice's bandwidth decreases by htlcAmt+htlcFee. 
- assertLinkBandwidth(t, aliceLink, aliceStartingBandwidth-2*htlcAmt-htlcFee) - - // Alice will send the HTLC to Bob. - select { - case msg = <-aliceMsgs: - case <-time.After(15 * time.Second): - t.Fatalf("did not receive message") - } - - addHtlc, ok = msg.(*lnwire.UpdateAddHTLC) - if !ok { - t.Fatalf("expected UpdateAddHTLC, got %T", msg) - } - - bobIndex, err = bobChannel.ReceiveHTLC(addHtlc) - if err != nil { - t.Fatalf("bob failed receiving htlc: %v", err) - } - - // Lock in the HTLC, which should not affect the bandwidth. - if err := updateState(tmr, coreLink, bobChannel, true); err != nil { - t.Fatalf("unable to update state: %v", err) - } - - assertLinkBandwidth(t, aliceLink, aliceStartingBandwidth-htlcAmt*2-htlcFee) - - // With that processed, we'll now generate an HTLC fail (sent by the - // remote peer) to cancel the HTLC we just added. This should return us - // back to the bandwidth of the link right before the HTLC was sent. - err = bobChannel.FailHTLC(bobIndex, []byte("nop"), nil, nil, nil) - if err != nil { - t.Fatalf("unable to fail htlc: %v", err) - } - failMsg := &lnwire.UpdateFailHTLC{ - ID: 1, - Reason: lnwire.OpaqueReason([]byte("nop")), - } - - aliceLink.HandleChannelUpdate(failMsg) - time.Sleep(time.Millisecond * 500) - - // Before the Fail gets locked in, the bandwidth should remain unchanged. - assertLinkBandwidth(t, aliceLink, aliceStartingBandwidth-htlcAmt*2-htlcFee) - - // Lock in the Fail. - if err := updateState(tmr, coreLink, bobChannel, false); err != nil { - t.Fatalf("unable to update state: %v", err) - } - - // Now the bandwidth should reflect the failed HTLC. - assertLinkBandwidth(t, aliceLink, aliceStartingBandwidth-htlcAmt) - - // Moving along, we'll now receive a new HTLC from the remote peer, - // with an ID of 0 as this is their first HTLC. The bandwidth should - // remain unchanged (but Alice will need to pay the fee for the extra - // HTLC). 
- htlcAmt, totalTimelock, hops := generateHops(htlcAmt, testStartingHeight, - coreLink) - blob, err := generateRoute(hops...) - if err != nil { - t.Fatalf("unable to gen route: %v", err) - } - invoice, htlc, _, err = generatePayment( - htlcAmt, htlcAmt, totalTimelock, blob, - ) - if err != nil { - t.Fatalf("unable to create payment: %v", err) - } - - // We must add the invoice to the registry, such that Alice expects - // this payment. - err = coreLink.cfg.Registry.(*mockInvoiceRegistry).AddInvoice( - *invoice, htlc.PaymentHash, - ) - if err != nil { - t.Fatalf("unable to add invoice to registry: %v", err) - } - - htlc.ID = 0 - _, err = bobChannel.AddHTLC(htlc, nil) - if err != nil { - t.Fatalf("unable to add htlc: %v", err) - } - aliceLink.HandleChannelUpdate(htlc) - - // Alice's balance remains unchanged until this HTLC is locked in. - assertLinkBandwidth(t, aliceLink, aliceStartingBandwidth-htlcAmt) - - // Lock in the HTLC. - if err := updateState(tmr, coreLink, bobChannel, false); err != nil { - t.Fatalf("unable to update state: %v", err) - } - - // Since Bob is adding this HTLC, Alice only needs to pay the fee. - assertLinkBandwidth(t, aliceLink, aliceStartingBandwidth-htlcAmt-htlcFee) - time.Sleep(time.Millisecond * 500) - - addPkt = htlcPacket{ - htlc: htlc, - incomingChanID: aliceLink.ShortChanID(), - incomingHTLCID: 0, - obfuscator: NewMockObfuscator(), - } - - circuit = makePaymentCircuit(&htlc.PaymentHash, &addPkt) - _, err = coreLink.cfg.Switch.commitCircuits(&circuit) - if err != nil { - t.Fatalf("unable to commit circuit: %v", err) - } - - addPkt.outgoingChanID = carolChanID - addPkt.outgoingHTLCID = 0 - - err = coreLink.cfg.Switch.openCircuits(addPkt.keystone()) - if err != nil { - t.Fatalf("unable to set keystone: %v", err) - } - - // Next, we'll settle the HTLC with our knowledge of the pre-image that - // we eventually learn (simulating a multi-hop payment). The bandwidth - // of the channel should now be re-balanced to the starting point. 
- settlePkt := htlcPacket{ - incomingChanID: aliceLink.ShortChanID(), - incomingHTLCID: 0, - circuit: &circuit, - outgoingChanID: addPkt.outgoingChanID, - outgoingHTLCID: addPkt.outgoingHTLCID, - htlc: &lnwire.UpdateFulfillHTLC{ - ID: 0, - PaymentPreimage: *invoice.Terms.PaymentPreimage, - }, - obfuscator: NewMockObfuscator(), - } - - if err := aliceLink.HandleSwitchPacket(&settlePkt); err != nil { - t.Fatalf("unable to handle switch packet: %v", err) - } - time.Sleep(time.Millisecond * 500) - - // Settling this HTLC gives Alice all her original bandwidth back. - assertLinkBandwidth(t, aliceLink, aliceStartingBandwidth) - - select { - case msg = <-aliceMsgs: - case <-time.After(15 * time.Second): - t.Fatalf("did not receive message") - } - - settleMsg, ok := msg.(*lnwire.UpdateFulfillHTLC) - if !ok { - t.Fatalf("expected UpdateFulfillHTLC, got %T", msg) - } - err = bobChannel.ReceiveHTLCSettle(settleMsg.PaymentPreimage, settleMsg.ID) - if err != nil { - t.Fatalf("failed receiving fail htlc: %v", err) - } - - // After failing an HTLC, the link will automatically trigger - // a state update. - if err := handleStateUpdate(coreLink, bobChannel); err != nil { - t.Fatalf("unable to update state: %v", err) - } - - // Finally, we'll test the scenario of failing an HTLC received by the - // remote node. This should result in no perceived bandwidth changes. - htlcAmt, totalTimelock, hops = generateHops(htlcAmt, testStartingHeight, - coreLink) - blob, err = generateRoute(hops...) 
- if err != nil { - t.Fatalf("unable to gen route: %v", err) - } - invoice, htlc, _, err = generatePayment( - htlcAmt, htlcAmt, totalTimelock, blob, - ) - if err != nil { - t.Fatalf("unable to create payment: %v", err) - } - err = coreLink.cfg.Registry.(*mockInvoiceRegistry).AddInvoice( - *invoice, htlc.PaymentHash, - ) - if err != nil { - t.Fatalf("unable to add invoice to registry: %v", err) - } - - // Since we are not using the link to handle HTLC IDs for the - // remote channel, we must set this manually. This is the second - // HTLC we add, hence it should have an ID of 1 (Alice's channel - // link will set this automatically for her side). - htlc.ID = 1 - _, err = bobChannel.AddHTLC(htlc, nil) - if err != nil { - t.Fatalf("unable to add htlc: %v", err) - } - aliceLink.HandleChannelUpdate(htlc) - time.Sleep(time.Millisecond * 500) - - // No changes before the HTLC is locked in. - assertLinkBandwidth(t, aliceLink, aliceStartingBandwidth) - if err := updateState(tmr, coreLink, bobChannel, false); err != nil { - t.Fatalf("unable to update state: %v", err) - } - - // After lock-in, Alice will have to pay the htlc fee. 
- assertLinkBandwidth(t, aliceLink, aliceStartingBandwidth-htlcFee) - - addPkt = htlcPacket{ - htlc: htlc, - incomingChanID: aliceLink.ShortChanID(), - incomingHTLCID: 1, - obfuscator: NewMockObfuscator(), - } - - circuit = makePaymentCircuit(&htlc.PaymentHash, &addPkt) - _, err = coreLink.cfg.Switch.commitCircuits(&circuit) - if err != nil { - t.Fatalf("unable to commit circuit: %v", err) - } - - addPkt.outgoingChanID = carolChanID - addPkt.outgoingHTLCID = 1 - - err = coreLink.cfg.Switch.openCircuits(addPkt.keystone()) - if err != nil { - t.Fatalf("unable to set keystone: %v", err) - } - - failPkt := htlcPacket{ - incomingChanID: aliceLink.ShortChanID(), - incomingHTLCID: 1, - circuit: &circuit, - outgoingChanID: addPkt.outgoingChanID, - outgoingHTLCID: addPkt.outgoingHTLCID, - htlc: &lnwire.UpdateFailHTLC{ - ID: 1, - }, - obfuscator: NewMockObfuscator(), - } - - if err := aliceLink.HandleSwitchPacket(&failPkt); err != nil { - t.Fatalf("unable to handle switch packet: %v", err) - } - time.Sleep(time.Millisecond * 500) - - // Alice should get all her bandwidth back. - assertLinkBandwidth(t, aliceLink, aliceStartingBandwidth) - - // Message should be sent to Bob. - select { - case msg = <-aliceMsgs: - case <-time.After(15 * time.Second): - t.Fatalf("did not receive message") - } - failMsg, ok = msg.(*lnwire.UpdateFailHTLC) - if !ok { - t.Fatalf("expected UpdateFailHTLC, got %T", msg) - } - err = bobChannel.ReceiveFailHTLC(failMsg.ID, []byte("fail")) - if err != nil { - t.Fatalf("failed receiving fail htlc: %v", err) - } - - // After failing an HTLC, the link will automatically trigger - // a state update. - if err := handleStateUpdate(coreLink, bobChannel); err != nil { - t.Fatalf("unable to update state: %v", err) - } - assertLinkBandwidth(t, aliceLink, aliceStartingBandwidth) -} - -// genAddsAndCircuits creates `numHtlcs` sequential ADD packets and there -// corresponding circuits. The provided `htlc` is used in all test packets. 
-func genAddsAndCircuits(numHtlcs int, htlc *lnwire.UpdateAddHTLC) ( - []*htlcPacket, []*PaymentCircuit) { - - addPkts := make([]*htlcPacket, 0, numHtlcs) - circuits := make([]*PaymentCircuit, 0, numHtlcs) - for i := 0; i < numHtlcs; i++ { - addPkt := htlcPacket{ - htlc: htlc, - incomingChanID: hop.Source, - incomingHTLCID: uint64(i), - obfuscator: NewMockObfuscator(), - } - - circuit := makePaymentCircuit(&htlc.PaymentHash, &addPkt) - addPkt.circuit = &circuit - - addPkts = append(addPkts, &addPkt) - circuits = append(circuits, &circuit) - } - - return addPkts, circuits -} - -// TestChannelLinkTrimCircuitsPending checks that the switch and link properly -// trim circuits if there are open circuits corresponding to ADDs on a pending -// commmitment transaction. -func TestChannelLinkTrimCircuitsPending(t *testing.T) { - t.Parallel() - - chanAmt := btcutil.UnitsPerCoin() * 5 - const ( - numHtlcs = 4 - halfHtlcs = numHtlcs / 2 - ) - - // We'll start by creating a new link with our chanAmt (5 BTC). We will - // only be testing Alice's behavior, so the reference to Bob's channel - // state is unnecessary. - aliceLink, _, batchTicker, start, cleanUp, restore, err := - newSingleLinkTestHarness(chanAmt, 0) - if err != nil { - t.Fatalf("unable to create link: %v", err) - } - defer cleanUp() - - if err := start(); err != nil { - t.Fatalf("unable to start test harness: %v", err) - } - - alice := newPersistentLinkHarness( - t, aliceLink, batchTicker, restore, - ) - - // Compute the static fees that will be used to determine the - // correctness of Alice's bandwidth when forwarding HTLCs. 
- estimator := chainfee.NewStaticEstimator(6000, 0) - feePerKw, err := estimator.EstimateFeePerKW(1) - if err != nil { - t.Fatalf("unable to query fee estimator: %v", err) - } - - defaultCommitFee := alice.channel.StateSnapshot().CommitFee - htlcFee := lnwire.NewMSatFromSatoshis( - feePerKw.FeeForWeight(input.HTLCWeight), - ) - - // The starting bandwidth of the channel should be exactly the amount - // that we created the channel between her and Bob, minus the commitment - // fee and fee of adding an HTLC. - expectedBandwidth := lnwire.NewMSatFromSatoshis( - chanAmt-defaultCommitFee, - ) - htlcFee - assertLinkBandwidth(t, alice.link, expectedBandwidth) - - // Capture Alice's starting bandwidth to perform later, relative - // bandwidth assertions. - aliceStartingBandwidth := alice.link.Bandwidth() - - // Next, we'll create an HTLC worth 1 BTC that will be used as a dummy - // message for the test. - var mockBlob [lnwire.OnionPacketSize]byte - htlcAmt := lnwire.NewMSatFromSatoshis(btcutil.UnitsPerCoin()) - _, htlc, _, err := generatePayment(htlcAmt, htlcAmt, 5, mockBlob) - if err != nil { - t.Fatalf("unable to create payment: %v", err) - } - - // Create `numHtlc` htlcPackets and payment circuits that will be used - // to drive the test. All of the packets will use the same dummy HTLC. - addPkts, circuits := genAddsAndCircuits(numHtlcs, htlc) - - // To begin the test, start by committing the circuits belong to our - // first two HTLCs. - fwdActions := alice.commitCircuits(circuits[:halfHtlcs]) - - // Both of these circuits should have successfully added, as this is the - // first attempt to send them. - if len(fwdActions.Adds) != halfHtlcs { - t.Fatalf("expected %d circuits to be added", halfHtlcs) - } - alice.assertNumPendingNumOpenCircuits(2, 0) - - // Since both were committed successfully, we will now deliver them to - // Alice's link. 
- for _, addPkt := range addPkts[:halfHtlcs] { - if err := alice.link.HandleSwitchPacket(addPkt); err != nil { - t.Fatalf("unable to handle switch packet: %v", err) - } - } - - // Wait until Alice's link has sent both HTLCs via the peer. - alice.checkSent(addPkts[:halfHtlcs]) - - // The resulting bandwidth should reflect that Alice is paying both - // htlc amounts, in addition to both htlc fees. - assertLinkBandwidth(t, alice.link, - aliceStartingBandwidth-halfHtlcs*(htlcAmt+htlcFee), - ) - - // Now, initiate a state transition by Alice so that the pending HTLCs - // are locked in. This will *not* involve any participation by Bob, - // which ensures the commitment will remain in a pending state. - alice.trySignNextCommitment() - alice.assertNumPendingNumOpenCircuits(2, 2) - - // Restart Alice's link, which simulates a disconnection with the remote - // peer. - cleanUp = alice.restart(false) - defer cleanUp() - - alice.assertNumPendingNumOpenCircuits(2, 2) - - // Make a second attempt to commit the first two circuits. This can - // happen if the incoming link flaps, but also allows us to verify that - // the circuits were trimmed properly. - fwdActions = alice.commitCircuits(circuits[:halfHtlcs]) - - // Since Alice has a pending commitment with the first two HTLCs, the - // restart should not have trimmed them from the circuit map. - // Therefore, we expect both of these circuits to be dropped by the - // switch, as keystones should still be set. - if len(fwdActions.Drops) != halfHtlcs { - t.Fatalf("expected %d packets to be dropped", halfHtlcs) - } - - // The resulting bandwidth should remain unchanged from before, - // reflecting that Alice is paying both htlc amounts, in addition to - // both htlc fees. - assertLinkBandwidth(t, alice.link, - aliceStartingBandwidth-halfHtlcs*(htlcAmt+htlcFee), - ) - - // Now, restart Alice's link *and* the entire switch. 
This will ensure - // that entire circuit map is reloaded from disk, and we can now test - // against the behavioral differences of committing circuits that - // conflict with duplicate circuits after a restart. - cleanUp = alice.restart(true) - defer cleanUp() - - alice.assertNumPendingNumOpenCircuits(2, 2) - - // Alice should not send out any messages. Even though Alice has a - // pending commitment transaction, channel reestablishment is not - // enabled in this test. - select { - case <-alice.msgs: - t.Fatalf("message should not have been sent by Alice") - case <-time.After(time.Second): - } - - // We will now try to commit the circuits for all of our HTLCs. The - // first two are already on the pending commitment transaction, the - // latter two are new HTLCs. - fwdActions = alice.commitCircuits(circuits) - - // The first two circuits should have been dropped, as they are still on - // the pending commitment transaction, and the restart should not have - // trimmed the circuits for these valid HTLCs. - if len(fwdActions.Drops) != halfHtlcs { - t.Fatalf("expected %d packets to be dropped", halfHtlcs) - } - // The latter two circuits are unknown the circuit map, and should - // report being added. - if len(fwdActions.Adds) != halfHtlcs { - t.Fatalf("expected %d packets to be added", halfHtlcs) - } - - // Deliver the latter two HTLCs to Alice's links so that they can be - // processed and added to the in-memory commitment state. - for _, addPkt := range addPkts[halfHtlcs:] { - if err := alice.link.HandleSwitchPacket(addPkt); err != nil { - t.Fatalf("unable to handle switch packet: %v", err) - } - } - - // Wait for Alice to send the two latter HTLCs via the peer. - alice.checkSent(addPkts[halfHtlcs:]) - - // With two HTLCs on the pending commit, and two added to the in-memory - // commitment state, the resulting bandwidth should reflect that Alice - // is paying the all htlc amounts in addition to all htlc fees. 
- assertLinkBandwidth(t, alice.link, - aliceStartingBandwidth-numHtlcs*(htlcAmt+htlcFee), - ) - - // We will try to initiate a state transition for Alice, which will - // ensure the circuits for the two in-memory HTLCs are opened. However, - // since we have a pending commitment, these HTLCs will not actually be - // included in a commitment. - alice.trySignNextCommitment() - alice.assertNumPendingNumOpenCircuits(4, 4) - - // Restart Alice's link to simulate a disconnect. Since the switch - // remains up throughout, the two latter HTLCs will remain in the link's - // mailbox, and will reprocessed upon being reattached to the link. - cleanUp = alice.restart(false) - defer cleanUp() - - alice.assertNumPendingNumOpenCircuits(4, 2) - - // Again, try to recommit all of our circuits. - fwdActions = alice.commitCircuits(circuits) - - // It is expected that all of these will get dropped by the switch. - // The first two circuits are still open as a result of being on the - // commitment transaction. The latter two should have had their open - // circuits trimmed, *but* since the HTLCs are still in Alice's mailbox, - // the switch knows not to fail them as a result of the latter two - // circuits never having been loaded from disk. - if len(fwdActions.Drops) != numHtlcs { - t.Fatalf("expected %d packets to be dropped", numHtlcs) - } - - // Wait for the latter two htlcs to be pulled from the mailbox, added to - // the in-memory channel state, and sent out via the peer. - alice.checkSent(addPkts[halfHtlcs:]) - - // This should result in reconstructing the same bandwidth as our last - // assertion. There are two HTLCs on the pending commit, and two added - // to the in-memory commitment state, the resulting bandwidth should - // reflect that Alice is paying the all htlc amounts in addition to all - // htlc fees. 
- assertLinkBandwidth(t, alice.link, - aliceStartingBandwidth-numHtlcs*(htlcAmt+htlcFee), - ) - - // Again, we will try to initiate a state transition for Alice, which - // will ensure the circuits for the two in-memory HTLCs are opened. - // As before, these HTLCs will not actually be included in a commitment - // since we have a pending commitment. - alice.trySignNextCommitment() - alice.assertNumPendingNumOpenCircuits(4, 4) - - // As a final persistence check, we will restart the link and switch, - // wiping the latter two HTLCs from memory, and forcing their circuits - // to be reloaded from disk. - cleanUp = alice.restart(true) - defer cleanUp() - - alice.assertNumPendingNumOpenCircuits(4, 2) - - // Alice's mailbox will be empty after the restart, and no channel - // reestablishment is configured, so no messages will be sent upon - // restart. - select { - case <-alice.msgs: - t.Fatalf("message should not have been sent by Alice") - case <-time.After(time.Second): - } - - // Finally, make one last attempt to commit all circuits. - fwdActions = alice.commitCircuits(circuits) - - // The first two HTLCs should still be dropped by the htlcswitch. Their - // existence on the pending commitment transaction should prevent their - // open circuits from being trimmed. - if len(fwdActions.Drops) != halfHtlcs { - t.Fatalf("expected %d packets to be dropped", halfHtlcs) - } - // The latter two HTLCs should now be failed by the switch. These will - // have been trimmed by the link or switch restarting, and since the - // HTLCs are known to be lost from memory (since their circuits were - // loaded from disk), it is safe fail them back as they won't ever be - // delivered to the outgoing link. - if len(fwdActions.Fails) != halfHtlcs { - t.Fatalf("expected %d packets to be dropped", halfHtlcs) - } - - // Since the latter two HTLCs have been completely dropped from memory, - // only the first two HTLCs we added should still be reflected in the - // channel bandwidth. 
- assertLinkBandwidth(t, alice.link, - aliceStartingBandwidth-halfHtlcs*(htlcAmt+htlcFee), - ) -} - -// TestChannelLinkTrimCircuitsNoCommit checks that the switch and link properly trim -// circuits if the ADDs corresponding to open circuits are never committed. -func TestChannelLinkTrimCircuitsNoCommit(t *testing.T) { - if !build.IsDevBuild() { - t.Fatalf("htlcswitch tests must be run with '-tags debug") - } - - t.Parallel() - - chanAmt := btcutil.UnitsPerCoin() * 5 - const ( - numHtlcs = 4 - halfHtlcs = numHtlcs / 2 - ) - - // We'll start by creating a new link with our chanAmt (5 BTC). We will - // only be testing Alice's behavior, so the reference to Bob's channel - // state is unnecessary. - aliceLink, _, batchTicker, start, cleanUp, restore, err := - newSingleLinkTestHarness(chanAmt, 0) - if err != nil { - t.Fatalf("unable to create link: %v", err) - } - defer cleanUp() - - if err := start(); err != nil { - t.Fatalf("unable to start test harness: %v", err) - } - - alice := newPersistentLinkHarness( - t, aliceLink, batchTicker, restore, - ) - - // We'll put Alice into hodl.Commit mode, such that the circuits for any - // outgoing ADDs are opened, but the changes are not committed in the - // channel state. - alice.coreLink.cfg.HodlMask = hodl.Commit.Mask() - - // Compute the static fees that will be used to determine the - // correctness of Alice's bandwidth when forwarding HTLCs. - estimator := chainfee.NewStaticEstimator(6000, 0) - feePerKw, err := estimator.EstimateFeePerKW(1) - if err != nil { - t.Fatalf("unable to query fee estimator: %v", err) - } - - defaultCommitFee := alice.channel.StateSnapshot().CommitFee - htlcFee := lnwire.NewMSatFromSatoshis( - feePerKw.FeeForWeight(input.HTLCWeight), - ) - - // The starting bandwidth of the channel should be exactly the amount - // that we created the channel between her and Bob, minus the commitment - // fee and fee for adding an additional HTLC. 
- expectedBandwidth := lnwire.NewMSatFromSatoshis( - chanAmt-defaultCommitFee, - ) - htlcFee - assertLinkBandwidth(t, alice.link, expectedBandwidth) - - // Capture Alice's starting bandwidth to perform later, relative - // bandwidth assertions. - aliceStartingBandwidth := alice.link.Bandwidth() - - // Next, we'll create an HTLC worth 1 BTC that will be used as a dummy - // message for the test. - var mockBlob [lnwire.OnionPacketSize]byte - htlcAmt := lnwire.NewMSatFromSatoshis(btcutil.UnitsPerCoin()) - _, htlc, _, err := generatePayment(htlcAmt, htlcAmt, 5, mockBlob) - if err != nil { - t.Fatalf("unable to create payment: %v", err) - } - - // Create `numHtlc` htlcPackets and payment circuits that will be used - // to drive the test. All of the packets will use the same dummy HTLC. - addPkts, circuits := genAddsAndCircuits(numHtlcs, htlc) - - // To begin the test, start by committing the circuits belong to our - // first two HTLCs. - fwdActions := alice.commitCircuits(circuits[:halfHtlcs]) - - // Both of these circuits should have successfully added, as this is the - // first attempt to send them. - if len(fwdActions.Adds) != halfHtlcs { - t.Fatalf("expected %d circuits to be added", halfHtlcs) - } - - // Since both were committed successfully, we will now deliver them to - // Alice's link. - for _, addPkt := range addPkts[:halfHtlcs] { - if err := alice.link.HandleSwitchPacket(addPkt); err != nil { - t.Fatalf("unable to handle switch packet: %v", err) - } - } - - // Wait until Alice's link has sent both HTLCs via the peer. - alice.checkSent(addPkts[:halfHtlcs]) - - // The resulting bandwidth should reflect that Alice is paying both - // htlc amounts, in addition to both htlc fees. - assertLinkBandwidth(t, alice.link, - aliceStartingBandwidth-halfHtlcs*(htlcAmt+htlcFee), - ) - - alice.assertNumPendingNumOpenCircuits(2, 0) - - // Now, init a state transition by Alice to try and commit the HTLCs. 
- // Since she is in hodl.Commit mode, this will fail, but the circuits - // will be opened persistently. - alice.trySignNextCommitment() - - alice.assertNumPendingNumOpenCircuits(2, 2) - - // Restart Alice's link, which simulates a disconnection with the remote - // peer. Alice's link and switch should trim the circuits that were - // opened but not committed. - cleanUp = alice.restart(false, hodl.Commit) - defer cleanUp() - - alice.assertNumPendingNumOpenCircuits(2, 0) - - // The first two HTLCs should have been reset in Alice's mailbox since - // the switch was not shutdown. Knowing this the switch should drop the - // two circuits, even if the circuits were trimmed. - fwdActions = alice.commitCircuits(circuits[:halfHtlcs]) - if len(fwdActions.Drops) != halfHtlcs { - t.Fatalf("expected %d packets to be dropped since "+ - "the switch has not been restarted", halfHtlcs) - } - - // Wait for alice to process the first two HTLCs resend them via the - // peer. - alice.checkSent(addPkts[:halfHtlcs]) - - // The resulting bandwidth should reflect that Alice is paying both htlc - // amounts, in addition to both htlc fees. - assertLinkBandwidth(t, alice.link, - aliceStartingBandwidth-halfHtlcs*(htlcAmt+htlcFee), - ) - - // Again, initiate another state transition by Alice to try and commit - // the HTLCs. Since she is in hodl.Commit mode, this will fail, but the - // circuits will be opened persistently. - alice.trySignNextCommitment() - alice.assertNumPendingNumOpenCircuits(2, 2) - - // Now, we we will do a full restart of the link and switch, configuring - // Alice again in hodl.Commit mode. Since none of the HTLCs were - // actually committed, the previously opened circuits should be trimmed - // by both the link and switch. - cleanUp = alice.restart(true, hodl.Commit) - defer cleanUp() - - alice.assertNumPendingNumOpenCircuits(2, 0) - - // Attempt another commit of our first two circuits. 
Both should fail, - // as the opened circuits should have been trimmed, and circuit map - // recognizes that these HTLCs were lost during the restart. - fwdActions = alice.commitCircuits(circuits[:halfHtlcs]) - if len(fwdActions.Fails) != halfHtlcs { - t.Fatalf("expected %d packets to be failed", halfHtlcs) - } - - // Bob should not receive any HTLCs from Alice, since Alice's mailbox is - // empty and there is no pending commitment. - select { - case <-alice.msgs: - t.Fatalf("received unexpected message from Alice") - case <-time.After(time.Second): - } - - // Alice's bandwidth should have reverted back to her starting value. - assertLinkBandwidth(t, alice.link, aliceStartingBandwidth) - - // Now, try to commit the last two payment circuits, which are unused - // thus far. These should succeed without hesitation. - fwdActions = alice.commitCircuits(circuits[halfHtlcs:]) - if len(fwdActions.Adds) != halfHtlcs { - t.Fatalf("expected %d packets to be added", halfHtlcs) - } - - // Deliver the last two HTLCs to the link via Alice's mailbox. - for _, addPkt := range addPkts[halfHtlcs:] { - if err := alice.link.HandleSwitchPacket(addPkt); err != nil { - t.Fatalf("unable to handle switch packet: %v", err) - } - } - - // Verify that Alice processed and sent out the ADD packets via the - // peer. - alice.checkSent(addPkts[halfHtlcs:]) - - // The resulting bandwidth should reflect that Alice is paying both htlc - // amounts, in addition to both htlc fees. - assertLinkBandwidth(t, alice.link, - aliceStartingBandwidth-halfHtlcs*(htlcAmt+htlcFee), - ) - - // Now, initiate a state transition for Alice. Since we are hodl.Commit - // mode, this will only open the circuits that were added to the - // in-memory channel state. - alice.trySignNextCommitment() - alice.assertNumPendingNumOpenCircuits(4, 2) - - // Restart Alice's link, and place her back in hodl.Commit mode. On - // restart, all previously opened circuits should be trimmed by both the - // link and the switch. 
- cleanUp = alice.restart(false, hodl.Commit) - defer cleanUp() - - alice.assertNumPendingNumOpenCircuits(4, 0) - - // Now, try to commit all of known circuits. - fwdActions = alice.commitCircuits(circuits) - - // The first two HTLCs will fail to commit for the same reason as - // before, the circuits have been trimmed. - if len(fwdActions.Fails) != halfHtlcs { - t.Fatalf("expected %d packet to be failed", halfHtlcs) - } - - // The last two HTLCs will be dropped, as thought the circuits are - // trimmed, the switch is aware that the HTLCs are still in Alice's - // mailbox. - if len(fwdActions.Drops) != halfHtlcs { - t.Fatalf("expected %d packet to be dropped", halfHtlcs) - } - - // Wait until Alice reprocesses the last two HTLCs and sends them via - // the peer. - alice.checkSent(addPkts[halfHtlcs:]) - - // Her bandwidth should now reflect having sent only those two HTLCs. - assertLinkBandwidth(t, alice.link, - aliceStartingBandwidth-halfHtlcs*(htlcAmt+htlcFee), - ) - - // Now, initiate a state transition for Alice. Since we are hodl.Commit - // mode, this will only open the circuits that were added to the - // in-memory channel state. - alice.trySignNextCommitment() - alice.assertNumPendingNumOpenCircuits(4, 2) - - // Finally, do one last restart of both the link and switch. This will - // flush the HTLCs from the mailbox. The circuits should now be trimmed - // for all of the HTLCs. - cleanUp = alice.restart(true, hodl.Commit) - defer cleanUp() - - alice.assertNumPendingNumOpenCircuits(4, 0) - - // Bob should not receive any HTLCs from Alice, as none of the HTLCs are - // in Alice's mailbox, and channel reestablishment is disabled. - select { - case <-alice.msgs: - t.Fatalf("received unexpected message from Alice") - case <-time.After(time.Second): - } - - // Attempt to commit the last two circuits, both should now fail since - // though they were opened before shutting down, the circuits have been - // properly trimmed. 
- fwdActions = alice.commitCircuits(circuits[halfHtlcs:]) - if len(fwdActions.Fails) != halfHtlcs { - t.Fatalf("expected %d packet to be failed", halfHtlcs) - } - - // Alice balance should not have changed since the start. - assertLinkBandwidth(t, alice.link, aliceStartingBandwidth) -} - -// TestChannelLinkTrimCircuitsRemoteCommit checks that the switch and link -// don't trim circuits if the ADD is locked in on the remote commitment but -// not on our local commitment. -func TestChannelLinkTrimCircuitsRemoteCommit(t *testing.T) { - t.Parallel() - - chanAmt := btcutil.UnitsPerCoin() * 5 - const ( - numHtlcs = 2 - ) - - // We'll start by creating a new link with our chanAmt (5 BTC). - aliceLink, bobChan, batchTicker, start, cleanUp, restore, err := - newSingleLinkTestHarness(chanAmt, 0) - if err != nil { - t.Fatalf("unable to create link: %v", err) - } - - if err := start(); err != nil { - t.Fatalf("unable to start test harness: %v", err) - } - defer cleanUp() - - alice := newPersistentLinkHarness( - t, aliceLink, batchTicker, restore, - ) - - // Compute the static fees that will be used to determine the - // correctness of Alice's bandwidth when forwarding HTLCs. - estimator := chainfee.NewStaticEstimator(6000, 0) - feePerKw, err := estimator.EstimateFeePerKW(1) - if err != nil { - t.Fatalf("unable to query fee estimator: %v", err) - } - - defaultCommitFee := alice.channel.StateSnapshot().CommitFee - htlcFee := lnwire.NewMSatFromSatoshis( - feePerKw.FeeForWeight(input.HTLCWeight), - ) - - // The starting bandwidth of the channel should be exactly the amount - // that we created the channel between her and Bob, minus the commitment - // fee and fee of adding an HTLC. - expectedBandwidth := lnwire.NewMSatFromSatoshis( - chanAmt-defaultCommitFee, - ) - htlcFee - assertLinkBandwidth(t, alice.link, expectedBandwidth) - - // Capture Alice's starting bandwidth to perform later, relative - // bandwidth assertions. 
- aliceStartingBandwidth := alice.link.Bandwidth() - - // Next, we'll create an HTLC worth 1 BTC that will be used as a dummy - // message for the test. - var mockBlob [lnwire.OnionPacketSize]byte - htlcAmt := lnwire.NewMSatFromSatoshis(btcutil.UnitsPerCoin()) - _, htlc, _, err := generatePayment(htlcAmt, htlcAmt, 5, mockBlob) - if err != nil { - t.Fatalf("unable to create payment: %v", err) - } - - // Create `numHtlc` htlcPackets and payment circuits that will be used - // to drive the test. All of the packets will use the same dummy HTLC. - addPkts, circuits := genAddsAndCircuits(numHtlcs, htlc) - - // To begin the test, start by committing the circuits for our first two - // HTLCs. - fwdActions := alice.commitCircuits(circuits) - - // Both of these circuits should have successfully added, as this is the - // first attempt to send them. - if len(fwdActions.Adds) != numHtlcs { - t.Fatalf("expected %d circuits to be added", numHtlcs) - } - alice.assertNumPendingNumOpenCircuits(2, 0) - - // Since both were committed successfully, we will now deliver them to - // Alice's link. - for _, addPkt := range addPkts { - if err := alice.link.HandleSwitchPacket(addPkt); err != nil { - t.Fatalf("unable to handle switch packet: %v", err) - } - } - - // Wait until Alice's link has sent both HTLCs via the peer. - alice.checkSent(addPkts) - - // Pass both of the htlcs to Bob. - for i, addPkt := range addPkts { - pkt, ok := addPkt.htlc.(*lnwire.UpdateAddHTLC) - if !ok { - t.Fatalf("unable to add packet") - } - - pkt.ID = uint64(i) - - _, err := bobChan.ReceiveHTLC(pkt) - if err != nil { - t.Fatalf("unable to receive htlc: %v", err) - } - } - - // The resulting bandwidth should reflect that Alice is paying both - // htlc amounts, in addition to both htlc fees. - assertLinkBandwidth(t, alice.link, - aliceStartingBandwidth-numHtlcs*(htlcAmt+htlcFee), - ) - - // Now, initiate a state transition by Alice so that the pending HTLCs - // are locked in. 
- alice.trySignNextCommitment() - alice.assertNumPendingNumOpenCircuits(2, 2) - - select { - case aliceMsg := <-alice.msgs: - // Pass the commitment signature to Bob. - sig, ok := aliceMsg.(*lnwire.CommitSig) - if !ok { - t.Fatalf("alice did not send commitment signature") - } - - err := bobChan.ReceiveNewCommitment(sig.CommitSig, sig.HtlcSigs) - if err != nil { - t.Fatalf("unable to receive new commitment: %v", err) - } - case <-time.After(time.Second): - } - - // Next, revoke Bob's current commitment and send it to Alice so that we - // can test that Alice's circuits aren't trimmed. - rev, _, err := bobChan.RevokeCurrentCommitment() - if err != nil { - t.Fatalf("unable to revoke current commitment: %v", err) - } - - _, _, _, _, err = alice.channel.ReceiveRevocation(rev) - if err != nil { - t.Fatalf("unable to receive revocation: %v", err) - } - - // Restart Alice's link, which simulates a disconnection with the remote - // peer. - cleanUp = alice.restart(false) - defer cleanUp() - - alice.assertNumPendingNumOpenCircuits(2, 2) - - // Restart the link + switch and check that the number of open circuits - // doesn't change. - cleanUp = alice.restart(true) - defer cleanUp() - - alice.assertNumPendingNumOpenCircuits(2, 2) -} - -// TestChannelLinkBandwidthChanReserve checks that the bandwidth available -// on the channel link reflects the channel reserve that must be kept -// at all times. -func TestChannelLinkBandwidthChanReserve(t *testing.T) { - t.Parallel() - - // First start a link that has a balance greater than it's - // channel reserve. 
- chanAmt := btcutil.UnitsPerCoin() * 5 - chanReserve := btcutil.UnitsPerCoin() * 1 - aliceLink, bobChannel, batchTimer, start, cleanUp, _, err := - newSingleLinkTestHarness(chanAmt, chanReserve) - if err != nil { - t.Fatalf("unable to create link: %v", err) - } - defer cleanUp() - - if err := start(); err != nil { - t.Fatalf("unable to start test harness: %v", err) - } - - var ( - mockBlob [lnwire.OnionPacketSize]byte - coreLink = aliceLink.(*channelLink) - coreChan = coreLink.channel - defaultCommitFee = coreChan.StateSnapshot().CommitFee - aliceStartingBandwidth = aliceLink.Bandwidth() - aliceMsgs = coreLink.cfg.Peer.(*mockPeer).sentMsgs - ) - - estimator := chainfee.NewStaticEstimator(6000, 0) - feePerKw, err := estimator.EstimateFeePerKW(1) - if err != nil { - t.Fatalf("unable to query fee estimator: %v", err) - } - htlcFee := lnwire.NewMSatFromSatoshis( - feePerKw.FeeForWeight(input.HTLCWeight), - ) - - // The starting bandwidth of the channel should be exactly the amount - // that we created the channel between her and Bob, minus the channel - // reserve, commitment fee and fee for adding an additional HTLC. - expectedBandwidth := lnwire.NewMSatFromSatoshis( - chanAmt-defaultCommitFee-chanReserve) - htlcFee - assertLinkBandwidth(t, aliceLink, expectedBandwidth) - - // Next, we'll create an HTLC worth 3 BTC, and send it into the link as - // a switch initiated payment. The resulting bandwidth should - // now be decremented to reflect the new HTLC. 
- htlcAmt := lnwire.NewMSatFromSatoshis(3 * btcutil.UnitsPerCoin()) - invoice, htlc, _, err := generatePayment(htlcAmt, htlcAmt, 5, mockBlob) - if err != nil { - t.Fatalf("unable to create payment: %v", err) - } - - addPkt := &htlcPacket{ - htlc: htlc, - obfuscator: NewMockObfuscator(), - } - circuit := makePaymentCircuit(&htlc.PaymentHash, addPkt) - _, err = coreLink.cfg.Switch.commitCircuits(&circuit) - if err != nil { - t.Fatalf("unable to commit circuit: %v", err) - } - - aliceLink.HandleSwitchPacket(addPkt) - time.Sleep(time.Millisecond * 100) - assertLinkBandwidth(t, aliceLink, aliceStartingBandwidth-htlcAmt-htlcFee) - - // Alice should send the HTLC to Bob. - var msg lnwire.Message - select { - case msg = <-aliceMsgs: - case <-time.After(15 * time.Second): - t.Fatalf("did not receive message") - } - - addHtlc, ok := msg.(*lnwire.UpdateAddHTLC) - if !ok { - t.Fatalf("expected UpdateAddHTLC, got %T", msg) - } - - bobIndex, err := bobChannel.ReceiveHTLC(addHtlc) - if err != nil { - t.Fatalf("bob failed receiving htlc: %v", err) - } - - // Lock in the HTLC. - if err := updateState(batchTimer, coreLink, bobChannel, true); err != nil { - t.Fatalf("unable to update state: %v", err) - } - - assertLinkBandwidth(t, aliceLink, aliceStartingBandwidth-htlcAmt-htlcFee) - - // If we now send in a valid HTLC settle for the prior HTLC we added, - // then the bandwidth should remain unchanged as the remote party will - // gain additional channel balance. - err = bobChannel.SettleHTLC(*invoice.Terms.PaymentPreimage, bobIndex, nil, nil, nil) - if err != nil { - t.Fatalf("unable to settle htlc: %v", err) - } - htlcSettle := &lnwire.UpdateFulfillHTLC{ - ID: bobIndex, - PaymentPreimage: *invoice.Terms.PaymentPreimage, - } - aliceLink.HandleChannelUpdate(htlcSettle) - time.Sleep(time.Millisecond * 500) - - // Since the settle is not locked in yet, Alice's bandwidth should still - // reflect that she has to pay the fee. 
- assertLinkBandwidth(t, aliceLink, aliceStartingBandwidth-htlcAmt-htlcFee) - - // Lock in the settle. - if err := updateState(batchTimer, coreLink, bobChannel, false); err != nil { - t.Fatalf("unable to update state: %v", err) - } - - time.Sleep(time.Millisecond * 100) - assertLinkBandwidth(t, aliceLink, aliceStartingBandwidth-htlcAmt) - - // Now we create a channel that has a channel reserve that is - // greater than it's balance. In these case only payments can - // be received on this channel, not sent. The available bandwidth - // should therefore be 0. - bobChanAmt := btcutil.UnitsPerCoin() * 1 - bobChanReserve := btcutil.Amount(btcutil.UnitsPerCoinF() * 1.5) - bobLink, _, _, start, bobCleanUp, _, err := - newSingleLinkTestHarness(bobChanAmt, bobChanReserve) - if err != nil { - t.Fatalf("unable to create link: %v", err) - } - defer bobCleanUp() - - if err := start(); err != nil { - t.Fatalf("unable to start test harness: %v", err) - } - - // Make sure bandwidth is reported as 0. - assertLinkBandwidth(t, bobLink, 0) -} - -// TestChannelRetransmission tests the ability of the channel links to -// synchronize theirs states after abrupt disconnect. -func TestChannelRetransmission(t *testing.T) { - t.Parallel() - - retransmissionTests := []struct { - name string - messages []expectedMessage - }{ - { - // Tests the ability of the channel links states to be - // synchronized after remote node haven't receive - // revoke and ack message. - name: "intercept last alice revoke_and_ack", - messages: []expectedMessage{ - // First initialization of the channel. - {"alice", "bob", &lnwire.ChannelReestablish{}, false}, - {"bob", "alice", &lnwire.ChannelReestablish{}, false}, - - {"alice", "bob", &lnwire.FundingLocked{}, false}, - {"bob", "alice", &lnwire.FundingLocked{}, false}, - - // Send payment from Alice to Bob and intercept - // the last revocation message, in this case - // Bob should not proceed the payment farther. 
- {"alice", "bob", &lnwire.UpdateAddHTLC{}, false}, - {"alice", "bob", &lnwire.CommitSig{}, false}, - {"bob", "alice", &lnwire.RevokeAndAck{}, false}, - {"bob", "alice", &lnwire.CommitSig{}, false}, - {"alice", "bob", &lnwire.RevokeAndAck{}, true}, - - // Reestablish messages exchange on nodes restart. - {"alice", "bob", &lnwire.ChannelReestablish{}, false}, - {"bob", "alice", &lnwire.ChannelReestablish{}, false}, - - // Alice should resend the revoke_and_ack - // message to Bob because Bob claimed it in the - // re-establish message. - {"alice", "bob", &lnwire.RevokeAndAck{}, false}, - - // Proceed the payment farther by sending the - // fulfilment message and trigger the state - // update. - {"bob", "alice", &lnwire.UpdateFulfillHTLC{}, false}, - {"bob", "alice", &lnwire.CommitSig{}, false}, - {"alice", "bob", &lnwire.RevokeAndAck{}, false}, - {"alice", "bob", &lnwire.CommitSig{}, false}, - {"bob", "alice", &lnwire.RevokeAndAck{}, false}, - }, - }, - { - // Tests the ability of the channel links states to be - // synchronized after remote node haven't receive - // revoke and ack message. - name: "intercept bob revoke_and_ack commit_sig messages", - messages: []expectedMessage{ - {"alice", "bob", &lnwire.ChannelReestablish{}, false}, - {"bob", "alice", &lnwire.ChannelReestablish{}, false}, - - {"alice", "bob", &lnwire.FundingLocked{}, false}, - {"bob", "alice", &lnwire.FundingLocked{}, false}, - - // Send payment from Alice to Bob and intercept - // the last revocation message, in this case - // Bob should not proceed the payment farther. - {"alice", "bob", &lnwire.UpdateAddHTLC{}, false}, - {"alice", "bob", &lnwire.CommitSig{}, false}, - - // Intercept bob commit sig and revoke and ack - // messages. - {"bob", "alice", &lnwire.RevokeAndAck{}, true}, - {"bob", "alice", &lnwire.CommitSig{}, true}, - - // Reestablish messages exchange on nodes restart. 
- {"alice", "bob", &lnwire.ChannelReestablish{}, false}, - {"bob", "alice", &lnwire.ChannelReestablish{}, false}, - - // Bob should resend previously intercepted messages. - {"bob", "alice", &lnwire.RevokeAndAck{}, false}, - {"bob", "alice", &lnwire.CommitSig{}, false}, - - // Proceed the payment farther by sending the - // fulfilment message and trigger the state - // update. - {"alice", "bob", &lnwire.RevokeAndAck{}, false}, - {"bob", "alice", &lnwire.UpdateFulfillHTLC{}, false}, - {"bob", "alice", &lnwire.CommitSig{}, false}, - {"alice", "bob", &lnwire.RevokeAndAck{}, false}, - {"alice", "bob", &lnwire.CommitSig{}, false}, - {"bob", "alice", &lnwire.RevokeAndAck{}, false}, - }, - }, - { - // Tests the ability of the channel links states to be - // synchronized after remote node haven't receive - // update and commit sig messages. - name: "intercept update add htlc and commit sig messages", - messages: []expectedMessage{ - {"alice", "bob", &lnwire.ChannelReestablish{}, false}, - {"bob", "alice", &lnwire.ChannelReestablish{}, false}, - - {"alice", "bob", &lnwire.FundingLocked{}, false}, - {"bob", "alice", &lnwire.FundingLocked{}, false}, - - // Attempt make a payment from Alice to Bob, - // which is intercepted, emulating the Bob - // server abrupt stop. - {"alice", "bob", &lnwire.UpdateAddHTLC{}, true}, - {"alice", "bob", &lnwire.CommitSig{}, true}, - - // Restart of the nodes, and after that nodes - // should exchange the reestablish messages. - {"alice", "bob", &lnwire.ChannelReestablish{}, false}, - {"bob", "alice", &lnwire.ChannelReestablish{}, false}, - - {"alice", "bob", &lnwire.FundingLocked{}, false}, - {"bob", "alice", &lnwire.FundingLocked{}, false}, - - // After Bob has notified Alice that he didn't - // receive updates Alice should re-send them. 
- {"alice", "bob", &lnwire.UpdateAddHTLC{}, false}, - {"alice", "bob", &lnwire.CommitSig{}, false}, - - {"bob", "alice", &lnwire.RevokeAndAck{}, false}, - {"bob", "alice", &lnwire.CommitSig{}, false}, - {"alice", "bob", &lnwire.RevokeAndAck{}, false}, - - {"bob", "alice", &lnwire.UpdateFulfillHTLC{}, false}, - {"bob", "alice", &lnwire.CommitSig{}, false}, - {"alice", "bob", &lnwire.RevokeAndAck{}, false}, - {"alice", "bob", &lnwire.CommitSig{}, false}, - {"bob", "alice", &lnwire.RevokeAndAck{}, false}, - }, - }, - } - paymentWithRestart := func(t *testing.T, messages []expectedMessage) { - channels, cleanUp, restoreChannelsFromDb, err := createClusterChannels( - btcutil.UnitsPerCoin()*5, - btcutil.UnitsPerCoin()*5) - if err != nil { - t.Fatalf("unable to create channel: %v", err) - } - defer cleanUp() - - chanID := lnwire.NewChanIDFromOutPoint(channels.aliceToBob.ChannelPoint()) - serverErr := make(chan error, 4) - - aliceInterceptor := createInterceptorFunc("[alice] <-- [bob]", - "alice", messages, chanID, false) - bobInterceptor := createInterceptorFunc("[alice] --> [bob]", - "bob", messages, chanID, false) - - ct := newConcurrentTester(t) - - // Add interceptor to check the order of Bob and Alice - // messages. - n := newThreeHopNetwork(ct, - channels.aliceToBob, channels.bobToAlice, - channels.bobToCarol, channels.carolToBob, - testStartingHeight, - ) - n.aliceServer.intersect(aliceInterceptor) - n.bobServer.intersect(bobInterceptor) - if err := n.start(); err != nil { - ct.Fatalf("unable to start three hop network: %v", err) - } - defer n.stop() - - bobBandwidthBefore := n.firstBobChannelLink.Bandwidth() - aliceBandwidthBefore := n.aliceChannelLink.Bandwidth() - - amount := lnwire.NewMSatFromSatoshis(btcutil.UnitsPerCoin()) - htlcAmt, totalTimelock, hops := generateHops(amount, testStartingHeight, - n.firstBobChannelLink) - - // Send payment which should fail because we intercept the - // update and commit messages. - // - // TODO(roasbeef); increase timeout? 
- receiver := n.bobServer - firstHop := n.firstBobChannelLink.ShortChanID() - rhash, err := makePayment( - n.aliceServer, receiver, firstHop, hops, amount, - htlcAmt, totalTimelock, - ).Wait(time.Second * 5) - if err == nil { - ct.Fatalf("payment shouldn't haven been finished") - } - - // Stop network cluster and create new one, with the old - // channels states. Also do the *hack* - save the payment - // receiver to pass it in new channel link, otherwise payment - // will be failed because of the unknown payment hash. Hack - // will be removed with sphinx payment. - bobRegistry := n.bobServer.registry - n.stop() - - channels, err = restoreChannelsFromDb() - if err != nil { - ct.Fatalf("unable to restore channels from database: %v", err) - } - - n = newThreeHopNetwork(ct, channels.aliceToBob, channels.bobToAlice, - channels.bobToCarol, channels.carolToBob, testStartingHeight) - n.firstBobChannelLink.cfg.Registry = bobRegistry - n.aliceServer.intersect(aliceInterceptor) - n.bobServer.intersect(bobInterceptor) - - if err := n.start(); err != nil { - ct.Fatalf("unable to start three hop network: %v", err) - } - defer n.stop() - - // Wait for reestablishment to be proceeded and invoice to be settled. - // TODO(andrew.shvv) Will be removed if we move the notification center - // to the channel link itself. - - var invoice channeldb.Invoice - for i := 0; i < 20; i++ { - select { - case <-time.After(time.Millisecond * 200): - case serverErr := <-serverErr: - ct.Fatalf("server error: %v", serverErr) - } - - // Check that alice invoice wasn't settled and - // bandwidth of htlc links hasn't been changed. 
- invoice, err = receiver.registry.LookupInvoice(rhash) - if err != nil { - err = er.Errorf("unable to get invoice: %v", err) - continue - } - if invoice.State != channeldb.ContractSettled { - err = er.Errorf("alice invoice haven't been settled") - continue - } - - aliceExpectedBandwidth := aliceBandwidthBefore - htlcAmt - if aliceExpectedBandwidth != n.aliceChannelLink.Bandwidth() { - err = er.Errorf("expected alice to have %v, instead has %v", - aliceExpectedBandwidth, n.aliceChannelLink.Bandwidth()) - continue - } - - bobExpectedBandwidth := bobBandwidthBefore + htlcAmt - if bobExpectedBandwidth != n.firstBobChannelLink.Bandwidth() { - err = er.Errorf("expected bob to have %v, instead has %v", - bobExpectedBandwidth, n.firstBobChannelLink.Bandwidth()) - continue - } - - break - } - - if err != nil { - ct.Fatal(err) - } - } - - for _, test := range retransmissionTests { - passed := t.Run(test.name, func(t *testing.T) { - paymentWithRestart(t, test.messages) - }) - - if !passed { - break - } - } - -} - -// TestShouldAdjustCommitFee tests the shouldAdjustCommitFee pivot function to -// ensure that ie behaves properly. We should only update the fee if it -// deviates from our current fee by more 10% or more. -func TestShouldAdjustCommitFee(t *testing.T) { - tests := []struct { - netFee chainfee.SatPerKWeight - chanFee chainfee.SatPerKWeight - shouldAdjust bool - }{ - - // The network fee is 3x lower than the current commitment - // transaction. As a result, we should adjust our fee to match - // it. - { - netFee: 100, - chanFee: 3000, - shouldAdjust: true, - }, - - // The network fee is lower than the current commitment fee, - // but only slightly so, so we won't update the commitment fee. - { - netFee: 2999, - chanFee: 3000, - shouldAdjust: false, - }, - - // The network fee is lower than the commitment fee, but only - // right before it crosses our current threshold. 
- { - netFee: 1000, - chanFee: 1099, - shouldAdjust: false, - }, - - // The network fee is lower than the commitment fee, and within - // our range of adjustment, so we should adjust. - { - netFee: 1000, - chanFee: 1100, - shouldAdjust: true, - }, - - // The network fee is 2x higher than our commitment fee, so we - // should adjust upwards. - { - netFee: 2000, - chanFee: 1000, - shouldAdjust: true, - }, - - // The network fee is higher than our commitment fee, but only - // slightly so, so we won't update. - { - netFee: 1001, - chanFee: 1000, - shouldAdjust: false, - }, - - // The network fee is higher than our commitment fee, but - // hasn't yet crossed our activation threshold. - { - netFee: 1100, - chanFee: 1099, - shouldAdjust: false, - }, - - // The network fee is higher than our commitment fee, and - // within our activation threshold, so we should update our - // fee. - { - netFee: 1100, - chanFee: 1000, - shouldAdjust: true, - }, - - // Our fees match exactly, so we shouldn't update it at all. - { - netFee: 1000, - chanFee: 1000, - shouldAdjust: false, - }, - } - - for i, test := range tests { - adjustedFee := shouldAdjustCommitFee( - test.netFee, test.chanFee, - ) - - if adjustedFee && !test.shouldAdjust { - t.Fatalf("test #%v failed: net_fee=%v, "+ - "chan_fee=%v, adjust_expect=%v, adjust_returned=%v", - i, test.netFee, test.chanFee, test.shouldAdjust, - adjustedFee) - } - } -} - -// TestChannelLinkShutdownDuringForward asserts that a link can be fully -// stopped when it is trying to send synchronously through the switch. The -// specific case this can occur is when a link forwards incoming Adds. We test -// this by forcing the switch into a state where it will not accept new packets, -// and then killing the link, which can only succeed if forwarding can be -// canceled by a call to Stop. -func TestChannelLinkShutdownDuringForward(t *testing.T) { - t.Parallel() - - // First, we'll create our traditional three hop network. 
We're - // interested in testing the ability to stop the link when it is - // synchronously forwarding to the switch, which happens when an - // incoming link forwards Adds. Thus, the test will be performed - // against Bob's first link. - channels, cleanUp, _, err := createClusterChannels( - btcutil.UnitsPerCoin()*3, - btcutil.UnitsPerCoin()*5) - if err != nil { - t.Fatalf("unable to create channel: %v", err) - } - defer cleanUp() - - n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice, - channels.bobToCarol, channels.carolToBob, testStartingHeight) - - if err := n.start(); err != nil { - t.Fatal(err) - } - defer n.stop() - defer n.feeEstimator.Stop() - - // Define a helper method that strobes the switch's log ticker, and - // unblocks after nothing has been pulled for two seconds. - waitForBobsSwitchToBlock := func() { - bobSwitch := n.firstBobChannelLink.cfg.Switch - ticker := bobSwitch.cfg.LogEventTicker.(*ticker.Force) - timeout := time.After(15 * time.Second) - for { - time.Sleep(50 * time.Millisecond) - select { - case ticker.Force <- time.Now(): - - case <-time.After(2 * time.Second): - return - - case <-timeout: - t.Fatalf("switch did not block") - } - } - } - - // Define a helper method that strobes the link's batch ticker, and - // unblocks after nothing has been pulled for two seconds. - waitForBobsIncomingLinkToBlock := func() { - ticker := n.firstBobChannelLink.cfg.BatchTicker.(*ticker.Force) - timeout := time.After(15 * time.Second) - for { - time.Sleep(50 * time.Millisecond) - select { - case ticker.Force <- time.Now(): - - case <-time.After(2 * time.Second): - // We'll give a little extra time here, to - // ensure that the packet is being pressed - // against the htlcPlex. 
- time.Sleep(50 * time.Millisecond) - return - - case <-timeout: - t.Fatalf("link did not block") - } - } - } - - // To test that the cancellation is happening properly, we will set the - // switch's htlcPlex to nil, so that calls to routeAsync block, and can - // only exit if the link (or switch) is exiting. We will only be testing - // the link here. - // - // In order to avoid data races, we need to ensure the switch isn't - // selecting on that channel in the meantime. We'll prevent this by - // first acquiring the index mutex and forcing a log event so that the - // htlcForwarder is blocked inside the logTicker case, which also needs - // the indexMtx. - n.firstBobChannelLink.cfg.Switch.indexMtx.Lock() - - // Strobe the log ticker, and wait for switch to stop accepting any more - // log ticks. - waitForBobsSwitchToBlock() - - // While the htlcForwarder is blocked, swap out the htlcPlex with a nil - // channel, and unlock the indexMtx to allow return to the - // htlcForwarder's main select. After this, any attempt to forward - // through the switch will block. - n.firstBobChannelLink.cfg.Switch.htlcPlex = nil - n.firstBobChannelLink.cfg.Switch.indexMtx.Unlock() - - // Now, make a payment from Alice to Carol, which should cause Bob's - // incoming link to block when it tries to submit the packet to the nil - // htlcPlex. - amount := lnwire.NewMSatFromSatoshis(btcutil.UnitsPerCoin()) - htlcAmt, totalTimelock, hops := generateHops( - amount, testStartingHeight, - n.firstBobChannelLink, n.carolChannelLink, - ) - - firstHop := n.firstBobChannelLink.ShortChanID() - makePayment( - n.aliceServer, n.carolServer, firstHop, hops, amount, htlcAmt, - totalTimelock, - ) - - // Strobe the batch ticker of Bob's incoming link, waiting for it to - // become fully blocked. - waitForBobsIncomingLinkToBlock() - - // Finally, stop the link to test that it can exit while synchronously - // forwarding Adds to the switch. 
- done := make(chan struct{}) - go func() { - n.firstBobChannelLink.Stop() - close(done) - }() - - select { - case <-time.After(3 * time.Second): - t.Fatalf("unable to shutdown link while fwding incoming Adds") - case <-done: - } -} - -// TestChannelLinkUpdateCommitFee tests that when a new block comes in, the -// channel link properly checks to see if it should update the commitment fee. -func TestChannelLinkUpdateCommitFee(t *testing.T) { - t.Parallel() - - // First, we'll create our traditional three hop network. We'll only be - // interacting with and asserting the state of two of the end points - // for this test. - aliceInitialBalance := btcutil.UnitsPerCoin() * 3 - channels, cleanUp, _, err := createClusterChannels( - aliceInitialBalance, btcutil.UnitsPerCoin()*5, - ) - if err != nil { - t.Fatalf("unable to create channel: %v", err) - } - defer cleanUp() - - n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice, - channels.bobToCarol, channels.carolToBob, testStartingHeight) - - // First, we'll set up some message interceptors to ensure that the - // proper messages are sent when updating fees. - chanID := n.aliceChannelLink.ChanID() - messages := []expectedMessage{ - {"alice", "bob", &lnwire.ChannelReestablish{}, false}, - {"bob", "alice", &lnwire.ChannelReestablish{}, false}, - - {"alice", "bob", &lnwire.FundingLocked{}, false}, - {"bob", "alice", &lnwire.FundingLocked{}, false}, - - // First fee update. - {"alice", "bob", &lnwire.UpdateFee{}, false}, - {"alice", "bob", &lnwire.CommitSig{}, false}, - {"bob", "alice", &lnwire.RevokeAndAck{}, false}, - {"bob", "alice", &lnwire.CommitSig{}, false}, - {"alice", "bob", &lnwire.RevokeAndAck{}, false}, - - // Second fee update. 
- {"alice", "bob", &lnwire.UpdateFee{}, false}, - {"alice", "bob", &lnwire.CommitSig{}, false}, - {"bob", "alice", &lnwire.RevokeAndAck{}, false}, - {"bob", "alice", &lnwire.CommitSig{}, false}, - {"alice", "bob", &lnwire.RevokeAndAck{}, false}, - } - n.aliceServer.intersect(createInterceptorFunc("[alice] <-- [bob]", - "alice", messages, chanID, false)) - n.bobServer.intersect(createInterceptorFunc("[alice] --> [bob]", - "bob", messages, chanID, false)) - - if err := n.start(); err != nil { - t.Fatal(err) - } - defer n.stop() - defer n.feeEstimator.Stop() - - startingFeeRate := channels.aliceToBob.CommitFeeRate() - - // triggerFeeUpdate is a helper closure to determine whether a fee - // update was triggered and completed properly. - triggerFeeUpdate := func(feeEstimate, newFeeRate chainfee.SatPerKWeight, - shouldUpdate bool) { - - t.Helper() - - // Record the fee rates before the links process the fee update - // to test the case where a fee update isn't triggered. - aliceBefore := channels.aliceToBob.CommitFeeRate() - bobBefore := channels.bobToAlice.CommitFeeRate() - - // For the sake of this test, we'll reset the timer so that - // Alice's link queries for a new network fee. - n.aliceChannelLink.updateFeeTimer.Reset(time.Millisecond) - - // Next, we'll send the first fee rate response to Alice. - select { - case n.feeEstimator.byteFeeIn <- feeEstimate: - case <-time.After(time.Second * 5): - t.Fatalf("alice didn't query for the new network fee") - } - - // Give the links some time to process the fee update. - time.Sleep(time.Second) - - // Record the fee rates after the links have processed the fee - // update and ensure they are correct based on whether a fee - // update should have been triggered. 
- aliceAfter := channels.aliceToBob.CommitFeeRate() - bobAfter := channels.bobToAlice.CommitFeeRate() - - switch { - case shouldUpdate && aliceAfter != newFeeRate: - t.Fatalf("alice's fee rate didn't change: expected %v, "+ - "got %v", newFeeRate, aliceAfter) - - case shouldUpdate && bobAfter != newFeeRate: - t.Fatalf("bob's fee rate didn't change: expected %v, "+ - "got %v", newFeeRate, bobAfter) - - case !shouldUpdate && aliceAfter != aliceBefore: - t.Fatalf("alice's fee rate shouldn't have changed: "+ - "expected %v, got %v", aliceAfter, aliceAfter) - - case !shouldUpdate && bobAfter != bobBefore: - t.Fatalf("bob's fee rate shouldn't have changed: "+ - "expected %v, got %v", bobBefore, bobAfter) - } - } - - // Triggering the link to update the fee of the channel with the same - // fee rate should not send a fee update. - triggerFeeUpdate(startingFeeRate, startingFeeRate, false) - - // Triggering the link to update the fee of the channel with a much - // larger fee rate _should_ send a fee update. - newFeeRate := startingFeeRate * 3 - triggerFeeUpdate(newFeeRate, newFeeRate, true) - - // Triggering the link to update the fee of the channel with a fee rate - // that exceeds its maximum fee allocation should result in a fee rate - // corresponding to the maximum fee allocation. - const maxFeeRate chainfee.SatPerKWeight = 207182320 - triggerFeeUpdate(maxFeeRate+1, maxFeeRate, true) -} - -// TestChannelLinkAcceptDuplicatePayment tests that if a link receives an -// incoming HTLC for a payment we have already settled, then it accepts the -// HTLC. We do this to simplify the processing of settles after restarts or -// failures, reducing ambiguity when a batch is only partially processed. -func TestChannelLinkAcceptDuplicatePayment(t *testing.T) { - t.Parallel() - - // First, we'll create our traditional three hop network. We'll only be - // interacting with and asserting the state of two of the end points - // for this test. 
- channels, cleanUp, _, err := createClusterChannels( - btcutil.UnitsPerCoin()*3, - btcutil.UnitsPerCoin()*5) - if err != nil { - t.Fatalf("unable to create channel: %v", err) - } - defer cleanUp() - - n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice, - channels.bobToCarol, channels.carolToBob, testStartingHeight) - if err := n.start(); err != nil { - t.Fatalf("unable to start three hop network: %v", err) - } - defer n.stop() - - amount := lnwire.NewMSatFromSatoshis(btcutil.UnitsPerCoin()) - - // We'll start off by making a payment from Alice to Carol. We'll - // manually generate this request so we can control all the parameters. - htlcAmt, totalTimelock, hops := generateHops(amount, testStartingHeight, - n.firstBobChannelLink, n.carolChannelLink) - blob, err := generateRoute(hops...) - if err != nil { - t.Fatal(err) - } - invoice, htlc, pid, err := generatePayment( - amount, htlcAmt, totalTimelock, blob, - ) - if err != nil { - t.Fatal(err) - } - - err = n.carolServer.registry.AddInvoice(*invoice, htlc.PaymentHash) - if err != nil { - t.Fatalf("unable to add invoice in carol registry: %v", err) - } - - // With the invoice now added to Carol's registry, we'll send the - // payment. - err = n.aliceServer.htlcSwitch.SendHTLC( - n.firstBobChannelLink.ShortChanID(), pid, htlc, - ) - if err != nil { - t.Fatalf("unable to send payment to carol: %v", err) - } - - resultChan, err := n.aliceServer.htlcSwitch.GetPaymentResult( - pid, htlc.PaymentHash, newMockDeobfuscator(), - ) - if err != nil { - t.Fatalf("unable to get payment result: %v", err) - } - - // Now, if we attempt to send the payment *again* it should be rejected - // as it's a duplicate request. 
- err = n.aliceServer.htlcSwitch.SendHTLC( - n.firstBobChannelLink.ShortChanID(), pid, htlc, - ) - if !ErrDuplicateAdd.Is(err) { - t.Fatalf("ErrDuplicateAdd should have been "+ - "received got: %v", err) - } - - select { - case result, ok := <-resultChan: - if !ok { - t.Fatalf("unexpected shutdown") - } - - if result.Error != nil { - t.Fatalf("payment failed: %v", result.Error) - } - case <-time.After(5 * time.Second): - t.Fatalf("payment result did not arrive") - } -} - -// TestChannelLinkAcceptOverpay tests that if we create an invoice for sender, -// and the sender sends *more* than specified in the invoice, then we'll still -// accept it and settle as normal. -func TestChannelLinkAcceptOverpay(t *testing.T) { - t.Parallel() - - // First, we'll create our traditional three hop network. We'll only be - // interacting with and asserting the state of two of the end points - // for this test. - channels, cleanUp, _, err := createClusterChannels( - btcutil.UnitsPerCoin()*3, - btcutil.UnitsPerCoin()*5) - if err != nil { - t.Fatalf("unable to create channel: %v", err) - } - defer cleanUp() - - n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice, - channels.bobToCarol, channels.carolToBob, testStartingHeight) - if err := n.start(); err != nil { - t.Fatalf("unable to start three hop network: %v", err) - } - defer n.stop() - - carolBandwidthBefore := n.carolChannelLink.Bandwidth() - firstBobBandwidthBefore := n.firstBobChannelLink.Bandwidth() - secondBobBandwidthBefore := n.secondBobChannelLink.Bandwidth() - aliceBandwidthBefore := n.aliceChannelLink.Bandwidth() - - // We'll request a route to send 10k satoshis via Alice -> Bob -> - // Carol. - amount := lnwire.NewMSatFromSatoshis(btcutil.UnitsPerCoin()) - htlcAmt, totalTimelock, hops := generateHops( - amount, testStartingHeight, - n.firstBobChannelLink, n.carolChannelLink, - ) - - // When we actually go to send the payment, we'll actually create an - // invoice at Carol for only half of this amount. 
- receiver := n.carolServer - firstHop := n.firstBobChannelLink.ShortChanID() - rhash, err := makePayment( - n.aliceServer, n.carolServer, firstHop, hops, amount/2, htlcAmt, - totalTimelock, - ).Wait(30 * time.Second) - if err != nil { - t.Fatalf("unable to send payment: %v", err) - } - - // Wait for Alice and Bob's second link to receive the revocation. - time.Sleep(2 * time.Second) - - // Even though we sent 2x what was asked for, Carol should still have - // accepted the payment and marked it as settled. - invoice, err := receiver.registry.LookupInvoice(rhash) - if err != nil { - t.Fatalf("unable to get invoice: %v", err) - } - if invoice.State != channeldb.ContractSettled { - t.Fatal("carol invoice haven't been settled") - } - - expectedAliceBandwidth := aliceBandwidthBefore - htlcAmt - if expectedAliceBandwidth != n.aliceChannelLink.Bandwidth() { - t.Fatalf("channel bandwidth incorrect: expected %v, got %v", - expectedAliceBandwidth, n.aliceChannelLink.Bandwidth()) - } - - expectedBobBandwidth1 := firstBobBandwidthBefore + htlcAmt - if expectedBobBandwidth1 != n.firstBobChannelLink.Bandwidth() { - t.Fatalf("channel bandwidth incorrect: expected %v, got %v", - expectedBobBandwidth1, n.firstBobChannelLink.Bandwidth()) - } - - expectedBobBandwidth2 := secondBobBandwidthBefore - amount - if expectedBobBandwidth2 != n.secondBobChannelLink.Bandwidth() { - t.Fatalf("channel bandwidth incorrect: expected %v, got %v", - expectedBobBandwidth2, n.secondBobChannelLink.Bandwidth()) - } - - expectedCarolBandwidth := carolBandwidthBefore + amount - if expectedCarolBandwidth != n.carolChannelLink.Bandwidth() { - t.Fatalf("channel bandwidth incorrect: expected %v, got %v", - expectedCarolBandwidth, n.carolChannelLink.Bandwidth()) - } - - // Finally, we'll ensure that the amount we paid is properly reflected - // in the stored invoice. 
- if invoice.AmtPaid != amount { - t.Fatalf("expected amt paid to be %v, is instead %v", amount, - invoice.AmtPaid) - } -} - -// persistentLinkHarness is used to control the lifecylce of a link and the -// switch that operates it. It supports the ability to restart either the link -// or both the link and the switch. -type persistentLinkHarness struct { - t *testing.T - - link ChannelLink - coreLink *channelLink - channel *lnwallet.LightningChannel - - batchTicker chan time.Time - msgs chan lnwire.Message - - restoreChan func() (*lnwallet.LightningChannel, er.R) -} - -// newPersistentLinkHarness initializes a new persistentLinkHarness and derives -// the supporting references from the active link. -func newPersistentLinkHarness(t *testing.T, link ChannelLink, - batchTicker chan time.Time, - restore func() (*lnwallet.LightningChannel, - er.R)) *persistentLinkHarness { - - coreLink := link.(*channelLink) - - return &persistentLinkHarness{ - t: t, - link: link, - coreLink: coreLink, - channel: coreLink.channel, - batchTicker: batchTicker, - msgs: coreLink.cfg.Peer.(*mockPeer).sentMsgs, - restoreChan: restore, - } -} - -// restart facilitates a shutdown and restart of the link maintained by the -// harness. The primary purpose of this method is to ensure the consistency of -// the supporting references is maintained across restarts. -// -// If `restartSwitch` is set, the entire switch will also be restarted, -// and will be reinitialized with the contents of the channeldb backing Alice's -// channel. -// -// Any number of hodl flags can be passed as additional arguments to this -// method. If none are provided, the mask will be extracted as hodl.MaskNone. -func (h *persistentLinkHarness) restart(restartSwitch bool, - hodlFlags ...hodl.Flag) func() { - - // First, remove the link from the switch. - h.coreLink.cfg.Switch.RemoveLink(h.link.ChanID()) - - if restartSwitch { - // If a switch restart is requested, we will stop it. 
It will be - // reinstantiated in restartLink. - h.coreLink.cfg.Switch.Stop() - } - - // Since our in-memory state may have diverged from our persistent - // state, we will restore the persisted state to ensure we always start - // the link in a consistent state. - var err er.R - h.channel, err = h.restoreChan() - if err != nil { - h.t.Fatalf("unable to restore channels: %v", err) - } - - // Now, restart the link using the channel state. This will take care of - // adding the link to an existing switch, or creating a new one using - // the database owned by the link. - var cleanUp func() - h.link, h.batchTicker, cleanUp, err = h.restartLink( - h.channel, restartSwitch, hodlFlags, - ) - if err != nil { - h.t.Fatalf("unable to restart alicelink: %v", err) - } - - // Repopulate the remaining fields in the harness. - h.coreLink = h.link.(*channelLink) - h.msgs = h.coreLink.cfg.Peer.(*mockPeer).sentMsgs - - return cleanUp -} - -// checkSent reads the links message stream and verify that the messages are -// dequeued in the same order as provided by `pkts`. -func (h *persistentLinkHarness) checkSent(pkts []*htlcPacket) { - for _, pkt := range pkts { - var msg lnwire.Message - select { - case msg = <-h.msgs: - case <-time.After(15 * time.Second): - h.t.Fatalf("did not receive message") - } - - if !reflect.DeepEqual(msg, pkt.htlc) { - h.t.Fatalf("unexpected packet, want %v, got %v", - pkt.htlc, msg) - } - } -} - -// commitCircuits accepts a list of circuits and tries to commit them to the -// switch's circuit map. The forwarding actions are returned if there was no -// failure. -func (h *persistentLinkHarness) commitCircuits(circuits []*PaymentCircuit) *CircuitFwdActions { - fwdActions, err := h.coreLink.cfg.Switch.commitCircuits(circuits...) 
- if err != nil { - h.t.Fatalf("unable to commit circuit: %v", err) - } - - return fwdActions -} - -func (h *persistentLinkHarness) assertNumPendingNumOpenCircuits( - wantPending, wantOpen int) { - - _, _, line, _ := runtime.Caller(1) - - numPending := h.coreLink.cfg.Switch.circuits.NumPending() - if numPending != wantPending { - h.t.Fatalf("line: %d: wrong number of pending circuits: "+ - "want %d, got %d", line, wantPending, numPending) - } - numOpen := h.coreLink.cfg.Switch.circuits.NumOpen() - if numOpen != wantOpen { - h.t.Fatalf("line: %d: wrong number of open circuits: "+ - "want %d, got %d", line, wantOpen, numOpen) - } -} - -// trySignNextCommitment signals the batch ticker so that the link will try to -// update its commitment transaction. -func (h *persistentLinkHarness) trySignNextCommitment() { - select { - case h.batchTicker <- time.Now(): - // Give the link enough time to process the request. - time.Sleep(time.Millisecond * 500) - - case <-time.After(15 * time.Second): - h.t.Fatalf("did not initiate state transition") - } -} - -// restartLink creates a new channel link from the given channel state, and adds -// to an htlcswitch. If none is provided by the caller, a new one will be -// created using Alice's database. 
-func (h *persistentLinkHarness) restartLink( - aliceChannel *lnwallet.LightningChannel, restartSwitch bool, - hodlFlags []hodl.Flag) ( - ChannelLink, chan time.Time, func(), er.R) { - - var ( - decoder = newMockIteratorDecoder() - obfuscator = NewMockObfuscator() - alicePeer = &mockPeer{ - sentMsgs: make(chan lnwire.Message, 2000), - quit: make(chan struct{}), - } - - globalPolicy = ForwardingPolicy{ - MinHTLCOut: lnwire.NewMSatFromSatoshis(5), - BaseFee: lnwire.NewMSatFromSatoshis(1), - TimeLockDelta: 6, - } - - pCache = newMockPreimageCache() - ) - - aliceDb := aliceChannel.State().Db - aliceSwitch := h.coreLink.cfg.Switch - if restartSwitch { - var err er.R - aliceSwitch, err = initSwitchWithDB(testStartingHeight, aliceDb) - if err != nil { - return nil, nil, nil, err - } - } - - // Instantiate with a long interval, so that we can precisely control - // the firing via force feeding. - bticker := ticker.NewForce(time.Hour) - aliceCfg := ChannelLinkConfig{ - FwrdingPolicy: globalPolicy, - Peer: alicePeer, - Switch: aliceSwitch, - Circuits: aliceSwitch.CircuitModifier(), - ForwardPackets: aliceSwitch.ForwardPackets, - DecodeHopIterators: decoder.DecodeHopIterators, - ExtractErrorEncrypter: func(*btcec.PublicKey) ( - hop.ErrorEncrypter, lnwire.FailCode) { - return obfuscator, lnwire.CodeNone - }, - FetchLastChannelUpdate: mockGetChanUpdateMessage, - PreimageCache: pCache, - OnChannelFailure: func(lnwire.ChannelID, - lnwire.ShortChannelID, LinkFailureError) { - }, - UpdateContractSignals: func(*contractcourt.ContractSignals) er.R { - return nil - }, - Registry: h.coreLink.cfg.Registry, - ChainEvents: &contractcourt.ChainEventSubscription{}, - BatchTicker: bticker, - FwdPkgGCTicker: ticker.New(5 * time.Second), - PendingCommitTicker: ticker.New(time.Minute), - // Make the BatchSize and Min/MaxFeeUpdateTimeout large enough - // to not trigger commit updates automatically during tests. 
- BatchSize: 10000, - MinFeeUpdateTimeout: 30 * time.Minute, - MaxFeeUpdateTimeout: 40 * time.Minute, - // Set any hodl flags requested for the new link. - HodlMask: hodl.MaskFromFlags(hodlFlags...), - MaxOutgoingCltvExpiry: DefaultMaxOutgoingCltvExpiry, - MaxFeeAllocation: DefaultMaxLinkFeeAllocation, - NotifyActiveLink: func(wire.OutPoint) {}, - NotifyActiveChannel: func(wire.OutPoint) {}, - NotifyInactiveChannel: func(wire.OutPoint) {}, - HtlcNotifier: aliceSwitch.cfg.HtlcNotifier, - } - - aliceLink := NewChannelLink(aliceCfg, aliceChannel) - if err := aliceSwitch.AddLink(aliceLink); err != nil { - return nil, nil, nil, err - } - go func() { - for { - select { - case <-aliceLink.(*channelLink).htlcUpdates: - case <-aliceLink.(*channelLink).quit: - return - } - } - }() - - cleanUp := func() { - close(alicePeer.quit) - defer aliceLink.Stop() - } - - return aliceLink, bticker.Force, cleanUp, nil -} - -// gnerateHtlc generates a simple payment from Bob to Alice. -func generateHtlc(t *testing.T, coreLink *channelLink, - id uint64) *lnwire.UpdateAddHTLC { - - t.Helper() - - htlc, invoice := generateHtlcAndInvoice(t, id) - - // We must add the invoice to the registry, such that Alice - // expects this payment. - err := coreLink.cfg.Registry.(*mockInvoiceRegistry).AddInvoice( - *invoice, htlc.PaymentHash, - ) - if err != nil { - t.Fatalf("unable to add invoice to registry: %v", err) - } - - return htlc -} - -// generateHtlcAndInvoice generates an invoice and a single hop htlc to send to -// the receiver. 
-func generateHtlcAndInvoice(t *testing.T, - id uint64) (*lnwire.UpdateAddHTLC, *channeldb.Invoice) { - - t.Helper() - - htlcAmt := lnwire.NewMSatFromSatoshis(10000) - htlcExpiry := testStartingHeight + testInvoiceCltvExpiry - hops := []*hop.Payload{ - hop.NewLegacyPayload(&sphinx.HopData{ - Realm: [1]byte{}, // hop.BitcoinNetwork - NextAddress: [8]byte{}, // hop.Exit, - ForwardAmount: uint64(htlcAmt), - OutgoingCltv: uint32(htlcExpiry), - }), - } - blob, err := generateRoute(hops...) - if err != nil { - t.Fatalf("unable to generate route: %v", err) - } - - invoice, htlc, _, err := generatePayment( - htlcAmt, htlcAmt, uint32(htlcExpiry), blob, - ) - if err != nil { - t.Fatalf("unable to create payment: %v", err) - } - - htlc.ID = id - - return htlc, invoice -} - -// TestChannelLinkNoMoreUpdates tests that we won't send a new commitment -// when there are no new updates to sign. -func TestChannelLinkNoMoreUpdates(t *testing.T) { - t.Parallel() - - chanAmt := btcutil.UnitsPerCoin() * 5 - chanReserve := btcutil.UnitsPerCoin() * 1 - aliceLink, bobChannel, _, start, cleanUp, _, err := - newSingleLinkTestHarness(chanAmt, chanReserve) - if err != nil { - t.Fatalf("unable to create link: %v", err) - } - defer cleanUp() - - if err := start(); err != nil { - t.Fatalf("unable to start test harness: %v", err) - } - - var ( - coreLink = aliceLink.(*channelLink) - aliceMsgs = coreLink.cfg.Peer.(*mockPeer).sentMsgs - ) - - // Add two HTLCs to Alice's registry, that Bob can pay. - htlc1 := generateHtlc(t, coreLink, 0) - htlc2 := generateHtlc(t, coreLink, 1) - - ctx := linkTestContext{ - t: t, - aliceLink: aliceLink, - aliceMsgs: aliceMsgs, - bobChannel: bobChannel, - } - - // We now play out the following scanario: - // - // (1) Alice receives htlc1 from Bob. - // (2) Bob sends signature covering htlc1. - // (3) Alice receives htlc2 from Bob. - // (4) Since Bob has sent a new commitment signature, Alice should - // first respond with a revocation. 
- // (5) Alice should also send a commitment signature for the new state, - // covering htlc1. - // (6) Bob sends a new commitment signature, covering htlc2 that he sent - // earlier. This signature should cover hltc1 + htlc2. - // (7) Alice should revoke the old commitment. This ACKs htlc2. - // (8) Bob can now revoke his old commitment in response to the - // signature Alice sent covering htlc1. - // (9) htlc1 is now locked in on Bob's commitment, and we expect Alice - // to settle it. - // (10) Alice should send a signature covering this settle to Bob. Only - // htlc2 should now be covered by this signature. - // (11) Bob can revoke his last state, which will also ACK the settle - // of htlc1. - // (12) Bob sends a new commitment signature. This signature should - // cover htlc2. - // (13) Alice will send a settle for htlc2. - // (14) Alice will also send a signature covering the settle. - // (15) Alice should send a revocation in response to the signature Bob - // sent earlier. - // (16) Bob will revoke his commitment in response to the commitment - // Alice sent. - // (17) Send a signature for the empty state. No HTLCs are left. - // (18) Alice will revoke her previous state. - // Alice Bob - // | | - // | ... 
| - // | | <--- idle (no htlc on either side) - // | | - ctx.sendHtlcBobToAlice(htlc1) // |<----- add-1 ------| (1) - ctx.sendCommitSigBobToAlice(1) // |<------ sig -------| (2) - ctx.sendHtlcBobToAlice(htlc2) // |<----- add-2 ------| (3) - ctx.receiveRevAndAckAliceToBob() // |------- rev ------>| (4) <--- Alice acks add-1 - ctx.receiveCommitSigAliceToBob(1) // |------- sig ------>| (5) <--- Alice signs add-1 - ctx.sendCommitSigBobToAlice(2) // |<------ sig -------| (6) - ctx.receiveRevAndAckAliceToBob() // |------- rev ------>| (7) <--- Alice acks add-2 - ctx.sendRevAndAckBobToAlice() // |<------ rev -------| (8) - ctx.receiveSettleAliceToBob() // |------ ful-1 ----->| (9) - ctx.receiveCommitSigAliceToBob(1) // |------- sig ------>| (10) <--- Alice signs add-1 + add-2 + ful-1 = add-2 - ctx.sendRevAndAckBobToAlice() // |<------ rev -------| (11) - ctx.sendCommitSigBobToAlice(1) // |<------ sig -------| (12) - ctx.receiveSettleAliceToBob() // |------ ful-2 ----->| (13) - ctx.receiveCommitSigAliceToBob(0) // |------- sig ------>| (14) <--- Alice signs add-2 + ful-2 = no htlcs - ctx.receiveRevAndAckAliceToBob() // |------- rev ------>| (15) - ctx.sendRevAndAckBobToAlice() // |<------ rev -------| (16) <--- Bob acks that there are no more htlcs - ctx.sendCommitSigBobToAlice(0) // |<------ sig -------| (17) - ctx.receiveRevAndAckAliceToBob() // |------- rev ------>| (18) <--- Alice acks that there are no htlcs on Alice's side - - // No there are no more changes to ACK or sign, make sure Alice doesn't - // attempt to send any more messages. - var msg lnwire.Message - select { - case msg = <-aliceMsgs: - t.Fatalf("did not expect message %T", msg) - case <-time.After(100 * time.Millisecond): - } -} - -// checkHasPreimages inspects Alice's preimage cache, and asserts whether the -// preimages for the provided HTLCs are known and unknown, and that all of them -// match the expected status of expOk. 
-func checkHasPreimages(t *testing.T, coreLink *channelLink, - htlcs []*lnwire.UpdateAddHTLC, expOk bool) { - - t.Helper() - - err := wait.NoError(func() er.R { - for i := range htlcs { - _, ok := coreLink.cfg.PreimageCache.LookupPreimage( - htlcs[i].PaymentHash, - ) - if ok == expOk { - continue - } - - return er.Errorf("expected to find witness: %v, "+ - "got %v for hash=%x", expOk, ok, - htlcs[i].PaymentHash) - } - - return nil - }, 5*time.Second) - if err != nil { - t.Fatalf("unable to find preimages: %v", err) - } -} - -// TestChannelLinkWaitForRevocation tests that we will keep accepting updates -// to our commitment transaction, even when we are waiting for a revocation -// from the remote node. -func TestChannelLinkWaitForRevocation(t *testing.T) { - t.Parallel() - - chanAmt := btcutil.UnitsPerCoin() * 5 - chanReserve := btcutil.UnitsPerCoin() * 1 - aliceLink, bobChannel, _, start, cleanUp, _, err := - newSingleLinkTestHarness(chanAmt, chanReserve) - if err != nil { - t.Fatalf("unable to create link: %v", err) - } - defer cleanUp() - - if err := start(); err != nil { - t.Fatalf("unable to start test harness: %v", err) - } - - var ( - coreLink = aliceLink.(*channelLink) - aliceMsgs = coreLink.cfg.Peer.(*mockPeer).sentMsgs - ) - - // We will send 10 HTLCs in total, from Bob to Alice. - numHtlcs := 10 - var htlcs []*lnwire.UpdateAddHTLC - for i := 0; i < numHtlcs; i++ { - htlc := generateHtlc(t, coreLink, uint64(i)) - htlcs = append(htlcs, htlc) - } - - ctx := linkTestContext{ - t: t, - aliceLink: aliceLink, - aliceMsgs: aliceMsgs, - bobChannel: bobChannel, - } - - assertNoMsgFromAlice := func() { - select { - case <-aliceMsgs: - t.Fatalf("did not expect message from Alice") - case <-time.After(50 * time.Millisecond): - } - } - - // We play out the following scenario: - // - // (1) Add the first HTLC. - // (2) Bob sends signature covering the htlc. - // (3) Since Bob has sent a new commitment signature, Alice should first - // respond with a revocation. 
This revocation will ACK the first htlc. - // (4) Alice should also send a commitment signature for the new state, - // locking in the HTLC on Bob's commitment. Note that we don't - // immediately let Bob respond with a revocation in this case. - // (5.i) Now we send the rest of the HTLCs from Bob to Alice. - // (6.i) Bob sends a new commitment signature, covering all HTLCs up - // to this point. - // (7.i) Alice should respond to Bob's state updates with revocations, - // but cannot send any new signatures for Bob's state because her - // revocation window is exhausted. - // (8) Now let Bob finally send his revocation. - // (9) We expect Alice to settle her first HTLC, since it was already - // locked in. - // (10) Now Alice should send a signature covering this settle + lock - // in the rest of the HTLCs on Bob's commitment. - // (11) Bob receives the new signature for his commitment, and can - // revoke his old state, ACKing the settle. - // (12.i) Now Alice can settle all the HTLCs, since they are locked in - // on both parties' commitments. - // (13) Bob can send a signature covering the first settle Alice sent. - // Bob's signature should cover all the remaining HTLCs as well, since - // he hasn't ACKed the last settles yet. Alice receives the signature - // from Bob. Alice's commitment now has the first HTLC settled, and all - // the other HTLCs locked in. - // (14) Alice will send a signature for all the settles she just sent. - // (15) Bob can revoke his previous state, in response to Alice's - // signature. - // (16) In response to the signature Bob sent, Alice can - // revoke her previous state. - // (17) Bob still hasn't sent a commitment covering all settles, so do - // that now. Since Bob ACKed all settles, no HTLCs should be left on - // the commitment. - // (18) Alice will revoke her previous state. - // Alice Bob - // | | - // | ... 
| - // | | <--- idle (no htlc on either side) - // | | - ctx.sendHtlcBobToAlice(htlcs[0]) // |<----- add-1 ------| (1) - ctx.sendCommitSigBobToAlice(1) // |<------ sig -------| (2) - ctx.receiveRevAndAckAliceToBob() // |------- rev ------>| (3) <--- Alice acks add-1 - ctx.receiveCommitSigAliceToBob(1) // |------- sig ------>| (4) <--- Alice signs add-1 - for i := 1; i < numHtlcs; i++ { // | | - ctx.sendHtlcBobToAlice(htlcs[i]) // |<----- add-i ------| (5.i) - ctx.sendCommitSigBobToAlice(i + 1) // |<------ sig -------| (6.i) - ctx.receiveRevAndAckAliceToBob() // |------- rev ------>| (7.i) <--- Alice acks add-i - assertNoMsgFromAlice() // | | - // | | Alice should not send a sig for - // | | Bob's last state, since she is - // | | still waiting for a revocation - // | | for the previous one. - } // | | - ctx.sendRevAndAckBobToAlice() // |<------ rev -------| (8) Finally let Bob send rev - ctx.receiveSettleAliceToBob() // |------ ful-1 ----->| (9) - ctx.receiveCommitSigAliceToBob(numHtlcs - 1) // |------- sig ------>| (10) <--- Alice signs add-i - ctx.sendRevAndAckBobToAlice() // |<------ rev -------| (11) - for i := 1; i < numHtlcs; i++ { // | | - ctx.receiveSettleAliceToBob() // |------ ful-1 ----->| (12.i) - } // | | - ctx.sendCommitSigBobToAlice(numHtlcs - 1) // |<------ sig -------| (13) - ctx.receiveCommitSigAliceToBob(0) // |------- sig ------>| (14) - ctx.sendRevAndAckBobToAlice() // |<------ rev -------| (15) - ctx.receiveRevAndAckAliceToBob() // |------- rev ------>| (16) - ctx.sendCommitSigBobToAlice(0) // |<------ sig -------| (17) - ctx.receiveRevAndAckAliceToBob() // |------- rev ------>| (18) - - // Both side's state is now updated, no more messages should be sent. - assertNoMsgFromAlice() -} - -// TestChannelLinkNoEmptySig asserts that no empty commit sig message is sent -// when the commitment txes are out of sync. 
-func TestChannelLinkNoEmptySig(t *testing.T) { - t.Parallel() - - chanAmt := btcutil.UnitsPerCoin() * 5 - chanReserve := btcutil.UnitsPerCoin() * 1 - aliceLink, bobChannel, batchTicker, start, cleanUp, _, err := - newSingleLinkTestHarness(chanAmt, chanReserve) - if err != nil { - t.Fatalf("unable to create link: %v", err) - } - defer cleanUp() - - if err := start(); err != nil { - t.Fatalf("unable to start test harness: %v", err) - } - defer aliceLink.Stop() - - var ( - coreLink = aliceLink.(*channelLink) - aliceMsgs = coreLink.cfg.Peer.(*mockPeer).sentMsgs - ) - - ctx := linkTestContext{ - t: t, - aliceLink: aliceLink, - aliceMsgs: aliceMsgs, - bobChannel: bobChannel, - } - - // Send htlc 1 from Alice to Bob. - htlc1, _ := generateHtlcAndInvoice(t, 0) - ctx.sendHtlcAliceToBob(0, htlc1) - ctx.receiveHtlcAliceToBob() - - // Tick the batch ticker to trigger a commitsig from Alice->Bob. - select { - case batchTicker <- time.Now(): - case <-time.After(5 * time.Second): - t.Fatalf("could not force commit sig") - } - - // Receive a CommitSig from Alice covering the Add from above. - ctx.receiveCommitSigAliceToBob(1) - - // Bob revokes previous commitment tx. - ctx.sendRevAndAckBobToAlice() - - // Alice sends htlc 2 to Bob. - htlc2, _ := generateHtlcAndInvoice(t, 0) - ctx.sendHtlcAliceToBob(1, htlc2) - ctx.receiveHtlcAliceToBob() - - // Tick the batch ticker to trigger a commitsig from Alice->Bob. - select { - case batchTicker <- time.Now(): - case <-time.After(5 * time.Second): - t.Fatalf("could not force commit sig") - } - - // Get the commit sig from Alice, but don't send it to Bob yet. - commitSigAlice := ctx.receiveCommitSigAlice(2) - - // Bob adds htlc 1 to its remote commit tx. - ctx.sendCommitSigBobToAlice(1) - - // Now send Bob the signature from Alice covering both htlcs. 
- err = bobChannel.ReceiveNewCommitment( - commitSigAlice.CommitSig, commitSigAlice.HtlcSigs, - ) - if err != nil { - t.Fatalf("bob failed receiving commitment: %v", err) - } - - // Both Alice and Bob revoke their previous commitment txes. - ctx.receiveRevAndAckAliceToBob() - ctx.sendRevAndAckBobToAlice() - - // The commit txes are not in sync, but it is Bob's turn to send a new - // signature. We don't expect Alice to send out any message. This check - // allows some time for the log commit ticker to trigger for Alice. - ctx.assertNoMsgFromAlice(time.Second) -} - -// TestChannelLinkBatchPreimageWrite asserts that a link will batch preimage -// writes when just as it receives a CommitSig to lock in any Settles, and also -// if the link is aware of any uncommitted preimages if the link is stopped, -// i.e. due to a disconnection or shutdown. -func TestChannelLinkBatchPreimageWrite(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - disconnect bool - }{ - { - name: "flush on commit sig", - disconnect: false, - }, - { - name: "flush on disconnect", - disconnect: true, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - testChannelLinkBatchPreimageWrite(t, test.disconnect) - }) - } -} - -func testChannelLinkBatchPreimageWrite(t *testing.T, disconnect bool) { - chanAmt := btcutil.UnitsPerCoin() * 5 - chanReserve := btcutil.UnitsPerCoin() * 1 - aliceLink, bobChannel, batchTicker, startUp, cleanUp, _, err := - newSingleLinkTestHarness(chanAmt, chanReserve) - if err != nil { - t.Fatalf("unable to create link: %v", err) - } - defer cleanUp() - - if err := startUp(); err != nil { - t.Fatalf("unable to start test harness: %v", err) - } - - var ( - coreLink = aliceLink.(*channelLink) - aliceMsgs = coreLink.cfg.Peer.(*mockPeer).sentMsgs - ) - - // We will send 10 HTLCs in total, from Bob to Alice. 
- numHtlcs := 10 - var htlcs []*lnwire.UpdateAddHTLC - var invoices []*channeldb.Invoice - for i := 0; i < numHtlcs; i++ { - htlc, invoice := generateHtlcAndInvoice(t, uint64(i)) - htlcs = append(htlcs, htlc) - invoices = append(invoices, invoice) - } - - ctx := linkTestContext{ - t: t, - aliceLink: aliceLink, - aliceMsgs: aliceMsgs, - bobChannel: bobChannel, - } - - // First, send a batch of Adds from Alice to Bob. - for i, htlc := range htlcs { - ctx.sendHtlcAliceToBob(i, htlc) - ctx.receiveHtlcAliceToBob() - } - - // Assert that no preimages exist for these htlcs in Alice's cache. - checkHasPreimages(t, coreLink, htlcs, false) - - // Force alice's link to sign a commitment covering the htlcs sent thus - // far. - select { - case batchTicker <- time.Now(): - case <-time.After(15 * time.Second): - t.Fatalf("could not force commit sig") - } - - // Do a commitment dance to lock in the Adds, we expect numHtlcs htlcs - // to be on each party's commitment transactions. - ctx.receiveCommitSigAliceToBob(numHtlcs) - ctx.sendRevAndAckBobToAlice() - ctx.sendCommitSigBobToAlice(numHtlcs) - ctx.receiveRevAndAckAliceToBob() - - // Check again that no preimages exist for these htlcs in Alice's cache. - checkHasPreimages(t, coreLink, htlcs, false) - - // Now, have Bob settle the HTLCs back to Alice using the preimages in - // the invoice corresponding to each of the HTLCs. - for i, invoice := range invoices { - ctx.sendSettleBobToAlice( - uint64(i), - *invoice.Terms.PaymentPreimage, - ) - } - - // Assert that Alice has not yet written the preimages, even though she - // has received them in the UpdateFulfillHTLC messages. - checkHasPreimages(t, coreLink, htlcs, false) - - // If this is the disconnect run, we will having Bob send Alice his - // CommitSig, and simply stop Alice's link. As she exits, we should - // detect that she has uncommitted preimages and write them to disk. 
- if disconnect { - aliceLink.Stop() - checkHasPreimages(t, coreLink, htlcs, true) - return - } - - // Otherwise, we are testing that Alice commits the preimages after - // receiving a CommitSig from Bob. Bob's commitment should now have 0 - // HTLCs. - ctx.sendCommitSigBobToAlice(0) - - // Since Alice will process the CommitSig asynchronously, we wait until - // she replies with her RevokeAndAck to ensure the tests reliably - // inspect her cache after advancing her state. - select { - - // Received Alice's RevokeAndAck, assert that she has written all of the - // uncommitted preimages learned in this commitment. - case <-aliceMsgs: - checkHasPreimages(t, coreLink, htlcs, true) - - // Alice didn't send her RevokeAndAck, something is wrong. - case <-time.After(15 * time.Second): - t.Fatalf("alice did not send her revocation") - } -} - -// TestChannelLinkCleanupSpuriousResponses tests that we properly cleanup -// references in the event that internal retransmission continues as a result of -// not properly cleaning up Add/SettleFailRefs. -func TestChannelLinkCleanupSpuriousResponses(t *testing.T) { - t.Parallel() - - chanAmt := btcutil.UnitsPerCoin() * 5 - chanReserve := btcutil.UnitsPerCoin() * 1 - aliceLink, bobChannel, _, start, cleanUp, _, err := - newSingleLinkTestHarness(chanAmt, chanReserve) - if err != nil { - t.Fatalf("unable to create link: %v", err) - } - defer cleanUp() - - if err := start(); err != nil { - t.Fatalf("unable to start test harness: %v", err) - } - - var ( - coreLink = aliceLink.(*channelLink) - aliceMsgs = coreLink.cfg.Peer.(*mockPeer).sentMsgs - ) - - // Settle Alice in hodl ExitSettle mode so that she won't respond - // immediately to the htlc's meant for her. This allows us to control - // the responses she gives back to Bob. - coreLink.cfg.HodlMask = hodl.ExitSettle.Mask() - - // Add two HTLCs to Alice's registry, that Bob can pay. 
- htlc1 := generateHtlc(t, coreLink, 0) - htlc2 := generateHtlc(t, coreLink, 1) - - ctx := linkTestContext{ - t: t, - aliceLink: aliceLink, - aliceMsgs: aliceMsgs, - bobChannel: bobChannel, - } - - // We start with he following scenario: Bob sends Alice two HTLCs, and a - // commitment dance ensures, leaving two HTLCs that Alice can respond - // to. Since Alice is in ExitSettle mode, we will then take over and - // provide targeted fail messages to test the link's ability to cleanup - // spurious responses. - // - // Bob Alice - // |------ add-1 ----->| - // |------ add-2 ----->| - // |------ sig ----->| commits add-1 + add-2 - // |<----- rev ------| - // |<----- sig ------| commits add-1 + add-2 - // |------ rev ----->| - ctx.sendHtlcBobToAlice(htlc1) - ctx.sendHtlcBobToAlice(htlc2) - ctx.sendCommitSigBobToAlice(2) - ctx.receiveRevAndAckAliceToBob() - ctx.receiveCommitSigAliceToBob(2) - ctx.sendRevAndAckBobToAlice() - - // Give Alice to time to process the revocation. - time.Sleep(time.Second) - - aliceFwdPkgs, err := coreLink.channel.LoadFwdPkgs() - if err != nil { - t.Fatalf("unable to load alice's fwdpkgs: %v", err) - } - - // Alice should have exactly one forwarding package. - if len(aliceFwdPkgs) != 1 { - t.Fatalf("alice should have 1 fwd pkgs, has %d instead", - len(aliceFwdPkgs)) - } - - // We'll stash the height of these AddRefs, so that we can reconstruct - // the proper references later. - addHeight := aliceFwdPkgs[0].Height - - // The first fwdpkg should have exactly 2 entries, one for each Add that - // was added during the last dance. - if aliceFwdPkgs[0].AckFilter.Count() != 2 { - t.Fatalf("alice fwdpkg should have 2 Adds, has %d instead", - aliceFwdPkgs[0].AckFilter.Count()) - } - - // Both of the entries in the FwdFilter should be unacked. 
- for i := 0; i < 2; i++ { - if aliceFwdPkgs[0].AckFilter.Contains(uint16(i)) { - t.Fatalf("alice fwdpkg index %d should not "+ - "have ack", i) - } - } - - // Now, construct a Fail packet for Bob settling the first HTLC. This - // packet will NOT include a sourceRef, meaning the AddRef on disk will - // not be acked after committing this response. - fail0 := &htlcPacket{ - incomingChanID: bobChannel.ShortChanID(), - incomingHTLCID: 0, - obfuscator: NewMockObfuscator(), - htlc: &lnwire.UpdateFailHTLC{}, - } - aliceLink.HandleSwitchPacket(fail0) - - // Bob Alice - // |<----- fal-1 ------| - // |<----- sig ------| commits fal-1 - ctx.receiveFailAliceToBob() - ctx.receiveCommitSigAliceToBob(1) - - aliceFwdPkgs, err = coreLink.channel.LoadFwdPkgs() - if err != nil { - t.Fatalf("unable to load alice's fwdpkgs: %v", err) - } - - // Alice should still only have one fwdpkg, as she hasn't yet received - // another revocation from Bob. - if len(aliceFwdPkgs) != 1 { - t.Fatalf("alice should have 1 fwd pkgs, has %d instead", - len(aliceFwdPkgs)) - } - - // Assert the fwdpkg still has 2 entries for the original Adds. - if aliceFwdPkgs[0].AckFilter.Count() != 2 { - t.Fatalf("alice fwdpkg should have 2 Adds, has %d instead", - aliceFwdPkgs[0].AckFilter.Count()) - } - - // Since the fail packet was missing the AddRef, the forward filter for - // either HTLC should not have been modified. - for i := 0; i < 2; i++ { - if aliceFwdPkgs[0].AckFilter.Contains(uint16(i)) { - t.Fatalf("alice fwdpkg index %d should not "+ - "have ack", i) - } - } - - // Complete the rest of the commitment dance, now that the forwarding - // packages have been verified. - // - // Bob Alice - // |------ rev ----->| - // |------ sig ----->| - // |<----- rev ------| - ctx.sendRevAndAckBobToAlice() - ctx.sendCommitSigBobToAlice(1) - ctx.receiveRevAndAckAliceToBob() - - // Next, we'll construct a fail packet for add-2 (index 1), which we'll - // send to Bob and lock in. 
Since the AddRef is set on this instance, we - // should see the second HTLCs AddRef update the forward filter for the - // first fwd pkg. - fail1 := &htlcPacket{ - sourceRef: &channeldb.AddRef{ - Height: addHeight, - Index: 1, - }, - incomingChanID: bobChannel.ShortChanID(), - incomingHTLCID: 1, - obfuscator: NewMockObfuscator(), - htlc: &lnwire.UpdateFailHTLC{}, - } - aliceLink.HandleSwitchPacket(fail1) - - // Bob Alice - // |<----- fal-1 ------| - // |<----- sig ------| commits fal-1 - ctx.receiveFailAliceToBob() - ctx.receiveCommitSigAliceToBob(0) - - aliceFwdPkgs, err = coreLink.channel.LoadFwdPkgs() - if err != nil { - t.Fatalf("unable to load alice's fwdpkgs: %v", err) - } - - // Now that another commitment dance has completed, Alice should have 2 - // forwarding packages. - if len(aliceFwdPkgs) != 2 { - t.Fatalf("alice should have 2 fwd pkgs, has %d instead", - len(aliceFwdPkgs)) - } - - // The most recent package should have no new HTLCs, so it should be - // empty. - if aliceFwdPkgs[1].AckFilter.Count() != 0 { - t.Fatalf("alice fwdpkg height=%d should have 0 Adds, "+ - "has %d instead", aliceFwdPkgs[1].Height, - aliceFwdPkgs[1].AckFilter.Count()) - } - - // The index for the first AddRef should still be unacked, as the - // sourceRef was missing on the htlcPacket. - if aliceFwdPkgs[0].AckFilter.Contains(0) { - t.Fatalf("alice fwdpkg height=%d index=0 should not "+ - "have an ack", aliceFwdPkgs[0].Height) - } - - // The index for the second AddRef should now be acked, as it was - // properly constructed and committed in Alice's last commit sig. - if !aliceFwdPkgs[0].AckFilter.Contains(1) { - t.Fatalf("alice fwdpkg height=%d index=1 should have "+ - "an ack", aliceFwdPkgs[0].Height) - } - - // Complete the rest of the commitment dance. 
- // - // Bob Alice - // |------ rev ----->| - // |------ sig ----->| - // |<----- rev ------| - ctx.sendRevAndAckBobToAlice() - ctx.sendCommitSigBobToAlice(0) - ctx.receiveRevAndAckAliceToBob() - - // We'll do a quick sanity check, and blindly send the same fail packet - // for the first HTLC. Since this HTLC index has already been settled, - // this should trigger an attempt to cleanup the spurious response. - // However, we expect it to result in a NOP since it is still missing - // its sourceRef. - aliceLink.HandleSwitchPacket(fail0) - - // Allow the link enough time to process and reject the duplicate - // packet, we'll also check that this doesn't trigger Alice to send the - // fail to Bob. - select { - case <-aliceMsgs: - t.Fatalf("message sent for duplicate fail") - case <-time.After(time.Second): - } - - aliceFwdPkgs, err = coreLink.channel.LoadFwdPkgs() - if err != nil { - t.Fatalf("unable to load alice's fwdpkgs: %v", err) - } - - // Alice should now have 3 forwarding packages, and the latest should be - // empty. - if len(aliceFwdPkgs) != 3 { - t.Fatalf("alice should have 3 fwd pkgs, has %d instead", - len(aliceFwdPkgs)) - } - if aliceFwdPkgs[2].AckFilter.Count() != 0 { - t.Fatalf("alice fwdpkg height=%d should have 0 Adds, "+ - "has %d instead", aliceFwdPkgs[2].Height, - aliceFwdPkgs[2].AckFilter.Count()) - } - - // The state of the forwarding packages should be unmodified from the - // prior assertion, since the duplicate Fail for index 0 should have - // been ignored. - if aliceFwdPkgs[0].AckFilter.Contains(0) { - t.Fatalf("alice fwdpkg height=%d index=0 should not "+ - "have an ack", aliceFwdPkgs[0].Height) - } - if !aliceFwdPkgs[0].AckFilter.Contains(1) { - t.Fatalf("alice fwdpkg height=%d index=1 should have "+ - "an ack", aliceFwdPkgs[0].Height) - } - - // Finally, construct a new Fail packet for the first HTLC, this time - // with the sourceRef properly constructed. 
When the link handles this - // duplicate, it should clean up the remaining AddRef state maintained - // in Alice's link, but it should not result in anything being sent to - // Bob. - fail0 = &htlcPacket{ - sourceRef: &channeldb.AddRef{ - Height: addHeight, - Index: 0, - }, - incomingChanID: bobChannel.ShortChanID(), - incomingHTLCID: 0, - obfuscator: NewMockObfuscator(), - htlc: &lnwire.UpdateFailHTLC{}, - } - aliceLink.HandleSwitchPacket(fail0) - - // Allow the link enough time to process and reject the duplicate - // packet, we'll also check that this doesn't trigger Alice to send the - // fail to Bob. - select { - case <-aliceMsgs: - t.Fatalf("message sent for duplicate fail") - case <-time.After(time.Second): - } - - aliceFwdPkgs, err = coreLink.channel.LoadFwdPkgs() - if err != nil { - t.Fatalf("unable to load alice's fwdpkgs: %v", err) - } - - // Since no state transitions have been performed for the duplicate - // packets, Alice should still have the same 3 forwarding packages. - if len(aliceFwdPkgs) != 3 { - t.Fatalf("alice should have 3 fwd pkgs, has %d instead", - len(aliceFwdPkgs)) - } - - // Assert that all indices in our original forwarded have now been acked - // as a result of our spurious cleanup logic. 
- for i := 0; i < 2; i++ { - if !aliceFwdPkgs[0].AckFilter.Contains(uint16(i)) { - t.Fatalf("alice fwdpkg height=%d index=%d "+ - "should have ack", aliceFwdPkgs[0].Height, i) - } - } -} - -type mockPackager struct { - failLoadFwdPkgs bool -} - -func (*mockPackager) AddFwdPkg(tx kvdb.RwTx, fwdPkg *channeldb.FwdPkg) er.R { - return nil -} - -func (*mockPackager) SetFwdFilter(tx kvdb.RwTx, height uint64, - fwdFilter *channeldb.PkgFilter) er.R { - return nil -} - -func (*mockPackager) AckAddHtlcs(tx kvdb.RwTx, - addRefs ...channeldb.AddRef) er.R { - return nil -} - -func (m *mockPackager) LoadFwdPkgs(tx kvdb.RTx) ([]*channeldb.FwdPkg, er.R) { - if m.failLoadFwdPkgs { - return nil, er.Errorf("failing LoadFwdPkgs") - } - return nil, nil -} - -func (*mockPackager) RemovePkg(tx kvdb.RwTx, height uint64) er.R { - return nil -} - -func (*mockPackager) AckSettleFails(tx kvdb.RwTx, - settleFailRefs ...channeldb.SettleFailRef) er.R { - return nil -} - -// TestChannelLinkFail tests that we will fail the channel, and force close the -// channel in certain situations. -func TestChannelLinkFail(t *testing.T) { - t.Parallel() - - testCases := []struct { - // options is used to set up mocks and configure the link - // before it is started. - options func(*channelLink) - - // link test is used to execute the given test on the channel - // link after it is started. - linkTest func(*testing.T, *channelLink, *lnwallet.LightningChannel) - - // shouldForceClose indicates whether we expect the link to - // force close the channel in response to the actions performed - // during the linkTest. - shouldForceClose bool - - // permanentFailure indicates whether we expect the link to - // consider the failure permanent in response to the actions - // performed during the linkTest. - permanentFailure bool - }{ - { - // Test that we don't force close if syncing states - // fails at startup. 
- func(c *channelLink) { - c.cfg.SyncStates = true - - // Make the syncChanStateCall fail by making - // the SendMessage call fail. - c.cfg.Peer.(*mockPeer).disconnected = true - }, - func(t *testing.T, c *channelLink, _ *lnwallet.LightningChannel) { - // Should fail at startup. - }, - false, - false, - }, - { - // Test that we don't force closes the channel if - // resolving forward packages fails at startup. - func(c *channelLink) { - // We make the call to resolveFwdPkgs fail by - // making the underlying forwarder fail. - pkg := &mockPackager{ - failLoadFwdPkgs: true, - } - c.channel.State().Packager = pkg - }, - func(t *testing.T, c *channelLink, _ *lnwallet.LightningChannel) { - // Should fail at startup. - }, - false, - false, - }, - { - // Test that we force close the channel if we receive - // an invalid Settle message. - func(c *channelLink) { - }, - func(t *testing.T, c *channelLink, _ *lnwallet.LightningChannel) { - // Recevive an htlc settle for an htlc that was - // never added. - htlcSettle := &lnwire.UpdateFulfillHTLC{ - ID: 0, - PaymentPreimage: [32]byte{}, - } - c.HandleChannelUpdate(htlcSettle) - }, - true, - false, - }, - { - // Test that we force close the channel if we receive - // an invalid CommitSig, not containing enough HTLC - // sigs. - func(c *channelLink) { - }, - func(t *testing.T, c *channelLink, remoteChannel *lnwallet.LightningChannel) { - - // Generate an HTLC and send to the link. - htlc1 := generateHtlc(t, c, 0) - ctx := linkTestContext{ - t: t, - aliceLink: c, - bobChannel: remoteChannel, - } - ctx.sendHtlcBobToAlice(htlc1) - - // Sign a commitment that will include - // signature for the HTLC just sent. - sig, htlcSigs, _, err := - remoteChannel.SignNextCommitment() - if err != nil { - t.Fatalf("error signing commitment: %v", - err) - } - - // Remove the HTLC sig, such that the commit - // sig will be invalid. 
- commitSig := &lnwire.CommitSig{ - CommitSig: sig, - HtlcSigs: htlcSigs[1:], - } - - c.HandleChannelUpdate(commitSig) - }, - true, - false, - }, - { - // Test that we force close the channel if we receive - // an invalid CommitSig, where the sig itself is - // corrupted. - func(c *channelLink) { - }, - func(t *testing.T, c *channelLink, remoteChannel *lnwallet.LightningChannel) { - - // Generate an HTLC and send to the link. - htlc1 := generateHtlc(t, c, 0) - ctx := linkTestContext{ - t: t, - aliceLink: c, - bobChannel: remoteChannel, - } - - ctx.sendHtlcBobToAlice(htlc1) - - // Sign a commitment that will include - // signature for the HTLC just sent. - sig, htlcSigs, _, err := - remoteChannel.SignNextCommitment() - if err != nil { - t.Fatalf("error signing commitment: %v", - err) - } - - // Flip a bit on the signature, rendering it - // invalid. - sig[19] ^= 1 - commitSig := &lnwire.CommitSig{ - CommitSig: sig, - HtlcSigs: htlcSigs, - } - - c.HandleChannelUpdate(commitSig) - }, - true, - false, - }, - { - // Test that we consider the failure permanent if we - // receive a link error from the remote. - func(c *channelLink) { - }, - func(t *testing.T, c *channelLink, remoteChannel *lnwallet.LightningChannel) { - err := &lnwire.Error{} - c.HandleChannelUpdate(err) - }, - false, - // TODO(halseth) For compatibility with CL we currently - // don't treat Errors as permanent errors. - false, - }, - } - - chanAmt := btcutil.UnitsPerCoin() * 5 - - // Execute each test case. - for i, test := range testCases { - link, remoteChannel, _, start, cleanUp, _, err := - newSingleLinkTestHarness(chanAmt, 0) - if err != nil { - t.Fatalf("unable to create link: %v", err) - } - - coreLink := link.(*channelLink) - - // Set up a channel used to check whether the link error - // force closed the channel. 
- linkErrors := make(chan LinkFailureError, 1) - coreLink.cfg.OnChannelFailure = func(_ lnwire.ChannelID, - _ lnwire.ShortChannelID, linkErr LinkFailureError) { - linkErrors <- linkErr - } - - // Set up the link before starting it. - test.options(coreLink) - if err := start(); err != nil { - t.Fatalf("unable to start test harness: %v", err) - } - - // Execute the test case. - test.linkTest(t, coreLink, remoteChannel) - - // Currently we expect all test cases to lead to link error. - var linkErr LinkFailureError - select { - case linkErr = <-linkErrors: - case <-time.After(10 * time.Second): - t.Fatalf("%d) Alice did not fail"+ - "channel", i) - } - - // If we expect the link to force close the channel in this - // case, check that it happens. If not, make sure it does not - // happen. - if test.shouldForceClose != linkErr.ForceClose { - t.Fatalf("%d) Expected Alice to force close(%v), "+ - "instead got(%v)", i, test.shouldForceClose, - linkErr.ForceClose) - } - - if test.permanentFailure != linkErr.PermanentFailure { - t.Fatalf("%d) Expected Alice set permanent failure(%v), "+ - "instead got(%v)", i, test.permanentFailure, - linkErr.PermanentFailure) - } - - // Clean up before starting next test case. - cleanUp() - } -} - -// TestExpectedFee tests calculation of ExpectedFee returns expected fee, given -// a baseFee, a feeRate, and an htlc amount. 
-func TestExpectedFee(t *testing.T) { - testCases := []struct { - baseFee lnwire.MilliSatoshi - feeRate lnwire.MilliSatoshi - htlcAmt lnwire.MilliSatoshi - expected lnwire.MilliSatoshi - }{ - { - lnwire.MilliSatoshi(0), - lnwire.MilliSatoshi(0), - lnwire.MilliSatoshi(0), - lnwire.MilliSatoshi(0), - }, - { - lnwire.MilliSatoshi(0), - lnwire.MilliSatoshi(1), - lnwire.MilliSatoshi(999999), - lnwire.MilliSatoshi(0), - }, - { - lnwire.MilliSatoshi(0), - lnwire.MilliSatoshi(1), - lnwire.MilliSatoshi(1000000), - lnwire.MilliSatoshi(1), - }, - { - lnwire.MilliSatoshi(0), - lnwire.MilliSatoshi(1), - lnwire.MilliSatoshi(1000001), - lnwire.MilliSatoshi(1), - }, - { - lnwire.MilliSatoshi(1), - lnwire.MilliSatoshi(1), - lnwire.MilliSatoshi(1000000), - lnwire.MilliSatoshi(2), - }, - } - - for _, test := range testCases { - f := ForwardingPolicy{ - BaseFee: test.baseFee, - FeeRate: test.feeRate, - } - fee := ExpectedFee(f, test.htlcAmt) - if fee != test.expected { - t.Errorf("expected fee to be (%v), instead got (%v)", test.expected, - fee) - } - } -} - -// TestForwardingAsymmetricTimeLockPolicies tests that each link is able to -// properly handle forwarding HTLCs when their outgoing channels have -// asymmetric policies w.r.t what they require for time locks. -func TestForwardingAsymmetricTimeLockPolicies(t *testing.T) { - t.Parallel() - - // First, we'll create our traditional three hop network. Bob - // interacting with and asserting the state of two of the end points - // for this test. 
- channels, cleanUp, _, err := createClusterChannels( - btcutil.UnitsPerCoin()*3, - btcutil.UnitsPerCoin()*5, - ) - if err != nil { - t.Fatalf("unable to create channel: %v", err) - } - defer cleanUp() - - n := newThreeHopNetwork( - t, channels.aliceToBob, channels.bobToAlice, channels.bobToCarol, - channels.carolToBob, testStartingHeight, - ) - if err := n.start(); err != nil { - t.Fatalf("unable to start three hop network: %v", err) - } - defer n.stop() - - // Now that each of the links are up, we'll modify the link from Alice - // -> Bob to have a greater time lock delta than that of the link of - // Bob -> Carol. - newPolicy := n.firstBobChannelLink.cfg.FwrdingPolicy - newPolicy.TimeLockDelta = 7 - n.firstBobChannelLink.UpdateForwardingPolicy(newPolicy) - - // Now that the Alice -> Bob link has been updated, we'll craft and - // send a payment from Alice -> Carol. This should succeed as normal, - // even though Bob has asymmetric time lock policies. - amount := lnwire.NewMSatFromSatoshis(btcutil.UnitsPerCoin()) - htlcAmt, totalTimelock, hops := generateHops( - amount, testStartingHeight, n.firstBobChannelLink, - n.carolChannelLink, - ) - - firstHop := n.firstBobChannelLink.ShortChanID() - _, err = makePayment( - n.aliceServer, n.carolServer, firstHop, hops, amount, htlcAmt, - totalTimelock, - ).Wait(30 * time.Second) - if err != nil { - t.Fatalf("unable to send payment: %v", err) - } -} - -// TestCheckHtlcForward tests that a link is properly enforcing the HTLC -// forwarding policy. 
-func TestCheckHtlcForward(t *testing.T) { - - fetchLastChannelUpdate := func(lnwire.ShortChannelID) ( - *lnwire.ChannelUpdate, er.R) { - - return &lnwire.ChannelUpdate{}, nil - } - - testChannel, _, fCleanUp, err := createTestChannel( - alicePrivKey, bobPrivKey, 100000, 100000, - 1000, 1000, lnwire.ShortChannelID{}, - ) - if err != nil { - t.Fatal(err) - } - defer fCleanUp() - - link := channelLink{ - cfg: ChannelLinkConfig{ - FwrdingPolicy: ForwardingPolicy{ - TimeLockDelta: 20, - MinHTLCOut: 500, - MaxHTLC: 1000, - BaseFee: 10, - }, - FetchLastChannelUpdate: fetchLastChannelUpdate, - MaxOutgoingCltvExpiry: DefaultMaxOutgoingCltvExpiry, - HtlcNotifier: &mockHTLCNotifier{}, - }, - channel: testChannel.channel, - } - - var hash [32]byte - - t.Run("satisfied", func(t *testing.T) { - result := link.CheckHtlcForward(hash, 1500, 1000, - 200, 150, 0) - if result != nil { - t.Fatalf("expected policy to be satisfied") - } - }) - - t.Run("below minhtlc", func(t *testing.T) { - result := link.CheckHtlcForward(hash, 100, 50, - 200, 150, 0) - if _, ok := result.WireMessage().(*lnwire.FailAmountBelowMinimum); !ok { - t.Fatalf("expected FailAmountBelowMinimum failure code") - } - }) - - t.Run("above maxhtlc", func(t *testing.T) { - result := link.CheckHtlcForward(hash, 1500, 1200, - 200, 150, 0) - if _, ok := result.WireMessage().(*lnwire.FailTemporaryChannelFailure); !ok { - t.Fatalf("expected FailTemporaryChannelFailure failure code") - } - }) - - t.Run("insufficient fee", func(t *testing.T) { - result := link.CheckHtlcForward(hash, 1005, 1000, - 200, 150, 0) - if _, ok := result.WireMessage().(*lnwire.FailFeeInsufficient); !ok { - t.Fatalf("expected FailFeeInsufficient failure code") - } - }) - - t.Run("expiry too soon", func(t *testing.T) { - result := link.CheckHtlcForward(hash, 1500, 1000, - 200, 150, 190) - if _, ok := result.WireMessage().(*lnwire.FailExpiryTooSoon); !ok { - t.Fatalf("expected FailExpiryTooSoon failure code") - } - }) - - t.Run("incorrect cltv expiry", 
func(t *testing.T) { - result := link.CheckHtlcForward(hash, 1500, 1000, - 200, 190, 0) - if _, ok := result.WireMessage().(*lnwire.FailIncorrectCltvExpiry); !ok { - t.Fatalf("expected FailIncorrectCltvExpiry failure code") - } - - }) - - t.Run("cltv expiry too far in the future", func(t *testing.T) { - // Check that expiry isn't too far in the future. - result := link.CheckHtlcForward(hash, 1500, 1000, - 10200, 10100, 0) - if _, ok := result.WireMessage().(*lnwire.FailExpiryTooFar); !ok { - t.Fatalf("expected FailExpiryTooFar failure code") - } - }) -} - -// TestChannelLinkCanceledInvoice in this test checks the interaction -// between Alice and Bob for a canceled invoice. -func TestChannelLinkCanceledInvoice(t *testing.T) { - t.Parallel() - - // Setup a alice-bob network. - alice, bob, cleanUp, err := createTwoClusterChannels( - btcutil.UnitsPerCoin()*3, - btcutil.UnitsPerCoin()*5) - if err != nil { - t.Fatalf("unable to create channel: %v", err) - } - defer cleanUp() - - n := newTwoHopNetwork(t, alice.channel, bob.channel, testStartingHeight) - if err := n.start(); err != nil { - t.Fatal(err) - } - defer n.stop() - - // Prepare an alice -> bob payment. - amount := lnwire.NewMSatFromSatoshis(btcutil.UnitsPerCoin()) - htlcAmt, totalTimelock, hops := generateHops(amount, testStartingHeight, - n.bobChannelLink) - - firstHop := n.bobChannelLink.ShortChanID() - - invoice, payFunc, err := preparePayment( - n.aliceServer, n.bobServer, firstHop, hops, amount, htlcAmt, - totalTimelock, - ) - if err != nil { - t.Fatalf("unable to prepare the payment: %v", err) - } - - // Cancel the invoice at bob's end. - hash := invoice.Terms.PaymentPreimage.Hash() - err = n.bobServer.registry.CancelInvoice(hash) - if err != nil { - t.Fatal(err) - } - - // Have Alice fire the payment. - err = waitForPayFuncResult(payFunc, 30*time.Second) - - // Because the invoice is canceled, we expect an unknown payment hash - // result. 
- errr := er.Wrapped(err) - rtErr, ok := errr.(ClearTextError) - if !ok { - t.Fatalf("expected ClearTextError, but got %v", err) - } - _, ok = rtErr.WireMessage().(*lnwire.FailIncorrectDetails) - if !ok { - t.Fatalf("expected unknown payment hash, but got %v", err) - } -} - -type hodlInvoiceTestCtx struct { - n *twoHopNetwork - startBandwidthAlice lnwire.MilliSatoshi - startBandwidthBob lnwire.MilliSatoshi - hash lntypes.Hash - preimage lntypes.Preimage - amount lnwire.MilliSatoshi - errChan chan er.R - - restoreBob func() (*lnwallet.LightningChannel, er.R) - - cleanUp func() -} - -func newHodlInvoiceTestCtx(t *testing.T) (*hodlInvoiceTestCtx, er.R) { - // Setup a alice-bob network. - alice, bob, cleanUp, err := createTwoClusterChannels( - btcutil.UnitsPerCoin()*3, - btcutil.UnitsPerCoin()*5, - ) - if err != nil { - t.Fatalf("unable to create channel: %v", err) - } - - n := newTwoHopNetwork(t, alice.channel, bob.channel, testStartingHeight) - if err := n.start(); err != nil { - t.Fatal(err) - } - - aliceBandwidthBefore := n.aliceChannelLink.Bandwidth() - bobBandwidthBefore := n.bobChannelLink.Bandwidth() - - debug := false - if debug { - // Log message that alice receives. - n.aliceServer.intersect( - createLogFunc("alice", n.aliceChannelLink.ChanID()), - ) - - // Log message that bob receives. - n.bobServer.intersect( - createLogFunc("bob", n.bobChannelLink.ChanID()), - ) - } - - amount := lnwire.NewMSatFromSatoshis(btcutil.UnitsPerCoin()) - htlcAmt, totalTimelock, hops := generateHops( - amount, testStartingHeight, n.bobChannelLink, - ) - - // Generate hold invoice preimage. - r, err := generateRandomBytes(sha256.Size) - if err != nil { - t.Fatal(err) - } - preimage, err := lntypes.MakePreimage(r) - if err != nil { - t.Fatal(err) - } - hash := preimage.Hash() - - // Have alice pay the hodl invoice, wait for bob's commitment state to - // be updated and the invoice state to be updated. 
- receiver := n.bobServer - receiver.registry.settleChan = make(chan lntypes.Hash) - firstHop := n.bobChannelLink.ShortChanID() - errChan := n.makeHoldPayment( - n.aliceServer, receiver, firstHop, hops, amount, htlcAmt, - totalTimelock, preimage, - ) - - select { - case err := <-errChan: - t.Fatalf("no payment result expected: %v", err) - case <-time.After(5 * time.Second): - t.Fatal("timeout") - case h := <-receiver.registry.settleChan: - if hash != h { - t.Fatal("unexpect invoice settled") - } - } - - return &hodlInvoiceTestCtx{ - n: n, - startBandwidthAlice: aliceBandwidthBefore, - startBandwidthBob: bobBandwidthBefore, - preimage: preimage, - hash: hash, - amount: amount, - errChan: errChan, - restoreBob: bob.restore, - - cleanUp: func() { - cleanUp() - n.stop() - }, - }, nil -} - -// TestChannelLinkHoldInvoiceSettle asserts that a hodl invoice can be settled. -func TestChannelLinkHoldInvoiceSettle(t *testing.T) { - t.Parallel() - - defer timeout(t)() - - ctx, err := newHodlInvoiceTestCtx(t) - if err != nil { - t.Fatal(err) - } - defer ctx.cleanUp() - - err = ctx.n.bobServer.registry.SettleHodlInvoice(ctx.preimage) - if err != nil { - t.Fatal(err) - } - - // Wait for payment to succeed. - err = <-ctx.errChan - if err != nil { - t.Fatal(err) - } - - // Wait for Alice to receive the revocation. This is needed - // because the settles are pipelined to the switch and otherwise - // the bandwidth won't be updated by the time Alice receives a - // response here. - time.Sleep(2 * time.Second) - - if ctx.startBandwidthAlice-ctx.amount != - ctx.n.aliceChannelLink.Bandwidth() { - - t.Fatal("alice bandwidth should have decrease on payment " + - "amount") - } - - if ctx.startBandwidthBob+ctx.amount != - ctx.n.bobChannelLink.Bandwidth() { - - t.Fatalf("bob bandwidth isn't match: expected %v, got %v", - ctx.startBandwidthBob+ctx.amount, - ctx.n.bobChannelLink.Bandwidth()) - } -} - -// TestChannelLinkHoldInvoiceSettle asserts that a hodl invoice can be canceled. 
-func TestChannelLinkHoldInvoiceCancel(t *testing.T) { - t.Parallel() - - defer timeout(t)() - - ctx, err := newHodlInvoiceTestCtx(t) - if err != nil { - t.Fatal(err) - } - defer ctx.cleanUp() - - err = ctx.n.bobServer.registry.CancelInvoice(ctx.hash) - if err != nil { - t.Fatal(err) - } - - // Wait for payment to succeed. - err = <-ctx.errChan - assertFailureCode(t, err, lnwire.CodeIncorrectOrUnknownPaymentDetails) -} - -// TestChannelLinkHoldInvoiceRestart asserts hodl htlcs are held after blocks -// are mined and the link is restarted. The initial expiry checks should not -// apply to hodl htlcs after restart. -func TestChannelLinkHoldInvoiceRestart(t *testing.T) { - t.Parallel() - - defer timeout(t)() - - chanAmt := btcutil.UnitsPerCoin() * 5 - - // We'll start by creating a new link with our chanAmt (5 BTC). We will - // only be testing Alice's behavior, so the reference to Bob's channel - // state is unnecessary. - aliceLink, bobChannel, _, start, cleanUp, restore, err := - newSingleLinkTestHarness(chanAmt, 0) - if err != nil { - t.Fatalf("unable to create link: %v", err) - } - defer cleanUp() - - alice := newPersistentLinkHarness( - t, aliceLink, nil, restore, - ) - - if err := start(); err != nil { - t.Fatalf("unable to start test harness: %v", err) - } - - var ( - coreLink = alice.coreLink - registry = coreLink.cfg.Registry.(*mockInvoiceRegistry) - ) - - registry.settleChan = make(chan lntypes.Hash) - - htlc, invoice := generateHtlcAndInvoice(t, 0) - - // Convert into a hodl invoice and save the preimage for later. - preimage := invoice.Terms.PaymentPreimage - invoice.Terms.PaymentPreimage = nil - invoice.HodlInvoice = true - - // We must add the invoice to the registry, such that Alice - // expects this payment. 
- err = registry.AddInvoice( - *invoice, htlc.PaymentHash, - ) - if err != nil { - t.Fatalf("unable to add invoice to registry: %v", err) - } - - ctx := linkTestContext{ - t: t, - aliceLink: alice.link, - aliceMsgs: alice.msgs, - bobChannel: bobChannel, - } - - // Lock in htlc paying the hodl invoice. - ctx.sendHtlcBobToAlice(htlc) - ctx.sendCommitSigBobToAlice(1) - ctx.receiveRevAndAckAliceToBob() - ctx.receiveCommitSigAliceToBob(1) - ctx.sendRevAndAckBobToAlice() - - // We expect a call to the invoice registry to notify the arrival of the - // htlc. - <-registry.settleChan - - // Increase block height. This height will be retrieved by the link - // after restart. - coreLink.cfg.Switch.bestHeight++ - - // Restart link. - alice.restart(false) - ctx.aliceLink = alice.link - ctx.aliceMsgs = alice.msgs - - // Expect htlc to be reprocessed. - <-registry.settleChan - - // Settle the invoice with the preimage. - err = registry.SettleHodlInvoice(*preimage) - if err != nil { - t.Fatalf("settle hodl invoice: %v", err) - } - - // Expect alice to send a settle and commitsig message to bob. - ctx.receiveSettleAliceToBob() - ctx.receiveCommitSigAliceToBob(0) - - // Stop the link - alice.link.Stop() - - // Check that no unexpected messages were sent. - select { - case msg := <-alice.msgs: - t.Fatalf("did not expect message %T", msg) - default: - } -} - -// TestChannelLinkRevocationWindowRegular asserts that htlcs paying to a regular -// invoice are settled even if the revocation window gets exhausted. -func TestChannelLinkRevocationWindowRegular(t *testing.T) { - t.Parallel() - - chanAmt := btcutil.UnitsPerCoin() * 5 - - // We'll start by creating a new link with our chanAmt (5 BTC). We will - // only be testing Alice's behavior, so the reference to Bob's channel - // state is unnecessary. 
- aliceLink, bobChannel, _, start, cleanUp, _, err := - newSingleLinkTestHarness(chanAmt, 0) - if err != nil { - t.Fatalf("unable to create link: %v", err) - } - defer cleanUp() - - if err := start(); err != nil { - t.Fatalf("unable to start test harness: %v", err) - } - defer aliceLink.Stop() - - var ( - coreLink = aliceLink.(*channelLink) - registry = coreLink.cfg.Registry.(*mockInvoiceRegistry) - aliceMsgs = coreLink.cfg.Peer.(*mockPeer).sentMsgs - ) - - ctx := linkTestContext{ - t: t, - aliceLink: aliceLink, - aliceMsgs: aliceMsgs, - bobChannel: bobChannel, - } - - registry.settleChan = make(chan lntypes.Hash) - - htlc1, invoice1 := generateHtlcAndInvoice(t, 0) - htlc2, invoice2 := generateHtlcAndInvoice(t, 1) - - // We must add the invoice to the registry, such that Alice - // expects this payment. - err = registry.AddInvoice(*invoice1, htlc1.PaymentHash) - if err != nil { - t.Fatalf("unable to add invoice to registry: %v", err) - } - err = registry.AddInvoice(*invoice2, htlc2.PaymentHash) - if err != nil { - t.Fatalf("unable to add invoice to registry: %v", err) - } - - // Lock in htlc 1 on both sides. - ctx.sendHtlcBobToAlice(htlc1) - ctx.sendCommitSigBobToAlice(1) - ctx.receiveRevAndAckAliceToBob() - ctx.receiveCommitSigAliceToBob(1) - ctx.sendRevAndAckBobToAlice() - - // We expect a call to the invoice registry to notify the arrival of the - // htlc. - select { - case <-registry.settleChan: - case <-time.After(5 * time.Second): - t.Fatal("expected invoice to be settled") - } - - // Expect alice to send a settle and commitsig message to bob. Bob does - // not yet send the revocation. - ctx.receiveSettleAliceToBob() - ctx.receiveCommitSigAliceToBob(0) - - // Pay invoice 2. - ctx.sendHtlcBobToAlice(htlc2) - ctx.sendCommitSigBobToAlice(2) - ctx.receiveRevAndAckAliceToBob() - - // At this point, Alice cannot send a new commit sig to bob because the - // revocation window is exhausted. - - // Bob sends revocation and signs commit with htlc1 settled. 
- ctx.sendRevAndAckBobToAlice() - - // After the revocation, it is again possible for Alice to send a commit - // sig with htlc2. - ctx.receiveCommitSigAliceToBob(1) -} - -// TestChannelLinkRevocationWindowHodl asserts that htlcs paying to a hodl -// invoice are settled even if the revocation window gets exhausted. -func TestChannelLinkRevocationWindowHodl(t *testing.T) { - t.Parallel() - - chanAmt := btcutil.UnitsPerCoin() * 5 - - // We'll start by creating a new link with our chanAmt (5 BTC). We will - // only be testing Alice's behavior, so the reference to Bob's channel - // state is unnecessary. - aliceLink, bobChannel, batchTicker, start, cleanUp, _, err := - newSingleLinkTestHarness(chanAmt, 0) - if err != nil { - t.Fatalf("unable to create link: %v", err) - } - defer cleanUp() - - if err := start(); err != nil { - t.Fatalf("unable to start test harness: %v", err) - } - - var ( - coreLink = aliceLink.(*channelLink) - registry = coreLink.cfg.Registry.(*mockInvoiceRegistry) - aliceMsgs = coreLink.cfg.Peer.(*mockPeer).sentMsgs - ) - - registry.settleChan = make(chan lntypes.Hash) - - // Generate two invoice-htlc pairs. - htlc1, invoice1 := generateHtlcAndInvoice(t, 0) - htlc2, invoice2 := generateHtlcAndInvoice(t, 1) - - // Convert into hodl invoices and save the preimages for later. - preimage1 := invoice1.Terms.PaymentPreimage - invoice1.Terms.PaymentPreimage = nil - invoice1.HodlInvoice = true - - preimage2 := invoice2.Terms.PaymentPreimage - invoice2.Terms.PaymentPreimage = nil - invoice2.HodlInvoice = true - - // We must add the invoices to the registry, such that Alice - // expects the payments. 
- err = registry.AddInvoice(*invoice1, htlc1.PaymentHash) - if err != nil { - t.Fatalf("unable to add invoice to registry: %v", err) - } - err = registry.AddInvoice(*invoice2, htlc2.PaymentHash) - if err != nil { - t.Fatalf("unable to add invoice to registry: %v", err) - } - - ctx := linkTestContext{ - t: t, - aliceLink: aliceLink, - aliceMsgs: aliceMsgs, - bobChannel: bobChannel, - } - - // Lock in htlc 1 on both sides. - ctx.sendHtlcBobToAlice(htlc1) - ctx.sendCommitSigBobToAlice(1) - ctx.receiveRevAndAckAliceToBob() - ctx.receiveCommitSigAliceToBob(1) - ctx.sendRevAndAckBobToAlice() - - // We expect a call to the invoice registry to notify the arrival of - // htlc 1. - select { - case <-registry.settleChan: - case <-time.After(15 * time.Second): - t.Fatal("exit hop notification not received") - } - - // Lock in htlc 2 on both sides. - ctx.sendHtlcBobToAlice(htlc2) - ctx.sendCommitSigBobToAlice(2) - ctx.receiveRevAndAckAliceToBob() - ctx.receiveCommitSigAliceToBob(2) - ctx.sendRevAndAckBobToAlice() - - select { - case <-registry.settleChan: - case <-time.After(15 * time.Second): - t.Fatal("exit hop notification not received") - } - - // Settle invoice 1 with the preimage. - err = registry.SettleHodlInvoice(*preimage1) - if err != nil { - t.Fatalf("settle hodl invoice: %v", err) - } - - // Expect alice to send a settle and commitsig message to bob. Bob does - // not yet send the revocation. - ctx.receiveSettleAliceToBob() - ctx.receiveCommitSigAliceToBob(1) - - // Settle invoice 2 with the preimage. - err = registry.SettleHodlInvoice(*preimage2) - if err != nil { - t.Fatalf("settle hodl invoice: %v", err) - } - - // Expect alice to send a settle for htlc 2. - ctx.receiveSettleAliceToBob() - - // At this point, Alice cannot send a new commit sig to bob because the - // revocation window is exhausted. - - // Sleep to let timer(s) expire. - time.Sleep(time.Second) - - // We don't expect a commitSig from Alice. 
- select { - case msg := <-aliceMsgs: - t.Fatalf("did not expect message %T", msg) - default: - } - - // Bob sends revocation and signs commit with htlc 1 settled. - ctx.sendRevAndAckBobToAlice() - - // Allow some time for it to be processed by the link. - time.Sleep(time.Second) - - // Trigger the batch timer as this may trigger Alice to send a commit - // sig. - batchTicker <- time.Time{} - - // After the revocation, it is again possible for Alice to send a commit - // sig no more htlcs. Bob acks the update. - ctx.receiveCommitSigAliceToBob(0) - ctx.sendRevAndAckBobToAlice() - - // Bob updates his remote commit tx. - ctx.sendCommitSigBobToAlice(0) - ctx.receiveRevAndAckAliceToBob() - - // Stop the link - aliceLink.Stop() - - // Check that no unexpected messages were sent. - select { - case msg := <-aliceMsgs: - t.Fatalf("did not expect message %T", msg) - default: - } -} - -// TestChannelLinkReceiveEmptySig tests the response of the link to receiving an -// empty commit sig. This should be tolerated, but we shouldn't send out an -// empty sig ourselves. -func TestChannelLinkReceiveEmptySig(t *testing.T) { - t.Parallel() - - chanAmt := btcutil.UnitsPerCoin() * 5 - chanReserve := btcutil.UnitsPerCoin() * 1 - aliceLink, bobChannel, batchTicker, start, cleanUp, _, err := - newSingleLinkTestHarness(chanAmt, chanReserve) - if err != nil { - t.Fatalf("unable to create link: %v", err) - } - defer cleanUp() - - if err := start(); err != nil { - t.Fatalf("unable to start test harness: %v", err) - } - - var ( - coreLink = aliceLink.(*channelLink) - aliceMsgs = coreLink.cfg.Peer.(*mockPeer).sentMsgs - ) - - ctx := linkTestContext{ - t: t, - aliceLink: aliceLink, - aliceMsgs: aliceMsgs, - bobChannel: bobChannel, - } - - htlc, _ := generateHtlcAndInvoice(t, 0) - - // First, send an Add from Alice to Bob. - ctx.sendHtlcAliceToBob(0, htlc) - ctx.receiveHtlcAliceToBob() - - // Tick the batch ticker to trigger a commitsig from Alice->Bob. 
- select { - case batchTicker <- time.Now(): - case <-time.After(5 * time.Second): - t.Fatalf("could not force commit sig") - } - - // Make Bob send a CommitSig. Since Bob hasn't received Alice's sig, he - // cannot add the htlc to his remote tx yet. The commit sig that we - // force Bob to send will be empty. Note that this normally does not - // happen, because the link (which is not present for Bob in this test) - // check whether Bob actually owes a sig first. - ctx.sendCommitSigBobToAlice(0) - - // Receive a CommitSig from Alice covering the htlc from above. - ctx.receiveCommitSigAliceToBob(1) - - // Wait for RevokeAndAck Alice->Bob. Even though Bob sent an empty - // commit sig, Alice still needs to revoke the previous commitment tx. - ctx.receiveRevAndAckAliceToBob() - - // Send RevokeAndAck Bob->Alice to ack the added htlc. - ctx.sendRevAndAckBobToAlice() - - // We received an empty commit sig, we accepted it, but there is nothing - // new to sign for us. - - // No other messages are expected. - ctx.assertNoMsgFromAlice(time.Second) - - // Stop the link - aliceLink.Stop() -} - -// TestPendingCommitTicker tests that a link will fail itself after a timeout if -// the commitment dance stalls out. 
-func TestPendingCommitTicker(t *testing.T) { - t.Parallel() - - chanAmt := btcutil.UnitsPerCoin() * 5 - chanReserve := btcutil.UnitsPerCoin() * 1 - aliceLink, bobChannel, batchTicker, start, cleanUp, _, err := - newSingleLinkTestHarness(chanAmt, chanReserve) - if err != nil { - t.Fatalf("unable to create link: %v", err) - } - - var ( - coreLink = aliceLink.(*channelLink) - aliceMsgs = coreLink.cfg.Peer.(*mockPeer).sentMsgs - ) - - coreLink.cfg.PendingCommitTicker = ticker.NewForce(time.Millisecond) - - linkErrs := make(chan LinkFailureError) - coreLink.cfg.OnChannelFailure = func(_ lnwire.ChannelID, - _ lnwire.ShortChannelID, linkErr LinkFailureError) { - - linkErrs <- linkErr - } - - if err := start(); err != nil { - t.Fatalf("unable to start test harness: %v", err) - } - defer cleanUp() - - ctx := linkTestContext{ - t: t, - aliceLink: aliceLink, - bobChannel: bobChannel, - aliceMsgs: aliceMsgs, - } - - // Send an HTLC from Alice to Bob, and signal the batch ticker to signa - // a commitment. - htlc, _ := generateHtlcAndInvoice(t, 0) - ctx.sendHtlcAliceToBob(0, htlc) - ctx.receiveHtlcAliceToBob() - batchTicker <- time.Now() - - select { - case msg := <-aliceMsgs: - if _, ok := msg.(*lnwire.CommitSig); !ok { - t.Fatalf("expected CommitSig, got: %T", msg) - } - case <-time.After(time.Second): - t.Fatalf("alice did not send commit sig") - } - - // Check that Alice hasn't failed. - select { - case linkErr := <-linkErrs: - t.Fatalf("link failed unexpectedly: %v", linkErr) - case <-time.After(50 * time.Millisecond): - } - - // Without completing the dance, send another HTLC from Alice to Bob. - // Since the revocation window has been exhausted, we should see the - // link fail itself immediately due to the low pending commit timeout. - // In production this would be much longer, e.g. a minute. 
- htlc, _ = generateHtlcAndInvoice(t, 1) - ctx.sendHtlcAliceToBob(1, htlc) - ctx.receiveHtlcAliceToBob() - batchTicker <- time.Now() - - // Assert that we get the expected link failure from Alice. - select { - case linkErr := <-linkErrs: - if linkErr.code != ErrRemoteUnresponsive { - t.Fatalf("error code mismatch, "+ - "want: ErrRemoteUnresponsive, got: %v", - linkErr.code) - } - - case <-time.After(time.Second): - t.Fatalf("did not receive failure") - } -} - -// assertFailureCode asserts that an error is of type ClearTextError and that -// the failure code is as expected. -func assertFailureCode(t *testing.T, err er.R, code lnwire.FailCode) { - errr := er.Wrapped(err) - rtErr, ok := errr.(ClearTextError) - if !ok { - t.Fatalf("expected ClearTextError but got %T", err) - } - - if rtErr.WireMessage().Code() != code { - t.Fatalf("expected %v but got %v", - code, rtErr.WireMessage().Code()) - } -} - -func TestMain(m *testing.M) { - globalcfg.SelectConfig(globalcfg.BitcoinDefaults()) - os.Exit(m.Run()) -} diff --git a/lnd/htlcswitch/linkfailure.go b/lnd/htlcswitch/linkfailure.go deleted file mode 100644 index 5a2a02bd..00000000 --- a/lnd/htlcswitch/linkfailure.go +++ /dev/null @@ -1,120 +0,0 @@ -package htlcswitch - -var ( - // ErrLinkShuttingDown signals that the link is shutting down. - ErrLinkShuttingDown = Err.CodeWithDetail("ErrLinkShuttingDown", "link shutting down") -) - -// errorCode encodes the possible types of errors that will make us fail the -// current link. -type errorCode uint8 - -const ( - // ErrInternalError indicates that something internal in the link - // failed. In this case we will send a generic error to our peer. - ErrInternalError errorCode = iota - - // ErrRemoteError indicates that our peer sent an error, prompting up - // to fail the link. - ErrRemoteError - - // ErrRemoteUnresponsive indicates that our peer took too long to - // complete a commitment dance. 
- ErrRemoteUnresponsive - - // ErrSyncError indicates that we failed synchronizing the state of the - // channel with our peer. - ErrSyncError - - // ErrInvalidUpdate indicates that the peer send us an invalid update. - ErrInvalidUpdate - - // ErrInvalidCommitment indicates that the remote peer sent us an - // invalid commitment signature. - ErrInvalidCommitment - - // ErrInvalidRevocation indicates that the remote peer send us an - // invalid revocation message. - ErrInvalidRevocation - - // ErrRecoveryError the channel was unable to be resumed, we need the - // remote party to force close the channel out on chain now as a - // result. - ErrRecoveryError -) - -// LinkFailureError encapsulates an error that will make us fail the current -// link. It contains the necessary information needed to determine if we should -// force close the channel in the process, and if any error data should be sent -// to the peer. -type LinkFailureError struct { - // code is the type of error this LinkFailureError encapsulates. - code errorCode - - // ForceClose indicates whether we should force close the channel - // because of this error. - ForceClose bool - - // PermanentFailure indicates whether this failure is permanent, and - // the channel should not be attempted loaded again. - PermanentFailure bool - - // SendData is a byte slice that will be sent to the peer. If nil a - // generic error will be sent. - SendData []byte -} - -// A compile time check to ensure LinkFailureError implements the error -// interface. -var _ error = (*LinkFailureError)(nil) - -// Error returns a generic error for the LinkFailureError. -// -// NOTE: Part of the error interface. 
-func (e LinkFailureError) Error() string { - switch e.code { - case ErrInternalError: - return "internal error" - case ErrRemoteError: - return "remote error" - case ErrRemoteUnresponsive: - return "remote unresponsive" - case ErrSyncError: - return "sync error" - case ErrInvalidUpdate: - return "invalid update" - case ErrInvalidCommitment: - return "invalid commitment" - case ErrInvalidRevocation: - return "invalid revocation" - case ErrRecoveryError: - return "unable to resume channel, recovery required" - default: - return "unknown error" - } -} - -// ShouldSendToPeer indicates whether we should send an error to the peer if -// the link fails with this LinkFailureError. -func (e LinkFailureError) ShouldSendToPeer() bool { - switch e.code { - - // Since sending an error can lead some nodes to force close the - // channel, create a whitelist of the failures we want to send so that - // newly added error codes aren't automatically sent to the remote peer. - case - ErrInternalError, - ErrRemoteError, - ErrSyncError, - ErrInvalidUpdate, - ErrInvalidCommitment, - ErrInvalidRevocation, - ErrRecoveryError: - - return true - - // In all other cases we will not attempt to send our peer an error. - default: - return false - } -} diff --git a/lnd/htlcswitch/mailbox.go b/lnd/htlcswitch/mailbox.go deleted file mode 100644 index a3319d97..00000000 --- a/lnd/htlcswitch/mailbox.go +++ /dev/null @@ -1,904 +0,0 @@ -package htlcswitch - -import ( - "bytes" - "container/list" - "sync" - "time" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/clock" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/pktlog/log" -) - -var ( - // ErrMailBoxShuttingDown is returned when the mailbox is interrupted by - // a shutdown request. - ErrMailBoxShuttingDown = Err.CodeWithDetail("ErrMailBoxShuttingDown", "mailbox is shutting down") - - // ErrPacketAlreadyExists signals that an attempt to add a packet failed - // because it already exists in the mailbox. 
- ErrPacketAlreadyExists = Err.CodeWithDetail("ErrPacketAlreadyExists", "mailbox already has packet") -) - -// MailBox is an interface which represents a concurrent-safe, in-order -// delivery queue for messages from the network and also from the main switch. -// This struct servers as a buffer between incoming messages, and messages to -// the handled by the link. Each of the mutating methods within this interface -// should be implemented in a non-blocking manner. -type MailBox interface { - // AddMessage appends a new message to the end of the message queue. - AddMessage(msg lnwire.Message) er.R - - // AddPacket appends a new message to the end of the packet queue. - AddPacket(pkt *htlcPacket) er.R - - // HasPacket queries the packets for a circuit key, this is used to drop - // packets bound for the switch that already have a queued response. - HasPacket(CircuitKey) bool - - // AckPacket removes a packet from the mailboxes in-memory replay - // buffer. This will prevent a packet from being delivered after a link - // restarts if the switch has remained online. The returned boolean - // indicates whether or not a packet with the passed incoming circuit - // key was removed. - AckPacket(CircuitKey) bool - - // FailAdd fails an UpdateAddHTLC that exists within the mailbox, - // removing it from the in-memory replay buffer. This will prevent the - // packet from being delivered after the link restarts if the switch has - // remained online. The generated LinkError will show an - // OutgoingFailureDownstreamHtlcAdd FailureDetail. - FailAdd(pkt *htlcPacket) - - // MessageOutBox returns a channel that any new messages ready for - // delivery will be sent on. - MessageOutBox() chan lnwire.Message - - // PacketOutBox returns a channel that any new packets ready for - // delivery will be sent on. - PacketOutBox() chan *htlcPacket - - // Clears any pending wire messages from the inbox. 
- ResetMessages() er.R - - // Reset the packet head to point at the first element in the list. - ResetPackets() er.R - - // Start starts the mailbox and any goroutines it needs to operate - // properly. - Start() - - // Stop signals the mailbox and its goroutines for a graceful shutdown. - Stop() -} - -type mailBoxConfig struct { - // shortChanID is the short channel id of the channel this mailbox - // belongs to. - shortChanID lnwire.ShortChannelID - - // fetchUpdate retreives the most recent channel update for the channel - // this mailbox belongs to. - fetchUpdate func(lnwire.ShortChannelID) (*lnwire.ChannelUpdate, er.R) - - // forwardPackets send a varidic number of htlcPackets to the switch to - // be routed. A quit channel should be provided so that the call can - // properly exit during shutdown. - forwardPackets func(chan struct{}, ...*htlcPacket) er.R - - // clock is a time source for the mailbox. - clock clock.Clock - - // expiry is the interval after which Adds will be cancelled if they - // have not been yet been delivered. The computed deadline will expiry - // this long after the Adds are added via AddPacket. - expiry time.Duration -} - -// memoryMailBox is an implementation of the MailBox struct backed by purely -// in-memory queues. -type memoryMailBox struct { - started sync.Once - stopped sync.Once - - cfg *mailBoxConfig - - wireMessages *list.List - wireMtx sync.Mutex - wireCond *sync.Cond - - messageOutbox chan lnwire.Message - msgReset chan chan struct{} - - // repPkts is a queue for reply packets, e.g. Settles and Fails. - repPkts *list.List - repIndex map[CircuitKey]*list.Element - repHead *list.Element - - // addPkts is a dedicated queue for Adds. 
- addPkts *list.List - addIndex map[CircuitKey]*list.Element - addHead *list.Element - - pktMtx sync.Mutex - pktCond *sync.Cond - - pktOutbox chan *htlcPacket - pktReset chan chan struct{} - - wireShutdown chan struct{} - pktShutdown chan struct{} - quit chan struct{} -} - -// newMemoryMailBox creates a new instance of the memoryMailBox. -func newMemoryMailBox(cfg *mailBoxConfig) *memoryMailBox { - box := &memoryMailBox{ - cfg: cfg, - wireMessages: list.New(), - repPkts: list.New(), - addPkts: list.New(), - messageOutbox: make(chan lnwire.Message), - pktOutbox: make(chan *htlcPacket), - msgReset: make(chan chan struct{}, 1), - pktReset: make(chan chan struct{}, 1), - repIndex: make(map[CircuitKey]*list.Element), - addIndex: make(map[CircuitKey]*list.Element), - wireShutdown: make(chan struct{}), - pktShutdown: make(chan struct{}), - quit: make(chan struct{}), - } - box.wireCond = sync.NewCond(&box.wireMtx) - box.pktCond = sync.NewCond(&box.pktMtx) - - return box -} - -// A compile time assertion to ensure that memoryMailBox meets the MailBox -// interface. -var _ MailBox = (*memoryMailBox)(nil) - -// courierType is an enum that reflects the distinct types of messages a -// MailBox can handle. Each type will be placed in an isolated mail box and -// will have a dedicated goroutine for delivering the messages. -type courierType uint8 - -const ( - // wireCourier is a type of courier that handles wire messages. - wireCourier courierType = iota - - // pktCourier is a type of courier that handles htlc packets. - pktCourier -) - -// Start starts the mailbox and any goroutines it needs to operate properly. -// -// NOTE: This method is part of the MailBox interface. -func (m *memoryMailBox) Start() { - m.started.Do(func() { - go m.mailCourier(wireCourier) - go m.mailCourier(pktCourier) - }) -} - -// ResetMessages blocks until all buffered wire messages are cleared. 
-func (m *memoryMailBox) ResetMessages() er.R { - msgDone := make(chan struct{}) - select { - case m.msgReset <- msgDone: - return m.signalUntilReset(wireCourier, msgDone) - case <-m.quit: - return ErrMailBoxShuttingDown.Default() - } -} - -// ResetPackets blocks until the head of packets buffer is reset, causing the -// packets to be redelivered in order. -func (m *memoryMailBox) ResetPackets() er.R { - pktDone := make(chan struct{}) - select { - case m.pktReset <- pktDone: - return m.signalUntilReset(pktCourier, pktDone) - case <-m.quit: - return ErrMailBoxShuttingDown.Default() - } -} - -// signalUntilReset strobes the condition variable for the specified inbox type -// until receiving a response that the mailbox has processed a reset. -func (m *memoryMailBox) signalUntilReset(cType courierType, - done chan struct{}) er.R { - - for { - - switch cType { - case wireCourier: - m.wireCond.Signal() - case pktCourier: - m.pktCond.Signal() - } - - select { - case <-time.After(time.Millisecond): - continue - case <-done: - return nil - case <-m.quit: - return ErrMailBoxShuttingDown.Default() - } - } -} - -// AckPacket removes the packet identified by it's incoming circuit key from the -// queue of packets to be delivered. The returned boolean indicates whether or -// not a packet with the passed incoming circuit key was removed. -// -// NOTE: It is safe to call this method multiple times for the same circuit key. -func (m *memoryMailBox) AckPacket(inKey CircuitKey) bool { - m.pktCond.L.Lock() - defer m.pktCond.L.Unlock() - - if entry, ok := m.repIndex[inKey]; ok { - // Check whether we are removing the head of the queue. If so, - // we must advance the head to the next packet before removing. - // It's possible that the courier has already advanced the - // repHead, so this check prevents the repHead from getting - // desynchronized. 
- if entry == m.repHead { - m.repHead = entry.Next() - } - m.repPkts.Remove(entry) - delete(m.repIndex, inKey) - - return true - } - - if entry, ok := m.addIndex[inKey]; ok { - // Check whether we are removing the head of the queue. If so, - // we must advance the head to the next add before removing. - // It's possible that the courier has already advanced the - // addHead, so this check prevents the addHead from getting - // desynchronized. - // - // NOTE: While this event is rare for Settles or Fails, it could - // be very common for Adds since the mailbox has the ability to - // cancel Adds before they are delivered. When that occurs, the - // head of addPkts has only been peeked and we expect to be - // removing the head of the queue. - if entry == m.addHead { - m.addHead = entry.Next() - } - - m.addPkts.Remove(entry) - delete(m.addIndex, inKey) - - return true - } - - return false -} - -// HasPacket queries the packets for a circuit key, this is used to drop packets -// bound for the switch that already have a queued response. -func (m *memoryMailBox) HasPacket(inKey CircuitKey) bool { - m.pktCond.L.Lock() - _, ok := m.repIndex[inKey] - m.pktCond.L.Unlock() - - return ok -} - -// Stop signals the mailbox and its goroutines for a graceful shutdown. -// -// NOTE: This method is part of the MailBox interface. -func (m *memoryMailBox) Stop() { - m.stopped.Do(func() { - close(m.quit) - - m.signalUntilShutdown(wireCourier) - m.signalUntilShutdown(pktCourier) - }) -} - -// signalUntilShutdown strobes the condition variable of the passed courier -// type, blocking until the worker has exited. 
-func (m *memoryMailBox) signalUntilShutdown(cType courierType) { - var ( - cond *sync.Cond - shutdown chan struct{} - ) - - switch cType { - case wireCourier: - cond = m.wireCond - shutdown = m.wireShutdown - case pktCourier: - cond = m.pktCond - shutdown = m.pktShutdown - } - - for { - select { - case <-time.After(time.Millisecond): - cond.Signal() - case <-shutdown: - return - } - } -} - -// pktWithExpiry wraps an incoming packet and records the time at which it it -// should be canceled from the mailbox. This will be used to detect if it gets -// stuck in the mailbox and inform when to cancel back. -type pktWithExpiry struct { - pkt *htlcPacket - expiry time.Time -} - -func (p *pktWithExpiry) deadline(clock clock.Clock) <-chan time.Time { - return clock.TickAfter(p.expiry.Sub(clock.Now())) -} - -// mailCourier is a dedicated goroutine whose job is to reliably deliver -// messages of a particular type. There are two types of couriers: wire -// couriers, and mail couriers. Depending on the passed courierType, this -// goroutine will assume one of two roles. -func (m *memoryMailBox) mailCourier(cType courierType) { - switch cType { - case wireCourier: - defer close(m.wireShutdown) - case pktCourier: - defer close(m.pktShutdown) - } - - // TODO(roasbeef): refactor... - - for { - // First, we'll check our condition. If our target mailbox is - // empty, then we'll wait until a new item is added. - switch cType { - case wireCourier: - m.wireCond.L.Lock() - for m.wireMessages.Front() == nil { - m.wireCond.Wait() - - select { - case msgDone := <-m.msgReset: - m.wireMessages.Init() - - close(msgDone) - case <-m.quit: - m.wireCond.L.Unlock() - return - default: - } - } - - case pktCourier: - m.pktCond.L.Lock() - for m.repHead == nil && m.addHead == nil { - m.pktCond.Wait() - - select { - // Resetting the packet queue means just moving - // our pointer to the front. This ensures that - // any un-ACK'd messages are re-delivered upon - // reconnect. 
- case pktDone := <-m.pktReset: - m.repHead = m.repPkts.Front() - m.addHead = m.addPkts.Front() - - close(pktDone) - - case <-m.quit: - m.pktCond.L.Unlock() - return - default: - } - } - } - - var ( - nextRep *htlcPacket - nextRepEl *list.Element - nextAdd *pktWithExpiry - nextAddEl *list.Element - nextMsg lnwire.Message - ) - switch cType { - // Grab the datum off the front of the queue, shifting the - // slice's reference down one in order to remove the datum from - // the queue. - case wireCourier: - entry := m.wireMessages.Front() - nextMsg = m.wireMessages.Remove(entry).(lnwire.Message) - - // For packets, we actually never remove an item until it has - // been ACK'd by the link. This ensures that if a read packet - // doesn't make it into a commitment, then it'll be - // re-delivered once the link comes back online. - case pktCourier: - // Peek at the head of the Settle/Fails and Add queues. - // We peak both even if there is a Settle/Fail present - // because we need to set a deadline for the next - // pending Add if it's present. Due to clock - // monotonicity, we know that the head of the Adds is - // the next to expire. - if m.repHead != nil { - nextRep = m.repHead.Value.(*htlcPacket) - nextRepEl = m.repHead - } - if m.addHead != nil { - nextAdd = m.addHead.Value.(*pktWithExpiry) - nextAddEl = m.addHead - } - } - - // Now that we're done with the condition, we can unlock it to - // allow any callers to append to the end of our target queue. - switch cType { - case wireCourier: - m.wireCond.L.Unlock() - case pktCourier: - m.pktCond.L.Unlock() - } - - // With the next message obtained, we'll now select to attempt - // to deliver the message. If we receive a kill signal, then - // we'll bail out. 
- switch cType { - case wireCourier: - select { - case m.messageOutbox <- nextMsg: - case msgDone := <-m.msgReset: - m.wireCond.L.Lock() - m.wireMessages.Init() - m.wireCond.L.Unlock() - - close(msgDone) - case <-m.quit: - return - } - - case pktCourier: - var ( - pktOutbox chan *htlcPacket - addOutbox chan *htlcPacket - add *htlcPacket - deadline <-chan time.Time - ) - - // Prioritize delivery of Settle/Fail packets over Adds. - // This ensures that we actively clear the commitment of - // existing HTLCs before trying to add new ones. This - // can help to improve forwarding performance since the - // time to sign a commitment is linear in the number of - // HTLCs manifested on the commitments. - // - // NOTE: Both types are eventually delivered over the - // same channel, but we can control which is delivered - // by exclusively making one nil and the other non-nil. - // We know from our loop condition that at least one - // nextRep and nextAdd are non-nil. - if nextRep != nil { - pktOutbox = m.pktOutbox - } else { - addOutbox = m.pktOutbox - } - - // If we have a pending Add, we'll also construct the - // deadline so we can fail it back if we are unable to - // deliver any message in time. We also dereference the - // nextAdd's packet, since we will need access to it in - // the case we are delivering it and/or if the deadline - // expires. - // - // NOTE: It's possible after this point for add to be - // nil, but this can only occur when addOutbox is also - // nil, hence we won't accidentally deliver a nil - // packet. - if nextAdd != nil { - add = nextAdd.pkt - deadline = nextAdd.deadline(m.cfg.clock) - } - - select { - case pktOutbox <- nextRep: - m.pktCond.L.Lock() - // Only advance the repHead if this Settle or - // Fail is still at the head of the queue. 
- if m.repHead != nil && m.repHead == nextRepEl { - m.repHead = m.repHead.Next() - } - m.pktCond.L.Unlock() - - case addOutbox <- add: - m.pktCond.L.Lock() - // Only advance the addHead if this Add is still - // at the head of the queue. - if m.addHead != nil && m.addHead == nextAddEl { - m.addHead = m.addHead.Next() - } - m.pktCond.L.Unlock() - - case <-deadline: - m.FailAdd(add) - - case pktDone := <-m.pktReset: - m.pktCond.L.Lock() - m.repHead = m.repPkts.Front() - m.addHead = m.addPkts.Front() - m.pktCond.L.Unlock() - - close(pktDone) - - case <-m.quit: - return - } - } - - } -} - -// AddMessage appends a new message to the end of the message queue. -// -// NOTE: This method is safe for concrete use and part of the MailBox -// interface. -func (m *memoryMailBox) AddMessage(msg lnwire.Message) er.R { - // First, we'll lock the condition, and add the message to the end of - // the wire message inbox. - m.wireCond.L.Lock() - m.wireMessages.PushBack(msg) - m.wireCond.L.Unlock() - - // With the message added, we signal to the mailCourier that there are - // additional messages to deliver. - m.wireCond.Signal() - - return nil -} - -// AddPacket appends a new message to the end of the packet queue. -// -// NOTE: This method is safe for concrete use and part of the MailBox -// interface. -func (m *memoryMailBox) AddPacket(pkt *htlcPacket) er.R { - m.pktCond.L.Lock() - switch htlc := pkt.htlc.(type) { - - // Split off Settle/Fail packets into the repPkts queue. - case *lnwire.UpdateFulfillHTLC, *lnwire.UpdateFailHTLC: - if _, ok := m.repIndex[pkt.inKey()]; ok { - m.pktCond.L.Unlock() - return ErrPacketAlreadyExists.Default() - } - - entry := m.repPkts.PushBack(pkt) - m.repIndex[pkt.inKey()] = entry - if m.repHead == nil { - m.repHead = entry - } - - // Split off Add packets into the addPkts queue. 
- case *lnwire.UpdateAddHTLC: - if _, ok := m.addIndex[pkt.inKey()]; ok { - m.pktCond.L.Unlock() - return ErrPacketAlreadyExists.Default() - } - - entry := m.addPkts.PushBack(&pktWithExpiry{ - pkt: pkt, - expiry: m.cfg.clock.Now().Add(m.cfg.expiry), - }) - m.addIndex[pkt.inKey()] = entry - if m.addHead == nil { - m.addHead = entry - } - - default: - m.pktCond.L.Unlock() - return er.Errorf("unknown htlc type: %T", htlc) - } - m.pktCond.L.Unlock() - - // With the packet added, we signal to the mailCourier that there are - // additional packets to consume. - m.pktCond.Signal() - - return nil -} - -// FailAdd fails an UpdateAddHTLC that exists within the mailbox, removing it -// from the in-memory replay buffer. This will prevent the packet from being -// delivered after the link restarts if the switch has remained online. The -// generated LinkError will show an OutgoingFailureDownstreamHtlcAdd -// FailureDetail. -func (m *memoryMailBox) FailAdd(pkt *htlcPacket) { - // First, remove the packet from mailbox. If we didn't find the packet - // because it has already been acked, we'll exit early to avoid sending - // a duplicate fail message through the switch. - if !m.AckPacket(pkt.inKey()) { - return - } - - var ( - localFailure = false - reason lnwire.OpaqueReason - ) - - // Create a temporary channel failure which we will send back to our - // peer if this is a forward, or report to the user if the failed - // payment was locally initiated. - var failure lnwire.FailureMessage - update, err := m.cfg.fetchUpdate(m.cfg.shortChanID) - if err != nil { - failure = &lnwire.FailTemporaryNodeFailure{} - } else { - failure = lnwire.NewTemporaryChannelFailure(update) - } - - // If the payment was locally initiated (which is indicated by a nil - // obfuscator), we do not need to encrypt it back to the sender. 
- if pkt.obfuscator == nil { - var b bytes.Buffer - err := lnwire.EncodeFailure(&b, failure, 0) - if err != nil { - log.Errorf("Unable to encode failure: %v", err) - return - } - reason = lnwire.OpaqueReason(b.Bytes()) - localFailure = true - } else { - // If the packet is part of a forward, (identified by a non-nil - // obfuscator) we need to encrypt the error back to the source. - var err er.R - reason, err = pkt.obfuscator.EncryptFirstHop(failure) - if err != nil { - log.Errorf("Unable to obfuscate error: %v", err) - return - } - } - - // Create a link error containing the temporary channel failure and a - // detail which indicates the we failed to add the htlc. - linkError := NewDetailedLinkError( - failure, OutgoingFailureDownstreamHtlcAdd, - ) - - failPkt := &htlcPacket{ - incomingChanID: pkt.incomingChanID, - incomingHTLCID: pkt.incomingHTLCID, - circuit: pkt.circuit, - sourceRef: pkt.sourceRef, - hasSource: true, - localFailure: localFailure, - linkFailure: linkError, - htlc: &lnwire.UpdateFailHTLC{ - Reason: reason, - }, - } - - if err := m.cfg.forwardPackets(m.quit, failPkt); err != nil { - log.Errorf("Unhandled error while reforwarding packets "+ - "settle/fail over htlcswitch: %v", err) - } -} - -// MessageOutBox returns a channel that any new messages ready for delivery -// will be sent on. -// -// NOTE: This method is part of the MailBox interface. -func (m *memoryMailBox) MessageOutBox() chan lnwire.Message { - return m.messageOutbox -} - -// PacketOutBox returns a channel that any new packets ready for delivery will -// be sent on. -// -// NOTE: This method is part of the MailBox interface. -func (m *memoryMailBox) PacketOutBox() chan *htlcPacket { - return m.pktOutbox -} - -// mailOrchestrator is responsible for coordinating the creation and lifecycle -// of mailboxes used within the switch. 
It supports the ability to create -// mailboxes, reassign their short channel id's, deliver htlc packets, and -// queue packets for mailboxes that have not been created due to a link's late -// registration. -type mailOrchestrator struct { - mu sync.RWMutex - - cfg *mailOrchConfig - - // mailboxes caches exactly one mailbox for all known channels. - mailboxes map[lnwire.ChannelID]MailBox - - // liveIndex maps a live short chan id to the primary mailbox key. - // An index in liveIndex map is only entered under two conditions: - // 1. A link has a non-zero short channel id at time of AddLink. - // 2. A link receives a non-zero short channel via UpdateShortChanID. - liveIndex map[lnwire.ShortChannelID]lnwire.ChannelID - - // TODO(conner): add another pair of indexes: - // chan_id -> short_chan_id - // short_chan_id -> mailbox - // so that Deliver can lookup mailbox directly once live, - // but still queriable by channel_id. - - // unclaimedPackets maps a live short chan id to queue of packets if no - // mailbox has been created. - unclaimedPackets map[lnwire.ShortChannelID][]*htlcPacket -} - -type mailOrchConfig struct { - // forwardPackets send a varidic number of htlcPackets to the switch to - // be routed. A quit channel should be provided so that the call can - // properly exit during shutdown. - forwardPackets func(chan struct{}, ...*htlcPacket) er.R - - // fetchUpdate retreives the most recent channel update for the channel - // this mailbox belongs to. - fetchUpdate func(lnwire.ShortChannelID) (*lnwire.ChannelUpdate, er.R) - - // clock is a time source for the generated mailboxes. - clock clock.Clock - - // expiry is the interval after which Adds will be cancelled if they - // have not been yet been delivered. The computed deadline will expiry - // this long after the Adds are added to a mailbox via AddPacket. - expiry time.Duration -} - -// newMailOrchestrator initializes a fresh mailOrchestrator. 
-func newMailOrchestrator(cfg *mailOrchConfig) *mailOrchestrator { - return &mailOrchestrator{ - cfg: cfg, - mailboxes: make(map[lnwire.ChannelID]MailBox), - liveIndex: make(map[lnwire.ShortChannelID]lnwire.ChannelID), - unclaimedPackets: make(map[lnwire.ShortChannelID][]*htlcPacket), - } -} - -// Stop instructs the orchestrator to stop all active mailboxes. -func (mo *mailOrchestrator) Stop() { - for _, mailbox := range mo.mailboxes { - mailbox.Stop() - } -} - -// GetOrCreateMailBox returns an existing mailbox belonging to `chanID`, or -// creates and returns a new mailbox if none is found. -func (mo *mailOrchestrator) GetOrCreateMailBox(chanID lnwire.ChannelID, - shortChanID lnwire.ShortChannelID) MailBox { - - // First, try lookup the mailbox directly using only the shared mutex. - mo.mu.RLock() - mailbox, ok := mo.mailboxes[chanID] - if ok { - mo.mu.RUnlock() - return mailbox - } - mo.mu.RUnlock() - - // Otherwise, we will try again with exclusive lock, creating a mailbox - // if one still has not been created. - mo.mu.Lock() - mailbox = mo.exclusiveGetOrCreateMailBox(chanID, shortChanID) - mo.mu.Unlock() - - return mailbox -} - -// exclusiveGetOrCreateMailBox checks for the existence of a mailbox for the -// given channel id. If none is found, a new one is creates, started, and -// recorded. -// -// NOTE: This method MUST be invoked with the mailOrchestrator's exclusive lock. 
-func (mo *mailOrchestrator) exclusiveGetOrCreateMailBox( - chanID lnwire.ChannelID, shortChanID lnwire.ShortChannelID) MailBox { - - mailbox, ok := mo.mailboxes[chanID] - if !ok { - mailbox = newMemoryMailBox(&mailBoxConfig{ - shortChanID: shortChanID, - fetchUpdate: mo.cfg.fetchUpdate, - forwardPackets: mo.cfg.forwardPackets, - clock: mo.cfg.clock, - expiry: mo.cfg.expiry, - }) - mailbox.Start() - mo.mailboxes[chanID] = mailbox - } - - return mailbox -} - -// BindLiveShortChanID registers that messages bound for a particular short -// channel id should be forwarded to the mailbox corresponding to the given -// channel id. This method also checks to see if there are any unclaimed -// packets for this short_chan_id. If any are found, they are delivered to the -// mailbox and removed (marked as claimed). -func (mo *mailOrchestrator) BindLiveShortChanID(mailbox MailBox, - cid lnwire.ChannelID, sid lnwire.ShortChannelID) { - - mo.mu.Lock() - // Update the mapping from short channel id to mailbox's channel id. - mo.liveIndex[sid] = cid - - // Retrieve any unclaimed packets destined for this mailbox. - pkts := mo.unclaimedPackets[sid] - delete(mo.unclaimedPackets, sid) - mo.mu.Unlock() - - // Deliver the unclaimed packets. - for _, pkt := range pkts { - mailbox.AddPacket(pkt) - } -} - -// Deliver lookups the target mailbox using the live index from short_chan_id -// to channel_id. If the mailbox is found, the message is delivered directly. -// Otherwise the packet is recorded as unclaimed, and will be delivered to the -// mailbox upon the subsequent call to BindLiveShortChanID. -func (mo *mailOrchestrator) Deliver( - sid lnwire.ShortChannelID, pkt *htlcPacket) er.R { - - var ( - mailbox MailBox - found bool - ) - - // First, try to find the channel id for the target short_chan_id. If - // the link is live, we will also look up the created mailbox. 
- mo.mu.RLock() - chanID, isLive := mo.liveIndex[sid] - if isLive { - mailbox, found = mo.mailboxes[chanID] - } - mo.mu.RUnlock() - - // The link is live and target mailbox was found, deliver immediately. - if isLive && found { - return mailbox.AddPacket(pkt) - } - - // If we detected that the link has not been made live, we will acquire - // the exclusive lock preemptively in order to queue this packet in the - // list of unclaimed packets. - mo.mu.Lock() - - // Double check to see if the mailbox has been not made live since the - // release of the shared lock. - // - // NOTE: Checking again with the exclusive lock held prevents a race - // condition where BindLiveShortChanID is interleaved between the - // release of the shared lock, and acquiring the exclusive lock. The - // result would be stuck packets, as they wouldn't be redelivered until - // the next call to BindLiveShortChanID, which is expected to occur - // infrequently. - chanID, isLive = mo.liveIndex[sid] - if isLive { - // Reaching this point indicates the mailbox is actually live. - // We'll try to load the mailbox using the fresh channel id. - // - // NOTE: This should never create a new mailbox, as the live - // index should only be set if the mailbox had been initialized - // beforehand. However, this does ensure that this case is - // handled properly in the event that it could happen. - mailbox = mo.exclusiveGetOrCreateMailBox(chanID, sid) - mo.mu.Unlock() - - // Deliver the packet to the mailbox if it was found or created. - return mailbox.AddPacket(pkt) - } - - // Finally, if the channel id is still not found in the live index, - // we'll add this to the list of unclaimed packets. These will be - // delivered upon the next call to BindLiveShortChanID. 
- mo.unclaimedPackets[sid] = append(mo.unclaimedPackets[sid], pkt) - mo.mu.Unlock() - - return nil -} diff --git a/lnd/htlcswitch/mailbox_test.go b/lnd/htlcswitch/mailbox_test.go deleted file mode 100644 index 646de57a..00000000 --- a/lnd/htlcswitch/mailbox_test.go +++ /dev/null @@ -1,675 +0,0 @@ -package htlcswitch - -import ( - prand "math/rand" - "reflect" - "testing" - "time" - - "github.com/davecgh/go-spew/spew" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/clock" - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -const testExpiry = time.Minute - -// TestMailBoxCouriers tests that both aspects of the mailBox struct works -// properly. Both packets and messages should be able to added to each -// respective mailbox concurrently, and also messages/packets should also be -// able to be received concurrently. -func TestMailBoxCouriers(t *testing.T) { - t.Parallel() - - // First, we'll create new instance of the current default mailbox - // type. - ctx := newMailboxContext(t, time.Now(), testExpiry) - defer ctx.mailbox.Stop() - - // We'll be adding 10 message of both types to the mailbox. - const numPackets = 10 - const halfPackets = numPackets / 2 - - // We'll add a set of random packets to the mailbox. - sentPackets := make([]*htlcPacket, numPackets) - for i := 0; i < numPackets; i++ { - pkt := &htlcPacket{ - outgoingChanID: lnwire.NewShortChanIDFromInt(uint64(prand.Int63())), - incomingChanID: lnwire.NewShortChanIDFromInt(uint64(prand.Int63())), - amount: lnwire.MilliSatoshi(prand.Int63()), - htlc: &lnwire.UpdateAddHTLC{ - ID: uint64(i), - }, - } - sentPackets[i] = pkt - - err := ctx.mailbox.AddPacket(pkt) - if err != nil { - t.Fatalf("unable to add packet: %v", err) - } - } - - // Next, we'll do the same, but this time adding wire messages. 
- sentMessages := make([]lnwire.Message, numPackets) - for i := 0; i < numPackets; i++ { - msg := &lnwire.UpdateAddHTLC{ - ID: uint64(prand.Int63()), - Amount: lnwire.MilliSatoshi(prand.Int63()), - } - sentMessages[i] = msg - - err := ctx.mailbox.AddMessage(msg) - if err != nil { - t.Fatalf("unable to add message: %v", err) - } - } - - // Now we'll attempt to read back the packets/messages we added to the - // mailbox. We'll alternative reading from the message outbox vs the - // packet outbox to ensure that they work concurrently properly. - recvdPackets := make([]*htlcPacket, 0, numPackets) - recvdMessages := make([]lnwire.Message, 0, numPackets) - for i := 0; i < numPackets*2; i++ { - timeout := time.After(time.Second * 5) - if i%2 == 0 { - select { - case <-timeout: - t.Fatalf("didn't recv pkt after timeout") - case pkt := <-ctx.mailbox.PacketOutBox(): - recvdPackets = append(recvdPackets, pkt) - } - } else { - select { - case <-timeout: - t.Fatalf("didn't recv message after timeout") - case msg := <-ctx.mailbox.MessageOutBox(): - recvdMessages = append(recvdMessages, msg) - } - } - } - - // The number of messages/packets we sent, and the number we received - // should match exactly. - if len(sentPackets) != len(recvdPackets) { - t.Fatalf("expected %v packets instead got %v", len(sentPackets), - len(recvdPackets)) - } - if len(sentMessages) != len(recvdMessages) { - t.Fatalf("expected %v messages instead got %v", len(sentMessages), - len(recvdMessages)) - } - - // Additionally, the set of packets should match exactly, as we should - // have received the packets in the exact same ordering that we added. 
- if !reflect.DeepEqual(sentPackets, recvdPackets) { - t.Fatalf("recvd packets mismatched: expected %v, got %v", - spew.Sdump(sentPackets), spew.Sdump(recvdPackets)) - } - if !reflect.DeepEqual(recvdMessages, recvdMessages) { - t.Fatalf("recvd messages mismatched: expected %v, got %v", - spew.Sdump(sentMessages), spew.Sdump(recvdMessages)) - } - - // Now that we've received all of the intended msgs/pkts, ack back half - // of the packets. - for _, recvdPkt := range recvdPackets[:halfPackets] { - ctx.mailbox.AckPacket(recvdPkt.inKey()) - } - - // With the packets drained and partially acked, we reset the mailbox, - // simulating a link shutting down and then coming back up. - err := ctx.mailbox.ResetMessages() - if err != nil { - t.Fatalf("unable to reset messages: %v", err) - } - err = ctx.mailbox.ResetPackets() - if err != nil { - t.Fatalf("unable to reset packets: %v", err) - } - - // Now, we'll use the same alternating strategy to read from our - // mailbox. All wire messages are dropped on startup, but any unacked - // packets will be replayed in the same order they were delivered - // initially. - recvdPackets2 := make([]*htlcPacket, 0, halfPackets) - for i := 0; i < 2*halfPackets; i++ { - timeout := time.After(time.Second * 5) - if i%2 == 0 { - select { - case <-timeout: - t.Fatalf("didn't recv pkt after timeout") - case pkt := <-ctx.mailbox.PacketOutBox(): - recvdPackets2 = append(recvdPackets2, pkt) - } - } else { - select { - case <-ctx.mailbox.MessageOutBox(): - t.Fatalf("should not receive wire msg after reset") - default: - } - } - } - - // The number of packets we received should match the number of unacked - // packets left in the mailbox. - if halfPackets != len(recvdPackets2) { - t.Fatalf("expected %v packets instead got %v", halfPackets, - len(recvdPackets)) - } - - // Additionally, the set of packets should match exactly with the - // unacked packets, and we should have received the packets in the exact - // same ordering that we added. 
- if !reflect.DeepEqual(recvdPackets[halfPackets:], recvdPackets2) { - t.Fatalf("recvd packets mismatched: expected %v, got %v", - spew.Sdump(sentPackets), spew.Sdump(recvdPackets)) - } -} - -// TestMailBoxResetAfterShutdown tests that ResetMessages and ResetPackets -// return ErrMailBoxShuttingDown after the mailbox has been stopped. -func TestMailBoxResetAfterShutdown(t *testing.T) { - t.Parallel() - - ctx := newMailboxContext(t, time.Now(), time.Second) - - // Stop the mailbox, then try to reset the message and packet couriers. - ctx.mailbox.Stop() - - err := ctx.mailbox.ResetMessages() - if !ErrMailBoxShuttingDown.Is(err) { - t.Fatalf("expected ErrMailBoxShuttingDown, got: %v", err) - } - - err = ctx.mailbox.ResetPackets() - if !ErrMailBoxShuttingDown.Is(err) { - t.Fatalf("expected ErrMailBoxShuttingDown, got: %v", err) - } -} - -type mailboxContext struct { - t *testing.T - mailbox MailBox - clock *clock.TestClock - forwards chan *htlcPacket -} - -func newMailboxContext(t *testing.T, startTime time.Time, - expiry time.Duration) *mailboxContext { - - ctx := &mailboxContext{ - t: t, - clock: clock.NewTestClock(startTime), - forwards: make(chan *htlcPacket, 1), - } - ctx.mailbox = newMemoryMailBox(&mailBoxConfig{ - fetchUpdate: func(sid lnwire.ShortChannelID) ( - *lnwire.ChannelUpdate, er.R) { - return &lnwire.ChannelUpdate{ - ShortChannelID: sid, - }, nil - }, - forwardPackets: ctx.forward, - clock: ctx.clock, - expiry: expiry, - }) - ctx.mailbox.Start() - - return ctx -} - -func (c *mailboxContext) forward(_ chan struct{}, - pkts ...*htlcPacket) er.R { - - for _, pkt := range pkts { - c.forwards <- pkt - } - - return nil -} - -func (c *mailboxContext) sendAdds(start, num int) []*htlcPacket { - c.t.Helper() - - sentPackets := make([]*htlcPacket, num) - for i := 0; i < num; i++ { - pkt := &htlcPacket{ - outgoingChanID: lnwire.NewShortChanIDFromInt( - uint64(prand.Int63())), - incomingChanID: lnwire.NewShortChanIDFromInt( - uint64(prand.Int63())), - 
incomingHTLCID: uint64(start + i), - amount: lnwire.MilliSatoshi(prand.Int63()), - htlc: &lnwire.UpdateAddHTLC{ - ID: uint64(start + i), - }, - } - sentPackets[i] = pkt - - err := c.mailbox.AddPacket(pkt) - if err != nil { - c.t.Fatalf("unable to add packet: %v", err) - } - } - - return sentPackets -} - -func (c *mailboxContext) receivePkts(pkts []*htlcPacket) { - c.t.Helper() - - for i, expPkt := range pkts { - select { - case pkt := <-c.mailbox.PacketOutBox(): - if reflect.DeepEqual(expPkt, pkt) { - continue - } - - c.t.Fatalf("inkey mismatch #%d, want: %v vs "+ - "got: %v", i, expPkt.inKey(), pkt.inKey()) - - case <-time.After(50 * time.Millisecond): - c.t.Fatalf("did not receive fail for index %d", i) - } - } -} - -func (c *mailboxContext) checkFails(adds []*htlcPacket) { - c.t.Helper() - - for i, add := range adds { - select { - case fail := <-c.forwards: - if add.inKey() == fail.inKey() { - continue - } - c.t.Fatalf("inkey mismatch #%d, add: %v vs fail: %v", - i, add.inKey(), fail.inKey()) - - case <-time.After(50 * time.Millisecond): - c.t.Fatalf("did not receive fail for index %d", i) - } - } - - select { - case pkt := <-c.forwards: - c.t.Fatalf("unexpected forward: %v", pkt) - case <-time.After(50 * time.Millisecond): - } -} - -// TestMailBoxFailAdd asserts that FailAdd returns a response to the switch -// under various interleavings with other operations on the mailbox. -func TestMailBoxFailAdd(t *testing.T) { - var ( - batchDelay = time.Second - expiry = time.Minute - firstBatchStart = time.Now() - secondBatchStart = time.Now().Add(batchDelay) - thirdBatchStart = time.Now().Add(2 * batchDelay) - thirdBatchExpiry = thirdBatchStart.Add(expiry) - ) - ctx := newMailboxContext(t, firstBatchStart, expiry) - defer ctx.mailbox.Stop() - - failAdds := func(adds []*htlcPacket) { - for _, add := range adds { - ctx.mailbox.FailAdd(add) - } - } - - const numBatchPackets = 5 - - // Send 10 adds, and pull them from the mailbox. 
- firstBatch := ctx.sendAdds(0, numBatchPackets) - ctx.receivePkts(firstBatch) - - // Fail all of these adds, simulating an error adding the HTLCs to the - // commitment. We should see a failure message for each. - go failAdds(firstBatch) - ctx.checkFails(firstBatch) - - // As a sanity check, Fail all of them again and assert that no - // duplicate fails are sent. - go failAdds(firstBatch) - ctx.checkFails(nil) - - // Now, send a second batch of adds after a short delay and deliver them - // to the link. - ctx.clock.SetTime(secondBatchStart) - secondBatch := ctx.sendAdds(numBatchPackets, numBatchPackets) - ctx.receivePkts(secondBatch) - - // Reset the packet queue w/o changing the current time. This simulates - // the link flapping and coming back up before the second batch's - // expiries have elapsed. We should see no failures sent back. - err := ctx.mailbox.ResetPackets() - if err != nil { - t.Fatalf("unable to reset packets: %v", err) - } - ctx.checkFails(nil) - - // Redeliver the second batch to the link and hold them there. - ctx.receivePkts(secondBatch) - - // Send a third batch of adds shortly after the second batch. - ctx.clock.SetTime(thirdBatchStart) - thirdBatch := ctx.sendAdds(2*numBatchPackets, numBatchPackets) - - // Advance the clock so that the third batch expires. We expect to only - // see fails for the third batch, since the second batch is still being - // held by the link. - ctx.clock.SetTime(thirdBatchExpiry) - ctx.checkFails(thirdBatch) - - // Finally, reset the link which should cause the second batch to be - // cancelled immediately. - err = ctx.mailbox.ResetPackets() - if err != nil { - t.Fatalf("unable to reset packets: %v", err) - } - ctx.checkFails(secondBatch) -} - -// TestMailBoxPacketPrioritization asserts that the mailbox will prioritize -// delivering Settle and Fail packets over Adds if both are available for -// delivery at the same time. 
-func TestMailBoxPacketPrioritization(t *testing.T) { - t.Parallel() - - // First, we'll create new instance of the current default mailbox - // type. - ctx := newMailboxContext(t, time.Now(), testExpiry) - defer ctx.mailbox.Stop() - - const numPackets = 5 - - _, _, aliceChanID, bobChanID := genIDs() - - // Next we'll send the following sequence of packets: - // - Settle1 - // - Add1 - // - Add2 - // - Fail - // - Settle2 - sentPackets := make([]*htlcPacket, numPackets) - for i := 0; i < numPackets; i++ { - pkt := &htlcPacket{ - outgoingChanID: aliceChanID, - outgoingHTLCID: uint64(i), - incomingChanID: bobChanID, - incomingHTLCID: uint64(i), - amount: lnwire.MilliSatoshi(prand.Int63()), - } - - switch i { - case 0, 4: - // First and last packets are a Settle. A non-Add is - // sent first to make the test deterministic w/o needing - // to sleep. - pkt.htlc = &lnwire.UpdateFulfillHTLC{ID: uint64(i)} - case 1, 2: - // Next two packets are Adds. - pkt.htlc = &lnwire.UpdateAddHTLC{ID: uint64(i)} - case 3: - // Last packet is a Fail. - pkt.htlc = &lnwire.UpdateFailHTLC{ID: uint64(i)} - } - - sentPackets[i] = pkt - - err := ctx.mailbox.AddPacket(pkt) - if err != nil { - t.Fatalf("failed to add packet: %v", err) - } - } - - // When dequeueing the packets, we expect the following sequence: - // - Settle1 - // - Fail - // - Settle2 - // - Add1 - // - Add2 - // - // We expect to see Fail and Settle2 to be delivered before either Add1 - // or Add2 due to the prioritization between the split queue. - for i := 0; i < numPackets; i++ { - select { - case pkt := <-ctx.mailbox.PacketOutBox(): - var expPkt *htlcPacket - switch i { - case 0: - // First packet should be Settle1. - expPkt = sentPackets[0] - case 1: - // Second packet should be Fail. - expPkt = sentPackets[3] - case 2: - // Third packet should be Settle2. - expPkt = sentPackets[4] - case 3: - // Fourth packet should be Add1. - expPkt = sentPackets[1] - case 4: - // Last packet should be Add2. 
- expPkt = sentPackets[2] - } - - if !reflect.DeepEqual(expPkt, pkt) { - t.Fatalf("recvd packet mismatch %d, want: %v, got: %v", - i, spew.Sdump(expPkt), spew.Sdump(pkt)) - } - - case <-time.After(50 * time.Millisecond): - t.Fatalf("didn't receive packet %d before timeout", i) - } - } -} - -// TestMailBoxAddExpiry asserts that the mailbox will cancel back Adds that have -// reached their expiry time. -func TestMailBoxAddExpiry(t *testing.T) { - var ( - expiry = time.Minute - batchDelay = time.Second - firstBatchStart = time.Now() - firstBatchExpiry = firstBatchStart.Add(expiry) - secondBatchStart = firstBatchStart.Add(batchDelay) - secondBatchExpiry = secondBatchStart.Add(expiry) - ) - - ctx := newMailboxContext(t, firstBatchStart, expiry) - defer ctx.mailbox.Stop() - - // Each batch will consist of 10 messages. - const numBatchPackets = 10 - - firstBatch := ctx.sendAdds(0, numBatchPackets) - - ctx.clock.SetTime(secondBatchStart) - ctx.checkFails(nil) - - secondBatch := ctx.sendAdds(numBatchPackets, numBatchPackets) - - ctx.clock.SetTime(firstBatchExpiry) - ctx.checkFails(firstBatch) - - ctx.clock.SetTime(secondBatchExpiry) - ctx.checkFails(secondBatch) -} - -// TestMailBoxDuplicateAddPacket asserts that the mailbox returns an -// ErrPacketAlreadyExists failure when two htlcPackets are added with identical -// incoming circuit keys. -func TestMailBoxDuplicateAddPacket(t *testing.T) { - t.Parallel() - - ctx := newMailboxContext(t, time.Now(), testExpiry) - ctx.mailbox.Start() - defer ctx.mailbox.Stop() - - addTwice := func(t *testing.T, pkt *htlcPacket) { - // The first add should succeed. - err := ctx.mailbox.AddPacket(pkt) - if err != nil { - t.Fatalf("unable to add packet: %v", err) - } - - // Adding again with the same incoming circuit key should fail. - err = ctx.mailbox.AddPacket(pkt) - if !ErrPacketAlreadyExists.Is(err) { - t.Fatalf("expected ErrPacketAlreadyExists, got: %v", err) - } - } - - // Assert duplicate AddPacket calls fail for all types of HTLCs. 
- addTwice(t, &htlcPacket{ - incomingHTLCID: 0, - htlc: &lnwire.UpdateAddHTLC{}, - }) - addTwice(t, &htlcPacket{ - incomingHTLCID: 1, - htlc: &lnwire.UpdateFulfillHTLC{}, - }) - addTwice(t, &htlcPacket{ - incomingHTLCID: 2, - htlc: &lnwire.UpdateFailHTLC{}, - }) -} - -// TestMailOrchestrator asserts that the orchestrator properly buffers packets -// for channels that haven't been made live, such that they are delivered -// immediately after BindLiveShortChanID. It also tests that packets are delivered -// readily to mailboxes for channels that are already in the live state. -func TestMailOrchestrator(t *testing.T) { - t.Parallel() - - // First, we'll create a new instance of our orchestrator. - mo := newMailOrchestrator(&mailOrchConfig{ - fetchUpdate: func(sid lnwire.ShortChannelID) ( - *lnwire.ChannelUpdate, er.R) { - return &lnwire.ChannelUpdate{ - ShortChannelID: sid, - }, nil - }, - forwardPackets: func(_ chan struct{}, - pkts ...*htlcPacket) er.R { - return nil - }, - clock: clock.NewTestClock(time.Now()), - expiry: testExpiry, - }) - defer mo.Stop() - - // We'll be delivering 10 htlc packets via the orchestrator. - const numPackets = 10 - const halfPackets = numPackets / 2 - - // Before any mailbox is created or made live, we will deliver half of - // the htlcs via the orchestrator. - chanID1, chanID2, aliceChanID, bobChanID := genIDs() - sentPackets := make([]*htlcPacket, halfPackets) - for i := 0; i < halfPackets; i++ { - pkt := &htlcPacket{ - outgoingChanID: aliceChanID, - outgoingHTLCID: uint64(i), - incomingChanID: bobChanID, - incomingHTLCID: uint64(i), - amount: lnwire.MilliSatoshi(prand.Int63()), - htlc: &lnwire.UpdateAddHTLC{ - ID: uint64(i), - }, - } - sentPackets[i] = pkt - - mo.Deliver(pkt.outgoingChanID, pkt) - } - - // Now, initialize a new mailbox for Alice's chanid. - mailbox := mo.GetOrCreateMailBox(chanID1, aliceChanID) - - // Verify that no messages are received, since Alice's mailbox has not - // been made live. 
- for i := 0; i < halfPackets; i++ { - timeout := time.After(50 * time.Millisecond) - select { - case <-mailbox.MessageOutBox(): - t.Fatalf("should not receive wire msg after reset") - case <-timeout: - } - } - - // Assign a short chan id to the existing mailbox, make it available for - // capturing incoming HTLCs. The HTLCs added above should be delivered - // immediately. - mo.BindLiveShortChanID(mailbox, chanID1, aliceChanID) - - // Verify that all of the packets are queued and delivered to Alice's - // mailbox. - recvdPackets := make([]*htlcPacket, 0, len(sentPackets)) - for i := 0; i < halfPackets; i++ { - timeout := time.After(5 * time.Second) - select { - case <-timeout: - t.Fatalf("didn't recv pkt %d after timeout", i) - case pkt := <-mailbox.PacketOutBox(): - recvdPackets = append(recvdPackets, pkt) - } - } - - // We should have received half of the total number of packets. - if len(recvdPackets) != halfPackets { - t.Fatalf("expected %v packets instead got %v", - halfPackets, len(recvdPackets)) - } - - // Check that the received packets are equal to the sent packets. - if !reflect.DeepEqual(recvdPackets, sentPackets) { - t.Fatalf("recvd packets mismatched: expected %v, got %v", - spew.Sdump(sentPackets), spew.Sdump(recvdPackets)) - } - - // For the second half of the test, create a new mailbox for Bob and - // immediately make it live with an assigned short chan id. - mailbox = mo.GetOrCreateMailBox(chanID2, bobChanID) - mo.BindLiveShortChanID(mailbox, chanID2, bobChanID) - - // Create the second half of our htlcs, and deliver them via the - // orchestrator. We should be able to receive each of these in order. 
- recvdPackets = make([]*htlcPacket, 0, len(sentPackets)) - for i := 0; i < halfPackets; i++ { - pkt := &htlcPacket{ - outgoingChanID: aliceChanID, - outgoingHTLCID: uint64(halfPackets + i), - incomingChanID: bobChanID, - incomingHTLCID: uint64(halfPackets + i), - amount: lnwire.MilliSatoshi(prand.Int63()), - htlc: &lnwire.UpdateAddHTLC{ - ID: uint64(halfPackets + i), - }, - } - sentPackets[i] = pkt - - mo.Deliver(pkt.incomingChanID, pkt) - - timeout := time.After(50 * time.Millisecond) - select { - case <-timeout: - t.Fatalf("didn't recv pkt %d after timeout", halfPackets+i) - case pkt := <-mailbox.PacketOutBox(): - recvdPackets = append(recvdPackets, pkt) - } - } - - // Again, we should have received half of the total number of packets. - if len(recvdPackets) != halfPackets { - t.Fatalf("expected %v packets instead got %v", - halfPackets, len(recvdPackets)) - } - - // Check that the received packets are equal to the sent packets. - if !reflect.DeepEqual(recvdPackets, sentPackets) { - t.Fatalf("recvd packets mismatched: expected %v, got %v", - spew.Sdump(sentPackets), spew.Sdump(recvdPackets)) - } -} diff --git a/lnd/htlcswitch/mock.go b/lnd/htlcswitch/mock.go deleted file mode 100644 index 60b04534..00000000 --- a/lnd/htlcswitch/mock.go +++ /dev/null @@ -1,947 +0,0 @@ -package htlcswitch - -import ( - "bytes" - "crypto/sha256" - "encoding/binary" - "io" - "io/ioutil" - "net" - "os" - "sync" - "sync/atomic" - "testing" - "time" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - sphinx "github.com/pkt-cash/pktd/lightning-onion" - "github.com/pkt-cash/pktd/lnd/chainntnfs" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/clock" - "github.com/pkt-cash/pktd/lnd/contractcourt" - "github.com/pkt-cash/pktd/lnd/htlcswitch/hop" - "github.com/pkt-cash/pktd/lnd/invoices" - "github.com/pkt-cash/pktd/lnd/lnpeer" - "github.com/pkt-cash/pktd/lnd/lntest/mock" - 
"github.com/pkt-cash/pktd/lnd/lntypes" - "github.com/pkt-cash/pktd/lnd/lnwallet/chainfee" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/ticker" - "github.com/pkt-cash/pktd/wire" -) - -type mockPreimageCache struct { - sync.Mutex - preimageMap map[lntypes.Hash]lntypes.Preimage -} - -func newMockPreimageCache() *mockPreimageCache { - return &mockPreimageCache{ - preimageMap: make(map[lntypes.Hash]lntypes.Preimage), - } -} - -func (m *mockPreimageCache) LookupPreimage( - hash lntypes.Hash) (lntypes.Preimage, bool) { - - m.Lock() - defer m.Unlock() - - p, ok := m.preimageMap[hash] - return p, ok -} - -func (m *mockPreimageCache) AddPreimages(preimages ...lntypes.Preimage) er.R { - m.Lock() - defer m.Unlock() - - for _, preimage := range preimages { - m.preimageMap[preimage.Hash()] = preimage - } - - return nil -} - -func (m *mockPreimageCache) SubscribeUpdates() *contractcourt.WitnessSubscription { - return nil -} - -type mockFeeEstimator struct { - byteFeeIn chan chainfee.SatPerKWeight - - quit chan struct{} -} - -func (m *mockFeeEstimator) EstimateFeePerKW( - numBlocks uint32) (chainfee.SatPerKWeight, er.R) { - - select { - case feeRate := <-m.byteFeeIn: - return feeRate, nil - case <-m.quit: - return 0, er.Errorf("exiting") - } -} - -func (m *mockFeeEstimator) RelayFeePerKW() chainfee.SatPerKWeight { - return 1e3 -} - -func (m *mockFeeEstimator) Start() er.R { - return nil -} -func (m *mockFeeEstimator) Stop() er.R { - close(m.quit) - return nil -} - -var _ chainfee.Estimator = (*mockFeeEstimator)(nil) - -type mockForwardingLog struct { - sync.Mutex - - events map[time.Time]channeldb.ForwardingEvent -} - -func (m *mockForwardingLog) AddForwardingEvents(events []channeldb.ForwardingEvent) er.R { - m.Lock() - defer m.Unlock() - - for _, event := range events { - m.events[event.Timestamp] = event - } - - return nil -} - -type mockServer struct { - started int32 // To be used atomically. - shutdown int32 // To be used atomically. 
- wg sync.WaitGroup - quit chan struct{} - - t testing.TB - - name string - messages chan lnwire.Message - - id [33]byte - htlcSwitch *Switch - - registry *mockInvoiceRegistry - pCache *mockPreimageCache - interceptorFuncs []messageInterceptor -} - -var _ lnpeer.Peer = (*mockServer)(nil) - -func initDB() (*channeldb.DB, er.R) { - tempPath, errr := ioutil.TempDir("", "switchdb") - if errr != nil { - return nil, er.E(errr) - } - - db, err := channeldb.Open(tempPath) - if err != nil { - return nil, err - } - - return db, err -} - -func initSwitchWithDB(startingHeight uint32, db *channeldb.DB) (*Switch, er.R) { - var err er.R - - if db == nil { - db, err = initDB() - if err != nil { - return nil, err - } - } - - cfg := Config{ - DB: db, - SwitchPackager: channeldb.NewSwitchPackager(), - FwdingLog: &mockForwardingLog{ - events: make(map[time.Time]channeldb.ForwardingEvent), - }, - FetchLastChannelUpdate: func(lnwire.ShortChannelID) (*lnwire.ChannelUpdate, er.R) { - return nil, nil - }, - Notifier: &mock.ChainNotifier{ - SpendChan: make(chan *chainntnfs.SpendDetail), - EpochChan: make(chan *chainntnfs.BlockEpoch), - ConfChan: make(chan *chainntnfs.TxConfirmation), - }, - FwdEventTicker: ticker.NewForce(DefaultFwdEventInterval), - LogEventTicker: ticker.NewForce(DefaultLogInterval), - AckEventTicker: ticker.NewForce(DefaultAckInterval), - HtlcNotifier: &mockHTLCNotifier{}, - Clock: clock.NewDefaultClock(), - HTLCExpiry: time.Hour, - } - - return New(cfg, startingHeight) -} - -func newMockServer(t testing.TB, name string, startingHeight uint32, - db *channeldb.DB, defaultDelta uint32) (*mockServer, er.R) { - - var id [33]byte - h := sha256.Sum256([]byte(name)) - copy(id[:], h[:]) - - pCache := newMockPreimageCache() - - htlcSwitch, err := initSwitchWithDB(startingHeight, db) - if err != nil { - return nil, err - } - - registry := newMockRegistry(defaultDelta) - - return &mockServer{ - t: t, - id: id, - name: name, - messages: make(chan lnwire.Message, 3000), - quit: 
make(chan struct{}), - registry: registry, - htlcSwitch: htlcSwitch, - pCache: pCache, - interceptorFuncs: make([]messageInterceptor, 0), - }, nil -} - -func (s *mockServer) Start() er.R { - if !atomic.CompareAndSwapInt32(&s.started, 0, 1) { - return er.New("mock server already started") - } - - if err := s.htlcSwitch.Start(); err != nil { - return err - } - - s.wg.Add(1) - go func() { - defer s.wg.Done() - - defer func() { - s.htlcSwitch.Stop() - }() - - for { - select { - case msg := <-s.messages: - var shouldSkip bool - - for _, interceptor := range s.interceptorFuncs { - skip, err := interceptor(msg) - if err != nil { - s.t.Fatalf("%v: error in the "+ - "interceptor: %v", s.name, err) - return - } - shouldSkip = shouldSkip || skip - } - - if shouldSkip { - continue - } - - if err := s.readHandler(msg); err != nil { - s.t.Fatal(err) - return - } - case <-s.quit: - return - } - } - }() - - return nil -} - -func (s *mockServer) QuitSignal() <-chan struct{} { - return s.quit -} - -// mockHopIterator represents the test version of hop iterator which instead -// of encrypting the path in onion blob just stores the path as a list of hops. 
-type mockHopIterator struct { - hops []*hop.Payload -} - -func newMockHopIterator(hops ...*hop.Payload) hop.Iterator { - return &mockHopIterator{hops: hops} -} - -func (r *mockHopIterator) HopPayload() (*hop.Payload, er.R) { - h := r.hops[0] - r.hops = r.hops[1:] - return h, nil -} - -func (r *mockHopIterator) ExtraOnionBlob() []byte { - return nil -} - -func (r *mockHopIterator) ExtractErrorEncrypter( - extracter hop.ErrorEncrypterExtracter) (hop.ErrorEncrypter, - lnwire.FailCode) { - - return extracter(nil) -} - -func (r *mockHopIterator) EncodeNextHop(w io.Writer) er.R { - var hopLength [4]byte - binary.BigEndian.PutUint32(hopLength[:], uint32(len(r.hops))) - - if _, err := util.Write(w, hopLength[:]); err != nil { - return err - } - - for _, hop := range r.hops { - fwdInfo := hop.ForwardingInfo() - if err := encodeFwdInfo(w, &fwdInfo); err != nil { - return err - } - } - - return nil -} - -func encodeFwdInfo(w io.Writer, f *hop.ForwardingInfo) er.R { - if _, err := util.Write(w, []byte{byte(f.Network)}); err != nil { - return err - } - - if err := util.WriteBin(w, binary.BigEndian, f.NextHop); err != nil { - return err - } - - if err := util.WriteBin(w, binary.BigEndian, f.AmountToForward); err != nil { - return err - } - - if err := util.WriteBin(w, binary.BigEndian, f.OutgoingCTLV); err != nil { - return err - } - - return nil -} - -var _ hop.Iterator = (*mockHopIterator)(nil) - -// mockObfuscator mock implementation of the failure obfuscator which only -// encodes the failure and do not makes any onion obfuscation. -type mockObfuscator struct { - ogPacket *sphinx.OnionPacket - failure lnwire.FailureMessage -} - -// NewMockObfuscator initializes a dummy mockObfuscator used for testing. 
-func NewMockObfuscator() hop.ErrorEncrypter { - return &mockObfuscator{} -} - -func (o *mockObfuscator) OnionPacket() *sphinx.OnionPacket { - return o.ogPacket -} - -func (o *mockObfuscator) Type() hop.EncrypterType { - return hop.EncrypterTypeMock -} - -func (o *mockObfuscator) Encode(w io.Writer) er.R { - return nil -} - -func (o *mockObfuscator) Decode(r io.Reader) er.R { - return nil -} - -func (o *mockObfuscator) Reextract( - extracter hop.ErrorEncrypterExtracter) er.R { - - return nil -} - -func (o *mockObfuscator) EncryptFirstHop(failure lnwire.FailureMessage) ( - lnwire.OpaqueReason, er.R) { - - o.failure = failure - - var b bytes.Buffer - if err := lnwire.EncodeFailure(&b, failure, 0); err != nil { - return nil, err - } - return b.Bytes(), nil -} - -func (o *mockObfuscator) IntermediateEncrypt(reason lnwire.OpaqueReason) lnwire.OpaqueReason { - return reason -} - -func (o *mockObfuscator) EncryptMalformedError(reason lnwire.OpaqueReason) lnwire.OpaqueReason { - return reason -} - -// mockDeobfuscator mock implementation of the failure deobfuscator which -// only decodes the failure do not makes any onion obfuscation. -type mockDeobfuscator struct{} - -func newMockDeobfuscator() ErrorDecrypter { - return &mockDeobfuscator{} -} - -func (o *mockDeobfuscator) DecryptError(reason lnwire.OpaqueReason) (*ForwardingError, er.R) { - - r := bytes.NewReader(reason) - failure, err := lnwire.DecodeFailure(r, 0) - if err != nil { - return nil, err - } - - return NewForwardingError(failure, 1), nil -} - -var _ ErrorDecrypter = (*mockDeobfuscator)(nil) - -// mockIteratorDecoder test version of hop iterator decoder which decodes the -// encoded array of hops. 
-type mockIteratorDecoder struct { - mu sync.RWMutex - - responses map[[32]byte][]hop.DecodeHopIteratorResponse - - decodeFail bool -} - -func newMockIteratorDecoder() *mockIteratorDecoder { - return &mockIteratorDecoder{ - responses: make(map[[32]byte][]hop.DecodeHopIteratorResponse), - } -} - -func (p *mockIteratorDecoder) DecodeHopIterator(r io.Reader, rHash []byte, - cltv uint32) (hop.Iterator, lnwire.FailCode) { - - var b [4]byte - _, err := r.Read(b[:]) - if err != nil { - return nil, lnwire.CodeTemporaryChannelFailure - } - hopLength := binary.BigEndian.Uint32(b[:]) - - hops := make([]*hop.Payload, hopLength) - for i := uint32(0); i < hopLength; i++ { - var f hop.ForwardingInfo - if err := decodeFwdInfo(r, &f); err != nil { - return nil, lnwire.CodeTemporaryChannelFailure - } - - var nextHopBytes [8]byte - binary.BigEndian.PutUint64(nextHopBytes[:], f.NextHop.ToUint64()) - - hops[i] = hop.NewLegacyPayload(&sphinx.HopData{ - Realm: [1]byte{}, // hop.BitcoinNetwork - NextAddress: nextHopBytes, - ForwardAmount: uint64(f.AmountToForward), - OutgoingCltv: f.OutgoingCTLV, - }) - } - - return newMockHopIterator(hops...), lnwire.CodeNone -} - -func (p *mockIteratorDecoder) DecodeHopIterators(id []byte, - reqs []hop.DecodeHopIteratorRequest) ( - []hop.DecodeHopIteratorResponse, er.R) { - - idHash := sha256.Sum256(id) - - p.mu.RLock() - if resps, ok := p.responses[idHash]; ok { - p.mu.RUnlock() - return resps, nil - } - p.mu.RUnlock() - - batchSize := len(reqs) - - resps := make([]hop.DecodeHopIteratorResponse, 0, batchSize) - for _, req := range reqs { - iterator, failcode := p.DecodeHopIterator( - req.OnionReader, req.RHash, req.IncomingCltv, - ) - - if p.decodeFail { - failcode = lnwire.CodeTemporaryChannelFailure - } - - resp := hop.DecodeHopIteratorResponse{ - HopIterator: iterator, - FailCode: failcode, - } - resps = append(resps, resp) - } - - p.mu.Lock() - p.responses[idHash] = resps - p.mu.Unlock() - - return resps, nil -} - -func decodeFwdInfo(r io.Reader, f 
*hop.ForwardingInfo) er.R { - var net [1]byte - if _, err := r.Read(net[:]); err != nil { - return er.E(err) - } - f.Network = hop.Network(net[0]) - - if err := util.ReadBin(r, binary.BigEndian, &f.NextHop); err != nil { - return err - } - - if err := util.ReadBin(r, binary.BigEndian, &f.AmountToForward); err != nil { - return err - } - - if err := util.ReadBin(r, binary.BigEndian, &f.OutgoingCTLV); err != nil { - return err - } - - return nil -} - -// messageInterceptor is function that handles the incoming peer messages and -// may decide should the peer skip the message or not. -type messageInterceptor func(m lnwire.Message) (bool, er.R) - -// Record is used to set the function which will be triggered when new -// lnwire message was received. -func (s *mockServer) intersect(f messageInterceptor) { - s.interceptorFuncs = append(s.interceptorFuncs, f) -} - -func (s *mockServer) SendMessage(sync bool, msgs ...lnwire.Message) er.R { - - for _, msg := range msgs { - select { - case s.messages <- msg: - case <-s.quit: - return er.New("server is stopped") - } - } - - return nil -} - -func (s *mockServer) SendMessageLazy(sync bool, msgs ...lnwire.Message) er.R { - panic("not implemented") -} - -func (s *mockServer) readHandler(message lnwire.Message) er.R { - var targetChan lnwire.ChannelID - - switch msg := message.(type) { - case *lnwire.UpdateAddHTLC: - targetChan = msg.ChanID - case *lnwire.UpdateFulfillHTLC: - targetChan = msg.ChanID - case *lnwire.UpdateFailHTLC: - targetChan = msg.ChanID - case *lnwire.UpdateFailMalformedHTLC: - targetChan = msg.ChanID - case *lnwire.RevokeAndAck: - targetChan = msg.ChanID - case *lnwire.CommitSig: - targetChan = msg.ChanID - case *lnwire.FundingLocked: - // Ignore - return nil - case *lnwire.ChannelReestablish: - targetChan = msg.ChanID - case *lnwire.UpdateFee: - targetChan = msg.ChanID - default: - return er.Errorf("unknown message type: %T", msg) - } - - // Dispatch the commitment update message to the proper channel link - 
// dedicated to this channel. If the link is not found, we will discard - // the message. - link, err := s.htlcSwitch.GetLink(targetChan) - if err != nil { - return nil - } - - // Create goroutine for this, in order to be able to properly stop - // the server when handler stacked (server unavailable) - link.HandleChannelUpdate(message) - - return nil -} - -func (s *mockServer) PubKey() [33]byte { - return s.id -} - -func (s *mockServer) IdentityKey() *btcec.PublicKey { - pubkey, _ := btcec.ParsePubKey(s.id[:], btcec.S256()) - return pubkey -} - -func (s *mockServer) Address() net.Addr { - return nil -} - -func (s *mockServer) AddNewChannel(channel *channeldb.OpenChannel, - cancel <-chan struct{}) er.R { - - return nil -} - -func (s *mockServer) WipeChannel(*wire.OutPoint) {} - -func (s *mockServer) LocalFeatures() *lnwire.FeatureVector { - return nil -} - -func (s *mockServer) RemoteFeatures() *lnwire.FeatureVector { - return nil -} - -func (s *mockServer) Stop() er.R { - if !atomic.CompareAndSwapInt32(&s.shutdown, 0, 1) { - return nil - } - - close(s.quit) - s.wg.Wait() - - return nil -} - -func (s *mockServer) String() string { - return s.name -} - -type mockChannelLink struct { - htlcSwitch *Switch - - shortChanID lnwire.ShortChannelID - - chanID lnwire.ChannelID - - peer lnpeer.Peer - - mailBox MailBox - - packets chan *htlcPacket - - eligible bool - - htlcID uint64 - - checkHtlcTransitResult *LinkError - - checkHtlcForwardResult *LinkError -} - -// completeCircuit is a helper method for adding the finalized payment circuit -// to the switch's circuit map. In testing, this should be executed after -// receiving an htlc from the downstream packets channel. 
-func (f *mockChannelLink) completeCircuit(pkt *htlcPacket) er.R { - switch htlc := pkt.htlc.(type) { - case *lnwire.UpdateAddHTLC: - pkt.outgoingChanID = f.shortChanID - pkt.outgoingHTLCID = f.htlcID - htlc.ID = f.htlcID - - keystone := Keystone{pkt.inKey(), pkt.outKey()} - if err := f.htlcSwitch.openCircuits(keystone); err != nil { - return err - } - - f.htlcID++ - - case *lnwire.UpdateFulfillHTLC, *lnwire.UpdateFailHTLC: - err := f.htlcSwitch.teardownCircuit(pkt) - if err != nil { - return err - } - } - - f.mailBox.AckPacket(pkt.inKey()) - - return nil -} - -func (f *mockChannelLink) deleteCircuit(pkt *htlcPacket) er.R { - return f.htlcSwitch.deleteCircuits(pkt.inKey()) -} - -func newMockChannelLink(htlcSwitch *Switch, chanID lnwire.ChannelID, - shortChanID lnwire.ShortChannelID, peer lnpeer.Peer, eligible bool, -) *mockChannelLink { - - return &mockChannelLink{ - htlcSwitch: htlcSwitch, - chanID: chanID, - shortChanID: shortChanID, - peer: peer, - eligible: eligible, - } -} - -func (f *mockChannelLink) HandleSwitchPacket(pkt *htlcPacket) er.R { - f.mailBox.AddPacket(pkt) - return nil -} - -func (f *mockChannelLink) HandleLocalAddPacket(pkt *htlcPacket) er.R { - _ = f.mailBox.AddPacket(pkt) - return nil -} - -func (f *mockChannelLink) HandleChannelUpdate(lnwire.Message) { -} - -func (f *mockChannelLink) UpdateForwardingPolicy(_ ForwardingPolicy) { -} -func (f *mockChannelLink) CheckHtlcForward([32]byte, lnwire.MilliSatoshi, - lnwire.MilliSatoshi, uint32, uint32, uint32) *LinkError { - - return f.checkHtlcForwardResult -} - -func (f *mockChannelLink) CheckHtlcTransit(payHash [32]byte, - amt lnwire.MilliSatoshi, timeout uint32, - heightNow uint32) *LinkError { - - return f.checkHtlcTransitResult -} - -func (f *mockChannelLink) Stats() (uint64, lnwire.MilliSatoshi, lnwire.MilliSatoshi) { - return 0, 0, 0 -} - -func (f *mockChannelLink) AttachMailBox(mailBox MailBox) { - f.mailBox = mailBox - f.packets = mailBox.PacketOutBox() -} - -func (f *mockChannelLink) Start() 
er.R { - f.mailBox.ResetMessages() - f.mailBox.ResetPackets() - return nil -} - -func (f *mockChannelLink) ChanID() lnwire.ChannelID { return f.chanID } -func (f *mockChannelLink) ShortChanID() lnwire.ShortChannelID { return f.shortChanID } -func (f *mockChannelLink) Bandwidth() lnwire.MilliSatoshi { return 99999999 } -func (f *mockChannelLink) Peer() lnpeer.Peer { return f.peer } -func (f *mockChannelLink) ChannelPoint() *wire.OutPoint { return &wire.OutPoint{} } -func (f *mockChannelLink) Stop() {} -func (f *mockChannelLink) EligibleToForward() bool { return f.eligible } -func (f *mockChannelLink) setLiveShortChanID(sid lnwire.ShortChannelID) { f.shortChanID = sid } -func (f *mockChannelLink) UpdateShortChanID() (lnwire.ShortChannelID, er.R) { - f.eligible = true - return f.shortChanID, nil -} - -var _ ChannelLink = (*mockChannelLink)(nil) - -func newDB() (*channeldb.DB, func(), er.R) { - // First, create a temporary directory to be used for the duration of - // this test. - tempDirName, errr := ioutil.TempDir("", "channeldb") - if errr != nil { - return nil, nil, er.E(errr) - } - - // Next, create channeldb for the first time. 
- cdb, err := channeldb.Open(tempDirName) - if err != nil { - os.RemoveAll(tempDirName) - return nil, nil, err - } - - cleanUp := func() { - cdb.Close() - os.RemoveAll(tempDirName) - } - - return cdb, cleanUp, nil -} - -const testInvoiceCltvExpiry = 6 - -type mockInvoiceRegistry struct { - settleChan chan lntypes.Hash - - registry *invoices.InvoiceRegistry - - cleanup func() -} - -func newMockRegistry(minDelta uint32) *mockInvoiceRegistry { - cdb, cleanup, err := newDB() - if err != nil { - panic(err) - } - - registry := invoices.NewRegistry( - cdb, - invoices.NewInvoiceExpiryWatcher(clock.NewDefaultClock()), - &invoices.RegistryConfig{ - FinalCltvRejectDelta: 5, - }, - ) - registry.Start() - - return &mockInvoiceRegistry{ - registry: registry, - cleanup: cleanup, - } -} - -func (i *mockInvoiceRegistry) LookupInvoice(rHash lntypes.Hash) ( - channeldb.Invoice, er.R) { - - return i.registry.LookupInvoice(rHash) -} - -func (i *mockInvoiceRegistry) SettleHodlInvoice(preimage lntypes.Preimage) er.R { - return i.registry.SettleHodlInvoice(preimage) -} - -func (i *mockInvoiceRegistry) NotifyExitHopHtlc(rhash lntypes.Hash, - amt lnwire.MilliSatoshi, expiry uint32, currentHeight int32, - circuitKey channeldb.CircuitKey, hodlChan chan<- interface{}, - payload invoices.Payload) (invoices.HtlcResolution, er.R) { - - event, err := i.registry.NotifyExitHopHtlc( - rhash, amt, expiry, currentHeight, circuitKey, hodlChan, - payload, - ) - if err != nil { - return nil, err - } - if i.settleChan != nil { - i.settleChan <- rhash - } - - return event, nil -} - -func (i *mockInvoiceRegistry) CancelInvoice(payHash lntypes.Hash) er.R { - return i.registry.CancelInvoice(payHash) -} - -func (i *mockInvoiceRegistry) AddInvoice(invoice channeldb.Invoice, - paymentHash lntypes.Hash) er.R { - - _, err := i.registry.AddInvoice(&invoice, paymentHash) - return err -} - -func (i *mockInvoiceRegistry) HodlUnsubscribeAll(subscriber chan<- interface{}) { - i.registry.HodlUnsubscribeAll(subscriber) -} 
- -var _ InvoiceDatabase = (*mockInvoiceRegistry)(nil) - -type mockCircuitMap struct { - lookup chan *PaymentCircuit -} - -var _ CircuitMap = (*mockCircuitMap)(nil) - -func (m *mockCircuitMap) OpenCircuits(...Keystone) er.R { - return nil -} - -func (m *mockCircuitMap) TrimOpenCircuits(chanID lnwire.ShortChannelID, - start uint64) er.R { - return nil -} - -func (m *mockCircuitMap) DeleteCircuits(inKeys ...CircuitKey) er.R { - return nil -} - -func (m *mockCircuitMap) CommitCircuits( - circuit ...*PaymentCircuit) (*CircuitFwdActions, er.R) { - - return nil, nil -} - -func (m *mockCircuitMap) CloseCircuit(outKey CircuitKey) (*PaymentCircuit, - er.R) { - return nil, nil -} - -func (m *mockCircuitMap) FailCircuit(inKey CircuitKey) (*PaymentCircuit, - er.R) { - return nil, nil -} - -func (m *mockCircuitMap) LookupCircuit(inKey CircuitKey) *PaymentCircuit { - return <-m.lookup -} - -func (m *mockCircuitMap) LookupOpenCircuit(outKey CircuitKey) *PaymentCircuit { - return nil -} - -func (m *mockCircuitMap) LookupByPaymentHash(hash [32]byte) []*PaymentCircuit { - return nil -} - -func (m *mockCircuitMap) NumPending() int { - return 0 -} - -func (m *mockCircuitMap) NumOpen() int { - return 0 -} - -type mockOnionErrorDecryptor struct { - sourceIdx int - message []byte - err er.R -} - -func (m *mockOnionErrorDecryptor) DecryptError(encryptedData []byte) ( - *sphinx.DecryptedError, er.R) { - - return &sphinx.DecryptedError{ - SenderIdx: m.sourceIdx, - Message: m.message, - }, m.err -} - -var _ htlcNotifier = (*mockHTLCNotifier)(nil) - -type mockHTLCNotifier struct{} - -func (h *mockHTLCNotifier) NotifyForwardingEvent(key HtlcKey, info HtlcInfo, - eventType HtlcEventType) { -} - -func (h *mockHTLCNotifier) NotifyLinkFailEvent(key HtlcKey, info HtlcInfo, - eventType HtlcEventType, linkErr *LinkError, incoming bool) { -} - -func (h *mockHTLCNotifier) NotifyForwardingFailEvent(key HtlcKey, - eventType HtlcEventType) { -} - -func (h *mockHTLCNotifier) NotifySettleEvent(key HtlcKey, 
eventType HtlcEventType) { -} diff --git a/lnd/htlcswitch/packet.go b/lnd/htlcswitch/packet.go deleted file mode 100644 index a3c1f163..00000000 --- a/lnd/htlcswitch/packet.go +++ /dev/null @@ -1,123 +0,0 @@ -package htlcswitch - -import ( - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/htlcswitch/hop" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/record" -) - -// htlcPacket is a wrapper around htlc lnwire update, which adds additional -// information which is needed by this package. -type htlcPacket struct { - // incomingChanID is the ID of the channel that we have received an incoming - // HTLC on. - incomingChanID lnwire.ShortChannelID - - // outgoingChanID is the ID of the channel that we have offered or will - // offer an outgoing HTLC on. - outgoingChanID lnwire.ShortChannelID - - // incomingHTLCID is the ID of the HTLC that we have received from the peer - // on the incoming channel. - incomingHTLCID uint64 - - // outgoingHTLCID is the ID of the HTLC that we offered to the peer on the - // outgoing channel. - outgoingHTLCID uint64 - - // sourceRef is used by forwarded htlcPackets to locate incoming Add - // entry in a fwdpkg owned by the incoming link. This value can be nil - // if there is no such entry, e.g. switch initiated payments. - sourceRef *channeldb.AddRef - - // destRef is used to locate a settle/fail entry in the outgoing link's - // fwdpkg. If sourceRef is non-nil, this reference should be to a - // settle/fail in response to the sourceRef. - destRef *channeldb.SettleFailRef - - // incomingAmount is the value in milli-satoshis that arrived on an - // incoming link. - incomingAmount lnwire.MilliSatoshi - - // amount is the value of the HTLC that is being created or modified. - amount lnwire.MilliSatoshi - - // htlc lnwire message type of which depends on switch request type. 
- htlc lnwire.Message - - // obfuscator contains the necessary state to allow the switch to wrap - // any forwarded errors in an additional layer of encryption. - obfuscator hop.ErrorEncrypter - - // localFailure is set to true if an HTLC fails for a local payment before - // the first hop. In this case, the failure reason is simply encoded, not - // encrypted with any shared secret. - localFailure bool - - // linkFailure is non-nil for htlcs that fail at our node. This may - // occur for our own payments which fail on the outgoing link, - // or for forwards which fail in the switch or on the outgoing link. - linkFailure *LinkError - - // convertedError is set to true if this is an HTLC fail that was - // created using an UpdateFailMalformedHTLC from the remote party. If - // this is true, then when forwarding this failure packet, we'll need - // to wrap it as if we were the first hop if it's a multi-hop HTLC. If - // it's a direct HTLC, then we'll decode the error as no encryption has - // taken place. - convertedError bool - - // hasSource is set to true if the incomingChanID and incomingHTLCID - // fields of a forwarded fail packet are already set and do not need to - // be looked up in the circuit map. - hasSource bool - - // isResolution is set to true if this packet was actually an incoming - // resolution message from an outside sub-system. We'll treat these as - // if they emanated directly from the switch. As a result, we'll - // encrypt all errors related to this packet as if we were the first - // hop. - isResolution bool - - // circuit holds a reference to an Add's circuit which is persisted in - // the switch during successful forwarding. - circuit *PaymentCircuit - - // incomingTimeout is the timeout that the incoming HTLC carried. This - // is the timeout of the HTLC applied to the incoming link. - incomingTimeout uint32 - - // outgoingTimeout is the timeout of the proposed outgoing HTLC. 
This - // will be extraced from the hop payload recevived by the incoming - // link. - outgoingTimeout uint32 - - // customRecords are user-defined records in the custom type range that - // were included in the payload. - customRecords record.CustomSet -} - -// inKey returns the circuit key used to identify the incoming htlc. -func (p *htlcPacket) inKey() CircuitKey { - return CircuitKey{ - ChanID: p.incomingChanID, - HtlcID: p.incomingHTLCID, - } -} - -// outKey returns the circuit key used to identify the outgoing, forwarded htlc. -func (p *htlcPacket) outKey() CircuitKey { - return CircuitKey{ - ChanID: p.outgoingChanID, - HtlcID: p.outgoingHTLCID, - } -} - -// keystone returns a tuple containing the incoming and outgoing circuit keys. -func (p *htlcPacket) keystone() Keystone { - return Keystone{ - InKey: p.inKey(), - OutKey: p.outKey(), - } -} diff --git a/lnd/htlcswitch/payment_result.go b/lnd/htlcswitch/payment_result.go deleted file mode 100644 index f0890cad..00000000 --- a/lnd/htlcswitch/payment_result.go +++ /dev/null @@ -1,310 +0,0 @@ -package htlcswitch - -import ( - "bytes" - "encoding/binary" - "io" - "sync" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/multimutex" - "github.com/pkt-cash/pktd/pktlog/log" -) - -var ( - - // networkResultStoreBucketKey is used for the root level bucket that - // stores the network result for each payment ID. - networkResultStoreBucketKey = []byte("network-result-store-bucket") - - // ErrPaymentIDNotFound is an error returned if the given paymentID is - // not found. - ErrPaymentIDNotFound = Err.CodeWithDetail("ErrPaymentIDNotFound", "paymentID not found") - - // ErrPaymentIDAlreadyExists is returned if we try to write a pending - // payment whose paymentID already exists. 
- ErrPaymentIDAlreadyExists = Err.CodeWithDetail("ErrPaymentIDAlreadyExists", "paymentID already exists") -) - -// PaymentResult wraps a decoded result received from the network after a -// payment attempt was made. This is what is eventually handed to the router -// for processing. -type PaymentResult struct { - // Preimage is set by the switch in case a sent HTLC was settled. - Preimage [32]byte - - // Error is non-nil in case a HTLC send failed, and the HTLC is now - // irrevocably canceled. If the payment failed during forwarding, this - // error will be a *ForwardingError. - Error er.R -} - -// networkResult is the raw result received from the network after a payment -// attempt has been made. Since the switch doesn't always have the necessary -// data to decode the raw message, we store it together with some meta data, -// and decode it when the router query for the final result. -type networkResult struct { - // msg is the received result. This should be of type UpdateFulfillHTLC - // or UpdateFailHTLC. - msg lnwire.Message - - // unencrypted indicates whether the failure encoded in the message is - // unencrypted, and hence doesn't need to be decrypted. - unencrypted bool - - // isResolution indicates whether this is a resolution message, in - // which the failure reason might not be included. - isResolution bool -} - -// serializeNetworkResult serializes the networkResult. -func serializeNetworkResult(w io.Writer, n *networkResult) er.R { - if _, err := lnwire.WriteMessage(w, n.msg, 0); err != nil { - return err - } - - return channeldb.WriteElements(w, n.unencrypted, n.isResolution) -} - -// deserializeNetworkResult deserializes the networkResult. 
-func deserializeNetworkResult(r io.Reader) (*networkResult, er.R) { - var ( - err er.R - ) - - n := &networkResult{} - - n.msg, err = lnwire.ReadMessage(r, 0) - if err != nil { - return nil, err - } - - if err := channeldb.ReadElements(r, - &n.unencrypted, &n.isResolution, - ); err != nil { - return nil, err - } - - return n, nil -} - -// networkResultStore is a persistent store that stores any results of HTLCs in -// flight on the network. Since payment results are inherently asynchronous, it -// is used as a common access point for senders of HTLCs, to know when a result -// is back. The Switch will checkpoint any received result to the store, and -// the store will keep results and notify the callers about them. -type networkResultStore struct { - db *channeldb.DB - - // results is a map from paymentIDs to channels where subscribers to - // payment results will be notified. - results map[uint64][]chan *networkResult - resultsMtx sync.Mutex - - // paymentIDMtx is a multimutex used to make sure the database and - // result subscribers map is consistent for each payment ID in case of - // concurrent callers. - paymentIDMtx *multimutex.Mutex -} - -func newNetworkResultStore(db *channeldb.DB) *networkResultStore { - return &networkResultStore{ - db: db, - results: make(map[uint64][]chan *networkResult), - paymentIDMtx: multimutex.NewMutex(), - } -} - -// storeResult stores the networkResult for the given paymentID, and -// notifies any subscribers. -func (store *networkResultStore) storeResult(paymentID uint64, - result *networkResult) er.R { - - // We get a mutex for this payment ID. This is needed to ensure - // consistency between the database state and the subscribers in case - // of concurrent calls. - store.paymentIDMtx.Lock(paymentID) - defer store.paymentIDMtx.Unlock(paymentID) - - // Serialize the payment result. 
- var b bytes.Buffer - if err := serializeNetworkResult(&b, result); err != nil { - return err - } - - var paymentIDBytes [8]byte - binary.BigEndian.PutUint64(paymentIDBytes[:], paymentID) - - err := kvdb.Batch(store.db.Backend, func(tx kvdb.RwTx) er.R { - networkResults, err := tx.CreateTopLevelBucket( - networkResultStoreBucketKey, - ) - if err != nil { - return err - } - - return networkResults.Put(paymentIDBytes[:], b.Bytes()) - }) - if err != nil { - return err - } - - // Now that the result is stored in the database, we can notify any - // active subscribers. - store.resultsMtx.Lock() - for _, res := range store.results[paymentID] { - res <- result - } - delete(store.results, paymentID) - store.resultsMtx.Unlock() - - return nil -} - -// subscribeResult is used to get the payment result for the given -// payment ID. It returns a channel on which the result will be delivered when -// ready. -func (store *networkResultStore) subscribeResult(paymentID uint64) ( - <-chan *networkResult, er.R) { - - // We get a mutex for this payment ID. This is needed to ensure - // consistency between the database state and the subscribers in case - // of concurrent calls. - store.paymentIDMtx.Lock(paymentID) - defer store.paymentIDMtx.Unlock(paymentID) - - var ( - result *networkResult - resultChan = make(chan *networkResult, 1) - ) - - err := kvdb.View(store.db, func(tx kvdb.RTx) er.R { - var err er.R - result, err = fetchResult(tx, paymentID) - switch { - - // Result not yet available, we will notify once a result is - // available. - case ErrPaymentIDNotFound.Is(err): - return nil - - case err != nil: - return err - - // The result was found, and will be returned immediately. - default: - return nil - } - }, func() { - result = nil - }) - if err != nil { - return nil, err - } - - // If the result was found, we can send it on the result channel - // imemdiately. 
- if result != nil { - resultChan <- result - return resultChan, nil - } - - // Otherwise we store the result channel for when the result is - // available. - store.resultsMtx.Lock() - store.results[paymentID] = append( - store.results[paymentID], resultChan, - ) - store.resultsMtx.Unlock() - - return resultChan, nil -} - -// getResult attempts to immediately fetch the result for the given pid from -// the store. If no result is available, ErrPaymentIDNotFound is returned. -func (store *networkResultStore) getResult(pid uint64) ( - *networkResult, er.R) { - - var result *networkResult - err := kvdb.View(store.db, func(tx kvdb.RTx) er.R { - var err er.R - result, err = fetchResult(tx, pid) - return err - }, func() { - result = nil - }) - if err != nil { - return nil, err - } - - return result, nil -} - -func fetchResult(tx kvdb.RTx, pid uint64) (*networkResult, er.R) { - var paymentIDBytes [8]byte - binary.BigEndian.PutUint64(paymentIDBytes[:], pid) - - networkResults := tx.ReadBucket(networkResultStoreBucketKey) - if networkResults == nil { - return nil, ErrPaymentIDNotFound.Default() - } - - // Check whether a result is already available. - resultBytes := networkResults.Get(paymentIDBytes[:]) - if resultBytes == nil { - return nil, ErrPaymentIDNotFound.Default() - } - - // Decode the result we found. - r := bytes.NewReader(resultBytes) - - return deserializeNetworkResult(r) -} - -// cleanStore removes all entries from the store, except the payment IDs given. -// NOTE: Since every result not listed in the keep map will be deleted, care -// should be taken to ensure no new payment attempts are being made -// concurrently while this process is ongoing, as its result might end up being -// deleted. 
-func (store *networkResultStore) cleanStore(keep map[uint64]struct{}) er.R { - return kvdb.Update(store.db.Backend, func(tx kvdb.RwTx) er.R { - networkResults, err := tx.CreateTopLevelBucket( - networkResultStoreBucketKey, - ) - if err != nil { - return err - } - - // Iterate through the bucket, deleting all items not in the - // keep map. - var toClean [][]byte - if err := networkResults.ForEach(func(k, _ []byte) er.R { - pid := binary.BigEndian.Uint64(k) - if _, ok := keep[pid]; ok { - return nil - } - - toClean = append(toClean, k) - return nil - }); err != nil { - return err - } - - for _, k := range toClean { - err := networkResults.Delete(k) - if err != nil { - return err - } - } - - if len(toClean) > 0 { - log.Infof("Removed %d stale entries from network "+ - "result store", len(toClean)) - } - - return nil - }, func() {}) -} diff --git a/lnd/htlcswitch/payment_result_test.go b/lnd/htlcswitch/payment_result_test.go deleted file mode 100644 index 3162aac7..00000000 --- a/lnd/htlcswitch/payment_result_test.go +++ /dev/null @@ -1,217 +0,0 @@ -package htlcswitch - -import ( - "bytes" - "io/ioutil" - "math/rand" - "reflect" - "testing" - "time" - - "github.com/davecgh/go-spew/spew" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/lntypes" - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -// TestNetworkResultSerialization checks that NetworkResults are properly -// (de)serialized. 
-func TestNetworkResultSerialization(t *testing.T) { - t.Parallel() - - var preimage lntypes.Preimage - if _, err := rand.Read(preimage[:]); err != nil { - t.Fatalf("unable gen rand preimag: %v", err) - } - - var chanID lnwire.ChannelID - if _, err := rand.Read(chanID[:]); err != nil { - t.Fatalf("unable gen rand chanid: %v", err) - } - - var reason [256]byte - if _, err := rand.Read(reason[:]); err != nil { - t.Fatalf("unable gen rand reason: %v", err) - } - - settle := &lnwire.UpdateFulfillHTLC{ - ChanID: chanID, - ID: 2, - PaymentPreimage: preimage, - } - - fail := &lnwire.UpdateFailHTLC{ - ChanID: chanID, - ID: 1, - Reason: []byte{}, - } - - fail2 := &lnwire.UpdateFailHTLC{ - ChanID: chanID, - ID: 1, - Reason: reason[:], - } - - testCases := []*networkResult{ - { - msg: settle, - }, - { - msg: fail, - unencrypted: false, - isResolution: false, - }, - { - msg: fail, - unencrypted: false, - isResolution: true, - }, - { - msg: fail2, - unencrypted: true, - isResolution: false, - }, - } - - for _, p := range testCases { - var buf bytes.Buffer - if err := serializeNetworkResult(&buf, p); err != nil { - t.Fatalf("serialize failed: %v", err) - } - - r := bytes.NewReader(buf.Bytes()) - p1, err := deserializeNetworkResult(r) - if err != nil { - t.Fatalf("unable to deserizlize: %v", err) - } - - if !reflect.DeepEqual(p, p1) { - t.Fatalf("not equal. %v vs %v", spew.Sdump(p), - spew.Sdump(p1)) - } - } -} - -// TestNetworkResultStore tests that the networkResult store behaves as -// expected, and that we can store, get and subscribe to results. 
-func TestNetworkResultStore(t *testing.T) { - t.Parallel() - - const numResults = 4 - - tempDir, errr := ioutil.TempDir("", "testdb") - if errr != nil { - t.Fatal(errr) - } - - db, err := channeldb.Open(tempDir) - if err != nil { - t.Fatal(err) - } - - store := newNetworkResultStore(db) - - var results []*networkResult - for i := 0; i < numResults; i++ { - n := &networkResult{ - msg: &lnwire.UpdateAddHTLC{}, - unencrypted: true, - isResolution: true, - } - results = append(results, n) - } - - // Subscribe to 2 of them. - var subs []<-chan *networkResult - for i := uint64(0); i < 2; i++ { - sub, err := store.subscribeResult(i) - if err != nil { - t.Fatalf("unable to subscribe: %v", err) - } - subs = append(subs, sub) - } - - // Store three of them. - for i := uint64(0); i < 3; i++ { - err := store.storeResult(i, results[i]) - if err != nil { - t.Fatalf("unable to store result: %v", err) - } - } - - // The two subscribers should be notified. - for _, sub := range subs { - select { - case <-sub: - case <-time.After(1 * time.Second): - t.Fatalf("no result received") - } - } - - // Let the third one subscribe now. THe result should be received - // immediately. - sub, err := store.subscribeResult(2) - if err != nil { - t.Fatalf("unable to subscribe: %v", err) - } - select { - case <-sub: - case <-time.After(1 * time.Second): - t.Fatalf("no result received") - } - - // Try fetching the result directly for the non-stored one. This should - // fail. - _, err = store.getResult(3) - if !ErrPaymentIDNotFound.Is(err) { - t.Fatalf("expected ErrPaymentIDNotFound, got %v", err) - } - - // Add the result and try again. - err = store.storeResult(3, results[3]) - if err != nil { - t.Fatalf("unable to store result: %v", err) - } - - _, err = store.getResult(3) - if err != nil { - t.Fatalf("unable to get result: %v", err) - } - - // Since we don't delete results from the store (yet), make sure we - // will get subscriptions for all of them. 
- for i := uint64(0); i < numResults; i++ { - sub, err := store.subscribeResult(i) - if err != nil { - t.Fatalf("unable to subscribe: %v", err) - } - - select { - case <-sub: - case <-time.After(1 * time.Second): - t.Fatalf("no result received") - } - } - - // Clean the store keeping the first two results. - toKeep := map[uint64]struct{}{ - 0: {}, - 1: {}, - } - // Finally, delete the result. - err = store.cleanStore(toKeep) - util.RequireNoErr(t, err) - - // Payment IDs 0 and 1 should be found, 2 and 3 should be deleted. - for i := uint64(0); i < numResults; i++ { - _, err = store.getResult(i) - if i <= 1 { - util.RequireNoErr(t, err, "unable to get result") - } - if i >= 2 && !ErrPaymentIDNotFound.Is(err) { - t.Fatalf("expected ErrPaymentIDNotFound, got %v", err) - } - - } -} diff --git a/lnd/htlcswitch/sequencer.go b/lnd/htlcswitch/sequencer.go deleted file mode 100644 index fc1cfa90..00000000 --- a/lnd/htlcswitch/sequencer.go +++ /dev/null @@ -1,130 +0,0 @@ -package htlcswitch - -import ( - "sync" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" -) - -// defaultSequenceBatchSize specifies the window of sequence numbers that are -// allocated for each write to disk made by the sequencer. -const defaultSequenceBatchSize = 1000 - -// Sequencer emits sequence numbers for locally initiated HTLCs. These are -// only used internally for tracking pending payments, however they must be -// unique in order to avoid circuit key collision in the circuit map. -type Sequencer interface { - // NextID returns a unique sequence number for each invocation. - NextID() (uint64, er.R) -} - -var ( - // nextPaymentIDKey identifies the bucket that will keep track of the - // persistent sequence numbers for payments. - nextPaymentIDKey = []byte("next-payment-id-key") - - // ErrSequencerCorrupted signals that the persistence engine was not - // initialized, or has been corrupted since startup. 
- ErrSequencerCorrupted = Err.CodeWithDetail("ErrSequencerCorrupted", - "sequencer database has been corrupted") -) - -// persistentSequencer is a concrete implementation of IDGenerator, that uses -// channeldb to allocate sequence numbers. -type persistentSequencer struct { - db *channeldb.DB - - mu sync.Mutex - - nextID uint64 - horizonID uint64 -} - -// NewPersistentSequencer initializes a new sequencer using a channeldb backend. -func NewPersistentSequencer(db *channeldb.DB) (Sequencer, er.R) { - g := &persistentSequencer{ - db: db, - } - - // Ensure the database bucket is created before any updates are - // performed. - if err := g.initDB(); err != nil { - return nil, err - } - - return g, nil -} - -// NextID returns a unique sequence number for every invocation, persisting the -// assignment to avoid reuse. -func (s *persistentSequencer) NextID() (uint64, er.R) { - - // nextID will be the unique sequence number returned if no errors are - // encountered. - var nextID uint64 - - // If our sequence batch has not been exhausted, we can allocate the - // next identifier in the range. - s.mu.Lock() - defer s.mu.Unlock() - - if s.nextID < s.horizonID { - nextID = s.nextID - s.nextID++ - - return nextID, nil - } - - // Otherwise, our sequence batch has been exhausted. We use the last - // known sequence number on disk to mark the beginning of the next - // sequence batch, and allocate defaultSequenceBatchSize (1000) at a - // time. - // - // NOTE: This also will happen on the first invocation after startup, - // i.e. when nextID and horizonID are both 0. The next sequence batch to be - // allocated will start from the last known tip on disk, which is fine - // as we only require uniqueness of the allocated numbers. 
- var nextHorizonID uint64 - if err := kvdb.Update(s.db, func(tx kvdb.RwTx) er.R { - nextIDBkt := tx.ReadWriteBucket(nextPaymentIDKey) - if nextIDBkt == nil { - return ErrSequencerCorrupted.Default() - } - - nextID = nextIDBkt.Sequence() - nextHorizonID = nextID + defaultSequenceBatchSize - - // Cannot fail when used in Update. - nextIDBkt.SetSequence(nextHorizonID) - - return nil - }, func() { - nextHorizonID = 0 - }); err != nil { - return 0, err - } - - // Never assign index zero, to avoid collisions with the EmptyKeystone. - if nextID == 0 { - nextID++ - } - - // If our batch sequence allocation succeed, update our in-memory values - // so we can continue to allocate sequence numbers without hitting disk. - // The nextID is incremented by one in memory so the in can be used - // issued directly on the next invocation. - s.nextID = nextID + 1 - s.horizonID = nextHorizonID - - return nextID, nil -} - -// initDB populates the bucket used to generate payment sequence numbers. -func (s *persistentSequencer) initDB() er.R { - return kvdb.Update(s.db, func(tx kvdb.RwTx) er.R { - _, err := tx.CreateTopLevelBucket(nextPaymentIDKey) - return err - }, func() {}) -} diff --git a/lnd/htlcswitch/switch.go b/lnd/htlcswitch/switch.go deleted file mode 100644 index 4a15ec80..00000000 --- a/lnd/htlcswitch/switch.go +++ /dev/null @@ -1,2249 +0,0 @@ -package htlcswitch - -import ( - "bytes" - "math/rand" - "sync" - "sync/atomic" - "time" - - "github.com/davecgh/go-spew/spew" - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/chainntnfs" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/lnd/clock" - "github.com/pkt-cash/pktd/lnd/contractcourt" - "github.com/pkt-cash/pktd/lnd/htlcswitch/hop" - "github.com/pkt-cash/pktd/lnd/lntypes" - "github.com/pkt-cash/pktd/lnd/lnwallet" - "github.com/pkt-cash/pktd/lnd/lnwallet/chainfee" - 
"github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/ticker" - "github.com/pkt-cash/pktd/pktlog/log" - "github.com/pkt-cash/pktd/wire" -) - -const ( - // DefaultFwdEventInterval is the duration between attempts to flush - // pending forwarding events to disk. - DefaultFwdEventInterval = 15 * time.Second - - // DefaultLogInterval is the duration between attempts to log statistics - // about forwarding events. - DefaultLogInterval = 10 * time.Second - - // DefaultAckInterval is the duration between attempts to ack any settle - // fails in a forwarding package. - DefaultAckInterval = 15 * time.Second - - // DefaultHTLCExpiry is the duration after which Adds will be cancelled - // if they could not get added to an outgoing commitment. - DefaultHTLCExpiry = time.Minute -) - -var ( - Err = er.NewErrorType("lnd.htlcswitch") - // ErrChannelLinkNotFound is used when channel link hasn't been found. - ErrChannelLinkNotFound = Err.CodeWithDetail("ErrChannelLinkNotFound", "channel link not found") - - // ErrDuplicateAdd signals that the ADD htlc was already forwarded - // through the switch and is locked into another commitment txn. - ErrDuplicateAdd = Err.CodeWithDetail("ErrDuplicateAdd", "duplicate add HTLC detected") - - // ErrUnknownErrorDecryptor signals that we were unable to locate the - // error decryptor for this payment. This is likely due to restarting - // the daemon. - ErrUnknownErrorDecryptor = Err.CodeWithDetail("ErrUnknownErrorDecryptor", "unknown error decryptor") - - // ErrSwitchExiting signaled when the switch has received a shutdown - // request. - ErrSwitchExiting = Err.CodeWithDetail("ErrSwitchExiting", "htlcswitch shutting down") - - // ErrNoLinksFound is an error returned when we attempt to retrieve the - // active links in the switch for a specific destination. 
- ErrNoLinksFound = Err.CodeWithDetail("ErrNoLinksFound", "no channel links found") - - // ErrUnreadableFailureMessage is returned when the failure message - // cannot be decrypted. - ErrUnreadableFailureMessage = Err.CodeWithDetail("ErrUnreadableFailureMessage", "unreadable failure message") - - // ErrLocalAddFailed signals that the ADD htlc for a local payment - // failed to be processed. - ErrLocalAddFailed = Err.CodeWithDetail("ErrLocalAddFailed", "local add HTLC failed") -) - -// plexPacket encapsulates switch packet and adds error channel to receive -// error from request handler. -type plexPacket struct { - pkt *htlcPacket - err chan er.R -} - -// ChannelCloseType is an enum which signals the type of channel closure the -// peer should execute. -type ChannelCloseType uint8 - -const ( - // CloseRegular indicates a regular cooperative channel closure - // should be attempted. - CloseRegular ChannelCloseType = iota - - // CloseBreach indicates that a channel breach has been detected, and - // the link should immediately be marked as unavailable. - CloseBreach -) - -// ChanClose represents a request which close a particular channel specified by -// its id. -type ChanClose struct { - // CloseType is a variable which signals the type of channel closure the - // peer should execute. - CloseType ChannelCloseType - - // ChanPoint represent the id of the channel which should be closed. - ChanPoint *wire.OutPoint - - // TargetFeePerKw is the ideal fee that was specified by the caller. - // This value is only utilized if the closure type is CloseRegular. - // This will be the starting offered fee when the fee negotiation - // process for the cooperative closure transaction kicks off. - TargetFeePerKw chainfee.SatPerKWeight - - // DeliveryScript is an optional delivery script to pay funds out to. - DeliveryScript lnwire.DeliveryAddress - - // Updates is used by request creator to receive the notifications about - // execution of the close channel request. 
- Updates chan interface{} - - // Err is used by request creator to receive request execution error. - Err chan er.R -} - -// Config defines the configuration for the service. ALL elements within the -// configuration MUST be non-nil for the service to carry out its duties. -type Config struct { - // FwdingLog is an interface that will be used by the switch to log - // forwarding events. A forwarding event happens each time a payment - // circuit is successfully completed. So when we forward an HTLC, and a - // settle is eventually received. - FwdingLog ForwardingLog - - // LocalChannelClose kicks-off the workflow to execute a cooperative or - // forced unilateral closure of the channel initiated by a local - // subsystem. - LocalChannelClose func(pubKey []byte, request *ChanClose) - - // DB is the channeldb instance that will be used to back the switch's - // persistent circuit map. - DB *channeldb.DB - - // SwitchPackager provides access to the forwarding packages of all - // active channels. This gives the switch the ability to read arbitrary - // forwarding packages, and ack settles and fails contained within them. - SwitchPackager channeldb.FwdOperator - - // ExtractErrorEncrypter is an interface allowing switch to reextract - // error encrypters stored in the circuit map on restarts, since they - // are not stored directly within the database. - ExtractErrorEncrypter hop.ErrorEncrypterExtracter - - // FetchLastChannelUpdate retrieves the latest routing policy for a - // target channel. This channel will typically be the outgoing channel - // specified when we receive an incoming HTLC. This will be used to - // provide payment senders our latest policy when sending encrypted - // error messages. - FetchLastChannelUpdate func(lnwire.ShortChannelID) (*lnwire.ChannelUpdate, er.R) - - // Notifier is an instance of a chain notifier that we'll use to signal - // the switch when a new block has arrived. 
- Notifier chainntnfs.ChainNotifier - - // HtlcNotifier is an instance of a htlcNotifier which we will pipe htlc - // events through. - HtlcNotifier htlcNotifier - - // FwdEventTicker is a signal that instructs the htlcswitch to flush any - // pending forwarding events. - FwdEventTicker ticker.Ticker - - // LogEventTicker is a signal instructing the htlcswitch to log - // aggregate stats about it's forwarding during the last interval. - LogEventTicker ticker.Ticker - - // AckEventTicker is a signal instructing the htlcswitch to ack any settle - // fails in forwarding packages. - AckEventTicker ticker.Ticker - - // AllowCircularRoute is true if the user has configured their node to - // allow forwards that arrive and depart our node over the same channel. - AllowCircularRoute bool - - // RejectHTLC is a flag that instructs the htlcswitch to reject any - // HTLCs that are not from the source hop. - RejectHTLC bool - - // Clock is a time source for the switch. - Clock clock.Clock - - // HTLCExpiry is the interval after which Adds will be cancelled if they - // have not been yet been delivered to a link. The computed deadline - // will expiry this long after the Adds are added to a mailbox via - // AddPacket. - HTLCExpiry time.Duration -} - -// Switch is the central messaging bus for all incoming/outgoing HTLCs. -// Connected peers with active channels are treated as named interfaces which -// refer to active channels as links. A link is the switch's message -// communication point with the goroutine that manages an active channel. New -// links are registered each time a channel is created, and unregistered once -// the channel is closed. The switch manages the hand-off process for multi-hop -// HTLCs, forwarding HTLCs initiated from within the daemon, and finally -// notifies users local-systems concerning their outstanding payment requests. -type Switch struct { - started int32 // To be used atomically. - shutdown int32 // To be used atomically. 
- - // bestHeight is the best known height of the main chain. The links will - // be used this information to govern decisions based on HTLC timeouts. - // This will be retrieved by the registered links atomically. - bestHeight uint32 - - wg sync.WaitGroup - quit chan struct{} - - // cfg is a copy of the configuration struct that the htlc switch - // service was initialized with. - cfg *Config - - // networkResults stores the results of payments initiated by the user. - // results. The store is used to later look up the payments and notify - // the user of the result when they are complete. Each payment attempt - // should be given a unique integer ID when it is created, otherwise - // results might be overwritten. - networkResults *networkResultStore - - // circuits is storage for payment circuits which are used to - // forward the settle/fail htlc updates back to the add htlc initiator. - circuits CircuitMap - - // mailOrchestrator manages the lifecycle of mailboxes used throughout - // the switch, and facilitates delayed delivery of packets to links that - // later come online. - mailOrchestrator *mailOrchestrator - - // indexMtx is a read/write mutex that protects the set of indexes - // below. - indexMtx sync.RWMutex - - // pendingLinkIndex holds links that have not had their final, live - // short_chan_id assigned. These links can be transitioned into the - // primary linkIndex by using UpdateShortChanID to load their live id. - pendingLinkIndex map[lnwire.ChannelID]ChannelLink - - // links is a map of channel id and channel link which manages - // this channel. - linkIndex map[lnwire.ChannelID]ChannelLink - - // forwardingIndex is an index which is consulted by the switch when it - // needs to locate the next hop to forward an incoming/outgoing HTLC - // update to/from. 
- // - // TODO(roasbeef): eventually add a NetworkHop mapping before the - // ChannelLink - forwardingIndex map[lnwire.ShortChannelID]ChannelLink - - // interfaceIndex maps the compressed public key of a peer to all the - // channels that the switch maintains with that peer. - interfaceIndex map[[33]byte]map[lnwire.ChannelID]ChannelLink - - // htlcPlex is the channel which all connected links use to coordinate - // the setup/teardown of Sphinx (onion routing) payment circuits. - // Active links forward any add/settle messages over this channel each - // state transition, sending new adds/settles which are fully locked - // in. - htlcPlex chan *plexPacket - - // chanCloseRequests is used to transfer the channel close request to - // the channel close handler. - chanCloseRequests chan *ChanClose - - // resolutionMsgs is the channel that all external contract resolution - // messages will be sent over. - resolutionMsgs chan *resolutionMsg - - // pendingFwdingEvents is the set of forwarding events which have been - // collected during the current interval, but hasn't yet been written - // to the forwarding log. - fwdEventMtx sync.Mutex - pendingFwdingEvents []channeldb.ForwardingEvent - - // blockEpochStream is an active block epoch event stream backed by an - // active ChainNotifier instance. This will be used to retrieve the - // lastest height of the chain. - blockEpochStream *chainntnfs.BlockEpochEvent - - // pendingSettleFails is the set of settle/fail entries that we need to - // ack in the forwarding package of the outgoing link. This was added to - // make pipelining settles more efficient. - pendingSettleFails []channeldb.SettleFailRef -} - -// New creates the new instance of htlc switch. 
-func New(cfg Config, currentHeight uint32) (*Switch, er.R) { - circuitMap, err := NewCircuitMap(&CircuitMapConfig{ - DB: cfg.DB, - ExtractErrorEncrypter: cfg.ExtractErrorEncrypter, - }) - if err != nil { - return nil, err - } - - s := &Switch{ - bestHeight: currentHeight, - cfg: &cfg, - circuits: circuitMap, - linkIndex: make(map[lnwire.ChannelID]ChannelLink), - forwardingIndex: make(map[lnwire.ShortChannelID]ChannelLink), - interfaceIndex: make(map[[33]byte]map[lnwire.ChannelID]ChannelLink), - pendingLinkIndex: make(map[lnwire.ChannelID]ChannelLink), - networkResults: newNetworkResultStore(cfg.DB), - htlcPlex: make(chan *plexPacket), - chanCloseRequests: make(chan *ChanClose), - resolutionMsgs: make(chan *resolutionMsg), - quit: make(chan struct{}), - } - - s.mailOrchestrator = newMailOrchestrator(&mailOrchConfig{ - fetchUpdate: s.cfg.FetchLastChannelUpdate, - forwardPackets: s.ForwardPackets, - clock: s.cfg.Clock, - expiry: s.cfg.HTLCExpiry, - }) - - return s, nil -} - -// resolutionMsg is a struct that wraps an existing ResolutionMsg with a done -// channel. We'll use this channel to synchronize delivery of the message with -// the caller. -type resolutionMsg struct { - contractcourt.ResolutionMsg - - doneChan chan struct{} -} - -// ProcessContractResolution is called by active contract resolvers once a -// contract they are watching over has been fully resolved. The message carries -// an external signal that *would* have been sent if the outgoing channel -// didn't need to go to the chain in order to fulfill a contract. We'll process -// this message just as if it came from an active outgoing channel. 
-func (s *Switch) ProcessContractResolution(msg contractcourt.ResolutionMsg) er.R { - - done := make(chan struct{}) - - select { - case s.resolutionMsgs <- &resolutionMsg{ - ResolutionMsg: msg, - doneChan: done, - }: - case <-s.quit: - return ErrSwitchExiting.Default() - } - - select { - case <-done: - case <-s.quit: - return ErrSwitchExiting.Default() - } - - return nil -} - -// GetPaymentResult returns the the result of the payment attempt with the -// given paymentID. The method returns a channel where the payment result will -// be sent when available, or an error is encountered during forwarding. When a -// result is received on the channel, the HTLC is guaranteed to no longer be in -// flight. The switch shutting down is signaled by closing the channel. If the -// paymentID is unknown, ErrPaymentIDNotFound will be returned. -func (s *Switch) GetPaymentResult(paymentID uint64, paymentHash lntypes.Hash, - deobfuscator ErrorDecrypter) (<-chan *PaymentResult, er.R) { - - var ( - nChan <-chan *networkResult - err er.R - outKey = CircuitKey{ - ChanID: hop.Source, - HtlcID: paymentID, - } - ) - - // If the payment is not found in the circuit map, check whether a - // result is already available. - // Assumption: no one will add this payment ID other than the caller. - if s.circuits.LookupCircuit(outKey) == nil { - res, err := s.networkResults.getResult(paymentID) - if err != nil { - return nil, err - } - c := make(chan *networkResult, 1) - c <- res - nChan = c - } else { - // The payment was committed to the circuits, subscribe for a - // result. - nChan, err = s.networkResults.subscribeResult(paymentID) - if err != nil { - return nil, err - } - } - - resultChan := make(chan *PaymentResult, 1) - - // Since the payment was known, we can start a goroutine that can - // extract the result when it is available, and pass it on to the - // caller. 
- s.wg.Add(1) - go func() { - defer s.wg.Done() - - var n *networkResult - select { - case n = <-nChan: - case <-s.quit: - // We close the result channel to signal a shutdown. We - // don't send any result in this case since the HTLC is - // still in flight. - close(resultChan) - return - } - - // Extract the result and pass it to the result channel. - result, err := s.extractResult( - deobfuscator, n, paymentID, paymentHash, - ) - if err != nil { - e := er.Errorf("unable to extract result: %v", err) - log.Error(e) - resultChan <- &PaymentResult{ - Error: e, - } - return - } - resultChan <- result - }() - - return resultChan, nil -} - -// CleanStore calls the underlying result store, telling it is safe to delete -// all entries except the ones in the keepPids map. This should be called -// preiodically to let the switch clean up payment results that we have -// handled. -func (s *Switch) CleanStore(keepPids map[uint64]struct{}) er.R { - return s.networkResults.cleanStore(keepPids) -} - -// SendHTLC is used by other subsystems which aren't belong to htlc switch -// package in order to send the htlc update. The paymentID used MUST be unique -// for this HTLC, and MUST be used only once, otherwise the switch might reject -// it. -func (s *Switch) SendHTLC(firstHop lnwire.ShortChannelID, paymentID uint64, - htlc *lnwire.UpdateAddHTLC) er.R { - - // Generate and send new update packet, if error will be received on - // this stage it means that packet haven't left boundaries of our - // system and something wrong happened. - packet := &htlcPacket{ - incomingChanID: hop.Source, - incomingHTLCID: paymentID, - outgoingChanID: firstHop, - htlc: htlc, - } - - circuit := newPaymentCircuit(&htlc.PaymentHash, packet) - actions, err := s.circuits.CommitCircuits(circuit) - if err != nil { - log.Errorf("unable to commit circuit in switch: %v", err) - return err - } - - // Drop duplicate packet if it has already been seen. 
- switch { - case len(actions.Drops) == 1: - return ErrDuplicateAdd.Default() - - case len(actions.Fails) == 1: - return ErrLocalAddFailed.Default() - } - - // Send packet to link. - packet.circuit = circuit - - // User has created the htlc update therefore we should find the - // appropriate channel link and send the payment over this link. - link, linkErr := s.getLocalLink(packet, htlc) - if linkErr != nil { - // Notify the htlc notifier of a link failure on our - // outgoing link. Incoming timelock/amount values are - // not set because they are not present for local sends. - s.cfg.HtlcNotifier.NotifyLinkFailEvent( - newHtlcKey(packet), - HtlcInfo{ - OutgoingTimeLock: htlc.Expiry, - OutgoingAmt: htlc.Amount, - }, - HtlcEventTypeSend, - linkErr, - false, - ) - - return er.E(linkErr) - } - - return link.HandleLocalAddPacket(packet) -} - -// UpdateForwardingPolicies sends a message to the switch to update the -// forwarding policies for the set of target channels, keyed in chanPolicies. -// -// NOTE: This function is synchronous and will block until either the -// forwarding policies for all links have been updated, or the switch shuts -// down. -func (s *Switch) UpdateForwardingPolicies( - chanPolicies map[wire.OutPoint]ForwardingPolicy) { - - log.Tracef("Updating link policies: %v", log.C(func() string { - return spew.Sdump(chanPolicies) - })) - - s.indexMtx.RLock() - - // Update each link in chanPolicies. - for targetLink, policy := range chanPolicies { - cid := lnwire.NewChanIDFromOutPoint(&targetLink) - - link, ok := s.linkIndex[cid] - if !ok { - log.Debugf("Unable to find ChannelPoint(%v) to update "+ - "link policy", targetLink) - continue - } - - link.UpdateForwardingPolicy(policy) - } - - s.indexMtx.RUnlock() -} - -// IsForwardedHTLC checks for a given channel and htlc index if it is related -// to an opened circuit that represents a forwarded payment. 
-func (s *Switch) IsForwardedHTLC(chanID lnwire.ShortChannelID, - htlcIndex uint64) bool { - - circuit := s.circuits.LookupOpenCircuit(channeldb.CircuitKey{ - ChanID: chanID, - HtlcID: htlcIndex, - }) - return circuit != nil && circuit.Incoming.ChanID != hop.Source -} - -// ForwardPackets adds a list of packets to the switch for processing. Fails -// and settles are added on a first past, simultaneously constructing circuits -// for any adds. After persisting the circuits, another pass of the adds is -// given to forward them through the router. The sending link's quit channel is -// used to prevent deadlocks when the switch stops a link in the midst of -// forwarding. -func (s *Switch) ForwardPackets(linkQuit chan struct{}, - packets ...*htlcPacket) er.R { - - var ( - // fwdChan is a buffered channel used to receive err msgs from - // the htlcPlex when forwarding this batch. - fwdChan = make(chan er.R, len(packets)) - - // numSent keeps a running count of how many packets are - // forwarded to the switch, which determines how many responses - // we will wait for on the fwdChan.. - numSent int - ) - - // No packets, nothing to do. - if len(packets) == 0 { - return nil - } - - // Setup a barrier to prevent the background tasks from processing - // responses until this function returns to the user. - var wg sync.WaitGroup - wg.Add(1) - defer wg.Done() - - // Before spawning the following goroutine to proxy our error responses, - // check to see if we have already been issued a shutdown request. If - // so, we exit early to avoid incrementing the switch's waitgroup while - // it is already in the process of shutting down. - select { - case <-linkQuit: - return nil - case <-s.quit: - return nil - default: - // Spawn a goroutine to log the errors returned from failed packets. - s.wg.Add(1) - go s.logFwdErrs(&numSent, &wg, fwdChan) - } - - // Make a first pass over the packets, forwarding any settles or fails. 
- // As adds are found, we create a circuit and append it to our set of - // circuits to be written to disk. - var circuits []*PaymentCircuit - var addBatch []*htlcPacket - for _, packet := range packets { - switch htlc := packet.htlc.(type) { - case *lnwire.UpdateAddHTLC: - circuit := newPaymentCircuit(&htlc.PaymentHash, packet) - packet.circuit = circuit - circuits = append(circuits, circuit) - addBatch = append(addBatch, packet) - default: - err := s.routeAsync(packet, fwdChan, linkQuit) - if err != nil { - return er.Errorf("failed to forward packet %v", err) - } - numSent++ - } - } - - // If this batch did not contain any circuits to commit, we can return - // early. - if len(circuits) == 0 { - return nil - } - - // Write any circuits that we found to disk. - actions, err := s.circuits.CommitCircuits(circuits...) - if err != nil { - log.Errorf("unable to commit circuits in switch: %v", err) - } - - // Split the htlc packets by comparing an in-order seek to the head of - // the added, dropped, or failed circuits. - // - // NOTE: This assumes each list is guaranteed to be a subsequence of the - // circuits, and that the union of the sets results in the original set - // of circuits. - var addedPackets, failedPackets []*htlcPacket - for _, packet := range addBatch { - switch { - case len(actions.Adds) > 0 && packet.circuit == actions.Adds[0]: - addedPackets = append(addedPackets, packet) - actions.Adds = actions.Adds[1:] - - case len(actions.Drops) > 0 && packet.circuit == actions.Drops[0]: - actions.Drops = actions.Drops[1:] - - case len(actions.Fails) > 0 && packet.circuit == actions.Fails[0]: - failedPackets = append(failedPackets, packet) - actions.Fails = actions.Fails[1:] - } - } - - // Now, forward any packets for circuits that were successfully added to - // the switch's circuit map. 
- for _, packet := range addedPackets { - err := s.routeAsync(packet, fwdChan, linkQuit) - if err != nil { - return er.Errorf("failed to forward packet %v", err) - } - numSent++ - } - - // Lastly, for any packets that failed, this implies that they were - // left in a half added state, which can happen when recovering from - // failures. - if len(failedPackets) > 0 { - var failure lnwire.FailureMessage - update, err := s.cfg.FetchLastChannelUpdate( - failedPackets[0].incomingChanID, - ) - if err != nil { - failure = &lnwire.FailTemporaryNodeFailure{} - } else { - failure = lnwire.NewTemporaryChannelFailure(update) - } - linkError := NewDetailedLinkError( - failure, OutgoingFailureIncompleteForward, - ) - - for _, packet := range failedPackets { - // We don't handle the error here since this method - // always returns an error. - _ = s.failAddPacket(packet, linkError) - } - } - - return nil -} - -// logFwdErrs logs any errors received on `fwdChan` -func (s *Switch) logFwdErrs(num *int, wg *sync.WaitGroup, fwdChan chan er.R) { - defer s.wg.Done() - - // Wait here until the outer function has finished persisting - // and routing the packets. This guarantees we don't read from num until - // the value is accurate. - wg.Wait() - - numSent := *num - for i := 0; i < numSent; i++ { - select { - case err := <-fwdChan: - if err != nil { - log.Errorf("Unhandled error while reforwarding htlc "+ - "settle/fail over htlcswitch: %v", err) - } - case <-s.quit: - log.Errorf("unable to forward htlc packet " + - "htlc switch was stopped") - return - } - } -} - -// routeAsync sends a packet through the htlc switch, using the provided err -// chan to propagate errors back to the caller. The link's quit channel is -// provided so that the send can be canceled if either the link or the switch -// receive a shutdown requuest. This method does not wait for a response from -// the htlcForwarder before returning. 
-func (s *Switch) routeAsync(packet *htlcPacket, errChan chan er.R, - linkQuit chan struct{}) er.R { - - command := &plexPacket{ - pkt: packet, - err: errChan, - } - - select { - case s.htlcPlex <- command: - return nil - case <-linkQuit: - return ErrLinkShuttingDown.Default() - case <-s.quit: - return er.New("htlc switch was stopped") - } -} - -// getLocalLink handles the addition of a htlc for a send that originates from -// our node. It returns the link that the htlc should be forwarded outwards on, -// and a link error if the htlc cannot be forwarded. -func (s *Switch) getLocalLink(pkt *htlcPacket, htlc *lnwire.UpdateAddHTLC) ( - ChannelLink, *LinkError) { - - // Try to find links by node destination. - s.indexMtx.RLock() - link, err := s.getLinkByShortID(pkt.outgoingChanID) - s.indexMtx.RUnlock() - if err != nil { - log.Errorf("Link %v not found", pkt.outgoingChanID) - return nil, NewLinkError(&lnwire.FailUnknownNextPeer{}) - } - - if !link.EligibleToForward() { - log.Errorf("Link %v is not available to forward", - pkt.outgoingChanID) - - // The update does not need to be populated as the error - // will be returned back to the router. - return nil, NewDetailedLinkError( - lnwire.NewTemporaryChannelFailure(nil), - OutgoingFailureLinkNotEligible, - ) - } - - // Ensure that the htlc satisfies the outgoing channel policy. - currentHeight := atomic.LoadUint32(&s.bestHeight) - htlcErr := link.CheckHtlcTransit( - htlc.PaymentHash, htlc.Amount, htlc.Expiry, currentHeight, - ) - if htlcErr != nil { - log.Errorf("Link %v policy for local forward not "+ - "satisfied", pkt.outgoingChanID) - return nil, htlcErr - } - return link, nil -} - -// handleLocalResponse processes a Settle or Fail responding to a -// locally-initiated payment. This is handled asynchronously to avoid blocking -// the main event loop within the switch, as these operations can require -// multiple db transactions. 
The guarantees of the circuit map are stringent -// enough such that we are able to tolerate reordering of these operations -// without side effects. The primary operations handled are: -// 1. Save the payment result to the pending payment store. -// 2. Notify subscribers about the payment result. -// 3. Ack settle/fail references, to avoid resending this response internally -// 4. Teardown the closing circuit in the circuit map -// -// NOTE: This method MUST be spawned as a goroutine. -func (s *Switch) handleLocalResponse(pkt *htlcPacket) { - defer s.wg.Done() - - paymentID := pkt.incomingHTLCID - - // The error reason will be unencypted in case this a local - // failure or a converted error. - unencrypted := pkt.localFailure || pkt.convertedError - n := &networkResult{ - msg: pkt.htlc, - unencrypted: unencrypted, - isResolution: pkt.isResolution, - } - - // Store the result to the db. This will also notify subscribers about - // the result. - if err := s.networkResults.storeResult(paymentID, n); err != nil { - log.Errorf("Unable to complete payment for pid=%v: %v", - paymentID, err) - return - } - - // First, we'll clean up any fwdpkg references, circuit entries, and - // mark in our db that the payment for this payment hash has either - // succeeded or failed. - // - // If this response is contained in a forwarding package, we'll start by - // acking the settle/fail so that we don't continue to retransmit the - // HTLC internally. - if pkt.destRef != nil { - if err := s.ackSettleFail(*pkt.destRef); err != nil { - log.Warnf("Unable to ack settle/fail reference: %s: %v", - *pkt.destRef, err) - return - } - } - - // Next, we'll remove the circuit since we are about to complete an - // fulfill/fail of this HTLC. Since we've already removed the - // settle/fail fwdpkg reference, the response from the peer cannot be - // replayed internally if this step fails. If this happens, this logic - // will be executed when a provided resolution message comes through. 
- // This can only happen if the circuit is still open, which is why this - // ordering is chosen. - if err := s.teardownCircuit(pkt); err != nil { - log.Warnf("Unable to teardown circuit %s: %v", - pkt.inKey(), err) - return - } - - // Finally, notify on the htlc failure or success that has been handled. - key := newHtlcKey(pkt) - eventType := getEventType(pkt) - - switch pkt.htlc.(type) { - case *lnwire.UpdateFulfillHTLC: - s.cfg.HtlcNotifier.NotifySettleEvent(key, eventType) - - case *lnwire.UpdateFailHTLC: - s.cfg.HtlcNotifier.NotifyForwardingFailEvent(key, eventType) - } -} - -// extractResult uses the given deobfuscator to extract the payment result from -// the given network message. -func (s *Switch) extractResult(deobfuscator ErrorDecrypter, n *networkResult, - paymentID uint64, paymentHash lntypes.Hash) (*PaymentResult, er.R) { - - switch htlc := n.msg.(type) { - - // We've received a settle update which means we can finalize the user - // payment and return successful response. - case *lnwire.UpdateFulfillHTLC: - return &PaymentResult{ - Preimage: htlc.PaymentPreimage, - }, nil - - // We've received a fail update which means we can finalize the - // user payment and return fail response. - case *lnwire.UpdateFailHTLC: - paymentErr := s.parseFailedPayment( - deobfuscator, paymentID, paymentHash, n.unencrypted, - n.isResolution, htlc, - ) - - return &PaymentResult{ - Error: paymentErr, - }, nil - - default: - return nil, er.Errorf("received unknown response type: %T", - htlc) - } -} - -// parseFailedPayment determines the appropriate failure message to return to -// a user initiated payment. The three cases handled are: -// 1) An unencrypted failure, which should already plaintext. -// 2) A resolution from the chain arbitrator, which possibly has no failure -// reason attached. -// 3) A failure from the remote party, which will need to be decrypted using -// the payment deobfuscator. 
-func (s *Switch) parseFailedPayment(deobfuscator ErrorDecrypter, - paymentID uint64, paymentHash lntypes.Hash, unencrypted, - isResolution bool, htlc *lnwire.UpdateFailHTLC) er.R { - - switch { - - // The payment never cleared the link, so we don't need to - // decrypt the error, simply decode it them report back to the - // user. - case unencrypted: - r := bytes.NewReader(htlc.Reason) - failureMsg, err := lnwire.DecodeFailure(r, 0) - if err != nil { - // If we could not decode the failure reason, return a link - // error indicating that we failed to decode the onion. - linkError := NewDetailedLinkError( - // As this didn't even clear the link, we don't - // need to apply an update here since it goes - // directly to the router. - lnwire.NewTemporaryChannelFailure(nil), - OutgoingFailureDecodeError, - ) - - log.Errorf("%v: (hash=%v, pid=%d): %v", - linkError.FailureDetail.FailureString(), - paymentHash, paymentID, err) - - return er.E(linkError) - } - - // If we successfully decoded the failure reason, return it. - return er.E(NewLinkError(failureMsg)) - - // A payment had to be timed out on chain before it got past - // the first hop. In this case, we'll report a permanent - // channel failure as this means us, or the remote party had to - // go on chain. - case isResolution && htlc.Reason == nil: - linkError := NewDetailedLinkError( - &lnwire.FailPermanentChannelFailure{}, - OutgoingFailureOnChainTimeout, - ) - - log.Infof("%v: hash=%v, pid=%d", - linkError.FailureDetail.FailureString(), - paymentHash, paymentID) - - return er.E(linkError) - - // A regular multi-hop payment error that we'll need to - // decrypt. - default: - // We'll attempt to fully decrypt the onion encrypted - // error. If we're unable to then we'll bail early. 
- failure, err := deobfuscator.DecryptError(htlc.Reason) - if err != nil { - log.Errorf("unable to de-obfuscate onion failure "+ - "(hash=%v, pid=%d): %v", - paymentHash, paymentID, err) - - return ErrUnreadableFailureMessage.Default() - } - - return er.E(failure) - } -} - -// handlePacketForward is used in cases when we need forward the htlc update -// from one channel link to another and be able to propagate the settle/fail -// updates back. This behaviour is achieved by creation of payment circuits. -func (s *Switch) handlePacketForward(packet *htlcPacket) er.R { - switch htlc := packet.htlc.(type) { - - // Channel link forwarded us a new htlc, therefore we initiate the - // payment circuit within our internal state so we can properly forward - // the ultimate settle message back latter. - case *lnwire.UpdateAddHTLC: - // Check if the node is set to reject all onward HTLCs and also make - // sure that HTLC is not from the source node. - if s.cfg.RejectHTLC { - failure := NewDetailedLinkError( - &lnwire.FailChannelDisabled{}, - OutgoingFailureForwardsDisabled, - ) - - return s.failAddPacket(packet, failure) - } - - // Before we attempt to find a non-strict forwarding path for - // this htlc, check whether the htlc is being routed over the - // same incoming and outgoing channel. If our node does not - // allow forwards of this nature, we fail the htlc early. This - // check is in place to disallow inefficiently routed htlcs from - // locking up our balance. 
- linkErr := checkCircularForward( - packet.incomingChanID, packet.outgoingChanID, - s.cfg.AllowCircularRoute, htlc.PaymentHash, - ) - if linkErr != nil { - return s.failAddPacket(packet, linkErr) - } - - s.indexMtx.RLock() - targetLink, err := s.getLinkByShortID(packet.outgoingChanID) - if err != nil { - s.indexMtx.RUnlock() - - log.Debugf("unable to find link with "+ - "destination %v", packet.outgoingChanID) - - // If packet was forwarded from another channel link - // than we should notify this link that some error - // occurred. - linkError := NewLinkError( - &lnwire.FailUnknownNextPeer{}, - ) - - return s.failAddPacket(packet, linkError) - } - targetPeerKey := targetLink.Peer().PubKey() - interfaceLinks, _ := s.getLinks(targetPeerKey) - s.indexMtx.RUnlock() - - // We'll keep track of any HTLC failures during the link - // selection process. This way we can return the error for - // precise link that the sender selected, while optimistically - // trying all links to utilize our available bandwidth. - linkErrs := make(map[lnwire.ShortChannelID]*LinkError) - - // Find all destination channel links with appropriate - // bandwidth. - var destinations []ChannelLink - for _, link := range interfaceLinks { - var failure *LinkError - - // We'll skip any links that aren't yet eligible for - // forwarding. - if !link.EligibleToForward() { - failure = NewDetailedLinkError( - &lnwire.FailUnknownNextPeer{}, - OutgoingFailureLinkNotEligible, - ) - } else { - // We'll ensure that the HTLC satisfies the - // current forwarding conditions of this target - // link. - currentHeight := atomic.LoadUint32(&s.bestHeight) - failure = link.CheckHtlcForward( - htlc.PaymentHash, packet.incomingAmount, - packet.amount, packet.incomingTimeout, - packet.outgoingTimeout, currentHeight, - ) - } - - // If this link can forward the htlc, add it to the set - // of destinations. 
- if failure == nil { - destinations = append(destinations, link) - continue - } - - linkErrs[link.ShortChanID()] = failure - } - - // If we had a forwarding failure due to the HTLC not - // satisfying the current policy, then we'll send back an - // error, but ensure we send back the error sourced at the - // *target* link. - if len(destinations) == 0 { - // At this point, some or all of the links rejected the - // HTLC so we couldn't forward it. So we'll try to look - // up the error that came from the source. - linkErr, ok := linkErrs[packet.outgoingChanID] - if !ok { - // If we can't find the error of the source, - // then we'll return an unknown next peer, - // though this should never happen. - linkErr = NewLinkError( - &lnwire.FailUnknownNextPeer{}, - ) - log.Warnf("unable to find err source for "+ - "outgoing_link=%v, errors=%v", - packet.outgoingChanID, log.C(func() string { - return spew.Sdump(linkErrs) - })) - } - - log.Tracef("incoming HTLC(%x) violated "+ - "target outgoing link (id=%v) policy: %v", - htlc.PaymentHash[:], packet.outgoingChanID, - linkErr) - - return s.failAddPacket(packet, linkErr) - } - - // Choose a random link out of the set of links that can forward - // this htlc. The reason for randomization is to evenly - // distribute the htlc load without making assumptions about - // what the best channel is. - destination := destinations[rand.Intn(len(destinations))] - - // Send the packet to the destination channel link which - // manages the channel. - packet.outgoingChanID = destination.ShortChanID() - return destination.HandleSwitchPacket(packet) - - case *lnwire.UpdateFailHTLC, *lnwire.UpdateFulfillHTLC: - // If the source of this packet has not been set, use the - // circuit map to lookup the origin. - circuit, err := s.closeCircuit(packet) - if err != nil { - return err - } - - // closeCircuit returns a nil circuit when a settle packet returns an - // ErrUnknownCircuit error upon the inner call to CloseCircuit. 
- if circuit == nil { - return nil - } - - fail, isFail := htlc.(*lnwire.UpdateFailHTLC) - if isFail && !packet.hasSource { - switch { - // No message to encrypt, locally sourced payment. - case circuit.ErrorEncrypter == nil: - - // If this is a resolution message, then we'll need to - // encrypt it as it's actually internally sourced. - case packet.isResolution: - var err er.R - // TODO(roasbeef): don't need to pass actually? - failure := &lnwire.FailPermanentChannelFailure{} - fail.Reason, err = circuit.ErrorEncrypter.EncryptFirstHop( - failure, - ) - if err != nil { - err = er.Errorf("unable to obfuscate "+ - "error: %v", err) - log.Error(err) - } - - // Alternatively, if the remote party send us an - // UpdateFailMalformedHTLC, then we'll need to convert - // this into a proper well formatted onion error as - // there's no HMAC currently. - case packet.convertedError: - log.Infof("Converting malformed HTLC error "+ - "for circuit for Circuit(%x: "+ - "(%s, %d) <-> (%s, %d))", packet.circuit.PaymentHash, - packet.incomingChanID, packet.incomingHTLCID, - packet.outgoingChanID, packet.outgoingHTLCID) - - fail.Reason = circuit.ErrorEncrypter.EncryptMalformedError( - fail.Reason, - ) - - default: - // Otherwise, it's a forwarded error, so we'll perform a - // wrapper encryption as normal. - fail.Reason = circuit.ErrorEncrypter.IntermediateEncrypt( - fail.Reason, - ) - } - } else if !isFail && circuit.Outgoing != nil { - // If this is an HTLC settle, and it wasn't from a - // locally initiated HTLC, then we'll log a forwarding - // event so we can flush it to disk later. - // - // TODO(roasbeef): only do this once link actually - // fully settles? 
- localHTLC := packet.incomingChanID == hop.Source - if !localHTLC { - log.Infof("Forwarded HTLC(%x) of %v (fee: %v) "+ - "from IncomingChanID(%v) to OutgoingChanID(%v)", - circuit.PaymentHash[:], circuit.OutgoingAmount, - circuit.IncomingAmount-circuit.OutgoingAmount, - circuit.Incoming.ChanID, circuit.Outgoing.ChanID) - s.fwdEventMtx.Lock() - s.pendingFwdingEvents = append( - s.pendingFwdingEvents, - channeldb.ForwardingEvent{ - Timestamp: time.Now(), - IncomingChanID: circuit.Incoming.ChanID, - OutgoingChanID: circuit.Outgoing.ChanID, - AmtIn: circuit.IncomingAmount, - AmtOut: circuit.OutgoingAmount, - }, - ) - s.fwdEventMtx.Unlock() - } - } - - // A blank IncomingChanID in a circuit indicates that it is a pending - // user-initiated payment. - if packet.incomingChanID == hop.Source { - s.wg.Add(1) - go s.handleLocalResponse(packet) - return nil - } - - // Check to see that the source link is online before removing - // the circuit. - return s.mailOrchestrator.Deliver(packet.incomingChanID, packet) - - default: - return er.New("wrong update type") - } -} - -// checkCircularForward checks whether a forward is circular (arrives and -// departs on the same link) and returns a link error if the switch is -// configured to disallow this behaviour. -func checkCircularForward(incoming, outgoing lnwire.ShortChannelID, - allowCircular bool, paymentHash lntypes.Hash) *LinkError { - - // If the route is not circular we do not need to perform any further - // checks. - if incoming != outgoing { - return nil - } - - // If the incoming and outgoing link are equal, the htlc is part of a - // circular route which may be used to lock up our liquidity. If the - // switch is configured to allow circular routes, log that we are - // allowing the route then return nil. 
- if allowCircular { - log.Debugf("allowing circular route over link: %v "+ - "(payment hash: %x)", incoming, paymentHash) - return nil - } - - // If our node disallows circular routes, return a temporary channel - // failure. There is nothing wrong with the policy used by the remote - // node, so we do not include a channel update. - return NewDetailedLinkError( - lnwire.NewTemporaryChannelFailure(nil), - OutgoingFailureCircularRoute, - ) -} - -// failAddPacket encrypts a fail packet back to an add packet's source. -// The ciphertext will be derived from the failure message proivded by context. -// This method returns the failErr if all other steps complete successfully. -func (s *Switch) failAddPacket(packet *htlcPacket, failure *LinkError) er.R { - // Encrypt the failure so that the sender will be able to read the error - // message. Since we failed this packet, we use EncryptFirstHop to - // obfuscate the failure for their eyes only. - reason, err := packet.obfuscator.EncryptFirstHop(failure.WireMessage()) - if err != nil { - err := er.Errorf("unable to obfuscate "+ - "error: %v", err) - log.Error(err) - return err - } - - log.Error(failure.Error()) - - // Create a failure packet for this htlc. The the full set of - // information about the htlc failure is included so that they can - // be included in link failure notifications. - failPkt := &htlcPacket{ - sourceRef: packet.sourceRef, - incomingChanID: packet.incomingChanID, - incomingHTLCID: packet.incomingHTLCID, - outgoingChanID: packet.outgoingChanID, - outgoingHTLCID: packet.outgoingHTLCID, - incomingAmount: packet.incomingAmount, - amount: packet.amount, - incomingTimeout: packet.incomingTimeout, - outgoingTimeout: packet.outgoingTimeout, - circuit: packet.circuit, - linkFailure: failure, - htlc: &lnwire.UpdateFailHTLC{ - Reason: reason, - }, - } - - // Route a fail packet back to the source link. 
- err = s.mailOrchestrator.Deliver(failPkt.incomingChanID, failPkt) - if err != nil { - err = er.Errorf("source chanid=%v unable to "+ - "handle switch packet: %v", - packet.incomingChanID, err) - log.Error(err) - return err - } - - return er.E(failure) -} - -// closeCircuit accepts a settle or fail htlc and the associated htlc packet and -// attempts to determine the source that forwarded this htlc. This method will -// set the incoming chan and htlc ID of the given packet if the source was -// found, and will properly [re]encrypt any failure messages. -func (s *Switch) closeCircuit(pkt *htlcPacket) (*PaymentCircuit, er.R) { - // If the packet has its source, that means it was failed locally by - // the outgoing link. We fail it here to make sure only one response - // makes it through the switch. - if pkt.hasSource { - circuit, err := s.circuits.FailCircuit(pkt.inKey()) - switch { - - // Circuit successfully closed. - case err == nil: - return circuit, nil - - // Circuit was previously closed, but has not been deleted. - // We'll just drop this response until the circuit has been - // fully removed. - case ErrCircuitClosing.Is(err): - return nil, err - - // Failed to close circuit because it does not exist. This is - // likely because the circuit was already successfully closed. - // Since this packet failed locally, there is no forwarding - // package entry to acknowledge. - case ErrUnknownCircuit.Is(err): - return nil, err - - // Unexpected error. - default: - return nil, err - } - } - - // Otherwise, this is packet was received from the remote party. Use - // circuit map to find the incoming link to receive the settle/fail. - circuit, err := s.circuits.CloseCircuit(pkt.outKey()) - switch { - - // Open circuit successfully closed. 
- case err == nil: - pkt.incomingChanID = circuit.Incoming.ChanID - pkt.incomingHTLCID = circuit.Incoming.HtlcID - pkt.circuit = circuit - pkt.sourceRef = &circuit.AddRef - - pktType := "SETTLE" - if _, ok := pkt.htlc.(*lnwire.UpdateFailHTLC); ok { - pktType = "FAIL" - } - - log.Debugf("Closed completed %s circuit for %x: "+ - "(%s, %d) <-> (%s, %d)", pktType, pkt.circuit.PaymentHash, - pkt.incomingChanID, pkt.incomingHTLCID, - pkt.outgoingChanID, pkt.outgoingHTLCID) - - return circuit, nil - - // Circuit was previously closed, but has not been deleted. We'll just - // drop this response until the circuit has been removed. - case ErrCircuitClosing.Is(err): - return nil, err - - // Failed to close circuit because it does not exist. This is likely - // because the circuit was already successfully closed. - case ErrUnknownCircuit.Is(err): - if pkt.destRef != nil { - // Add this SettleFailRef to the set of pending settle/fail entries - // awaiting acknowledgement. - s.pendingSettleFails = append(s.pendingSettleFails, *pkt.destRef) - } - - // If this is a settle, we will not log an error message as settles - // are expected to hit the ErrUnknownCircuit case. The only way fails - // can hit this case if the link restarts after having just sent a fail - // to the switch. - _, isSettle := pkt.htlc.(*lnwire.UpdateFulfillHTLC) - if !isSettle { - err := er.Errorf("unable to find target channel "+ - "for HTLC fail: channel ID = %s, "+ - "HTLC ID = %d", pkt.outgoingChanID, - pkt.outgoingHTLCID) - log.Error(err) - - return nil, err - } - - return nil, nil - - // Unexpected error. - default: - return nil, err - } -} - -// ackSettleFail is used by the switch to ACK any settle/fail entries in the -// forwarding package of the outgoing link for a payment circuit. We do this if -// we're the originator of the payment, so the link stops attempting to -// re-broadcast. 
-func (s *Switch) ackSettleFail(settleFailRefs ...channeldb.SettleFailRef) er.R { - return kvdb.Batch(s.cfg.DB.Backend, func(tx kvdb.RwTx) er.R { - return s.cfg.SwitchPackager.AckSettleFails(tx, settleFailRefs...) - }) -} - -// teardownCircuit removes a pending or open circuit from the switch's circuit -// map and prints useful logging statements regarding the outcome. -func (s *Switch) teardownCircuit(pkt *htlcPacket) er.R { - var pktType string - switch htlc := pkt.htlc.(type) { - case *lnwire.UpdateFulfillHTLC: - pktType = "SETTLE" - case *lnwire.UpdateFailHTLC: - pktType = "FAIL" - default: - err := er.Errorf("cannot tear down packet of type: %T", htlc) - log.Errorf(err.String()) - return err - } - - switch { - case pkt.circuit.HasKeystone(): - log.Debugf("Tearing down open circuit with %s pkt, removing circuit=%v "+ - "with keystone=%v", pktType, pkt.inKey(), pkt.outKey()) - - err := s.circuits.DeleteCircuits(pkt.inKey()) - if err != nil { - log.Warnf("Failed to tear down open circuit (%s, %d) <-> (%s, %d) "+ - "with payment_hash-%v using %s pkt", - pkt.incomingChanID, pkt.incomingHTLCID, - pkt.outgoingChanID, pkt.outgoingHTLCID, - pkt.circuit.PaymentHash, pktType) - return err - } - - log.Debugf("Closed completed %s circuit for %x: "+ - "(%s, %d) <-> (%s, %d)", pktType, pkt.circuit.PaymentHash, - pkt.incomingChanID, pkt.incomingHTLCID, - pkt.outgoingChanID, pkt.outgoingHTLCID) - - default: - log.Debugf("Tearing down incomplete circuit with %s for inkey=%v", - pktType, pkt.inKey()) - - err := s.circuits.DeleteCircuits(pkt.inKey()) - if err != nil { - log.Warnf("Failed to tear down pending %s circuit for %x: "+ - "(%s, %d)", pktType, pkt.circuit.PaymentHash, - pkt.incomingChanID, pkt.incomingHTLCID) - return err - } - - log.Debugf("Removed pending onion circuit for %x: "+ - "(%s, %d)", pkt.circuit.PaymentHash, - pkt.incomingChanID, pkt.incomingHTLCID) - } - - return nil -} - -// CloseLink creates and sends the close channel command to the target link -// 
directing the specified closure type. If the closure type is CloseRegular, -// targetFeePerKw parameter should be the ideal fee-per-kw that will be used as -// a starting point for close negotiation. The deliveryScript parameter is an -// optional parameter which sets a user specified script to close out to. -func (s *Switch) CloseLink(chanPoint *wire.OutPoint, - closeType ChannelCloseType, targetFeePerKw chainfee.SatPerKWeight, - deliveryScript lnwire.DeliveryAddress) (chan interface{}, chan er.R) { - - // TODO(roasbeef) abstract out the close updates. - updateChan := make(chan interface{}, 2) - errChan := make(chan er.R, 1) - - command := &ChanClose{ - CloseType: closeType, - ChanPoint: chanPoint, - Updates: updateChan, - TargetFeePerKw: targetFeePerKw, - DeliveryScript: deliveryScript, - Err: errChan, - } - - select { - case s.chanCloseRequests <- command: - return updateChan, errChan - - case <-s.quit: - errChan <- ErrSwitchExiting.Default() - close(updateChan) - return updateChan, errChan - } -} - -// htlcForwarder is responsible for optimally forwarding (and possibly -// fragmenting) incoming/outgoing HTLCs amongst all active interfaces and their -// links. The duties of the forwarder are similar to that of a network switch, -// in that it facilitates multi-hop payments by acting as a central messaging -// bus. The switch communicates will active links to create, manage, and tear -// down active onion routed payments. Each active channel is modeled as -// networked device with metadata such as the available payment bandwidth, and -// total link capacity. -// -// NOTE: This MUST be run as a goroutine. -func (s *Switch) htlcForwarder() { - defer s.wg.Done() - - defer func() { - s.blockEpochStream.Cancel() - - // Remove all links once we've been signalled for shutdown. 
- var linksToStop []ChannelLink - s.indexMtx.Lock() - for _, link := range s.linkIndex { - activeLink := s.removeLink(link.ChanID()) - if activeLink == nil { - log.Errorf("unable to remove ChannelLink(%v) "+ - "on stop", link.ChanID()) - continue - } - linksToStop = append(linksToStop, activeLink) - } - for _, link := range s.pendingLinkIndex { - pendingLink := s.removeLink(link.ChanID()) - if pendingLink == nil { - log.Errorf("unable to remove ChannelLink(%v) "+ - "on stop", link.ChanID()) - continue - } - linksToStop = append(linksToStop, pendingLink) - } - s.indexMtx.Unlock() - - // Now that all pending and live links have been removed from - // the forwarding indexes, stop each one before shutting down. - // We'll shut them down in parallel to make exiting as fast as - // possible. - var wg sync.WaitGroup - for _, link := range linksToStop { - wg.Add(1) - go func(l ChannelLink) { - defer wg.Done() - l.Stop() - }(link) - } - wg.Wait() - - // Before we exit fully, we'll attempt to flush out any - // forwarding events that may still be lingering since the last - // batch flush. - if err := s.FlushForwardingEvents(); err != nil { - log.Errorf("unable to flush forwarding events: %v", err) - } - }() - - // TODO(roasbeef): cleared vs settled distinction - var ( - totalNumUpdates uint64 - totalSatSent btcutil.Amount - totalSatRecv btcutil.Amount - ) - s.cfg.LogEventTicker.Resume() - defer s.cfg.LogEventTicker.Stop() - - // Every 15 seconds, we'll flush out the forwarding events that - // occurred during that period. - s.cfg.FwdEventTicker.Resume() - defer s.cfg.FwdEventTicker.Stop() - - defer s.cfg.AckEventTicker.Stop() - -out: - for { - - // If the set of pending settle/fail entries is non-zero, - // reinstate the ack ticker so we can batch ack them. 
- if len(s.pendingSettleFails) > 0 { - s.cfg.AckEventTicker.Resume() - } - - select { - case blockEpoch, ok := <-s.blockEpochStream.Epochs: - if !ok { - break out - } - - atomic.StoreUint32(&s.bestHeight, uint32(blockEpoch.Height)) - - // A local close request has arrived, we'll forward this to the - // relevant link (if it exists) so the channel can be - // cooperatively closed (if possible). - case req := <-s.chanCloseRequests: - chanID := lnwire.NewChanIDFromOutPoint(req.ChanPoint) - - s.indexMtx.RLock() - link, ok := s.linkIndex[chanID] - if !ok { - s.indexMtx.RUnlock() - - req.Err <- er.Errorf("no peer for channel with "+ - "chan_id=%x", chanID[:]) - continue - } - s.indexMtx.RUnlock() - - peerPub := link.Peer().PubKey() - log.Debugf("Requesting local channel close: peer=%v, "+ - "chan_id=%x", link.Peer(), chanID[:]) - - go s.cfg.LocalChannelClose(peerPub[:], req) - - case resolutionMsg := <-s.resolutionMsgs: - pkt := &htlcPacket{ - outgoingChanID: resolutionMsg.SourceChan, - outgoingHTLCID: resolutionMsg.HtlcIndex, - isResolution: true, - } - - // Resolution messages will either be cancelling - // backwards an existing HTLC, or settling a previously - // outgoing HTLC. Based on this, we'll map the message - // to the proper htlcPacket. - if resolutionMsg.Failure != nil { - pkt.htlc = &lnwire.UpdateFailHTLC{} - } else { - pkt.htlc = &lnwire.UpdateFulfillHTLC{ - PaymentPreimage: *resolutionMsg.PreImage, - } - } - - log.Infof("Received outside contract resolution, "+ - "mapping to: %v", spew.Sdump(pkt)) - - // We don't check the error, as the only failure we can - // encounter is due to the circuit already being - // closed. This is fine, as processing this message is - // meant to be idempotent. 
- err := s.handlePacketForward(pkt) - if err != nil { - log.Errorf("Unable to forward resolution msg: %v", err) - } - - // With the message processed, we'll now close out - close(resolutionMsg.doneChan) - - // A new packet has arrived for forwarding, we'll interpret the - // packet concretely, then either forward it along, or - // interpret a return packet to a locally initialized one. - case cmd := <-s.htlcPlex: - cmd.err <- s.handlePacketForward(cmd.pkt) - - // When this time ticks, then it indicates that we should - // collect all the forwarding events since the last internal, - // and write them out to our log. - case <-s.cfg.FwdEventTicker.Ticks(): - s.wg.Add(1) - go func() { - defer s.wg.Done() - - if err := s.FlushForwardingEvents(); err != nil { - log.Errorf("unable to flush "+ - "forwarding events: %v", err) - } - }() - - // The log ticker has fired, so we'll calculate some forwarding - // stats for the last 10 seconds to display within the logs to - // users. - case <-s.cfg.LogEventTicker.Ticks(): - // First, we'll collate the current running tally of - // our forwarding stats. - prevSatSent := totalSatSent - prevSatRecv := totalSatRecv - prevNumUpdates := totalNumUpdates - - var ( - newNumUpdates uint64 - newSatSent btcutil.Amount - newSatRecv btcutil.Amount - ) - - // Next, we'll run through all the registered links and - // compute their up-to-date forwarding stats. - s.indexMtx.RLock() - for _, link := range s.linkIndex { - // TODO(roasbeef): when links first registered - // stats printed. - updates, sent, recv := link.Stats() - newNumUpdates += updates - newSatSent += sent.ToSatoshis() - newSatRecv += recv.ToSatoshis() - } - s.indexMtx.RUnlock() - - var ( - diffNumUpdates uint64 - diffSatSent btcutil.Amount - diffSatRecv btcutil.Amount - ) - - // If this is the first time we're computing these - // stats, then the diff is just the new value. We do - // this in order to avoid integer underflow issues. 
- if prevNumUpdates == 0 { - diffNumUpdates = newNumUpdates - diffSatSent = newSatSent - diffSatRecv = newSatRecv - } else { - diffNumUpdates = newNumUpdates - prevNumUpdates - diffSatSent = newSatSent - prevSatSent - diffSatRecv = newSatRecv - prevSatRecv - } - - // If the diff of num updates is zero, then we haven't - // forwarded anything in the last 10 seconds, so we can - // skip this update. - if diffNumUpdates == 0 { - continue - } - - // If the diff of num updates is negative, then some - // links may have been unregistered from the switch, so - // we'll update our stats to only include our registered - // links. - if int64(diffNumUpdates) < 0 { - totalNumUpdates = newNumUpdates - totalSatSent = newSatSent - totalSatRecv = newSatRecv - continue - } - - // Otherwise, we'll log this diff, then accumulate the - // new stats into the running total. - log.Debugf("Sent %d satoshis and received %d satoshis "+ - "in the last 10 seconds (%f tx/sec)", - diffSatSent, diffSatRecv, - float64(diffNumUpdates)/10) - - totalNumUpdates += diffNumUpdates - totalSatSent += diffSatSent - totalSatRecv += diffSatRecv - - // The ack ticker has fired so if we have any settle/fail entries - // for a forwarding package to ack, we will do so here in a batch - // db call. - case <-s.cfg.AckEventTicker.Ticks(): - // If the current set is empty, pause the ticker. - if len(s.pendingSettleFails) == 0 { - s.cfg.AckEventTicker.Pause() - continue - } - - // Batch ack the settle/fail entries. - if err := s.ackSettleFail(s.pendingSettleFails...); err != nil { - log.Errorf("Unable to ack batch of settle/fails: %v", err) - continue - } - - log.Tracef("Acked %d settle fails: %v", len(s.pendingSettleFails), - log.C(func() string { - return spew.Sdump(s.pendingSettleFails) - })) - - // Reset the pendingSettleFails buffer while keeping acquired - // memory. 
- s.pendingSettleFails = s.pendingSettleFails[:0] - - case <-s.quit: - return - } - } -} - -// Start starts all helper goroutines required for the operation of the switch. -func (s *Switch) Start() er.R { - if !atomic.CompareAndSwapInt32(&s.started, 0, 1) { - log.Warn("Htlc Switch already started") - return er.New("htlc switch already started") - } - - log.Infof("Starting HTLC Switch") - - blockEpochStream, err := s.cfg.Notifier.RegisterBlockEpochNtfn(nil) - if err != nil { - return err - } - s.blockEpochStream = blockEpochStream - - s.wg.Add(1) - go s.htlcForwarder() - - if err := s.reforwardResponses(); err != nil { - s.Stop() - log.Errorf("unable to reforward responses: %v", err) - return err - } - - return nil -} - -// reforwardResponses for every known, non-pending channel, loads all associated -// forwarding packages and reforwards any Settle or Fail HTLCs found. This is -// used to resurrect the switch's mailboxes after a restart. -func (s *Switch) reforwardResponses() er.R { - openChannels, err := s.cfg.DB.FetchAllOpenChannels() - if err != nil { - return err - } - - for _, openChannel := range openChannels { - shortChanID := openChannel.ShortChanID() - - // Locally-initiated payments never need reforwarding. - if shortChanID == hop.Source { - continue - } - - // If the channel is pending, it should have no forwarding - // packages, and nothing to reforward. - if openChannel.IsPending { - continue - } - - // Channels in open or waiting-close may still have responses in - // their forwarding packages. We will continue to reattempt - // forwarding on startup until the channel is fully-closed. - // - // Load this channel's forwarding packages, and deliver them to - // the switch. 
- fwdPkgs, err := s.loadChannelFwdPkgs(shortChanID) - if err != nil { - log.Errorf("unable to load forwarding "+ - "packages for %v: %v", shortChanID, err) - return err - } - - s.reforwardSettleFails(fwdPkgs) - } - - return nil -} - -// loadChannelFwdPkgs loads all forwarding packages owned by the `source` short -// channel identifier. -func (s *Switch) loadChannelFwdPkgs(source lnwire.ShortChannelID) ([]*channeldb.FwdPkg, er.R) { - - var fwdPkgs []*channeldb.FwdPkg - if err := kvdb.View(s.cfg.DB, func(tx kvdb.RTx) er.R { - var err er.R - fwdPkgs, err = s.cfg.SwitchPackager.LoadChannelFwdPkgs( - tx, source, - ) - return err - }, func() { - fwdPkgs = nil - }); err != nil { - return nil, err - } - - return fwdPkgs, nil -} - -// reforwardSettleFails parses the Settle and Fail HTLCs from the list of -// forwarding packages, and reforwards those that have not been acknowledged. -// This is intended to occur on startup, in order to recover the switch's -// mailboxes, and to ensure that responses can be propagated in case the -// outgoing link never comes back online. -// -// NOTE: This should mimic the behavior processRemoteSettleFails. -func (s *Switch) reforwardSettleFails(fwdPkgs []*channeldb.FwdPkg) { - for _, fwdPkg := range fwdPkgs { - settleFails, err := lnwallet.PayDescsFromRemoteLogUpdates( - fwdPkg.Source, fwdPkg.Height, fwdPkg.SettleFails, - ) - if err != nil { - log.Errorf("Unable to process remote log updates: %v", - err) - continue - } - - switchPackets := make([]*htlcPacket, 0, len(settleFails)) - for i, pd := range settleFails { - - // Skip any settles or fails that have already been - // acknowledged by the incoming link that originated the - // forwarded Add. - if fwdPkg.SettleFailFilter.Contains(uint16(i)) { - continue - } - - switch pd.EntryType { - - // A settle for an HTLC we previously forwarded HTLC has - // been received. So we'll forward the HTLC to the - // switch which will handle propagating the settle to - // the prior hop. 
- case lnwallet.Settle: - settlePacket := &htlcPacket{ - outgoingChanID: fwdPkg.Source, - outgoingHTLCID: pd.ParentIndex, - destRef: pd.DestRef, - htlc: &lnwire.UpdateFulfillHTLC{ - PaymentPreimage: pd.RPreimage, - }, - } - - // Add the packet to the batch to be forwarded, and - // notify the overflow queue that a spare spot has been - // freed up within the commitment state. - switchPackets = append(switchPackets, settlePacket) - - // A failureCode message for a previously forwarded HTLC has been - // received. As a result a new slot will be freed up in our - // commitment state, so we'll forward this to the switch so the - // backwards undo can continue. - case lnwallet.Fail: - // Fetch the reason the HTLC was canceled so - // we can continue to propagate it. This - // failure originated from another node, so - // the linkFailure field is not set on this - // packet. - failPacket := &htlcPacket{ - outgoingChanID: fwdPkg.Source, - outgoingHTLCID: pd.ParentIndex, - destRef: pd.DestRef, - htlc: &lnwire.UpdateFailHTLC{ - Reason: lnwire.OpaqueReason(pd.FailReason), - }, - } - - // Add the packet to the batch to be forwarded, and - // notify the overflow queue that a spare spot has been - // freed up within the commitment state. - switchPackets = append(switchPackets, failPacket) - } - } - - // Since this send isn't tied to a specific link, we pass a nil - // link quit channel, meaning the send will fail only if the - // switch receives a shutdown request. - if err := s.ForwardPackets(nil, switchPackets...); err != nil { - log.Errorf("Unhandled error while reforwarding packets "+ - "settle/fail over htlcswitch: %v", err) - } - } -} - -// Stop gracefully stops all active helper goroutines, then waits until they've -// exited. 
-func (s *Switch) Stop() er.R { - if !atomic.CompareAndSwapInt32(&s.shutdown, 0, 1) { - log.Warn("Htlc Switch already stopped") - return er.New("htlc switch already shutdown") - } - - log.Infof("HTLC Switch shutting down") - - close(s.quit) - - s.wg.Wait() - - // Wait until all active goroutines have finished exiting before - // stopping the mailboxes, otherwise the mailbox map could still be - // accessed and modified. - s.mailOrchestrator.Stop() - - return nil -} - -// AddLink is used to initiate the handling of the add link command. The -// request will be propagated and handled in the main goroutine. -func (s *Switch) AddLink(link ChannelLink) er.R { - s.indexMtx.Lock() - defer s.indexMtx.Unlock() - - chanID := link.ChanID() - - // First, ensure that this link is not already active in the switch. - _, err := s.getLink(chanID) - if err == nil { - return er.Errorf("unable to add ChannelLink(%v), already "+ - "active", chanID) - } - - // Get and attach the mailbox for this link, which buffers packets in - // case there packets that we tried to deliver while this link was - // offline. - shortChanID := link.ShortChanID() - mailbox := s.mailOrchestrator.GetOrCreateMailBox(chanID, shortChanID) - link.AttachMailBox(mailbox) - - if err := link.Start(); err != nil { - s.removeLink(chanID) - return err - } - - if shortChanID == hop.Source { - log.Infof("Adding pending link chan_id=%v, short_chan_id=%v", - chanID, shortChanID) - - s.pendingLinkIndex[chanID] = link - } else { - log.Infof("Adding live link chan_id=%v, short_chan_id=%v", - chanID, shortChanID) - - s.addLiveLink(link) - s.mailOrchestrator.BindLiveShortChanID( - mailbox, chanID, shortChanID, - ) - } - - return nil -} - -// addLiveLink adds a link to all associated forwarding index, this makes it a -// candidate for forwarding HTLCs. 
-func (s *Switch) addLiveLink(link ChannelLink) { - // We'll add the link to the linkIndex which lets us quickly - // look up a channel when we need to close or register it, and - // the forwarding index which'll be used when forwarding HTLC's - // in the multi-hop setting. - s.linkIndex[link.ChanID()] = link - s.forwardingIndex[link.ShortChanID()] = link - - // Next we'll add the link to the interface index so we can - // quickly look up all the channels for a particular node. - peerPub := link.Peer().PubKey() - if _, ok := s.interfaceIndex[peerPub]; !ok { - s.interfaceIndex[peerPub] = make(map[lnwire.ChannelID]ChannelLink) - } - s.interfaceIndex[peerPub][link.ChanID()] = link -} - -// GetLink is used to initiate the handling of the get link command. The -// request will be propagated/handled to/in the main goroutine. -func (s *Switch) GetLink(chanID lnwire.ChannelID) (ChannelLink, er.R) { - s.indexMtx.RLock() - defer s.indexMtx.RUnlock() - - return s.getLink(chanID) -} - -// getLink returns the link stored in either the pending index or the live -// lindex. -func (s *Switch) getLink(chanID lnwire.ChannelID) (ChannelLink, er.R) { - link, ok := s.linkIndex[chanID] - if !ok { - link, ok = s.pendingLinkIndex[chanID] - if !ok { - return nil, ErrChannelLinkNotFound.Default() - } - } - - return link, nil -} - -// getLinkByShortID attempts to return the link which possesses the target -// short channel ID. -// -// NOTE: This MUST be called with the indexMtx held. -func (s *Switch) getLinkByShortID(chanID lnwire.ShortChannelID) (ChannelLink, er.R) { - link, ok := s.forwardingIndex[chanID] - if !ok { - return nil, ErrChannelLinkNotFound.Default() - } - - return link, nil -} - -// HasActiveLink returns true if the given channel ID has a link in the link -// index AND the link is eligible to forward. 
-func (s *Switch) HasActiveLink(chanID lnwire.ChannelID) bool { - s.indexMtx.RLock() - defer s.indexMtx.RUnlock() - - if link, ok := s.linkIndex[chanID]; ok { - return link.EligibleToForward() - } - - return false -} - -// RemoveLink purges the switch of any link associated with chanID. If a pending -// or active link is not found, this method does nothing. Otherwise, the method -// returns after the link has been completely shutdown. -func (s *Switch) RemoveLink(chanID lnwire.ChannelID) { - s.indexMtx.Lock() - link := s.removeLink(chanID) - s.indexMtx.Unlock() - - if link != nil { - link.Stop() - } -} - -// removeLink is used to remove and stop the channel link. -// -// NOTE: This MUST be called with the indexMtx held. -func (s *Switch) removeLink(chanID lnwire.ChannelID) ChannelLink { - log.Infof("Removing channel link with ChannelID(%v)", chanID) - - link, err := s.getLink(chanID) - if err != nil { - return nil - } - - // Remove the channel from live link indexes. - delete(s.pendingLinkIndex, link.ChanID()) - delete(s.linkIndex, link.ChanID()) - delete(s.forwardingIndex, link.ShortChanID()) - - // If the link has been added to the peer index, then we'll move to - // delete the entry within the index. - peerPub := link.Peer().PubKey() - if peerIndex, ok := s.interfaceIndex[peerPub]; ok { - delete(peerIndex, link.ChanID()) - - // If after deletion, there are no longer any links, then we'll - // remove the interface map all together. - if len(peerIndex) == 0 { - delete(s.interfaceIndex, peerPub) - } - } - - return link -} - -// UpdateShortChanID updates the short chan ID for an existing channel. This is -// required in the case of a re-org and re-confirmation or a channel, or in the -// case that a link was added to the switch before its short chan ID was known. -func (s *Switch) UpdateShortChanID(chanID lnwire.ChannelID) er.R { - s.indexMtx.Lock() - defer s.indexMtx.Unlock() - - // Locate the target link in the pending link index. 
If no such link - // exists, then we will ignore the request. - link, ok := s.pendingLinkIndex[chanID] - if !ok { - return er.Errorf("link %v not found", chanID) - } - - oldShortChanID := link.ShortChanID() - - // Try to update the link's short channel ID, returning early if this - // update failed. - shortChanID, err := link.UpdateShortChanID() - if err != nil { - return err - } - - // Reject any blank short channel ids. - if shortChanID == hop.Source { - return er.Errorf("refusing trivial short_chan_id for chan_id=%v"+ - "live link", chanID) - } - - log.Infof("Updated short_chan_id for ChannelLink(%v): old=%v, new=%v", - chanID, oldShortChanID, shortChanID) - - // Since the link was in the pending state before, we will remove it - // from the pending link index and add it to the live link index so that - // it can be available in forwarding. - delete(s.pendingLinkIndex, chanID) - s.addLiveLink(link) - - // Finally, alert the mail orchestrator to the change of short channel - // ID, and deliver any unclaimed packets to the link. - mailbox := s.mailOrchestrator.GetOrCreateMailBox(chanID, shortChanID) - s.mailOrchestrator.BindLiveShortChanID( - mailbox, chanID, shortChanID, - ) - - return nil -} - -// GetLinksByInterface fetches all the links connected to a particular node -// identified by the serialized compressed form of its public key. -func (s *Switch) GetLinksByInterface(hop [33]byte) ([]ChannelLink, er.R) { - s.indexMtx.RLock() - defer s.indexMtx.RUnlock() - - return s.getLinks(hop) -} - -// getLinks is function which returns the channel links of the peer by hop -// destination id. -// -// NOTE: This MUST be called with the indexMtx held. 
-func (s *Switch) getLinks(destination [33]byte) ([]ChannelLink, er.R) { - links, ok := s.interfaceIndex[destination] - if !ok { - return nil, ErrNoLinksFound.Default() - } - - channelLinks := make([]ChannelLink, 0, len(links)) - for _, link := range links { - channelLinks = append(channelLinks, link) - } - - return channelLinks, nil -} - -// CircuitModifier returns a reference to subset of the interfaces provided by -// the circuit map, to allow links to open and close circuits. -func (s *Switch) CircuitModifier() CircuitModifier { - return s.circuits -} - -// CircuitLookup returns a reference to subset of the interfaces provided by the -// circuit map, to allow looking up circuits. -func (s *Switch) CircuitLookup() CircuitLookup { - return s.circuits -} - -// commitCircuits persistently adds a circuit to the switch's circuit map. -func (s *Switch) commitCircuits(circuits ...*PaymentCircuit) ( - *CircuitFwdActions, er.R) { - - return s.circuits.CommitCircuits(circuits...) -} - -// openCircuits preemptively writes the keystones for Adds that are about to be -// added to a commitment txn. -func (s *Switch) openCircuits(keystones ...Keystone) er.R { - return s.circuits.OpenCircuits(keystones...) -} - -// deleteCircuits persistently removes the circuit, and keystone if present, -// from the circuit map. -func (s *Switch) deleteCircuits(inKeys ...CircuitKey) er.R { - return s.circuits.DeleteCircuits(inKeys...) -} - -// FlushForwardingEvents flushes out the set of pending forwarding events to -// the persistent log. This will be used by the switch to periodically flush -// out the set of forwarding events to disk. External callers can also use this -// method to ensure all data is flushed to dis before querying the log. -func (s *Switch) FlushForwardingEvents() er.R { - // First, we'll obtain a copy of the current set of pending forwarding - // events. - s.fwdEventMtx.Lock() - - // If we won't have any forwarding events, then we can exit early. 
- if len(s.pendingFwdingEvents) == 0 { - s.fwdEventMtx.Unlock() - return nil - } - - events := make([]channeldb.ForwardingEvent, len(s.pendingFwdingEvents)) - copy(events[:], s.pendingFwdingEvents[:]) - - // With the copy obtained, we can now clear out the header pointer of - // the current slice. This way, we can re-use the underlying storage - // allocated for the slice. - s.pendingFwdingEvents = s.pendingFwdingEvents[:0] - s.fwdEventMtx.Unlock() - - // Finally, we'll write out the copied events to the persistent - // forwarding log. - return s.cfg.FwdingLog.AddForwardingEvents(events) -} - -// BestHeight returns the best height known to the switch. -func (s *Switch) BestHeight() uint32 { - return atomic.LoadUint32(&s.bestHeight) -} diff --git a/lnd/htlcswitch/switch_test.go b/lnd/htlcswitch/switch_test.go deleted file mode 100644 index e1216be5..00000000 --- a/lnd/htlcswitch/switch_test.go +++ /dev/null @@ -1,3303 +0,0 @@ -package htlcswitch - -import ( - "crypto/rand" - "crypto/sha256" - "io/ioutil" - "reflect" - "testing" - "time" - - "github.com/davecgh/go-spew/spew" - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/htlcswitch/hop" - "github.com/pkt-cash/pktd/lnd/lntypes" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/ticker" -) - -var zeroCircuit = channeldb.CircuitKey{} - -func genPreimage() ([32]byte, er.R) { - var preimage [32]byte - if _, err := util.ReadFull(rand.Reader, preimage[:]); err != nil { - return preimage, err - } - return preimage, nil -} - -// TestSwitchAddDuplicateLink tests that the switch will reject duplicate links -// for both pending and live links. It also tests that we can successfully -// add a link after having removed it. 
-func TestSwitchAddDuplicateLink(t *testing.T) { - t.Parallel() - - alicePeer, err := newMockServer( - t, "alice", testStartingHeight, nil, testDefaultDelta, - ) - if err != nil { - t.Fatalf("unable to create alice server: %v", err) - } - - s, err := initSwitchWithDB(testStartingHeight, nil) - if err != nil { - t.Fatalf("unable to init switch: %v", err) - } - if err := s.Start(); err != nil { - t.Fatalf("unable to start switch: %v", err) - } - defer s.Stop() - - chanID1, _, aliceChanID, _ := genIDs() - - pendingChanID := lnwire.ShortChannelID{} - - aliceChannelLink := newMockChannelLink( - s, chanID1, pendingChanID, alicePeer, false, - ) - if err := s.AddLink(aliceChannelLink); err != nil { - t.Fatalf("unable to add alice link: %v", err) - } - - // Alice should have a pending link, adding again should fail. - if err := s.AddLink(aliceChannelLink); err == nil { - t.Fatalf("adding duplicate link should have failed") - } - - // Update the short chan id of the channel, so that the link goes live. - aliceChannelLink.setLiveShortChanID(aliceChanID) - err = s.UpdateShortChanID(chanID1) - if err != nil { - t.Fatalf("unable to update alice short_chan_id: %v", err) - } - - // Alice should have a live link, adding again should fail. - if err := s.AddLink(aliceChannelLink); err == nil { - t.Fatalf("adding duplicate link should have failed") - } - - // Remove the live link to ensure the indexes are cleared. - s.RemoveLink(chanID1) - - // Alice has no links, adding should succeed. - if err := s.AddLink(aliceChannelLink); err != nil { - t.Fatalf("unable to add alice link: %v", err) - } -} - -// TestSwitchHasActiveLink tests the behavior of HasActiveLink, and asserts that -// it only returns true if a link's short channel id has confirmed (meaning the -// channel is no longer pending) and it's EligibleToForward method returns true, -// i.e. it has received FundingLocked from the remote peer. 
-func TestSwitchHasActiveLink(t *testing.T) { - t.Parallel() - - alicePeer, err := newMockServer( - t, "alice", testStartingHeight, nil, testDefaultDelta, - ) - if err != nil { - t.Fatalf("unable to create alice server: %v", err) - } - - s, err := initSwitchWithDB(testStartingHeight, nil) - if err != nil { - t.Fatalf("unable to init switch: %v", err) - } - if err := s.Start(); err != nil { - t.Fatalf("unable to start switch: %v", err) - } - defer s.Stop() - - chanID1, _, aliceChanID, _ := genIDs() - - pendingChanID := lnwire.ShortChannelID{} - - aliceChannelLink := newMockChannelLink( - s, chanID1, pendingChanID, alicePeer, false, - ) - if err := s.AddLink(aliceChannelLink); err != nil { - t.Fatalf("unable to add alice link: %v", err) - } - - // The link has been added, but it's still pending. HasActiveLink should - // return false since the link has not been added to the linkIndex - // containing live links. - if s.HasActiveLink(chanID1) { - t.Fatalf("link should not be active yet, still pending") - } - - // Update the short chan id of the channel, so that the link goes live. - aliceChannelLink.setLiveShortChanID(aliceChanID) - err = s.UpdateShortChanID(chanID1) - if err != nil { - t.Fatalf("unable to update alice short_chan_id: %v", err) - } - - // UpdateShortChanID will cause the mock link to become eligible to - // forward. However, we can simulate the event where the short chan id - // is confirmed, but funding locked has yet to be received by resetting - // the mock link's eligibility to false. - aliceChannelLink.eligible = false - - // Now, even though the link has been added to the linkIndex because the - // short channel id has confirmed, we should still see HasActiveLink - // fail because EligibleToForward should return false. - if s.HasActiveLink(chanID1) { - t.Fatalf("link should not be active yet, still ineligible") - } - - // Finally, simulate the link receiving funding locked by setting its - // eligibility to true. 
- aliceChannelLink.eligible = true - - // The link should now be reported as active, since EligibleToForward - // returns true and the link is in the linkIndex. - if !s.HasActiveLink(chanID1) { - t.Fatalf("link should not be active now") - } -} - -// TestSwitchSendPending checks the inability of htlc switch to forward adds -// over pending links, and the UpdateShortChanID makes a pending link live. -func TestSwitchSendPending(t *testing.T) { - t.Parallel() - - alicePeer, err := newMockServer( - t, "alice", testStartingHeight, nil, testDefaultDelta, - ) - if err != nil { - t.Fatalf("unable to create alice server: %v", err) - } - - bobPeer, err := newMockServer( - t, "bob", testStartingHeight, nil, testDefaultDelta, - ) - if err != nil { - t.Fatalf("unable to create bob server: %v", err) - } - - s, err := initSwitchWithDB(testStartingHeight, nil) - if err != nil { - t.Fatalf("unable to init switch: %v", err) - } - if err := s.Start(); err != nil { - t.Fatalf("unable to start switch: %v", err) - } - defer s.Stop() - - chanID1, chanID2, aliceChanID, bobChanID := genIDs() - - pendingChanID := lnwire.ShortChannelID{} - - aliceChannelLink := newMockChannelLink( - s, chanID1, pendingChanID, alicePeer, false, - ) - if err := s.AddLink(aliceChannelLink); err != nil { - t.Fatalf("unable to add alice link: %v", err) - } - - bobChannelLink := newMockChannelLink( - s, chanID2, bobChanID, bobPeer, true, - ) - if err := s.AddLink(bobChannelLink); err != nil { - t.Fatalf("unable to add bob link: %v", err) - } - - // Create request which should is being forwarded from Bob channel - // link to Alice channel link. 
- preimage, err := genPreimage() - if err != nil { - t.Fatalf("unable to generate preimage: %v", err) - } - rhash := sha256.Sum256(preimage[:]) - packet := &htlcPacket{ - incomingChanID: bobChanID, - incomingHTLCID: 0, - outgoingChanID: aliceChanID, - obfuscator: NewMockObfuscator(), - htlc: &lnwire.UpdateAddHTLC{ - PaymentHash: rhash, - Amount: 1, - }, - } - - // Send the ADD packet, this should not be forwarded out to the link - // since there are no eligible links. - if err = s.ForwardPackets(nil, packet); err != nil { - t.Fatal(err) - } - select { - case p := <-bobChannelLink.packets: - if p.linkFailure != nil { - err = er.E(p.linkFailure) - } - case <-time.After(time.Second): - t.Fatal("no timely reply from switch") - } - errr := er.Wrapped(err) - linkErr, ok := errr.(*LinkError) - if !ok { - t.Fatalf("expected link error, got: %T", err) - } - if linkErr.WireMessage().Code() != lnwire.CodeUnknownNextPeer { - t.Fatalf("expected fail unknown next peer, got: %T", - linkErr.WireMessage().Code()) - } - - // No message should be sent, since the packet was failed. - select { - case <-aliceChannelLink.packets: - t.Fatal("expected not to receive message") - case <-time.After(time.Second): - } - - // Since the packet should have been failed, there should be no active - // circuits. - if s.circuits.NumOpen() != 0 { - t.Fatal("wrong amount of circuits") - } - - // Now, update Alice's link with her final short channel id. This should - // move the link to the live state. - aliceChannelLink.setLiveShortChanID(aliceChanID) - err = s.UpdateShortChanID(chanID1) - if err != nil { - t.Fatalf("unable to update alice short_chan_id: %v", err) - } - - // Increment the packet's HTLC index, so that it does not collide with - // the prior attempt. - packet.incomingHTLCID++ - - // Handle the request and checks that bob channel link received it. 
- if err := s.ForwardPackets(nil, packet); err != nil { - t.Fatalf("unexpected forward failure: %v", err) - } - - // Since Alice's link is now active, this packet should succeed. - select { - case <-aliceChannelLink.packets: - case <-time.After(time.Second): - t.Fatal("request was not propagated to alice") - } -} - -// TestSwitchForward checks the ability of htlc switch to forward add/settle -// requests. -func TestSwitchForward(t *testing.T) { - t.Parallel() - - alicePeer, err := newMockServer( - t, "alice", testStartingHeight, nil, testDefaultDelta, - ) - if err != nil { - t.Fatalf("unable to create alice server: %v", err) - } - bobPeer, err := newMockServer( - t, "bob", testStartingHeight, nil, testDefaultDelta, - ) - if err != nil { - t.Fatalf("unable to create bob server: %v", err) - } - - s, err := initSwitchWithDB(testStartingHeight, nil) - if err != nil { - t.Fatalf("unable to init switch: %v", err) - } - if err := s.Start(); err != nil { - t.Fatalf("unable to start switch: %v", err) - } - defer s.Stop() - - chanID1, chanID2, aliceChanID, bobChanID := genIDs() - - aliceChannelLink := newMockChannelLink( - s, chanID1, aliceChanID, alicePeer, true, - ) - bobChannelLink := newMockChannelLink( - s, chanID2, bobChanID, bobPeer, true, - ) - if err := s.AddLink(aliceChannelLink); err != nil { - t.Fatalf("unable to add alice link: %v", err) - } - if err := s.AddLink(bobChannelLink); err != nil { - t.Fatalf("unable to add bob link: %v", err) - } - - // Create request which should be forwarded from Alice channel link to - // bob channel link. 
- preimage, err := genPreimage() - if err != nil { - t.Fatalf("unable to generate preimage: %v", err) - } - rhash := sha256.Sum256(preimage[:]) - packet := &htlcPacket{ - incomingChanID: aliceChannelLink.ShortChanID(), - incomingHTLCID: 0, - outgoingChanID: bobChannelLink.ShortChanID(), - obfuscator: NewMockObfuscator(), - htlc: &lnwire.UpdateAddHTLC{ - PaymentHash: rhash, - Amount: 1, - }, - } - - // Handle the request and checks that bob channel link received it. - if err := s.ForwardPackets(nil, packet); err != nil { - t.Fatal(err) - } - - select { - case <-bobChannelLink.packets: - if err := bobChannelLink.completeCircuit(packet); err != nil { - t.Fatalf("unable to complete payment circuit: %v", err) - } - case <-time.After(time.Second): - t.Fatal("request was not propagated to destination") - } - - if s.circuits.NumOpen() != 1 { - t.Fatal("wrong amount of circuits") - } - - if !s.IsForwardedHTLC(bobChannelLink.ShortChanID(), 0) { - t.Fatal("htlc should be identified as forwarded") - } - - // Create settle request pretending that bob link handled the add htlc - // request and sent the htlc settle request back. This request should - // be forwarder back to Alice link. - packet = &htlcPacket{ - outgoingChanID: bobChannelLink.ShortChanID(), - outgoingHTLCID: 0, - amount: 1, - htlc: &lnwire.UpdateFulfillHTLC{ - PaymentPreimage: preimage, - }, - } - - // Handle the request and checks that payment circuit works properly. 
- if err := s.ForwardPackets(nil, packet); err != nil { - t.Fatal(err) - } - - select { - case pkt := <-aliceChannelLink.packets: - if err := aliceChannelLink.deleteCircuit(pkt); err != nil { - t.Fatalf("unable to remove circuit: %v", err) - } - case <-time.After(time.Second): - t.Fatal("request was not propagated to channelPoint") - } - - if s.circuits.NumOpen() != 0 { - t.Fatal("wrong amount of circuits") - } -} - -func TestSwitchForwardFailAfterFullAdd(t *testing.T) { - t.Parallel() - - chanID1, chanID2, aliceChanID, bobChanID := genIDs() - - alicePeer, err := newMockServer( - t, "alice", testStartingHeight, nil, testDefaultDelta, - ) - if err != nil { - t.Fatalf("unable to create alice server: %v", err) - } - bobPeer, err := newMockServer( - t, "bob", testStartingHeight, nil, testDefaultDelta, - ) - if err != nil { - t.Fatalf("unable to create bob server: %v", err) - } - - tempPath, errr := ioutil.TempDir("", "circuitdb") - if errr != nil { - t.Fatalf("unable to temporary path: %v", errr) - } - - cdb, err := channeldb.Open(tempPath) - if err != nil { - t.Fatalf("unable to open channeldb: %v", err) - } - - s, err := initSwitchWithDB(testStartingHeight, cdb) - if err != nil { - t.Fatalf("unable to init switch: %v", err) - } - if err := s.Start(); err != nil { - t.Fatalf("unable to start switch: %v", err) - } - - // Even though we intend to Stop s later in the test, it is safe to - // defer this Stop since its execution it is protected by an atomic - // guard, guaranteeing it executes at most once. 
- defer s.Stop() - - aliceChannelLink := newMockChannelLink( - s, chanID1, aliceChanID, alicePeer, true, - ) - bobChannelLink := newMockChannelLink( - s, chanID2, bobChanID, bobPeer, true, - ) - if err := s.AddLink(aliceChannelLink); err != nil { - t.Fatalf("unable to add alice link: %v", err) - } - if err := s.AddLink(bobChannelLink); err != nil { - t.Fatalf("unable to add bob link: %v", err) - } - - // Create request which should be forwarded from Alice channel link to - // bob channel link. - preimage := [sha256.Size]byte{1} - rhash := sha256.Sum256(preimage[:]) - ogPacket := &htlcPacket{ - incomingChanID: aliceChannelLink.ShortChanID(), - incomingHTLCID: 0, - outgoingChanID: bobChannelLink.ShortChanID(), - obfuscator: NewMockObfuscator(), - htlc: &lnwire.UpdateAddHTLC{ - PaymentHash: rhash, - Amount: 1, - }, - } - - if s.circuits.NumPending() != 0 { - t.Fatalf("wrong amount of half circuits") - } - if s.circuits.NumOpen() != 0 { - t.Fatalf("wrong amount of circuits") - } - - // Handle the request and checks that bob channel link received it. - if err := s.ForwardPackets(nil, ogPacket); err != nil { - t.Fatal(err) - } - - if s.circuits.NumPending() != 1 { - t.Fatalf("wrong amount of half circuits") - } - if s.circuits.NumOpen() != 0 { - t.Fatalf("wrong amount of circuits") - } - - // Pull packet from bob's link, but do not perform a full add. - select { - case packet := <-bobChannelLink.packets: - // Complete the payment circuit and assign the outgoing htlc id - // before restarting. 
- if err := bobChannelLink.completeCircuit(packet); err != nil { - t.Fatalf("unable to complete payment circuit: %v", err) - } - - case <-time.After(time.Second): - t.Fatal("request was not propagated to destination") - } - - if s.circuits.NumPending() != 1 { - t.Fatalf("wrong amount of half circuits") - } - if s.circuits.NumOpen() != 1 { - t.Fatalf("wrong amount of circuits") - } - - // Now we will restart bob, leaving the forwarding decision for this - // htlc is in the half-added state. - if err := s.Stop(); err != nil { - t.Fatalf(err.String()) - } - - if err := cdb.Close(); err != nil { - t.Fatalf(err.String()) - } - - cdb2, err := channeldb.Open(tempPath) - if err != nil { - t.Fatalf("unable to reopen channeldb: %v", err) - } - - s2, err := initSwitchWithDB(testStartingHeight, cdb2) - if err != nil { - t.Fatalf("unable reinit switch: %v", err) - } - if err := s2.Start(); err != nil { - t.Fatalf("unable to restart switch: %v", err) - } - - // Even though we intend to Stop s2 later in the test, it is safe to - // defer this Stop since its execution it is protected by an atomic - // guard, guaranteeing it executes at most once. - defer s2.Stop() - - aliceChannelLink = newMockChannelLink( - s2, chanID1, aliceChanID, alicePeer, true, - ) - bobChannelLink = newMockChannelLink( - s2, chanID2, bobChanID, bobPeer, true, - ) - if err := s2.AddLink(aliceChannelLink); err != nil { - t.Fatalf("unable to add alice link: %v", err) - } - if err := s2.AddLink(bobChannelLink); err != nil { - t.Fatalf("unable to add bob link: %v", err) - } - - if s2.circuits.NumPending() != 1 { - t.Fatalf("wrong amount of half circuits") - } - if s2.circuits.NumOpen() != 1 { - t.Fatalf("wrong amount of circuits") - } - - // Craft a failure message from the remote peer. - fail := &htlcPacket{ - outgoingChanID: bobChannelLink.ShortChanID(), - outgoingHTLCID: 0, - amount: 1, - htlc: &lnwire.UpdateFailHTLC{}, - } - - // Send the fail packet from the remote peer through the switch. 
- if err := s2.ForwardPackets(nil, fail); err != nil { - t.Fatalf(err.String()) - } - - // Pull packet from alice's link, as it should have gone through - // successfully. - select { - case pkt := <-aliceChannelLink.packets: - if err := aliceChannelLink.completeCircuit(pkt); err != nil { - t.Fatalf("unable to remove circuit: %v", err) - } - case <-time.After(time.Second): - t.Fatal("request was not propagated to destination") - } - - // Circuit map should be empty now. - if s2.circuits.NumPending() != 0 { - t.Fatalf("wrong amount of half circuits") - } - if s2.circuits.NumOpen() != 0 { - t.Fatalf("wrong amount of circuits") - } - - // Send the fail packet from the remote peer through the switch. - if err := s.ForwardPackets(nil, fail); err != nil { - t.Fatal(err) - } - select { - case <-aliceChannelLink.packets: - t.Fatalf("expected duplicate fail to not arrive at the destination") - case <-time.After(time.Second): - } -} - -func TestSwitchForwardSettleAfterFullAdd(t *testing.T) { - t.Parallel() - - chanID1, chanID2, aliceChanID, bobChanID := genIDs() - - alicePeer, err := newMockServer( - t, "alice", testStartingHeight, nil, testDefaultDelta, - ) - if err != nil { - t.Fatalf("unable to create alice server: %v", err) - } - bobPeer, err := newMockServer( - t, "bob", testStartingHeight, nil, testDefaultDelta, - ) - if err != nil { - t.Fatalf("unable to create bob server: %v", err) - } - - tempPath, errr := ioutil.TempDir("", "circuitdb") - if errr != nil { - t.Fatalf("unable to temporary path: %v", errr) - } - - cdb, err := channeldb.Open(tempPath) - if err != nil { - t.Fatalf("unable to open channeldb: %v", err) - } - - s, err := initSwitchWithDB(testStartingHeight, cdb) - if err != nil { - t.Fatalf("unable to init switch: %v", err) - } - if err := s.Start(); err != nil { - t.Fatalf("unable to start switch: %v", err) - } - - // Even though we intend to Stop s later in the test, it is safe to - // defer this Stop since its execution it is protected by an atomic - // 
guard, guaranteeing it executes at most once. - defer s.Stop() - - aliceChannelLink := newMockChannelLink( - s, chanID1, aliceChanID, alicePeer, true, - ) - bobChannelLink := newMockChannelLink( - s, chanID2, bobChanID, bobPeer, true, - ) - if err := s.AddLink(aliceChannelLink); err != nil { - t.Fatalf("unable to add alice link: %v", err) - } - if err := s.AddLink(bobChannelLink); err != nil { - t.Fatalf("unable to add bob link: %v", err) - } - - // Create request which should be forwarded from Alice channel link to - // bob channel link. - preimage := [sha256.Size]byte{1} - rhash := sha256.Sum256(preimage[:]) - ogPacket := &htlcPacket{ - incomingChanID: aliceChannelLink.ShortChanID(), - incomingHTLCID: 0, - outgoingChanID: bobChannelLink.ShortChanID(), - obfuscator: NewMockObfuscator(), - htlc: &lnwire.UpdateAddHTLC{ - PaymentHash: rhash, - Amount: 1, - }, - } - - if s.circuits.NumPending() != 0 { - t.Fatalf("wrong amount of half circuits") - } - if s.circuits.NumOpen() != 0 { - t.Fatalf("wrong amount of circuits") - } - - // Handle the request and checks that bob channel link received it. - if err := s.ForwardPackets(nil, ogPacket); err != nil { - t.Fatal(err) - } - - if s.circuits.NumPending() != 1 { - t.Fatalf("wrong amount of half circuits") - } - if s.circuits.NumOpen() != 0 { - t.Fatalf("wrong amount of circuits") - } - - // Pull packet from bob's link, but do not perform a full add. - select { - case packet := <-bobChannelLink.packets: - // Complete the payment circuit and assign the outgoing htlc id - // before restarting. 
- if err := bobChannelLink.completeCircuit(packet); err != nil { - t.Fatalf("unable to complete payment circuit: %v", err) - } - - case <-time.After(time.Second): - t.Fatal("request was not propagated to destination") - } - - if s.circuits.NumPending() != 1 { - t.Fatalf("wrong amount of half circuits") - } - if s.circuits.NumOpen() != 1 { - t.Fatalf("wrong amount of circuits") - } - - // Now we will restart bob, leaving the forwarding decision for this - // htlc is in the half-added state. - if err := s.Stop(); err != nil { - t.Fatalf(err.String()) - } - - if err := cdb.Close(); err != nil { - t.Fatalf(err.String()) - } - - cdb2, err := channeldb.Open(tempPath) - if err != nil { - t.Fatalf("unable to reopen channeldb: %v", err) - } - - s2, err := initSwitchWithDB(testStartingHeight, cdb2) - if err != nil { - t.Fatalf("unable reinit switch: %v", err) - } - if err := s2.Start(); err != nil { - t.Fatalf("unable to restart switch: %v", err) - } - - // Even though we intend to Stop s2 later in the test, it is safe to - // defer this Stop since its execution it is protected by an atomic - // guard, guaranteeing it executes at most once. - defer s2.Stop() - - aliceChannelLink = newMockChannelLink( - s2, chanID1, aliceChanID, alicePeer, true, - ) - bobChannelLink = newMockChannelLink( - s2, chanID2, bobChanID, bobPeer, true, - ) - if err := s2.AddLink(aliceChannelLink); err != nil { - t.Fatalf("unable to add alice link: %v", err) - } - if err := s2.AddLink(bobChannelLink); err != nil { - t.Fatalf("unable to add bob link: %v", err) - } - - if s2.circuits.NumPending() != 1 { - t.Fatalf("wrong amount of half circuits") - } - if s2.circuits.NumOpen() != 1 { - t.Fatalf("wrong amount of circuits") - } - - // Craft a settle message from the remote peer. 
- settle := &htlcPacket{ - outgoingChanID: bobChannelLink.ShortChanID(), - outgoingHTLCID: 0, - amount: 1, - htlc: &lnwire.UpdateFulfillHTLC{ - PaymentPreimage: preimage, - }, - } - - // Send the settle packet from the remote peer through the switch. - if err := s2.ForwardPackets(nil, settle); err != nil { - t.Fatalf(err.String()) - } - - // Pull packet from alice's link, as it should have gone through - // successfully. - select { - case packet := <-aliceChannelLink.packets: - if err := aliceChannelLink.completeCircuit(packet); err != nil { - t.Fatalf("unable to complete circuit with in key=%s: %v", - packet.inKey(), err) - } - case <-time.After(time.Second): - t.Fatal("request was not propagated to destination") - } - - // Circuit map should be empty now. - if s2.circuits.NumPending() != 0 { - t.Fatalf("wrong amount of half circuits") - } - if s2.circuits.NumOpen() != 0 { - t.Fatalf("wrong amount of circuits") - } - - // Send the settle packet again, which not arrive at destination. - if err := s2.ForwardPackets(nil, settle); err != nil { - t.Fatal(err) - } - select { - case <-bobChannelLink.packets: - t.Fatalf("expected duplicate fail to not arrive at the destination") - case <-time.After(time.Second): - } -} - -func TestSwitchForwardDropAfterFullAdd(t *testing.T) { - t.Parallel() - - chanID1, chanID2, aliceChanID, bobChanID := genIDs() - - alicePeer, err := newMockServer( - t, "alice", testStartingHeight, nil, testDefaultDelta, - ) - if err != nil { - t.Fatalf("unable to create alice server: %v", err) - } - bobPeer, err := newMockServer( - t, "bob", testStartingHeight, nil, testDefaultDelta, - ) - if err != nil { - t.Fatalf("unable to create bob server: %v", err) - } - - tempPath, errr := ioutil.TempDir("", "circuitdb") - if errr != nil { - t.Fatalf("unable to temporary path: %v", errr) - } - - cdb, err := channeldb.Open(tempPath) - if err != nil { - t.Fatalf("unable to open channeldb: %v", err) - } - - s, err := initSwitchWithDB(testStartingHeight, cdb) - if 
err != nil { - t.Fatalf("unable to init switch: %v", err) - } - if err := s.Start(); err != nil { - t.Fatalf("unable to start switch: %v", err) - } - - // Even though we intend to Stop s later in the test, it is safe to - // defer this Stop since its execution it is protected by an atomic - // guard, guaranteeing it executes at most once. - defer s.Stop() - - aliceChannelLink := newMockChannelLink( - s, chanID1, aliceChanID, alicePeer, true, - ) - bobChannelLink := newMockChannelLink( - s, chanID2, bobChanID, bobPeer, true, - ) - if err := s.AddLink(aliceChannelLink); err != nil { - t.Fatalf("unable to add alice link: %v", err) - } - if err := s.AddLink(bobChannelLink); err != nil { - t.Fatalf("unable to add bob link: %v", err) - } - - // Create request which should be forwarded from Alice channel link to - // bob channel link. - preimage := [sha256.Size]byte{1} - rhash := sha256.Sum256(preimage[:]) - ogPacket := &htlcPacket{ - incomingChanID: aliceChannelLink.ShortChanID(), - incomingHTLCID: 0, - outgoingChanID: bobChannelLink.ShortChanID(), - obfuscator: NewMockObfuscator(), - htlc: &lnwire.UpdateAddHTLC{ - PaymentHash: rhash, - Amount: 1, - }, - } - - if s.circuits.NumPending() != 0 { - t.Fatalf("wrong amount of half circuits") - } - if s.circuits.NumOpen() != 0 { - t.Fatalf("wrong amount of circuits") - } - - // Handle the request and checks that bob channel link received it. - if err := s.ForwardPackets(nil, ogPacket); err != nil { - t.Fatal(err) - } - - if s.circuits.NumPending() != 1 { - t.Fatalf("wrong amount of half circuits") - } - if s.circuits.NumOpen() != 0 { - t.Fatalf("wrong amount of half circuits") - } - - // Pull packet from bob's link, but do not perform a full add. - select { - case packet := <-bobChannelLink.packets: - // Complete the payment circuit and assign the outgoing htlc id - // before restarting. 
- if err := bobChannelLink.completeCircuit(packet); err != nil { - t.Fatalf("unable to complete payment circuit: %v", err) - } - case <-time.After(time.Second): - t.Fatal("request was not propagated to destination") - } - - // Now we will restart bob, leaving the forwarding decision for this - // htlc is in the half-added state. - if err := s.Stop(); err != nil { - t.Fatalf(err.String()) - } - - if err := cdb.Close(); err != nil { - t.Fatalf(err.String()) - } - - cdb2, err := channeldb.Open(tempPath) - if err != nil { - t.Fatalf("unable to reopen channeldb: %v", err) - } - - s2, err := initSwitchWithDB(testStartingHeight, cdb2) - if err != nil { - t.Fatalf("unable reinit switch: %v", err) - } - if err := s2.Start(); err != nil { - t.Fatalf("unable to restart switch: %v", err) - } - - // Even though we intend to Stop s2 later in the test, it is safe to - // defer this Stop since its execution it is protected by an atomic - // guard, guaranteeing it executes at most once. - defer s2.Stop() - - aliceChannelLink = newMockChannelLink( - s2, chanID1, aliceChanID, alicePeer, true, - ) - bobChannelLink = newMockChannelLink( - s2, chanID2, bobChanID, bobPeer, true, - ) - if err := s2.AddLink(aliceChannelLink); err != nil { - t.Fatalf("unable to add alice link: %v", err) - } - if err := s2.AddLink(bobChannelLink); err != nil { - t.Fatalf("unable to add bob link: %v", err) - } - - if s2.circuits.NumPending() != 1 { - t.Fatalf("wrong amount of half circuits") - } - if s2.circuits.NumOpen() != 1 { - t.Fatalf("wrong amount of half circuits") - } - - // Resend the failed htlc. The packet will be dropped silently since the - // switch will detect that it has been half added previously. - if err := s2.ForwardPackets(nil, ogPacket); err != nil { - t.Fatal(err) - } - - // After detecting an incomplete forward, the fail packet should have - // been returned to the sender. 
- select { - case <-aliceChannelLink.packets: - t.Fatal("request should not have returned to source") - case <-bobChannelLink.packets: - t.Fatal("request should not have forwarded to destination") - case <-time.After(time.Second): - } -} - -func TestSwitchForwardFailAfterHalfAdd(t *testing.T) { - t.Parallel() - - chanID1, chanID2, aliceChanID, bobChanID := genIDs() - - alicePeer, err := newMockServer( - t, "alice", testStartingHeight, nil, testDefaultDelta, - ) - if err != nil { - t.Fatalf("unable to create alice server: %v", err) - } - bobPeer, err := newMockServer( - t, "bob", testStartingHeight, nil, testDefaultDelta, - ) - if err != nil { - t.Fatalf("unable to create bob server: %v", err) - } - - tempPath, errr := ioutil.TempDir("", "circuitdb") - if errr != nil { - t.Fatalf("unable to temporary path: %v", errr) - } - - cdb, err := channeldb.Open(tempPath) - if err != nil { - t.Fatalf("unable to open channeldb: %v", err) - } - - s, err := initSwitchWithDB(testStartingHeight, cdb) - if err != nil { - t.Fatalf("unable to init switch: %v", err) - } - if err := s.Start(); err != nil { - t.Fatalf("unable to start switch: %v", err) - } - - // Even though we intend to Stop s later in the test, it is safe to - // defer this Stop since its execution it is protected by an atomic - // guard, guaranteeing it executes at most once. - defer s.Stop() - - aliceChannelLink := newMockChannelLink( - s, chanID1, aliceChanID, alicePeer, true, - ) - bobChannelLink := newMockChannelLink( - s, chanID2, bobChanID, bobPeer, true, - ) - if err := s.AddLink(aliceChannelLink); err != nil { - t.Fatalf("unable to add alice link: %v", err) - } - if err := s.AddLink(bobChannelLink); err != nil { - t.Fatalf("unable to add bob link: %v", err) - } - - // Create request which should be forwarded from Alice channel link to - // bob channel link. 
- preimage := [sha256.Size]byte{1} - rhash := sha256.Sum256(preimage[:]) - ogPacket := &htlcPacket{ - incomingChanID: aliceChannelLink.ShortChanID(), - incomingHTLCID: 0, - outgoingChanID: bobChannelLink.ShortChanID(), - obfuscator: NewMockObfuscator(), - htlc: &lnwire.UpdateAddHTLC{ - PaymentHash: rhash, - Amount: 1, - }, - } - - if s.circuits.NumPending() != 0 { - t.Fatalf("wrong amount of half circuits") - } - if s.circuits.NumOpen() != 0 { - t.Fatalf("wrong amount of circuits") - } - - // Handle the request and checks that bob channel link received it. - if err := s.ForwardPackets(nil, ogPacket); err != nil { - t.Fatal(err) - } - - if s.circuits.NumPending() != 1 { - t.Fatalf("wrong amount of half circuits") - } - if s.circuits.NumOpen() != 0 { - t.Fatalf("wrong amount of half circuits") - } - - // Pull packet from bob's link, but do not perform a full add. - select { - case <-bobChannelLink.packets: - case <-time.After(time.Second): - t.Fatal("request was not propagated to destination") - } - - // Now we will restart bob, leaving the forwarding decision for this - // htlc is in the half-added state. - if err := s.Stop(); err != nil { - t.Fatalf(err.String()) - } - - if err := cdb.Close(); err != nil { - t.Fatalf(err.String()) - } - - cdb2, err := channeldb.Open(tempPath) - if err != nil { - t.Fatalf("unable to reopen channeldb: %v", err) - } - - s2, err := initSwitchWithDB(testStartingHeight, cdb2) - if err != nil { - t.Fatalf("unable reinit switch: %v", err) - } - if err := s2.Start(); err != nil { - t.Fatalf("unable to restart switch: %v", err) - } - - // Even though we intend to Stop s2 later in the test, it is safe to - // defer this Stop since its execution it is protected by an atomic - // guard, guaranteeing it executes at most once. 
- defer s2.Stop() - - aliceChannelLink = newMockChannelLink( - s2, chanID1, aliceChanID, alicePeer, true, - ) - bobChannelLink = newMockChannelLink( - s2, chanID2, bobChanID, bobPeer, true, - ) - if err := s2.AddLink(aliceChannelLink); err != nil { - t.Fatalf("unable to add alice link: %v", err) - } - if err := s2.AddLink(bobChannelLink); err != nil { - t.Fatalf("unable to add bob link: %v", err) - } - - if s2.circuits.NumPending() != 1 { - t.Fatalf("wrong amount of half circuits") - } - if s2.circuits.NumOpen() != 0 { - t.Fatalf("wrong amount of half circuits") - } - - // Resend the failed htlc, it should be returned to alice since the - // switch will detect that it has been half added previously. - err = s2.ForwardPackets(nil, ogPacket) - if err != nil { - t.Fatal(err) - } - - // After detecting an incomplete forward, the fail packet should have - // been returned to the sender. - select { - case pkt := <-aliceChannelLink.packets: - linkErr := pkt.linkFailure - if linkErr.FailureDetail != OutgoingFailureIncompleteForward { - t.Fatalf("expected incomplete forward, got: %v", - linkErr.FailureDetail) - } - case <-time.After(time.Second): - t.Fatal("request was not propagated to destination") - } -} - -// TestSwitchForwardCircuitPersistence checks the ability of htlc switch to -// maintain the proper entries in the circuit map in the face of restarts. 
-func TestSwitchForwardCircuitPersistence(t *testing.T) { - t.Parallel() - - chanID1, chanID2, aliceChanID, bobChanID := genIDs() - - alicePeer, err := newMockServer( - t, "alice", testStartingHeight, nil, testDefaultDelta, - ) - if err != nil { - t.Fatalf("unable to create alice server: %v", err) - } - bobPeer, err := newMockServer( - t, "bob", testStartingHeight, nil, testDefaultDelta, - ) - if err != nil { - t.Fatalf("unable to create bob server: %v", err) - } - - tempPath, errr := ioutil.TempDir("", "circuitdb") - if errr != nil { - t.Fatalf("unable to temporary path: %v", errr) - } - - cdb, err := channeldb.Open(tempPath) - if err != nil { - t.Fatalf("unable to open channeldb: %v", err) - } - - s, err := initSwitchWithDB(testStartingHeight, cdb) - if err != nil { - t.Fatalf("unable to init switch: %v", err) - } - if err := s.Start(); err != nil { - t.Fatalf("unable to start switch: %v", err) - } - - // Even though we intend to Stop s later in the test, it is safe to - // defer this Stop since its execution it is protected by an atomic - // guard, guaranteeing it executes at most once. - defer s.Stop() - - aliceChannelLink := newMockChannelLink( - s, chanID1, aliceChanID, alicePeer, true, - ) - bobChannelLink := newMockChannelLink( - s, chanID2, bobChanID, bobPeer, true, - ) - if err := s.AddLink(aliceChannelLink); err != nil { - t.Fatalf("unable to add alice link: %v", err) - } - if err := s.AddLink(bobChannelLink); err != nil { - t.Fatalf("unable to add bob link: %v", err) - } - - // Create request which should be forwarded from Alice channel link to - // bob channel link. 
- preimage := [sha256.Size]byte{1} - rhash := sha256.Sum256(preimage[:]) - ogPacket := &htlcPacket{ - incomingChanID: aliceChannelLink.ShortChanID(), - incomingHTLCID: 0, - outgoingChanID: bobChannelLink.ShortChanID(), - obfuscator: NewMockObfuscator(), - htlc: &lnwire.UpdateAddHTLC{ - PaymentHash: rhash, - Amount: 1, - }, - } - - if s.circuits.NumPending() != 0 { - t.Fatalf("wrong amount of half circuits") - } - if s.circuits.NumOpen() != 0 { - t.Fatalf("wrong amount of circuits") - } - - // Handle the request and checks that bob channel link received it. - if err := s.ForwardPackets(nil, ogPacket); err != nil { - t.Fatal(err) - } - - if s.circuits.NumPending() != 1 { - t.Fatalf("wrong amount of half circuits") - } - if s.circuits.NumOpen() != 0 { - t.Fatalf("wrong amount of circuits") - } - - // Retrieve packet from outgoing link and cache until after restart. - var packet *htlcPacket - select { - case packet = <-bobChannelLink.packets: - case <-time.After(time.Second): - t.Fatal("request was not propagated to destination") - } - - if err := s.Stop(); err != nil { - t.Fatalf(err.String()) - } - - if err := cdb.Close(); err != nil { - t.Fatalf(err.String()) - } - - cdb2, err := channeldb.Open(tempPath) - if err != nil { - t.Fatalf("unable to reopen channeldb: %v", err) - } - - s2, err := initSwitchWithDB(testStartingHeight, cdb2) - if err != nil { - t.Fatalf("unable reinit switch: %v", err) - } - if err := s2.Start(); err != nil { - t.Fatalf("unable to restart switch: %v", err) - } - - // Even though we intend to Stop s2 later in the test, it is safe to - // defer this Stop since its execution it is protected by an atomic - // guard, guaranteeing it executes at most once. 
- defer s2.Stop() - - aliceChannelLink = newMockChannelLink( - s2, chanID1, aliceChanID, alicePeer, true, - ) - bobChannelLink = newMockChannelLink( - s2, chanID2, bobChanID, bobPeer, true, - ) - if err := s2.AddLink(aliceChannelLink); err != nil { - t.Fatalf("unable to add alice link: %v", err) - } - if err := s2.AddLink(bobChannelLink); err != nil { - t.Fatalf("unable to add bob link: %v", err) - } - - if s2.circuits.NumPending() != 1 { - t.Fatalf("wrong amount of half circuits") - } - if s2.circuits.NumOpen() != 0 { - t.Fatalf("wrong amount of half circuits") - } - - // Now that the switch has restarted, complete the payment circuit. - if err := bobChannelLink.completeCircuit(packet); err != nil { - t.Fatalf("unable to complete payment circuit: %v", err) - } - - if s2.circuits.NumPending() != 1 { - t.Fatalf("wrong amount of half circuits") - } - if s2.circuits.NumOpen() != 1 { - t.Fatal("wrong amount of circuits") - } - - // Create settle request pretending that bob link handled the add htlc - // request and sent the htlc settle request back. This request should - // be forwarder back to Alice link. - ogPacket = &htlcPacket{ - outgoingChanID: bobChannelLink.ShortChanID(), - outgoingHTLCID: 0, - amount: 1, - htlc: &lnwire.UpdateFulfillHTLC{ - PaymentPreimage: preimage, - }, - } - - // Handle the request and checks that payment circuit works properly. 
- if err := s2.ForwardPackets(nil, ogPacket); err != nil { - t.Fatal(err) - } - - select { - case packet = <-aliceChannelLink.packets: - if err := aliceChannelLink.completeCircuit(packet); err != nil { - t.Fatalf("unable to complete circuit with in key=%s: %v", - packet.inKey(), err) - } - case <-time.After(time.Second): - t.Fatal("request was not propagated to channelPoint") - } - - if s2.circuits.NumPending() != 0 { - t.Fatalf("wrong amount of half circuits, want 1, got %d", - s2.circuits.NumPending()) - } - if s2.circuits.NumOpen() != 0 { - t.Fatal("wrong amount of circuits") - } - - if err := s2.Stop(); err != nil { - t.Fatal(err) - } - - if err := cdb2.Close(); err != nil { - t.Fatalf(err.String()) - } - - cdb3, err := channeldb.Open(tempPath) - if err != nil { - t.Fatalf("unable to reopen channeldb: %v", err) - } - - s3, err := initSwitchWithDB(testStartingHeight, cdb3) - if err != nil { - t.Fatalf("unable reinit switch: %v", err) - } - if err := s3.Start(); err != nil { - t.Fatalf("unable to restart switch: %v", err) - } - defer s3.Stop() - - aliceChannelLink = newMockChannelLink( - s3, chanID1, aliceChanID, alicePeer, true, - ) - bobChannelLink = newMockChannelLink( - s3, chanID2, bobChanID, bobPeer, true, - ) - if err := s3.AddLink(aliceChannelLink); err != nil { - t.Fatalf("unable to add alice link: %v", err) - } - if err := s3.AddLink(bobChannelLink); err != nil { - t.Fatalf("unable to add bob link: %v", err) - } - - if s3.circuits.NumPending() != 0 { - t.Fatalf("wrong amount of half circuits") - } - if s3.circuits.NumOpen() != 0 { - t.Fatalf("wrong amount of circuits") - } -} - -type multiHopFwdTest struct { - name string - eligible1, eligible2 bool - failure1, failure2 *LinkError - expectedReply lnwire.FailCode -} - -// TestCircularForwards tests the allowing/disallowing of circular payments -// through the same channel in the case where the switch is configured to allow -// and disallow same channel circular forwards. 
-func TestCircularForwards(t *testing.T) { - chanID1, aliceChanID := genID() - preimage := [sha256.Size]byte{1} - hash := sha256.Sum256(preimage[:]) - - tests := []struct { - name string - allowCircularPayment bool - expectedErr error - }{ - { - name: "circular payment allowed", - allowCircularPayment: true, - expectedErr: nil, - }, - { - name: "circular payment disallowed", - allowCircularPayment: false, - expectedErr: NewDetailedLinkError( - lnwire.NewTemporaryChannelFailure(nil), - OutgoingFailureCircularRoute, - ), - }, - } - - for _, test := range tests { - test := test - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - alicePeer, err := newMockServer( - t, "alice", testStartingHeight, nil, - testDefaultDelta, - ) - if err != nil { - t.Fatalf("unable to create alice server: %v", - err) - } - - s, err := initSwitchWithDB(testStartingHeight, nil) - if err != nil { - t.Fatalf("unable to init switch: %v", err) - } - if err := s.Start(); err != nil { - t.Fatalf("unable to start switch: %v", err) - } - defer func() { _ = s.Stop() }() - - // Set the switch to allow or disallow circular routes - // according to the test's requirements. - s.cfg.AllowCircularRoute = test.allowCircularPayment - - aliceChannelLink := newMockChannelLink( - s, chanID1, aliceChanID, alicePeer, true, - ) - - if err := s.AddLink(aliceChannelLink); err != nil { - t.Fatalf("unable to add alice link: %v", err) - } - - // Create a new packet that loops through alice's link - // in a circle. - obfuscator := NewMockObfuscator() - packet := &htlcPacket{ - incomingChanID: aliceChannelLink.ShortChanID(), - outgoingChanID: aliceChannelLink.ShortChanID(), - htlc: &lnwire.UpdateAddHTLC{ - PaymentHash: hash, - Amount: 1, - }, - obfuscator: obfuscator, - } - - // Attempt to forward the packet and check for the expected - // error. 
- if err = s.ForwardPackets(nil, packet); err != nil { - t.Fatal(err) - } - select { - case p := <-aliceChannelLink.packets: - if p.linkFailure != nil { - err = er.E(p.linkFailure) - } - case <-time.After(time.Second): - t.Fatal("no timely reply from switch") - } - errr := er.Wrapped(err) - if !reflect.DeepEqual(errr, test.expectedErr) { - t.Fatalf("expected: %v, got: %v", - test.expectedErr, err) - } - - // Ensure that no circuits were opened. - if s.circuits.NumOpen() > 0 { - t.Fatal("do not expect any open circuits") - } - }) - } -} - -// TestCheckCircularForward tests the error returned by checkCircularForward -// in cases where we allow and disallow same channel circular forwards. -func TestCheckCircularForward(t *testing.T) { - tests := []struct { - name string - - // allowCircular determines whether we should allow circular - // forwards. - allowCircular bool - - // incomingLink is the link that the htlc arrived on. - incomingLink lnwire.ShortChannelID - - // outgoingLink is the link that the htlc forward - // is destined to leave on. - outgoingLink lnwire.ShortChannelID - - // expectedErr is the error we expect to be returned. 
- expectedErr *LinkError - }{ - { - name: "not circular, allowed in config", - allowCircular: true, - incomingLink: lnwire.NewShortChanIDFromInt(123), - outgoingLink: lnwire.NewShortChanIDFromInt(321), - expectedErr: nil, - }, - { - name: "not circular, not allowed in config", - allowCircular: false, - incomingLink: lnwire.NewShortChanIDFromInt(123), - outgoingLink: lnwire.NewShortChanIDFromInt(321), - expectedErr: nil, - }, - { - name: "circular, allowed in config", - allowCircular: true, - incomingLink: lnwire.NewShortChanIDFromInt(123), - outgoingLink: lnwire.NewShortChanIDFromInt(123), - expectedErr: nil, - }, - { - name: "circular, not allowed in config", - allowCircular: false, - incomingLink: lnwire.NewShortChanIDFromInt(123), - outgoingLink: lnwire.NewShortChanIDFromInt(123), - expectedErr: NewDetailedLinkError( - lnwire.NewTemporaryChannelFailure(nil), - OutgoingFailureCircularRoute, - ), - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - // Check for a circular forward, the hash passed can - // be nil because it is only used for logging. - err := checkCircularForward( - test.incomingLink, test.outgoingLink, - test.allowCircular, lntypes.Hash{}, - ) - if !reflect.DeepEqual(err, test.expectedErr) { - t.Fatalf("expected: %v, got: %v", - test.expectedErr, err) - } - }) - } -} - -// TestSkipIneligibleLinksMultiHopForward tests that if a multi-hop HTLC comes -// along, then we won't attempt to froward it down al ink that isn't yet able -// to forward any HTLC's. -func TestSkipIneligibleLinksMultiHopForward(t *testing.T) { - tests := []multiHopFwdTest{ - // None of the channels is eligible. - { - name: "not eligible", - expectedReply: lnwire.CodeUnknownNextPeer, - }, - - // Channel one has a policy failure and the other channel isn't - // available. 
- { - name: "policy fail", - eligible1: true, - failure1: NewLinkError( - lnwire.NewFinalIncorrectCltvExpiry(0), - ), - expectedReply: lnwire.CodeFinalIncorrectCltvExpiry, - }, - - // The requested channel is not eligible, but the packet is - // forwarded through the other channel. - { - name: "non-strict success", - eligible2: true, - expectedReply: lnwire.CodeNone, - }, - - // The requested channel has insufficient bandwidth and the - // other channel's policy isn't satisfied. - { - name: "non-strict policy fail", - eligible1: true, - failure1: NewDetailedLinkError( - lnwire.NewTemporaryChannelFailure(nil), - OutgoingFailureInsufficientBalance, - ), - eligible2: true, - failure2: NewLinkError( - lnwire.NewFinalIncorrectCltvExpiry(0), - ), - expectedReply: lnwire.CodeTemporaryChannelFailure, - }, - } - - for _, test := range tests { - test := test - t.Run(test.name, func(t *testing.T) { - testSkipIneligibleLinksMultiHopForward(t, &test) - }) - } -} - -// testSkipIneligibleLinksMultiHopForward tests that if a multi-hop HTLC comes -// along, then we won't attempt to froward it down al ink that isn't yet able -// to forward any HTLC's. 
-func testSkipIneligibleLinksMultiHopForward(t *testing.T, - testCase *multiHopFwdTest) { - - t.Parallel() - - var packet *htlcPacket - - alicePeer, err := newMockServer( - t, "alice", testStartingHeight, nil, testDefaultDelta, - ) - if err != nil { - t.Fatalf("unable to create alice server: %v", err) - } - bobPeer, err := newMockServer( - t, "bob", testStartingHeight, nil, testDefaultDelta, - ) - if err != nil { - t.Fatalf("unable to create bob server: %v", err) - } - - s, err := initSwitchWithDB(testStartingHeight, nil) - if err != nil { - t.Fatalf("unable to init switch: %v", err) - } - if err := s.Start(); err != nil { - t.Fatalf("unable to start switch: %v", err) - } - defer s.Stop() - - chanID1, aliceChanID := genID() - aliceChannelLink := newMockChannelLink( - s, chanID1, aliceChanID, alicePeer, true, - ) - - // We'll create a link for Bob, but mark the link as unable to forward - // any new outgoing HTLC's. - chanID2, bobChanID2 := genID() - bobChannelLink1 := newMockChannelLink( - s, chanID2, bobChanID2, bobPeer, testCase.eligible1, - ) - bobChannelLink1.checkHtlcForwardResult = testCase.failure1 - - chanID3, bobChanID3 := genID() - bobChannelLink2 := newMockChannelLink( - s, chanID3, bobChanID3, bobPeer, testCase.eligible2, - ) - bobChannelLink2.checkHtlcForwardResult = testCase.failure2 - - if err := s.AddLink(aliceChannelLink); err != nil { - t.Fatalf("unable to add alice link: %v", err) - } - if err := s.AddLink(bobChannelLink1); err != nil { - t.Fatalf("unable to add bob link: %v", err) - } - if err := s.AddLink(bobChannelLink2); err != nil { - t.Fatalf("unable to add bob link: %v", err) - } - - // Create a new packet that's destined for Bob as an incoming HTLC from - // Alice. 
- preimage := [sha256.Size]byte{1} - rhash := sha256.Sum256(preimage[:]) - obfuscator := NewMockObfuscator() - packet = &htlcPacket{ - incomingChanID: aliceChannelLink.ShortChanID(), - incomingHTLCID: 0, - outgoingChanID: bobChannelLink1.ShortChanID(), - htlc: &lnwire.UpdateAddHTLC{ - PaymentHash: rhash, - Amount: 1, - }, - obfuscator: obfuscator, - } - - // The request to forward should fail as - if err := s.ForwardPackets(nil, packet); err != nil { - t.Fatal(err) - } - - // We select from all links and extract the error if exists. - // The packet must be selected but we don't always expect a link error. - var linkError *LinkError - select { - case p := <-aliceChannelLink.packets: - linkError = p.linkFailure - case p := <-bobChannelLink1.packets: - linkError = p.linkFailure - case p := <-bobChannelLink2.packets: - linkError = p.linkFailure - case <-time.After(time.Second): - t.Fatal("no timely reply from switch") - } - failure := obfuscator.(*mockObfuscator).failure - if testCase.expectedReply == lnwire.CodeNone { - if linkError != nil { - t.Fatalf("forwarding should have succeeded") - } - if failure != nil { - t.Fatalf("unexpected failure %T", failure) - } - } else { - if linkError == nil { - t.Fatalf("forwarding should have failed due to " + - "inactive link") - } - if failure.Code() != testCase.expectedReply { - t.Fatalf("unexpected failure %T", failure) - } - } - - if s.circuits.NumOpen() != 0 { - t.Fatal("wrong amount of circuits") - } -} - -// TestSkipIneligibleLinksLocalForward ensures that the switch will not attempt -// to forward any HTLC's down a link that isn't yet eligible for forwarding. -func TestSkipIneligibleLinksLocalForward(t *testing.T) { - t.Parallel() - - testSkipLinkLocalForward(t, false, nil) -} - -// TestSkipPolicyUnsatisfiedLinkLocalForward ensures that the switch will not -// attempt to send locally initiated HTLCs that would violate the channel policy -// down a link. 
-func TestSkipPolicyUnsatisfiedLinkLocalForward(t *testing.T) { - t.Parallel() - - testSkipLinkLocalForward(t, true, lnwire.NewTemporaryChannelFailure(nil)) -} - -func testSkipLinkLocalForward(t *testing.T, eligible bool, - policyResult lnwire.FailureMessage) { - - // We'll create a single link for this test, marking it as being unable - // to forward form the get go. - alicePeer, err := newMockServer( - t, "alice", testStartingHeight, nil, testDefaultDelta, - ) - if err != nil { - t.Fatalf("unable to create alice server: %v", err) - } - - s, err := initSwitchWithDB(testStartingHeight, nil) - if err != nil { - t.Fatalf("unable to init switch: %v", err) - } - if err := s.Start(); err != nil { - t.Fatalf("unable to start switch: %v", err) - } - defer s.Stop() - - chanID1, _, aliceChanID, _ := genIDs() - - aliceChannelLink := newMockChannelLink( - s, chanID1, aliceChanID, alicePeer, eligible, - ) - aliceChannelLink.checkHtlcTransitResult = NewLinkError( - policyResult, - ) - if err := s.AddLink(aliceChannelLink); err != nil { - t.Fatalf("unable to add alice link: %v", err) - } - - preimage, err := genPreimage() - if err != nil { - t.Fatalf("unable to generate preimage: %v", err) - } - rhash := sha256.Sum256(preimage[:]) - addMsg := &lnwire.UpdateAddHTLC{ - PaymentHash: rhash, - Amount: 1, - } - - // We'll attempt to send out a new HTLC that has Alice as the first - // outgoing link. This should fail as Alice isn't yet able to forward - // any active HTLC's. - err = s.SendHTLC(aliceChannelLink.ShortChanID(), 0, addMsg) - if err == nil { - t.Fatalf("local forward should fail due to inactive link") - } - - if s.circuits.NumOpen() != 0 { - t.Fatal("wrong amount of circuits") - } -} - -// TestSwitchCancel checks that if htlc was rejected we remove unused -// circuits. 
-func TestSwitchCancel(t *testing.T) { - t.Parallel() - - alicePeer, err := newMockServer( - t, "alice", testStartingHeight, nil, testDefaultDelta, - ) - if err != nil { - t.Fatalf("unable to create alice server: %v", err) - } - bobPeer, err := newMockServer( - t, "bob", testStartingHeight, nil, testDefaultDelta, - ) - if err != nil { - t.Fatalf("unable to create bob server: %v", err) - } - - s, err := initSwitchWithDB(testStartingHeight, nil) - if err != nil { - t.Fatalf("unable to init switch: %v", err) - } - if err := s.Start(); err != nil { - t.Fatalf("unable to start switch: %v", err) - } - defer s.Stop() - - chanID1, chanID2, aliceChanID, bobChanID := genIDs() - - aliceChannelLink := newMockChannelLink( - s, chanID1, aliceChanID, alicePeer, true, - ) - bobChannelLink := newMockChannelLink( - s, chanID2, bobChanID, bobPeer, true, - ) - if err := s.AddLink(aliceChannelLink); err != nil { - t.Fatalf("unable to add alice link: %v", err) - } - if err := s.AddLink(bobChannelLink); err != nil { - t.Fatalf("unable to add bob link: %v", err) - } - - // Create request which should be forwarder from alice channel link - // to bob channel link. - preimage, err := genPreimage() - if err != nil { - t.Fatalf("unable to generate preimage: %v", err) - } - rhash := sha256.Sum256(preimage[:]) - request := &htlcPacket{ - incomingChanID: aliceChannelLink.ShortChanID(), - incomingHTLCID: 0, - outgoingChanID: bobChannelLink.ShortChanID(), - obfuscator: NewMockObfuscator(), - htlc: &lnwire.UpdateAddHTLC{ - PaymentHash: rhash, - Amount: 1, - }, - } - - // Handle the request and checks that bob channel link received it. 
- if err := s.ForwardPackets(nil, request); err != nil { - t.Fatal(err) - } - - select { - case packet := <-bobChannelLink.packets: - if err := bobChannelLink.completeCircuit(packet); err != nil { - t.Fatalf("unable to complete payment circuit: %v", err) - } - - case <-time.After(time.Second): - t.Fatal("request was not propagated to destination") - } - - if s.circuits.NumPending() != 1 { - t.Fatalf("wrong amount of half circuits") - } - if s.circuits.NumOpen() != 1 { - t.Fatal("wrong amount of circuits") - } - - // Create settle request pretending that bob channel link handled - // the add htlc request and sent the htlc settle request back. This - // request should be forwarder back to alice channel link. - request = &htlcPacket{ - outgoingChanID: bobChannelLink.ShortChanID(), - outgoingHTLCID: 0, - amount: 1, - htlc: &lnwire.UpdateFailHTLC{}, - } - - // Handle the request and checks that payment circuit works properly. - if err := s.ForwardPackets(nil, request); err != nil { - t.Fatal(err) - } - - select { - case pkt := <-aliceChannelLink.packets: - if err := aliceChannelLink.completeCircuit(pkt); err != nil { - t.Fatalf("unable to remove circuit: %v", err) - } - - case <-time.After(time.Second): - t.Fatal("request was not propagated to channelPoint") - } - - if s.circuits.NumPending() != 0 { - t.Fatal("wrong amount of circuits") - } - if s.circuits.NumOpen() != 0 { - t.Fatal("wrong amount of circuits") - } -} - -// TestSwitchAddSamePayment tests that we send the payment with the same -// payment hash. 
-func TestSwitchAddSamePayment(t *testing.T) { - t.Parallel() - - chanID1, chanID2, aliceChanID, bobChanID := genIDs() - - alicePeer, err := newMockServer( - t, "alice", testStartingHeight, nil, testDefaultDelta, - ) - if err != nil { - t.Fatalf("unable to create alice server: %v", err) - } - bobPeer, err := newMockServer( - t, "bob", testStartingHeight, nil, testDefaultDelta, - ) - if err != nil { - t.Fatalf("unable to create bob server: %v", err) - } - - s, err := initSwitchWithDB(testStartingHeight, nil) - if err != nil { - t.Fatalf("unable to init switch: %v", err) - } - if err := s.Start(); err != nil { - t.Fatalf("unable to start switch: %v", err) - } - defer s.Stop() - - aliceChannelLink := newMockChannelLink( - s, chanID1, aliceChanID, alicePeer, true, - ) - bobChannelLink := newMockChannelLink( - s, chanID2, bobChanID, bobPeer, true, - ) - if err := s.AddLink(aliceChannelLink); err != nil { - t.Fatalf("unable to add alice link: %v", err) - } - if err := s.AddLink(bobChannelLink); err != nil { - t.Fatalf("unable to add bob link: %v", err) - } - - // Create request which should be forwarder from alice channel link - // to bob channel link. - preimage, err := genPreimage() - if err != nil { - t.Fatalf("unable to generate preimage: %v", err) - } - rhash := sha256.Sum256(preimage[:]) - request := &htlcPacket{ - incomingChanID: aliceChannelLink.ShortChanID(), - incomingHTLCID: 0, - outgoingChanID: bobChannelLink.ShortChanID(), - obfuscator: NewMockObfuscator(), - htlc: &lnwire.UpdateAddHTLC{ - PaymentHash: rhash, - Amount: 1, - }, - } - - // Handle the request and checks that bob channel link received it. 
- if err := s.ForwardPackets(nil, request); err != nil { - t.Fatal(err) - } - - select { - case packet := <-bobChannelLink.packets: - if err := bobChannelLink.completeCircuit(packet); err != nil { - t.Fatalf("unable to complete payment circuit: %v", err) - } - - case <-time.After(time.Second): - t.Fatal("request was not propagated to destination") - } - - if s.circuits.NumOpen() != 1 { - t.Fatal("wrong amount of circuits") - } - - request = &htlcPacket{ - incomingChanID: aliceChannelLink.ShortChanID(), - incomingHTLCID: 1, - outgoingChanID: bobChannelLink.ShortChanID(), - obfuscator: NewMockObfuscator(), - htlc: &lnwire.UpdateAddHTLC{ - PaymentHash: rhash, - Amount: 1, - }, - } - - // Handle the request and checks that bob channel link received it. - if err := s.ForwardPackets(nil, request); err != nil { - t.Fatal(err) - } - - select { - case packet := <-bobChannelLink.packets: - if err := bobChannelLink.completeCircuit(packet); err != nil { - t.Fatalf("unable to complete payment circuit: %v", err) - } - - case <-time.After(time.Second): - t.Fatal("request was not propagated to destination") - } - - if s.circuits.NumOpen() != 2 { - t.Fatal("wrong amount of circuits") - } - - // Create settle request pretending that bob channel link handled - // the add htlc request and sent the htlc settle request back. This - // request should be forwarder back to alice channel link. - request = &htlcPacket{ - outgoingChanID: bobChannelLink.ShortChanID(), - outgoingHTLCID: 0, - amount: 1, - htlc: &lnwire.UpdateFailHTLC{}, - } - - // Handle the request and checks that payment circuit works properly. 
- if err := s.ForwardPackets(nil, request); err != nil { - t.Fatal(err) - } - - select { - case pkt := <-aliceChannelLink.packets: - if err := aliceChannelLink.completeCircuit(pkt); err != nil { - t.Fatalf("unable to remove circuit: %v", err) - } - - case <-time.After(time.Second): - t.Fatal("request was not propagated to channelPoint") - } - - if s.circuits.NumOpen() != 1 { - t.Fatal("wrong amount of circuits") - } - - request = &htlcPacket{ - outgoingChanID: bobChannelLink.ShortChanID(), - outgoingHTLCID: 1, - amount: 1, - htlc: &lnwire.UpdateFailHTLC{}, - } - - // Handle the request and checks that payment circuit works properly. - if err := s.ForwardPackets(nil, request); err != nil { - t.Fatal(err) - } - - select { - case pkt := <-aliceChannelLink.packets: - if err := aliceChannelLink.completeCircuit(pkt); err != nil { - t.Fatalf("unable to remove circuit: %v", err) - } - - case <-time.After(time.Second): - t.Fatal("request was not propagated to channelPoint") - } - - if s.circuits.NumOpen() != 0 { - t.Fatal("wrong amount of circuits") - } -} - -// TestSwitchSendPayment tests ability of htlc switch to respond to the -// users when response is came back from channel link. -func TestSwitchSendPayment(t *testing.T) { - t.Parallel() - - alicePeer, err := newMockServer( - t, "alice", testStartingHeight, nil, testDefaultDelta, - ) - if err != nil { - t.Fatalf("unable to create alice server: %v", err) - } - - s, err := initSwitchWithDB(testStartingHeight, nil) - if err != nil { - t.Fatalf("unable to init switch: %v", err) - } - if err := s.Start(); err != nil { - t.Fatalf("unable to start switch: %v", err) - } - defer s.Stop() - - chanID1, _, aliceChanID, _ := genIDs() - - aliceChannelLink := newMockChannelLink( - s, chanID1, aliceChanID, alicePeer, true, - ) - if err := s.AddLink(aliceChannelLink); err != nil { - t.Fatalf("unable to add link: %v", err) - } - - // Create request which should be forwarder from alice channel link - // to bob channel link. 
- preimage, err := genPreimage() - if err != nil { - t.Fatalf("unable to generate preimage: %v", err) - } - rhash := sha256.Sum256(preimage[:]) - update := &lnwire.UpdateAddHTLC{ - PaymentHash: rhash, - Amount: 1, - } - paymentID := uint64(123) - - // First check that the switch will correctly respond that this payment - // ID is unknown. - _, err = s.GetPaymentResult( - paymentID, rhash, newMockDeobfuscator(), - ) - if !ErrPaymentIDNotFound.Is(err) { - t.Fatalf("expected ErrPaymentIDNotFound, got %v", err) - } - - // Handle the request and checks that bob channel link received it. - errChan := make(chan er.R) - go func() { - err := s.SendHTLC( - aliceChannelLink.ShortChanID(), paymentID, update, - ) - if err != nil { - errChan <- err - return - } - - resultChan, err := s.GetPaymentResult( - paymentID, rhash, newMockDeobfuscator(), - ) - if err != nil { - errChan <- err - return - } - - result, ok := <-resultChan - if !ok { - errChan <- er.Errorf("shutting down") - } - - if result.Error != nil { - errChan <- result.Error - return - } - - errChan <- nil - }() - - select { - case packet := <-aliceChannelLink.packets: - if err := aliceChannelLink.completeCircuit(packet); err != nil { - t.Fatalf("unable to complete payment circuit: %v", err) - } - - case err := <-errChan: - if err != nil { - t.Fatalf("unable to send payment: %v", err) - } - case <-time.After(time.Second): - t.Fatal("request was not propagated to destination") - } - - if s.circuits.NumOpen() != 1 { - t.Fatal("wrong amount of circuits") - } - - // Create fail request pretending that bob channel link handled - // the add htlc request with error and sent the htlc fail request - // back. This request should be forwarded back to alice channel link. 
- obfuscator := NewMockObfuscator() - failure := lnwire.NewFailIncorrectDetails(update.Amount, 100) - reason, err := obfuscator.EncryptFirstHop(failure) - if err != nil { - t.Fatalf("unable obfuscate failure: %v", err) - } - - if s.IsForwardedHTLC(aliceChannelLink.ShortChanID(), update.ID) { - t.Fatal("htlc should be identified as not forwarded") - } - packet := &htlcPacket{ - outgoingChanID: aliceChannelLink.ShortChanID(), - outgoingHTLCID: 0, - amount: 1, - htlc: &lnwire.UpdateFailHTLC{ - Reason: reason, - }, - } - - if err := s.ForwardPackets(nil, packet); err != nil { - t.Fatalf("can't forward htlc packet: %v", err) - } - - select { - case err := <-errChan: - assertFailureCode( - t, err, lnwire.CodeIncorrectOrUnknownPaymentDetails, - ) - case <-time.After(time.Second): - t.Fatal("err wasn't received") - } -} - -// TestLocalPaymentNoForwardingEvents tests that if we send a series of locally -// initiated payments, then they aren't reflected in the forwarding log. -func TestLocalPaymentNoForwardingEvents(t *testing.T) { - t.Parallel() - - // First, we'll create our traditional three hop network. We'll only be - // interacting with and asserting the state of the first end point for - // this test. - channels, cleanUp, _, err := createClusterChannels( - btcutil.UnitsPerCoin()*3, - btcutil.UnitsPerCoin()*5) - if err != nil { - t.Fatalf("unable to create channel: %v", err) - } - defer cleanUp() - - n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice, - channels.bobToCarol, channels.carolToBob, testStartingHeight) - if err := n.start(); err != nil { - t.Fatalf("unable to start three hop network: %v", err) - } - - // We'll now craft and send a payment from Alice to Bob. - amount := lnwire.NewMSatFromSatoshis(btcutil.UnitsPerCoin()) - htlcAmt, totalTimelock, hops := generateHops( - amount, testStartingHeight, n.firstBobChannelLink, - ) - - // With the payment crafted, we'll send it from Alice to Bob. 
We'll - // wait for Alice to receive the preimage for the payment before - // proceeding. - receiver := n.bobServer - firstHop := n.firstBobChannelLink.ShortChanID() - _, err = makePayment( - n.aliceServer, receiver, firstHop, hops, amount, htlcAmt, - totalTimelock, - ).Wait(30 * time.Second) - if err != nil { - t.Fatalf("unable to make the payment: %v", err) - } - - // At this point, we'll forcibly stop the three hop network. Doing - // this will cause any pending forwarding events to be flushed by the - // various switches in the network. - n.stop() - - // With all the switches stopped, we'll fetch Alice's mock forwarding - // event log. - log, ok := n.aliceServer.htlcSwitch.cfg.FwdingLog.(*mockForwardingLog) - if !ok { - t.Fatalf("mockForwardingLog assertion failed") - } - log.Lock() - defer log.Unlock() - - // If we examine the memory of the forwarding log, then it should be - // blank. - if len(log.events) != 0 { - t.Fatalf("log should have no events, instead has: %v", - spew.Sdump(log.events)) - } -} - -// TestMultiHopPaymentForwardingEvents tests that if we send a series of -// multi-hop payments via Alice->Bob->Carol. Then Bob properly logs forwarding -// events, while Alice and Carol don't. -func TestMultiHopPaymentForwardingEvents(t *testing.T) { - t.Parallel() - - // First, we'll create our traditional three hop network. - channels, cleanUp, _, err := createClusterChannels( - btcutil.UnitsPerCoin()*3, - btcutil.UnitsPerCoin()*5) - if err != nil { - t.Fatalf("unable to create channel: %v", err) - } - defer cleanUp() - - n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice, - channels.bobToCarol, channels.carolToBob, testStartingHeight) - if err := n.start(); err != nil { - t.Fatalf("unable to start three hop network: %v", err) - } - - // We'll make now 10 payments, of 100k satoshis each from Alice to - // Carol via Bob. 
- const numPayments = 10 - finalAmt := lnwire.NewMSatFromSatoshis(100000) - htlcAmt, totalTimelock, hops := generateHops( - finalAmt, testStartingHeight, n.firstBobChannelLink, - n.carolChannelLink, - ) - firstHop := n.firstBobChannelLink.ShortChanID() - for i := 0; i < numPayments/2; i++ { - _, err := makePayment( - n.aliceServer, n.carolServer, firstHop, hops, finalAmt, - htlcAmt, totalTimelock, - ).Wait(30 * time.Second) - if err != nil { - t.Fatalf("unable to send payment: %v", err) - } - } - - bobLog, ok := n.bobServer.htlcSwitch.cfg.FwdingLog.(*mockForwardingLog) - if !ok { - t.Fatalf("mockForwardingLog assertion failed") - } - - // After sending 5 of the payments, trigger the forwarding ticker, to - // make sure the events are properly flushed. - bobTicker, ok := n.bobServer.htlcSwitch.cfg.FwdEventTicker.(*ticker.Force) - if !ok { - t.Fatalf("mockTicker assertion failed") - } - - // We'll trigger the ticker, and wait for the events to appear in Bob's - // forwarding log. - timeout := time.After(15 * time.Second) - for { - select { - case bobTicker.Force <- time.Now(): - case <-time.After(1 * time.Second): - t.Fatalf("unable to force tick") - } - - // If all 5 events is found in Bob's log, we can break out and - // continue the test. - bobLog.Lock() - if len(bobLog.events) == 5 { - bobLog.Unlock() - break - } - bobLog.Unlock() - - // Otherwise wait a little bit before checking again. - select { - case <-time.After(50 * time.Millisecond): - case <-timeout: - bobLog.Lock() - defer bobLog.Unlock() - t.Fatalf("expected 5 events in event log, instead "+ - "found: %v", spew.Sdump(bobLog.events)) - } - } - - // Send the remaining payments. - for i := numPayments / 2; i < numPayments; i++ { - _, err := makePayment( - n.aliceServer, n.carolServer, firstHop, hops, finalAmt, - htlcAmt, totalTimelock, - ).Wait(30 * time.Second) - if err != nil { - t.Fatalf("unable to send payment: %v", err) - } - } - - // With all 10 payments sent. 
We'll now manually stop each of the - // switches so we can examine their end state. - n.stop() - - // Alice and Carol shouldn't have any recorded forwarding events, as - // they were the source and the sink for these payment flows. - aliceLog, ok := n.aliceServer.htlcSwitch.cfg.FwdingLog.(*mockForwardingLog) - if !ok { - t.Fatalf("mockForwardingLog assertion failed") - } - aliceLog.Lock() - defer aliceLog.Unlock() - if len(aliceLog.events) != 0 { - t.Fatalf("log should have no events, instead has: %v", - spew.Sdump(aliceLog.events)) - } - - carolLog, ok := n.carolServer.htlcSwitch.cfg.FwdingLog.(*mockForwardingLog) - if !ok { - t.Fatalf("mockForwardingLog assertion failed") - } - carolLog.Lock() - defer carolLog.Unlock() - if len(carolLog.events) != 0 { - t.Fatalf("log should have no events, instead has: %v", - spew.Sdump(carolLog.events)) - } - - // Bob on the other hand, should have 10 events. - bobLog.Lock() - defer bobLog.Unlock() - if len(bobLog.events) != 10 { - t.Fatalf("log should have 10 events, instead has: %v", - spew.Sdump(bobLog.events)) - } - - // Each of the 10 events should have had all fields set properly. - for _, event := range bobLog.events { - // The incoming and outgoing channels should properly be set for - // the event. - if event.IncomingChanID != n.aliceChannelLink.ShortChanID() { - t.Fatalf("chan id mismatch: expected %v, got %v", - event.IncomingChanID, - n.aliceChannelLink.ShortChanID()) - } - if event.OutgoingChanID != n.carolChannelLink.ShortChanID() { - t.Fatalf("chan id mismatch: expected %v, got %v", - event.OutgoingChanID, - n.carolChannelLink.ShortChanID()) - } - - // Additionally, the incoming and outgoing amounts should also - // be properly set. 
- if event.AmtIn != htlcAmt { - t.Fatalf("incoming amt mismatch: expected %v, got %v", - event.AmtIn, htlcAmt) - } - if event.AmtOut != finalAmt { - t.Fatalf("outgoing amt mismatch: expected %v, got %v", - event.AmtOut, finalAmt) - } - } -} - -// TestUpdateFailMalformedHTLCErrorConversion tests that we're able to properly -// convert malformed HTLC errors that originate at the direct link, as well as -// during multi-hop HTLC forwarding. -func TestUpdateFailMalformedHTLCErrorConversion(t *testing.T) { - t.Parallel() - - // First, we'll create our traditional three hop network. - channels, cleanUp, _, err := createClusterChannels( - btcutil.UnitsPerCoin()*3, btcutil.UnitsPerCoin()*5, - ) - if err != nil { - t.Fatalf("unable to create channel: %v", err) - } - defer cleanUp() - - n := newThreeHopNetwork( - t, channels.aliceToBob, channels.bobToAlice, - channels.bobToCarol, channels.carolToBob, testStartingHeight, - ) - if err := n.start(); err != nil { - t.Fatalf("unable to start three hop network: %v", err) - } - - assertPaymentFailure := func(t *testing.T) { - // With the decoder modified, we'll now attempt to send a - // payment from Alice to carol. - finalAmt := lnwire.NewMSatFromSatoshis(100000) - htlcAmt, totalTimelock, hops := generateHops( - finalAmt, testStartingHeight, n.firstBobChannelLink, - n.carolChannelLink, - ) - firstHop := n.firstBobChannelLink.ShortChanID() - _, err = makePayment( - n.aliceServer, n.carolServer, firstHop, hops, finalAmt, - htlcAmt, totalTimelock, - ).Wait(30 * time.Second) - - // The payment should fail as Carol is unable to decode the - // onion blob sent to her. 
- if err == nil { - t.Fatalf("unable to send payment: %v", err) - } - - errr := er.Wrapped(err) - routingErr := errr.(ClearTextError) - failureMsg := routingErr.WireMessage() - if _, ok := failureMsg.(*lnwire.FailInvalidOnionKey); !ok { - t.Fatalf("expected onion failure instead got: %v", - routingErr.WireMessage()) - } - } - - t.Run("multi-hop error conversion", func(t *testing.T) { - // Now that we have our network up, we'll modify the hop - // iterator for the Bob <-> Carol channel to fail to decode in - // order to simulate either a replay attack or an issue - // decoding the onion. - n.carolOnionDecoder.decodeFail = true - - assertPaymentFailure(t) - }) - - t.Run("direct channel error conversion", func(t *testing.T) { - // Similar to the above test case, we'll now make the Alice <-> - // Bob link always fail to decode an onion. This differs from - // the above test case in that there's no encryption on the - // error at all since Alice will directly receive a - // UpdateFailMalformedHTLC message. - n.bobOnionDecoder.decodeFail = true - - assertPaymentFailure(t) - }) -} - -// TestSwitchGetPaymentResult tests that the switch interacts as expected with -// the circuit map and network result store when looking up the result of a -// payment ID. This is important for not to lose results under concurrent -// lookup and receiving results. -func TestSwitchGetPaymentResult(t *testing.T) { - t.Parallel() - - const paymentID = 123 - var preimg lntypes.Preimage - preimg[0] = 3 - - s, err := initSwitchWithDB(testStartingHeight, nil) - if err != nil { - t.Fatalf("unable to init switch: %v", err) - } - if err := s.Start(); err != nil { - t.Fatalf("unable to start switch: %v", err) - } - defer s.Stop() - - lookup := make(chan *PaymentCircuit, 1) - s.circuits = &mockCircuitMap{ - lookup: lookup, - } - - // If the payment circuit is not found in the circuit map, the payment - // result must be found in the store if available. 
Since we haven't - // added anything to the store yet, ErrPaymentIDNotFound should be - // returned. - lookup <- nil - _, err = s.GetPaymentResult( - paymentID, lntypes.Hash{}, newMockDeobfuscator(), - ) - if !ErrPaymentIDNotFound.Is(err) { - t.Fatalf("expected ErrPaymentIDNotFound, got %v", err) - } - - // Next let the lookup find the circuit in the circuit map. It should - // subscribe to payment results, and return the result when available. - lookup <- &PaymentCircuit{} - resultChan, err := s.GetPaymentResult( - paymentID, lntypes.Hash{}, newMockDeobfuscator(), - ) - if err != nil { - t.Fatalf("unable to get payment result: %v", err) - } - - // Add the result to the store. - n := &networkResult{ - msg: &lnwire.UpdateFulfillHTLC{ - PaymentPreimage: preimg, - }, - unencrypted: true, - isResolution: true, - } - - err = s.networkResults.storeResult(paymentID, n) - if err != nil { - t.Fatalf("unable to store result: %v", err) - } - - // The result should be availble. - select { - case res, ok := <-resultChan: - if !ok { - t.Fatalf("channel was closed") - } - - if res.Error != nil { - t.Fatalf("got unexpected error result") - } - - if res.Preimage != preimg { - t.Fatalf("expected preimg %v, got %v", - preimg, res.Preimage) - } - - case <-time.After(1 * time.Second): - t.Fatalf("result not received") - } - - // As a final test, try to get the result again. Now that is no longer - // in the circuit map, it should be immediately available from the - // store. 
- lookup <- nil - resultChan, err = s.GetPaymentResult( - paymentID, lntypes.Hash{}, newMockDeobfuscator(), - ) - if err != nil { - t.Fatalf("unable to get payment result: %v", err) - } - - select { - case res, ok := <-resultChan: - if !ok { - t.Fatalf("channel was closed") - } - - if res.Error != nil { - t.Fatalf("got unexpected error result") - } - - if res.Preimage != preimg { - t.Fatalf("expected preimg %v, got %v", - preimg, res.Preimage) - } - - case <-time.After(1 * time.Second): - t.Fatalf("result not received") - } -} - -// TestInvalidFailure tests that the switch returns an unreadable failure error -// if the failure cannot be decrypted. -func TestInvalidFailure(t *testing.T) { - t.Parallel() - - alicePeer, err := newMockServer( - t, "alice", testStartingHeight, nil, testDefaultDelta, - ) - if err != nil { - t.Fatalf("unable to create alice server: %v", err) - } - - s, err := initSwitchWithDB(testStartingHeight, nil) - if err != nil { - t.Fatalf("unable to init switch: %v", err) - } - if err := s.Start(); err != nil { - t.Fatalf("unable to start switch: %v", err) - } - defer s.Stop() - - chanID1, _, aliceChanID, _ := genIDs() - - // Set up a mock channel link. - aliceChannelLink := newMockChannelLink( - s, chanID1, aliceChanID, alicePeer, true, - ) - if err := s.AddLink(aliceChannelLink); err != nil { - t.Fatalf("unable to add link: %v", err) - } - - // Create a request which should be forwarded to the mock channel link. - preimage, err := genPreimage() - if err != nil { - t.Fatalf("unable to generate preimage: %v", err) - } - rhash := sha256.Sum256(preimage[:]) - update := &lnwire.UpdateAddHTLC{ - PaymentHash: rhash, - Amount: 1, - } - - paymentID := uint64(123) - - // Send the request. - err = s.SendHTLC( - aliceChannelLink.ShortChanID(), paymentID, update, - ) - if err != nil { - t.Fatalf("unable to send payment: %v", err) - } - - // Catch the packet and complete the circuit so that the switch is ready - // for a response. 
- select { - case packet := <-aliceChannelLink.packets: - if err := aliceChannelLink.completeCircuit(packet); err != nil { - t.Fatalf("unable to complete payment circuit: %v", err) - } - - case <-time.After(time.Second): - t.Fatal("request was not propagated to destination") - } - - // Send response packet with an unreadable failure message to the - // switch. The reason failed is not relevant, because we mock the - // decryption. - packet := &htlcPacket{ - outgoingChanID: aliceChannelLink.ShortChanID(), - outgoingHTLCID: 0, - amount: 1, - htlc: &lnwire.UpdateFailHTLC{ - Reason: []byte{1, 2, 3}, - }, - } - - if err := s.ForwardPackets(nil, packet); err != nil { - t.Fatalf("can't forward htlc packet: %v", err) - } - - // Get payment result from switch. We expect an unreadable failure - // message error. - deobfuscator := SphinxErrorDecrypter{ - OnionErrorDecrypter: &mockOnionErrorDecryptor{ - err: ErrUnreadableFailureMessage.Default(), - }, - } - - resultChan, err := s.GetPaymentResult( - paymentID, rhash, &deobfuscator, - ) - if err != nil { - t.Fatal(err) - } - - select { - case result := <-resultChan: - if !ErrUnreadableFailureMessage.Is(result.Error) { - t.Fatal("expected unreadable failure message") - } - - case <-time.After(time.Second): - t.Fatal("err wasn't received") - } - - // Modify the decryption to simulate that decryption went alright, but - // the failure cannot be decoded. 
- deobfuscator = SphinxErrorDecrypter{ - OnionErrorDecrypter: &mockOnionErrorDecryptor{ - sourceIdx: 2, - message: []byte{200}, - }, - } - - resultChan, err = s.GetPaymentResult( - paymentID, rhash, &deobfuscator, - ) - if err != nil { - t.Fatal(err) - } - - select { - case result := <-resultChan: - errr := er.Wrapped(result.Error) - rtErr, ok := errr.(ClearTextError) - if !ok { - t.Fatal("expected ClearTextError") - } - source, ok := rtErr.(*ForwardingError) - if !ok { - t.Fatalf("expected forwarding error, got: %T", rtErr) - } - if source.FailureSourceIdx != 2 { - t.Fatal("unexpected error source index") - } - if rtErr.WireMessage() != nil { - t.Fatal("expected empty failure message") - } - - case <-time.After(time.Second): - t.Fatal("err wasn't received") - } -} - -// htlcNotifierEvents is a function that generates a set of expected htlc -// notifier evetns for each node in a three hop network with the dynamic -// values provided. These functions take dynamic values so that changes to -// external systems (such as our default timelock delta) do not break -// these tests. -type htlcNotifierEvents func(channels *clusterChannels, htlcID uint64, - ts time.Time, htlc *lnwire.UpdateAddHTLC, - hops []*hop.Payload) ([]interface{}, []interface{}, []interface{}) - -// TestHtlcNotifier tests the notifying of htlc events that are routed over a -// three hop network. It sets up an Alice -> Bob -> Carol network and routes -// payments from Alice -> Carol to test events from the perspective of a -// sending (Alice), forwarding (Bob) and receiving (Carol) node. Test cases -// are present for saduccessful and failed payments. -func TestHtlcNotifier(t *testing.T) { - tests := []struct { - name string - - // Options is a set of options to apply to the three hop - // network's servers. - options []serverOption - - // expectedEvents is a function which returns an expected set - // of events for the test. 
- expectedEvents htlcNotifierEvents - - // iterations is the number of times we will send a payment, - // this is used to send more than one payment to force non- - // zero htlc indexes to make sure we aren't just checking - // default values. - iterations int - }{ - { - name: "successful three hop payment", - options: nil, - expectedEvents: func(channels *clusterChannels, - htlcID uint64, ts time.Time, - htlc *lnwire.UpdateAddHTLC, - hops []*hop.Payload) ([]interface{}, - []interface{}, []interface{}) { - - return getThreeHopEvents( - channels, htlcID, ts, htlc, hops, nil, - ) - }, - iterations: 2, - }, - { - name: "failed at forwarding link", - // Set a functional option which disables bob as a - // forwarding node to force a payment error. - options: []serverOption{ - serverOptionRejectHtlc(false, true, false), - }, - expectedEvents: func(channels *clusterChannels, - htlcID uint64, ts time.Time, - htlc *lnwire.UpdateAddHTLC, - hops []*hop.Payload) ([]interface{}, - []interface{}, []interface{}) { - - return getThreeHopEvents( - channels, htlcID, ts, htlc, hops, - &LinkError{ - msg: &lnwire.FailChannelDisabled{}, - FailureDetail: OutgoingFailureForwardsDisabled, - }, - ) - }, - iterations: 1, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - testHtcNotifier( - t, test.options, test.iterations, - test.expectedEvents, - ) - }) - } -} - -// testHtcNotifier runs a htlc notifier test. -func testHtcNotifier(t *testing.T, testOpts []serverOption, iterations int, - getEvents htlcNotifierEvents) { - - t.Parallel() - - // First, we'll create our traditional three hop - // network. - channels, cleanUp, _, err := createClusterChannels( - btcutil.UnitsPerCoin()*3, - btcutil.UnitsPerCoin()*5) - if err != nil { - t.Fatalf("unable to create channel: %v", err) - } - defer cleanUp() - - // Mock time so that all events are reported with a static timestamp. 
- now := time.Now() - mockTime := func() time.Time { - return now - } - - // Create htlc notifiers for each server in the three hop network and - // start them. - aliceNotifier := NewHtlcNotifier(mockTime) - if err := aliceNotifier.Start(); err != nil { - t.Fatalf("could not start alice notifier") - } - defer aliceNotifier.Stop() - - bobNotifier := NewHtlcNotifier(mockTime) - if err := bobNotifier.Start(); err != nil { - t.Fatalf("could not start bob notifier") - } - defer bobNotifier.Stop() - - carolNotifier := NewHtlcNotifier(mockTime) - if err := carolNotifier.Start(); err != nil { - t.Fatalf("could not start carol notifier") - } - defer carolNotifier.Stop() - - // Create a notifier server option which will set our htlc notifiers - // for the three hop network. - notifierOption := serverOptionWithHtlcNotifier( - aliceNotifier, bobNotifier, carolNotifier, - ) - - // Add the htlcNotifier option to any other options - // set in the test. - options := append(testOpts, notifierOption) - - n := newThreeHopNetwork( - t, channels.aliceToBob, - channels.bobToAlice, channels.bobToCarol, - channels.carolToBob, testStartingHeight, - options..., - ) - if err := n.start(); err != nil { - t.Fatalf("unable to start three hop "+ - "network: %v", err) - } - defer n.stop() - - // Before we forward anything, subscribe to htlc events - // from each notifier. - aliceEvents, err := aliceNotifier.SubscribeHtlcEvents() - if err != nil { - t.Fatalf("could not subscribe to alice's"+ - " events: %v", err) - } - defer aliceEvents.Cancel() - - bobEvents, err := bobNotifier.SubscribeHtlcEvents() - if err != nil { - t.Fatalf("could not subscribe to bob's"+ - " events: %v", err) - } - defer bobEvents.Cancel() - - carolEvents, err := carolNotifier.SubscribeHtlcEvents() - if err != nil { - t.Fatalf("could not subscribe to carol's"+ - " events: %v", err) - } - defer carolEvents.Cancel() - - // Send multiple payments, as specified by the test to test incrementing - // of htlc ids. 
- for i := 0; i < iterations; i++ { - // We'll start off by making a payment from - // Alice -> Bob -> Carol. - htlc, hops := n.sendThreeHopPayment(t) - - alice, bob, carol := getEvents( - channels, uint64(i), now, htlc, hops, - ) - - checkHtlcEvents(t, aliceEvents.Updates(), alice) - checkHtlcEvents(t, bobEvents.Updates(), bob) - checkHtlcEvents(t, carolEvents.Updates(), carol) - - } -} - -// checkHtlcEvents checks that a subscription has the set of htlc events -// we expect it to have. -func checkHtlcEvents(t *testing.T, events <-chan interface{}, - expectedEvents []interface{}) { - - t.Helper() - - for _, expected := range expectedEvents { - select { - case event := <-events: - if !reflect.DeepEqual(event, expected) { - t.Fatalf("expected %v, got: %v", expected, - event) - } - - case <-time.After(5 * time.Second): - t.Fatalf("expected event: %v", expected) - } - } -} - -// sendThreeHopPayment is a helper function which sends a payment over -// Alice -> Bob -> Carol in a three hop network and returns Alice's first htlc -// and the remainder of the hops. -func (n *threeHopNetwork) sendThreeHopPayment(t *testing.T) (*lnwire.UpdateAddHTLC, - []*hop.Payload) { - - amount := lnwire.NewMSatFromSatoshis(btcutil.UnitsPerCoin()) - - htlcAmt, totalTimelock, hops := generateHops(amount, testStartingHeight, - n.firstBobChannelLink, n.carolChannelLink) - blob, err := generateRoute(hops...) 
- if err != nil { - t.Fatal(err) - } - invoice, htlc, pid, err := generatePayment( - amount, htlcAmt, totalTimelock, blob, - ) - if err != nil { - t.Fatal(err) - } - - err = n.carolServer.registry.AddInvoice(*invoice, htlc.PaymentHash) - if err != nil { - t.Fatalf("unable to add invoice in carol registry: %v", err) - } - - if err := n.aliceServer.htlcSwitch.SendHTLC( - n.firstBobChannelLink.ShortChanID(), pid, htlc, - ); err != nil { - t.Fatalf("could not send htlc") - } - - return htlc, hops -} - -// getThreeHopEvents gets the set of htlc events that we expect for a payment -// from Alice -> Bob -> Carol. If a non-nil link error is provided, the set -// of events will fail on Bob's outgoing link. -func getThreeHopEvents(channels *clusterChannels, htlcID uint64, - ts time.Time, htlc *lnwire.UpdateAddHTLC, hops []*hop.Payload, - linkError *LinkError) ([]interface{}, []interface{}, []interface{}) { - - aliceKey := HtlcKey{ - IncomingCircuit: zeroCircuit, - OutgoingCircuit: channeldb.CircuitKey{ - ChanID: channels.aliceToBob.ShortChanID(), - HtlcID: htlcID, - }, - } - - // Alice always needs a forwarding event because she initiates the - // send. - aliceEvents := []interface{}{ - &ForwardingEvent{ - HtlcKey: aliceKey, - HtlcInfo: HtlcInfo{ - OutgoingTimeLock: htlc.Expiry, - OutgoingAmt: htlc.Amount, - }, - HtlcEventType: HtlcEventTypeSend, - Timestamp: ts, - }, - } - - bobKey := HtlcKey{ - IncomingCircuit: channeldb.CircuitKey{ - ChanID: channels.bobToAlice.ShortChanID(), - HtlcID: htlcID, - }, - OutgoingCircuit: channeldb.CircuitKey{ - ChanID: channels.bobToCarol.ShortChanID(), - HtlcID: htlcID, - }, - } - - bobInfo := HtlcInfo{ - IncomingTimeLock: htlc.Expiry, - IncomingAmt: htlc.Amount, - OutgoingTimeLock: hops[1].FwdInfo.OutgoingCTLV, - OutgoingAmt: hops[1].FwdInfo.AmountToForward, - } - - // If we expect the payment to fail, we add failures for alice and - // bob, and no events for carol because the payment never reaches her. 
- if linkError != nil { - aliceEvents = append(aliceEvents, - &ForwardingFailEvent{ - HtlcKey: aliceKey, - HtlcEventType: HtlcEventTypeSend, - Timestamp: ts, - }, - ) - - bobEvents := []interface{}{ - &LinkFailEvent{ - HtlcKey: bobKey, - HtlcInfo: bobInfo, - HtlcEventType: HtlcEventTypeForward, - LinkError: linkError, - Incoming: false, - Timestamp: ts, - }, - } - - return aliceEvents, bobEvents, nil - } - - // If we want to get events for a successful payment, we add a settle - // for alice, a forward and settle for bob and a receive settle for - // carol. - aliceEvents = append( - aliceEvents, - &SettleEvent{ - HtlcKey: aliceKey, - HtlcEventType: HtlcEventTypeSend, - Timestamp: ts, - }, - ) - - bobEvents := []interface{}{ - &ForwardingEvent{ - HtlcKey: bobKey, - HtlcInfo: bobInfo, - HtlcEventType: HtlcEventTypeForward, - Timestamp: ts, - }, - &SettleEvent{ - HtlcKey: bobKey, - HtlcEventType: HtlcEventTypeForward, - Timestamp: ts, - }, - } - - carolEvents := []interface{}{ - &SettleEvent{ - HtlcKey: HtlcKey{ - IncomingCircuit: channeldb.CircuitKey{ - ChanID: channels.carolToBob.ShortChanID(), - HtlcID: htlcID, - }, - OutgoingCircuit: zeroCircuit, - }, - HtlcEventType: HtlcEventTypeReceive, - Timestamp: ts, - }, - } - - return aliceEvents, bobEvents, carolEvents -} - -type mockForwardInterceptor struct { - intercepted InterceptedForward -} - -func (m *mockForwardInterceptor) InterceptForwardHtlc(intercepted InterceptedForward) bool { - - m.intercepted = intercepted - return true -} - -func (m *mockForwardInterceptor) settle(preimage lntypes.Preimage) er.R { - return m.intercepted.Settle(preimage) -} - -func (m *mockForwardInterceptor) fail() er.R { - return m.intercepted.Fail() -} - -func (m *mockForwardInterceptor) resume() er.R { - return m.intercepted.Resume() -} - -func assertNumCircuits(t *testing.T, s *Switch, pending, opened int) { - if s.circuits.NumPending() != pending { - t.Fatal("wrong amount of half circuits") - } - if s.circuits.NumOpen() != opened { - 
t.Fatal("wrong amount of circuits") - } -} - -func assertOutgoingLinkReceive(t *testing.T, targetLink *mockChannelLink, - expectReceive bool) { - - // Pull packet from targetLink link. - select { - case packet := <-targetLink.packets: - if !expectReceive { - t.Fatal("forward was intercepted, shouldn't land at bob link") - } else if err := targetLink.completeCircuit(packet); err != nil { - t.Fatalf("unable to complete payment circuit: %v", err) - } - - case <-time.After(time.Second): - if expectReceive { - t.Fatal("request was not propagated to destination") - } - } -} - -func TestSwitchHoldForward(t *testing.T) { - t.Parallel() - - chanID1, chanID2, aliceChanID, bobChanID := genIDs() - - alicePeer, err := newMockServer( - t, "alice", testStartingHeight, nil, testDefaultDelta, - ) - if err != nil { - t.Fatalf("unable to create alice server: %v", err) - } - bobPeer, err := newMockServer( - t, "bob", testStartingHeight, nil, testDefaultDelta, - ) - if err != nil { - t.Fatalf("unable to create bob server: %v", err) - } - - tempPath, errr := ioutil.TempDir("", "circuitdb") - if errr != nil { - t.Fatalf("unable to temporary path: %v", errr) - } - - cdb, err := channeldb.Open(tempPath) - if err != nil { - t.Fatalf("unable to open channeldb: %v", err) - } - - s, err := initSwitchWithDB(testStartingHeight, cdb) - if err != nil { - t.Fatalf("unable to init switch: %v", err) - } - if err := s.Start(); err != nil { - t.Fatalf("unable to start switch: %v", err) - } - - defer func() { - if err := s.Stop(); err != nil { - t.Fatalf(err.String()) - } - }() - - aliceChannelLink := newMockChannelLink( - s, chanID1, aliceChanID, alicePeer, true, - ) - bobChannelLink := newMockChannelLink( - s, chanID2, bobChanID, bobPeer, true, - ) - if err := s.AddLink(aliceChannelLink); err != nil { - t.Fatalf("unable to add alice link: %v", err) - } - if err := s.AddLink(bobChannelLink); err != nil { - t.Fatalf("unable to add bob link: %v", err) - } - - // Create request which should be forwarded 
from Alice channel link to - // bob channel link. - preimage := [sha256.Size]byte{1} - rhash := sha256.Sum256(preimage[:]) - ogPacket := &htlcPacket{ - incomingChanID: aliceChannelLink.ShortChanID(), - incomingHTLCID: 0, - outgoingChanID: bobChannelLink.ShortChanID(), - obfuscator: NewMockObfuscator(), - htlc: &lnwire.UpdateAddHTLC{ - PaymentHash: rhash, - Amount: 1, - }, - } - - forwardInterceptor := &mockForwardInterceptor{} - switchForwardInterceptor := NewInterceptableSwitch(s) - switchForwardInterceptor.SetInterceptor(forwardInterceptor.InterceptForwardHtlc) - linkQuit := make(chan struct{}) - - // Test resume a hold forward - assertNumCircuits(t, s, 0, 0) - if err := switchForwardInterceptor.ForwardPackets(linkQuit, ogPacket); err != nil { - t.Fatalf("can't forward htlc packet: %v", err) - } - assertNumCircuits(t, s, 0, 0) - assertOutgoingLinkReceive(t, bobChannelLink, false) - - if err := forwardInterceptor.resume(); err != nil { - t.Fatalf("failed to resume forward") - } - assertOutgoingLinkReceive(t, bobChannelLink, true) - assertNumCircuits(t, s, 1, 1) - - // settling the htlc to close the circuit. 
- settle := &htlcPacket{ - outgoingChanID: bobChannelLink.ShortChanID(), - outgoingHTLCID: 0, - amount: 1, - htlc: &lnwire.UpdateFulfillHTLC{ - PaymentPreimage: preimage, - }, - } - if err := switchForwardInterceptor.ForwardPackets(linkQuit, settle); err != nil { - t.Fatalf("can't forward htlc packet: %v", err) - } - assertOutgoingLinkReceive(t, aliceChannelLink, true) - assertNumCircuits(t, s, 0, 0) - - // Test failing a hold forward - if err := switchForwardInterceptor.ForwardPackets(linkQuit, ogPacket); err != nil { - t.Fatalf("can't forward htlc packet: %v", err) - } - assertNumCircuits(t, s, 0, 0) - assertOutgoingLinkReceive(t, bobChannelLink, false) - - if err := forwardInterceptor.fail(); err != nil { - t.Fatalf("failed to cancel forward %v", err) - } - assertOutgoingLinkReceive(t, bobChannelLink, false) - assertOutgoingLinkReceive(t, aliceChannelLink, true) - assertNumCircuits(t, s, 0, 0) - - // Test settling a hold forward - if err := switchForwardInterceptor.ForwardPackets(linkQuit, ogPacket); err != nil { - t.Fatalf("can't forward htlc packet: %v", err) - } - assertNumCircuits(t, s, 0, 0) - assertOutgoingLinkReceive(t, bobChannelLink, false) - - if err := forwardInterceptor.settle(preimage); err != nil { - t.Fatal("failed to cancel forward") - } - assertOutgoingLinkReceive(t, bobChannelLink, false) - assertOutgoingLinkReceive(t, aliceChannelLink, true) - assertNumCircuits(t, s, 0, 0) -} diff --git a/lnd/htlcswitch/test_utils.go b/lnd/htlcswitch/test_utils.go deleted file mode 100644 index 2a704a86..00000000 --- a/lnd/htlcswitch/test_utils.go +++ /dev/null @@ -1,1425 +0,0 @@ -package htlcswitch - -import ( - "bytes" - crand "crypto/rand" - "crypto/sha256" - "encoding/binary" - "io/ioutil" - "math/big" - "net" - "os" - "runtime" - "runtime/pprof" - "sync/atomic" - "testing" - "time" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - sphinx 
"github.com/pkt-cash/pktd/lightning-onion" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" - "github.com/pkt-cash/pktd/lnd/contractcourt" - "github.com/pkt-cash/pktd/lnd/htlcswitch/hop" - "github.com/pkt-cash/pktd/lnd/input" - "github.com/pkt-cash/pktd/lnd/keychain" - "github.com/pkt-cash/pktd/lnd/lnpeer" - "github.com/pkt-cash/pktd/lnd/lntest/mock" - "github.com/pkt-cash/pktd/lnd/lntest/wait" - "github.com/pkt-cash/pktd/lnd/lntypes" - "github.com/pkt-cash/pktd/lnd/lnwallet" - "github.com/pkt-cash/pktd/lnd/lnwallet/chainfee" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/shachain" - "github.com/pkt-cash/pktd/lnd/ticker" - "github.com/pkt-cash/pktd/wire" -) - -var ( - alicePrivKey = []byte("alice priv key") - bobPrivKey = []byte("bob priv key") - carolPrivKey = []byte("carol priv key") - - testSig = &btcec.Signature{ - R: new(big.Int), - S: new(big.Int), - } - wireSig, _ = lnwire.NewSigFromSignature(testSig) - - _, _ = testSig.R.SetString("6372440660162918006277497454296753625158993"+ - "5445068131219452686511677818569431", 10) - _, _ = testSig.S.SetString("1880105606924982582529128710493133386286603"+ - "3135609736119018462340006816851118", 10) - - // testTx is used as the default funding txn for single-funder channels. 
- testTx = &wire.MsgTx{ - Version: 1, - TxIn: []*wire.TxIn{ - { - PreviousOutPoint: wire.OutPoint{ - Hash: chainhash.Hash{}, - Index: 0xffffffff, - }, - SignatureScript: []byte{0x04, 0x31, 0xdc, 0x00, 0x1b, 0x01, 0x62}, - Sequence: 0xffffffff, - }, - }, - TxOut: []*wire.TxOut{ - { - Value: 5000000000, - PkScript: []byte{ - 0x41, // OP_DATA_65 - 0x04, 0xd6, 0x4b, 0xdf, 0xd0, 0x9e, 0xb1, 0xc5, - 0xfe, 0x29, 0x5a, 0xbd, 0xeb, 0x1d, 0xca, 0x42, - 0x81, 0xbe, 0x98, 0x8e, 0x2d, 0xa0, 0xb6, 0xc1, - 0xc6, 0xa5, 0x9d, 0xc2, 0x26, 0xc2, 0x86, 0x24, - 0xe1, 0x81, 0x75, 0xe8, 0x51, 0xc9, 0x6b, 0x97, - 0x3d, 0x81, 0xb0, 0x1c, 0xc3, 0x1f, 0x04, 0x78, - 0x34, 0xbc, 0x06, 0xd6, 0xd6, 0xed, 0xf6, 0x20, - 0xd1, 0x84, 0x24, 0x1a, 0x6a, 0xed, 0x8b, 0x63, - 0xa6, // 65-byte signature - 0xac, // OP_CHECKSIG - }, - }, - }, - LockTime: 5, - } - - testBatchTimeout = 50 * time.Millisecond -) - -var idSeqNum uint64 - -// genID generates a unique tuple to identify a test channel. -func genID() (lnwire.ChannelID, lnwire.ShortChannelID) { - id := atomic.AddUint64(&idSeqNum, 1) - - var scratch [8]byte - - binary.BigEndian.PutUint64(scratch[:], id) - hash1, _ := chainhash.NewHash(bytes.Repeat(scratch[:], 4)) - - chanPoint1 := wire.NewOutPoint(hash1, uint32(id)) - chanID1 := lnwire.NewChanIDFromOutPoint(chanPoint1) - aliceChanID := lnwire.NewShortChanIDFromInt(id) - - return chanID1, aliceChanID -} - -// genIDs generates ids for two test channels. -func genIDs() (lnwire.ChannelID, lnwire.ChannelID, lnwire.ShortChannelID, - lnwire.ShortChannelID) { - - chanID1, aliceChanID := genID() - chanID2, bobChanID := genID() - - return chanID1, chanID2, aliceChanID, bobChanID -} - -// mockGetChanUpdateMessage helper function which returns topology update of -// the channel -func mockGetChanUpdateMessage(cid lnwire.ShortChannelID) (*lnwire.ChannelUpdate, er.R) { - return &lnwire.ChannelUpdate{ - Signature: wireSig, - }, nil -} - -// generateRandomBytes returns securely generated random bytes. 
-// It will return an error if the system's secure random -// number generator fails to function correctly, in which -// case the caller should not continue. -func generateRandomBytes(n int) ([]byte, er.R) { - b := make([]byte, n) - - // TODO(roasbeef): should use counter in tests (atomic) rather than - // this - - _, err := crand.Read(b) - // Note that Err == nil only if we read len(b) bytes. - if err != nil { - return nil, er.E(err) - } - - return b, nil -} - -type testLightningChannel struct { - channel *lnwallet.LightningChannel - restore func() (*lnwallet.LightningChannel, er.R) -} - -// createTestChannel creates the channel and returns our and remote channels -// representations. -// -// TODO(roasbeef): need to factor out, similar func re-used in many parts of codebase -func createTestChannel(alicePrivKey, bobPrivKey []byte, - aliceAmount, bobAmount, aliceReserve, bobReserve btcutil.Amount, - chanID lnwire.ShortChannelID) (*testLightningChannel, - *testLightningChannel, func(), er.R) { - - aliceKeyPriv, aliceKeyPub := btcec.PrivKeyFromBytes(btcec.S256(), alicePrivKey) - bobKeyPriv, bobKeyPub := btcec.PrivKeyFromBytes(btcec.S256(), bobPrivKey) - - channelCapacity := aliceAmount + bobAmount - csvTimeoutAlice := uint32(5) - csvTimeoutBob := uint32(4) - - aliceConstraints := &channeldb.ChannelConstraints{ - DustLimit: btcutil.Amount(200), - MaxPendingAmount: lnwire.NewMSatFromSatoshis( - channelCapacity), - ChanReserve: aliceReserve, - MinHTLC: 0, - MaxAcceptedHtlcs: input.MaxHTLCNumber / 2, - CsvDelay: uint16(csvTimeoutAlice), - } - - bobConstraints := &channeldb.ChannelConstraints{ - DustLimit: btcutil.Amount(800), - MaxPendingAmount: lnwire.NewMSatFromSatoshis( - channelCapacity), - ChanReserve: bobReserve, - MinHTLC: 0, - MaxAcceptedHtlcs: input.MaxHTLCNumber / 2, - CsvDelay: uint16(csvTimeoutBob), - } - - var hash [sha256.Size]byte - randomSeed, err := generateRandomBytes(sha256.Size) - if err != nil { - return nil, nil, nil, err - } - copy(hash[:], 
randomSeed) - - prevOut := &wire.OutPoint{ - Hash: chainhash.Hash(hash), - Index: 0, - } - fundingTxIn := wire.NewTxIn(prevOut, nil, nil) - - aliceCfg := channeldb.ChannelConfig{ - ChannelConstraints: *aliceConstraints, - MultiSigKey: keychain.KeyDescriptor{ - PubKey: aliceKeyPub, - }, - RevocationBasePoint: keychain.KeyDescriptor{ - PubKey: aliceKeyPub, - }, - PaymentBasePoint: keychain.KeyDescriptor{ - PubKey: aliceKeyPub, - }, - DelayBasePoint: keychain.KeyDescriptor{ - PubKey: aliceKeyPub, - }, - HtlcBasePoint: keychain.KeyDescriptor{ - PubKey: aliceKeyPub, - }, - } - bobCfg := channeldb.ChannelConfig{ - ChannelConstraints: *bobConstraints, - MultiSigKey: keychain.KeyDescriptor{ - PubKey: bobKeyPub, - }, - RevocationBasePoint: keychain.KeyDescriptor{ - PubKey: bobKeyPub, - }, - PaymentBasePoint: keychain.KeyDescriptor{ - PubKey: bobKeyPub, - }, - DelayBasePoint: keychain.KeyDescriptor{ - PubKey: bobKeyPub, - }, - HtlcBasePoint: keychain.KeyDescriptor{ - PubKey: bobKeyPub, - }, - } - - bobRoot, err := chainhash.NewHash(bobKeyPriv.Serialize()) - if err != nil { - return nil, nil, nil, err - } - bobPreimageProducer := shachain.NewRevocationProducer(*bobRoot) - bobFirstRevoke, err := bobPreimageProducer.AtIndex(0) - if err != nil { - return nil, nil, nil, err - } - bobCommitPoint := input.ComputeCommitmentPoint(bobFirstRevoke[:]) - - aliceRoot, err := chainhash.NewHash(aliceKeyPriv.Serialize()) - if err != nil { - return nil, nil, nil, err - } - alicePreimageProducer := shachain.NewRevocationProducer(*aliceRoot) - aliceFirstRevoke, err := alicePreimageProducer.AtIndex(0) - if err != nil { - return nil, nil, nil, err - } - aliceCommitPoint := input.ComputeCommitmentPoint(aliceFirstRevoke[:]) - - aliceCommitTx, bobCommitTx, err := lnwallet.CreateCommitmentTxns( - aliceAmount, bobAmount, &aliceCfg, &bobCfg, aliceCommitPoint, - bobCommitPoint, *fundingTxIn, channeldb.SingleFunderTweaklessBit, - ) - if err != nil { - return nil, nil, nil, err - } - - alicePath, errr := 
ioutil.TempDir("", "alicedb") - if errr != nil { - return nil, nil, nil, er.E(errr) - } - - dbAlice, err := channeldb.Open(alicePath) - if err != nil { - return nil, nil, nil, err - } - - bobPath, errr := ioutil.TempDir("", "bobdb") - if errr != nil { - return nil, nil, nil, er.E(errr) - } - - dbBob, err := channeldb.Open(bobPath) - if err != nil { - return nil, nil, nil, err - } - - estimator := chainfee.NewStaticEstimator(6000, 0) - feePerKw, err := estimator.EstimateFeePerKW(1) - if err != nil { - return nil, nil, nil, err - } - commitFee := feePerKw.FeeForWeight(724) - - const broadcastHeight = 1 - bobAddr := &net.TCPAddr{ - IP: net.ParseIP("127.0.0.1"), - Port: 18555, - } - - aliceAddr := &net.TCPAddr{ - IP: net.ParseIP("127.0.0.1"), - Port: 18556, - } - - aliceCommit := channeldb.ChannelCommitment{ - CommitHeight: 0, - LocalBalance: lnwire.NewMSatFromSatoshis(aliceAmount - commitFee), - RemoteBalance: lnwire.NewMSatFromSatoshis(bobAmount), - CommitFee: commitFee, - FeePerKw: btcutil.Amount(feePerKw), - CommitTx: aliceCommitTx, - CommitSig: bytes.Repeat([]byte{1}, 71), - } - bobCommit := channeldb.ChannelCommitment{ - CommitHeight: 0, - LocalBalance: lnwire.NewMSatFromSatoshis(bobAmount), - RemoteBalance: lnwire.NewMSatFromSatoshis(aliceAmount - commitFee), - CommitFee: commitFee, - FeePerKw: btcutil.Amount(feePerKw), - CommitTx: bobCommitTx, - CommitSig: bytes.Repeat([]byte{1}, 71), - } - - aliceChannelState := &channeldb.OpenChannel{ - LocalChanCfg: aliceCfg, - RemoteChanCfg: bobCfg, - IdentityPub: aliceKeyPub, - FundingOutpoint: *prevOut, - ChanType: channeldb.SingleFunderTweaklessBit, - IsInitiator: true, - Capacity: channelCapacity, - RemoteCurrentRevocation: bobCommitPoint, - RevocationProducer: alicePreimageProducer, - RevocationStore: shachain.NewRevocationStore(), - LocalCommitment: aliceCommit, - RemoteCommitment: aliceCommit, - ShortChannelID: chanID, - Db: dbAlice, - Packager: channeldb.NewChannelPackager(chanID), - FundingTxn: testTx, - } - - 
bobChannelState := &channeldb.OpenChannel{ - LocalChanCfg: bobCfg, - RemoteChanCfg: aliceCfg, - IdentityPub: bobKeyPub, - FundingOutpoint: *prevOut, - ChanType: channeldb.SingleFunderTweaklessBit, - IsInitiator: false, - Capacity: channelCapacity, - RemoteCurrentRevocation: aliceCommitPoint, - RevocationProducer: bobPreimageProducer, - RevocationStore: shachain.NewRevocationStore(), - LocalCommitment: bobCommit, - RemoteCommitment: bobCommit, - ShortChannelID: chanID, - Db: dbBob, - Packager: channeldb.NewChannelPackager(chanID), - } - - if err := aliceChannelState.SyncPending(bobAddr, broadcastHeight); err != nil { - return nil, nil, nil, err - } - - if err := bobChannelState.SyncPending(aliceAddr, broadcastHeight); err != nil { - return nil, nil, nil, err - } - - cleanUpFunc := func() { - dbAlice.Close() - dbBob.Close() - os.RemoveAll(bobPath) - os.RemoveAll(alicePath) - } - - aliceSigner := &mock.SingleSigner{Privkey: aliceKeyPriv} - bobSigner := &mock.SingleSigner{Privkey: bobKeyPriv} - - alicePool := lnwallet.NewSigPool(runtime.NumCPU(), aliceSigner) - channelAlice, err := lnwallet.NewLightningChannel( - aliceSigner, aliceChannelState, alicePool, - ) - if err != nil { - return nil, nil, nil, err - } - alicePool.Start() - - bobPool := lnwallet.NewSigPool(runtime.NumCPU(), bobSigner) - channelBob, err := lnwallet.NewLightningChannel( - bobSigner, bobChannelState, bobPool, - ) - if err != nil { - return nil, nil, nil, err - } - bobPool.Start() - - // Now that the channel are open, simulate the start of a session by - // having Alice and Bob extend their revocation windows to each other. 
- aliceNextRevoke, err := channelAlice.NextRevocationKey() - if err != nil { - return nil, nil, nil, err - } - if err := channelBob.InitNextRevocation(aliceNextRevoke); err != nil { - return nil, nil, nil, err - } - - bobNextRevoke, err := channelBob.NextRevocationKey() - if err != nil { - return nil, nil, nil, err - } - if err := channelAlice.InitNextRevocation(bobNextRevoke); err != nil { - return nil, nil, nil, err - } - - restoreAlice := func() (*lnwallet.LightningChannel, er.R) { - aliceStoredChannels, err := dbAlice.FetchOpenChannels(aliceKeyPub) - switch { - case err == nil: - case kvdb.ErrDatabaseNotOpen.Is(err): - dbAlice, err = channeldb.Open(dbAlice.Path()) - if err != nil { - return nil, er.Errorf("unable to reopen alice "+ - "db: %v", err) - } - - aliceStoredChannels, err = dbAlice.FetchOpenChannels(aliceKeyPub) - if err != nil { - return nil, er.Errorf("unable to fetch alice "+ - "channel: %v", err) - } - default: - return nil, er.Errorf("unable to fetch alice channel: "+ - "%v", err) - } - - var aliceStoredChannel *channeldb.OpenChannel - for _, channel := range aliceStoredChannels { - if channel.FundingOutpoint.String() == prevOut.String() { - aliceStoredChannel = channel - break - } - } - - if aliceStoredChannel == nil { - return nil, er.New("unable to find stored alice channel") - } - - newAliceChannel, errr := lnwallet.NewLightningChannel( - aliceSigner, aliceStoredChannel, alicePool, - ) - if errr != nil { - return nil, er.Errorf("unable to create new channel: %v", - errr) - } - - return newAliceChannel, nil - } - - restoreBob := func() (*lnwallet.LightningChannel, er.R) { - bobStoredChannels, err := dbBob.FetchOpenChannels(bobKeyPub) - switch { - case err == nil: - case kvdb.ErrDatabaseNotOpen.Is(err): - dbBob, errr := channeldb.Open(dbBob.Path()) - if errr != nil { - return nil, er.Errorf("unable to reopen bob "+ - "db: %v", errr) - } - - bobStoredChannels, err = dbBob.FetchOpenChannels(bobKeyPub) - if err != nil { - return nil, 
er.Errorf("unable to fetch bob "+ - "channel: %v", err) - } - default: - return nil, er.Errorf("unable to fetch bob channel: "+ - "%v", err) - } - - var bobStoredChannel *channeldb.OpenChannel - for _, channel := range bobStoredChannels { - if channel.FundingOutpoint.String() == prevOut.String() { - bobStoredChannel = channel - break - } - } - - if bobStoredChannel == nil { - return nil, er.New("unable to find stored bob channel") - } - - newBobChannel, errr := lnwallet.NewLightningChannel( - bobSigner, bobStoredChannel, bobPool, - ) - if errr != nil { - return nil, er.Errorf("unable to create new channel: %v", - errr) - } - return newBobChannel, nil - } - - testLightningChannelAlice := &testLightningChannel{ - channel: channelAlice, - restore: restoreAlice, - } - - testLightningChannelBob := &testLightningChannel{ - channel: channelBob, - restore: restoreBob, - } - - return testLightningChannelAlice, testLightningChannelBob, cleanUpFunc, - nil -} - -// getChanID retrieves the channel point from an lnnwire message. -func getChanID(msg lnwire.Message) (lnwire.ChannelID, er.R) { - var chanID lnwire.ChannelID - switch msg := msg.(type) { - case *lnwire.UpdateAddHTLC: - chanID = msg.ChanID - case *lnwire.UpdateFulfillHTLC: - chanID = msg.ChanID - case *lnwire.UpdateFailHTLC: - chanID = msg.ChanID - case *lnwire.RevokeAndAck: - chanID = msg.ChanID - case *lnwire.CommitSig: - chanID = msg.ChanID - case *lnwire.ChannelReestablish: - chanID = msg.ChanID - case *lnwire.FundingLocked: - chanID = msg.ChanID - case *lnwire.UpdateFee: - chanID = msg.ChanID - default: - return chanID, er.Errorf("unknown type: %T", msg) - } - - return chanID, nil -} - -// generateHoldPayment generates the htlc add request by given path blob and -// invoice which should be added by destination peer. 
-func generatePaymentWithPreimage(invoiceAmt, htlcAmt lnwire.MilliSatoshi, - timelock uint32, blob [lnwire.OnionPacketSize]byte, - preimage *lntypes.Preimage, rhash, payAddr [32]byte) ( - *channeldb.Invoice, *lnwire.UpdateAddHTLC, uint64, er.R) { - - // Create the db invoice. Normally the payment requests needs to be set, - // because it is decoded in InvoiceRegistry to obtain the cltv expiry. - // But because the mock registry used in tests is mocking the decode - // step and always returning the value of testInvoiceCltvExpiry, we - // don't need to bother here with creating and signing a payment - // request. - - invoice := &channeldb.Invoice{ - CreationDate: time.Now(), - Terms: channeldb.ContractTerm{ - FinalCltvDelta: testInvoiceCltvExpiry, - Value: invoiceAmt, - PaymentPreimage: preimage, - PaymentAddr: payAddr, - Features: lnwire.NewFeatureVector( - nil, lnwire.Features, - ), - }, - HodlInvoice: preimage == nil, - } - - htlc := &lnwire.UpdateAddHTLC{ - PaymentHash: rhash, - Amount: htlcAmt, - Expiry: timelock, - OnionBlob: blob, - } - - pid, err := generateRandomBytes(8) - if err != nil { - return nil, nil, 0, err - } - paymentID := binary.BigEndian.Uint64(pid) - - return invoice, htlc, paymentID, nil -} - -// generatePayment generates the htlc add request by given path blob and -// invoice which should be added by destination peer. 
-func generatePayment(invoiceAmt, htlcAmt lnwire.MilliSatoshi, timelock uint32, - blob [lnwire.OnionPacketSize]byte) (*channeldb.Invoice, - *lnwire.UpdateAddHTLC, uint64, er.R) { - - var preimage lntypes.Preimage - r, err := generateRandomBytes(sha256.Size) - if err != nil { - return nil, nil, 0, err - } - copy(preimage[:], r) - - rhash := sha256.Sum256(preimage[:]) - - var payAddr [sha256.Size]byte - r, err = generateRandomBytes(sha256.Size) - if err != nil { - return nil, nil, 0, err - } - copy(payAddr[:], r) - - return generatePaymentWithPreimage( - invoiceAmt, htlcAmt, timelock, blob, &preimage, rhash, payAddr, - ) -} - -// generateRoute generates the path blob by given array of peers. -func generateRoute(hops ...*hop.Payload) ( - [lnwire.OnionPacketSize]byte, er.R) { - - var blob [lnwire.OnionPacketSize]byte - if len(hops) == 0 { - return blob, er.New("empty path") - } - - iterator := newMockHopIterator(hops...) - - w := bytes.NewBuffer(blob[0:0]) - if err := iterator.EncodeNextHop(w); err != nil { - return blob, err - } - - return blob, nil - -} - -// threeHopNetwork is used for managing the created cluster of 3 hops. -type threeHopNetwork struct { - aliceServer *mockServer - aliceChannelLink *channelLink - aliceOnionDecoder *mockIteratorDecoder - - bobServer *mockServer - firstBobChannelLink *channelLink - secondBobChannelLink *channelLink - bobOnionDecoder *mockIteratorDecoder - - carolServer *mockServer - carolChannelLink *channelLink - carolOnionDecoder *mockIteratorDecoder - - hopNetwork -} - -// generateHops creates the per hop payload, the total amount to be sent, and -// also the time lock value needed to route an HTLC with the target amount over -// the specified path. 
-func generateHops(payAmt lnwire.MilliSatoshi, startingHeight uint32, - path ...*channelLink) (lnwire.MilliSatoshi, uint32, []*hop.Payload) { - - totalTimelock := startingHeight - runningAmt := payAmt - - hops := make([]*hop.Payload, len(path)) - for i := len(path) - 1; i >= 0; i-- { - // If this is the last hop, then the next hop is the special - // "exit node". Otherwise, we look to the "prior" hop. - nextHop := hop.Exit - if i != len(path)-1 { - nextHop = path[i+1].channel.ShortChanID() - } - - var timeLock uint32 - // If this is the last, hop, then the time lock will be their - // specified delta policy plus our starting height. - if i == len(path)-1 { - totalTimelock += testInvoiceCltvExpiry - timeLock = totalTimelock - } else { - // Otherwise, the outgoing time lock should be the - // incoming timelock minus their specified delta. - delta := path[i+1].cfg.FwrdingPolicy.TimeLockDelta - totalTimelock += delta - timeLock = totalTimelock - delta - } - - // Finally, we'll need to calculate the amount to forward. For - // the last hop, it's just the payment amount. 
- amount := payAmt - if i != len(path)-1 { - prevHop := hops[i+1] - prevAmount := prevHop.ForwardingInfo().AmountToForward - - fee := ExpectedFee(path[i].cfg.FwrdingPolicy, prevAmount) - runningAmt += fee - - // Otherwise, for a node to forward an HTLC, then - // following inequality most hold true: - // * amt_in - fee >= amt_to_forward - amount = runningAmt - fee - } - - var nextHopBytes [8]byte - binary.BigEndian.PutUint64(nextHopBytes[:], nextHop.ToUint64()) - - hops[i] = hop.NewLegacyPayload(&sphinx.HopData{ - Realm: [1]byte{}, // hop.BitcoinNetwork - NextAddress: nextHopBytes, - ForwardAmount: uint64(amount), - OutgoingCltv: timeLock, - }) - } - - return runningAmt, totalTimelock, hops -} - -type paymentResponse struct { - rhash lntypes.Hash - err chan er.R -} - -func (r *paymentResponse) Wait(d time.Duration) (lntypes.Hash, er.R) { - return r.rhash, waitForPaymentResult(r.err, d) -} - -// waitForPaymentResult waits for either an error to be received on c or a -// timeout. -func waitForPaymentResult(c chan er.R, d time.Duration) er.R { - select { - case err := <-c: - close(c) - return err - case <-time.After(d): - return er.New("htlc was not settled in time") - } -} - -// waitForPayFuncResult executes the given function and waits for a result with -// a timeout. -func waitForPayFuncResult(payFunc func() er.R, d time.Duration) er.R { - errChan := make(chan er.R) - go func() { - errChan <- payFunc() - }() - - return waitForPaymentResult(errChan, d) -} - -// makePayment takes the destination node and amount as input, sends the -// payment and returns the error channel to wait for error to be received and -// invoice in order to check its status after the payment finished. 
-// -// With this function you can send payments: -// * from Alice to Bob -// * from Alice to Carol through the Bob -// * from Alice to some another peer through the Bob -func makePayment(sendingPeer, receivingPeer lnpeer.Peer, - firstHop lnwire.ShortChannelID, hops []*hop.Payload, - invoiceAmt, htlcAmt lnwire.MilliSatoshi, - timelock uint32) *paymentResponse { - - paymentErr := make(chan er.R, 1) - var rhash lntypes.Hash - - invoice, payFunc, err := preparePayment(sendingPeer, receivingPeer, - firstHop, hops, invoiceAmt, htlcAmt, timelock, - ) - if err != nil { - paymentErr <- err - return &paymentResponse{ - rhash: rhash, - err: paymentErr, - } - } - - rhash = invoice.Terms.PaymentPreimage.Hash() - - // Send payment and expose err channel. - go func() { - paymentErr <- payFunc() - }() - - return &paymentResponse{ - rhash: rhash, - err: paymentErr, - } -} - -// preparePayment creates an invoice at the receivingPeer and returns a function -// that, when called, launches the payment from the sendingPeer. -func preparePayment(sendingPeer, receivingPeer lnpeer.Peer, - firstHop lnwire.ShortChannelID, hops []*hop.Payload, - invoiceAmt, htlcAmt lnwire.MilliSatoshi, - timelock uint32) (*channeldb.Invoice, func() er.R, er.R) { - - sender := sendingPeer.(*mockServer) - receiver := receivingPeer.(*mockServer) - - // Generate route convert it to blob, and return next destination for - // htlc add request. - blob, err := generateRoute(hops...) - if err != nil { - return nil, nil, err - } - - // Generate payment: invoice and htlc. - invoice, htlc, pid, err := generatePayment( - invoiceAmt, htlcAmt, timelock, blob, - ) - if err != nil { - return nil, nil, err - } - - // Check who is last in the route and add invoice to server registry. - hash := invoice.Terms.PaymentPreimage.Hash() - if err := receiver.registry.AddInvoice(*invoice, hash); err != nil { - return nil, nil, err - } - - // Send payment and expose err channel. 
- return invoice, func() er.R { - err := sender.htlcSwitch.SendHTLC( - firstHop, pid, htlc, - ) - if err != nil { - return err - } - resultChan, err := sender.htlcSwitch.GetPaymentResult( - pid, hash, newMockDeobfuscator(), - ) - if err != nil { - return err - } - - result, ok := <-resultChan - if !ok { - return er.Errorf("shutting down") - } - - if result.Error != nil { - return result.Error - } - - return nil - }, nil -} - -// start starts the three hop network alice,bob,carol servers. -func (n *threeHopNetwork) start() er.R { - if err := n.aliceServer.Start(); err != nil { - return err - } - if err := n.bobServer.Start(); err != nil { - return err - } - if err := n.carolServer.Start(); err != nil { - return err - } - - return waitLinksEligible(map[string]*channelLink{ - "alice": n.aliceChannelLink, - "bob first": n.firstBobChannelLink, - "bob second": n.secondBobChannelLink, - "carol": n.carolChannelLink, - }) -} - -// stop stops nodes and cleanup its databases. -func (n *threeHopNetwork) stop() { - done := make(chan struct{}) - go func() { - n.aliceServer.Stop() - done <- struct{}{} - }() - - go func() { - n.bobServer.Stop() - done <- struct{}{} - }() - - go func() { - n.carolServer.Stop() - done <- struct{}{} - }() - - for i := 0; i < 3; i++ { - <-done - } -} - -type clusterChannels struct { - aliceToBob *lnwallet.LightningChannel - bobToAlice *lnwallet.LightningChannel - bobToCarol *lnwallet.LightningChannel - carolToBob *lnwallet.LightningChannel -} - -// createClusterChannels creates lightning channels which are needed for -// network cluster to be initialized. 
-func createClusterChannels(aliceToBob, bobToCarol btcutil.Amount) ( - *clusterChannels, func(), func() (*clusterChannels, er.R), er.R) { - - _, _, firstChanID, secondChanID := genIDs() - - // Create lightning channels between Alice<->Bob and Bob<->Carol - aliceChannel, firstBobChannel, cleanAliceBob, err := - createTestChannel(alicePrivKey, bobPrivKey, aliceToBob, - aliceToBob, 0, 0, firstChanID) - if err != nil { - return nil, nil, nil, er.Errorf("unable to create "+ - "alice<->bob channel: %v", err) - } - - secondBobChannel, carolChannel, cleanBobCarol, err := - createTestChannel(bobPrivKey, carolPrivKey, bobToCarol, - bobToCarol, 0, 0, secondChanID) - if err != nil { - cleanAliceBob() - return nil, nil, nil, er.Errorf("unable to create "+ - "bob<->carol channel: %v", err) - } - - cleanUp := func() { - cleanAliceBob() - cleanBobCarol() - } - - restoreFromDb := func() (*clusterChannels, er.R) { - - a2b, err := aliceChannel.restore() - if err != nil { - return nil, err - } - - b2a, err := firstBobChannel.restore() - if err != nil { - return nil, err - } - - b2c, err := secondBobChannel.restore() - if err != nil { - return nil, err - } - - c2b, err := carolChannel.restore() - if err != nil { - return nil, err - } - - return &clusterChannels{ - aliceToBob: a2b, - bobToAlice: b2a, - bobToCarol: b2c, - carolToBob: c2b, - }, nil - } - - return &clusterChannels{ - aliceToBob: aliceChannel.channel, - bobToAlice: firstBobChannel.channel, - bobToCarol: secondBobChannel.channel, - carolToBob: carolChannel.channel, - }, cleanUp, restoreFromDb, nil -} - -// newThreeHopNetwork function creates the following topology and returns the -// control object to manage this cluster: -// -// alice bob carol -// server - <-connection-> - server - - <-connection-> - - - server -// | | | -// alice htlc bob htlc carol htlc -// switch switch \ switch -// | | \ | -// | | \ | -// alice first bob second bob carol -// channel link channel link channel link channel link -// -// This function 
takes server options which can be used to apply custom -// settings to alice, bob and carol. -func newThreeHopNetwork(t testing.TB, aliceChannel, firstBobChannel, - secondBobChannel, carolChannel *lnwallet.LightningChannel, - startingHeight uint32, opts ...serverOption) *threeHopNetwork { - - aliceDb := aliceChannel.State().Db - bobDb := firstBobChannel.State().Db - carolDb := carolChannel.State().Db - - hopNetwork := newHopNetwork() - - // Create three peers/servers. - aliceServer, err := newMockServer( - t, "alice", startingHeight, aliceDb, hopNetwork.defaultDelta, - ) - if err != nil { - t.Fatalf("unable to create alice server: %v", err) - } - bobServer, err := newMockServer( - t, "bob", startingHeight, bobDb, hopNetwork.defaultDelta, - ) - if err != nil { - t.Fatalf("unable to create bob server: %v", err) - } - carolServer, err := newMockServer( - t, "carol", startingHeight, carolDb, hopNetwork.defaultDelta, - ) - if err != nil { - t.Fatalf("unable to create carol server: %v", err) - } - - // Apply all additional functional options to the servers before - // creating any links. - for _, option := range opts { - option(aliceServer, bobServer, carolServer) - } - - // Create mock decoder instead of sphinx one in order to mock the route - // which htlc should follow. 
- aliceDecoder := newMockIteratorDecoder() - bobDecoder := newMockIteratorDecoder() - carolDecoder := newMockIteratorDecoder() - - aliceChannelLink, err := hopNetwork.createChannelLink(aliceServer, - bobServer, aliceChannel, aliceDecoder, - ) - if err != nil { - t.Fatal(err) - } - - firstBobChannelLink, err := hopNetwork.createChannelLink(bobServer, - aliceServer, firstBobChannel, bobDecoder) - if err != nil { - t.Fatal(err) - } - - secondBobChannelLink, err := hopNetwork.createChannelLink(bobServer, - carolServer, secondBobChannel, bobDecoder) - if err != nil { - t.Fatal(err) - } - - carolChannelLink, err := hopNetwork.createChannelLink(carolServer, - bobServer, carolChannel, carolDecoder) - if err != nil { - t.Fatal(err) - } - - return &threeHopNetwork{ - aliceServer: aliceServer, - aliceChannelLink: aliceChannelLink.(*channelLink), - aliceOnionDecoder: aliceDecoder, - - bobServer: bobServer, - firstBobChannelLink: firstBobChannelLink.(*channelLink), - secondBobChannelLink: secondBobChannelLink.(*channelLink), - bobOnionDecoder: bobDecoder, - - carolServer: carolServer, - carolChannelLink: carolChannelLink.(*channelLink), - carolOnionDecoder: carolDecoder, - - hopNetwork: *hopNetwork, - } -} - -// serverOption is a function which alters the three servers created for -// a three hop network to allow custom settings on each server. -type serverOption func(aliceServer, bobServer, carolServer *mockServer) - -// serverOptionWithHtlcNotifier is a functional option for the creation of -// three hop network servers which allows setting of htlc notifiers. -// Note that these notifiers should be started and stopped by the calling -// function. 
-func serverOptionWithHtlcNotifier(alice, bob, - carol *HtlcNotifier) serverOption { - - return func(aliceServer, bobServer, carolServer *mockServer) { - aliceServer.htlcSwitch.cfg.HtlcNotifier = alice - bobServer.htlcSwitch.cfg.HtlcNotifier = bob - carolServer.htlcSwitch.cfg.HtlcNotifier = carol - } -} - -// serverOptionRejectHtlc is the functional option for setting the reject -// htlc config option in each server's switch. -func serverOptionRejectHtlc(alice, bob, carol bool) serverOption { - return func(aliceServer, bobServer, carolServer *mockServer) { - aliceServer.htlcSwitch.cfg.RejectHTLC = alice - bobServer.htlcSwitch.cfg.RejectHTLC = bob - carolServer.htlcSwitch.cfg.RejectHTLC = carol - } -} - -// createTwoClusterChannels creates lightning channels which are needed for -// a 2 hop network cluster to be initialized. -func createTwoClusterChannels(aliceToBob, bobToCarol btcutil.Amount) ( - *testLightningChannel, *testLightningChannel, - func(), er.R) { - - _, _, firstChanID, _ := genIDs() - - // Create lightning channels between Alice<->Bob and Bob<->Carol - alice, bob, cleanAliceBob, err := - createTestChannel(alicePrivKey, bobPrivKey, aliceToBob, - aliceToBob, 0, 0, firstChanID) - if err != nil { - return nil, nil, nil, er.Errorf("unable to create "+ - "alice<->bob channel: %v", err) - } - - return alice, bob, cleanAliceBob, nil -} - -// hopNetwork is the base struct for two and three hop networks -type hopNetwork struct { - feeEstimator *mockFeeEstimator - globalPolicy ForwardingPolicy - obfuscator hop.ErrorEncrypter - - defaultDelta uint32 -} - -func newHopNetwork() *hopNetwork { - defaultDelta := uint32(6) - - globalPolicy := ForwardingPolicy{ - MinHTLCOut: lnwire.NewMSatFromSatoshis(5), - BaseFee: lnwire.NewMSatFromSatoshis(1), - TimeLockDelta: defaultDelta, - } - obfuscator := NewMockObfuscator() - - feeEstimator := &mockFeeEstimator{ - byteFeeIn: make(chan chainfee.SatPerKWeight), - quit: make(chan struct{}), - } - - return &hopNetwork{ - 
feeEstimator: feeEstimator, - globalPolicy: globalPolicy, - obfuscator: obfuscator, - defaultDelta: defaultDelta, - } -} - -func (h *hopNetwork) createChannelLink(server, peer *mockServer, - channel *lnwallet.LightningChannel, - decoder *mockIteratorDecoder) (ChannelLink, er.R) { - - const ( - fwdPkgTimeout = 15 * time.Second - minFeeUpdateTimeout = 30 * time.Minute - maxFeeUpdateTimeout = 40 * time.Minute - ) - - link := NewChannelLink( - ChannelLinkConfig{ - Switch: server.htlcSwitch, - FwrdingPolicy: h.globalPolicy, - Peer: peer, - Circuits: server.htlcSwitch.CircuitModifier(), - ForwardPackets: server.htlcSwitch.ForwardPackets, - DecodeHopIterators: decoder.DecodeHopIterators, - ExtractErrorEncrypter: func(*btcec.PublicKey) ( - hop.ErrorEncrypter, lnwire.FailCode) { - return h.obfuscator, lnwire.CodeNone - }, - FetchLastChannelUpdate: mockGetChanUpdateMessage, - Registry: server.registry, - FeeEstimator: h.feeEstimator, - PreimageCache: server.pCache, - UpdateContractSignals: func(*contractcourt.ContractSignals) er.R { - return nil - }, - ChainEvents: &contractcourt.ChainEventSubscription{}, - SyncStates: true, - BatchSize: 10, - BatchTicker: ticker.NewForce(testBatchTimeout), - FwdPkgGCTicker: ticker.NewForce(fwdPkgTimeout), - PendingCommitTicker: ticker.NewForce(2 * time.Minute), - MinFeeUpdateTimeout: minFeeUpdateTimeout, - MaxFeeUpdateTimeout: maxFeeUpdateTimeout, - OnChannelFailure: func(lnwire.ChannelID, lnwire.ShortChannelID, LinkFailureError) {}, - OutgoingCltvRejectDelta: 3, - MaxOutgoingCltvExpiry: DefaultMaxOutgoingCltvExpiry, - MaxFeeAllocation: DefaultMaxLinkFeeAllocation, - NotifyActiveLink: func(wire.OutPoint) {}, - NotifyActiveChannel: func(wire.OutPoint) {}, - NotifyInactiveChannel: func(wire.OutPoint) {}, - HtlcNotifier: server.htlcSwitch.cfg.HtlcNotifier, - }, - channel, - ) - if err := server.htlcSwitch.AddLink(link); err != nil { - return nil, er.Errorf("unable to add channel link: %v", err) - } - - go func() { - for { - select { - case 
<-link.(*channelLink).htlcUpdates: - case <-link.(*channelLink).quit: - return - } - } - }() - - return link, nil -} - -// twoHopNetwork is used for managing the created cluster of 2 hops. -type twoHopNetwork struct { - hopNetwork - - aliceServer *mockServer - aliceChannelLink *channelLink - - bobServer *mockServer - bobChannelLink *channelLink -} - -// newTwoHopNetwork function creates the following topology and returns the -// control object to manage this cluster: -// -// alice bob -// server - <-connection-> - server -// | | -// alice htlc bob htlc -// switch switch -// | | -// | | -// alice bob -// channel link channel link -// -func newTwoHopNetwork(t testing.TB, - aliceChannel, bobChannel *lnwallet.LightningChannel, - startingHeight uint32) *twoHopNetwork { - - aliceDb := aliceChannel.State().Db - bobDb := bobChannel.State().Db - - hopNetwork := newHopNetwork() - - // Create two peers/servers. - aliceServer, err := newMockServer( - t, "alice", startingHeight, aliceDb, hopNetwork.defaultDelta, - ) - if err != nil { - t.Fatalf("unable to create alice server: %v", err) - } - bobServer, err := newMockServer( - t, "bob", startingHeight, bobDb, hopNetwork.defaultDelta, - ) - if err != nil { - t.Fatalf("unable to create bob server: %v", err) - } - - // Create mock decoder instead of sphinx one in order to mock the route - // which htlc should follow. 
- aliceDecoder := newMockIteratorDecoder() - bobDecoder := newMockIteratorDecoder() - - aliceChannelLink, err := hopNetwork.createChannelLink( - aliceServer, bobServer, aliceChannel, aliceDecoder, - ) - if err != nil { - t.Fatal(err) - } - - bobChannelLink, err := hopNetwork.createChannelLink( - bobServer, aliceServer, bobChannel, bobDecoder, - ) - if err != nil { - t.Fatal(err) - } - - return &twoHopNetwork{ - aliceServer: aliceServer, - aliceChannelLink: aliceChannelLink.(*channelLink), - - bobServer: bobServer, - bobChannelLink: bobChannelLink.(*channelLink), - - hopNetwork: *hopNetwork, - } -} - -// start starts the two hop network alice,bob servers. -func (n *twoHopNetwork) start() er.R { - if err := n.aliceServer.Start(); err != nil { - return err - } - if err := n.bobServer.Start(); err != nil { - n.aliceServer.Stop() - return err - } - - return waitLinksEligible(map[string]*channelLink{ - "alice": n.aliceChannelLink, - "bob": n.bobChannelLink, - }) -} - -// stop stops nodes and cleanup its databases. -func (n *twoHopNetwork) stop() { - done := make(chan struct{}) - go func() { - n.aliceServer.Stop() - done <- struct{}{} - }() - - go func() { - n.bobServer.Stop() - done <- struct{}{} - }() - - for i := 0; i < 2; i++ { - <-done - } -} - -func (n *twoHopNetwork) makeHoldPayment(sendingPeer, receivingPeer lnpeer.Peer, - firstHop lnwire.ShortChannelID, hops []*hop.Payload, - invoiceAmt, htlcAmt lnwire.MilliSatoshi, - timelock uint32, preimage lntypes.Preimage) chan er.R { - - paymentErr := make(chan er.R, 1) - - sender := sendingPeer.(*mockServer) - receiver := receivingPeer.(*mockServer) - - // Generate route convert it to blob, and return next destination for - // htlc add request. - blob, err := generateRoute(hops...) - if err != nil { - paymentErr <- err - return paymentErr - } - - rhash := preimage.Hash() - - var payAddr [32]byte - if _, err := crand.Read(payAddr[:]); err != nil { - panic(err) - } - - // Generate payment: invoice and htlc. 
- invoice, htlc, pid, err := generatePaymentWithPreimage( - invoiceAmt, htlcAmt, timelock, blob, - nil, rhash, payAddr, - ) - if err != nil { - paymentErr <- err - return paymentErr - } - - // Check who is last in the route and add invoice to server registry. - if err := receiver.registry.AddInvoice(*invoice, rhash); err != nil { - paymentErr <- err - return paymentErr - } - - // Send payment and expose err channel. - err = sender.htlcSwitch.SendHTLC(firstHop, pid, htlc) - if err != nil { - paymentErr <- err - return paymentErr - } - - go func() { - resultChan, err := sender.htlcSwitch.GetPaymentResult( - pid, rhash, newMockDeobfuscator(), - ) - if err != nil { - paymentErr <- err - return - } - - result, ok := <-resultChan - if !ok { - paymentErr <- er.Errorf("shutting down") - return - } - - if result.Error != nil { - paymentErr <- result.Error - return - } - paymentErr <- nil - }() - - return paymentErr -} - -// waitLinksEligible blocks until all links the provided name-to-link map are -// eligible to forward HTLCs. -func waitLinksEligible(links map[string]*channelLink) er.R { - return wait.NoError(func() er.R { - for name, link := range links { - if link.EligibleToForward() { - continue - } - return er.Errorf("%s channel link not eligible", name) - } - return nil - }, 3*time.Second) -} - -// timeout implements a test level timeout. 
-func timeout(t *testing.T) func() { - done := make(chan struct{}) - go func() { - select { - case <-time.After(10 * time.Second): - pprof.Lookup("goroutine").WriteTo(os.Stdout, 1) - - panic("test timeout") - case <-done: - } - }() - - return func() { - close(done) - } -} diff --git a/lnd/input/input.go b/lnd/input/input.go deleted file mode 100644 index 7e125861..00000000 --- a/lnd/input/input.go +++ /dev/null @@ -1,248 +0,0 @@ -package input - -import ( - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/txscript" - "github.com/pkt-cash/pktd/wire" -) - -// Input represents an abstract UTXO which is to be spent using a sweeping -// transaction. The method provided give the caller all information needed to -// construct a valid input within a sweeping transaction to sweep this -// lingering UTXO. -type Input interface { - // Outpoint returns the reference to the output being spent, used to - // construct the corresponding transaction input. - OutPoint() *wire.OutPoint - - // RequiredTxOut returns a non-nil TxOut if input commits to a certain - // transaction output. This is used in the SINGLE|ANYONECANPAY case to - // make sure any presigned input is still valid by including the - // output. - RequiredTxOut() *wire.TxOut - - // RequiredLockTime returns whether this input commits to a tx locktime - // that must be used in the transaction including it. - RequiredLockTime() (uint32, bool) - - // WitnessType returns an enum specifying the type of witness that must - // be generated in order to spend this output. - WitnessType() WitnessType - - // SignDesc returns a reference to a spendable output's sign - // descriptor, which is used during signing to compute a valid witness - // that spends this output. - SignDesc() *SignDescriptor - - // CraftInputScript returns a valid set of input scripts allowing this - // output to be spent. 
The returns input scripts should target the - // input at location txIndex within the passed transaction. The input - // scripts generated by this method support spending p2wkh, p2wsh, and - // also nested p2sh outputs. - CraftInputScript(signer Signer, txn *wire.MsgTx, - hashCache *txscript.TxSigHashes, - txinIdx int) (*Script, er.R) - - // BlocksToMaturity returns the relative timelock, as a number of - // blocks, that must be built on top of the confirmation height before - // the output can be spent. For non-CSV locked inputs this is always - // zero. - BlocksToMaturity() uint32 - - // HeightHint returns the minimum height at which a confirmed spending - // tx can occur. - HeightHint() uint32 - - // UnconfParent returns information about a possibly unconfirmed parent - // tx. - UnconfParent() *TxInfo -} - -// TxInfo describes properties of a parent tx that are relevant for CPFP. -type TxInfo struct { - // Fee is the fee of the tx. - Fee btcutil.Amount - - // Weight is the weight of the tx. - Weight int64 -} - -type inputKit struct { - outpoint wire.OutPoint - witnessType WitnessType - signDesc SignDescriptor - heightHint uint32 - blockToMaturity uint32 - - // unconfParent contains information about a potential unconfirmed - // parent transaction. - unconfParent *TxInfo -} - -// OutPoint returns the breached output's identifier that is to be included as -// a transaction input. -func (i *inputKit) OutPoint() *wire.OutPoint { - return &i.outpoint -} - -// RequiredTxOut returns a nil for the base input type. -func (i *inputKit) RequiredTxOut() *wire.TxOut { - return nil -} - -// RequiredLockTime returns whether this input commits to a tx locktime that -// must be used in the transaction including it. This will be false for the -// base input type since we can re-sign for any lock time. -func (i *inputKit) RequiredLockTime() (uint32, bool) { - return 0, false -} - -// WitnessType returns the type of witness that must be generated to spend the -// breached output. 
-func (i *inputKit) WitnessType() WitnessType { - return i.witnessType -} - -// SignDesc returns the breached output's SignDescriptor, which is used during -// signing to compute the witness. -func (i *inputKit) SignDesc() *SignDescriptor { - return &i.signDesc -} - -// HeightHint returns the minimum height at which a confirmed spending -// tx can occur. -func (i *inputKit) HeightHint() uint32 { - return i.heightHint -} - -// BlocksToMaturity returns the relative timelock, as a number of blocks, that -// must be built on top of the confirmation height before the output can be -// spent. For non-CSV locked inputs this is always zero. -func (i *inputKit) BlocksToMaturity() uint32 { - return i.blockToMaturity -} - -// Cpfp returns information about a possibly unconfirmed parent tx. -func (i *inputKit) UnconfParent() *TxInfo { - return i.unconfParent -} - -// BaseInput contains all the information needed to sweep a basic output -// (CSV/CLTV/no time lock) -type BaseInput struct { - inputKit -} - -// MakeBaseInput assembles a new BaseInput that can be used to construct a -// sweep transaction. -func MakeBaseInput(outpoint *wire.OutPoint, witnessType WitnessType, - signDescriptor *SignDescriptor, heightHint uint32, - unconfParent *TxInfo) BaseInput { - - return BaseInput{ - inputKit{ - outpoint: *outpoint, - witnessType: witnessType, - signDesc: *signDescriptor, - heightHint: heightHint, - unconfParent: unconfParent, - }, - } -} - -// NewBaseInput allocates and assembles a new *BaseInput that can be used to -// construct a sweep transaction. -func NewBaseInput(outpoint *wire.OutPoint, witnessType WitnessType, - signDescriptor *SignDescriptor, heightHint uint32) *BaseInput { - - input := MakeBaseInput( - outpoint, witnessType, signDescriptor, heightHint, nil, - ) - - return &input -} - -// NewCsvInput assembles a new csv-locked input that can be used to -// construct a sweep transaction. 
-func NewCsvInput(outpoint *wire.OutPoint, witnessType WitnessType, - signDescriptor *SignDescriptor, heightHint uint32, - blockToMaturity uint32) *BaseInput { - - return &BaseInput{ - inputKit{ - outpoint: *outpoint, - witnessType: witnessType, - signDesc: *signDescriptor, - heightHint: heightHint, - blockToMaturity: blockToMaturity, - }, - } -} - -// CraftInputScript returns a valid set of input scripts allowing this output -// to be spent. The returned input scripts should target the input at location -// txIndex within the passed transaction. The input scripts generated by this -// method support spending p2wkh, p2wsh, and also nested p2sh outputs. -func (bi *BaseInput) CraftInputScript(signer Signer, txn *wire.MsgTx, - hashCache *txscript.TxSigHashes, txinIdx int) (*Script, er.R) { - - witnessFunc := bi.witnessType.WitnessGenerator(signer, bi.SignDesc()) - - return witnessFunc(txn, hashCache, txinIdx) -} - -// HtlcSucceedInput constitutes a sweep input that needs a pre-image. The input -// is expected to reside on the commitment tx of the remote party and should -// not be a second level tx output. -type HtlcSucceedInput struct { - inputKit - - preimage []byte -} - -// MakeHtlcSucceedInput assembles a new redeem input that can be used to -// construct a sweep transaction. -func MakeHtlcSucceedInput(outpoint *wire.OutPoint, - signDescriptor *SignDescriptor, preimage []byte, heightHint, - blocksToMaturity uint32) HtlcSucceedInput { - - return HtlcSucceedInput{ - inputKit: inputKit{ - outpoint: *outpoint, - witnessType: HtlcAcceptedRemoteSuccess, - signDesc: *signDescriptor, - heightHint: heightHint, - blockToMaturity: blocksToMaturity, - }, - preimage: preimage, - } -} - -// CraftInputScript returns a valid set of input scripts allowing this output -// to be spent. The returns input scripts should target the input at location -// txIndex within the passed transaction. 
The input scripts generated by this -// method support spending p2wkh, p2wsh, and also nested p2sh outputs. -func (h *HtlcSucceedInput) CraftInputScript(signer Signer, txn *wire.MsgTx, - hashCache *txscript.TxSigHashes, txinIdx int) (*Script, er.R) { - - desc := h.signDesc - desc.SigHashes = hashCache - desc.InputIndex = txinIdx - - witness, err := SenderHtlcSpendRedeem( - signer, &desc, txn, h.preimage, - ) - if err != nil { - return nil, err - } - - return &Script{ - Witness: witness, - }, nil -} - -// Compile-time constraints to ensure each input struct implement the Input -// interface. -var _ Input = (*BaseInput)(nil) -var _ Input = (*HtlcSucceedInput)(nil) diff --git a/lnd/input/script_utils.go b/lnd/input/script_utils.go deleted file mode 100644 index ed26dbea..00000000 --- a/lnd/input/script_utils.go +++ /dev/null @@ -1,1301 +0,0 @@ -package input - -import ( - "bytes" - "crypto/sha256" - "math/big" - - "golang.org/x/crypto/ripemd160" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/txscript" - "github.com/pkt-cash/pktd/txscript/opcode" - "github.com/pkt-cash/pktd/txscript/params" - "github.com/pkt-cash/pktd/txscript/scriptbuilder" - "github.com/pkt-cash/pktd/wire" -) - -var ( - // TODO(roasbeef): remove these and use the one's defined in txscript - // within testnet-L. - - // SequenceLockTimeSeconds is the 22nd bit which indicates the lock - // time is in seconds. - SequenceLockTimeSeconds = uint32(1 << 22) -) - -// Signature is an interface for objects that can populate signatures during -// witness construction. -type Signature interface { - // Serialize returns a DER-encoded ECDSA signature. - Serialize() []byte - - // Verify return true if the ECDSA signature is valid for the passed - // message digest under the provided public key. 
- Verify([]byte, *btcec.PublicKey) bool -} - -// WitnessScriptHash generates a pay-to-witness-script-hash public key script -// paying to a version 0 witness program paying to the passed redeem script. -func WitnessScriptHash(witnessScript []byte) ([]byte, er.R) { - bldr := scriptbuilder.NewScriptBuilder() - - bldr.AddOp(opcode.OP_0) - scriptHash := sha256.Sum256(witnessScript) - bldr.AddData(scriptHash[:]) - return bldr.Script() -} - -// GenMultiSigScript generates the non-p2sh'd multisig script for 2 of 2 -// pubkeys. -func GenMultiSigScript(aPub, bPub []byte) ([]byte, er.R) { - if len(aPub) != 33 || len(bPub) != 33 { - return nil, er.Errorf("pubkey size error: compressed pubkeys only") - } - - // Swap to sort pubkeys if needed. Keys are sorted in lexicographical - // order. The signatures within the scriptSig must also adhere to the - // order, ensuring that the signatures for each public key appears in - // the proper order on the stack. - if bytes.Compare(aPub, bPub) == 1 { - aPub, bPub = bPub, aPub - } - - bldr := scriptbuilder.NewScriptBuilder() - bldr.AddOp(opcode.OP_2) - bldr.AddData(aPub) // Add both pubkeys (sorted). - bldr.AddData(bPub) - bldr.AddOp(opcode.OP_2) - bldr.AddOp(opcode.OP_CHECKMULTISIG) - return bldr.Script() -} - -// GenFundingPkScript creates a redeem script, and its matching p2wsh -// output for the funding transaction. -func GenFundingPkScript(aPub, bPub []byte, amt int64) ([]byte, *wire.TxOut, er.R) { - // As a sanity check, ensure that the passed amount is above zero. - if amt <= 0 { - return nil, nil, er.Errorf("can't create FundTx script with " + - "zero, or negative coins") - } - - // First, create the 2-of-2 multi-sig script itself. - witnessScript, err := GenMultiSigScript(aPub, bPub) - if err != nil { - return nil, nil, err - } - - // With the 2-of-2 script in had, generate a p2wsh script which pays - // to the funding script. 
- pkScript, err := WitnessScriptHash(witnessScript) - if err != nil { - return nil, nil, err - } - - return witnessScript, wire.NewTxOut(amt, pkScript), nil -} - -// SpendMultiSig generates the witness stack required to redeem the 2-of-2 p2wsh -// multi-sig output. -func SpendMultiSig(witnessScript, pubA []byte, sigA Signature, - pubB []byte, sigB Signature) [][]byte { - - witness := make([][]byte, 4) - - // When spending a p2wsh multi-sig script, rather than an OP_0, we add - // a nil stack element to eat the extra pop. - witness[0] = nil - - // When initially generating the witnessScript, we sorted the serialized - // public keys in descending order. So we do a quick comparison in order - // ensure the signatures appear on the Script Virtual Machine stack in - // the correct order. - if bytes.Compare(pubA, pubB) == 1 { - witness[1] = append(sigB.Serialize(), byte(params.SigHashAll)) - witness[2] = append(sigA.Serialize(), byte(params.SigHashAll)) - } else { - witness[1] = append(sigA.Serialize(), byte(params.SigHashAll)) - witness[2] = append(sigB.Serialize(), byte(params.SigHashAll)) - } - - // Finally, add the preimage as the last witness element. - witness[3] = witnessScript - - return witness -} - -// FindScriptOutputIndex finds the index of the public key script output -// matching 'script'. Additionally, a boolean is returned indicating if a -// matching output was found at all. -// -// NOTE: The search stops after the first matching script is found. -func FindScriptOutputIndex(tx *wire.MsgTx, script []byte) (bool, uint32) { - found := false - index := uint32(0) - for i, txOut := range tx.TxOut { - if bytes.Equal(txOut.PkScript, script) { - found = true - index = uint32(i) - break - } - } - - return found, index -} - -// Ripemd160H calculates the ripemd160 of the passed byte slice. This is used to -// calculate the intermediate hash for payment pre-images. Payment hashes are -// the result of ripemd160(sha256(paymentPreimage)). 
As a result, the value -// passed in should be the sha256 of the payment hash. -func Ripemd160H(d []byte) []byte { - h := ripemd160.New() - h.Write(d) - return h.Sum(nil) -} - -// SenderHTLCScript constructs the public key script for an outgoing HTLC -// output payment for the sender's version of the commitment transaction. The -// possible script paths from this output include: -// -// * The sender timing out the HTLC using the second level HTLC timeout -// transaction. -// * The receiver of the HTLC claiming the output on-chain with the payment -// preimage. -// * The receiver of the HTLC sweeping all the funds in the case that a -// revoked commitment transaction bearing this HTLC was broadcast. -// -// If confirmedSpend=true, a 1 OP_CSV check will be added to the non-revocation -// cases, to allow sweeping only after confirmation. -// -// Possible Input Scripts: -// SENDR: <0> <0> (spend using HTLC timeout transaction) -// RECVR: -// REVOK: -// * receiver revoke -// -// OP_DUP OP_HASH160 OP_EQUAL -// OP_IF -// OP_CHECKSIG -// OP_ELSE -// -// OP_SWAP OP_SIZE 32 OP_EQUAL -// OP_NOTIF -// OP_DROP 2 OP_SWAP 2 OP_CHECKMULTISIG -// OP_ELSE -// OP_HASH160 OP_EQUALVERIFY -// OP_CHECKSIG -// OP_ENDIF -// [1 OP_CHECKSEQUENCEVERIFY OP_DROP] <- if allowing confirmed spend only. -// OP_ENDIF -func SenderHTLCScript(senderHtlcKey, receiverHtlcKey, - revocationKey *btcec.PublicKey, paymentHash []byte, - confirmedSpend bool) ([]byte, er.R) { - - builder := scriptbuilder.NewScriptBuilder() - - // The opening operations are used to determine if this is the receiver - // of the HTLC attempting to sweep all the funds due to a contract - // breach. In this case, they'll place the revocation key at the top of - // the stack. - builder.AddOp(opcode.OP_DUP) - builder.AddOp(opcode.OP_HASH160) - builder.AddData(btcutil.Hash160(revocationKey.SerializeCompressed())) - builder.AddOp(opcode.OP_EQUAL) - - // If the hash matches, then this is the revocation clause. 
The output - // can be spent if the check sig operation passes. - builder.AddOp(opcode.OP_IF) - builder.AddOp(opcode.OP_CHECKSIG) - - // Otherwise, this may either be the receiver of the HTLC claiming with - // the pre-image, or the sender of the HTLC sweeping the output after - // it has timed out. - builder.AddOp(opcode.OP_ELSE) - - // We'll do a bit of set up by pushing the receiver's key on the top of - // the stack. This will be needed later if we decide that this is the - // sender activating the time out clause with the HTLC timeout - // transaction. - builder.AddData(receiverHtlcKey.SerializeCompressed()) - - // Atm, the top item of the stack is the receiverKey's so we use a swap - // to expose what is either the payment pre-image or a signature. - builder.AddOp(opcode.OP_SWAP) - - // With the top item swapped, check if it's 32 bytes. If so, then this - // *may* be the payment pre-image. - builder.AddOp(opcode.OP_SIZE) - builder.AddInt64(32) - builder.AddOp(opcode.OP_EQUAL) - - // If it isn't then this might be the sender of the HTLC activating the - // time out clause. - builder.AddOp(opcode.OP_NOTIF) - - // We'll drop the OP_IF return value off the top of the stack so we can - // reconstruct the multi-sig script used as an off-chain covenant. If - // two valid signatures are provided, ten then output will be deemed as - // spendable. - builder.AddOp(opcode.OP_DROP) - builder.AddOp(opcode.OP_2) - builder.AddOp(opcode.OP_SWAP) - builder.AddData(senderHtlcKey.SerializeCompressed()) - builder.AddOp(opcode.OP_2) - builder.AddOp(opcode.OP_CHECKMULTISIG) - - // Otherwise, then the only other case is that this is the receiver of - // the HTLC sweeping it on-chain with the payment pre-image. - builder.AddOp(opcode.OP_ELSE) - - // Hash the top item of the stack and compare it with the hash160 of - // the payment hash, which is already the sha256 of the payment - // pre-image. 
By using this little trick we're able save space on-chain - // as the witness includes a 20-byte hash rather than a 32-byte hash. - builder.AddOp(opcode.OP_HASH160) - builder.AddData(Ripemd160H(paymentHash)) - builder.AddOp(opcode.OP_EQUALVERIFY) - - // This checks the receiver's signature so that a third party with - // knowledge of the payment preimage still cannot steal the output. - builder.AddOp(opcode.OP_CHECKSIG) - - // Close out the OP_IF statement above. - builder.AddOp(opcode.OP_ENDIF) - - // Add 1 block CSV delay if a confirmation is required for the - // non-revocation clauses. - if confirmedSpend { - builder.AddOp(opcode.OP_1) - builder.AddOp(opcode.OP_CHECKSEQUENCEVERIFY) - builder.AddOp(opcode.OP_DROP) - } - - // Close out the OP_IF statement at the top of the script. - builder.AddOp(opcode.OP_ENDIF) - - return builder.Script() -} - -// SenderHtlcSpendRevokeWithKey constructs a valid witness allowing the receiver of an -// HTLC to claim the output with knowledge of the revocation private key in the -// scenario that the sender of the HTLC broadcasts a previously revoked -// commitment transaction. A valid spend requires knowledge of the private key -// that corresponds to their revocation base point and also the private key fro -// the per commitment point, and a valid signature under the combined public -// key. -func SenderHtlcSpendRevokeWithKey(signer Signer, signDesc *SignDescriptor, - revokeKey *btcec.PublicKey, sweepTx *wire.MsgTx) (wire.TxWitness, er.R) { - - sweepSig, err := signer.SignOutputRaw(sweepTx, signDesc) - if err != nil { - return nil, err - } - - // The stack required to sweep a revoke HTLC output consists simply of - // the exact witness stack as one of a regular p2wkh spend. The only - // difference is that the keys used were derived in an adversarial - // manner in order to encode the revocation contract into a sig+key - // pair. 
- witnessStack := wire.TxWitness(make([][]byte, 3)) - witnessStack[0] = append(sweepSig.Serialize(), byte(signDesc.HashType)) - witnessStack[1] = revokeKey.SerializeCompressed() - witnessStack[2] = signDesc.WitnessScript - - return witnessStack, nil -} - -// SenderHtlcSpendRevoke constructs a valid witness allowing the receiver of an -// HTLC to claim the output with knowledge of the revocation private key in the -// scenario that the sender of the HTLC broadcasts a previously revoked -// commitment transaction. This method first derives the appropriate revocation -// key, and requires that the provided SignDescriptor has a local revocation -// basepoint and commitment secret in the PubKey and DoubleTweak fields, -// respectively. -func SenderHtlcSpendRevoke(signer Signer, signDesc *SignDescriptor, - sweepTx *wire.MsgTx) (wire.TxWitness, er.R) { - - if signDesc.KeyDesc.PubKey == nil { - return nil, er.Errorf("cannot generate witness with nil " + - "KeyDesc pubkey") - } - - // Derive the revocation key using the local revocation base point and - // commitment point. - revokeKey := DeriveRevocationPubkey( - signDesc.KeyDesc.PubKey, - signDesc.DoubleTweak.PubKey(), - ) - - return SenderHtlcSpendRevokeWithKey(signer, signDesc, revokeKey, sweepTx) -} - -// SenderHtlcSpendRedeem constructs a valid witness allowing the receiver of an -// HTLC to redeem the pending output in the scenario that the sender broadcasts -// their version of the commitment transaction. A valid spend requires -// knowledge of the payment preimage, and a valid signature under the receivers -// public key. -func SenderHtlcSpendRedeem(signer Signer, signDesc *SignDescriptor, - sweepTx *wire.MsgTx, paymentPreimage []byte) (wire.TxWitness, er.R) { - - sweepSig, err := signer.SignOutputRaw(sweepTx, signDesc) - if err != nil { - return nil, err - } - - // The stack required to spend this output is simply the signature - // generated above under the receiver's public key, and the payment - // pre-image. 
- witnessStack := wire.TxWitness(make([][]byte, 3)) - witnessStack[0] = append(sweepSig.Serialize(), byte(signDesc.HashType)) - witnessStack[1] = paymentPreimage - witnessStack[2] = signDesc.WitnessScript - - return witnessStack, nil -} - -// SenderHtlcSpendTimeout constructs a valid witness allowing the sender of an -// HTLC to activate the time locked covenant clause of a soon to be expired -// HTLC. This script simply spends the multi-sig output using the -// pre-generated HTLC timeout transaction. -func SenderHtlcSpendTimeout(receiverSig Signature, - receiverSigHash params.SigHashType, signer Signer, - signDesc *SignDescriptor, htlcTimeoutTx *wire.MsgTx) ( - wire.TxWitness, er.R) { - - sweepSig, err := signer.SignOutputRaw(htlcTimeoutTx, signDesc) - if err != nil { - return nil, err - } - - // We place a zero as the first item of the evaluated witness stack in - // order to force Script execution to the HTLC timeout clause. The - // second zero is required to consume the extra pop due to a bug in the - // original OP_CHECKMULTISIG. - witnessStack := wire.TxWitness(make([][]byte, 5)) - witnessStack[0] = nil - witnessStack[1] = append(receiverSig.Serialize(), byte(receiverSigHash)) - witnessStack[2] = append(sweepSig.Serialize(), byte(signDesc.HashType)) - witnessStack[3] = nil - witnessStack[4] = signDesc.WitnessScript - - return witnessStack, nil -} - -// ReceiverHTLCScript constructs the public key script for an incoming HTLC -// output payment for the receiver's version of the commitment transaction. The -// possible execution paths from this script include: -// * The receiver of the HTLC uses its second level HTLC transaction to -// advance the state of the HTLC into the delay+claim state. -// * The sender of the HTLC sweeps all the funds of the HTLC as a breached -// commitment was broadcast. -// * The sender of the HTLC sweeps the HTLC on-chain after the timeout period -// of the HTLC has passed. 
-// -// If confirmedSpend=true, a 1 OP_CSV check will be added to the non-revocation -// cases, to allow sweeping only after confirmation. -// -// Possible Input Scripts: -// RECVR: <0> (spend using HTLC success transaction) -// REVOK: -// SENDR: 0 -// -// -// OP_DUP OP_HASH160 OP_EQUAL -// OP_IF -// OP_CHECKSIG -// OP_ELSE -// -// OP_SWAP OP_SIZE 32 OP_EQUAL -// OP_IF -// OP_HASH160 OP_EQUALVERIFY -// 2 OP_SWAP 2 OP_CHECKMULTISIG -// OP_ELSE -// OP_DROP OP_CHECKLOCKTIMEVERIFY OP_DROP -// OP_CHECKSIG -// OP_ENDIF -// [1 OP_CHECKSEQUENCEVERIFY OP_DROP] <- if allowing confirmed spend only. -// OP_ENDIF -func ReceiverHTLCScript(cltvExpiry uint32, senderHtlcKey, - receiverHtlcKey, revocationKey *btcec.PublicKey, - paymentHash []byte, confirmedSpend bool) ([]byte, er.R) { - - builder := scriptbuilder.NewScriptBuilder() - - // The opening operations are used to determine if this is the sender - // of the HTLC attempting to sweep all the funds due to a contract - // breach. In this case, they'll place the revocation key at the top of - // the stack. - builder.AddOp(opcode.OP_DUP) - builder.AddOp(opcode.OP_HASH160) - builder.AddData(btcutil.Hash160(revocationKey.SerializeCompressed())) - builder.AddOp(opcode.OP_EQUAL) - - // If the hash matches, then this is the revocation clause. The output - // can be spent if the check sig operation passes. - builder.AddOp(opcode.OP_IF) - builder.AddOp(opcode.OP_CHECKSIG) - - // Otherwise, this may either be the receiver of the HTLC starting the - // claiming process via the second level HTLC success transaction and - // the pre-image, or the sender of the HTLC sweeping the output after - // it has timed out. - builder.AddOp(opcode.OP_ELSE) - - // We'll do a bit of set up by pushing the sender's key on the top of - // the stack. This will be needed later if we decide that this is the - // receiver transitioning the output to the claim state using their - // second-level HTLC success transaction. 
- builder.AddData(senderHtlcKey.SerializeCompressed()) - - // Atm, the top item of the stack is the sender's key so we use a swap - // to expose what is either the payment pre-image or something else. - builder.AddOp(opcode.OP_SWAP) - - // With the top item swapped, check if it's 32 bytes. If so, then this - // *may* be the payment pre-image. - builder.AddOp(opcode.OP_SIZE) - builder.AddInt64(32) - builder.AddOp(opcode.OP_EQUAL) - - // If the item on the top of the stack is 32-bytes, then it is the - // proper size, so this indicates that the receiver of the HTLC is - // attempting to claim the output on-chain by transitioning the state - // of the HTLC to delay+claim. - builder.AddOp(opcode.OP_IF) - - // Next we'll hash the item on the top of the stack, if it matches the - // payment pre-image, then we'll continue. Otherwise, we'll end the - // script here as this is the invalid payment pre-image. - builder.AddOp(opcode.OP_HASH160) - builder.AddData(Ripemd160H(paymentHash)) - builder.AddOp(opcode.OP_EQUALVERIFY) - - // If the payment hash matches, then we'll also need to satisfy the - // multi-sig covenant by providing both signatures of the sender and - // receiver. If the convenient is met, then we'll allow the spending of - // this output, but only by the HTLC success transaction. - builder.AddOp(opcode.OP_2) - builder.AddOp(opcode.OP_SWAP) - builder.AddData(receiverHtlcKey.SerializeCompressed()) - builder.AddOp(opcode.OP_2) - builder.AddOp(opcode.OP_CHECKMULTISIG) - - // Otherwise, this might be the sender of the HTLC attempting to sweep - // it on-chain after the timeout. - builder.AddOp(opcode.OP_ELSE) - - // We'll drop the extra item (which is the output from evaluating the - // OP_EQUAL) above from the stack. - builder.AddOp(opcode.OP_DROP) - - // With that item dropped off, we can now enforce the absolute - // lock-time required to timeout the HTLC. 
If the time has passed, then - // we'll proceed with a checksig to ensure that this is actually the - // sender of he original HTLC. - builder.AddInt64(int64(cltvExpiry)) - builder.AddOp(opcode.OP_CHECKLOCKTIMEVERIFY) - builder.AddOp(opcode.OP_DROP) - builder.AddOp(opcode.OP_CHECKSIG) - - // Close out the inner if statement. - builder.AddOp(opcode.OP_ENDIF) - - // Add 1 block CSV delay for non-revocation clauses if confirmation is - // required. - if confirmedSpend { - builder.AddOp(opcode.OP_1) - builder.AddOp(opcode.OP_CHECKSEQUENCEVERIFY) - builder.AddOp(opcode.OP_DROP) - } - - // Close out the outer if statement. - builder.AddOp(opcode.OP_ENDIF) - - return builder.Script() -} - -// ReceiverHtlcSpendRedeem constructs a valid witness allowing the receiver of -// an HTLC to redeem the conditional payment in the event that their commitment -// transaction is broadcast. This clause transitions the state of the HLTC -// output into the delay+claim state by activating the off-chain covenant bound -// by the 2-of-2 multi-sig output. The HTLC success timeout transaction being -// signed has a relative timelock delay enforced by its sequence number. This -// delay give the sender of the HTLC enough time to revoke the output if this -// is a breach commitment transaction. -func ReceiverHtlcSpendRedeem(senderSig Signature, - senderSigHash params.SigHashType, paymentPreimage []byte, - signer Signer, signDesc *SignDescriptor, htlcSuccessTx *wire.MsgTx) ( - wire.TxWitness, er.R) { - - // First, we'll generate a signature for the HTLC success transaction. - // The signDesc should be signing with the public key used as the - // receiver's public key and also the correct single tweak. - sweepSig, err := signer.SignOutputRaw(htlcSuccessTx, signDesc) - if err != nil { - return nil, err - } - - // The final witness stack is used the provide the script with the - // payment pre-image, and also execute the multi-sig clause after the - // pre-images matches. 
We add a nil item at the bottom of the stack in - // order to consume the extra pop within OP_CHECKMULTISIG. - witnessStack := wire.TxWitness(make([][]byte, 5)) - witnessStack[0] = nil - witnessStack[1] = append(senderSig.Serialize(), byte(senderSigHash)) - witnessStack[2] = append(sweepSig.Serialize(), byte(signDesc.HashType)) - witnessStack[3] = paymentPreimage - witnessStack[4] = signDesc.WitnessScript - - return witnessStack, nil -} - -// ReceiverHtlcSpendRevokeWithKey constructs a valid witness allowing the sender of an -// HTLC within a previously revoked commitment transaction to re-claim the -// pending funds in the case that the receiver broadcasts this revoked -// commitment transaction. -func ReceiverHtlcSpendRevokeWithKey(signer Signer, signDesc *SignDescriptor, - revokeKey *btcec.PublicKey, sweepTx *wire.MsgTx) (wire.TxWitness, er.R) { - - // First, we'll generate a signature for the sweep transaction. The - // signDesc should be signing with the public key used as the fully - // derived revocation public key and also the correct double tweak - // value. - sweepSig, err := signer.SignOutputRaw(sweepTx, signDesc) - if err != nil { - return nil, err - } - - // We place a zero, then one as the first items in the evaluated - // witness stack in order to force script execution to the HTLC - // revocation clause. - witnessStack := wire.TxWitness(make([][]byte, 3)) - witnessStack[0] = append(sweepSig.Serialize(), byte(signDesc.HashType)) - witnessStack[1] = revokeKey.SerializeCompressed() - witnessStack[2] = signDesc.WitnessScript - - return witnessStack, nil -} - -// ReceiverHtlcSpendRevoke constructs a valid witness allowing the sender of an -// HTLC within a previously revoked commitment transaction to re-claim the -// pending funds in the case that the receiver broadcasts this revoked -// commitment transaction. 
This method first derives the appropriate revocation -// key, and requires that the provided SignDescriptor has a local revocation -// basepoint and commitment secret in the PubKey and DoubleTweak fields, -// respectively. -func ReceiverHtlcSpendRevoke(signer Signer, signDesc *SignDescriptor, - sweepTx *wire.MsgTx) (wire.TxWitness, er.R) { - - if signDesc.KeyDesc.PubKey == nil { - return nil, er.Errorf("cannot generate witness with nil " + - "KeyDesc pubkey") - } - - // Derive the revocation key using the local revocation base point and - // commitment point. - revokeKey := DeriveRevocationPubkey( - signDesc.KeyDesc.PubKey, - signDesc.DoubleTweak.PubKey(), - ) - - return ReceiverHtlcSpendRevokeWithKey(signer, signDesc, revokeKey, sweepTx) -} - -// ReceiverHtlcSpendTimeout constructs a valid witness allowing the sender of -// an HTLC to recover the pending funds after an absolute timeout in the -// scenario that the receiver of the HTLC broadcasts their version of the -// commitment transaction. If the caller has already set the lock time on the -// spending transaction, than a value of -1 can be passed for the cltvExpiry -// value. -// -// NOTE: The target input of the passed transaction MUST NOT have a final -// sequence number. Otherwise, the OP_CHECKLOCKTIMEVERIFY check will fail. -func ReceiverHtlcSpendTimeout(signer Signer, signDesc *SignDescriptor, - sweepTx *wire.MsgTx, cltvExpiry int32) (wire.TxWitness, er.R) { - - // If the caller set a proper timeout value, then we'll apply it - // directly to the transaction. - if cltvExpiry != -1 { - // The HTLC output has an absolute time period before we are - // permitted to recover the pending funds. Therefore we need to - // set the locktime on this sweeping transaction in order to - // pass Script verification. - sweepTx.LockTime = uint32(cltvExpiry) - } - - // With the lock time on the transaction set, we'll not generate a - // signature for the sweep transaction. 
The passed sign descriptor - // should be created using the raw public key of the sender (w/o the - // single tweak applied), and the single tweak set to the proper value - // taking into account the current state's point. - sweepSig, err := signer.SignOutputRaw(sweepTx, signDesc) - if err != nil { - return nil, err - } - - witnessStack := wire.TxWitness(make([][]byte, 3)) - witnessStack[0] = append(sweepSig.Serialize(), byte(signDesc.HashType)) - witnessStack[1] = nil - witnessStack[2] = signDesc.WitnessScript - - return witnessStack, nil -} - -// SecondLevelHtlcScript is the uniform script that's used as the output for -// the second-level HTLC transactions. The second level transaction act as a -// sort of covenant, ensuring that a 2-of-2 multi-sig output can only be -// spent in a particular way, and to a particular output. -// -// Possible Input Scripts: -// * To revoke an HTLC output that has been transitioned to the claim+delay -// state: -// * 1 -// -// * To claim and HTLC output, either with a pre-image or due to a timeout: -// * 0 -// -// OP_IF -// -// OP_ELSE -// -// OP_CHECKSEQUENCEVERIFY -// OP_DROP -// -// OP_ENDIF -// OP_CHECKSIG -// -// TODO(roasbeef): possible renames for second-level -// * transition? -// * covenant output -func SecondLevelHtlcScript(revocationKey, delayKey *btcec.PublicKey, - csvDelay uint32) ([]byte, er.R) { - - builder := scriptbuilder.NewScriptBuilder() - - // If this is the revocation clause for this script is to be executed, - // the spender will push a 1, forcing us to hit the true clause of this - // if statement. - builder.AddOp(opcode.OP_IF) - - // If this this is the revocation case, then we'll push the revocation - // public key on the stack. - builder.AddData(revocationKey.SerializeCompressed()) - - // Otherwise, this is either the sender or receiver of the HTLC - // attempting to claim the HTLC output. 
- builder.AddOp(opcode.OP_ELSE) - - // In order to give the other party time to execute the revocation - // clause above, we require a relative timeout to pass before the - // output can be spent. - builder.AddInt64(int64(csvDelay)) - builder.AddOp(opcode.OP_CHECKSEQUENCEVERIFY) - builder.AddOp(opcode.OP_DROP) - - // If the relative timelock passes, then we'll add the delay key to the - // stack to ensure that we properly authenticate the spending party. - builder.AddData(delayKey.SerializeCompressed()) - - // Close out the if statement. - builder.AddOp(opcode.OP_ENDIF) - - // In either case, we'll ensure that only either the party possessing - // the revocation private key, or the delay private key is able to - // spend this output. - builder.AddOp(opcode.OP_CHECKSIG) - - return builder.Script() -} - -// HtlcSpendSuccess spends a second-level HTLC output. This function is to be -// used by the sender of an HTLC to claim the output after a relative timeout -// or the receiver of the HTLC to claim on-chain with the pre-image. -func HtlcSpendSuccess(signer Signer, signDesc *SignDescriptor, - sweepTx *wire.MsgTx, csvDelay uint32) (wire.TxWitness, er.R) { - - // We're required to wait a relative period of time before we can sweep - // the output in order to allow the other party to contest our claim of - // validity to this version of the commitment transaction. - sweepTx.TxIn[0].Sequence = LockTimeToSequence(false, csvDelay) - - // Finally, OP_CSV requires that the version of the transaction - // spending a pkscript with OP_CSV within it *must* be >= 2. - sweepTx.Version = 2 - - // As we mutated the transaction, we'll re-calculate the sighashes for - // this instance. - signDesc.SigHashes = txscript.NewTxSigHashes(sweepTx) - - // With the proper sequence and version set, we'll now sign the timeout - // transaction using the passed signed descriptor. 
In order to generate - // a valid signature, then signDesc should be using the base delay - // public key, and the proper single tweak bytes. - sweepSig, err := signer.SignOutputRaw(sweepTx, signDesc) - if err != nil { - return nil, err - } - - // We set a zero as the first element the witness stack (ignoring the - // witness script), in order to force execution to the second portion - // of the if clause. - witnessStack := wire.TxWitness(make([][]byte, 3)) - witnessStack[0] = append(sweepSig.Serialize(), byte(signDesc.HashType)) - witnessStack[1] = nil - witnessStack[2] = signDesc.WitnessScript - - return witnessStack, nil -} - -// HtlcSpendRevoke spends a second-level HTLC output. This function is to be -// used by the sender or receiver of an HTLC to claim the HTLC after a revoked -// commitment transaction was broadcast. -func HtlcSpendRevoke(signer Signer, signDesc *SignDescriptor, - revokeTx *wire.MsgTx) (wire.TxWitness, er.R) { - - // We don't need any spacial modifications to the transaction as this - // is just sweeping a revoked HTLC output. So we'll generate a regular - // witness signature. - sweepSig, err := signer.SignOutputRaw(revokeTx, signDesc) - if err != nil { - return nil, err - } - - // We set a one as the first element the witness stack (ignoring the - // witness script), in order to force execution to the revocation - // clause in the second level HTLC script. - witnessStack := wire.TxWitness(make([][]byte, 3)) - witnessStack[0] = append(sweepSig.Serialize(), byte(signDesc.HashType)) - witnessStack[1] = []byte{1} - witnessStack[2] = signDesc.WitnessScript - - return witnessStack, nil -} - -// HtlcSecondLevelSpend exposes the public witness generation function for -// spending an HTLC success transaction, either due to an expiring time lock or -// having had the payment preimage. This method is able to spend any -// second-level HTLC transaction, assuming the caller sets the locktime or -// seqno properly. 
-// -// NOTE: The caller MUST set the txn version, sequence number, and sign -// descriptor's sig hash cache before invocation. -func HtlcSecondLevelSpend(signer Signer, signDesc *SignDescriptor, - sweepTx *wire.MsgTx) (wire.TxWitness, er.R) { - - // With the proper sequence and version set, we'll now sign the timeout - // transaction using the passed signed descriptor. In order to generate - // a valid signature, then signDesc should be using the base delay - // public key, and the proper single tweak bytes. - sweepSig, err := signer.SignOutputRaw(sweepTx, signDesc) - if err != nil { - return nil, err - } - - // We set a zero as the first element the witness stack (ignoring the - // witness script), in order to force execution to the second portion - // of the if clause. - witnessStack := wire.TxWitness(make([][]byte, 3)) - witnessStack[0] = append(sweepSig.Serialize(), byte(params.SigHashAll)) - witnessStack[1] = nil - witnessStack[2] = signDesc.WitnessScript - - return witnessStack, nil -} - -// LockTimeToSequence converts the passed relative locktime to a sequence -// number in accordance to BIP-68. -// See: https://github.com/bitcoin/bips/blob/master/bip-0068.mediawiki -// * (Compatibility) -func LockTimeToSequence(isSeconds bool, locktime uint32) uint32 { - if !isSeconds { - // The locktime is to be expressed in confirmations. - return locktime - } - - // Set the 22nd bit which indicates the lock time is in seconds, then - // shift the locktime over by 9 since the time granularity is in - // 512-second intervals (2^9). This results in a max lock-time of - // 33,554,431 seconds, or 1.06 years. - return SequenceLockTimeSeconds | (locktime >> 9) -} - -// CommitScriptToSelf constructs the public key script for the output on the -// commitment transaction paying to the "owner" of said commitment transaction. -// If the other party learns of the preimage to the revocation hash, then they -// can claim all the settled funds in the channel, plus the unsettled funds. 
-// -// Possible Input Scripts: -// REVOKE: 1 -// SENDRSWEEP: -// -// Output Script: -// OP_IF -// -// OP_ELSE -// OP_CHECKSEQUENCEVERIFY OP_DROP -// -// OP_ENDIF -// OP_CHECKSIG -func CommitScriptToSelf(csvTimeout uint32, selfKey, revokeKey *btcec.PublicKey) ([]byte, er.R) { - // This script is spendable under two conditions: either the - // 'csvTimeout' has passed and we can redeem our funds, or they can - // produce a valid signature with the revocation public key. The - // revocation public key will *only* be known to the other party if we - // have divulged the revocation hash, allowing them to homomorphically - // derive the proper private key which corresponds to the revoke public - // key. - builder := scriptbuilder.NewScriptBuilder() - - builder.AddOp(opcode.OP_IF) - - // If a valid signature using the revocation key is presented, then - // allow an immediate spend provided the proper signature. - builder.AddData(revokeKey.SerializeCompressed()) - - builder.AddOp(opcode.OP_ELSE) - - // Otherwise, we can re-claim our funds after a CSV delay of - // 'csvTimeout' timeout blocks, and a valid signature. - builder.AddInt64(int64(csvTimeout)) - builder.AddOp(opcode.OP_CHECKSEQUENCEVERIFY) - builder.AddOp(opcode.OP_DROP) - builder.AddData(selfKey.SerializeCompressed()) - - builder.AddOp(opcode.OP_ENDIF) - - // Finally, we'll validate the signature against the public key that's - // left on the top of the stack. - builder.AddOp(opcode.OP_CHECKSIG) - - return builder.Script() -} - -// CommitSpendTimeout constructs a valid witness allowing the owner of a -// particular commitment transaction to spend the output returning settled -// funds back to themselves after a relative block timeout. In order to -// properly spend the transaction, the target input's sequence number should be -// set accordingly based off of the target relative block timeout within the -// redeem script. 
Additionally, OP_CSV requires that the version of the -// transaction spending a pkscript with OP_CSV within it *must* be >= 2. -func CommitSpendTimeout(signer Signer, signDesc *SignDescriptor, - sweepTx *wire.MsgTx) (wire.TxWitness, er.R) { - - // Ensure the transaction version supports the validation of sequence - // locks and CSV semantics. - if sweepTx.Version < 2 { - return nil, er.Errorf("version of passed transaction MUST "+ - "be >= 2, not %v", sweepTx.Version) - } - - // With the sequence number in place, we're now able to properly sign - // off on the sweep transaction. - sweepSig, err := signer.SignOutputRaw(sweepTx, signDesc) - if err != nil { - return nil, err - } - - // Place an empty byte as the first item in the evaluated witness stack - // to force script execution to the timeout spend clause. We need to - // place an empty byte in order to ensure our script is still valid - // from the PoV of nodes that are enforcing minimal OP_IF/OP_NOTIF. - witnessStack := wire.TxWitness(make([][]byte, 3)) - witnessStack[0] = append(sweepSig.Serialize(), byte(signDesc.HashType)) - witnessStack[1] = nil - witnessStack[2] = signDesc.WitnessScript - - return witnessStack, nil -} - -// CommitSpendRevoke constructs a valid witness allowing a node to sweep the -// settled output of a malicious counterparty who broadcasts a revoked -// commitment transaction. -// -// NOTE: The passed SignDescriptor should include the raw (untweaked) -// revocation base public key of the receiver and also the proper double tweak -// value based on the commitment secret of the revoked commitment. -func CommitSpendRevoke(signer Signer, signDesc *SignDescriptor, - sweepTx *wire.MsgTx) (wire.TxWitness, er.R) { - - sweepSig, err := signer.SignOutputRaw(sweepTx, signDesc) - if err != nil { - return nil, err - } - - // Place a 1 as the first item in the evaluated witness stack to - // force script execution to the revocation clause. 
- witnessStack := wire.TxWitness(make([][]byte, 3)) - witnessStack[0] = append(sweepSig.Serialize(), byte(signDesc.HashType)) - witnessStack[1] = []byte{1} - witnessStack[2] = signDesc.WitnessScript - - return witnessStack, nil -} - -// CommitSpendNoDelay constructs a valid witness allowing a node to spend their -// settled no-delay output on the counterparty's commitment transaction. If the -// tweakless field is true, then we'll omit the set where we tweak the pubkey -// with a random set of bytes, and use it directly in the witness stack. -// -// NOTE: The passed SignDescriptor should include the raw (untweaked) public -// key of the receiver and also the proper single tweak value based on the -// current commitment point. -func CommitSpendNoDelay(signer Signer, signDesc *SignDescriptor, - sweepTx *wire.MsgTx, tweakless bool) (wire.TxWitness, er.R) { - - if signDesc.KeyDesc.PubKey == nil { - return nil, er.Errorf("cannot generate witness with nil " + - "KeyDesc pubkey") - } - - // This is just a regular p2wkh spend which looks something like: - // * witness: - sweepSig, err := signer.SignOutputRaw(sweepTx, signDesc) - if err != nil { - return nil, err - } - - // Finally, we'll manually craft the witness. The witness here is the - // exact same as a regular p2wkh witness, depending on the value of the - // tweakless bool. - witness := make([][]byte, 2) - witness[0] = append(sweepSig.Serialize(), byte(signDesc.HashType)) - - switch tweakless { - // If we're tweaking the key, then we use the tweaked public key as the - // last item in the witness stack which was originally used to created - // the pkScript we're spending. - case false: - witness[1] = TweakPubKeyWithTweak( - signDesc.KeyDesc.PubKey, signDesc.SingleTweak, - ).SerializeCompressed() - - // Otherwise, we can just use the raw pubkey, since there's no random - // value to be combined. 
- case true: - witness[1] = signDesc.KeyDesc.PubKey.SerializeCompressed() - } - - return witness, nil -} - -// CommitScriptUnencumbered constructs the public key script on the commitment -// transaction paying to the "other" party. The constructed output is a normal -// p2wkh output spendable immediately, requiring no contestation period. -func CommitScriptUnencumbered(key *btcec.PublicKey) ([]byte, er.R) { - // This script goes to the "other" party, and is spendable immediately. - builder := scriptbuilder.NewScriptBuilder() - builder.AddOp(opcode.OP_0) - builder.AddData(btcutil.Hash160(key.SerializeCompressed())) - - return builder.Script() -} - -// CommitScriptToRemoteConfirmed constructs the script for the output on the -// commitment transaction paying to the remote party of said commitment -// transaction. The money can only be spend after one confirmation. -// -// Possible Input Scripts: -// SWEEP: -// -// Output Script: -// OP_CHECKSIGVERIFY -// 1 OP_CHECKSEQUENCEVERIFY -func CommitScriptToRemoteConfirmed(key *btcec.PublicKey) ([]byte, er.R) { - builder := scriptbuilder.NewScriptBuilder() - - // Only the given key can spend the output. - builder.AddData(key.SerializeCompressed()) - builder.AddOp(opcode.OP_CHECKSIGVERIFY) - - // Check that the it has one confirmation. - builder.AddOp(opcode.OP_1) - builder.AddOp(opcode.OP_CHECKSEQUENCEVERIFY) - - return builder.Script() -} - -// CommitSpendToRemoteConfirmed constructs a valid witness allowing a node to -// spend their settled output on the counterparty's commitment transaction when -// it has one confirmetion. This is used for the anchor channel type. The -// spending key will always be non-tweaked for this output type. 
-func CommitSpendToRemoteConfirmed(signer Signer, signDesc *SignDescriptor, - sweepTx *wire.MsgTx) (wire.TxWitness, er.R) { - - if signDesc.KeyDesc.PubKey == nil { - return nil, er.Errorf("cannot generate witness with nil " + - "KeyDesc pubkey") - } - - // Similar to non delayed output, only a signature is needed. - sweepSig, err := signer.SignOutputRaw(sweepTx, signDesc) - if err != nil { - return nil, err - } - - // Finally, we'll manually craft the witness. The witness here is the - // signature and the redeem script. - witnessStack := make([][]byte, 2) - witnessStack[0] = append(sweepSig.Serialize(), byte(signDesc.HashType)) - witnessStack[1] = signDesc.WitnessScript - - return witnessStack, nil -} - -// CommitScriptAnchor constructs the script for the anchor output spendable by -// the given key immediately, or by anyone after 16 confirmations. -// -// Possible Input Scripts: -// By owner: -// By anyone (after 16 conf): -// -// Output Script: -// OP_CHECKSIG OP_IFDUP -// OP_NOTIF -// OP_16 OP_CSV -// OP_ENDIF -func CommitScriptAnchor(key *btcec.PublicKey) ([]byte, er.R) { - builder := scriptbuilder.NewScriptBuilder() - - // Spend immediately with key. - builder.AddData(key.SerializeCompressed()) - builder.AddOp(opcode.OP_CHECKSIG) - - // Duplicate the value if true, since it will be consumed by the NOTIF. - builder.AddOp(opcode.OP_IFDUP) - - // Otherwise spendable by anyone after 16 confirmations. - builder.AddOp(opcode.OP_NOTIF) - builder.AddOp(opcode.OP_16) - builder.AddOp(opcode.OP_CHECKSEQUENCEVERIFY) - builder.AddOp(opcode.OP_ENDIF) - - return builder.Script() -} - -// CommitSpendAnchor constructs a valid witness allowing a node to spend their -// anchor output on the commitment transaction using their funding key. This is -// used for the anchor channel type. 
-func CommitSpendAnchor(signer Signer, signDesc *SignDescriptor, - sweepTx *wire.MsgTx) (wire.TxWitness, er.R) { - - if signDesc.KeyDesc.PubKey == nil { - return nil, er.Errorf("cannot generate witness with nil " + - "KeyDesc pubkey") - } - - // Create a signature. - sweepSig, err := signer.SignOutputRaw(sweepTx, signDesc) - if err != nil { - return nil, err - } - - // The witness here is just a signature and the redeem script. - witnessStack := make([][]byte, 2) - witnessStack[0] = append(sweepSig.Serialize(), byte(signDesc.HashType)) - witnessStack[1] = signDesc.WitnessScript - - return witnessStack, nil -} - -// CommitSpendAnchorAnyone constructs a witness allowing anyone to spend the -// anchor output after it has gotten 16 confirmations. Since no signing is -// required, only knowledge of the redeem script is necessary to spend it. -func CommitSpendAnchorAnyone(script []byte) (wire.TxWitness, er.R) { - // The witness here is just the redeem script. - witnessStack := make([][]byte, 2) - witnessStack[0] = nil - witnessStack[1] = script - - return witnessStack, nil -} - -// SingleTweakBytes computes set of bytes we call the single tweak. The purpose -// of the single tweak is to randomize all regular delay and payment base -// points. To do this, we generate a hash that binds the commitment point to -// the pay/delay base point. The end end results is that the basePoint is -// tweaked as follows: -// -// * key = basePoint + sha256(commitPoint || basePoint)*G -func SingleTweakBytes(commitPoint, basePoint *btcec.PublicKey) []byte { - h := sha256.New() - h.Write(commitPoint.SerializeCompressed()) - h.Write(basePoint.SerializeCompressed()) - return h.Sum(nil) -} - -// TweakPubKey tweaks a public base point given a per commitment point. The per -// commitment point is a unique point on our target curve for each commitment -// transaction. 
When tweaking a local base point for use in a remote commitment -// transaction, the remote party's current per commitment point is to be used. -// The opposite applies for when tweaking remote keys. Precisely, the following -// operation is used to "tweak" public keys: -// -// tweakPub := basePoint + sha256(commitPoint || basePoint) * G -// := G*k + sha256(commitPoint || basePoint)*G -// := G*(k + sha256(commitPoint || basePoint)) -// -// Therefore, if a party possess the value k, the private key of the base -// point, then they are able to derive the proper private key for the -// revokeKey by computing: -// -// revokePriv := k + sha256(commitPoint || basePoint) mod N -// -// Where N is the order of the sub-group. -// -// The rationale for tweaking all public keys used within the commitment -// contracts is to ensure that all keys are properly delinearized to avoid any -// funny business when jointly collaborating to compute public and private -// keys. Additionally, the use of the per commitment point ensures that each -// commitment state houses a unique set of keys which is useful when creating -// blinded channel outsourcing protocols. -// -// TODO(roasbeef): should be using double-scalar mult here -func TweakPubKey(basePoint, commitPoint *btcec.PublicKey) *btcec.PublicKey { - tweakBytes := SingleTweakBytes(commitPoint, basePoint) - return TweakPubKeyWithTweak(basePoint, tweakBytes) -} - -// TweakPubKeyWithTweak is the exact same as the TweakPubKey function, however -// it accepts the raw tweak bytes directly rather than the commitment point. -func TweakPubKeyWithTweak(pubKey *btcec.PublicKey, tweakBytes []byte) *btcec.PublicKey { - curve := btcec.S256() - tweakX, tweakY := curve.ScalarBaseMult(tweakBytes) - - // TODO(roasbeef): check that both passed on curve? 
- x, y := curve.Add(pubKey.X, pubKey.Y, tweakX, tweakY) - return &btcec.PublicKey{ - X: x, - Y: y, - Curve: curve, - } -} - -// TweakPrivKey tweaks the private key of a public base point given a per -// commitment point. The per commitment secret is the revealed revocation -// secret for the commitment state in question. This private key will only need -// to be generated in the case that a channel counter party broadcasts a -// revoked state. Precisely, the following operation is used to derive a -// tweaked private key: -// -// * tweakPriv := basePriv + sha256(commitment || basePub) mod N -// -// Where N is the order of the sub-group. -func TweakPrivKey(basePriv *btcec.PrivateKey, commitTweak []byte) *btcec.PrivateKey { - // tweakInt := sha256(commitPoint || basePub) - tweakInt := new(big.Int).SetBytes(commitTweak) - - tweakInt = tweakInt.Add(tweakInt, basePriv.D) - tweakInt = tweakInt.Mod(tweakInt, btcec.S256().N) - - tweakPriv, _ := btcec.PrivKeyFromBytes(btcec.S256(), tweakInt.Bytes()) - return tweakPriv -} - -// DeriveRevocationPubkey derives the revocation public key given the -// counterparty's commitment key, and revocation preimage derived via a -// pseudo-random-function. 
In the event that we (for some reason) broadcast a -// revoked commitment transaction, then if the other party knows the revocation -// preimage, then they'll be able to derive the corresponding private key to -// this private key by exploiting the homomorphism in the elliptic curve group: -// * https://en.wikipedia.org/wiki/Group_homomorphism#Homomorphisms_of_abelian_groups -// -// The derivation is performed as follows: -// -// revokeKey := revokeBase * sha256(revocationBase || commitPoint) + -// commitPoint * sha256(commitPoint || revocationBase) -// -// := G*(revokeBasePriv * sha256(revocationBase || commitPoint)) + -// G*(commitSecret * sha256(commitPoint || revocationBase)) -// -// := G*(revokeBasePriv * sha256(revocationBase || commitPoint) + -// commitSecret * sha256(commitPoint || revocationBase)) -// -// Therefore, once we divulge the revocation secret, the remote peer is able to -// compute the proper private key for the revokeKey by computing: -// -// revokePriv := (revokeBasePriv * sha256(revocationBase || commitPoint)) + -// (commitSecret * sha256(commitPoint || revocationBase)) mod N -// -// Where N is the order of the sub-group. -func DeriveRevocationPubkey(revokeBase, commitPoint *btcec.PublicKey) *btcec.PublicKey { - - // R = revokeBase * sha256(revocationBase || commitPoint) - revokeTweakBytes := SingleTweakBytes(revokeBase, commitPoint) - rX, rY := btcec.S256().ScalarMult(revokeBase.X, revokeBase.Y, - revokeTweakBytes) - - // C = commitPoint * sha256(commitPoint || revocationBase) - commitTweakBytes := SingleTweakBytes(commitPoint, revokeBase) - cX, cY := btcec.S256().ScalarMult(commitPoint.X, commitPoint.Y, - commitTweakBytes) - - // Now that we have the revocation point, we add this to their commitment - // public key in order to obtain the revocation public key. 
- // - // P = R + C - revX, revY := btcec.S256().Add(rX, rY, cX, cY) - return &btcec.PublicKey{ - X: revX, - Y: revY, - Curve: btcec.S256(), - } -} - -// DeriveRevocationPrivKey derives the revocation private key given a node's -// commitment private key, and the preimage to a previously seen revocation -// hash. Using this derived private key, a node is able to claim the output -// within the commitment transaction of a node in the case that they broadcast -// a previously revoked commitment transaction. -// -// The private key is derived as follows: -// revokePriv := (revokeBasePriv * sha256(revocationBase || commitPoint)) + -// (commitSecret * sha256(commitPoint || revocationBase)) mod N -// -// Where N is the order of the sub-group. -func DeriveRevocationPrivKey(revokeBasePriv *btcec.PrivateKey, - commitSecret *btcec.PrivateKey) *btcec.PrivateKey { - - // r = sha256(revokeBasePub || commitPoint) - revokeTweakBytes := SingleTweakBytes(revokeBasePriv.PubKey(), - commitSecret.PubKey()) - revokeTweakInt := new(big.Int).SetBytes(revokeTweakBytes) - - // c = sha256(commitPoint || revokeBasePub) - commitTweakBytes := SingleTweakBytes(commitSecret.PubKey(), - revokeBasePriv.PubKey()) - commitTweakInt := new(big.Int).SetBytes(commitTweakBytes) - - // Finally to derive the revocation secret key we'll perform the - // following operation: - // - // k = (revocationPriv * r) + (commitSecret * c) mod N - // - // This works since: - // P = (G*a)*b + (G*c)*d - // P = G*(a*b) + G*(c*d) - // P = G*(a*b + c*d) - revokeHalfPriv := revokeTweakInt.Mul(revokeTweakInt, revokeBasePriv.D) - commitHalfPriv := commitTweakInt.Mul(commitTweakInt, commitSecret.D) - - revocationPriv := revokeHalfPriv.Add(revokeHalfPriv, commitHalfPriv) - revocationPriv = revocationPriv.Mod(revocationPriv, btcec.S256().N) - - priv, _ := btcec.PrivKeyFromBytes(btcec.S256(), revocationPriv.Bytes()) - return priv -} - -// ComputeCommitmentPoint generates a commitment point given a commitment -// secret. 
The commitment point for each state is used to randomize each key in -// the key-ring and also to used as a tweak to derive new public+private keys -// for the state. -func ComputeCommitmentPoint(commitSecret []byte) *btcec.PublicKey { - x, y := btcec.S256().ScalarBaseMult(commitSecret) - - return &btcec.PublicKey{ - X: x, - Y: y, - Curve: btcec.S256(), - } -} diff --git a/lnd/input/script_utils_test.go b/lnd/input/script_utils_test.go deleted file mode 100644 index 68319471..00000000 --- a/lnd/input/script_utils_test.go +++ /dev/null @@ -1,1471 +0,0 @@ -package input - -import ( - "bytes" - "crypto/sha256" - "encoding/hex" - "fmt" - "testing" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/lnd/keychain" - "github.com/pkt-cash/pktd/txscript" - "github.com/pkt-cash/pktd/txscript/params" - "github.com/pkt-cash/pktd/wire" - "github.com/pkt-cash/pktd/wire/constants" -) - -// assertEngineExecution executes the VM returned by the newEngine closure, -// asserting the result matches the validity expectation. In the case where it -// doesn't match the expectation, it executes the script step-by-step and -// prints debug information to stdout. -func assertEngineExecution(t *testing.T, testNum int, valid bool, - newEngine func() (*txscript.Engine, er.R)) { - t.Helper() - - // Get a new VM to execute. - vm, err := newEngine() - if err != nil { - t.Fatalf("unable to create engine: %v", err) - } - - // Execute the VM, only go on to the step-by-step execution if - // it doesn't validate as expected. - vmErr := vm.Execute() - if valid == (vmErr == nil) { - return - } - - // Now that the execution didn't match what we expected, fetch a new VM - // to step through. - vm, err = newEngine() - if err != nil { - t.Fatalf("unable to create engine: %v", err) - } - - // This buffer will trace execution of the Script, dumping out - // to stdout. 
- var debugBuf bytes.Buffer - - done := false - for !done { - dis, err := vm.DisasmPC() - if err != nil { - t.Fatalf("stepping (%v)\n", err) - } - debugBuf.WriteString(fmt.Sprintf("stepping %v\n", dis)) - - done, err = vm.Step() - if err != nil && valid { - fmt.Println(debugBuf.String()) - t.Fatalf("spend test case #%v failed, spend "+ - "should be valid: %v", testNum, err) - } else if err == nil && !valid && done { - fmt.Println(debugBuf.String()) - t.Fatalf("spend test case #%v succeed, spend "+ - "should be invalid: %v", testNum, err) - } - - debugBuf.WriteString(fmt.Sprintf("Stack: %v", vm.GetStack())) - //debugBuf.WriteString(fmt.Sprintf("AltStack: %v", vm.GetAltStack())) - } - - // If we get to this point the unexpected case was not reached - // during step execution, which happens for some checks, like - // the clean-stack rule. - validity := "invalid" - if valid { - validity = "valid" - } - - fmt.Println(debugBuf.String()) - t.Fatalf("%v spend test case #%v execution ended with: %v", validity, testNum, vmErr) -} - -// TestRevocationKeyDerivation tests that given a public key, and a revocation -// hash, the homomorphic revocation public and private key derivation work -// properly. -func TestRevocationKeyDerivation(t *testing.T) { - t.Parallel() - - // First, we'll generate a commitment point, and a commitment secret. - // These will be used to derive the ultimate revocation keys. - revocationPreimage := testHdSeed.CloneBytes() - commitSecret, commitPoint := btcec.PrivKeyFromBytes(btcec.S256(), - revocationPreimage) - - // With the commitment secrets generated, we'll now create the base - // keys we'll use to derive the revocation key from. - basePriv, basePub := btcec.PrivKeyFromBytes(btcec.S256(), - testWalletPrivKey) - - // With the point and key obtained, we can now derive the revocation - // key itself. 
- revocationPub := DeriveRevocationPubkey(basePub, commitPoint) - - // The revocation public key derived from the original public key, and - // the one derived from the private key should be identical. - revocationPriv := DeriveRevocationPrivKey(basePriv, commitSecret) - if !revocationPub.IsEqual(revocationPriv.PubKey()) { - t.Fatalf("derived public keys don't match!") - } -} - -// TestTweakKeyDerivation tests that given a public key, and commitment tweak, -// then we're able to properly derive a tweaked private key that corresponds to -// the computed tweak public key. This scenario ensure that our key derivation -// for any of the non revocation keys on the commitment transaction is correct. -func TestTweakKeyDerivation(t *testing.T) { - t.Parallel() - - // First, we'll generate a base public key that we'll be "tweaking". - baseSecret := testHdSeed.CloneBytes() - basePriv, basePub := btcec.PrivKeyFromBytes(btcec.S256(), baseSecret) - - // With the base key create, we'll now create a commitment point, and - // from that derive the bytes we'll used to tweak the base public key. - commitPoint := ComputeCommitmentPoint(bobsPrivKey) - commitTweak := SingleTweakBytes(commitPoint, basePub) - - // Next, we'll modify the public key. When we apply the same operation - // to the private key we should get a key that matches. - tweakedPub := TweakPubKey(basePub, commitPoint) - - // Finally, attempt to re-generate the private key that matches the - // tweaked public key. The derived key should match exactly. - derivedPriv := TweakPrivKey(basePriv, commitTweak) - if !derivedPriv.PubKey().IsEqual(tweakedPub) { - t.Fatalf("pub keys don't match") - } -} - -// makeWitnessTestCase is a helper function used within test cases involving -// the validity of a crafted witness. This function is a wrapper function which -// allows constructing table-driven tests. In the case of an error while -// constructing the witness, the test fails fatally. 
-func makeWitnessTestCase(t *testing.T, - f func() (wire.TxWitness, er.R)) func() wire.TxWitness { - - return func() wire.TxWitness { - witness, err := f() - if err != nil { - t.Fatalf("unable to create witness test case: %v", err) - } - - return witness - } -} - -// TestHTLCSenderSpendValidation tests all possible valid+invalid redemption -// paths in the script used within the sender's commitment transaction for an -// outgoing HTLC. -// -// The following cases are exercised by this test: -// sender script: -// * receiver spends -// * revoke w/ sig -// * HTLC with invalid preimage size -// * HTLC with valid preimage size + sig -// * sender spends -// * invalid lock-time for CLTV -// * invalid sequence for CSV -// * valid lock-time+sequence, valid sig -func TestHTLCSenderSpendValidation(t *testing.T) { - t.Parallel() - - // We generate a fake output, and the corresponding txin. This output - // doesn't need to exist, as we'll only be validating spending from the - // transaction that references this. - txid, err := chainhash.NewHash(testHdSeed.CloneBytes()) - if err != nil { - t.Fatalf("unable to create txid: %v", err) - } - fundingOut := &wire.OutPoint{ - Hash: *txid, - Index: 50, - } - fakeFundingTxIn := wire.NewTxIn(fundingOut, nil, nil) - - // Next we'll the commitment secret for our commitment tx and also the - // revocation key that we'll use as well. - revokePreimage := testHdSeed.CloneBytes() - commitSecret, commitPoint := btcec.PrivKeyFromBytes(btcec.S256(), - revokePreimage) - - // Generate a payment preimage to be used below. - paymentPreimage := revokePreimage - paymentPreimage[0] ^= 1 - paymentHash := sha256.Sum256(paymentPreimage[:]) - - // We'll also need some tests keys for alice and bob, and metadata of - // the HTLC output. 
- aliceKeyPriv, aliceKeyPub := btcec.PrivKeyFromBytes(btcec.S256(), - testWalletPrivKey) - bobKeyPriv, bobKeyPub := btcec.PrivKeyFromBytes(btcec.S256(), - bobsPrivKey) - paymentAmt := btcutil.Amount(1 * 10e8) - - aliceLocalKey := TweakPubKey(aliceKeyPub, commitPoint) - bobLocalKey := TweakPubKey(bobKeyPub, commitPoint) - - // As we'll be modeling spends from Alice's commitment transaction, - // we'll be using Bob's base point for the revocation key. - revocationKey := DeriveRevocationPubkey(bobKeyPub, commitPoint) - - bobCommitTweak := SingleTweakBytes(commitPoint, bobKeyPub) - aliceCommitTweak := SingleTweakBytes(commitPoint, aliceKeyPub) - - // Finally, we'll create mock signers for both of them based on their - // private keys. This test simplifies a bit and uses the same key as - // the base point for all scripts and derivations. - bobSigner := &MockSigner{Privkeys: []*btcec.PrivateKey{bobKeyPriv}} - aliceSigner := &MockSigner{Privkeys: []*btcec.PrivateKey{aliceKeyPriv}} - - var ( - htlcWitnessScript, htlcPkScript []byte - htlcOutput *wire.TxOut - sweepTxSigHashes *txscript.TxSigHashes - senderCommitTx, sweepTx *wire.MsgTx - bobRecvrSig *btcec.Signature - bobSigHash params.SigHashType - ) - - // genCommitTx generates a commitment tx where the htlc output requires - // confirmation to be spent according to 'confirmed'. - genCommitTx := func(confirmed bool) { - // Generate the raw HTLC redemption scripts, and its p2wsh - // counterpart. - htlcWitnessScript, err = SenderHTLCScript( - aliceLocalKey, bobLocalKey, revocationKey, - paymentHash[:], confirmed, - ) - if err != nil { - t.Fatalf("unable to create htlc sender script: %v", err) - } - htlcPkScript, err = WitnessScriptHash(htlcWitnessScript) - if err != nil { - t.Fatalf("unable to create p2wsh htlc script: %v", err) - } - - // This will be Alice's commitment transaction. 
In this - // scenario Alice is sending an HTLC to a node she has a path - // to (could be Bob, could be multiple hops down, it doesn't - // really matter). - htlcOutput = &wire.TxOut{ - Value: int64(paymentAmt), - PkScript: htlcPkScript, - } - senderCommitTx = wire.NewMsgTx(2) - senderCommitTx.AddTxIn(fakeFundingTxIn) - senderCommitTx.AddTxOut(htlcOutput) - } - - // genSweepTx generates a sweep of the senderCommitTx, and sets the - // sequence and sighash single|anyonecanspend if confirmed is true. - genSweepTx := func(confirmed bool) { - prevOut := &wire.OutPoint{ - Hash: senderCommitTx.TxHash(), - Index: 0, - } - - sweepTx = wire.NewMsgTx(2) - - sweepTx.AddTxIn(wire.NewTxIn(prevOut, nil, nil)) - if confirmed { - sweepTx.TxIn[0].Sequence = LockTimeToSequence(false, 1) - } - - sweepTx.AddTxOut( - &wire.TxOut{ - PkScript: []byte("doesn't matter"), - Value: 1 * 10e8, - }, - ) - - sweepTxSigHashes = txscript.NewTxSigHashes(sweepTx) - - bobSigHash = params.SigHashAll - if confirmed { - bobSigHash = params.SigHashSingle | params.SigHashAnyOneCanPay - } - - // We'll also generate a signature on the sweep transaction above - // that will act as Bob's signature to Alice for the second level HTLC - // transaction. 
- bobSignDesc := SignDescriptor{ - KeyDesc: keychain.KeyDescriptor{ - PubKey: bobKeyPub, - }, - SingleTweak: bobCommitTweak, - WitnessScript: htlcWitnessScript, - Output: htlcOutput, - HashType: bobSigHash, - SigHashes: sweepTxSigHashes, - InputIndex: 0, - } - bobSig, err := bobSigner.SignOutputRaw(sweepTx, &bobSignDesc) - if err != nil { - t.Fatalf("unable to generate alice signature: %v", err) - } - - bobRecvrSig, err = btcec.ParseDERSignature( - bobSig.Serialize(), btcec.S256(), - ) - if err != nil { - t.Fatalf("unable to parse signature: %v", err) - } - } - - testCases := []struct { - witness func() wire.TxWitness - valid bool - }{ - { - // revoke w/ sig - // TODO(roasbeef): test invalid revoke - makeWitnessTestCase(t, func() (wire.TxWitness, er.R) { - genCommitTx(false) - genSweepTx(false) - - signDesc := &SignDescriptor{ - KeyDesc: keychain.KeyDescriptor{ - PubKey: bobKeyPub, - }, - DoubleTweak: commitSecret, - WitnessScript: htlcWitnessScript, - Output: htlcOutput, - HashType: params.SigHashAll, - SigHashes: sweepTxSigHashes, - InputIndex: 0, - } - - return SenderHtlcSpendRevokeWithKey(bobSigner, signDesc, - revocationKey, sweepTx) - }), - true, - }, - { - // HTLC with invalid preimage size - makeWitnessTestCase(t, func() (wire.TxWitness, er.R) { - genCommitTx(false) - genSweepTx(false) - - signDesc := &SignDescriptor{ - KeyDesc: keychain.KeyDescriptor{ - PubKey: bobKeyPub, - }, - SingleTweak: bobCommitTweak, - WitnessScript: htlcWitnessScript, - Output: htlcOutput, - HashType: params.SigHashAll, - SigHashes: sweepTxSigHashes, - InputIndex: 0, - } - - return SenderHtlcSpendRedeem(bobSigner, signDesc, - sweepTx, - // Invalid preimage length - bytes.Repeat([]byte{1}, 45)) - }), - false, - }, - { - // HTLC with valid preimage size + sig - // TODO(roasbeef): invalid preimage - makeWitnessTestCase(t, func() (wire.TxWitness, er.R) { - genCommitTx(false) - genSweepTx(false) - - signDesc := &SignDescriptor{ - KeyDesc: keychain.KeyDescriptor{ - PubKey: bobKeyPub, - 
}, - SingleTweak: bobCommitTweak, - WitnessScript: htlcWitnessScript, - Output: htlcOutput, - HashType: params.SigHashAll, - SigHashes: sweepTxSigHashes, - InputIndex: 0, - } - - return SenderHtlcSpendRedeem(bobSigner, signDesc, - sweepTx, paymentPreimage) - }), - true, - }, - { - // HTLC with valid preimage size + sig, and with - // enforced locktime in HTLC script. - makeWitnessTestCase(t, func() (wire.TxWitness, er.R) { - // Make a commit tx that needs confirmation for - // HTLC output to be spent. - genCommitTx(true) - - // Generate a sweep with the locktime set. - genSweepTx(true) - - signDesc := &SignDescriptor{ - KeyDesc: keychain.KeyDescriptor{ - PubKey: bobKeyPub, - }, - SingleTweak: bobCommitTweak, - WitnessScript: htlcWitnessScript, - Output: htlcOutput, - HashType: params.SigHashAll, - SigHashes: sweepTxSigHashes, - InputIndex: 0, - } - - return SenderHtlcSpendRedeem(bobSigner, signDesc, - sweepTx, paymentPreimage) - }), - true, - }, - { - // HTLC with valid preimage size + sig, but trying to - // spend CSV output without sequence set. - makeWitnessTestCase(t, func() (wire.TxWitness, er.R) { - // Generate commitment tx with 1 CSV locked - // HTLC. - genCommitTx(true) - - // Generate sweep tx that doesn't have locktime - // enabled. - genSweepTx(false) - - signDesc := &SignDescriptor{ - KeyDesc: keychain.KeyDescriptor{ - PubKey: bobKeyPub, - }, - SingleTweak: bobCommitTweak, - WitnessScript: htlcWitnessScript, - Output: htlcOutput, - HashType: params.SigHashAll, - SigHashes: sweepTxSigHashes, - InputIndex: 0, - } - - return SenderHtlcSpendRedeem(bobSigner, signDesc, - sweepTx, paymentPreimage) - }), - false, - }, - - { - // valid spend to the transition the state of the HTLC - // output with the second level HTLC timeout - // transaction. 
- makeWitnessTestCase(t, func() (wire.TxWitness, er.R) { - genCommitTx(false) - genSweepTx(false) - - signDesc := &SignDescriptor{ - KeyDesc: keychain.KeyDescriptor{ - PubKey: aliceKeyPub, - }, - SingleTweak: aliceCommitTweak, - WitnessScript: htlcWitnessScript, - Output: htlcOutput, - HashType: params.SigHashAll, - SigHashes: sweepTxSigHashes, - InputIndex: 0, - } - - return SenderHtlcSpendTimeout( - bobRecvrSig, bobSigHash, aliceSigner, - signDesc, sweepTx, - ) - }), - true, - }, - { - // valid spend to the transition the state of the HTLC - // output with the second level HTLC timeout - // transaction. - makeWitnessTestCase(t, func() (wire.TxWitness, er.R) { - // Make a commit tx that needs confirmation for - // HTLC output to be spent. - genCommitTx(true) - - // Generate a sweep with the locktime set. - genSweepTx(true) - - signDesc := &SignDescriptor{ - KeyDesc: keychain.KeyDescriptor{ - PubKey: aliceKeyPub, - }, - SingleTweak: aliceCommitTweak, - WitnessScript: htlcWitnessScript, - Output: htlcOutput, - HashType: params.SigHashAll, - SigHashes: sweepTxSigHashes, - InputIndex: 0, - } - - return SenderHtlcSpendTimeout( - bobRecvrSig, bobSigHash, aliceSigner, - signDesc, sweepTx, - ) - }), - true, - }, - { - // valid spend to the transition the state of the HTLC - // output with the second level HTLC timeout - // transaction. - makeWitnessTestCase(t, func() (wire.TxWitness, er.R) { - // Generate commitment tx with 1 CSV locked - // HTLC. - genCommitTx(true) - - // Generate sweep tx that doesn't have locktime - // enabled. 
- genSweepTx(false) - - signDesc := &SignDescriptor{ - KeyDesc: keychain.KeyDescriptor{ - PubKey: aliceKeyPub, - }, - SingleTweak: aliceCommitTweak, - WitnessScript: htlcWitnessScript, - Output: htlcOutput, - HashType: params.SigHashAll, - SigHashes: sweepTxSigHashes, - InputIndex: 0, - } - - return SenderHtlcSpendTimeout( - bobRecvrSig, bobSigHash, aliceSigner, - signDesc, sweepTx, - ) - }), - false, - }, - } - - // TODO(roasbeef): set of cases to ensure able to sign w/ keypath and - // not - - for i, testCase := range testCases { - sweepTx.TxIn[0].Witness = testCase.witness() - - newEngine := func() (*txscript.Engine, er.R) { - return txscript.NewEngine(htlcPkScript, - sweepTx, 0, txscript.StandardVerifyFlags, nil, - nil, int64(paymentAmt)) - } - - assertEngineExecution(t, i, testCase.valid, newEngine) - } -} - -// TestHTLCReceiverSpendValidation tests all possible valid+invalid redemption -// paths in the script used within the receiver's commitment transaction for an -// incoming HTLC. -// -// The following cases are exercised by this test: -// * receiver spends -// * HTLC redemption w/ invalid preimage size -// * HTLC redemption w/ invalid sequence -// * HTLC redemption w/ valid preimage size -// * sender spends -// * revoke w/ sig -// * refund w/ invalid lock time -// * refund w/ valid lock time -func TestHTLCReceiverSpendValidation(t *testing.T) { - t.Parallel() - - // We generate a fake output, and the corresponding txin. This output - // doesn't need to exist, as we'll only be validating spending from the - // transaction that references this. - txid, err := chainhash.NewHash(testHdSeed.CloneBytes()) - if err != nil { - t.Fatalf("unable to create txid: %v", err) - } - fundingOut := &wire.OutPoint{ - Hash: *txid, - Index: 50, - } - fakeFundingTxIn := wire.NewTxIn(fundingOut, nil, nil) - - // Next we'll the commitment secret for our commitment tx and also the - // revocation key that we'll use as well. 
- revokePreimage := testHdSeed.CloneBytes() - commitSecret, commitPoint := btcec.PrivKeyFromBytes(btcec.S256(), - revokePreimage) - - // Generate a payment preimage to be used below. - paymentPreimage := revokePreimage - paymentPreimage[0] ^= 1 - paymentHash := sha256.Sum256(paymentPreimage[:]) - - // We'll also need some tests keys for alice and bob, and metadata of - // the HTLC output. - aliceKeyPriv, aliceKeyPub := btcec.PrivKeyFromBytes(btcec.S256(), - testWalletPrivKey) - bobKeyPriv, bobKeyPub := btcec.PrivKeyFromBytes(btcec.S256(), - bobsPrivKey) - paymentAmt := btcutil.Amount(1 * 10e8) - cltvTimeout := uint32(8) - - aliceLocalKey := TweakPubKey(aliceKeyPub, commitPoint) - bobLocalKey := TweakPubKey(bobKeyPub, commitPoint) - - // As we'll be modeling spends from Bob's commitment transaction, we'll - // be using Alice's base point for the revocation key. - revocationKey := DeriveRevocationPubkey(aliceKeyPub, commitPoint) - - bobCommitTweak := SingleTweakBytes(commitPoint, bobKeyPub) - aliceCommitTweak := SingleTweakBytes(commitPoint, aliceKeyPub) - - // Finally, we'll create mock signers for both of them based on their - // private keys. This test simplifies a bit and uses the same key as - // the base point for all scripts and derivations. - bobSigner := &MockSigner{Privkeys: []*btcec.PrivateKey{bobKeyPriv}} - aliceSigner := &MockSigner{Privkeys: []*btcec.PrivateKey{aliceKeyPriv}} - - var ( - htlcWitnessScript, htlcPkScript []byte - htlcOutput *wire.TxOut - receiverCommitTx, sweepTx *wire.MsgTx - sweepTxSigHashes *txscript.TxSigHashes - aliceSenderSig *btcec.Signature - aliceSigHash params.SigHashType - ) - - genCommitTx := func(confirmed bool) { - // Generate the raw HTLC redemption scripts, and its p2wsh - // counterpart. 
- htlcWitnessScript, err = ReceiverHTLCScript( - cltvTimeout, aliceLocalKey, bobLocalKey, revocationKey, - paymentHash[:], confirmed, - ) - if err != nil { - t.Fatalf("unable to create htlc sender script: %v", err) - } - htlcPkScript, err = WitnessScriptHash(htlcWitnessScript) - if err != nil { - t.Fatalf("unable to create p2wsh htlc script: %v", err) - } - - // This will be Bob's commitment transaction. In this scenario Alice is - // sending an HTLC to a node she has a path to (could be Bob, could be - // multiple hops down, it doesn't really matter). - htlcOutput = &wire.TxOut{ - Value: int64(paymentAmt), - PkScript: htlcWitnessScript, - } - - receiverCommitTx = wire.NewMsgTx(2) - receiverCommitTx.AddTxIn(fakeFundingTxIn) - receiverCommitTx.AddTxOut(htlcOutput) - } - - genSweepTx := func(confirmed bool) { - prevOut := &wire.OutPoint{ - Hash: receiverCommitTx.TxHash(), - Index: 0, - } - - sweepTx = wire.NewMsgTx(2) - sweepTx.AddTxIn(&wire.TxIn{ - PreviousOutPoint: *prevOut, - }) - if confirmed { - sweepTx.TxIn[0].Sequence = LockTimeToSequence(false, 1) - } - - sweepTx.AddTxOut( - &wire.TxOut{ - PkScript: []byte("doesn't matter"), - Value: 1 * 10e8, - }, - ) - sweepTxSigHashes = txscript.NewTxSigHashes(sweepTx) - - aliceSigHash = params.SigHashAll - if confirmed { - aliceSigHash = params.SigHashSingle | params.SigHashAnyOneCanPay - } - - // We'll also generate a signature on the sweep transaction above - // that will act as Alice's signature to Bob for the second level HTLC - // transaction. 
- aliceSignDesc := SignDescriptor{ - KeyDesc: keychain.KeyDescriptor{ - PubKey: aliceKeyPub, - }, - SingleTweak: aliceCommitTweak, - WitnessScript: htlcWitnessScript, - Output: htlcOutput, - HashType: aliceSigHash, - SigHashes: sweepTxSigHashes, - InputIndex: 0, - } - aliceSig, err := aliceSigner.SignOutputRaw(sweepTx, &aliceSignDesc) - if err != nil { - t.Fatalf("unable to generate alice signature: %v", err) - } - - aliceSenderSig, err = btcec.ParseDERSignature( - aliceSig.Serialize(), btcec.S256(), - ) - if err != nil { - t.Fatalf("unable to parse signature: %v", err) - } - } - - // TODO(roasbeef): modify valid to check precise script errors? - testCases := []struct { - witness func() wire.TxWitness - valid bool - }{ - { - // HTLC redemption w/ invalid preimage size - makeWitnessTestCase(t, func() (wire.TxWitness, er.R) { - genCommitTx(false) - genSweepTx(false) - - signDesc := &SignDescriptor{ - KeyDesc: keychain.KeyDescriptor{ - PubKey: bobKeyPub, - }, - SingleTweak: bobCommitTweak, - WitnessScript: htlcWitnessScript, - Output: htlcOutput, - HashType: params.SigHashAll, - SigHashes: sweepTxSigHashes, - InputIndex: 0, - } - - return ReceiverHtlcSpendRedeem( - aliceSenderSig, aliceSigHash, - bytes.Repeat([]byte{1}, 45), bobSigner, - signDesc, sweepTx, - ) - - }), - false, - }, - { - // HTLC redemption w/ valid preimage size - makeWitnessTestCase(t, func() (wire.TxWitness, er.R) { - genCommitTx(false) - genSweepTx(false) - - signDesc := &SignDescriptor{ - KeyDesc: keychain.KeyDescriptor{ - PubKey: bobKeyPub, - }, - SingleTweak: bobCommitTweak, - WitnessScript: htlcWitnessScript, - Output: htlcOutput, - HashType: params.SigHashAll, - SigHashes: sweepTxSigHashes, - InputIndex: 0, - } - - return ReceiverHtlcSpendRedeem( - aliceSenderSig, aliceSigHash, - paymentPreimage, bobSigner, - signDesc, sweepTx, - ) - }), - true, - }, - { - // revoke w/ sig - makeWitnessTestCase(t, func() (wire.TxWitness, er.R) { - genCommitTx(false) - genSweepTx(false) - - signDesc := 
&SignDescriptor{ - KeyDesc: keychain.KeyDescriptor{ - PubKey: aliceKeyPub, - }, - DoubleTweak: commitSecret, - WitnessScript: htlcWitnessScript, - Output: htlcOutput, - HashType: params.SigHashAll, - SigHashes: sweepTxSigHashes, - InputIndex: 0, - } - - return ReceiverHtlcSpendRevokeWithKey(aliceSigner, - signDesc, revocationKey, sweepTx) - }), - true, - }, - { - // HTLC redemption w/ valid preimage size, and with - // enforced locktime in HTLC scripts. - makeWitnessTestCase(t, func() (wire.TxWitness, er.R) { - // Make a commit tx that needs confirmation for - // HTLC output to be spent. - genCommitTx(true) - - // Generate a sweep with the locktime set. - genSweepTx(true) - - signDesc := &SignDescriptor{ - KeyDesc: keychain.KeyDescriptor{ - PubKey: bobKeyPub, - }, - SingleTweak: bobCommitTweak, - WitnessScript: htlcWitnessScript, - Output: htlcOutput, - HashType: params.SigHashAll, - SigHashes: sweepTxSigHashes, - InputIndex: 0, - } - - return ReceiverHtlcSpendRedeem( - aliceSenderSig, aliceSigHash, - paymentPreimage, bobSigner, - signDesc, sweepTx, - ) - }), - true, - }, - { - // HTLC redemption w/ valid preimage size, but trying - // to spend CSV output without sequence set. - makeWitnessTestCase(t, func() (wire.TxWitness, er.R) { - // Generate commitment tx with 1 CSV locked - // HTLC. - genCommitTx(true) - - // Generate sweep tx that doesn't have locktime - // enabled. 
- genSweepTx(false) - - signDesc := &SignDescriptor{ - KeyDesc: keychain.KeyDescriptor{ - PubKey: bobKeyPub, - }, - SingleTweak: bobCommitTweak, - WitnessScript: htlcWitnessScript, - Output: htlcOutput, - HashType: params.SigHashAll, - SigHashes: sweepTxSigHashes, - InputIndex: 0, - } - - return ReceiverHtlcSpendRedeem( - aliceSenderSig, aliceSigHash, - paymentPreimage, bobSigner, signDesc, - sweepTx, - ) - }), - false, - }, - - { - // refund w/ invalid lock time - makeWitnessTestCase(t, func() (wire.TxWitness, er.R) { - genCommitTx(false) - genSweepTx(false) - - signDesc := &SignDescriptor{ - KeyDesc: keychain.KeyDescriptor{ - PubKey: aliceKeyPub, - }, - SingleTweak: aliceCommitTweak, - WitnessScript: htlcWitnessScript, - Output: htlcOutput, - HashType: params.SigHashAll, - SigHashes: sweepTxSigHashes, - InputIndex: 0, - } - - return ReceiverHtlcSpendTimeout(aliceSigner, signDesc, - sweepTx, int32(cltvTimeout-2)) - }), - false, - }, - { - // refund w/ valid lock time - makeWitnessTestCase(t, func() (wire.TxWitness, er.R) { - genCommitTx(false) - genSweepTx(false) - - signDesc := &SignDescriptor{ - KeyDesc: keychain.KeyDescriptor{ - PubKey: aliceKeyPub, - }, - SingleTweak: aliceCommitTweak, - WitnessScript: htlcWitnessScript, - Output: htlcOutput, - HashType: params.SigHashAll, - SigHashes: sweepTxSigHashes, - InputIndex: 0, - } - - return ReceiverHtlcSpendTimeout(aliceSigner, signDesc, - sweepTx, int32(cltvTimeout)) - }), - true, - }, - { - // refund w/ valid lock time, and enforced locktime in - // HTLC scripts. - makeWitnessTestCase(t, func() (wire.TxWitness, er.R) { - // Make a commit tx that needs confirmation for - // HTLC output to be spent. - genCommitTx(true) - - // Generate a sweep with the locktime set. 
- genSweepTx(true) - - signDesc := &SignDescriptor{ - KeyDesc: keychain.KeyDescriptor{ - PubKey: aliceKeyPub, - }, - SingleTweak: aliceCommitTweak, - WitnessScript: htlcWitnessScript, - Output: htlcOutput, - HashType: params.SigHashAll, - SigHashes: sweepTxSigHashes, - InputIndex: 0, - } - - return ReceiverHtlcSpendTimeout(aliceSigner, signDesc, - sweepTx, int32(cltvTimeout)) - }), - true, - }, - { - // refund w/ valid lock time, but no sequence set in - // sweep tx trying to spend CSV locked HTLC output. - makeWitnessTestCase(t, func() (wire.TxWitness, er.R) { - // Generate commitment tx with 1 CSV locked - // HTLC. - genCommitTx(true) - - // Generate sweep tx that doesn't have locktime - // enabled. - genSweepTx(false) - - signDesc := &SignDescriptor{ - KeyDesc: keychain.KeyDescriptor{ - PubKey: aliceKeyPub, - }, - SingleTweak: aliceCommitTweak, - WitnessScript: htlcWitnessScript, - Output: htlcOutput, - HashType: params.SigHashAll, - SigHashes: sweepTxSigHashes, - InputIndex: 0, - } - - return ReceiverHtlcSpendTimeout(aliceSigner, signDesc, - sweepTx, int32(cltvTimeout)) - }), - false, - }, - } - - for i, testCase := range testCases { - sweepTx.TxIn[0].Witness = testCase.witness() - - newEngine := func() (*txscript.Engine, er.R) { - return txscript.NewEngine(htlcPkScript, - sweepTx, 0, txscript.StandardVerifyFlags, nil, - nil, int64(paymentAmt)) - } - - assertEngineExecution(t, i, testCase.valid, newEngine) - } -} - -// TestSecondLevelHtlcSpends tests all the possible redemption clauses from the -// HTLC success and timeout covenant transactions. -func TestSecondLevelHtlcSpends(t *testing.T) { - t.Parallel() - - // We'll start be creating a creating a 2BTC HTLC. - const htlcAmt = btcutil.Amount(2 * 10e8) - - // In all of our scenarios, the CSV timeout to claim a self output will - // be 5 blocks. - const claimDelay = 5 - - // First we'll set up some initial key state for Alice and Bob that - // will be used in the scripts we created below. 
- aliceKeyPriv, aliceKeyPub := btcec.PrivKeyFromBytes(btcec.S256(), - testWalletPrivKey) - bobKeyPriv, bobKeyPub := btcec.PrivKeyFromBytes(btcec.S256(), - bobsPrivKey) - - revokePreimage := testHdSeed.CloneBytes() - commitSecret, commitPoint := btcec.PrivKeyFromBytes( - btcec.S256(), revokePreimage) - - // As we're modeling this as Bob sweeping the HTLC on-chain from his - // commitment transaction after a period of time, we'll be using a - // revocation key derived from Alice's base point and his secret. - revocationKey := DeriveRevocationPubkey(aliceKeyPub, commitPoint) - - // Next, craft a fake HTLC outpoint that we'll use to generate the - // sweeping transaction using. - txid, err := chainhash.NewHash(testHdSeed.CloneBytes()) - if err != nil { - t.Fatalf("unable to create txid: %v", err) - } - htlcOutPoint := &wire.OutPoint{ - Hash: *txid, - Index: 0, - } - sweepTx := wire.NewMsgTx(2) - sweepTx.AddTxIn(wire.NewTxIn(htlcOutPoint, nil, nil)) - sweepTx.AddTxOut( - &wire.TxOut{ - PkScript: []byte("doesn't matter"), - Value: 1 * 10e8, - }, - ) - sweepTxSigHashes := txscript.NewTxSigHashes(sweepTx) - - // The delay key will be crafted using Bob's public key as the output - // we created will be spending from Alice's commitment transaction. - delayKey := TweakPubKey(bobKeyPub, commitPoint) - - // The commit tweak will be required in order for Bob to derive the - // proper key need to spend the output. - commitTweak := SingleTweakBytes(commitPoint, bobKeyPub) - - // Finally we'll generate the HTLC script itself that we'll be spending - // from. The revocation clause can be claimed by Alice, while Bob can - // sweep the output after a particular delay. 
- htlcWitnessScript, errr := SecondLevelHtlcScript(revocationKey, - delayKey, claimDelay) - if errr != nil { - t.Fatalf("unable to create htlc script: %v", errr) - } - htlcPkScript, errr := WitnessScriptHash(htlcWitnessScript) - if errr != nil { - t.Fatalf("unable to create htlc output: %v", errr) - } - - htlcOutput := &wire.TxOut{ - PkScript: htlcPkScript, - Value: int64(htlcAmt), - } - - // TODO(roasbeef): make actually use timeout/success txns? - - // Finally, we'll create mock signers for both of them based on their - // private keys. This test simplifies a bit and uses the same key as - // the base point for all scripts and derivations. - bobSigner := &MockSigner{Privkeys: []*btcec.PrivateKey{bobKeyPriv}} - aliceSigner := &MockSigner{Privkeys: []*btcec.PrivateKey{aliceKeyPriv}} - - testCases := []struct { - witness func() wire.TxWitness - valid bool - }{ - { - // Sender of the HTLC attempts to activate the - // revocation clause, but uses the wrong key (fails to - // use the double tweak in this case). - makeWitnessTestCase(t, func() (wire.TxWitness, er.R) { - signDesc := &SignDescriptor{ - KeyDesc: keychain.KeyDescriptor{ - PubKey: aliceKeyPub, - }, - WitnessScript: htlcWitnessScript, - Output: htlcOutput, - HashType: params.SigHashAll, - SigHashes: sweepTxSigHashes, - InputIndex: 0, - } - - return HtlcSpendRevoke(aliceSigner, signDesc, - sweepTx) - }), - false, - }, - { - // Sender of HTLC activates the revocation clause. 
- makeWitnessTestCase(t, func() (wire.TxWitness, er.R) { - signDesc := &SignDescriptor{ - KeyDesc: keychain.KeyDescriptor{ - PubKey: aliceKeyPub, - }, - DoubleTweak: commitSecret, - WitnessScript: htlcWitnessScript, - Output: htlcOutput, - HashType: params.SigHashAll, - SigHashes: sweepTxSigHashes, - InputIndex: 0, - } - - return HtlcSpendRevoke(aliceSigner, signDesc, - sweepTx) - }), - true, - }, - { - // Receiver of the HTLC attempts to sweep, but tries to - // do so pre-maturely with a smaller CSV delay (2 - // blocks instead of 5 blocks). - makeWitnessTestCase(t, func() (wire.TxWitness, er.R) { - signDesc := &SignDescriptor{ - KeyDesc: keychain.KeyDescriptor{ - PubKey: bobKeyPub, - }, - SingleTweak: commitTweak, - WitnessScript: htlcWitnessScript, - Output: htlcOutput, - HashType: params.SigHashAll, - SigHashes: sweepTxSigHashes, - InputIndex: 0, - } - - return HtlcSpendSuccess(bobSigner, signDesc, - sweepTx, claimDelay-3) - }), - false, - }, - { - // Receiver of the HTLC sweeps with the proper CSV - // delay, but uses the wrong key (leaves off the single - // tweak). - makeWitnessTestCase(t, func() (wire.TxWitness, er.R) { - signDesc := &SignDescriptor{ - KeyDesc: keychain.KeyDescriptor{ - PubKey: bobKeyPub, - }, - WitnessScript: htlcWitnessScript, - Output: htlcOutput, - HashType: params.SigHashAll, - SigHashes: sweepTxSigHashes, - InputIndex: 0, - } - - return HtlcSpendSuccess(bobSigner, signDesc, - sweepTx, claimDelay) - }), - false, - }, - { - // Receiver of the HTLC sweeps with the proper CSV - // delay, and the correct key. 
- makeWitnessTestCase(t, func() (wire.TxWitness, er.R) { - signDesc := &SignDescriptor{ - KeyDesc: keychain.KeyDescriptor{ - PubKey: bobKeyPub, - }, - SingleTweak: commitTweak, - WitnessScript: htlcWitnessScript, - Output: htlcOutput, - HashType: params.SigHashAll, - SigHashes: sweepTxSigHashes, - InputIndex: 0, - } - - return HtlcSpendSuccess(bobSigner, signDesc, - sweepTx, claimDelay) - }), - true, - }, - } - - for i, testCase := range testCases { - sweepTx.TxIn[0].Witness = testCase.witness() - - newEngine := func() (*txscript.Engine, er.R) { - return txscript.NewEngine(htlcPkScript, - sweepTx, 0, txscript.StandardVerifyFlags, nil, - nil, int64(htlcAmt)) - } - - assertEngineExecution(t, i, testCase.valid, newEngine) - } -} - -// TestCommitSpendToRemoteConfirmed checks that the delayed version of the -// to_remote version can only be spent by the owner, and after one -// confirmation. -func TestCommitSpendToRemoteConfirmed(t *testing.T) { - t.Parallel() - - const outputVal = btcutil.Amount(2 * 10e8) - - aliceKeyPriv, aliceKeyPub := btcec.PrivKeyFromBytes(btcec.S256(), - testWalletPrivKey) - - txid, err := chainhash.NewHash(testHdSeed.CloneBytes()) - if err != nil { - t.Fatalf("unable to create txid: %v", err) - } - commitOut := &wire.OutPoint{ - Hash: *txid, - Index: 0, - } - commitScript, errr := CommitScriptToRemoteConfirmed(aliceKeyPub) - if errr != nil { - t.Fatalf("unable to create htlc script: %v", errr) - } - commitPkScript, errr := WitnessScriptHash(commitScript) - if errr != nil { - t.Fatalf("unable to create htlc output: %v", errr) - } - - commitOutput := &wire.TxOut{ - PkScript: commitPkScript, - Value: int64(outputVal), - } - - sweepTx := wire.NewMsgTx(2) - sweepTx.AddTxIn(wire.NewTxIn(commitOut, nil, nil)) - sweepTx.AddTxOut( - &wire.TxOut{ - PkScript: []byte("doesn't matter"), - Value: 1 * 10e8, - }, - ) - - aliceSigner := &MockSigner{Privkeys: []*btcec.PrivateKey{aliceKeyPriv}} - - testCases := []struct { - witness func() wire.TxWitness - valid 
bool - }{ - { - // Alice can spend after the a CSV delay has passed. - makeWitnessTestCase(t, func() (wire.TxWitness, er.R) { - sweepTx.TxIn[0].Sequence = LockTimeToSequence(false, 1) - sweepTxSigHashes := txscript.NewTxSigHashes(sweepTx) - - signDesc := &SignDescriptor{ - KeyDesc: keychain.KeyDescriptor{ - PubKey: aliceKeyPub, - }, - WitnessScript: commitScript, - Output: commitOutput, - HashType: params.SigHashAll, - SigHashes: sweepTxSigHashes, - InputIndex: 0, - } - - return CommitSpendToRemoteConfirmed(aliceSigner, signDesc, - sweepTx) - }), - true, - }, - { - // Alice cannot spend output without sequence set. - makeWitnessTestCase(t, func() (wire.TxWitness, er.R) { - sweepTx.TxIn[0].Sequence = constants.MaxTxInSequenceNum - sweepTxSigHashes := txscript.NewTxSigHashes(sweepTx) - - signDesc := &SignDescriptor{ - KeyDesc: keychain.KeyDescriptor{ - PubKey: aliceKeyPub, - }, - WitnessScript: commitScript, - Output: commitOutput, - HashType: params.SigHashAll, - SigHashes: sweepTxSigHashes, - InputIndex: 0, - } - - return CommitSpendToRemoteConfirmed(aliceSigner, signDesc, - sweepTx) - }), - false, - }, - } - - for i, testCase := range testCases { - sweepTx.TxIn[0].Witness = testCase.witness() - - newEngine := func() (*txscript.Engine, er.R) { - return txscript.NewEngine(commitPkScript, - sweepTx, 0, txscript.StandardVerifyFlags, nil, - nil, int64(outputVal)) - } - - assertEngineExecution(t, i, testCase.valid, newEngine) - } -} - -// TestSpendAnchor checks that we can spend the anchors using the various spend -// paths. -func TestSpendAnchor(t *testing.T) { - t.Parallel() - - const anchorSize = 294 - - // First we'll set up some initial key state for Alice. - aliceKeyPriv, aliceKeyPub := btcec.PrivKeyFromBytes(btcec.S256(), - testWalletPrivKey) - - // Create a fake anchor outpoint that we'll use to generate the - // sweeping transaction. 
- txid, err := chainhash.NewHash(testHdSeed.CloneBytes()) - if err != nil { - t.Fatalf("unable to create txid: %v", err) - } - anchorOutPoint := &wire.OutPoint{ - Hash: *txid, - Index: 0, - } - - sweepTx := wire.NewMsgTx(2) - sweepTx.AddTxIn(wire.NewTxIn(anchorOutPoint, nil, nil)) - sweepTx.AddTxOut( - &wire.TxOut{ - PkScript: []byte("doesn't matter"), - Value: 1 * 10e8, - }, - ) - - // Generate the anchor script that can be spent by Alice immediately, - // or by anyone after 16 blocks. - anchorScript, errr := CommitScriptAnchor(aliceKeyPub) - if errr != nil { - t.Fatalf("unable to create htlc script: %v", errr) - } - anchorPkScript, errr := WitnessScriptHash(anchorScript) - if errr != nil { - t.Fatalf("unable to create htlc output: %v", errr) - } - - anchorOutput := &wire.TxOut{ - PkScript: anchorPkScript, - Value: int64(anchorSize), - } - - // Create mock signer for Alice. - aliceSigner := &MockSigner{Privkeys: []*btcec.PrivateKey{aliceKeyPriv}} - - testCases := []struct { - witness func() wire.TxWitness - valid bool - }{ - { - // Alice can spend immediately. - makeWitnessTestCase(t, func() (wire.TxWitness, er.R) { - sweepTx.TxIn[0].Sequence = constants.MaxTxInSequenceNum - sweepTxSigHashes := txscript.NewTxSigHashes(sweepTx) - - signDesc := &SignDescriptor{ - KeyDesc: keychain.KeyDescriptor{ - PubKey: aliceKeyPub, - }, - WitnessScript: anchorScript, - Output: anchorOutput, - HashType: params.SigHashAll, - SigHashes: sweepTxSigHashes, - InputIndex: 0, - } - - return CommitSpendAnchor(aliceSigner, signDesc, - sweepTx) - }), - true, - }, - { - // Anyone can spend after 16 blocks. - makeWitnessTestCase(t, func() (wire.TxWitness, er.R) { - sweepTx.TxIn[0].Sequence = LockTimeToSequence(false, 16) - return CommitSpendAnchorAnyone(anchorScript) - }), - true, - }, - { - // Anyone cannot spend before 16 blocks. 
- makeWitnessTestCase(t, func() (wire.TxWitness, er.R) { - sweepTx.TxIn[0].Sequence = LockTimeToSequence(false, 15) - return CommitSpendAnchorAnyone(anchorScript) - }), - false, - }, - } - - for i, testCase := range testCases { - sweepTx.TxIn[0].Witness = testCase.witness() - - newEngine := func() (*txscript.Engine, er.R) { - return txscript.NewEngine(anchorPkScript, - sweepTx, 0, txscript.StandardVerifyFlags, nil, - nil, int64(anchorSize)) - } - - assertEngineExecution(t, i, testCase.valid, newEngine) - } -} - -// TestSpecificationKeyDerivation implements the test vectors provided in -// BOLT-03, Appendix E. -func TestSpecificationKeyDerivation(t *testing.T) { - const ( - baseSecretHex = "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f" - perCommitmentSecretHex = "1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100" - basePointHex = "036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2" - perCommitmentPointHex = "025f7117a78150fe2ef97db7cfc83bd57b2e2c0d0dd25eaf467a4a1c2a45ce1486" - ) - - baseSecret, err := privkeyFromHex(baseSecretHex) - if err != nil { - t.Fatalf("Failed to parse serialized privkey: %v", err) - } - perCommitmentSecret, err := privkeyFromHex(perCommitmentSecretHex) - if err != nil { - t.Fatalf("Failed to parse serialized privkey: %v", err) - } - basePoint, err := pubkeyFromHex(basePointHex) - if err != nil { - t.Fatalf("Failed to parse serialized pubkey: %v", err) - } - perCommitmentPoint, err := pubkeyFromHex(perCommitmentPointHex) - if err != nil { - t.Fatalf("Failed to parse serialized pubkey: %v", err) - } - - // name: derivation of key from basepoint and per_commitment_point - const expectedLocalKeyHex = "0235f2dbfaa89b57ec7b055afe29849ef7ddfeb1cefdb9ebdc43f5494984db29e5" - actualLocalKey := TweakPubKey(basePoint, perCommitmentPoint) - actualLocalKeyHex := pubkeyToHex(actualLocalKey) - if actualLocalKeyHex != expectedLocalKeyHex { - t.Errorf("Incorrect derivation of local public key: "+ - "expected 
%v, got %v", expectedLocalKeyHex, actualLocalKeyHex) - } - - // name: derivation of secret key from basepoint secret and per_commitment_secret - const expectedLocalPrivKeyHex = "cbced912d3b21bf196a766651e436aff192362621ce317704ea2f75d87e7be0f" - tweak := SingleTweakBytes(perCommitmentPoint, basePoint) - actualLocalPrivKey := TweakPrivKey(baseSecret, tweak) - actualLocalPrivKeyHex := privkeyToHex(actualLocalPrivKey) - if actualLocalPrivKeyHex != expectedLocalPrivKeyHex { - t.Errorf("Incorrect derivation of local private key: "+ - "expected %v, got %v, %v", expectedLocalPrivKeyHex, - actualLocalPrivKeyHex, hex.EncodeToString(tweak)) - } - - // name: derivation of revocation key from basepoint and per_commitment_point - const expectedRevocationKeyHex = "02916e326636d19c33f13e8c0c3a03dd157f332f3e99c317c141dd865eb01f8ff0" - actualRevocationKey := DeriveRevocationPubkey(basePoint, perCommitmentPoint) - actualRevocationKeyHex := pubkeyToHex(actualRevocationKey) - if actualRevocationKeyHex != expectedRevocationKeyHex { - t.Errorf("Incorrect derivation of revocation public key: "+ - "expected %v, got %v", expectedRevocationKeyHex, - actualRevocationKeyHex) - } - - // name: derivation of revocation secret from basepoint_secret and per_commitment_secret - const expectedRevocationPrivKeyHex = "d09ffff62ddb2297ab000cc85bcb4283fdeb6aa052affbc9dddcf33b61078110" - actualRevocationPrivKey := DeriveRevocationPrivKey(baseSecret, - perCommitmentSecret) - actualRevocationPrivKeyHex := privkeyToHex(actualRevocationPrivKey) - if actualRevocationPrivKeyHex != expectedRevocationPrivKeyHex { - t.Errorf("Incorrect derivation of revocation private key: "+ - "expected %v, got %v", expectedRevocationPrivKeyHex, - actualRevocationPrivKeyHex) - } -} diff --git a/lnd/input/signdescriptor.go b/lnd/input/signdescriptor.go deleted file mode 100644 index 37357ab6..00000000 --- a/lnd/input/signdescriptor.go +++ /dev/null @@ -1,229 +0,0 @@ -package input - -import ( - "encoding/binary" - "io" - - 
"github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/lnd/keychain" - "github.com/pkt-cash/pktd/txscript" - "github.com/pkt-cash/pktd/txscript/params" - "github.com/pkt-cash/pktd/wire" -) - -var ( - // ErrTweakOverdose signals a SignDescriptor is invalid because both of its - // SingleTweak and DoubleTweak are non-nil. - ErrTweakOverdose = er.GenericErrorType.CodeWithDetail("ErrTweakOverdose", - "sign descriptor should only have one tweak") -) - -// SignDescriptor houses the necessary information required to successfully -// sign a given segwit output. This struct is used by the Signer interface in -// order to gain access to critical data needed to generate a valid signature. -type SignDescriptor struct { - // KeyDesc is a descriptor that precisely describes *which* key to use - // for signing. This may provide the raw public key directly, or - // require the Signer to re-derive the key according to the populated - // derivation path. - KeyDesc keychain.KeyDescriptor - - // SingleTweak is a scalar value that will be added to the private key - // corresponding to the above public key to obtain the private key to - // be used to sign this input. This value is typically derived via the - // following computation: - // - // * derivedKey = privkey + sha256(perCommitmentPoint || pubKey) mod N - // - // NOTE: If this value is nil, then the input can be signed using only - // the above public key. Either a SingleTweak should be set or a - // DoubleTweak, not both. - SingleTweak []byte - - // DoubleTweak is a private key that will be used in combination with - // its corresponding private key to derive the private key that is to - // be used to sign the target input. Within the Lightning protocol, - // this value is typically the commitment secret from a previously - // revoked commitment transaction. 
This value is in combination with - // two hash values, and the original private key to derive the private - // key to be used when signing. - // - // * k = (privKey*sha256(pubKey || tweakPub) + - // tweakPriv*sha256(tweakPub || pubKey)) mod N - // - // NOTE: If this value is nil, then the input can be signed using only - // the above public key. Either a SingleTweak should be set or a - // DoubleTweak, not both. - DoubleTweak *btcec.PrivateKey - - // WitnessScript is the full script required to properly redeem the - // output. This field should be set to the full script if a p2wsh - // output is being signed. For p2wkh it should be set to the hashed - // script (PkScript). - WitnessScript []byte - - // Output is the target output which should be signed. The PkScript and - // Value fields within the output should be properly populated, - // otherwise an invalid signature may be generated. - Output *wire.TxOut - - // HashType is the target sighash type that should be used when - // generating the final sighash, and signature. - HashType params.SigHashType - - // SigHashes is the pre-computed sighash midstate to be used when - // generating the final sighash for signing. - SigHashes *txscript.TxSigHashes - - // InputIndex is the target input within the transaction that should be - // signed. - InputIndex int -} - -// WriteSignDescriptor serializes a SignDescriptor struct into the passed -// io.Writer stream. -// -// NOTE: We assume the SigHashes and InputIndex fields haven't been assigned -// yet, since that is usually done just before broadcast by the witness -// generator. 
-func WriteSignDescriptor(w io.Writer, sd *SignDescriptor) er.R { - err := util.WriteBin(w, binary.BigEndian, sd.KeyDesc.Family) - if err != nil { - return err - } - err = util.WriteBin(w, binary.BigEndian, sd.KeyDesc.Index) - if err != nil { - return err - } - - err = util.WriteBin(w, binary.BigEndian, sd.KeyDesc.PubKey != nil) - if err != nil { - return err - } - - if sd.KeyDesc.PubKey != nil { - serializedPubKey := sd.KeyDesc.PubKey.SerializeCompressed() - if err := wire.WriteVarBytes(w, 0, serializedPubKey); err != nil { - return err - } - } - - if err := wire.WriteVarBytes(w, 0, sd.SingleTweak); err != nil { - return err - } - - var doubleTweakBytes []byte - if sd.DoubleTweak != nil { - doubleTweakBytes = sd.DoubleTweak.Serialize() - } - if err := wire.WriteVarBytes(w, 0, doubleTweakBytes); err != nil { - return err - } - - if err := wire.WriteVarBytes(w, 0, sd.WitnessScript); err != nil { - return err - } - - if err := writeTxOut(w, sd.Output); err != nil { - return err - } - - var scratch [4]byte - binary.BigEndian.PutUint32(scratch[:], uint32(sd.HashType)) - if _, err := util.Write(w, scratch[:]); err != nil { - return err - } - - return nil -} - -// ReadSignDescriptor deserializes a SignDescriptor struct from the passed -// io.Reader stream. 
-func ReadSignDescriptor(r io.Reader, sd *SignDescriptor) er.R { - err := util.ReadBin(r, binary.BigEndian, &sd.KeyDesc.Family) - if err != nil { - return err - } - err = util.ReadBin(r, binary.BigEndian, &sd.KeyDesc.Index) - if err != nil { - return err - } - - var hasKey bool - err = util.ReadBin(r, binary.BigEndian, &hasKey) - if err != nil { - return err - } - - if hasKey { - pubKeyBytes, err := wire.ReadVarBytes(r, 0, 34, "pubkey") - if err != nil { - return err - } - sd.KeyDesc.PubKey, err = btcec.ParsePubKey( - pubKeyBytes, btcec.S256(), - ) - if err != nil { - return err - } - } - - singleTweak, err := wire.ReadVarBytes(r, 0, 32, "singleTweak") - if err != nil { - return err - } - - // Serializing a SignDescriptor with a nil-valued SingleTweak results - // in deserializing a zero-length slice. Since a nil-valued SingleTweak - // has special meaning and a zero-length slice for a SingleTweak is - // invalid, we can use the zero-length slice as the flag for a - // nil-valued SingleTweak. - if len(singleTweak) == 0 { - sd.SingleTweak = nil - } else { - sd.SingleTweak = singleTweak - } - - doubleTweakBytes, err := wire.ReadVarBytes(r, 0, 32, "doubleTweak") - if err != nil { - return err - } - - // Serializing a SignDescriptor with a nil-valued DoubleTweak results - // in deserializing a zero-length slice. Since a nil-valued DoubleTweak - // has special meaning and a zero-length slice for a DoubleTweak is - // invalid, we can use the zero-length slice as the flag for a - // nil-valued DoubleTweak. - if len(doubleTweakBytes) == 0 { - sd.DoubleTweak = nil - } else { - sd.DoubleTweak, _ = btcec.PrivKeyFromBytes(btcec.S256(), doubleTweakBytes) - } - - // Only one tweak should ever be set, fail if both are present. 
- if sd.SingleTweak != nil && sd.DoubleTweak != nil { - return ErrTweakOverdose.Default() - } - - witnessScript, err := wire.ReadVarBytes(r, 0, 500, "witnessScript") - if err != nil { - return err - } - sd.WitnessScript = witnessScript - - txOut := &wire.TxOut{} - if err := readTxOut(r, txOut); err != nil { - return err - } - sd.Output = txOut - - var hashType [4]byte - if _, err := util.ReadFull(r, hashType[:]); err != nil { - return err - } - sd.HashType = params.SigHashType(binary.BigEndian.Uint32(hashType[:])) - - return nil -} diff --git a/lnd/input/signdescriptor_test.go b/lnd/input/signdescriptor_test.go deleted file mode 100644 index e118414c..00000000 --- a/lnd/input/signdescriptor_test.go +++ /dev/null @@ -1,129 +0,0 @@ -package input - -import ( - "bytes" - "reflect" - "testing" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/lnd/keychain" - "github.com/pkt-cash/pktd/txscript/params" - "github.com/pkt-cash/pktd/wire" -) - -func TestSignDescriptorSerialization(t *testing.T) { - keys := [][]byte{ - {0x04, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a, - 0x01, 0x6b, 0x49, 0x84, 0x0f, 0x8c, 0x53, 0xbc, 0x1e, - 0xb6, 0x8a, 0x38, 0x2e, 0x97, 0xb1, 0x48, 0x2e, 0xca, - 0xd7, 0xb1, 0x48, 0xa6, 0x90, 0x9a, 0x5c, 0xb2, 0xe0, - 0xea, 0xdd, 0xfb, 0x84, 0xcc, 0xf9, 0x74, 0x44, 0x64, - 0xf8, 0x2e, 0x16, 0x0b, 0xfa, 0x9b, 0x8b, 0x64, 0xf9, - 0xd4, 0xc0, 0x3f, 0x99, 0x9b, 0x86, 0x43, 0xf6, 0x56, - 0xb4, 0x12, 0xa3, - }, - {0x04, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a, - 0x01, 0x6b, 0x49, 0x84, 0x0f, 0x8c, 0x53, 0xbc, 0x1e, - 0xb6, 0x8a, 0x38, 0x2e, 0x97, 0xb1, 0x48, 0x2e, 0xca, - 0xd7, 0xb1, 0x48, 0xa6, 0x90, 0x9a, 0x5c, 0xb2, 0xe0, - 0xea, 0xdd, 0xfb, 0x84, 0xcc, 0xf9, 0x74, 0x44, 0x64, - 0xf8, 0x2e, 0x16, 0x0b, 0xfa, 0x9b, 0x8b, 0x64, 0xf9, - 0xd4, 0xc0, 0x3f, 0x99, 0x9b, 0x86, 0x43, 0xf6, 0x56, - 0xb4, 0x12, 0xa3, - }, - } - - signDescriptors := []SignDescriptor{ - { - SingleTweak: []byte{ - 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, - 0x02, 
0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, - 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, - 0x02, 0x02, 0x02, 0x02, 0x02, - }, - WitnessScript: []byte{ - 0x00, 0x14, 0xee, 0x91, 0x41, 0x7e, 0x85, 0x6c, 0xde, - 0x10, 0xa2, 0x91, 0x1e, 0xdc, 0xbd, 0xbd, 0x69, 0xe2, - 0xef, 0xb5, 0x71, 0x48, - }, - Output: &wire.TxOut{ - Value: 5000000000, - PkScript: []byte{ - 0x41, // OP_DATA_65 - 0x04, 0xd6, 0x4b, 0xdf, 0xd0, 0x9e, 0xb1, 0xc5, - 0xfe, 0x29, 0x5a, 0xbd, 0xeb, 0x1d, 0xca, 0x42, - 0x81, 0xbe, 0x98, 0x8e, 0x2d, 0xa0, 0xb6, 0xc1, - 0xc6, 0xa5, 0x9d, 0xc2, 0x26, 0xc2, 0x86, 0x24, - 0xe1, 0x81, 0x75, 0xe8, 0x51, 0xc9, 0x6b, 0x97, - 0x3d, 0x81, 0xb0, 0x1c, 0xc3, 0x1f, 0x04, 0x78, - 0x34, 0xbc, 0x06, 0xd6, 0xd6, 0xed, 0xf6, 0x20, - 0xd1, 0x84, 0x24, 0x1a, 0x6a, 0xed, 0x8b, 0x63, - 0xa6, // 65-byte signature - 0xac, // OP_CHECKSIG - }, - }, - HashType: params.SigHashAll, - }, - - // Test serializing a SignDescriptor with a nil-valued PrivateTweak - { - SingleTweak: nil, - WitnessScript: []byte{ - 0x00, 0x14, 0xee, 0x91, 0x41, 0x7e, 0x85, 0x6c, 0xde, - 0x10, 0xa2, 0x91, 0x1e, 0xdc, 0xbd, 0xbd, 0x69, 0xe2, - 0xef, 0xb5, 0x71, 0x48, - }, - Output: &wire.TxOut{ - Value: 5000000000, - PkScript: []byte{ - 0x41, // OP_DATA_65 - 0x04, 0xd6, 0x4b, 0xdf, 0xd0, 0x9e, 0xb1, 0xc5, - 0xfe, 0x29, 0x5a, 0xbd, 0xeb, 0x1d, 0xca, 0x42, - 0x81, 0xbe, 0x98, 0x8e, 0x2d, 0xa0, 0xb6, 0xc1, - 0xc6, 0xa5, 0x9d, 0xc2, 0x26, 0xc2, 0x86, 0x24, - 0xe1, 0x81, 0x75, 0xe8, 0x51, 0xc9, 0x6b, 0x97, - 0x3d, 0x81, 0xb0, 0x1c, 0xc3, 0x1f, 0x04, 0x78, - 0x34, 0xbc, 0x06, 0xd6, 0xd6, 0xed, 0xf6, 0x20, - 0xd1, 0x84, 0x24, 0x1a, 0x6a, 0xed, 0x8b, 0x63, - 0xa6, // 65-byte signature - 0xac, // OP_CHECKSIG - }, - }, - HashType: params.SigHashAll, - }, - } - - for i := 0; i < len(signDescriptors); i++ { - // Parse pubkeys for each sign descriptor. 
- sd := &signDescriptors[i] - pubkey, err := btcec.ParsePubKey(keys[i], btcec.S256()) - if err != nil { - t.Fatalf("unable to parse pubkey: %v", err) - } - sd.KeyDesc = keychain.KeyDescriptor{ - KeyLocator: keychain.KeyLocator{ - Family: 50, - Index: 99, - }, - PubKey: pubkey, - } - - // Test that serialize -> deserialize yields same result as original. - var buf bytes.Buffer - if err := WriteSignDescriptor(&buf, sd); err != nil { - t.Fatalf("unable to serialize sign descriptor[%v]: %v", i, sd) - } - - desSd := &SignDescriptor{} - if err := ReadSignDescriptor(&buf, desSd); err != nil { - t.Fatalf("unable to deserialize sign descriptor[%v]: %v", i, sd) - } - - if !reflect.DeepEqual(sd, desSd) { - t.Fatalf("original and deserialized sign descriptors not equal:\n"+ - "original : %+v\n"+ - "deserialized : %+v\n", - sd, desSd) - } - } -} diff --git a/lnd/input/signer.go b/lnd/input/signer.go deleted file mode 100644 index 26f923aa..00000000 --- a/lnd/input/signer.go +++ /dev/null @@ -1,43 +0,0 @@ -package input - -import ( - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/wire" -) - -// Signer represents an abstract object capable of generating raw signatures as -// well as full complete input scripts given a valid SignDescriptor and -// transaction. This interface fully abstracts away signing paving the way for -// Signer implementations such as hardware wallets, hardware tokens, HSM's, or -// simply a regular wallet. -type Signer interface { - // SignOutputRaw generates a signature for the passed transaction - // according to the data within the passed SignDescriptor. - // - // NOTE: The resulting signature should be void of a sighash byte. - SignOutputRaw(tx *wire.MsgTx, - signDesc *SignDescriptor) (Signature, er.R) - - // ComputeInputScript generates a complete InputIndex for the passed - // transaction with the signature as defined within the passed - // SignDescriptor. 
This method should be capable of generating the - // proper input script for both regular p2wkh output and p2wkh outputs - // nested within a regular p2sh output. - // - // NOTE: This method will ignore any tweak parameters set within the - // passed SignDescriptor as it assumes a set of typical script - // templates (p2wkh, np2wkh, etc). - ComputeInputScript(tx *wire.MsgTx, signDesc *SignDescriptor) (*Script, er.R) -} - -// Script represents any script inputs required to redeem a previous -// output. This struct is used rather than just a witness, or scripSig in order -// to accommodate nested p2sh which utilizes both types of input scripts. -type Script struct { - // Witness is the full witness stack required to unlock this output. - Witness wire.TxWitness - - // SigScript will only be populated if this is an input script sweeping - // a nested p2sh output. - SigScript []byte -} diff --git a/lnd/input/size.go b/lnd/input/size.go deleted file mode 100644 index 46116e83..00000000 --- a/lnd/input/size.go +++ /dev/null @@ -1,596 +0,0 @@ -package input - -import ( - "github.com/pkt-cash/pktd/blockchain" - "github.com/pkt-cash/pktd/wire" -) - -const ( - // witnessScaleFactor determines the level of "discount" witness data - // receives compared to "base" data. A scale factor of 4, denotes that - // witness data is 1/4 as cheap as regular non-witness data. Value copied - // here for convenience. - witnessScaleFactor = blockchain.WitnessScaleFactor - - // The weight(weight), which is different from the !size! (see BIP-141), - // is calculated as: - // Weight = 4 * BaseSize + WitnessSize (weight). - // BaseSize - size of the transaction without witness data (bytes). - // WitnessSize - witness size (bytes). - // Weight - the metric for determining the weight of the transaction. 
- - // P2WPKHSize 22 bytes - // - OP_0: 1 byte - // - OP_DATA: 1 byte (PublicKeyHASH160 length) - // - PublicKeyHASH160: 20 bytes - P2WPKHSize = 1 + 1 + 20 - - // NestedP2WPKHSize 23 bytes - // - OP_DATA: 1 byte (P2WPKHSize) - // - P2WPKHWitnessProgram: 22 bytes - NestedP2WPKHSize = 1 + P2WPKHSize - - // P2WSHSize 34 bytes - // - OP_0: 1 byte - // - OP_DATA: 1 byte (WitnessScriptSHA256 length) - // - WitnessScriptSHA256: 32 bytes - P2WSHSize = 1 + 1 + 32 - - // NestedP2WSHSize 35 bytes - // - OP_DATA: 1 byte (P2WSHSize) - // - P2WSHWitnessProgram: 34 bytes - NestedP2WSHSize = 1 + P2WSHSize - - // P2PKHOutputSize 34 bytes - // - value: 8 bytes - // - var_int: 1 byte (pkscript_length) - // - pkscript (p2pkh): 25 bytes - P2PKHOutputSize = 8 + 1 + 25 - - // P2WKHOutputSize 31 bytes - // - value: 8 bytes - // - var_int: 1 byte (pkscript_length) - // - pkscript (p2wpkh): 22 bytes - P2WKHOutputSize = 8 + 1 + P2WPKHSize - - // P2WSHOutputSize 43 bytes - // - value: 8 bytes - // - var_int: 1 byte (pkscript_length) - // - pkscript (p2wsh): 34 bytes - P2WSHOutputSize = 8 + 1 + P2WSHSize - - // P2SHOutputSize 32 bytes - // - value: 8 bytes - // - var_int: 1 byte (pkscript_length) - // - pkscript (p2sh): 23 bytes - P2SHOutputSize = 8 + 1 + 23 - - // P2PKHScriptSigSize 108 bytes - // - OP_DATA: 1 byte (signature length) - // - signature - // - OP_DATA: 1 byte (pubkey length) - // - pubkey - P2PKHScriptSigSize = 1 + 73 + 1 + 33 - - // P2WKHWitnessSize 109 bytes - // - number_of_witness_elements: 1 byte - // - signature_length: 1 byte - // - signature - // - pubkey_length: 1 byte - // - pubkey - P2WKHWitnessSize = 1 + 1 + 73 + 1 + 33 - - // MultiSigSize 71 bytes - // - OP_2: 1 byte - // - OP_DATA: 1 byte (pubKeyAlice length) - // - pubKeyAlice: 33 bytes - // - OP_DATA: 1 byte (pubKeyBob length) - // - pubKeyBob: 33 bytes - // - OP_2: 1 byte - // - OP_CHECKMULTISIG: 1 byte - MultiSigSize = 1 + 1 + 33 + 1 + 33 + 1 + 1 - - // MultiSigWitnessSize 222 bytes - // - 
NumberOfWitnessElements: 1 byte - // - NilLength: 1 byte - // - sigAliceLength: 1 byte - // - sigAlice: 73 bytes - // - sigBobLength: 1 byte - // - sigBob: 73 bytes - // - WitnessScriptLength: 1 byte - // - WitnessScript (MultiSig) - MultiSigWitnessSize = 1 + 1 + 1 + 73 + 1 + 73 + 1 + MultiSigSize - - // InputSize 41 bytes - // - PreviousOutPoint: - // - Hash: 32 bytes - // - Index: 4 bytes - // - OP_DATA: 1 byte (ScriptSigLength) - // - ScriptSig: 0 bytes - // - Witness <---- we use "Witness" instead of "ScriptSig" for - // transaction validation, but "Witness" is stored - // separately and weight for it size is smaller. So - // we separate the calculation of ordinary data - // from witness data. - // - Sequence: 4 bytes - InputSize = 32 + 4 + 1 + 4 - - // FundingInputSize represents the size of an input to a funding - // transaction, and is equivalent to the size of a standard segwit input - // as calculated above. - FundingInputSize = InputSize - - // CommitmentDelayOutput 43 bytes - // - Value: 8 bytes - // - VarInt: 1 byte (PkScript length) - // - PkScript (P2WSH) - CommitmentDelayOutput = 8 + 1 + P2WSHSize - - // CommitmentKeyHashOutput 31 bytes - // - Value: 8 bytes - // - VarInt: 1 byte (PkScript length) - // - PkScript (P2WPKH) - CommitmentKeyHashOutput = 8 + 1 + P2WPKHSize - - // CommitmentAnchorOutput 43 bytes - // - Value: 8 bytes - // - VarInt: 1 byte (PkScript length) - // - PkScript (P2WSH) - CommitmentAnchorOutput = 8 + 1 + P2WSHSize - - // HTLCSize 43 bytes - // - Value: 8 bytes - // - VarInt: 1 byte (PkScript length) - // - PkScript (PW2SH) - HTLCSize = 8 + 1 + P2WSHSize - - // WitnessHeaderSize 2 bytes - // - Flag: 1 byte - // - Marker: 1 byte - WitnessHeaderSize = 1 + 1 - - // BaseTxSize 8 bytes - // - Version: 4 bytes - // - LockTime: 4 bytes - BaseTxSize = 4 + 4 - - // BaseCommitmentTxSize 125 + 43 * num-htlc-outputs bytes - // - Version: 4 bytes - // - WitnessHeader <---- part of the witness data - // - CountTxIn: 1 byte - // - TxIn: 41 bytes 
- // FundingInput - // - CountTxOut: 1 byte - // - TxOut: 74 + 43 * num-htlc-outputs bytes - // OutputPayingToThem, - // OutputPayingToUs, - // ....HTLCOutputs... - // - LockTime: 4 bytes - BaseCommitmentTxSize = 4 + 1 + FundingInputSize + 1 + - CommitmentDelayOutput + CommitmentKeyHashOutput + 4 - - // BaseCommitmentTxWeight 500 weight - BaseCommitmentTxWeight = witnessScaleFactor * BaseCommitmentTxSize - - // WitnessCommitmentTxWeight 224 weight - WitnessCommitmentTxWeight = WitnessHeaderSize + MultiSigWitnessSize - - // BaseAnchorCommitmentTxSize 225 + 43 * num-htlc-outputs bytes - // - Version: 4 bytes - // - WitnessHeader <---- part of the witness data - // - CountTxIn: 1 byte - // - TxIn: 41 bytes - // FundingInput - // - CountTxOut: 3 byte - // - TxOut: 4*43 + 43 * num-htlc-outputs bytes - // OutputPayingToThem, - // OutputPayingToUs, - // AnchorPayingToThem, - // AnchorPayingToUs, - // ....HTLCOutputs... - // - LockTime: 4 bytes - BaseAnchorCommitmentTxSize = 4 + 1 + FundingInputSize + 3 + - 2*CommitmentDelayOutput + 2*CommitmentAnchorOutput + 4 - - // BaseAnchorCommitmentTxWeight 900 weight - BaseAnchorCommitmentTxWeight = witnessScaleFactor * BaseAnchorCommitmentTxSize - - // CommitWeight 724 weight - CommitWeight = BaseCommitmentTxWeight + WitnessCommitmentTxWeight - - // AnchorCommitWeight 1124 weight - AnchorCommitWeight = BaseAnchorCommitmentTxWeight + WitnessCommitmentTxWeight - - // HTLCWeight 172 weight - HTLCWeight = witnessScaleFactor * HTLCSize - - // HtlcTimeoutWeight is the weight of the HTLC timeout transaction - // which will transition an outgoing HTLC to the delay-and-claim state. - HtlcTimeoutWeight = 663 - - // HtlcSuccessWeight is the weight of the HTLC success transaction - // which will transition an incoming HTLC to the delay-and-claim state. - HtlcSuccessWeight = 703 - - // HtlcConfirmedScriptOverhead is the extra length of an HTLC script - // that requires confirmation before it can be spent. 
These extra bytes - // is a result of the extra CSV check. - HtlcConfirmedScriptOverhead = 3 - - // HtlcTimeoutWeightConfirmed is the weight of the HTLC timeout - // transaction which will transition an outgoing HTLC to the - // delay-and-claim state, for the confirmed HTLC outputs. It is 3 bytes - // larger because of the additional CSV check in the input script. - HtlcTimeoutWeightConfirmed = HtlcTimeoutWeight + HtlcConfirmedScriptOverhead - - // HtlcSuccessWeightCOnfirmed is the weight of the HTLC success - // transaction which will transition an incoming HTLC to the - // delay-and-claim state, for the confirmed HTLC outputs. It is 3 bytes - // larger because of the cdditional CSV check in the input script. - HtlcSuccessWeightConfirmed = HtlcSuccessWeight + HtlcConfirmedScriptOverhead - - // MaxHTLCNumber is the maximum number HTLCs which can be included in a - // commitment transaction. This limit was chosen such that, in the case - // of a contract breach, the punishment transaction is able to sweep - // all the HTLC's yet still remain below the widely used standard - // weight limits. 
- MaxHTLCNumber = 966 - - // ToLocalScriptSize 79 bytes - // - OP_IF: 1 byte - // - OP_DATA: 1 byte - // - revoke_key: 33 bytes - // - OP_ELSE: 1 byte - // - OP_DATA: 1 byte - // - csv_delay: 4 bytes - // - OP_CHECKSEQUENCEVERIFY: 1 byte - // - OP_DROP: 1 byte - // - OP_DATA: 1 byte - // - delay_key: 33 bytes - // - OP_ENDIF: 1 byte - // - OP_CHECKSIG: 1 byte - ToLocalScriptSize = 1 + 1 + 33 + 1 + 1 + 4 + 1 + 1 + 1 + 33 + 1 + 1 - - // ToLocalTimeoutWitnessSize 156 bytes - // - number_of_witness_elements: 1 byte - // - local_delay_sig_length: 1 byte - // - local_delay_sig: 73 bytes - // - zero_length: 1 byte - // - witness_script_length: 1 byte - // - witness_script (to_local_script) - ToLocalTimeoutWitnessSize = 1 + 1 + 73 + 1 + 1 + ToLocalScriptSize - - // ToLocalPenaltyWitnessSize 157 bytes - // - number_of_witness_elements: 1 byte - // - revocation_sig_length: 1 byte - // - revocation_sig: 73 bytes - // - OP_TRUE_length: 1 byte - // - OP_TRUE: 1 byte - // - witness_script_length: 1 byte - // - witness_script (to_local_script) - ToLocalPenaltyWitnessSize = 1 + 1 + 73 + 1 + 1 + 1 + ToLocalScriptSize - - // ToRemoteConfirmedScriptSize 37 bytes - // - OP_DATA: 1 byte - // - to_remote_key: 33 bytes - // - OP_CHECKSIGVERIFY: 1 byte - // - OP_1: 1 byte - // - OP_CHECKSEQUENCEVERIFY: 1 byte - ToRemoteConfirmedScriptSize = 1 + 33 + 1 + 1 + 1 - - // ToRemoteConfirmedWitnessSize 113 bytes - // - number_of_witness_elements: 1 byte - // - sig_length: 1 byte - // - sig: 73 bytes - // - witness_script_length: 1 byte - // - witness_script (to_remote_delayed_script) - ToRemoteConfirmedWitnessSize = 1 + 1 + 73 + 1 + ToRemoteConfirmedScriptSize - - // AcceptedHtlcScriptSize 143 bytes - // - OP_DUP: 1 byte - // - OP_HASH160: 1 byte - // - OP_DATA: 1 byte (RIPEMD160(SHA256(revocationkey)) length) - // - RIPEMD160(SHA256(revocationkey)): 20 bytes - // - OP_EQUAL: 1 byte - // - OP_IF: 1 byte - // - OP_CHECKSIG: 1 byte - // - OP_ELSE: 1 byte - // - OP_DATA: 1 byte (remotekey length) - 
// - remotekey: 33 bytes - // - OP_SWAP: 1 byte - // - OP_SIZE: 1 byte - // - OP_DATA: 1 byte (32 length) - // - 32: 1 byte - // - OP_EQUAL: 1 byte - // - OP_IF: 1 byte - // - OP_HASH160: 1 byte - // - OP_DATA: 1 byte (RIPEMD160(payment_hash) length) - // - RIPEMD160(payment_hash): 20 bytes - // - OP_EQUALVERIFY: 1 byte - // - 2: 1 byte - // - OP_SWAP: 1 byte - // - OP_DATA: 1 byte (localkey length) - // - localkey: 33 bytes - // - 2: 1 byte - // - OP_CHECKMULTISIG: 1 byte - // - OP_ELSE: 1 byte - // - OP_DROP: 1 byte - // - OP_DATA: 1 byte (cltv_expiry length) - // - cltv_expiry: 4 bytes - // - OP_CHECKLOCKTIMEVERIFY: 1 byte - // - OP_DROP: 1 byte - // - OP_CHECKSIG: 1 byte - // - OP_ENDIF: 1 byte - // - OP_1: 1 byte // These 3 extra bytes are used for both confirmed and regular - // - OP_CSV: 1 byte // HTLC script types. The size won't be correct in all cases, - // - OP_DROP: 1 byte // but it is just an upper bound used for fee estimation in any case. - // - OP_ENDIF: 1 byte - AcceptedHtlcScriptSize = 3*1 + 20 + 5*1 + 33 + 8*1 + 20 + 4*1 + - 33 + 5*1 + 4 + 8*1 - - // AcceptedHtlcTimeoutWitnessSize 219 - // - number_of_witness_elements: 1 byte - // - sender_sig_length: 1 byte - // - sender_sig: 73 bytes - // - nil_length: 1 byte - // - witness_script_length: 1 byte - // - witness_script: (accepted_htlc_script) - AcceptedHtlcTimeoutWitnessSize = 1 + 1 + 73 + 1 + 1 + AcceptedHtlcScriptSize - - // AcceptedHtlcPenaltyWitnessSize 252 bytes - // - number_of_witness_elements: 1 byte - // - revocation_sig_length: 1 byte - // - revocation_sig: 73 bytes - // - revocation_key_length: 1 byte - // - revocation_key: 33 bytes - // - witness_script_length: 1 byte - // - witness_script (accepted_htlc_script) - AcceptedHtlcPenaltyWitnessSize = 1 + 1 + 73 + 1 + 33 + 1 + AcceptedHtlcScriptSize - - // AcceptedHtlcSuccessWitnessSize 322 bytes - // - number_of_witness_elements: 1 byte - // - nil_length: 1 byte - // - sig_alice_length: 1 byte - // - sig_alice: 73 bytes - // - 
sig_bob_length: 1 byte - // - sig_bob: 73 bytes - // - preimage_length: 1 byte - // - preimage: 32 bytes - // - witness_script_length: 1 byte - // - witness_script (accepted_htlc_script) - AcceptedHtlcSuccessWitnessSize = 1 + 1 + 1 + 73 + 1 + 73 + 1 + 32 + 1 + - AcceptedHtlcScriptSize - - // OfferedHtlcScriptSize 136 bytes - // - OP_DUP: 1 byte - // - OP_HASH160: 1 byte - // - OP_DATA: 1 byte (RIPEMD160(SHA256(revocationkey)) length) - // - RIPEMD160(SHA256(revocationkey)): 20 bytes - // - OP_EQUAL: 1 byte - // - OP_IF: 1 byte - // - OP_CHECKSIG: 1 byte - // - OP_ELSE: 1 byte - // - OP_DATA: 1 byte (remotekey length) - // - remotekey: 33 bytes - // - OP_SWAP: 1 byte - // - OP_SIZE: 1 byte - // - OP_DATA: 1 byte (32 length) - // - 32: 1 byte - // - OP_EQUAL: 1 byte - // - OP_NOTIF: 1 byte - // - OP_DROP: 1 byte - // - 2: 1 byte - // - OP_SWAP: 1 byte - // - OP_DATA: 1 byte (localkey length) - // - localkey: 33 bytes - // - 2: 1 byte - // - OP_CHECKMULTISIG: 1 byte - // - OP_ELSE: 1 byte - // - OP_HASH160: 1 byte - // - OP_DATA: 1 byte (RIPEMD160(payment_hash) length) - // - RIPEMD160(payment_hash): 20 bytes - // - OP_EQUALVERIFY: 1 byte - // - OP_CHECKSIG: 1 byte - // - OP_ENDIF: 1 byte - // - OP_1: 1 byte - // - OP_CSV: 1 byte - // - OP_DROP: 1 byte - // - OP_ENDIF: 1 byte - OfferedHtlcScriptSize = 3*1 + 20 + 5*1 + 33 + 10*1 + 33 + 5*1 + 20 + 7*1 - - // OfferedHtlcSuccessWitnessSize 245 bytes - // - number_of_witness_elements: 1 byte - // - receiver_sig_length: 1 byte - // - receiver_sig: 73 bytes - // - payment_preimage_length: 1 byte - // - payment_preimage: 32 bytes - // - witness_script_length: 1 byte - // - witness_script (offered_htlc_script) - OfferedHtlcSuccessWitnessSize = 1 + 1 + 73 + 1 + 32 + 1 + OfferedHtlcScriptSize - - // OfferedHtlcTimeoutWitnessSize 285 bytes - // - number_of_witness_elements: 1 byte - // - nil_length: 1 byte - // - sig_alice_length: 1 byte - // - sig_alice: 73 bytes - // - sig_bob_length: 1 byte - // - sig_bob: 73 bytes - // - 
nil_length: 1 byte - // - witness_script_length: 1 byte - // - witness_script (offered_htlc_script) - OfferedHtlcTimeoutWitnessSize = 1 + 1 + 1 + 73 + 1 + 73 + 1 + 1 + OfferedHtlcScriptSize - - // OfferedHtlcPenaltyWitnessSize 246 bytes - // - number_of_witness_elements: 1 byte - // - revocation_sig_length: 1 byte - // - revocation_sig: 73 bytes - // - revocation_key_length: 1 byte - // - revocation_key: 33 bytes - // - witness_script_length: 1 byte - // - witness_script (offered_htlc_script) - OfferedHtlcPenaltyWitnessSize = 1 + 1 + 73 + 1 + 33 + 1 + OfferedHtlcScriptSize - - // AnchorScriptSize 40 bytes - // - pubkey_length: 1 byte - // - pubkey: 33 bytes - // - OP_CHECKSIG: 1 byte - // - OP_IFDUP: 1 byte - // - OP_NOTIF: 1 byte - // - OP_16: 1 byte - // - OP_CSV 1 byte - // - OP_ENDIF: 1 byte - AnchorScriptSize = 1 + 33 + 6*1 - - // AnchorWitnessSize 116 bytes - // - number_of_witnes_elements: 1 byte - // - signature_length: 1 byte - // - signature: 73 bytes - // - witness_script_length: 1 byte - // - witness_script (anchor_script) - AnchorWitnessSize = 1 + 1 + 73 + 1 + AnchorScriptSize -) - -// EstimateCommitTxWeight estimate commitment transaction weight depending on -// the precalculated weight of base transaction, witness data, which is needed -// for paying for funding tx, and htlc weight multiplied by their count. -func EstimateCommitTxWeight(count int, prediction bool) int64 { - // Make prediction about the size of commitment transaction with - // additional HTLC. - if prediction { - count++ - } - - htlcWeight := int64(count * HTLCWeight) - baseWeight := int64(BaseCommitmentTxWeight) - witnessWeight := int64(WitnessCommitmentTxWeight) - - return htlcWeight + baseWeight + witnessWeight -} - -// TxWeightEstimator is able to calculate weight estimates for transactions -// based on the input and output types. For purposes of estimation, all -// signatures are assumed to be of the maximum possible size, 73 bytes. 
Each -// method of the estimator returns an instance with the estimate applied. This -// allows callers to chain each of the methods -type TxWeightEstimator struct { - hasWitness bool - inputCount uint32 - outputCount uint32 - inputSize int - inputWitnessSize int - outputSize int -} - -// AddP2PKHInput updates the weight estimate to account for an additional input -// spending a P2PKH output. -func (twe *TxWeightEstimator) AddP2PKHInput() *TxWeightEstimator { - twe.inputSize += InputSize + P2PKHScriptSigSize - twe.inputWitnessSize++ - twe.inputCount++ - - return twe -} - -// AddP2WKHInput updates the weight estimate to account for an additional input -// spending a native P2PWKH output. -func (twe *TxWeightEstimator) AddP2WKHInput() *TxWeightEstimator { - twe.AddWitnessInput(P2WKHWitnessSize) - - return twe -} - -// AddWitnessInput updates the weight estimate to account for an additional -// input spending a native pay-to-witness output. This accepts the total size -// of the witness as a parameter. -func (twe *TxWeightEstimator) AddWitnessInput(witnessSize int) *TxWeightEstimator { - twe.inputSize += InputSize - twe.inputWitnessSize += witnessSize - twe.inputCount++ - twe.hasWitness = true - - return twe -} - -// AddNestedP2WKHInput updates the weight estimate to account for an additional -// input spending a P2SH output with a nested P2WKH redeem script. -func (twe *TxWeightEstimator) AddNestedP2WKHInput() *TxWeightEstimator { - twe.inputSize += InputSize + NestedP2WPKHSize - twe.inputWitnessSize += P2WKHWitnessSize - twe.inputCount++ - twe.hasWitness = true - - return twe -} - -// AddNestedP2WSHInput updates the weight estimate to account for an additional -// input spending a P2SH output with a nested P2WSH redeem script. 
-func (twe *TxWeightEstimator) AddNestedP2WSHInput(witnessSize int) *TxWeightEstimator { - twe.inputSize += InputSize + NestedP2WSHSize - twe.inputWitnessSize += witnessSize - twe.inputCount++ - twe.hasWitness = true - - return twe -} - -// AddTxOutput adds a known TxOut to the weight estimator. -func (twe *TxWeightEstimator) AddTxOutput(txOut *wire.TxOut) *TxWeightEstimator { - twe.outputSize += txOut.SerializeSize() - twe.outputCount++ - - return twe -} - -// AddP2PKHOutput updates the weight estimate to account for an additional P2PKH -// output. -func (twe *TxWeightEstimator) AddP2PKHOutput() *TxWeightEstimator { - twe.outputSize += P2PKHOutputSize - twe.outputCount++ - - return twe -} - -// AddP2WKHOutput updates the weight estimate to account for an additional -// native P2WKH output. -func (twe *TxWeightEstimator) AddP2WKHOutput() *TxWeightEstimator { - twe.outputSize += P2WKHOutputSize - twe.outputCount++ - - return twe -} - -// AddP2WSHOutput updates the weight estimate to account for an additional -// native P2WSH output. -func (twe *TxWeightEstimator) AddP2WSHOutput() *TxWeightEstimator { - twe.outputSize += P2WSHOutputSize - twe.outputCount++ - - return twe -} - -// AddP2SHOutput updates the weight estimate to account for an additional P2SH -// output. -func (twe *TxWeightEstimator) AddP2SHOutput() *TxWeightEstimator { - twe.outputSize += P2SHOutputSize - twe.outputCount++ - - return twe -} - -// Weight gets the estimated weight of the transaction. -func (twe *TxWeightEstimator) Weight() int { - txSizeStripped := BaseTxSize + - wire.VarIntSerializeSize(uint64(twe.inputCount)) + twe.inputSize + - wire.VarIntSerializeSize(uint64(twe.outputCount)) + twe.outputSize - weight := txSizeStripped * witnessScaleFactor - if twe.hasWitness { - weight += WitnessHeaderSize + twe.inputWitnessSize - } - return weight -} - -// VSize gets the estimated virtual size of the transactions, in vbytes. 
-func (twe *TxWeightEstimator) VSize() int { - // A tx's vsize is 1/4 of the weight, rounded up. - return (twe.Weight() + witnessScaleFactor - 1) / witnessScaleFactor -} diff --git a/lnd/input/size_test.go b/lnd/input/size_test.go deleted file mode 100644 index 7689e63f..00000000 --- a/lnd/input/size_test.go +++ /dev/null @@ -1,850 +0,0 @@ -package input_test - -import ( - "math/big" - "testing" - - "github.com/pkt-cash/pktd/blockchain" - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/chaincfg" - "github.com/pkt-cash/pktd/txscript" - "github.com/pkt-cash/pktd/txscript/params" - "github.com/pkt-cash/pktd/txscript/scriptbuilder" - "github.com/pkt-cash/pktd/wire" - - "github.com/pkt-cash/pktd/lnd/input" - "github.com/pkt-cash/pktd/lnd/keychain" -) - -const ( - testCSVDelay = (1 << 31) - 1 - - testCLTVExpiry = 500000000 - - // maxDERSignatureSize is the largest possible DER-encoded signature - // without the trailing sighash flag. - maxDERSignatureSize = 72 -) - -var ( - testPubkeyBytes = make([]byte, 33) - - testHash160 = make([]byte, 20) - testPreimage = make([]byte, 32) - - // testPubkey is a pubkey used in script size calculation. - testPubkey = &btcec.PublicKey{ - X: &big.Int{}, - Y: &big.Int{}, - } - - testPrivkey, _ = btcec.PrivKeyFromBytes(btcec.S256(), make([]byte, 32)) - - testTx = wire.NewMsgTx(2) -) - -// TestTxWeightEstimator tests that transaction weight estimates are calculated -// correctly by comparing against an actual (though invalid) transaction -// matching the template. 
-func TestTxWeightEstimator(t *testing.T) { - netParams := &chaincfg.MainNetParams - - p2pkhAddr, err := btcutil.NewAddressPubKeyHash( - make([]byte, 20), netParams) - if err != nil { - t.Fatalf("Failed to generate address: %v", err) - } - p2pkhScript, err := txscript.PayToAddrScript(p2pkhAddr) - if err != nil { - t.Fatalf("Failed to generate scriptPubKey: %v", err) - } - - p2wkhAddr, err := btcutil.NewAddressWitnessPubKeyHash( - make([]byte, 20), netParams) - if err != nil { - t.Fatalf("Failed to generate address: %v", err) - } - p2wkhScript, err := txscript.PayToAddrScript(p2wkhAddr) - if err != nil { - t.Fatalf("Failed to generate scriptPubKey: %v", err) - } - - p2wshAddr, err := btcutil.NewAddressWitnessScriptHash( - make([]byte, 32), netParams) - if err != nil { - t.Fatalf("Failed to generate address: %v", err) - } - p2wshScript, err := txscript.PayToAddrScript(p2wshAddr) - if err != nil { - t.Fatalf("Failed to generate scriptPubKey: %v", err) - } - - p2shAddr, err := btcutil.NewAddressScriptHash([]byte{0}, netParams) - if err != nil { - t.Fatalf("Failed to generate address: %v", err) - } - p2shScript, err := txscript.PayToAddrScript(p2shAddr) - if err != nil { - t.Fatalf("Failed to generate scriptPubKey: %v", err) - } - - testCases := []struct { - numP2PKHInputs int - numP2WKHInputs int - numP2WSHInputs int - numNestedP2WKHInputs int - numNestedP2WSHInputs int - numP2PKHOutputs int - numP2WKHOutputs int - numP2WSHOutputs int - numP2SHOutputs int - }{ - // Assert base txn size. - {}, - - // Assert single input/output sizes. - { - numP2PKHInputs: 1, - }, - { - numP2WKHInputs: 1, - }, - { - numP2WSHInputs: 1, - }, - { - numNestedP2WKHInputs: 1, - }, - { - numNestedP2WSHInputs: 1, - }, - { - numP2WKHOutputs: 1, - }, - { - numP2PKHOutputs: 1, - }, - { - numP2WSHOutputs: 1, - }, - { - numP2SHOutputs: 1, - }, - - // Assert each input/output increments input/output counts. 
- { - numP2PKHInputs: 253, - }, - { - numP2WKHInputs: 253, - }, - { - numP2WSHInputs: 253, - }, - { - numNestedP2WKHInputs: 253, - }, - { - numNestedP2WSHInputs: 253, - }, - { - numP2WKHOutputs: 253, - }, - { - numP2PKHOutputs: 253, - }, - { - numP2WSHOutputs: 253, - }, - { - numP2SHOutputs: 253, - }, - - // Assert basic combinations of inputs and outputs. - { - numP2PKHInputs: 1, - numP2PKHOutputs: 2, - }, - { - numP2PKHInputs: 1, - numP2WKHInputs: 1, - numP2WKHOutputs: 1, - numP2WSHOutputs: 1, - }, - { - numP2WKHInputs: 1, - numP2WKHOutputs: 1, - numP2WSHOutputs: 1, - }, - { - numP2WKHInputs: 2, - numP2WKHOutputs: 1, - numP2WSHOutputs: 1, - }, - { - numP2WSHInputs: 1, - numP2WKHOutputs: 1, - }, - { - numP2PKHInputs: 1, - numP2SHOutputs: 1, - }, - { - numNestedP2WKHInputs: 1, - numP2WKHOutputs: 1, - }, - { - numNestedP2WSHInputs: 1, - numP2WKHOutputs: 1, - }, - - // Assert disparate input/output types increment total - // input/output counts. - { - numP2PKHInputs: 50, - numP2WKHInputs: 50, - numP2WSHInputs: 51, - numNestedP2WKHInputs: 51, - numNestedP2WSHInputs: 51, - numP2WKHOutputs: 1, - }, - { - numP2WKHInputs: 1, - numP2WKHOutputs: 63, - numP2PKHOutputs: 63, - numP2WSHOutputs: 63, - numP2SHOutputs: 64, - }, - { - numP2PKHInputs: 50, - numP2WKHInputs: 50, - numP2WSHInputs: 51, - numNestedP2WKHInputs: 51, - numNestedP2WSHInputs: 51, - numP2WKHOutputs: 63, - numP2PKHOutputs: 63, - numP2WSHOutputs: 63, - numP2SHOutputs: 64, - }, - } - - for i, test := range testCases { - var weightEstimate input.TxWeightEstimator - tx := wire.NewMsgTx(1) - - for j := 0; j < test.numP2PKHInputs; j++ { - weightEstimate.AddP2PKHInput() - - signature := make([]byte, maxDERSignatureSize+1) - compressedPubKey := make([]byte, 33) - scriptSig, err := scriptbuilder.NewScriptBuilder().AddData(signature). 
- AddData(compressedPubKey).Script() - if err != nil { - t.Fatalf("Failed to generate scriptSig: %v", err) - } - - tx.AddTxIn(&wire.TxIn{SignatureScript: scriptSig}) - } - for j := 0; j < test.numP2WKHInputs; j++ { - weightEstimate.AddP2WKHInput() - - signature := make([]byte, maxDERSignatureSize+1) - compressedPubKey := make([]byte, 33) - witness := wire.TxWitness{signature, compressedPubKey} - tx.AddTxIn(&wire.TxIn{Witness: witness}) - } - for j := 0; j < test.numP2WSHInputs; j++ { - weightEstimate.AddWitnessInput(42) - - witnessScript := make([]byte, 40) - witness := wire.TxWitness{witnessScript} - tx.AddTxIn(&wire.TxIn{Witness: witness}) - } - for j := 0; j < test.numNestedP2WKHInputs; j++ { - weightEstimate.AddNestedP2WKHInput() - - signature := make([]byte, maxDERSignatureSize+1) - compressedPubKey := make([]byte, 33) - witness := wire.TxWitness{signature, compressedPubKey} - scriptSig, err := scriptbuilder.NewScriptBuilder().AddData(p2wkhScript). - Script() - if err != nil { - t.Fatalf("Failed to generate scriptSig: %v", err) - } - - tx.AddTxIn(&wire.TxIn{SignatureScript: scriptSig, Witness: witness}) - } - for j := 0; j < test.numNestedP2WSHInputs; j++ { - weightEstimate.AddNestedP2WSHInput(42) - - witnessScript := make([]byte, 40) - witness := wire.TxWitness{witnessScript} - scriptSig, err := scriptbuilder.NewScriptBuilder().AddData(p2wshScript). 
- Script() - if err != nil { - t.Fatalf("Failed to generate scriptSig: %v", err) - } - - tx.AddTxIn(&wire.TxIn{SignatureScript: scriptSig, Witness: witness}) - } - for j := 0; j < test.numP2PKHOutputs; j++ { - weightEstimate.AddP2PKHOutput() - tx.AddTxOut(&wire.TxOut{PkScript: p2pkhScript}) - } - for j := 0; j < test.numP2WKHOutputs; j++ { - weightEstimate.AddP2WKHOutput() - tx.AddTxOut(&wire.TxOut{PkScript: p2wkhScript}) - } - for j := 0; j < test.numP2WSHOutputs; j++ { - weightEstimate.AddP2WSHOutput() - tx.AddTxOut(&wire.TxOut{PkScript: p2wshScript}) - } - for j := 0; j < test.numP2SHOutputs; j++ { - weightEstimate.AddP2SHOutput() - tx.AddTxOut(&wire.TxOut{PkScript: p2shScript}) - } - - expectedWeight := blockchain.GetTransactionWeight(btcutil.NewTx(tx)) - if weightEstimate.Weight() != int(expectedWeight) { - t.Errorf("Case %d: Got wrong weight: expected %d, got %d", - i, expectedWeight, weightEstimate.Weight()) - } - } -} - -type maxDERSignature struct{} - -func (s *maxDERSignature) Serialize() []byte { - // Always return worst-case signature length, excluding the one byte - // sighash flag. - return make([]byte, maxDERSignatureSize) -} - -func (s *maxDERSignature) Verify(_ []byte, _ *btcec.PublicKey) bool { - return true -} - -// dummySigner is a fake signer used for size (upper bound) calculations. -type dummySigner struct { - input.Signer -} - -// SignOutputRaw generates a signature for the passed transaction according to -// the data within the passed SignDescriptor. 
-func (s *dummySigner) SignOutputRaw(tx *wire.MsgTx, - signDesc *input.SignDescriptor) (input.Signature, er.R) { - - return &maxDERSignature{}, nil -} - -type witnessSizeTest struct { - name string - expSize int - genWitness func(t *testing.T) wire.TxWitness -} - -var witnessSizeTests = []witnessSizeTest{ - { - name: "funding", - expSize: input.MultiSigWitnessSize, - genWitness: func(t *testing.T) wire.TxWitness { - witnessScript, _, err := input.GenFundingPkScript( - testPubkeyBytes, testPubkeyBytes, 1, - ) - if err != nil { - t.Fatal(err) - } - - return input.SpendMultiSig( - witnessScript, - testPubkeyBytes, &maxDERSignature{}, - testPubkeyBytes, &maxDERSignature{}, - ) - }, - }, - { - name: "to local timeout", - expSize: input.ToLocalTimeoutWitnessSize, - genWitness: func(t *testing.T) wire.TxWitness { - witnessScript, err := input.CommitScriptToSelf( - testCSVDelay, testPubkey, testPubkey, - ) - if err != nil { - t.Fatal(err) - } - - signDesc := &input.SignDescriptor{ - WitnessScript: witnessScript, - } - - witness, err := input.CommitSpendTimeout( - &dummySigner{}, signDesc, testTx, - ) - if err != nil { - t.Fatal(err) - } - - return witness - }, - }, - { - name: "to local revoke", - expSize: input.ToLocalPenaltyWitnessSize, - genWitness: func(t *testing.T) wire.TxWitness { - witnessScript, err := input.CommitScriptToSelf( - testCSVDelay, testPubkey, testPubkey, - ) - if err != nil { - t.Fatal(err) - } - - signDesc := &input.SignDescriptor{ - WitnessScript: witnessScript, - } - - witness, err := input.CommitSpendRevoke( - &dummySigner{}, signDesc, testTx, - ) - if err != nil { - t.Fatal(err) - } - - return witness - }, - }, - { - name: "to remote confirmed", - expSize: input.ToRemoteConfirmedWitnessSize, - genWitness: func(t *testing.T) wire.TxWitness { - witScript, err := input.CommitScriptToRemoteConfirmed( - testPubkey, - ) - if err != nil { - t.Fatal(err) - } - - signDesc := &input.SignDescriptor{ - WitnessScript: witScript, - KeyDesc: 
keychain.KeyDescriptor{ - PubKey: testPubkey, - }, - } - - witness, err := input.CommitSpendToRemoteConfirmed( - &dummySigner{}, signDesc, testTx, - ) - if err != nil { - t.Fatal(err) - } - - return witness - }, - }, - { - name: "anchor", - expSize: input.AnchorWitnessSize, - genWitness: func(t *testing.T) wire.TxWitness { - witScript, err := input.CommitScriptAnchor( - testPubkey, - ) - if err != nil { - t.Fatal(err) - } - - signDesc := &input.SignDescriptor{ - WitnessScript: witScript, - KeyDesc: keychain.KeyDescriptor{ - PubKey: testPubkey, - }, - } - - witness, err := input.CommitSpendAnchor( - &dummySigner{}, signDesc, testTx, - ) - if err != nil { - t.Fatal(err) - } - - return witness - }, - }, - { - name: "anchor anyone", - expSize: 43, - genWitness: func(t *testing.T) wire.TxWitness { - witScript, err := input.CommitScriptAnchor( - testPubkey, - ) - if err != nil { - t.Fatal(err) - } - - witness, _ := input.CommitSpendAnchorAnyone(witScript) - - return witness - }, - }, - { - name: "offered htlc revoke", - expSize: input.OfferedHtlcPenaltyWitnessSize - 3, - genWitness: func(t *testing.T) wire.TxWitness { - witScript, err := input.SenderHTLCScript( - testPubkey, testPubkey, testPubkey, - testHash160, false, - ) - if err != nil { - t.Fatal(err) - } - - signDesc := &input.SignDescriptor{ - WitnessScript: witScript, - KeyDesc: keychain.KeyDescriptor{ - PubKey: testPubkey, - }, - DoubleTweak: testPrivkey, - } - - witness, err := input.SenderHtlcSpendRevoke( - &dummySigner{}, signDesc, testTx, - ) - if err != nil { - t.Fatal(err) - } - - return witness - }, - }, - { - name: "offered htlc revoke confirmed", - expSize: input.OfferedHtlcPenaltyWitnessSize, - genWitness: func(t *testing.T) wire.TxWitness { - hash := make([]byte, 20) - - witScript, err := input.SenderHTLCScript( - testPubkey, testPubkey, testPubkey, - hash, true, - ) - if err != nil { - t.Fatal(err) - } - - signDesc := &input.SignDescriptor{ - WitnessScript: witScript, - KeyDesc: 
keychain.KeyDescriptor{ - PubKey: testPubkey, - }, - DoubleTweak: testPrivkey, - } - - witness, err := input.SenderHtlcSpendRevoke( - &dummySigner{}, signDesc, testTx, - ) - if err != nil { - t.Fatal(err) - } - - return witness - }, - }, - { - name: "offered htlc timeout", - expSize: input.OfferedHtlcTimeoutWitnessSize - 3, - genWitness: func(t *testing.T) wire.TxWitness { - witScript, err := input.SenderHTLCScript( - testPubkey, testPubkey, testPubkey, - testHash160, false, - ) - if err != nil { - t.Fatal(err) - } - - signDesc := &input.SignDescriptor{ - WitnessScript: witScript, - } - - witness, err := input.SenderHtlcSpendTimeout( - &maxDERSignature{}, params.SigHashAll, - &dummySigner{}, signDesc, testTx, - ) - if err != nil { - t.Fatal(err) - } - - return witness - }, - }, - { - name: "offered htlc timeout confirmed", - expSize: input.OfferedHtlcTimeoutWitnessSize, - genWitness: func(t *testing.T) wire.TxWitness { - witScript, err := input.SenderHTLCScript( - testPubkey, testPubkey, testPubkey, - testHash160, true, - ) - if err != nil { - t.Fatal(err) - } - - signDesc := &input.SignDescriptor{ - WitnessScript: witScript, - } - - witness, err := input.SenderHtlcSpendTimeout( - &maxDERSignature{}, params.SigHashAll, - &dummySigner{}, signDesc, testTx, - ) - if err != nil { - t.Fatal(err) - } - - return witness - }, - }, - { - name: "offered htlc success", - expSize: input.OfferedHtlcSuccessWitnessSize - 3, - genWitness: func(t *testing.T) wire.TxWitness { - witScript, err := input.SenderHTLCScript( - testPubkey, testPubkey, testPubkey, - testHash160, false, - ) - if err != nil { - t.Fatal(err) - } - - signDesc := &input.SignDescriptor{ - WitnessScript: witScript, - } - - witness, err := input.SenderHtlcSpendRedeem( - &dummySigner{}, signDesc, testTx, testPreimage, - ) - if err != nil { - t.Fatal(err) - } - - return witness - }, - }, - { - name: "offered htlc success confirmed", - expSize: input.OfferedHtlcSuccessWitnessSize, - genWitness: func(t *testing.T) 
wire.TxWitness { - witScript, err := input.SenderHTLCScript( - testPubkey, testPubkey, testPubkey, - testHash160, true, - ) - if err != nil { - t.Fatal(err) - } - - signDesc := &input.SignDescriptor{ - WitnessScript: witScript, - } - - witness, err := input.SenderHtlcSpendRedeem( - &dummySigner{}, signDesc, testTx, testPreimage, - ) - if err != nil { - t.Fatal(err) - } - - return witness - }, - }, - { - name: "accepted htlc revoke", - expSize: input.AcceptedHtlcPenaltyWitnessSize - 3, - genWitness: func(t *testing.T) wire.TxWitness { - witScript, err := input.ReceiverHTLCScript( - testCLTVExpiry, testPubkey, testPubkey, - testPubkey, testHash160, false, - ) - if err != nil { - t.Fatal(err) - } - - signDesc := &input.SignDescriptor{ - WitnessScript: witScript, - KeyDesc: keychain.KeyDescriptor{ - PubKey: testPubkey, - }, - DoubleTweak: testPrivkey, - } - - witness, err := input.ReceiverHtlcSpendRevoke( - &dummySigner{}, signDesc, testTx, - ) - if err != nil { - t.Fatal(err) - } - - return witness - }, - }, - { - name: "accepted htlc revoke confirmed", - expSize: input.AcceptedHtlcPenaltyWitnessSize, - genWitness: func(t *testing.T) wire.TxWitness { - witScript, err := input.ReceiverHTLCScript( - testCLTVExpiry, testPubkey, testPubkey, - testPubkey, testHash160, true, - ) - if err != nil { - t.Fatal(err) - } - - signDesc := &input.SignDescriptor{ - WitnessScript: witScript, - KeyDesc: keychain.KeyDescriptor{ - PubKey: testPubkey, - }, - DoubleTweak: testPrivkey, - } - - witness, err := input.ReceiverHtlcSpendRevoke( - &dummySigner{}, signDesc, testTx, - ) - if err != nil { - t.Fatal(err) - } - - return witness - }, - }, - { - name: "accepted htlc timeout", - expSize: input.AcceptedHtlcTimeoutWitnessSize - 3, - genWitness: func(t *testing.T) wire.TxWitness { - - witScript, err := input.ReceiverHTLCScript( - testCLTVExpiry, testPubkey, testPubkey, - testPubkey, testHash160, false, - ) - if err != nil { - t.Fatal(err) - } - - signDesc := &input.SignDescriptor{ - 
WitnessScript: witScript, - } - - witness, err := input.ReceiverHtlcSpendTimeout( - &dummySigner{}, signDesc, testTx, - testCLTVExpiry, - ) - if err != nil { - t.Fatal(err) - } - - return witness - }, - }, - { - name: "accepted htlc timeout confirmed", - expSize: input.AcceptedHtlcTimeoutWitnessSize, - genWitness: func(t *testing.T) wire.TxWitness { - witScript, err := input.ReceiverHTLCScript( - testCLTVExpiry, testPubkey, testPubkey, - testPubkey, testHash160, true, - ) - if err != nil { - t.Fatal(err) - } - - signDesc := &input.SignDescriptor{ - WitnessScript: witScript, - } - - witness, err := input.ReceiverHtlcSpendTimeout( - &dummySigner{}, signDesc, testTx, - testCLTVExpiry, - ) - if err != nil { - t.Fatal(err) - } - - return witness - }, - }, - { - name: "accepted htlc success", - expSize: input.AcceptedHtlcSuccessWitnessSize - 3, - genWitness: func(t *testing.T) wire.TxWitness { - witScript, err := input.ReceiverHTLCScript( - testCLTVExpiry, testPubkey, testPubkey, - testPubkey, testHash160, false, - ) - if err != nil { - t.Fatal(err) - } - - signDesc := &input.SignDescriptor{ - WitnessScript: witScript, - KeyDesc: keychain.KeyDescriptor{ - PubKey: testPubkey, - }, - } - - witness, err := input.ReceiverHtlcSpendRedeem( - &maxDERSignature{}, params.SigHashAll, - testPreimage, &dummySigner{}, signDesc, testTx, - ) - if err != nil { - t.Fatal(err) - } - - return witness - }, - }, - { - name: "accepted htlc success confirmed", - expSize: input.AcceptedHtlcSuccessWitnessSize, - genWitness: func(t *testing.T) wire.TxWitness { - witScript, err := input.ReceiverHTLCScript( - testCLTVExpiry, testPubkey, testPubkey, - testPubkey, testHash160, true, - ) - if err != nil { - t.Fatal(err) - } - - signDesc := &input.SignDescriptor{ - WitnessScript: witScript, - KeyDesc: keychain.KeyDescriptor{ - PubKey: testPubkey, - }, - } - - witness, err := input.ReceiverHtlcSpendRedeem( - &maxDERSignature{}, params.SigHashAll, - testPreimage, &dummySigner{}, signDesc, testTx, - ) - 
if err != nil { - t.Fatal(err) - } - - return witness - }, - }, -} - -// TestWitnessSizes asserts the correctness of our magic witness constants. -// Witnesses involving signatures will have maxDERSignatures injected so that we -// can determine upper bounds for the witness sizes. These constants are -// predominately used for fee estimation, so we want to be certain that we -// aren't under estimating or our transactions could get stuck. -func TestWitnessSizes(t *testing.T) { - for _, test := range witnessSizeTests { - test := test - t.Run(test.name, func(t *testing.T) { - size := test.genWitness(t).SerializeSize() - if size != test.expSize { - t.Fatalf("size mismatch, want: %v, got: %v", - test.expSize, size) - } - }) - } -} diff --git a/lnd/input/test_utils.go b/lnd/input/test_utils.go deleted file mode 100644 index 3d2d4ead..00000000 --- a/lnd/input/test_utils.go +++ /dev/null @@ -1,193 +0,0 @@ -package input - -import ( - "bytes" - "encoding/hex" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/chaincfg" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/txscript" - "github.com/pkt-cash/pktd/txscript/params" - "github.com/pkt-cash/pktd/wire" -) - -var ( - - // For simplicity a single priv key controls all of our test outputs. - testWalletPrivKey = []byte{ - 0x2b, 0xd8, 0x06, 0xc9, 0x7f, 0x0e, 0x00, 0xaf, - 0x1a, 0x1f, 0xc3, 0x32, 0x8f, 0xa7, 0x63, 0xa9, - 0x26, 0x97, 0x23, 0xc8, 0xdb, 0x8f, 0xac, 0x4f, - 0x93, 0xaf, 0x71, 0xdb, 0x18, 0x6d, 0x6e, 0x90, - } - - // We're alice :) - bobsPrivKey = []byte{ - 0x81, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda, - 0x63, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17, - 0xd, 0xe7, 0x95, 0xe4, 0xb7, 0x25, 0xb8, 0x4d, - 0x1e, 0xb, 0x4c, 0xfd, 0x9e, 0xc5, 0x8c, 0xe9, - } - - // Use a hard-coded HD seed. 
- testHdSeed = chainhash.Hash{ - 0xb7, 0x94, 0x38, 0x5f, 0x2d, 0x1e, 0xf7, 0xab, - 0x4d, 0x92, 0x73, 0xd1, 0x90, 0x63, 0x81, 0xb4, - 0x4f, 0x2f, 0x6f, 0x25, 0x88, 0xa3, 0xef, 0xb9, - 0x6a, 0x49, 0x18, 0x83, 0x31, 0x98, 0x47, 0x53, - } -) - -// MockSigner is a simple implementation of the Signer interface. Each one has -// a set of private keys in a slice and can sign messages using the appropriate -// one. -type MockSigner struct { - Privkeys []*btcec.PrivateKey - NetParams *chaincfg.Params -} - -// SignOutputRaw generates a signature for the passed transaction according to -// the data within the passed SignDescriptor. -func (m *MockSigner) SignOutputRaw(tx *wire.MsgTx, - signDesc *SignDescriptor) (Signature, er.R) { - - pubkey := signDesc.KeyDesc.PubKey - switch { - case signDesc.SingleTweak != nil: - pubkey = TweakPubKeyWithTweak(pubkey, signDesc.SingleTweak) - case signDesc.DoubleTweak != nil: - pubkey = DeriveRevocationPubkey(pubkey, signDesc.DoubleTweak.PubKey()) - } - - hash160 := btcutil.Hash160(pubkey.SerializeCompressed()) - privKey := m.findKey(hash160, signDesc.SingleTweak, signDesc.DoubleTweak) - if privKey == nil { - return nil, er.Errorf("mock signer does not have key") - } - - sig, err := txscript.RawTxInWitnessSignature(tx, signDesc.SigHashes, - signDesc.InputIndex, signDesc.Output.Value, signDesc.WitnessScript, - signDesc.HashType, privKey) - if err != nil { - return nil, err - } - - return btcec.ParseDERSignature(sig[:len(sig)-1], btcec.S256()) -} - -// ComputeInputScript generates a complete InputIndex for the passed transaction -// with the signature as defined within the passed SignDescriptor. This method -// should be capable of generating the proper input script for both regular -// p2wkh output and p2wkh outputs nested within a regular p2sh output. 
-func (m *MockSigner) ComputeInputScript(tx *wire.MsgTx, signDesc *SignDescriptor) (*Script, er.R) { - scriptType, addresses, _, err := txscript.ExtractPkScriptAddrs( - signDesc.Output.PkScript, m.NetParams) - if err != nil { - return nil, err - } - - switch scriptType { - case txscript.PubKeyHashTy: - privKey := m.findKey(addresses[0].ScriptAddress(), signDesc.SingleTweak, - signDesc.DoubleTweak) - if privKey == nil { - return nil, er.Errorf("mock signer does not have key for "+ - "address %v", addresses[0]) - } - - sigScript, err := txscript.SignatureScript( - tx, signDesc.InputIndex, signDesc.Output.PkScript, - params.SigHashAll, privKey, true, - ) - if err != nil { - return nil, err - } - - return &Script{SigScript: sigScript}, nil - - case txscript.WitnessV0PubKeyHashTy: - privKey := m.findKey(addresses[0].ScriptAddress(), signDesc.SingleTweak, - signDesc.DoubleTweak) - if privKey == nil { - return nil, er.Errorf("mock signer does not have key for "+ - "address %v", addresses[0]) - } - - witnessScript, err := txscript.WitnessSignature(tx, signDesc.SigHashes, - signDesc.InputIndex, signDesc.Output.Value, - signDesc.Output.PkScript, params.SigHashAll, privKey, true) - if err != nil { - return nil, err - } - - return &Script{Witness: witnessScript}, nil - - default: - return nil, er.Errorf("unexpected script type: %v", scriptType) - } -} - -// findKey searches through all stored private keys and returns one -// corresponding to the hashed pubkey if it can be found. The public key may -// either correspond directly to the private key or to the private key with a -// tweak applied. -func (m *MockSigner) findKey(needleHash160 []byte, singleTweak []byte, - doubleTweak *btcec.PrivateKey) *btcec.PrivateKey { - - for _, privkey := range m.Privkeys { - // First check whether public key is directly derived from private key. 
- hash160 := btcutil.Hash160(privkey.PubKey().SerializeCompressed()) - if bytes.Equal(hash160, needleHash160) { - return privkey - } - - // Otherwise check if public key is derived from tweaked private key. - switch { - case singleTweak != nil: - privkey = TweakPrivKey(privkey, singleTweak) - case doubleTweak != nil: - privkey = DeriveRevocationPrivKey(privkey, doubleTweak) - default: - continue - } - hash160 = btcutil.Hash160(privkey.PubKey().SerializeCompressed()) - if bytes.Equal(hash160, needleHash160) { - return privkey - } - } - return nil -} - -// pubkeyFromHex parses a Bitcoin public key from a hex encoded string. -func pubkeyFromHex(keyHex string) (*btcec.PublicKey, er.R) { - bytes, err := util.DecodeHex(keyHex) - if err != nil { - return nil, err - } - return btcec.ParsePubKey(bytes, btcec.S256()) -} - -// privkeyFromHex parses a Bitcoin private key from a hex encoded string. -func privkeyFromHex(keyHex string) (*btcec.PrivateKey, er.R) { - bytes, err := util.DecodeHex(keyHex) - if err != nil { - return nil, err - } - key, _ := btcec.PrivKeyFromBytes(btcec.S256(), bytes) - return key, nil - -} - -// pubkeyToHex serializes a Bitcoin public key to a hex encoded string. -func pubkeyToHex(key *btcec.PublicKey) string { - return hex.EncodeToString(key.SerializeCompressed()) -} - -// privkeyFromHex serializes a Bitcoin private key to a hex encoded string. -func privkeyToHex(key *btcec.PrivateKey) string { - return hex.EncodeToString(key.Serialize()) -} diff --git a/lnd/input/txout.go b/lnd/input/txout.go deleted file mode 100644 index c973aafd..00000000 --- a/lnd/input/txout.go +++ /dev/null @@ -1,48 +0,0 @@ -package input - -import ( - "encoding/binary" - "io" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/wire" -) - -// writeTxOut serializes a wire.TxOut struct into the passed io.Writer stream. 
-func writeTxOut(w io.Writer, txo *wire.TxOut) er.R { - var scratch [8]byte - - binary.BigEndian.PutUint64(scratch[:], uint64(txo.Value)) - if _, err := util.Write(w, scratch[:]); err != nil { - return err - } - - if err := wire.WriteVarBytes(w, 0, txo.PkScript); err != nil { - return err - } - - return nil -} - -// readTxOut deserializes a wire.TxOut struct from the passed io.Reader stream. -func readTxOut(r io.Reader, txo *wire.TxOut) er.R { - var scratch [8]byte - - if _, err := util.ReadFull(r, scratch[:]); err != nil { - return err - } - value := int64(binary.BigEndian.Uint64(scratch[:])) - - pkScript, err := wire.ReadVarBytes(r, 0, 80, "pkScript") - if err != nil { - return err - } - - *txo = wire.TxOut{ - Value: value, - PkScript: pkScript, - } - - return nil -} diff --git a/lnd/input/txout_test.go b/lnd/input/txout_test.go deleted file mode 100644 index 78980364..00000000 --- a/lnd/input/txout_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package input - -import ( - "bytes" - "reflect" - "testing" - - "github.com/pkt-cash/pktd/wire" -) - -func TestTxOutSerialization(t *testing.T) { - txo := wire.TxOut{ - Value: 1e7, - PkScript: []byte{ - 0x41, // OP_DATA_65 - 0x04, 0xd6, 0x4b, 0xdf, 0xd0, 0x9e, 0xb1, 0xc5, - 0xfe, 0x29, 0x5a, 0xbd, 0xeb, 0x1d, 0xca, 0x42, - 0x81, 0xbe, 0x98, 0x8e, 0x2d, 0xa0, 0xb6, 0xc1, - 0xc6, 0xa5, 0x9d, 0xc2, 0x26, 0xc2, 0x86, 0x24, - 0xe1, 0x81, 0x75, 0xe8, 0x51, 0xc9, 0x6b, 0x97, - 0x3d, 0x81, 0xb0, 0x1c, 0xc3, 0x1f, 0x04, 0x78, - 0x34, 0xbc, 0x06, 0xd6, 0xd6, 0xed, 0xf6, 0x20, - 0xd1, 0x84, 0x24, 0x1a, 0x6a, 0xed, 0x8b, 0x63, - 0xa6, // 65-byte signature - 0xac, // OP_CHECKSIG - }, - } - - var buf bytes.Buffer - - if err := writeTxOut(&buf, &txo); err != nil { - t.Fatalf("unable to serialize txout: %v", err) - } - - var deserializedTxo wire.TxOut - if err := readTxOut(&buf, &deserializedTxo); err != nil { - t.Fatalf("unable to deserialize txout: %v", err) - } - - if !reflect.DeepEqual(txo, deserializedTxo) { - t.Fatalf("original and 
deserialized txouts are different:\n"+ - "original : %+v\n"+ - "deserialized : %+v\n", - txo, deserializedTxo) - } -} diff --git a/lnd/input/witnessgen.go b/lnd/input/witnessgen.go deleted file mode 100644 index ee0f1f39..00000000 --- a/lnd/input/witnessgen.go +++ /dev/null @@ -1,444 +0,0 @@ -package input - -import ( - "fmt" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/txscript" - "github.com/pkt-cash/pktd/wire" -) - -// WitnessGenerator represents a function that is able to generate the final -// witness for a particular public key script. Additionally, if required, this -// function will also return the sigScript for spending nested P2SH witness -// outputs. This function acts as an abstraction layer, hiding the details of -// the underlying script. -type WitnessGenerator func(tx *wire.MsgTx, hc *txscript.TxSigHashes, - inputIndex int) (*Script, er.R) - -// WitnessType determines how an output's witness will be generated. This -// interface can be implemented to be used for custom sweep scripts if the -// pre-defined StandardWitnessType list doesn't provide a suitable one. -type WitnessType interface { - // String returns a human readable version of the WitnessType. - String() string - - // WitnessGenerator will return a WitnessGenerator function that an - // output uses to generate the witness and optionally the sigScript for - // a sweep transaction. - WitnessGenerator(signer Signer, - descriptor *SignDescriptor) WitnessGenerator - - // SizeUpperBound returns the maximum length of the witness of this - // WitnessType if it would be included in a tx. It also returns if the - // output itself is a nested p2sh output, if so then we need to take - // into account the extra sigScript data size. - SizeUpperBound() (int, bool, er.R) - - // AddWeightEstimation adds the estimated size of the witness in bytes - // to the given weight estimator. 
- AddWeightEstimation(e *TxWeightEstimator) er.R -} - -// StandardWitnessType is a numeric representation of standard pre-defined types -// of witness configurations. -type StandardWitnessType uint16 - -// A compile time check to ensure StandardWitnessType implements the -// WitnessType interface. -var _ WitnessType = (StandardWitnessType)(0) - -const ( - // CommitmentTimeLock is a witness that allows us to spend our output - // on our local commitment transaction after a relative lock-time - // lockout. - CommitmentTimeLock StandardWitnessType = 0 - - // CommitmentNoDelay is a witness that allows us to spend a settled - // no-delay output immediately on a counterparty's commitment - // transaction. - CommitmentNoDelay StandardWitnessType = 1 - - // CommitmentRevoke is a witness that allows us to sweep the settled - // output of a malicious counterparty's who broadcasts a revoked - // commitment transaction. - CommitmentRevoke StandardWitnessType = 2 - - // HtlcOfferedRevoke is a witness that allows us to sweep an HTLC which - // we offered to the remote party in the case that they broadcast a - // revoked commitment state. - HtlcOfferedRevoke StandardWitnessType = 3 - - // HtlcAcceptedRevoke is a witness that allows us to sweep an HTLC - // output sent to us in the case that the remote party broadcasts a - // revoked commitment state. - HtlcAcceptedRevoke StandardWitnessType = 4 - - // HtlcOfferedTimeoutSecondLevel is a witness that allows us to sweep - // an HTLC output that we extended to a party, but was never fulfilled. - // This HTLC output isn't directly on the commitment transaction, but - // is the result of a confirmed second-level HTLC transaction. As a - // result, we can only spend this after a CSV delay. - HtlcOfferedTimeoutSecondLevel StandardWitnessType = 5 - - // HtlcAcceptedSuccessSecondLevel is a witness that allows us to sweep - // an HTLC output that was offered to us, and for which we have a - // payment preimage. 
This HTLC output isn't directly on our commitment - // transaction, but is the result of confirmed second-level HTLC - // transaction. As a result, we can only spend this after a CSV delay. - HtlcAcceptedSuccessSecondLevel StandardWitnessType = 6 - - // HtlcOfferedRemoteTimeout is a witness that allows us to sweep an - // HTLC that we offered to the remote party which lies in the - // commitment transaction of the remote party. We can spend this output - // after the absolute CLTV timeout of the HTLC as passed. - HtlcOfferedRemoteTimeout StandardWitnessType = 7 - - // HtlcAcceptedRemoteSuccess is a witness that allows us to sweep an - // HTLC that was offered to us by the remote party. We use this witness - // in the case that the remote party goes to chain, and we know the - // pre-image to the HTLC. We can sweep this without any additional - // timeout. - HtlcAcceptedRemoteSuccess StandardWitnessType = 8 - - // HtlcSecondLevelRevoke is a witness that allows us to sweep an HTLC - // from the remote party's commitment transaction in the case that the - // broadcast a revoked commitment, but then also immediately attempt to - // go to the second level to claim the HTLC. - HtlcSecondLevelRevoke StandardWitnessType = 9 - - // WitnessKeyHash is a witness type that allows us to spend a regular - // p2wkh output that's sent to an output which is under complete - // control of the backing wallet. - WitnessKeyHash StandardWitnessType = 10 - - // NestedWitnessKeyHash is a witness type that allows us to sweep an - // output that sends to a nested P2SH script that pays to a key solely - // under our control. The witness generated needs to include the - NestedWitnessKeyHash StandardWitnessType = 11 - - // CommitSpendNoDelayTweakless is similar to the CommitSpendNoDelay - // type, but it omits the tweak that randomizes the key we need to - // spend with a channel peer supplied set of randomness. 
- CommitSpendNoDelayTweakless StandardWitnessType = 12 - - // CommitmentToRemoteConfirmed is a witness that allows us to spend our - // output on the counterparty's commitment transaction after a - // confirmation. - CommitmentToRemoteConfirmed StandardWitnessType = 13 - - // CommitmentAnchor is a witness that allows us to spend our anchor on - // the commitment transaction. - CommitmentAnchor StandardWitnessType = 14 -) - -// String returns a human readable version of the target WitnessType. -// -// NOTE: This is part of the WitnessType interface. -func (wt StandardWitnessType) String() string { - switch wt { - case CommitmentTimeLock: - return "CommitmentTimeLock" - - case CommitmentToRemoteConfirmed: - return "CommitmentToRemoteConfirmed" - - case CommitmentAnchor: - return "CommitmentAnchor" - - case CommitmentNoDelay: - return "CommitmentNoDelay" - - case CommitSpendNoDelayTweakless: - return "CommitmentNoDelayTweakless" - - case CommitmentRevoke: - return "CommitmentRevoke" - - case HtlcOfferedRevoke: - return "HtlcOfferedRevoke" - - case HtlcAcceptedRevoke: - return "HtlcAcceptedRevoke" - - case HtlcOfferedTimeoutSecondLevel: - return "HtlcOfferedTimeoutSecondLevel" - - case HtlcAcceptedSuccessSecondLevel: - return "HtlcAcceptedSuccessSecondLevel" - - case HtlcOfferedRemoteTimeout: - return "HtlcOfferedRemoteTimeout" - - case HtlcAcceptedRemoteSuccess: - return "HtlcAcceptedRemoteSuccess" - - case HtlcSecondLevelRevoke: - return "HtlcSecondLevelRevoke" - - case WitnessKeyHash: - return "WitnessKeyHash" - - case NestedWitnessKeyHash: - return "NestedWitnessKeyHash" - - default: - return fmt.Sprintf("Unknown WitnessType: %v", uint32(wt)) - } -} - -// WitnessGenerator will return a WitnessGenerator function that an output uses -// to generate the witness and optionally the sigScript for a sweep -// transaction. The sigScript will be generated if the witness type warrants -// one for spending, such as the NestedWitnessKeyHash witness type. 
-// -// NOTE: This is part of the WitnessType interface. -func (wt StandardWitnessType) WitnessGenerator(signer Signer, - descriptor *SignDescriptor) WitnessGenerator { - - return func(tx *wire.MsgTx, hc *txscript.TxSigHashes, - inputIndex int) (*Script, er.R) { - - desc := descriptor - desc.SigHashes = hc - desc.InputIndex = inputIndex - - switch wt { - case CommitmentTimeLock: - witness, err := CommitSpendTimeout(signer, desc, tx) - if err != nil { - return nil, err - } - - return &Script{ - Witness: witness, - }, nil - - case CommitmentToRemoteConfirmed: - witness, err := CommitSpendToRemoteConfirmed( - signer, desc, tx, - ) - if err != nil { - return nil, err - } - - return &Script{ - Witness: witness, - }, nil - - case CommitmentAnchor: - witness, err := CommitSpendAnchor(signer, desc, tx) - if err != nil { - return nil, err - } - - return &Script{ - Witness: witness, - }, nil - - case CommitmentNoDelay: - witness, err := CommitSpendNoDelay(signer, desc, tx, false) - if err != nil { - return nil, err - } - - return &Script{ - Witness: witness, - }, nil - - case CommitSpendNoDelayTweakless: - witness, err := CommitSpendNoDelay(signer, desc, tx, true) - if err != nil { - return nil, err - } - - return &Script{ - Witness: witness, - }, nil - - case CommitmentRevoke: - witness, err := CommitSpendRevoke(signer, desc, tx) - if err != nil { - return nil, err - } - - return &Script{ - Witness: witness, - }, nil - - case HtlcOfferedRevoke: - witness, err := ReceiverHtlcSpendRevoke(signer, desc, tx) - if err != nil { - return nil, err - } - - return &Script{ - Witness: witness, - }, nil - - case HtlcAcceptedRevoke: - witness, err := SenderHtlcSpendRevoke(signer, desc, tx) - if err != nil { - return nil, err - } - - return &Script{ - Witness: witness, - }, nil - - case HtlcOfferedTimeoutSecondLevel: - witness, err := HtlcSecondLevelSpend(signer, desc, tx) - if err != nil { - return nil, err - } - - return &Script{ - Witness: witness, - }, nil - - case 
HtlcAcceptedSuccessSecondLevel: - witness, err := HtlcSecondLevelSpend(signer, desc, tx) - if err != nil { - return nil, err - } - - return &Script{ - Witness: witness, - }, nil - - case HtlcOfferedRemoteTimeout: - // We pass in a value of -1 for the timeout, as we - // expect the caller to have already set the lock time - // value. - witness, err := ReceiverHtlcSpendTimeout(signer, desc, tx, -1) - if err != nil { - return nil, err - } - - return &Script{ - Witness: witness, - }, nil - - case HtlcSecondLevelRevoke: - witness, err := HtlcSpendRevoke(signer, desc, tx) - if err != nil { - return nil, err - } - - return &Script{ - Witness: witness, - }, nil - - case WitnessKeyHash: - fallthrough - case NestedWitnessKeyHash: - return signer.ComputeInputScript(tx, desc) - - default: - return nil, er.Errorf("unknown witness type: %v", wt) - } - } -} - -// SizeUpperBound returns the maximum length of the witness of this witness -// type if it would be included in a tx. We also return if the output itself is -// a nested p2sh output, if so then we need to take into account the extra -// sigScript data size. -// -// NOTE: This is part of the WitnessType interface. -func (wt StandardWitnessType) SizeUpperBound() (int, bool, er.R) { - switch wt { - - // Outputs on a remote commitment transaction that pay directly to us. - case CommitSpendNoDelayTweakless: - fallthrough - case WitnessKeyHash: - fallthrough - case CommitmentNoDelay: - return P2WKHWitnessSize, false, nil - - // Outputs on a past commitment transaction that pay directly - // to us. - case CommitmentTimeLock: - return ToLocalTimeoutWitnessSize, false, nil - - // 1 CSV time locked output to us on remote commitment. - case CommitmentToRemoteConfirmed: - return ToRemoteConfirmedWitnessSize, false, nil - - // Anchor output on the commitment transaction. 
- case CommitmentAnchor: - return AnchorWitnessSize, false, nil - - // Outgoing second layer HTLC's that have confirmed within the - // chain, and the output they produced is now mature enough to - // sweep. - case HtlcOfferedTimeoutSecondLevel: - return ToLocalTimeoutWitnessSize, false, nil - - // Incoming second layer HTLC's that have confirmed within the - // chain, and the output they produced is now mature enough to - // sweep. - case HtlcAcceptedSuccessSecondLevel: - return ToLocalTimeoutWitnessSize, false, nil - - // An HTLC on the commitment transaction of the remote party, - // that has had its absolute timelock expire. - case HtlcOfferedRemoteTimeout: - return AcceptedHtlcTimeoutWitnessSize, false, nil - - // An HTLC on the commitment transaction of the remote party, - // that can be swept with the preimage. - case HtlcAcceptedRemoteSuccess: - return OfferedHtlcSuccessWitnessSize, false, nil - - // A nested P2SH input that has a p2wkh witness script. We'll mark this - // as nested P2SH so the caller can estimate the weight properly - // including the sigScript. - case NestedWitnessKeyHash: - return P2WKHWitnessSize, true, nil - - // The revocation output on a revoked commitment transaction. - case CommitmentRevoke: - return ToLocalPenaltyWitnessSize, false, nil - - // The revocation output on a revoked HTLC that we offered to the remote - // party. - case HtlcOfferedRevoke: - return OfferedHtlcPenaltyWitnessSize, false, nil - - // The revocation output on a revoked HTLC that was sent to us. - case HtlcAcceptedRevoke: - return AcceptedHtlcPenaltyWitnessSize, false, nil - - // The revocation output of a second level output of an HTLC. - case HtlcSecondLevelRevoke: - return ToLocalPenaltyWitnessSize, false, nil - } - - return 0, false, er.Errorf("unexpected witness type: %v", wt) -} - -// AddWeightEstimation adds the estimated size of the witness in bytes to the -// given weight estimator. -// -// NOTE: This is part of the WitnessType interface. 
-func (wt StandardWitnessType) AddWeightEstimation(e *TxWeightEstimator) er.R { - // For fee estimation purposes, we'll now attempt to obtain an - // upper bound on the weight this input will add when fully - // populated. - size, isNestedP2SH, err := wt.SizeUpperBound() - if err != nil { - return err - } - - // If this is a nested P2SH input, then we'll need to factor in - // the additional data push within the sigScript. - if isNestedP2SH { - e.AddNestedP2WSHInput(size) - } else { - e.AddWitnessInput(size) - } - - return nil -} diff --git a/lnd/invoices/interface.go b/lnd/invoices/interface.go deleted file mode 100644 index 54ab0280..00000000 --- a/lnd/invoices/interface.go +++ /dev/null @@ -1,17 +0,0 @@ -package invoices - -import ( - "github.com/pkt-cash/pktd/lnd/record" -) - -// Payload abstracts access to any additional fields provided in the final hop's -// TLV onion payload. -type Payload interface { - // MultiPath returns the record corresponding the option_mpp parsed from - // the onion payload. - MultiPath() *record.MPP - - // CustomRecords returns the custom tlv type records that were parsed - // from the payload. - CustomRecords() record.CustomSet -} diff --git a/lnd/invoices/invoice_expiry_watcher.go b/lnd/invoices/invoice_expiry_watcher.go deleted file mode 100644 index 0922191d..00000000 --- a/lnd/invoices/invoice_expiry_watcher.go +++ /dev/null @@ -1,226 +0,0 @@ -package invoices - -import ( - "sync" - "time" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/clock" - "github.com/pkt-cash/pktd/lnd/lntypes" - "github.com/pkt-cash/pktd/lnd/queue" - "github.com/pkt-cash/pktd/lnd/zpay32" - "github.com/pkt-cash/pktd/pktlog/log" -) - -// invoiceExpiry holds and invoice's payment hash and its expiry. This -// is used to order invoices by their expiry for cancellation. 
-type invoiceExpiry struct { - PaymentHash lntypes.Hash - Expiry time.Time - Keysend bool -} - -// Less implements PriorityQueueItem.Less such that the top item in the -// priorty queue will be the one that expires next. -func (e invoiceExpiry) Less(other queue.PriorityQueueItem) bool { - return e.Expiry.Before(other.(*invoiceExpiry).Expiry) -} - -// InvoiceExpiryWatcher handles automatic invoice cancellation of expried -// invoices. Upon start InvoiceExpiryWatcher will retrieve all pending (not yet -// settled or canceled) invoices invoices to its watcing queue. When a new -// invoice is added to the InvoiceRegistry, it'll be forarded to the -// InvoiceExpiryWatcher and will end up in the watching queue as well. -// If any of the watched invoices expire, they'll be removed from the watching -// queue and will be cancelled through InvoiceRegistry.CancelInvoice(). -type InvoiceExpiryWatcher struct { - sync.Mutex - started bool - - // clock is the clock implementation that InvoiceExpiryWatcher uses. - // It is useful for testing. - clock clock.Clock - - // cancelInvoice is a template method that cancels an expired invoice. - cancelInvoice func(lntypes.Hash, bool) er.R - - // expiryQueue holds invoiceExpiry items and is used to find the next - // invoice to expire. - expiryQueue queue.PriorityQueue - - // newInvoices channel is used to wake up the main loop when a new - // invoices is added. - newInvoices chan []*invoiceExpiry - - wg sync.WaitGroup - - // quit signals InvoiceExpiryWatcher to stop. - quit chan struct{} -} - -// NewInvoiceExpiryWatcher creates a new InvoiceExpiryWatcher instance. -func NewInvoiceExpiryWatcher(clock clock.Clock) *InvoiceExpiryWatcher { - return &InvoiceExpiryWatcher{ - clock: clock, - newInvoices: make(chan []*invoiceExpiry), - quit: make(chan struct{}), - } -} - -// Start starts the the subscription handler and the main loop. Start() will -// return with error if InvoiceExpiryWatcher is already started. 
Start() -// expects a cancellation function passed that will be use to cancel expired -// invoices by their payment hash. -func (ew *InvoiceExpiryWatcher) Start( - cancelInvoice func(lntypes.Hash, bool) er.R) er.R { - - ew.Lock() - defer ew.Unlock() - - if ew.started { - return er.Errorf("InvoiceExpiryWatcher already started") - } - - ew.started = true - ew.cancelInvoice = cancelInvoice - ew.wg.Add(1) - go ew.mainLoop() - - return nil -} - -// Stop quits the expiry handler loop and waits for InvoiceExpiryWatcher to -// fully stop. -func (ew *InvoiceExpiryWatcher) Stop() { - ew.Lock() - defer ew.Unlock() - - if ew.started { - // Signal subscriptionHandler to quit and wait for it to return. - close(ew.quit) - ew.wg.Wait() - ew.started = false - } -} - -// makeInvoiceExpiry checks if the passed invoice may be canceled and calculates -// the expiry time and creates a slimmer invoiceExpiry object with the hash and -// expiry time. -func makeInvoiceExpiry(paymentHash lntypes.Hash, - invoice *channeldb.Invoice) *invoiceExpiry { - - if invoice.State != channeldb.ContractOpen { - log.Debugf("Invoice not added to expiry watcher: %v", - paymentHash) - return nil - } - - realExpiry := invoice.Terms.Expiry - if realExpiry == 0 { - realExpiry = zpay32.DefaultInvoiceExpiry - } - - expiry := invoice.CreationDate.Add(realExpiry) - return &invoiceExpiry{ - PaymentHash: paymentHash, - Expiry: expiry, - Keysend: len(invoice.PaymentRequest) == 0, - } -} - -// AddInvoices adds invoices to the InvoiceExpiryWatcher. -func (ew *InvoiceExpiryWatcher) AddInvoices(invoices ...*invoiceExpiry) { - if len(invoices) > 0 { - select { - case ew.newInvoices <- invoices: - log.Debugf("Added %d invoices to the expiry watcher", - len(invoices)) - - // Select on quit too so that callers won't get blocked in case - // of concurrent shutdown. - case <-ew.quit: - } - } -} - -// nextExpiry returns a Time chan to wait on until the next invoice expires. 
-// If there are no active invoices, then it'll simply wait indefinitely. -func (ew *InvoiceExpiryWatcher) nextExpiry() <-chan time.Time { - if !ew.expiryQueue.Empty() { - top := ew.expiryQueue.Top().(*invoiceExpiry) - return ew.clock.TickAfter(top.Expiry.Sub(ew.clock.Now())) - } - - return nil -} - -// cancelNextExpiredInvoice will cancel the next expired invoice and removes -// it from the expiry queue. -func (ew *InvoiceExpiryWatcher) cancelNextExpiredInvoice() { - if !ew.expiryQueue.Empty() { - top := ew.expiryQueue.Top().(*invoiceExpiry) - if !top.Expiry.Before(ew.clock.Now()) { - return - } - - // Don't force-cancel already accepted invoices. An exception to - // this are auto-generated keysend invoices. Because those move - // to the Accepted state directly after being opened, the expiry - // field would never be used. Enabling cancellation for accepted - // keysend invoices creates a safety mechanism that can prevents - // channel force-closes. - err := ew.cancelInvoice(top.PaymentHash, top.Keysend) - if err != nil && !channeldb.ErrInvoiceAlreadySettled.Is(err) && - !channeldb.ErrInvoiceAlreadyCanceled.Is(err) { - - log.Errorf("Unable to cancel invoice: %v", - top.PaymentHash) - } - - ew.expiryQueue.Pop() - } -} - -// mainLoop is a goroutine that receives new invoices and handles cancellation -// of expired invoices. -func (ew *InvoiceExpiryWatcher) mainLoop() { - defer ew.wg.Done() - - for { - // Cancel any invoices that may have expired. - ew.cancelNextExpiredInvoice() - - pushInvoices := func(invoicesWithExpiry []*invoiceExpiry) { - for _, invoiceWithExpiry := range invoicesWithExpiry { - // Avoid pushing nil object to the heap. - if invoiceWithExpiry != nil { - ew.expiryQueue.Push(invoiceWithExpiry) - } - } - } - - select { - - case invoicesWithExpiry := <-ew.newInvoices: - // Take newly forwarded invoices with higher priority - // in order to not block the newInvoices channel. 
- pushInvoices(invoicesWithExpiry) - continue - - default: - select { - - case <-ew.nextExpiry(): - // Wait until the next invoice expires. - continue - - case invoicesWithExpiry := <-ew.newInvoices: - pushInvoices(invoicesWithExpiry) - - case <-ew.quit: - return - } - } - } -} diff --git a/lnd/invoices/invoice_expiry_watcher_test.go b/lnd/invoices/invoice_expiry_watcher_test.go deleted file mode 100644 index bfe5db2a..00000000 --- a/lnd/invoices/invoice_expiry_watcher_test.go +++ /dev/null @@ -1,175 +0,0 @@ -package invoices - -import ( - "sync" - "testing" - "time" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/clock" - "github.com/pkt-cash/pktd/lnd/lntypes" -) - -// invoiceExpiryWatcherTest holds a test fixture and implements checks -// for InvoiceExpiryWatcher tests. -type invoiceExpiryWatcherTest struct { - t *testing.T - wg sync.WaitGroup - watcher *InvoiceExpiryWatcher - testData invoiceExpiryTestData - canceledInvoices []lntypes.Hash -} - -// newInvoiceExpiryWatcherTest creates a new InvoiceExpiryWatcher test fixture -// and sets up the test environment. -func newInvoiceExpiryWatcherTest(t *testing.T, now time.Time, - numExpiredInvoices, numPendingInvoices int) *invoiceExpiryWatcherTest { - - test := &invoiceExpiryWatcherTest{ - watcher: NewInvoiceExpiryWatcher(clock.NewTestClock(testTime)), - testData: generateInvoiceExpiryTestData( - t, now, 0, numExpiredInvoices, numPendingInvoices, - ), - } - - test.wg.Add(numExpiredInvoices) - - err := test.watcher.Start(func(paymentHash lntypes.Hash, - force bool) er.R { - - test.canceledInvoices = append( - test.canceledInvoices, paymentHash, - ) - test.wg.Done() - return nil - }) - - if err != nil { - t.Fatalf("cannot start InvoiceExpiryWatcher: %v", err) - } - - return test -} - -func (t *invoiceExpiryWatcherTest) waitForFinish(timeout time.Duration) { - done := make(chan struct{}) - - // Wait for all cancels. 
- go func() { - t.wg.Wait() - close(done) - }() - - select { - case <-done: - case <-time.After(timeout): - t.t.Fatalf("test timeout") - } -} - -func (t *invoiceExpiryWatcherTest) checkExpectations() { - // Check that invoices that got canceled during the test are the ones - // that expired. - if len(t.canceledInvoices) != len(t.testData.expiredInvoices) { - t.t.Fatalf("expected %v cancellations, got %v", - len(t.testData.expiredInvoices), - len(t.canceledInvoices)) - } - - for i := range t.canceledInvoices { - if _, ok := t.testData.expiredInvoices[t.canceledInvoices[i]]; !ok { - t.t.Fatalf("wrong invoice canceled") - } - } -} - -// Tests that InvoiceExpiryWatcher can be started and stopped. -func TestInvoiceExpiryWatcherStartStop(t *testing.T) { - watcher := NewInvoiceExpiryWatcher(clock.NewTestClock(testTime)) - cancel := func(lntypes.Hash, bool) er.R { - t.Fatalf("unexpected call") - return nil - } - - if err := watcher.Start(cancel); err != nil { - t.Fatalf("unexpected error upon start: %v", err) - } - - if err := watcher.Start(cancel); err == nil { - t.Fatalf("expected error upon second start") - } - - watcher.Stop() - - if err := watcher.Start(cancel); err != nil { - t.Fatalf("unexpected error upon start: %v", err) - } -} - -// Tests that no invoices will expire from an empty InvoiceExpiryWatcher. -func TestInvoiceExpiryWithNoInvoices(t *testing.T) { - t.Parallel() - - test := newInvoiceExpiryWatcherTest(t, testTime, 0, 0) - - test.waitForFinish(testTimeout) - test.watcher.Stop() - test.checkExpectations() -} - -// Tests that if all add invoices are expired, then all invoices -// will be canceled. 
-func TestInvoiceExpiryWithOnlyExpiredInvoices(t *testing.T) { - t.Parallel() - - test := newInvoiceExpiryWatcherTest(t, testTime, 0, 5) - - for paymentHash, invoice := range test.testData.pendingInvoices { - test.watcher.AddInvoices(makeInvoiceExpiry(paymentHash, invoice)) - } - - test.waitForFinish(testTimeout) - test.watcher.Stop() - test.checkExpectations() -} - -// Tests that if some invoices are expired, then those invoices -// will be canceled. -func TestInvoiceExpiryWithPendingAndExpiredInvoices(t *testing.T) { - t.Parallel() - - test := newInvoiceExpiryWatcherTest(t, testTime, 5, 5) - - for paymentHash, invoice := range test.testData.expiredInvoices { - test.watcher.AddInvoices(makeInvoiceExpiry(paymentHash, invoice)) - } - - for paymentHash, invoice := range test.testData.pendingInvoices { - test.watcher.AddInvoices(makeInvoiceExpiry(paymentHash, invoice)) - } - - test.waitForFinish(testTimeout) - test.watcher.Stop() - test.checkExpectations() -} - -// Tests adding multiple invoices at once. -func TestInvoiceExpiryWhenAddingMultipleInvoices(t *testing.T) { - t.Parallel() - - test := newInvoiceExpiryWatcherTest(t, testTime, 5, 5) - var invoices []*invoiceExpiry - - for hash, invoice := range test.testData.expiredInvoices { - invoices = append(invoices, makeInvoiceExpiry(hash, invoice)) - } - - for hash, invoice := range test.testData.pendingInvoices { - invoices = append(invoices, makeInvoiceExpiry(hash, invoice)) - } - - test.watcher.AddInvoices(invoices...) 
- test.waitForFinish(testTimeout) - test.watcher.Stop() - test.checkExpectations() -} diff --git a/lnd/invoices/invoiceregistry.go b/lnd/invoices/invoiceregistry.go deleted file mode 100644 index b71961b4..00000000 --- a/lnd/invoices/invoiceregistry.go +++ /dev/null @@ -1,1499 +0,0 @@ -package invoices - -import ( - "fmt" - "sync" - "sync/atomic" - "time" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/clock" - "github.com/pkt-cash/pktd/lnd/lntypes" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/queue" - "github.com/pkt-cash/pktd/lnd/record" - "github.com/pkt-cash/pktd/pktlog/log" -) - -var ( - Err = er.NewErrorType("lnd.invoices") - // ErrInvoiceExpiryTooSoon is returned when an invoice is attempted to be - // accepted or settled with not enough blocks remaining. - ErrInvoiceExpiryTooSoon = Err.CodeWithDetail("ErrInvoiceExpiryTooSoon", "invoice expiry too soon") - - // ErrInvoiceAmountTooLow is returned when an invoice is attempted to be - // accepted or settled with an amount that is too low. - ErrInvoiceAmountTooLow = Err.CodeWithDetail("ErrInvoiceAmountTooLow", "paid amount less than invoice amount") - - // ErrShuttingDown is returned when an operation failed because the - // invoice registry is shutting down. - ErrShuttingDown = Err.CodeWithDetail("ErrShuttingDown", "invoice registry shutting down") -) - -const ( - // DefaultHtlcHoldDuration defines the default for how long mpp htlcs - // are held while waiting for the other set members to arrive. - DefaultHtlcHoldDuration = 120 * time.Second -) - -// RegistryConfig contains the configuration parameters for invoice registry. -type RegistryConfig struct { - // FinalCltvRejectDelta defines the number of blocks before the expiry - // of the htlc where we no longer settle it as an exit hop and instead - // cancel it back. 
Normally this value should be lower than the cltv - // expiry of any invoice we create and the code effectuating this should - // not be hit. - FinalCltvRejectDelta int32 - - // HtlcHoldDuration defines for how long mpp htlcs are held while - // waiting for the other set members to arrive. - HtlcHoldDuration time.Duration - - // Clock holds the clock implementation that is used to provide - // Now() and TickAfter() and is useful to stub out the clock functions - // during testing. - Clock clock.Clock - - // AcceptKeySend indicates whether we want to accept spontaneous key - // send payments. - AcceptKeySend bool - - // GcCanceledInvoicesOnStartup if set, we'll attempt to garbage collect - // all canceled invoices upon start. - GcCanceledInvoicesOnStartup bool - - // GcCanceledInvoicesOnTheFly if set, we'll garbage collect all newly - // canceled invoices on the fly. - GcCanceledInvoicesOnTheFly bool - - // KeysendHoldTime indicates for how long we want to accept and hold - // spontaneous keysend payments. - KeysendHoldTime time.Duration -} - -// htlcReleaseEvent describes an htlc auto-release event. It is used to release -// mpp htlcs for which the complete set didn't arrive in time. -type htlcReleaseEvent struct { - // invoiceRef identifiers the invoice this htlc belongs to. - invoiceRef channeldb.InvoiceRef - - // key is the circuit key of the htlc to release. - key channeldb.CircuitKey - - // releaseTime is the time at which to release the htlc. - releaseTime time.Time -} - -// Less is used to order PriorityQueueItem's by their release time such that -// items with the older release time are at the top of the queue. -// -// NOTE: Part of the queue.PriorityQueueItem interface. -func (r *htlcReleaseEvent) Less(other queue.PriorityQueueItem) bool { - return r.releaseTime.Before(other.(*htlcReleaseEvent).releaseTime) -} - -// InvoiceRegistry is a central registry of all the outstanding invoices -// created by the daemon. 
The registry is a thin wrapper around a map in order -// to ensure that all updates/reads are thread safe. -type InvoiceRegistry struct { - sync.RWMutex - - cdb *channeldb.DB - - // cfg contains the registry's configuration parameters. - cfg *RegistryConfig - - clientMtx sync.Mutex - nextClientID uint32 - notificationClients map[uint32]*InvoiceSubscription - singleNotificationClients map[uint32]*SingleInvoiceSubscription - - newSubscriptions chan *InvoiceSubscription - subscriptionCancels chan uint32 - - // invoiceEvents is a single channel over which both invoice updates and - // new single invoice subscriptions are carried. - invoiceEvents chan interface{} - - // subscriptions is a map from a circuit key to a list of subscribers. - // It is used for efficient notification of links. - hodlSubscriptions map[channeldb.CircuitKey]map[chan<- interface{}]struct{} - - // reverseSubscriptions tracks circuit keys subscribed to per - // subscriber. This is used to unsubscribe from all hashes efficiently. - hodlReverseSubscriptions map[chan<- interface{}]map[channeldb.CircuitKey]struct{} - - // htlcAutoReleaseChan contains the new htlcs that need to be - // auto-released. - htlcAutoReleaseChan chan *htlcReleaseEvent - - expiryWatcher *InvoiceExpiryWatcher - - wg sync.WaitGroup - quit chan struct{} -} - -// NewRegistry creates a new invoice registry. The invoice registry -// wraps the persistent on-disk invoice storage with an additional in-memory -// layer. The in-memory layer is in place such that debug invoices can be added -// which are volatile yet available system wide within the daemon. 
-func NewRegistry(cdb *channeldb.DB, expiryWatcher *InvoiceExpiryWatcher, - cfg *RegistryConfig) *InvoiceRegistry { - - return &InvoiceRegistry{ - cdb: cdb, - notificationClients: make(map[uint32]*InvoiceSubscription), - singleNotificationClients: make(map[uint32]*SingleInvoiceSubscription), - newSubscriptions: make(chan *InvoiceSubscription), - subscriptionCancels: make(chan uint32), - invoiceEvents: make(chan interface{}, 100), - hodlSubscriptions: make(map[channeldb.CircuitKey]map[chan<- interface{}]struct{}), - hodlReverseSubscriptions: make(map[chan<- interface{}]map[channeldb.CircuitKey]struct{}), - cfg: cfg, - htlcAutoReleaseChan: make(chan *htlcReleaseEvent), - expiryWatcher: expiryWatcher, - quit: make(chan struct{}), - } -} - -// scanInvoicesOnStart will scan all invoices on start and add active invoices -// to the invoice expirt watcher while also attempting to delete all canceled -// invoices. -func (i *InvoiceRegistry) scanInvoicesOnStart() er.R { - var ( - pending []*invoiceExpiry - removable []channeldb.InvoiceDeleteRef - ) - - reset := func() { - // Zero out our results on start and if the scan is ever run - // more than once. This latter case can happen if the kvdb - // layer needs to retry the View transaction underneath (eg. - // using the etcd driver, where all transactions are allowed - // to retry for serializability). - pending = nil - removable = make([]channeldb.InvoiceDeleteRef, 0) - } - - scanFunc := func( - paymentHash lntypes.Hash, invoice *channeldb.Invoice) er.R { - - if invoice.IsPending() { - expiryRef := makeInvoiceExpiry(paymentHash, invoice) - if expiryRef != nil { - pending = append(pending, expiryRef) - } - } else if i.cfg.GcCanceledInvoicesOnStartup && - invoice.State == channeldb.ContractCanceled { - - // Consider invoice for removal if it is already - // canceled. Invoices that are expired but not yet - // canceled, will be queued up for cancellation after - // startup and will be deleted afterwards. 
- ref := channeldb.InvoiceDeleteRef{ - PayHash: paymentHash, - AddIndex: invoice.AddIndex, - SettleIndex: invoice.SettleIndex, - } - - if invoice.Terms.PaymentAddr != channeldb.BlankPayAddr { - ref.PayAddr = &invoice.Terms.PaymentAddr - } - - removable = append(removable, ref) - } - return nil - } - - err := i.cdb.ScanInvoices(scanFunc, reset) - if err != nil { - return err - } - - log.Debugf("Adding %d pending invoices to the expiry watcher", - len(pending)) - i.expiryWatcher.AddInvoices(pending...) - - if err := i.cdb.DeleteInvoice(removable); err != nil { - log.Warnf("Deleting old invoices failed: %v", err) - } - - return nil -} - -// Start starts the registry and all goroutines it needs to carry out its task. -func (i *InvoiceRegistry) Start() er.R { - // Start InvoiceExpiryWatcher and prepopulate it with existing active - // invoices. - err := i.expiryWatcher.Start(i.cancelInvoiceImpl) - - if err != nil { - return err - } - - i.wg.Add(1) - go i.invoiceEventLoop() - - // Now scan all pending and removable invoices to the expiry watcher or - // delete them. - err = i.scanInvoicesOnStart() - if err != nil { - i.Stop() - return err - } - - return nil -} - -// Stop signals the registry for a graceful shutdown. -func (i *InvoiceRegistry) Stop() { - i.expiryWatcher.Stop() - - close(i.quit) - - i.wg.Wait() -} - -// invoiceEvent represents a new event that has modified on invoice on disk. -// Only two event types are currently supported: newly created invoices, and -// instance where invoices are settled. -type invoiceEvent struct { - hash lntypes.Hash - invoice *channeldb.Invoice -} - -// tickAt returns a channel that ticks at the specified time. If the time has -// already passed, it will tick immediately. 
-func (i *InvoiceRegistry) tickAt(t time.Time) <-chan time.Time { - now := i.cfg.Clock.Now() - return i.cfg.Clock.TickAfter(t.Sub(now)) -} - -// invoiceEventLoop is the dedicated goroutine responsible for accepting -// new notification subscriptions, cancelling old subscriptions, and -// dispatching new invoice events. -func (i *InvoiceRegistry) invoiceEventLoop() { - defer i.wg.Done() - - // Set up a heap for htlc auto-releases. - autoReleaseHeap := &queue.PriorityQueue{} - - for { - // If there is something to release, set up a release tick - // channel. - var nextReleaseTick <-chan time.Time - if autoReleaseHeap.Len() > 0 { - head := autoReleaseHeap.Top().(*htlcReleaseEvent) - nextReleaseTick = i.tickAt(head.releaseTime) - } - - select { - // A new invoice subscription for all invoices has just arrived! - // We'll query for any backlog notifications, then add it to the - // set of clients. - case newClient := <-i.newSubscriptions: - log.Infof("New invoice subscription "+ - "client: id=%v", newClient.id) - - // With the backlog notifications delivered (if any), - // we'll add this to our active subscriptions and - // continue. - i.notificationClients[newClient.id] = newClient - - // A client no longer wishes to receive invoice notifications. - // So we'll remove them from the set of active clients. - case clientID := <-i.subscriptionCancels: - log.Infof("Cancelling invoice subscription for "+ - "client=%v", clientID) - - delete(i.notificationClients, clientID) - delete(i.singleNotificationClients, clientID) - - // An invoice event has come in. This can either be an update to - // an invoice or a new single invoice subscriber. Both type of - // events are passed in via the same channel, to make sure that - // subscribers get a consistent view of the event sequence. - case event := <-i.invoiceEvents: - switch e := event.(type) { - - // A sub-systems has just modified the invoice state, so - // we'll dispatch notifications to all registered - // clients. 
- case *invoiceEvent: - // For backwards compatibility, do not notify - // all invoice subscribers of cancel and accept - // events. - state := e.invoice.State - if state != channeldb.ContractCanceled && - state != channeldb.ContractAccepted { - - i.dispatchToClients(e) - } - i.dispatchToSingleClients(e) - - // A new single invoice subscription has arrived. Add it - // to the set of clients. It is important to do this in - // sequence with any other invoice events, because an - // initial invoice update has already been sent out to - // the subscriber. - case *SingleInvoiceSubscription: - log.Infof("New single invoice subscription "+ - "client: id=%v, ref=%v", e.id, - e.invoiceRef) - - i.singleNotificationClients[e.id] = e - } - - // A new htlc came in for auto-release. - case event := <-i.htlcAutoReleaseChan: - log.Debugf("Scheduling auto-release for htlc: "+ - "ref=%v, key=%v at %v", - event.invoiceRef, event.key, event.releaseTime) - - // We use an independent timer for every htlc rather - // than a set timer that is reset with every htlc coming - // in. Otherwise the sender could keep resetting the - // timer until the broadcast window is entered and our - // channel is force closed. - autoReleaseHeap.Push(event) - - // The htlc at the top of the heap needs to be auto-released. - case <-nextReleaseTick: - event := autoReleaseHeap.Pop().(*htlcReleaseEvent) - err := i.cancelSingleHtlc( - event.invoiceRef, event.key, ResultMppTimeout, - ) - if err != nil { - log.Errorf("HTLC timer: %v", err) - } - - case <-i.quit: - return - } - } -} - -// dispatchToSingleClients passes the supplied event to all notification clients -// that subscribed to all the invoice this event applies to. -func (i *InvoiceRegistry) dispatchToSingleClients(event *invoiceEvent) { - // Dispatch to single invoice subscribers. 
- for _, client := range i.singleNotificationClients { - if client.invoiceRef.PayHash() != event.hash { - continue - } - - client.notify(event) - } -} - -// dispatchToClients passes the supplied event to all notification clients that -// subscribed to all invoices. Add and settle indices are used to make sure that -// clients don't receive duplicate or unwanted events. -func (i *InvoiceRegistry) dispatchToClients(event *invoiceEvent) { - invoice := event.invoice - - for clientID, client := range i.notificationClients { - // Before we dispatch this event, we'll check - // to ensure that this client hasn't already - // received this notification in order to - // ensure we don't duplicate any events. - - // TODO(joostjager): Refactor switches. - state := event.invoice.State - switch { - // If we've already sent this settle event to - // the client, then we can skip this. - case state == channeldb.ContractSettled && - client.settleIndex >= invoice.SettleIndex: - continue - - // Similarly, if we've already sent this add to - // the client then we can skip this one. - case state == channeldb.ContractOpen && - client.addIndex >= invoice.AddIndex: - continue - - // These two states should never happen, but we - // log them just in case so we can detect this - // instance. 
- case state == channeldb.ContractOpen && - client.addIndex+1 != invoice.AddIndex: - log.Warnf("client=%v for invoice "+ - "notifications missed an update, "+ - "add_index=%v, new add event index=%v", - clientID, client.addIndex, - invoice.AddIndex) - - case state == channeldb.ContractSettled && - client.settleIndex+1 != invoice.SettleIndex: - log.Warnf("client=%v for invoice "+ - "notifications missed an update, "+ - "settle_index=%v, new settle event index=%v", - clientID, client.settleIndex, - invoice.SettleIndex) - } - - select { - case client.ntfnQueue.ChanIn() <- &invoiceEvent{ - invoice: invoice, - }: - case <-i.quit: - return - } - - // Each time we send a notification to a client, we'll record - // the latest add/settle index it has. We'll use this to ensure - // we don't send a notification twice, which can happen if a new - // event is added while we're catching up a new client. - switch event.invoice.State { - case channeldb.ContractSettled: - client.settleIndex = invoice.SettleIndex - case channeldb.ContractOpen: - client.addIndex = invoice.AddIndex - default: - log.Errorf("unexpected invoice state: %v", - event.invoice.State) - } - } -} - -// deliverBacklogEvents will attempts to query the invoice database for any -// notifications that the client has missed since it reconnected last. -func (i *InvoiceRegistry) deliverBacklogEvents(client *InvoiceSubscription) er.R { - addEvents, err := i.cdb.InvoicesAddedSince(client.addIndex) - if err != nil { - return err - } - - settleEvents, err := i.cdb.InvoicesSettledSince(client.settleIndex) - if err != nil { - return err - } - - // If we have any to deliver, then we'll append them to the end of the - // notification queue in order to catch up the client before delivering - // any new notifications. - for _, addEvent := range addEvents { - // We re-bind the loop variable to ensure we don't hold onto - // the loop reference causing is to point to the same item. 
- addEvent := addEvent - - select { - case client.ntfnQueue.ChanIn() <- &invoiceEvent{ - invoice: &addEvent, - }: - case <-i.quit: - return ErrShuttingDown.Default() - } - } - - for _, settleEvent := range settleEvents { - // We re-bind the loop variable to ensure we don't hold onto - // the loop reference causing is to point to the same item. - settleEvent := settleEvent - - select { - case client.ntfnQueue.ChanIn() <- &invoiceEvent{ - invoice: &settleEvent, - }: - case <-i.quit: - return ErrShuttingDown.Default() - } - } - - return nil -} - -// deliverSingleBacklogEvents will attempt to query the invoice database to -// retrieve the current invoice state and deliver this to the subscriber. Single -// invoice subscribers will always receive the current state right after -// subscribing. Only in case the invoice does not yet exist, nothing is sent -// yet. -func (i *InvoiceRegistry) deliverSingleBacklogEvents( - client *SingleInvoiceSubscription) er.R { - - invoice, err := i.cdb.LookupInvoice(client.invoiceRef) - - // It is possible that the invoice does not exist yet, but the client is - // already watching it in anticipation. - if channeldb.ErrInvoiceNotFound.Is(err) || - channeldb.ErrNoInvoicesCreated.Is(err) { - - return nil - } - if err != nil { - return err - } - - err = client.notify(&invoiceEvent{ - hash: client.invoiceRef.PayHash(), - invoice: &invoice, - }) - if err != nil { - return err - } - - return nil -} - -// AddInvoice adds a regular invoice for the specified amount, identified by -// the passed preimage. Additionally, any memo or receipt data provided will -// also be stored on-disk. Once this invoice is added, subsystems within the -// daemon add/forward HTLCs are able to obtain the proper preimage required for -// redemption in the case that we're the final destination. We also return the -// addIndex of the newly created invoice which monotonically increases for each -// new invoice added. 
A side effect of this function is that it also sets -// AddIndex on the invoice argument. -func (i *InvoiceRegistry) AddInvoice(invoice *channeldb.Invoice, - paymentHash lntypes.Hash) (uint64, er.R) { - - i.Lock() - - ref := channeldb.InvoiceRefByHash(paymentHash) - log.Debugf("Invoice%v: added with terms %v", ref, invoice.Terms) - - addIndex, err := i.cdb.AddInvoice(invoice, paymentHash) - if err != nil { - i.Unlock() - return 0, err - } - - // Now that we've added the invoice, we'll send dispatch a message to - // notify the clients of this new invoice. - i.notifyClients(paymentHash, invoice, channeldb.ContractOpen) - i.Unlock() - - // InvoiceExpiryWatcher.AddInvoice must not be locked by InvoiceRegistry - // to avoid deadlock when a new invoice is added while an other is being - // canceled. - invoiceExpiryRef := makeInvoiceExpiry(paymentHash, invoice) - if invoiceExpiryRef != nil { - i.expiryWatcher.AddInvoices(invoiceExpiryRef) - } - - return addIndex, nil -} - -// LookupInvoice looks up an invoice by its payment hash (R-Hash), if found -// then we're able to pull the funds pending within an HTLC. -// -// TODO(roasbeef): ignore if settled? -func (i *InvoiceRegistry) LookupInvoice(rHash lntypes.Hash) (channeldb.Invoice, - er.R) { - - // We'll check the database to see if there's an existing matching - // invoice. - ref := channeldb.InvoiceRefByHash(rHash) - return i.cdb.LookupInvoice(ref) -} - -// startHtlcTimer starts a new timer via the invoice registry main loop that -// cancels a single htlc on an invoice when the htlc hold duration has passed. 
-func (i *InvoiceRegistry) startHtlcTimer(invoiceRef channeldb.InvoiceRef, - key channeldb.CircuitKey, acceptTime time.Time) er.R { - - releaseTime := acceptTime.Add(i.cfg.HtlcHoldDuration) - event := &htlcReleaseEvent{ - invoiceRef: invoiceRef, - key: key, - releaseTime: releaseTime, - } - - select { - case i.htlcAutoReleaseChan <- event: - return nil - - case <-i.quit: - return ErrShuttingDown.Default() - } -} - -// cancelSingleHtlc cancels a single accepted htlc on an invoice. It takes -// a resolution result which will be used to notify subscribed links and -// resolvers of the details of the htlc cancellation. -func (i *InvoiceRegistry) cancelSingleHtlc(invoiceRef channeldb.InvoiceRef, - key channeldb.CircuitKey, result FailResolutionResult) er.R { - - i.Lock() - defer i.Unlock() - - updateInvoice := func(invoice *channeldb.Invoice) ( - *channeldb.InvoiceUpdateDesc, er.R) { - - // Only allow individual htlc cancelation on open invoices. - if invoice.State != channeldb.ContractOpen { - log.Debugf("cancelSingleHtlc: invoice %v no longer "+ - "open", invoiceRef) - - return nil, nil - } - - // Lookup the current status of the htlc in the database. - htlc, ok := invoice.Htlcs[key] - if !ok { - return nil, er.Errorf("htlc %v not found", key) - } - - // Cancelation is only possible if the htlc wasn't already - // resolved. - if htlc.State != channeldb.HtlcStateAccepted { - log.Debugf("cancelSingleHtlc: htlc %v on invoice %v "+ - "is already resolved", key, invoiceRef) - - return nil, nil - } - - log.Debugf("cancelSingleHtlc: cancelling htlc %v on invoice %v", - key, invoiceRef) - - // Return an update descriptor that cancels htlc and keeps - // invoice open. - canceledHtlcs := map[channeldb.CircuitKey]struct{}{ - key: {}, - } - - return &channeldb.InvoiceUpdateDesc{ - CancelHtlcs: canceledHtlcs, - }, nil - } - - // Try to mark the specified htlc as canceled in the invoice database. - // Intercept the update descriptor to set the local updated variable. 
If - // no invoice update is performed, we can return early. - var updated bool - invoice, err := i.cdb.UpdateInvoice(invoiceRef, - func(invoice *channeldb.Invoice) ( - *channeldb.InvoiceUpdateDesc, er.R) { - - updateDesc, err := updateInvoice(invoice) - if err != nil { - return nil, err - } - updated = updateDesc != nil - - return updateDesc, err - }, - ) - if err != nil { - return err - } - if !updated { - return nil - } - - // The invoice has been updated. Notify subscribers of the htlc - // resolution. - htlc, ok := invoice.Htlcs[key] - if !ok { - return er.Errorf("htlc %v not found", key) - } - if htlc.State == channeldb.HtlcStateCanceled { - resolution := NewFailResolution( - key, int32(htlc.AcceptHeight), result, - ) - - i.notifyHodlSubscribers(resolution) - } - return nil -} - -// processKeySend just-in-time inserts an invoice if this htlc is a keysend -// htlc. -func (i *InvoiceRegistry) processKeySend(ctx invoiceUpdateCtx) er.R { - // Retrieve keysend record if present. - preimageSlice, ok := ctx.customRecords[record.KeySendType] - if !ok { - return nil - } - - // Cancel htlc is preimage is invalid. - preimage, err := lntypes.MakePreimage(preimageSlice) - if err != nil || preimage.Hash() != ctx.hash { - return er.New("invalid keysend preimage") - } - - // Only allow keysend for non-mpp payments. - if ctx.mpp != nil { - return er.New("no mpp keysend supported") - } - - // Create an invoice for the htlc amount. - amt := ctx.amtPaid - - // Set tlv optional feature vector on the invoice. Otherwise we wouldn't - // be able to pay to it with keysend. - rawFeatures := lnwire.NewRawFeatureVector( - lnwire.TLVOnionPayloadOptional, - ) - features := lnwire.NewFeatureVector(rawFeatures, lnwire.Features) - - // Use the minimum block delta that we require for settling htlcs. - finalCltvDelta := i.cfg.FinalCltvRejectDelta - - // Pre-check expiry here to prevent inserting an invoice that will not - // be settled. 
- if ctx.expiry < uint32(ctx.currentHeight+finalCltvDelta) { - return er.New("final expiry too soon") - } - - // The invoice database indexes all invoices by payment address, however - // legacy keysend payment do not have one. In order to avoid a new - // payment type on-disk wrt. to indexing, we'll continue to insert a - // blank payment address which is special cased in the insertion logic - // to not be indexed. In the future, once AMP is merged, this should be - // replaced by generating a random payment address on the behalf of the - // sender. - payAddr := channeldb.BlankPayAddr - - // Create placeholder invoice. - invoice := &channeldb.Invoice{ - CreationDate: i.cfg.Clock.Now(), - Terms: channeldb.ContractTerm{ - FinalCltvDelta: finalCltvDelta, - Value: amt, - PaymentPreimage: &preimage, - PaymentAddr: payAddr, - Features: features, - }, - } - - if i.cfg.KeysendHoldTime != 0 { - invoice.HodlInvoice = true - invoice.Terms.Expiry = i.cfg.KeysendHoldTime - } - - // Insert invoice into database. Ignore duplicates, because this - // may be a replay. - _, err = i.AddInvoice(invoice, ctx.hash) - if err != nil && !channeldb.ErrDuplicateInvoice.Is(err) { - return err - } - - return nil -} - -// NotifyExitHopHtlc attempts to mark an invoice as settled. The return value -// describes how the htlc should be resolved. -// -// When the preimage of the invoice is not yet known (hodl invoice), this -// function moves the invoice to the accepted state. When SettleHoldInvoice is -// called later, a resolution message will be send back to the caller via the -// provided hodlChan. Invoice registry sends on this channel what action needs -// to be taken on the htlc (settle or cancel). The caller needs to ensure that -// the channel is either buffered or received on from another goroutine to -// prevent deadlock. 
-// -// In the case that the htlc is part of a larger set of htlcs that pay to the -// same invoice (multi-path payment), the htlc is held until the set is -// complete. If the set doesn't fully arrive in time, a timer will cancel the -// held htlc. -func (i *InvoiceRegistry) NotifyExitHopHtlc(rHash lntypes.Hash, - amtPaid lnwire.MilliSatoshi, expiry uint32, currentHeight int32, - circuitKey channeldb.CircuitKey, hodlChan chan<- interface{}, - payload Payload) (HtlcResolution, er.R) { - - // Create the update context containing the relevant details of the - // incoming htlc. - ctx := invoiceUpdateCtx{ - hash: rHash, - circuitKey: circuitKey, - amtPaid: amtPaid, - expiry: expiry, - currentHeight: currentHeight, - finalCltvRejectDelta: i.cfg.FinalCltvRejectDelta, - customRecords: payload.CustomRecords(), - mpp: payload.MultiPath(), - } - - // Process keysend if present. Do this outside of the lock, because - // AddInvoice obtains its own lock. This is no problem, because the - // operation is idempotent. - if i.cfg.AcceptKeySend { - err := i.processKeySend(ctx) - if err != nil { - ctx.log(fmt.Sprintf("keysend error: %v", err)) - - return NewFailResolution( - circuitKey, currentHeight, ResultKeySendError, - ), nil - } - } - - // Execute locked notify exit hop logic. - i.Lock() - resolution, err := i.notifyExitHopHtlcLocked(&ctx, hodlChan) - i.Unlock() - if err != nil { - return nil, err - } - - switch r := resolution.(type) { - // The htlc is held. Start a timer outside the lock if the htlc should - // be auto-released, because otherwise a deadlock may happen with the - // main event loop. - case *htlcAcceptResolution: - if r.autoRelease { - err := i.startHtlcTimer( - ctx.invoiceRef(), circuitKey, r.acceptTime, - ) - if err != nil { - return nil, err - } - } - - // We return a nil resolution because htlc acceptances are - // represented as nil resolutions externally. - // TODO(carla) update calling code to handle accept resolutions. 
- return nil, nil - - // A direct resolution was received for this htlc. - case HtlcResolution: - return r, nil - - // Fail if an unknown resolution type was received. - default: - return nil, er.New("invalid resolution type") - } -} - -// notifyExitHopHtlcLocked is the internal implementation of NotifyExitHopHtlc -// that should be executed inside the registry lock. -func (i *InvoiceRegistry) notifyExitHopHtlcLocked( - ctx *invoiceUpdateCtx, hodlChan chan<- interface{}) ( - HtlcResolution, er.R) { - - // We'll attempt to settle an invoice matching this rHash on disk (if - // one exists). The callback will update the invoice state and/or htlcs. - var ( - resolution HtlcResolution - updateSubscribers bool - ) - invoice, err := i.cdb.UpdateInvoice( - ctx.invoiceRef(), - func(inv *channeldb.Invoice) ( - *channeldb.InvoiceUpdateDesc, er.R) { - - updateDesc, res, err := updateInvoice(ctx, inv) - if err != nil { - return nil, err - } - - // Only send an update if the invoice state was changed. - updateSubscribers = updateDesc != nil && - updateDesc.State != nil - - // Assign resolution to outer scope variable. - resolution = res - - return updateDesc, nil - }, - ) - switch { - case channeldb.ErrInvoiceNotFound.Is(err): - // If the invoice was not found, return a failure resolution - // with an invoice not found result. - return NewFailResolution( - ctx.circuitKey, ctx.currentHeight, - ResultInvoiceNotFound, - ), nil - - case err == nil: - - default: - ctx.log(err.String()) - return nil, err - } - - switch res := resolution.(type) { - case *HtlcFailResolution: - // Inspect latest htlc state on the invoice. If it is found, - // we will update the accept height as it was recorded in the - // invoice database (which occurs in the case where the htlc - // reached the database in a previous call). If the htlc was - // not found on the invoice, it was immediately failed so we - // send the failure resolution as is, which has the current - // height set as the accept height. 
- invoiceHtlc, ok := invoice.Htlcs[ctx.circuitKey] - if ok { - res.AcceptHeight = int32(invoiceHtlc.AcceptHeight) - } - - ctx.log(fmt.Sprintf("failure resolution result "+ - "outcome: %v, at accept height: %v", - res.Outcome, res.AcceptHeight)) - - // If the htlc was settled, we will settle any previously accepted - // htlcs and notify our peer to settle them. - case *HtlcSettleResolution: - ctx.log(fmt.Sprintf("settle resolution result "+ - "outcome: %v, at accept height: %v", - res.Outcome, res.AcceptHeight)) - - // Also settle any previously accepted htlcs. If a htlc is - // marked as settled, we should follow now and settle the htlc - // with our peer. - for key, htlc := range invoice.Htlcs { - if htlc.State != channeldb.HtlcStateSettled { - continue - } - - // Notify subscribers that the htlcs should be settled - // with our peer. Note that the outcome of the - // resolution is set based on the outcome of the single - // htlc that we just settled, so may not be accurate - // for all htlcs. - htlcSettleResolution := NewSettleResolution( - res.Preimage, key, - int32(htlc.AcceptHeight), res.Outcome, - ) - - // Notify subscribers that the htlc should be settled - // with our peer. - i.notifyHodlSubscribers(htlcSettleResolution) - } - - // If we accepted the htlc, subscribe to the hodl invoice and return - // an accept resolution with the htlc's accept time on it. - case *htlcAcceptResolution: - invoiceHtlc, ok := invoice.Htlcs[ctx.circuitKey] - if !ok { - return nil, er.Errorf("accepted htlc: %v not"+ - " present on invoice: %x", ctx.circuitKey, - ctx.hash[:]) - } - - // Determine accepted height of this htlc. If the htlc reached - // the invoice database (possibly in a previous call to the - // invoice registry), we'll take the original accepted height - // as it was recorded in the database. 
- acceptHeight := int32(invoiceHtlc.AcceptHeight) - - ctx.log(fmt.Sprintf("accept resolution result "+ - "outcome: %v, at accept height: %v", - res.outcome, acceptHeight)) - - // Auto-release the htlc if the invoice is still open. It can - // only happen for mpp payments that there are htlcs in state - // Accepted while the invoice is Open. - if invoice.State == channeldb.ContractOpen { - res.acceptTime = invoiceHtlc.AcceptTime - res.autoRelease = true - - } - - i.hodlSubscribe(hodlChan, ctx.circuitKey) - - default: - panic("unknown action") - } - - // Now that the links have been notified of any state changes to their - // HTLCs, we'll go ahead and notify any clients wiaiting on the invoice - // state changes. - if updateSubscribers { - i.notifyClients(ctx.hash, invoice, invoice.State) - } - - return resolution, nil -} - -// SettleHodlInvoice sets the preimage of a hodl invoice. -func (i *InvoiceRegistry) SettleHodlInvoice(preimage lntypes.Preimage) er.R { - i.Lock() - defer i.Unlock() - - updateInvoice := func(invoice *channeldb.Invoice) ( - *channeldb.InvoiceUpdateDesc, er.R) { - - switch invoice.State { - case channeldb.ContractOpen: - return nil, channeldb.ErrInvoiceStillOpen.Default() - case channeldb.ContractCanceled: - return nil, channeldb.ErrInvoiceAlreadyCanceled.Default() - case channeldb.ContractSettled: - return nil, channeldb.ErrInvoiceAlreadySettled.Default() - } - - return &channeldb.InvoiceUpdateDesc{ - State: &channeldb.InvoiceStateUpdateDesc{ - NewState: channeldb.ContractSettled, - Preimage: &preimage, - }, - }, nil - } - - hash := preimage.Hash() - invoiceRef := channeldb.InvoiceRefByHash(hash) - invoice, err := i.cdb.UpdateInvoice(invoiceRef, updateInvoice) - if err != nil { - log.Errorf("SettleHodlInvoice with preimage %v: %v", - preimage, err) - - return err - } - - log.Debugf("Invoice%v: settled with preimage %v", invoiceRef, - invoice.Terms.PaymentPreimage) - - // In the callback, we marked the invoice as settled. 
UpdateInvoice will - // have seen this and should have moved all htlcs that were accepted to - // the settled state. In the loop below, we go through all of these and - // notify links and resolvers that are waiting for resolution. Any htlcs - // that were already settled before, will be notified again. This isn't - // necessary but doesn't hurt either. - for key, htlc := range invoice.Htlcs { - if htlc.State != channeldb.HtlcStateSettled { - continue - } - - resolution := NewSettleResolution( - preimage, key, int32(htlc.AcceptHeight), ResultSettled, - ) - - i.notifyHodlSubscribers(resolution) - } - i.notifyClients(hash, invoice, invoice.State) - - return nil -} - -// CancelInvoice attempts to cancel the invoice corresponding to the passed -// payment hash. -func (i *InvoiceRegistry) CancelInvoice(payHash lntypes.Hash) er.R { - return i.cancelInvoiceImpl(payHash, true) -} - -// cancelInvoice attempts to cancel the invoice corresponding to the passed -// payment hash. Accepted invoices will only be canceled if explicitly -// requested to do so. It notifies subscribing links and resolvers that -// the associated htlcs were canceled if they change state. -func (i *InvoiceRegistry) cancelInvoiceImpl(payHash lntypes.Hash, - cancelAccepted bool) er.R { - - i.Lock() - defer i.Unlock() - - ref := channeldb.InvoiceRefByHash(payHash) - log.Debugf("Invoice%v: canceling invoice", ref) - - updateInvoice := func(invoice *channeldb.Invoice) ( - *channeldb.InvoiceUpdateDesc, er.R) { - - // Only cancel the invoice in ContractAccepted state if explicitly - // requested to do so. - if invoice.State == channeldb.ContractAccepted && !cancelAccepted { - return nil, nil - } - - // Move invoice to the canceled state. Rely on validation in - // channeldb to return an error if the invoice is already - // settled or canceled. 
- return &channeldb.InvoiceUpdateDesc{ - State: &channeldb.InvoiceStateUpdateDesc{ - NewState: channeldb.ContractCanceled, - }, - }, nil - } - - invoiceRef := channeldb.InvoiceRefByHash(payHash) - invoice, err := i.cdb.UpdateInvoice(invoiceRef, updateInvoice) - - // Implement idempotency by returning success if the invoice was already - // canceled. - if channeldb.ErrInvoiceAlreadyCanceled.Is(err) { - log.Debugf("Invoice%v: already canceled", ref) - return nil - } - if err != nil { - return err - } - - // Return without cancellation if the invoice state is ContractAccepted. - if invoice.State == channeldb.ContractAccepted { - log.Debugf("Invoice%v: remains accepted as cancel wasn't"+ - "explicitly requested.", ref) - return nil - } - - log.Debugf("Invoice%v: canceled", ref) - - // In the callback, some htlcs may have been moved to the canceled - // state. We now go through all of these and notify links and resolvers - // that are waiting for resolution. Any htlcs that were already canceled - // before, will be notified again. This isn't necessary but doesn't hurt - // either. - for key, htlc := range invoice.Htlcs { - if htlc.State != channeldb.HtlcStateCanceled { - continue - } - - i.notifyHodlSubscribers( - NewFailResolution( - key, int32(htlc.AcceptHeight), ResultCanceled, - ), - ) - } - i.notifyClients(payHash, invoice, channeldb.ContractCanceled) - - // Attempt to also delete the invoice if requested through the registry - // config. - if i.cfg.GcCanceledInvoicesOnTheFly { - // Assemble the delete reference and attempt to delete through - // the invocice from the DB. 
- deleteRef := channeldb.InvoiceDeleteRef{ - PayHash: payHash, - AddIndex: invoice.AddIndex, - SettleIndex: invoice.SettleIndex, - } - if invoice.Terms.PaymentAddr != channeldb.BlankPayAddr { - deleteRef.PayAddr = &invoice.Terms.PaymentAddr - } - - err = i.cdb.DeleteInvoice( - []channeldb.InvoiceDeleteRef{deleteRef}, - ) - // If by any chance deletion failed, then log it instead of - // returning the error, as the invoice itsels has already been - // canceled. - if err != nil { - log.Warnf("Invoice%v could not be deleted: %v", - ref, err) - } - } - - return nil -} - -// notifyClients notifies all currently registered invoice notification clients -// of a newly added/settled invoice. -func (i *InvoiceRegistry) notifyClients(hash lntypes.Hash, - invoice *channeldb.Invoice, - state channeldb.ContractState) { - - event := &invoiceEvent{ - invoice: invoice, - hash: hash, - } - - select { - case i.invoiceEvents <- event: - case <-i.quit: - } -} - -// invoiceSubscriptionKit defines that are common to both all invoice -// subscribers and single invoice subscribers. -type invoiceSubscriptionKit struct { - id uint32 - inv *InvoiceRegistry - ntfnQueue *queue.ConcurrentQueue - - canceled uint32 // To be used atomically. - cancelChan chan struct{} - wg sync.WaitGroup -} - -// InvoiceSubscription represents an intent to receive updates for newly added -// or settled invoices. For each newly added invoice, a copy of the invoice -// will be sent over the NewInvoices channel. Similarly, for each newly settled -// invoice, a copy of the invoice will be sent over the SettledInvoices -// channel. -type InvoiceSubscription struct { - invoiceSubscriptionKit - - // NewInvoices is a channel that we'll use to send all newly created - // invoices with an invoice index greater than the specified - // StartingInvoiceIndex field. 
- NewInvoices chan *channeldb.Invoice - - // SettledInvoices is a channel that we'll use to send all setted - // invoices with an invoices index greater than the specified - // StartingInvoiceIndex field. - SettledInvoices chan *channeldb.Invoice - - // addIndex is the highest add index the caller knows of. We'll use - // this information to send out an event backlog to the notifications - // subscriber. Any new add events with an index greater than this will - // be dispatched before any new notifications are sent out. - addIndex uint64 - - // settleIndex is the highest settle index the caller knows of. We'll - // use this information to send out an event backlog to the - // notifications subscriber. Any new settle events with an index - // greater than this will be dispatched before any new notifications - // are sent out. - settleIndex uint64 -} - -// SingleInvoiceSubscription represents an intent to receive updates for a -// specific invoice. -type SingleInvoiceSubscription struct { - invoiceSubscriptionKit - - invoiceRef channeldb.InvoiceRef - - // Updates is a channel that we'll use to send all invoice events for - // the invoice that is subscribed to. - Updates chan *channeldb.Invoice -} - -// Cancel unregisters the InvoiceSubscription, freeing any previously allocated -// resources. -func (i *invoiceSubscriptionKit) Cancel() { - if !atomic.CompareAndSwapUint32(&i.canceled, 0, 1) { - return - } - - select { - case i.inv.subscriptionCancels <- i.id: - case <-i.inv.quit: - } - - i.ntfnQueue.Stop() - close(i.cancelChan) - - i.wg.Wait() -} - -func (i *invoiceSubscriptionKit) notify(event *invoiceEvent) er.R { - select { - case i.ntfnQueue.ChanIn() <- event: - case <-i.inv.quit: - return ErrShuttingDown.Default() - } - - return nil -} - -// SubscribeNotifications returns an InvoiceSubscription which allows the -// caller to receive async notifications when any invoices are settled or -// added. The invoiceIndex parameter is a streaming "checkpoint". 
We'll start -// by first sending out all new events with an invoice index _greater_ than -// this value. Afterwards, we'll send out real-time notifications. -func (i *InvoiceRegistry) SubscribeNotifications( - addIndex, settleIndex uint64) (*InvoiceSubscription, er.R) { - - client := &InvoiceSubscription{ - NewInvoices: make(chan *channeldb.Invoice), - SettledInvoices: make(chan *channeldb.Invoice), - addIndex: addIndex, - settleIndex: settleIndex, - invoiceSubscriptionKit: invoiceSubscriptionKit{ - inv: i, - ntfnQueue: queue.NewConcurrentQueue(20), - cancelChan: make(chan struct{}), - }, - } - client.ntfnQueue.Start() - - i.clientMtx.Lock() - client.id = i.nextClientID - i.nextClientID++ - i.clientMtx.Unlock() - - // Before we register this new invoice subscription, we'll launch a new - // goroutine that will proxy all notifications appended to the end of - // the concurrent queue to the two client-side channels the caller will - // feed off of. - i.wg.Add(1) - go func() { - defer i.wg.Done() - - for { - select { - // A new invoice event has been sent by the - // invoiceRegistry! We'll figure out if this is an add - // event or a settle event, then dispatch the event to - // the client. - case ntfn := <-client.ntfnQueue.ChanOut(): - invoiceEvent := ntfn.(*invoiceEvent) - - var targetChan chan *channeldb.Invoice - state := invoiceEvent.invoice.State - switch state { - case channeldb.ContractOpen: - targetChan = client.NewInvoices - case channeldb.ContractSettled: - targetChan = client.SettledInvoices - default: - log.Errorf("unknown invoice "+ - "state: %v", state) - - continue - } - - select { - case targetChan <- invoiceEvent.invoice: - - case <-client.cancelChan: - return - - case <-i.quit: - return - } - - case <-client.cancelChan: - return - - case <-i.quit: - return - } - } - }() - - i.Lock() - defer i.Unlock() - - // Query the database to see if based on the provided addIndex and - // settledIndex we need to deliver any backlog notifications. 
- err := i.deliverBacklogEvents(client) - if err != nil { - return nil, err - } - - select { - case i.newSubscriptions <- client: - case <-i.quit: - return nil, ErrShuttingDown.Default() - } - - return client, nil -} - -// SubscribeSingleInvoice returns an SingleInvoiceSubscription which allows the -// caller to receive async notifications for a specific invoice. -func (i *InvoiceRegistry) SubscribeSingleInvoice( - hash lntypes.Hash) (*SingleInvoiceSubscription, er.R) { - - client := &SingleInvoiceSubscription{ - Updates: make(chan *channeldb.Invoice), - invoiceSubscriptionKit: invoiceSubscriptionKit{ - inv: i, - ntfnQueue: queue.NewConcurrentQueue(20), - cancelChan: make(chan struct{}), - }, - invoiceRef: channeldb.InvoiceRefByHash(hash), - } - client.ntfnQueue.Start() - - i.clientMtx.Lock() - client.id = i.nextClientID - i.nextClientID++ - i.clientMtx.Unlock() - - // Before we register this new invoice subscription, we'll launch a new - // goroutine that will proxy all notifications appended to the end of - // the concurrent queue to the two client-side channels the caller will - // feed off of. - i.wg.Add(1) - go func() { - defer i.wg.Done() - - for { - select { - // A new invoice event has been sent by the - // invoiceRegistry. We will dispatch the event to the - // client. - case ntfn := <-client.ntfnQueue.ChanOut(): - invoiceEvent := ntfn.(*invoiceEvent) - - select { - case client.Updates <- invoiceEvent.invoice: - - case <-client.cancelChan: - return - - case <-i.quit: - return - } - - case <-client.cancelChan: - return - - case <-i.quit: - return - } - } - }() - - // Within the lock, we both query the invoice state and pass the client - // subscription to the invoiceEvents channel. This is to make sure that - // the client receives a consistent stream of events. 
- i.Lock() - defer i.Unlock() - - err := i.deliverSingleBacklogEvents(client) - if err != nil { - return nil, err - } - - select { - case i.invoiceEvents <- client: - case <-i.quit: - return nil, ErrShuttingDown.Default() - } - - return client, nil -} - -// notifyHodlSubscribers sends out the htlc resolution to all current -// subscribers. -func (i *InvoiceRegistry) notifyHodlSubscribers(htlcResolution HtlcResolution) { - subscribers, ok := i.hodlSubscriptions[htlcResolution.CircuitKey()] - if !ok { - return - } - - // Notify all interested subscribers and remove subscription from both - // maps. The subscription can be removed as there only ever will be a - // single resolution for each hash. - for subscriber := range subscribers { - select { - case subscriber <- htlcResolution: - case <-i.quit: - return - } - - delete( - i.hodlReverseSubscriptions[subscriber], - htlcResolution.CircuitKey(), - ) - } - - delete(i.hodlSubscriptions, htlcResolution.CircuitKey()) -} - -// hodlSubscribe adds a new invoice subscription. -func (i *InvoiceRegistry) hodlSubscribe(subscriber chan<- interface{}, - circuitKey channeldb.CircuitKey) { - - log.Debugf("Hodl subscribe for %v", circuitKey) - - subscriptions, ok := i.hodlSubscriptions[circuitKey] - if !ok { - subscriptions = make(map[chan<- interface{}]struct{}) - i.hodlSubscriptions[circuitKey] = subscriptions - } - subscriptions[subscriber] = struct{}{} - - reverseSubscriptions, ok := i.hodlReverseSubscriptions[subscriber] - if !ok { - reverseSubscriptions = make(map[channeldb.CircuitKey]struct{}) - i.hodlReverseSubscriptions[subscriber] = reverseSubscriptions - } - reverseSubscriptions[circuitKey] = struct{}{} -} - -// HodlUnsubscribeAll cancels the subscription. 
-func (i *InvoiceRegistry) HodlUnsubscribeAll(subscriber chan<- interface{}) { - i.Lock() - defer i.Unlock() - - hashes := i.hodlReverseSubscriptions[subscriber] - for hash := range hashes { - delete(i.hodlSubscriptions[hash], subscriber) - } - - delete(i.hodlReverseSubscriptions, subscriber) -} diff --git a/lnd/invoices/invoiceregistry_test.go b/lnd/invoices/invoiceregistry_test.go deleted file mode 100644 index 108420a8..00000000 --- a/lnd/invoices/invoiceregistry_test.go +++ /dev/null @@ -1,1194 +0,0 @@ -package invoices - -import ( - "math" - "testing" - "time" - - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/clock" - "github.com/pkt-cash/pktd/lnd/lntypes" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/record" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// TestSettleInvoice tests settling of an invoice and related notifications. -func TestSettleInvoice(t *testing.T) { - ctx := newTestContext(t) - defer ctx.cleanup() - - allSubscriptions, err := ctx.registry.SubscribeNotifications(0, 0) - assert.Nil(t, err) - defer allSubscriptions.Cancel() - - // Subscribe to the not yet existing invoice. - subscription, err := ctx.registry.SubscribeSingleInvoice(testInvoicePaymentHash) - if err != nil { - t.Fatal(err) - } - defer subscription.Cancel() - - if subscription.invoiceRef.PayHash() != testInvoicePaymentHash { - t.Fatalf("expected subscription for provided hash") - } - - // Add the invoice. - addIdx, err := ctx.registry.AddInvoice(testInvoice, testInvoicePaymentHash) - if err != nil { - t.Fatal(err) - } - - if addIdx != 1 { - t.Fatalf("expected addIndex to start with 1, but got %v", - addIdx) - } - - // We expect the open state to be sent to the single invoice subscriber. 
- select { - case update := <-subscription.Updates: - if update.State != channeldb.ContractOpen { - t.Fatalf("expected state ContractOpen, but got %v", - update.State) - } - case <-time.After(testTimeout): - t.Fatal("no update received") - } - - // We expect a new invoice notification to be sent out. - select { - case newInvoice := <-allSubscriptions.NewInvoices: - if newInvoice.State != channeldb.ContractOpen { - t.Fatalf("expected state ContractOpen, but got %v", - newInvoice.State) - } - case <-time.After(testTimeout): - t.Fatal("no update received") - } - - hodlChan := make(chan interface{}, 1) - - // Try to settle invoice with an htlc that expires too soon. - resolution, err := ctx.registry.NotifyExitHopHtlc( - testInvoicePaymentHash, testInvoice.Terms.Value, - uint32(testCurrentHeight)+testInvoiceCltvDelta-1, - testCurrentHeight, getCircuitKey(10), hodlChan, testPayload, - ) - if err != nil { - t.Fatal(err) - } - failResolution, ok := resolution.(*HtlcFailResolution) - if !ok { - t.Fatalf("expected fail resolution, got: %T", - resolution) - } - if failResolution.AcceptHeight != testCurrentHeight { - t.Fatalf("expected acceptHeight %v, but got %v", - testCurrentHeight, failResolution.AcceptHeight) - } - if failResolution.Outcome != ResultExpiryTooSoon { - t.Fatalf("expected expiry too soon, got: %v", - failResolution.Outcome) - } - - // Settle invoice with a slightly higher amount. 
- amtPaid := lnwire.MilliSatoshi(100500) - resolution, err = ctx.registry.NotifyExitHopHtlc( - testInvoicePaymentHash, amtPaid, testHtlcExpiry, - testCurrentHeight, getCircuitKey(0), hodlChan, - testPayload, - ) - if err != nil { - t.Fatal(err) - } - settleResolution, ok := resolution.(*HtlcSettleResolution) - if !ok { - t.Fatalf("expected settle resolution, got: %T", - resolution) - } - if settleResolution.Outcome != ResultSettled { - t.Fatalf("expected settled, got: %v", - settleResolution.Outcome) - } - - // We expect the settled state to be sent to the single invoice - // subscriber. - select { - case update := <-subscription.Updates: - if update.State != channeldb.ContractSettled { - t.Fatalf("expected state ContractOpen, but got %v", - update.State) - } - if update.AmtPaid != amtPaid { - t.Fatal("invoice AmtPaid incorrect") - } - case <-time.After(testTimeout): - t.Fatal("no update received") - } - - // We expect a settled notification to be sent out. - select { - case settledInvoice := <-allSubscriptions.SettledInvoices: - if settledInvoice.State != channeldb.ContractSettled { - t.Fatalf("expected state ContractOpen, but got %v", - settledInvoice.State) - } - case <-time.After(testTimeout): - t.Fatal("no update received") - } - - // Try to settle again with the same htlc id. We need this idempotent - // behaviour after a restart. - resolution, err = ctx.registry.NotifyExitHopHtlc( - testInvoicePaymentHash, amtPaid, testHtlcExpiry, testCurrentHeight, - getCircuitKey(0), hodlChan, testPayload, - ) - if err != nil { - t.Fatalf("unexpected NotifyExitHopHtlc error: %v", err) - } - settleResolution, ok = resolution.(*HtlcSettleResolution) - if !ok { - t.Fatalf("expected settle resolution, got: %T", - resolution) - } - if settleResolution.Outcome != ResultReplayToSettled { - t.Fatalf("expected replay settled, got: %v", - settleResolution.Outcome) - } - - // Try to settle again with a new higher-valued htlc. 
This payment - // should also be accepted, to prevent any change in behaviour for a - // paid invoice that may open up a probe vector. - resolution, err = ctx.registry.NotifyExitHopHtlc( - testInvoicePaymentHash, amtPaid+600, testHtlcExpiry, testCurrentHeight, - getCircuitKey(1), hodlChan, testPayload, - ) - if err != nil { - t.Fatalf("unexpected NotifyExitHopHtlc error: %v", err) - } - settleResolution, ok = resolution.(*HtlcSettleResolution) - if !ok { - t.Fatalf("expected settle resolution, got: %T", - resolution) - } - if settleResolution.Outcome != ResultDuplicateToSettled { - t.Fatalf("expected duplicate settled, got: %v", - settleResolution.Outcome) - } - - // Try to settle again with a lower amount. This should fail just as it - // would have failed if it were the first payment. - resolution, err = ctx.registry.NotifyExitHopHtlc( - testInvoicePaymentHash, amtPaid-600, testHtlcExpiry, testCurrentHeight, - getCircuitKey(2), hodlChan, testPayload, - ) - if err != nil { - t.Fatalf("unexpected NotifyExitHopHtlc error: %v", err) - } - failResolution, ok = resolution.(*HtlcFailResolution) - if !ok { - t.Fatalf("expected fail resolution, got: %T", - resolution) - } - if failResolution.Outcome != ResultAmountTooLow { - t.Fatalf("expected amount too low, got: %v", - failResolution.Outcome) - } - - // Check that settled amount is equal to the sum of values of the htlcs - // 0 and 1. - inv, err := ctx.registry.LookupInvoice(testInvoicePaymentHash) - if err != nil { - t.Fatal(err) - } - if inv.AmtPaid != amtPaid+amtPaid+600 { - t.Fatal("amount incorrect") - } - - // Try to cancel. - err = ctx.registry.CancelInvoice(testInvoicePaymentHash) - if !channeldb.ErrInvoiceAlreadySettled.Is(err) { - t.Fatal("expected cancelation of a settled invoice to fail") - } - - // As this is a direct sette, we expect nothing on the hodl chan. 
- select { - case <-hodlChan: - t.Fatal("unexpected resolution") - default: - } -} - -func testCancelInvoice(t *testing.T, gc bool) { - ctx := newTestContext(t) - defer ctx.cleanup() - - // If set to true, then also delete the invoice from the DB after - // cancellation. - ctx.registry.cfg.GcCanceledInvoicesOnTheFly = gc - - allSubscriptions, err := ctx.registry.SubscribeNotifications(0, 0) - assert.Nil(t, err) - defer allSubscriptions.Cancel() - - // Try to cancel the not yet existing invoice. This should fail. - err = ctx.registry.CancelInvoice(testInvoicePaymentHash) - if !channeldb.ErrInvoiceNotFound.Is(err) { - t.Fatalf("expected ErrInvoiceNotFound, but got %v", err) - } - - // Subscribe to the not yet existing invoice. - subscription, err := ctx.registry.SubscribeSingleInvoice(testInvoicePaymentHash) - if err != nil { - t.Fatal(err) - } - defer subscription.Cancel() - - if subscription.invoiceRef.PayHash() != testInvoicePaymentHash { - t.Fatalf("expected subscription for provided hash") - } - - // Add the invoice. - amt := lnwire.MilliSatoshi(100000) - _, err = ctx.registry.AddInvoice(testInvoice, testInvoicePaymentHash) - if err != nil { - t.Fatal(err) - } - - // We expect the open state to be sent to the single invoice subscriber. - select { - case update := <-subscription.Updates: - if update.State != channeldb.ContractOpen { - t.Fatalf( - "expected state ContractOpen, but got %v", - update.State, - ) - } - case <-time.After(testTimeout): - t.Fatal("no update received") - } - - // We expect a new invoice notification to be sent out. - select { - case newInvoice := <-allSubscriptions.NewInvoices: - if newInvoice.State != channeldb.ContractOpen { - t.Fatalf( - "expected state ContractOpen, but got %v", - newInvoice.State, - ) - } - case <-time.After(testTimeout): - t.Fatal("no update received") - } - - // Cancel invoice. 
- err = ctx.registry.CancelInvoice(testInvoicePaymentHash) - if err != nil { - t.Fatal(err) - } - - // We expect the canceled state to be sent to the single invoice - // subscriber. - select { - case update := <-subscription.Updates: - if update.State != channeldb.ContractCanceled { - t.Fatalf( - "expected state ContractCanceled, but got %v", - update.State, - ) - } - case <-time.After(testTimeout): - t.Fatal("no update received") - } - - if gc { - // Check that the invoice has been deleted from the db. - _, err = ctx.cdb.LookupInvoice( - channeldb.InvoiceRefByHash(testInvoicePaymentHash), - ) - util.RequireErr(t, err) - } - - // We expect no cancel notification to be sent to all invoice - // subscribers (backwards compatibility). - - // Try to cancel again. Expect that we report ErrInvoiceNotFound if the - // invoice has been garbage collected (since the invoice has been - // deleted when it was canceled), and no error otherwise. - err = ctx.registry.CancelInvoice(testInvoicePaymentHash) - - if gc { - util.RequireErr(t, err, channeldb.ErrInvoiceNotFound) - } else { - util.RequireNoErr(t, err) - } - - // Notify arrival of a new htlc paying to this invoice. This should - // result in a cancel resolution. 
- hodlChan := make(chan interface{}) - resolution, err := ctx.registry.NotifyExitHopHtlc( - testInvoicePaymentHash, amt, testHtlcExpiry, testCurrentHeight, - getCircuitKey(0), hodlChan, testPayload, - ) - if err != nil { - t.Fatal("expected settlement of a canceled invoice to succeed") - } - failResolution, ok := resolution.(*HtlcFailResolution) - if !ok { - t.Fatalf("expected fail resolution, got: %T", - resolution) - } - if failResolution.AcceptHeight != testCurrentHeight { - t.Fatalf("expected acceptHeight %v, but got %v", - testCurrentHeight, failResolution.AcceptHeight) - } - - // If the invoice has been deleted (or not present) then we expect the - // outcome to be ResultInvoiceNotFound instead of when the invoice is - // in our database in which case we expect ResultInvoiceAlreadyCanceled. - if gc { - require.Equal(t, failResolution.Outcome, ResultInvoiceNotFound) - } else { - require.Equal(t, - failResolution.Outcome, - ResultInvoiceAlreadyCanceled, - ) - } -} - -// TestCancelInvoice tests cancelation of an invoice and related notifications. -func TestCancelInvoice(t *testing.T) { - // Test cancellation both with garbage collection (meaning that canceled - // invoice will be deleted) and without (meain it'll be kept). - t.Run("garbage collect", func(t *testing.T) { - testCancelInvoice(t, true) - }) - - t.Run("no garbage collect", func(t *testing.T) { - testCancelInvoice(t, false) - }) -} - -// TestSettleHoldInvoice tests settling of a hold invoice and related -// notifications. -func TestSettleHoldInvoice(t *testing.T) { - defer timeout()() - - cdb, cleanup, err := newTestChannelDB(clock.NewTestClock(time.Time{})) - if err != nil { - t.Fatal(err) - } - defer cleanup() - - // Instantiate and start the invoice ctx.registry. 
- cfg := RegistryConfig{ - FinalCltvRejectDelta: testFinalCltvRejectDelta, - Clock: clock.NewTestClock(testTime), - } - registry := NewRegistry(cdb, NewInvoiceExpiryWatcher(cfg.Clock), &cfg) - - err = registry.Start() - if err != nil { - t.Fatal(err) - } - defer registry.Stop() - - allSubscriptions, err := registry.SubscribeNotifications(0, 0) - assert.Nil(t, err) - defer allSubscriptions.Cancel() - - // Subscribe to the not yet existing invoice. - subscription, err := registry.SubscribeSingleInvoice(testInvoicePaymentHash) - if err != nil { - t.Fatal(err) - } - defer subscription.Cancel() - - if subscription.invoiceRef.PayHash() != testInvoicePaymentHash { - t.Fatalf("expected subscription for provided hash") - } - - // Add the invoice. - _, err = registry.AddInvoice(testHodlInvoice, testInvoicePaymentHash) - if err != nil { - t.Fatal(err) - } - - // We expect the open state to be sent to the single invoice subscriber. - update := <-subscription.Updates - if update.State != channeldb.ContractOpen { - t.Fatalf("expected state ContractOpen, but got %v", - update.State) - } - - // We expect a new invoice notification to be sent out. - newInvoice := <-allSubscriptions.NewInvoices - if newInvoice.State != channeldb.ContractOpen { - t.Fatalf("expected state ContractOpen, but got %v", - newInvoice.State) - } - - // Use slightly higher amount for accept/settle. - amtPaid := lnwire.MilliSatoshi(100500) - - hodlChan := make(chan interface{}, 1) - - // NotifyExitHopHtlc without a preimage present in the invoice registry - // should be possible. - resolution, err := registry.NotifyExitHopHtlc( - testInvoicePaymentHash, amtPaid, testHtlcExpiry, testCurrentHeight, - getCircuitKey(0), hodlChan, testPayload, - ) - if err != nil { - t.Fatalf("expected settle to succeed but got %v", err) - } - if resolution != nil { - t.Fatalf("expected htlc to be held") - } - - // Test idempotency. 
- resolution, err = registry.NotifyExitHopHtlc( - testInvoicePaymentHash, amtPaid, testHtlcExpiry, testCurrentHeight, - getCircuitKey(0), hodlChan, testPayload, - ) - if err != nil { - t.Fatalf("expected settle to succeed but got %v", err) - } - if resolution != nil { - t.Fatalf("expected htlc to be held") - } - - // Test replay at a higher height. We expect the same result because it - // is a replay. - resolution, err = registry.NotifyExitHopHtlc( - testInvoicePaymentHash, amtPaid, testHtlcExpiry, testCurrentHeight+10, - getCircuitKey(0), hodlChan, testPayload, - ) - if err != nil { - t.Fatalf("expected settle to succeed but got %v", err) - } - if resolution != nil { - t.Fatalf("expected htlc to be held") - } - - // Test a new htlc coming in that doesn't meet the final cltv delta - // requirement. It should be rejected. - resolution, err = registry.NotifyExitHopHtlc( - testInvoicePaymentHash, amtPaid, 1, testCurrentHeight, - getCircuitKey(1), hodlChan, testPayload, - ) - if err != nil { - t.Fatalf("expected settle to succeed but got %v", err) - } - failResolution, ok := resolution.(*HtlcFailResolution) - if !ok { - t.Fatalf("expected fail resolution, got: %T", - resolution) - } - if failResolution.Outcome != ResultExpiryTooSoon { - t.Fatalf("expected expiry too soon, got: %v", - failResolution.Outcome) - } - - // We expect the accepted state to be sent to the single invoice - // subscriber. For all invoice subscribers, we don't expect an update. - // Those only get notified on settle. - update = <-subscription.Updates - if update.State != channeldb.ContractAccepted { - t.Fatalf("expected state ContractAccepted, but got %v", - update.State) - } - if update.AmtPaid != amtPaid { - t.Fatal("invoice AmtPaid incorrect") - } - - // Settling with preimage should succeed. 
- err = registry.SettleHodlInvoice(testInvoicePreimage) - if err != nil { - t.Fatal("expected set preimage to succeed") - } - - htlcResolution := (<-hodlChan).(HtlcResolution) - settleResolution, ok := htlcResolution.(*HtlcSettleResolution) - if !ok { - t.Fatalf("expected settle resolution, got: %T", - htlcResolution) - } - if settleResolution.Preimage != testInvoicePreimage { - t.Fatal("unexpected preimage in hodl resolution") - } - if settleResolution.AcceptHeight != testCurrentHeight { - t.Fatalf("expected acceptHeight %v, but got %v", - testCurrentHeight, settleResolution.AcceptHeight) - } - if settleResolution.Outcome != ResultSettled { - t.Fatalf("expected result settled, got: %v", - settleResolution.Outcome) - } - - // We expect a settled notification to be sent out for both all and - // single invoice subscribers. - settledInvoice := <-allSubscriptions.SettledInvoices - if settledInvoice.State != channeldb.ContractSettled { - t.Fatalf("expected state ContractSettled, but got %v", - settledInvoice.State) - } - if settledInvoice.AmtPaid != amtPaid { - t.Fatalf("expected amount to be %v, but got %v", - amtPaid, settledInvoice.AmtPaid) - } - - update = <-subscription.Updates - if update.State != channeldb.ContractSettled { - t.Fatalf("expected state ContractSettled, but got %v", - update.State) - } - - // Idempotency. - err = registry.SettleHodlInvoice(testInvoicePreimage) - if !channeldb.ErrInvoiceAlreadySettled.Is(err) { - t.Fatalf("expected ErrInvoiceAlreadySettled but got %v", err) - } - - // Try to cancel. - err = registry.CancelInvoice(testInvoicePaymentHash) - if err == nil { - t.Fatal("expected cancelation of a settled invoice to fail") - } -} - -// TestCancelHoldInvoice tests canceling of a hold invoice and related -// notifications. 
-func TestCancelHoldInvoice(t *testing.T) { - defer timeout()() - - cdb, cleanup, err := newTestChannelDB(clock.NewTestClock(time.Time{})) - if err != nil { - t.Fatal(err) - } - defer cleanup() - - // Instantiate and start the invoice ctx.registry. - cfg := RegistryConfig{ - FinalCltvRejectDelta: testFinalCltvRejectDelta, - Clock: clock.NewTestClock(testTime), - } - registry := NewRegistry(cdb, NewInvoiceExpiryWatcher(cfg.Clock), &cfg) - - err = registry.Start() - if err != nil { - t.Fatal(err) - } - defer registry.Stop() - - // Add the invoice. - _, err = registry.AddInvoice(testHodlInvoice, testInvoicePaymentHash) - if err != nil { - t.Fatal(err) - } - - amtPaid := lnwire.MilliSatoshi(100000) - hodlChan := make(chan interface{}, 1) - - // NotifyExitHopHtlc without a preimage present in the invoice registry - // should be possible. - resolution, err := registry.NotifyExitHopHtlc( - testInvoicePaymentHash, amtPaid, testHtlcExpiry, testCurrentHeight, - getCircuitKey(0), hodlChan, testPayload, - ) - if err != nil { - t.Fatalf("expected settle to succeed but got %v", err) - } - if resolution != nil { - t.Fatalf("expected htlc to be held") - } - - // Cancel invoice. - err = registry.CancelInvoice(testInvoicePaymentHash) - if err != nil { - t.Fatal("cancel invoice failed") - } - - htlcResolution := (<-hodlChan).(HtlcResolution) - _, ok := htlcResolution.(*HtlcFailResolution) - if !ok { - t.Fatalf("expected fail resolution, got: %T", - htlcResolution) - } - - // Offering the same htlc again at a higher height should still result - // in a rejection. The accept height is expected to be the original - // accept height. 
- resolution, err = registry.NotifyExitHopHtlc( - testInvoicePaymentHash, amtPaid, testHtlcExpiry, testCurrentHeight+1, - getCircuitKey(0), hodlChan, testPayload, - ) - if err != nil { - t.Fatalf("expected settle to succeed but got %v", err) - } - failResolution, ok := resolution.(*HtlcFailResolution) - if !ok { - t.Fatalf("expected fail resolution, got: %T", - resolution) - } - if failResolution.AcceptHeight != testCurrentHeight { - t.Fatalf("expected acceptHeight %v, but got %v", - testCurrentHeight, failResolution.AcceptHeight) - } - if failResolution.Outcome != ResultReplayToCanceled { - t.Fatalf("expected replay to canceled, got %v", - failResolution.Outcome) - } -} - -// TestUnknownInvoice tests that invoice registry returns an error when the -// invoice is unknown. This is to guard against returning a cancel htlc -// resolution for forwarded htlcs. In the link, NotifyExitHopHtlc is only called -// if we are the exit hop, but in htlcIncomingContestResolver it is called with -// forwarded htlc hashes as well. -func TestUnknownInvoice(t *testing.T) { - ctx := newTestContext(t) - defer ctx.cleanup() - - // Notify arrival of a new htlc paying to this invoice. This should - // succeed. - hodlChan := make(chan interface{}) - amt := lnwire.MilliSatoshi(100000) - resolution, err := ctx.registry.NotifyExitHopHtlc( - testInvoicePaymentHash, amt, testHtlcExpiry, testCurrentHeight, - getCircuitKey(0), hodlChan, testPayload, - ) - if err != nil { - t.Fatal("unexpected error") - } - failResolution, ok := resolution.(*HtlcFailResolution) - if !ok { - t.Fatalf("expected fail resolution, got: %T", - resolution) - } - if failResolution.Outcome != ResultInvoiceNotFound { - t.Fatalf("expected ResultInvoiceNotFound, got: %v", - failResolution.Outcome) - } -} - -// TestKeySend tests receiving a spontaneous payment with and without keysend -// enabled. 
-func TestKeySend(t *testing.T) { - t.Run("enabled", func(t *testing.T) { - testKeySend(t, true) - }) - t.Run("disabled", func(t *testing.T) { - testKeySend(t, false) - }) -} - -// testKeySend is the inner test function that tests keysend for a particular -// enabled state on the receiver end. -func testKeySend(t *testing.T, keySendEnabled bool) { - defer timeout()() - - ctx := newTestContext(t) - defer ctx.cleanup() - - ctx.registry.cfg.AcceptKeySend = keySendEnabled - - allSubscriptions, err := ctx.registry.SubscribeNotifications(0, 0) - assert.Nil(t, err) - defer allSubscriptions.Cancel() - - hodlChan := make(chan interface{}, 1) - - amt := lnwire.MilliSatoshi(1000) - expiry := uint32(testCurrentHeight + 20) - - // Create key for keysend. - preimage := lntypes.Preimage{1, 2, 3} - hash := preimage.Hash() - - // Try to settle invoice with an invalid keysend htlc. - invalidKeySendPayload := &mockPayload{ - customRecords: map[uint64][]byte{ - record.KeySendType: {1, 2, 3}, - }, - } - - resolution, err := ctx.registry.NotifyExitHopHtlc( - hash, amt, expiry, - testCurrentHeight, getCircuitKey(10), hodlChan, - invalidKeySendPayload, - ) - if err != nil { - t.Fatal(err) - } - failResolution, ok := resolution.(*HtlcFailResolution) - if !ok { - t.Fatalf("expected fail resolution, got: %T", - resolution) - } - - switch { - case !keySendEnabled && failResolution.Outcome != ResultInvoiceNotFound: - t.Fatal("expected invoice not found outcome") - - case keySendEnabled && failResolution.Outcome != ResultKeySendError: - t.Fatal("expected keysend error") - } - - // Try to settle invoice with a valid keysend htlc. - keySendPayload := &mockPayload{ - customRecords: map[uint64][]byte{ - record.KeySendType: preimage[:], - }, - } - - resolution, err = ctx.registry.NotifyExitHopHtlc( - hash, amt, expiry, - testCurrentHeight, getCircuitKey(10), hodlChan, keySendPayload, - ) - if err != nil { - t.Fatal(err) - } - - // Expect a cancel resolution if keysend is disabled. 
- if !keySendEnabled { - failResolution, ok = resolution.(*HtlcFailResolution) - if !ok { - t.Fatalf("expected fail resolution, got: %T", - resolution) - } - if failResolution.Outcome != ResultInvoiceNotFound { - t.Fatal("expected keysend payment not to be accepted") - } - return - } - - checkResolution := func(res HtlcResolution, pimg lntypes.Preimage) { - // Otherwise we expect no error and a settle res for the htlc. - settleResolution, ok := res.(*HtlcSettleResolution) - assert.True(t, ok) - assert.Equal(t, settleResolution.Preimage, pimg) - } - checkSubscription := func() { - // We expect a new invoice notification to be sent out. - newInvoice := <-allSubscriptions.NewInvoices - assert.Equal(t, newInvoice.State, channeldb.ContractOpen) - - // We expect a settled notification to be sent out. - settledInvoice := <-allSubscriptions.SettledInvoices - assert.Equal(t, settledInvoice.State, channeldb.ContractSettled) - } - - checkResolution(resolution, preimage) - checkSubscription() - - // Replay the same keysend payment. We expect an identical resolution, - // but no event should be generated. - resolution, err = ctx.registry.NotifyExitHopHtlc( - hash, amt, expiry, - testCurrentHeight, getCircuitKey(10), hodlChan, keySendPayload, - ) - assert.Nil(t, err) - checkResolution(resolution, preimage) - - select { - case <-allSubscriptions.NewInvoices: - t.Fatalf("replayed keysend should not generate event") - case <-time.After(time.Second): - } - - // Finally, test that we can properly fulfill a second keysend payment - // with a unique preiamge. 
- preimage2 := lntypes.Preimage{1, 2, 3, 4} - hash2 := preimage2.Hash() - - keySendPayload2 := &mockPayload{ - customRecords: map[uint64][]byte{ - record.KeySendType: preimage2[:], - }, - } - - resolution, err = ctx.registry.NotifyExitHopHtlc( - hash2, amt, expiry, - testCurrentHeight, getCircuitKey(20), hodlChan, keySendPayload2, - ) - assert.Nil(t, err) - - checkResolution(resolution, preimage2) - checkSubscription() -} - -// TestHoldKeysend tests receiving a spontaneous payment that is held. -func TestHoldKeysend(t *testing.T) { - t.Run("settle", func(t *testing.T) { - testHoldKeysend(t, false) - }) - t.Run("timeout", func(t *testing.T) { - testHoldKeysend(t, true) - }) -} - -// testHoldKeysend is the inner test function that tests hold-keysend. -func testHoldKeysend(t *testing.T, timeoutKeysend bool) { - defer timeout()() - - const holdDuration = time.Minute - - ctx := newTestContext(t) - defer ctx.cleanup() - - ctx.registry.cfg.AcceptKeySend = true - ctx.registry.cfg.KeysendHoldTime = holdDuration - - allSubscriptions, err := ctx.registry.SubscribeNotifications(0, 0) - assert.Nil(t, err) - defer allSubscriptions.Cancel() - - hodlChan := make(chan interface{}, 1) - - amt := lnwire.MilliSatoshi(1000) - expiry := uint32(testCurrentHeight + 20) - - // Create key for keysend. - preimage := lntypes.Preimage{1, 2, 3} - hash := preimage.Hash() - - // Try to settle invoice with a valid keysend htlc. - keysendPayload := &mockPayload{ - customRecords: map[uint64][]byte{ - record.KeySendType: preimage[:], - }, - } - - resolution, err := ctx.registry.NotifyExitHopHtlc( - hash, amt, expiry, - testCurrentHeight, getCircuitKey(10), hodlChan, keysendPayload, - ) - if err != nil { - t.Fatal(err) - } - - // No immediate resolution is expected. - require.Nil(t, resolution, "expected hold resolution") - - // We expect a new invoice notification to be sent out. 
- newInvoice := <-allSubscriptions.NewInvoices - if newInvoice.State != channeldb.ContractOpen { - t.Fatalf("expected state ContractOpen, but got %v", - newInvoice.State) - } - - // We expect no further invoice notifications yet (on the all invoices - // subscription). - select { - case <-allSubscriptions.NewInvoices: - t.Fatalf("no invoice update expected") - case <-time.After(100 * time.Millisecond): - } - - if timeoutKeysend { - // Advance the clock to just past the hold duration. - ctx.clock.SetTime(ctx.clock.Now().Add( - holdDuration + time.Millisecond), - ) - - // Expect the keysend payment to be failed. - res := <-hodlChan - failResolution, ok := res.(*HtlcFailResolution) - require.Truef( - t, ok, "expected fail resolution, got: %T", - resolution, - ) - require.Equal( - t, ResultCanceled, failResolution.Outcome, - "expected keysend payment to be failed", - ) - - return - } - - // Settle keysend payment manually. - require.Nil(t, ctx.registry.SettleHodlInvoice( - *newInvoice.Terms.PaymentPreimage, - )) - - // We expect a settled notification to be sent out. - settledInvoice := <-allSubscriptions.SettledInvoices - assert.Equal(t, settledInvoice.State, channeldb.ContractSettled) -} - -// TestMppPayment tests settling of an invoice with multiple partial payments. -// It covers the case where there is a mpp timeout before the whole invoice is -// paid and the case where the invoice is settled in time. -func TestMppPayment(t *testing.T) { - defer timeout()() - - ctx := newTestContext(t) - defer ctx.cleanup() - - // Add the invoice. - _, err := ctx.registry.AddInvoice(testInvoice, testInvoicePaymentHash) - if err != nil { - t.Fatal(err) - } - - mppPayload := &mockPayload{ - mpp: record.NewMPP(testInvoiceAmt, [32]byte{}), - } - - // Send htlc 1. 
- hodlChan1 := make(chan interface{}, 1) - resolution, err := ctx.registry.NotifyExitHopHtlc( - testInvoicePaymentHash, testInvoice.Terms.Value/2, - testHtlcExpiry, - testCurrentHeight, getCircuitKey(10), hodlChan1, mppPayload, - ) - if err != nil { - t.Fatal(err) - } - if resolution != nil { - t.Fatal("expected no direct resolution") - } - - // Simulate mpp timeout releasing htlc 1. - ctx.clock.SetTime(testTime.Add(30 * time.Second)) - - htlcResolution := (<-hodlChan1).(HtlcResolution) - failResolution, ok := htlcResolution.(*HtlcFailResolution) - if !ok { - t.Fatalf("expected fail resolution, got: %T", - resolution) - } - if failResolution.Outcome != ResultMppTimeout { - t.Fatalf("expected mpp timeout, got: %v", - failResolution.Outcome) - } - - // Send htlc 2. - hodlChan2 := make(chan interface{}, 1) - resolution, err = ctx.registry.NotifyExitHopHtlc( - testInvoicePaymentHash, testInvoice.Terms.Value/2, - testHtlcExpiry, - testCurrentHeight, getCircuitKey(11), hodlChan2, mppPayload, - ) - if err != nil { - t.Fatal(err) - } - if resolution != nil { - t.Fatal("expected no direct resolution") - } - - // Send htlc 3. - hodlChan3 := make(chan interface{}, 1) - resolution, err = ctx.registry.NotifyExitHopHtlc( - testInvoicePaymentHash, testInvoice.Terms.Value/2, - testHtlcExpiry, - testCurrentHeight, getCircuitKey(12), hodlChan3, mppPayload, - ) - if err != nil { - t.Fatal(err) - } - settleResolution, ok := resolution.(*HtlcSettleResolution) - if !ok { - t.Fatalf("expected settle resolution, got: %T", - htlcResolution) - } - if settleResolution.Outcome != ResultSettled { - t.Fatalf("expected result settled, got: %v", - settleResolution.Outcome) - } - - // Check that settled amount is equal to the sum of values of the htlcs - // 2 and 3. 
- inv, err := ctx.registry.LookupInvoice(testInvoicePaymentHash) - if err != nil { - t.Fatal(err) - } - if inv.State != channeldb.ContractSettled { - t.Fatal("expected invoice to be settled") - } - if inv.AmtPaid != testInvoice.Terms.Value { - t.Fatalf("amount incorrect, expected %v but got %v", - testInvoice.Terms.Value, inv.AmtPaid) - } -} - -// Tests that invoices are canceled after expiration. -// TODO(cjd): DISABLED TEST -- needs investigation -func _TestInvoiceExpiryWithRegistry(t *testing.T) { - t.Parallel() - - cdb, cleanup, err := newTestChannelDB(clock.NewTestClock(time.Time{})) - defer cleanup() - - if err != nil { - t.Fatal(err) - } - - testClock := clock.NewTestClock(testTime) - - cfg := RegistryConfig{ - FinalCltvRejectDelta: testFinalCltvRejectDelta, - Clock: testClock, - } - - expiryWatcher := NewInvoiceExpiryWatcher(cfg.Clock) - registry := NewRegistry(cdb, expiryWatcher, &cfg) - - // First prefill the Channel DB with some pre-existing invoices, - // half of them still pending, half of them expired. - const numExpired = 5 - const numPending = 5 - existingInvoices := generateInvoiceExpiryTestData( - t, testTime, 0, numExpired, numPending, - ) - - var expectedCancellations []lntypes.Hash - - for paymentHash, expiredInvoice := range existingInvoices.expiredInvoices { - if _, err := cdb.AddInvoice(expiredInvoice, paymentHash); err != nil { - t.Fatalf("cannot add invoice to channel db: %v", err) - } - expectedCancellations = append(expectedCancellations, paymentHash) - } - - for paymentHash, pendingInvoice := range existingInvoices.pendingInvoices { - if _, err := cdb.AddInvoice(pendingInvoice, paymentHash); err != nil { - t.Fatalf("cannot add invoice to channel db: %v", err) - } - } - - if err = registry.Start(); err != nil { - t.Fatalf("cannot start registry: %v", err) - } - - // Now generate pending and invoices and add them to the registry while - // it is up and running. We'll manipulate the clock to let them expire. 
- newInvoices := generateInvoiceExpiryTestData( - t, testTime, numExpired+numPending, 0, numPending, - ) - - var invoicesThatWillCancel []lntypes.Hash - for paymentHash, pendingInvoice := range newInvoices.pendingInvoices { - _, err := registry.AddInvoice(pendingInvoice, paymentHash) - invoicesThatWillCancel = append(invoicesThatWillCancel, paymentHash) - if err != nil { - t.Fatal(err) - } - } - - // Check that they are really not canceled until before the clock is - // advanced. - for i := range invoicesThatWillCancel { - invoice, err := registry.LookupInvoice(invoicesThatWillCancel[i]) - if err != nil { - t.Fatalf("cannot find invoice: %v", err) - } - - if invoice.State == channeldb.ContractCanceled { - t.Fatalf("expected pending invoice, got canceled") - } - } - - // Fwd time 1 day. - testClock.SetTime(testTime.Add(24 * time.Hour)) - - // Give some time to the watcher to cancel everything. - time.Sleep(500 * time.Millisecond) - registry.Stop() - - // Create the expected cancellation set before the final check. - expectedCancellations = append( - expectedCancellations, invoicesThatWillCancel..., - ) - - // Retrospectively check that all invoices that were expected to be canceled - // are indeed canceled. - for i := range expectedCancellations { - invoice, err := registry.LookupInvoice(expectedCancellations[i]) - if err != nil { - t.Fatalf("cannot find invoice: %v", err) - } - - if invoice.State != channeldb.ContractCanceled { - t.Fatalf("expected canceled invoice, got: %v", invoice.State) - } - } -} - -// TestOldInvoiceRemovalOnStart tests that we'll attempt to remove old canceled -// invoices upon start while keeping all settled ones. 
-func TestOldInvoiceRemovalOnStart(t *testing.T) { - t.Parallel() - - testClock := clock.NewTestClock(testTime) - cdb, cleanup, err := newTestChannelDB(testClock) - defer cleanup() - - util.RequireNoErr(t, err) - - cfg := RegistryConfig{ - FinalCltvRejectDelta: testFinalCltvRejectDelta, - Clock: testClock, - GcCanceledInvoicesOnStartup: true, - } - - expiryWatcher := NewInvoiceExpiryWatcher(cfg.Clock) - registry := NewRegistry(cdb, expiryWatcher, &cfg) - - // First prefill the Channel DB with some pre-existing expired invoices. - const numExpired = 5 - const numPending = 0 - existingInvoices := generateInvoiceExpiryTestData( - t, testTime, 0, numExpired, numPending, - ) - - i := 0 - for paymentHash, invoice := range existingInvoices.expiredInvoices { - // Mark half of the invoices as settled, the other hald as - // canceled. - if i%2 == 0 { - invoice.State = channeldb.ContractSettled - } else { - invoice.State = channeldb.ContractCanceled - } - - _, err := cdb.AddInvoice(invoice, paymentHash) - util.RequireNoErr(t, err) - i++ - } - - // Collect all settled invoices for our expectation set. - var expected []channeldb.Invoice - - // Perform a scan query to collect all invoices. - query := channeldb.InvoiceQuery{ - IndexOffset: 0, - NumMaxInvoices: math.MaxUint64, - } - - response, err := cdb.QueryInvoices(query) - util.RequireNoErr(t, err) - - // Save all settled invoices for our expectation set. - for _, invoice := range response.Invoices { - if invoice.State == channeldb.ContractSettled { - expected = append(expected, invoice) - } - } - - // Start the registry which should collect and delete all canceled - // invoices upon start. - err = registry.Start() - util.RequireNoErr(t, err, "cannot start the registry") - - // Perform a scan query to collect all invoices. - response, err = cdb.QueryInvoices(query) - util.RequireNoErr(t, err) - - // Check that we really only kept the settled invoices after the - // registry start. 
- require.Equal(t, expected, response.Invoices) -} diff --git a/lnd/invoices/resolution.go b/lnd/invoices/resolution.go deleted file mode 100644 index ca993d79..00000000 --- a/lnd/invoices/resolution.go +++ /dev/null @@ -1,125 +0,0 @@ -package invoices - -import ( - "time" - - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/lntypes" -) - -// HtlcResolution describes how an htlc should be resolved. -type HtlcResolution interface { - // CircuitKey returns the circuit key for the htlc that we have a - // resolution for. - CircuitKey() channeldb.CircuitKey -} - -// HtlcFailResolution is an implementation of the HtlcResolution interface -// which is returned when a htlc is failed. -type HtlcFailResolution struct { - // circuitKey is the key of the htlc for which we have a resolution. - circuitKey channeldb.CircuitKey - - // AcceptHeight is the original height at which the htlc was accepted. - AcceptHeight int32 - - // Outcome indicates the outcome of the invoice registry update. - Outcome FailResolutionResult -} - -// NewFailResolution returns a htlc failure resolution. -func NewFailResolution(key channeldb.CircuitKey, - acceptHeight int32, outcome FailResolutionResult) *HtlcFailResolution { - - return &HtlcFailResolution{ - circuitKey: key, - AcceptHeight: acceptHeight, - Outcome: outcome, - } -} - -// CircuitKey returns the circuit key for the htlc that we have a -// resolution for. -// -// Note: it is part of the HtlcResolution interface. -func (f *HtlcFailResolution) CircuitKey() channeldb.CircuitKey { - return f.circuitKey -} - -// HtlcSettleResolution is an implementation of the HtlcResolution interface -// which is returned when a htlc is settled. -type HtlcSettleResolution struct { - // Preimage is the htlc preimage. Its value is nil in case of a cancel. - Preimage lntypes.Preimage - - // circuitKey is the key of the htlc for which we have a resolution. 
- circuitKey channeldb.CircuitKey - - // acceptHeight is the original height at which the htlc was accepted. - AcceptHeight int32 - - // Outcome indicates the outcome of the invoice registry update. - Outcome SettleResolutionResult -} - -// NewSettleResolution returns a htlc resolution which is associated with a -// settle. -func NewSettleResolution(preimage lntypes.Preimage, - key channeldb.CircuitKey, acceptHeight int32, - outcome SettleResolutionResult) *HtlcSettleResolution { - - return &HtlcSettleResolution{ - Preimage: preimage, - circuitKey: key, - AcceptHeight: acceptHeight, - Outcome: outcome, - } -} - -// CircuitKey returns the circuit key for the htlc that we have a -// resolution for. -// -// Note: it is part of the HtlcResolution interface. -func (s *HtlcSettleResolution) CircuitKey() channeldb.CircuitKey { - return s.circuitKey -} - -// htlcAcceptResolution is an implementation of the HtlcResolution interface -// which is returned when a htlc is accepted. This struct is not exported -// because the codebase uses a nil resolution to indicate that a htlc was -// accepted. This struct is used internally in the invoice registry to -// surface accept resolution results. When an invoice update returns an -// acceptResolution, a nil resolution should be surfaced. -type htlcAcceptResolution struct { - // circuitKey is the key of the htlc for which we have a resolution. - circuitKey channeldb.CircuitKey - - // autoRelease signals that the htlc should be automatically released - // after a timeout. - autoRelease bool - - // acceptTime is the time at which this htlc was accepted. - acceptTime time.Time - - // outcome indicates the outcome of the invoice registry update. - outcome acceptResolutionResult -} - -// newAcceptResolution returns a htlc resolution which is associated with a -// htlc accept. 
-func newAcceptResolution(key channeldb.CircuitKey, - outcome acceptResolutionResult) *htlcAcceptResolution { - - return &htlcAcceptResolution{ - circuitKey: key, - outcome: outcome, - } -} - -// CircuitKey returns the circuit key for the htlc that we have a -// resolution for. -// -// Note: it is part of the HtlcResolution interface. -func (a *htlcAcceptResolution) CircuitKey() channeldb.CircuitKey { - return a.circuitKey -} diff --git a/lnd/invoices/resolution_result.go b/lnd/invoices/resolution_result.go deleted file mode 100644 index b979d3ac..00000000 --- a/lnd/invoices/resolution_result.go +++ /dev/null @@ -1,207 +0,0 @@ -package invoices - -// acceptResolutionResult provides metadata which about a htlc that was -// accepted by the registry. -type acceptResolutionResult uint8 - -const ( - resultInvalidAccept acceptResolutionResult = iota - - // resultReplayToAccepted is returned when we replay an accepted - // invoice. - resultReplayToAccepted - - // resultDuplicateToAccepted is returned when we accept a duplicate - // htlc. - resultDuplicateToAccepted - - // resultAccepted is returned when we accept a hodl invoice. - resultAccepted - - // resultPartialAccepted is returned when we have partially received - // payment. - resultPartialAccepted -) - -// String returns a string representation of the result. -func (a acceptResolutionResult) String() string { - switch a { - case resultInvalidAccept: - return "invalid accept result" - - case resultReplayToAccepted: - return "replayed htlc to accepted invoice" - - case resultDuplicateToAccepted: - return "accepting duplicate payment to accepted invoice" - - case resultAccepted: - return "accepted" - - case resultPartialAccepted: - return "partial payment accepted" - - default: - return "unknown accept resolution result" - } -} - -// FailResolutionResult provides metadata about a htlc that was failed by -// the registry. It can be used to take custom actions on resolution of the -// htlc. 
-type FailResolutionResult uint8 - -const ( - resultInvalidFailure FailResolutionResult = iota - - // ResultReplayToCanceled is returned when we replay a canceled invoice. - ResultReplayToCanceled - - // ResultInvoiceAlreadyCanceled is returned when trying to pay an - // invoice that is already canceled. - ResultInvoiceAlreadyCanceled - - // ResultAmountTooLow is returned when an invoice is underpaid. - ResultAmountTooLow - - // ResultExpiryTooSoon is returned when we do not accept an invoice - // payment because it expires too soon. - ResultExpiryTooSoon - - // ResultCanceled is returned when we cancel an invoice and its - // associated htlcs. - ResultCanceled - - // ResultInvoiceNotOpen is returned when a mpp invoice is not open. - ResultInvoiceNotOpen - - // ResultMppTimeout is returned when an invoice paid with multiple - // partial payments times out before it is fully paid. - ResultMppTimeout - - // ResultAddressMismatch is returned when the payment address for a mpp - // invoice does not match. - ResultAddressMismatch - - // ResultHtlcSetTotalMismatch is returned when the amount paid by a - // htlc does not match its set total. - ResultHtlcSetTotalMismatch - - // ResultHtlcSetTotalTooLow is returned when a mpp set total is too low - // for an invoice. - ResultHtlcSetTotalTooLow - - // ResultHtlcSetOverpayment is returned when a mpp set is overpaid. - ResultHtlcSetOverpayment - - // ResultInvoiceNotFound is returned when an attempt is made to pay an - // invoice that is unknown to us. - ResultInvoiceNotFound - - // ResultKeySendError is returned when we receive invalid keysend - // parameters. - ResultKeySendError - - // ResultMppInProgress is returned when we are busy receiving a mpp - // payment. - ResultMppInProgress -) - -// String returns a string representation of the result. -func (f FailResolutionResult) String() string { - return f.FailureString() -} - -// FailureString returns a string representation of the result. 
-// -// Note: it is part of the FailureDetail interface. -func (f FailResolutionResult) FailureString() string { - switch f { - case resultInvalidFailure: - return "invalid failure result" - - case ResultReplayToCanceled: - return "replayed htlc to canceled invoice" - - case ResultInvoiceAlreadyCanceled: - return "invoice already canceled" - - case ResultAmountTooLow: - return "amount too low" - - case ResultExpiryTooSoon: - return "expiry too soon" - - case ResultCanceled: - return "canceled" - - case ResultInvoiceNotOpen: - return "invoice no longer open" - - case ResultMppTimeout: - return "mpp timeout" - - case ResultAddressMismatch: - return "payment address mismatch" - - case ResultHtlcSetTotalMismatch: - return "htlc total amt doesn't match set total" - - case ResultHtlcSetTotalTooLow: - return "set total too low for invoice" - - case ResultHtlcSetOverpayment: - return "mpp is overpaying set total" - - case ResultInvoiceNotFound: - return "invoice not found" - - case ResultKeySendError: - return "invalid keysend parameters" - - case ResultMppInProgress: - return "mpp reception in progress" - - default: - return "unknown failure resolution result" - } -} - -// SettleResolutionResult provides metadata which about a htlc that was failed -// by the registry. It can be used to take custom actions on resolution of the -// htlc. -type SettleResolutionResult uint8 - -const ( - resultInvalidSettle SettleResolutionResult = iota - - // ResultSettled is returned when we settle an invoice. - ResultSettled - - // ResultReplayToSettled is returned when we replay a settled invoice. - ResultReplayToSettled - - // ResultDuplicateToSettled is returned when we settle an invoice which - // has already been settled at least once. - ResultDuplicateToSettled -) - -// String returns a string representation of the result. 
-func (s SettleResolutionResult) String() string { - switch s { - case resultInvalidSettle: - return "invalid settle result" - - case ResultSettled: - return "settled" - - case ResultReplayToSettled: - return "replayed htlc to settled invoice" - - case ResultDuplicateToSettled: - return "accepting duplicate payment to settled invoice" - - default: - return "unknown settle resolution result" - } -} diff --git a/lnd/invoices/test_utils_test.go b/lnd/invoices/test_utils_test.go deleted file mode 100644 index fb8c1fcb..00000000 --- a/lnd/invoices/test_utils_test.go +++ /dev/null @@ -1,297 +0,0 @@ -package invoices - -import ( - "crypto/rand" - "encoding/binary" - "fmt" - "io/ioutil" - "os" - "runtime/pprof" - "testing" - "time" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/chaincfg" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/clock" - "github.com/pkt-cash/pktd/lnd/lntypes" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/record" - "github.com/pkt-cash/pktd/lnd/zpay32" -) - -type mockPayload struct { - mpp *record.MPP - customRecords record.CustomSet -} - -func (p *mockPayload) MultiPath() *record.MPP { - return p.mpp -} - -func (p *mockPayload) CustomRecords() record.CustomSet { - // This function should always return a map instance, but for mock - // configuration we do accept nil. 
- if p.customRecords == nil { - return make(record.CustomSet) - } - - return p.customRecords -} - -var ( - testTimeout = 5 * time.Second - - testTime = time.Date(2018, time.February, 2, 14, 0, 0, 0, time.UTC) - - testInvoicePreimage = lntypes.Preimage{1} - - testInvoicePaymentHash = testInvoicePreimage.Hash() - - testHtlcExpiry = uint32(5) - - testInvoiceCltvDelta = uint32(4) - - testFinalCltvRejectDelta = int32(4) - - testCurrentHeight = int32(1) - - testPrivKeyBytes, _ = util.DecodeHex( - "e126f68f7eafcc8b74f54d269fe206be715000f94dac067d1c04a8ca3b2db734") - - testPrivKey, _ = btcec.PrivKeyFromBytes( - btcec.S256(), testPrivKeyBytes) - - testInvoiceDescription = "coffee" - - testInvoiceAmount = lnwire.MilliSatoshi(100000) - - testNetParams = &chaincfg.MainNetParams - - testMessageSigner = zpay32.MessageSigner{ - SignCompact: func(hash []byte) ([]byte, er.R) { - sig, err := btcec.SignCompact(btcec.S256(), testPrivKey, hash, true) - if err != nil { - return nil, er.Errorf("can't sign the message: %v", err) - } - return sig, nil - }, - } - - testFeatures = lnwire.NewFeatureVector( - nil, lnwire.Features, - ) - - testPayload = &mockPayload{} - - testInvoiceCreationDate = testTime -) - -var ( - testInvoiceAmt = lnwire.MilliSatoshi(100000) - testInvoice = &channeldb.Invoice{ - Terms: channeldb.ContractTerm{ - PaymentPreimage: &testInvoicePreimage, - Value: testInvoiceAmt, - Expiry: time.Hour, - Features: testFeatures, - }, - CreationDate: testInvoiceCreationDate, - } - - testHodlInvoice = &channeldb.Invoice{ - Terms: channeldb.ContractTerm{ - Value: testInvoiceAmt, - Expiry: time.Hour, - Features: testFeatures, - }, - CreationDate: testInvoiceCreationDate, - HodlInvoice: true, - } -) - -func newTestChannelDB(clock clock.Clock) (*channeldb.DB, func(), er.R) { - // First, create a temporary directory to be used for the duration of - // this test. 
- tempDirName, errr := ioutil.TempDir("", "channeldb") - if errr != nil { - return nil, nil, er.E(errr) - } - - // Next, create channeldb for the first time. - cdb, err := channeldb.Open( - tempDirName, channeldb.OptionClock(clock), - ) - if err != nil { - os.RemoveAll(tempDirName) - return nil, nil, err - } - - cleanUp := func() { - cdb.Close() - os.RemoveAll(tempDirName) - } - - return cdb, cleanUp, nil -} - -type testContext struct { - cdb *channeldb.DB - registry *InvoiceRegistry - clock *clock.TestClock - - cleanup func() - t *testing.T -} - -func newTestContext(t *testing.T) *testContext { - clock := clock.NewTestClock(testTime) - - cdb, cleanup, err := newTestChannelDB(clock) - if err != nil { - t.Fatal(err) - } - - expiryWatcher := NewInvoiceExpiryWatcher(clock) - - // Instantiate and start the invoice ctx.registry. - cfg := RegistryConfig{ - FinalCltvRejectDelta: testFinalCltvRejectDelta, - HtlcHoldDuration: 30 * time.Second, - Clock: clock, - } - registry := NewRegistry(cdb, expiryWatcher, &cfg) - - err = registry.Start() - if err != nil { - cleanup() - t.Fatal(err) - } - - ctx := testContext{ - cdb: cdb, - registry: registry, - clock: clock, - t: t, - cleanup: func() { - registry.Stop() - cleanup() - }, - } - - return &ctx -} - -func getCircuitKey(htlcID uint64) channeldb.CircuitKey { - return channeldb.CircuitKey{ - ChanID: lnwire.ShortChannelID{ - BlockHeight: 1, TxIndex: 2, TxPosition: 3, - }, - HtlcID: htlcID, - } -} - -func newTestInvoice(t *testing.T, preimage lntypes.Preimage, - timestamp time.Time, expiry time.Duration) *channeldb.Invoice { - - if expiry == 0 { - expiry = time.Hour - } - - var payAddr [32]byte - if _, err := rand.Read(payAddr[:]); err != nil { - t.Fatalf("unable to generate payment addr: %v", err) - } - - rawInvoice, err := zpay32.NewInvoice( - testNetParams, - preimage.Hash(), - timestamp, - zpay32.Amount(testInvoiceAmount), - zpay32.Description(testInvoiceDescription), - zpay32.Expiry(expiry), - zpay32.PaymentAddr(payAddr), - ) 
- if err != nil { - t.Fatalf("Error while creating new invoice: %v", err) - } - - paymentRequest, err := rawInvoice.Encode(testMessageSigner) - - if err != nil { - t.Fatalf("Error while encoding payment request: %v", err) - } - - return &channeldb.Invoice{ - Terms: channeldb.ContractTerm{ - PaymentPreimage: &preimage, - PaymentAddr: payAddr, - Value: testInvoiceAmount, - Expiry: expiry, - Features: testFeatures, - }, - PaymentRequest: []byte(paymentRequest), - CreationDate: timestamp, - } -} - -// timeout implements a test level timeout. -func timeout() func() { - done := make(chan struct{}) - - go func() { - select { - case <-time.After(5 * time.Second): - err := pprof.Lookup("goroutine").WriteTo(os.Stdout, 1) - if err != nil { - panic(fmt.Sprintf("error writing to std out after timeout: %v", err)) - } - panic("timeout") - case <-done: - } - }() - - return func() { - close(done) - } -} - -// invoiceExpiryTestData simply holds generated expired and pending invoices. -type invoiceExpiryTestData struct { - expiredInvoices map[lntypes.Hash]*channeldb.Invoice - pendingInvoices map[lntypes.Hash]*channeldb.Invoice -} - -// generateInvoiceExpiryTestData generates the specified number of fake expired -// and pending invoices anchored to the passed now timestamp. 
-func generateInvoiceExpiryTestData( - t *testing.T, now time.Time, - offset, numExpired, numPending int) invoiceExpiryTestData { - - var testData invoiceExpiryTestData - - testData.expiredInvoices = make(map[lntypes.Hash]*channeldb.Invoice) - testData.pendingInvoices = make(map[lntypes.Hash]*channeldb.Invoice) - - expiredCreationDate := now.Add(-24 * time.Hour) - - for i := 1; i <= numExpired; i++ { - var preimage lntypes.Preimage - binary.BigEndian.PutUint32(preimage[:4], uint32(offset+i)) - expiry := time.Duration((i+offset)%24) * time.Hour - invoice := newTestInvoice(t, preimage, expiredCreationDate, expiry) - testData.expiredInvoices[preimage.Hash()] = invoice - } - - for i := 1; i <= numPending; i++ { - var preimage lntypes.Preimage - binary.BigEndian.PutUint32(preimage[4:], uint32(offset+i)) - expiry := time.Duration((i+offset)%24) * time.Hour - invoice := newTestInvoice(t, preimage, now, expiry) - testData.pendingInvoices[preimage.Hash()] = invoice - } - - return testData -} diff --git a/lnd/invoices/update.go b/lnd/invoices/update.go deleted file mode 100644 index 269e4d7c..00000000 --- a/lnd/invoices/update.go +++ /dev/null @@ -1,293 +0,0 @@ -package invoices - -import ( - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/lntypes" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/record" - "github.com/pkt-cash/pktd/pktlog/log" -) - -// invoiceUpdateCtx is an object that describes the context for the invoice -// update to be carried out. -type invoiceUpdateCtx struct { - hash lntypes.Hash - circuitKey channeldb.CircuitKey - amtPaid lnwire.MilliSatoshi - expiry uint32 - currentHeight int32 - finalCltvRejectDelta int32 - customRecords record.CustomSet - mpp *record.MPP -} - -// invoiceRef returns an identifier that can be used to lookup or update the -// invoice this HTLC is targeting. 
-func (i *invoiceUpdateCtx) invoiceRef() channeldb.InvoiceRef { - if i.mpp != nil { - payAddr := i.mpp.PaymentAddr() - return channeldb.InvoiceRefByHashAndAddr(i.hash, payAddr) - } - return channeldb.InvoiceRefByHash(i.hash) -} - -// log logs a message specific to this update context. -func (i *invoiceUpdateCtx) log(s string) { - log.Debugf("Invoice%v: %v, amt=%v, expiry=%v, circuit=%v, mpp=%v", - i.invoiceRef(), s, i.amtPaid, i.expiry, i.circuitKey, i.mpp) -} - -// failRes is a helper function which creates a failure resolution with -// the information contained in the invoiceUpdateCtx and the fail resolution -// result provided. -func (i invoiceUpdateCtx) failRes(outcome FailResolutionResult) *HtlcFailResolution { - return NewFailResolution(i.circuitKey, i.currentHeight, outcome) -} - -// settleRes is a helper function which creates a settle resolution with -// the information contained in the invoiceUpdateCtx and the preimage and -// the settle resolution result provided. -func (i invoiceUpdateCtx) settleRes(preimage lntypes.Preimage, - outcome SettleResolutionResult) *HtlcSettleResolution { - - return NewSettleResolution( - preimage, i.circuitKey, i.currentHeight, outcome, - ) -} - -// acceptRes is a helper function which creates an accept resolution with -// the information contained in the invoiceUpdateCtx and the accept resolution -// result provided. -func (i invoiceUpdateCtx) acceptRes(outcome acceptResolutionResult) *htlcAcceptResolution { - return newAcceptResolution(i.circuitKey, outcome) -} - -// updateInvoice is a callback for DB.UpdateInvoice that contains the invoice -// settlement logic. It returns a hltc resolution that indicates what the -// outcome of the update was. -func updateInvoice(ctx *invoiceUpdateCtx, inv *channeldb.Invoice) ( - *channeldb.InvoiceUpdateDesc, HtlcResolution, er.R) { - - // Don't update the invoice when this is a replayed htlc. 
- htlc, ok := inv.Htlcs[ctx.circuitKey] - if ok { - switch htlc.State { - case channeldb.HtlcStateCanceled: - return nil, ctx.failRes(ResultReplayToCanceled), nil - - case channeldb.HtlcStateAccepted: - return nil, ctx.acceptRes(resultReplayToAccepted), nil - - case channeldb.HtlcStateSettled: - return nil, ctx.settleRes( - *inv.Terms.PaymentPreimage, - ResultReplayToSettled, - ), nil - - default: - return nil, nil, er.New("unknown htlc state") - } - } - - if ctx.mpp == nil { - return updateLegacy(ctx, inv) - } - - return updateMpp(ctx, inv) -} - -// updateMpp is a callback for DB.UpdateInvoice that contains the invoice -// settlement logic for mpp payments. -func updateMpp(ctx *invoiceUpdateCtx, - inv *channeldb.Invoice) (*channeldb.InvoiceUpdateDesc, - HtlcResolution, er.R) { - - // Start building the accept descriptor. - acceptDesc := &channeldb.HtlcAcceptDesc{ - Amt: ctx.amtPaid, - Expiry: ctx.expiry, - AcceptHeight: ctx.currentHeight, - MppTotalAmt: ctx.mpp.TotalMsat(), - CustomRecords: ctx.customRecords, - } - - // Only accept payments to open invoices. This behaviour differs from - // non-mpp payments that are accepted even after the invoice is settled. - // Because non-mpp payments don't have a payment address, this is needed - // to thwart probing. - if inv.State != channeldb.ContractOpen { - return nil, ctx.failRes(ResultInvoiceNotOpen), nil - } - - // Check the payment address that authorizes the payment. - if ctx.mpp.PaymentAddr() != inv.Terms.PaymentAddr { - return nil, ctx.failRes(ResultAddressMismatch), nil - } - - // Don't accept zero-valued sets. - if ctx.mpp.TotalMsat() == 0 { - return nil, ctx.failRes(ResultHtlcSetTotalTooLow), nil - } - - // Check that the total amt of the htlc set is high enough. In case this - // is a zero-valued invoice, it will always be enough. - if ctx.mpp.TotalMsat() < inv.Terms.Value { - return nil, ctx.failRes(ResultHtlcSetTotalTooLow), nil - } - - // Check whether total amt matches other htlcs in the set. 
- var newSetTotal lnwire.MilliSatoshi - for _, htlc := range inv.Htlcs { - // Only consider accepted mpp htlcs. It is possible that there - // are htlcs registered in the invoice database that previously - // timed out and are in the canceled state now. - if htlc.State != channeldb.HtlcStateAccepted { - continue - } - - if ctx.mpp.TotalMsat() != htlc.MppTotalAmt { - return nil, ctx.failRes(ResultHtlcSetTotalMismatch), nil - } - - newSetTotal += htlc.Amt - } - - // Add amount of new htlc. - newSetTotal += ctx.amtPaid - - // Make sure the communicated set total isn't overpaid. - if newSetTotal > ctx.mpp.TotalMsat() { - return nil, ctx.failRes(ResultHtlcSetOverpayment), nil - } - - // The invoice is still open. Check the expiry. - if ctx.expiry < uint32(ctx.currentHeight+ctx.finalCltvRejectDelta) { - return nil, ctx.failRes(ResultExpiryTooSoon), nil - } - - if ctx.expiry < uint32(ctx.currentHeight+inv.Terms.FinalCltvDelta) { - return nil, ctx.failRes(ResultExpiryTooSoon), nil - } - - // Record HTLC in the invoice database. - newHtlcs := map[channeldb.CircuitKey]*channeldb.HtlcAcceptDesc{ - ctx.circuitKey: acceptDesc, - } - - update := channeldb.InvoiceUpdateDesc{ - AddHtlcs: newHtlcs, - } - - // If the invoice cannot be settled yet, only record the htlc. - setComplete := newSetTotal == ctx.mpp.TotalMsat() - if !setComplete { - return &update, ctx.acceptRes(resultPartialAccepted), nil - } - - // Check to see if we can settle or this is an hold invoice and - // we need to wait for the preimage. 
- if inv.HodlInvoice { - update.State = &channeldb.InvoiceStateUpdateDesc{ - NewState: channeldb.ContractAccepted, - } - return &update, ctx.acceptRes(resultAccepted), nil - } - - update.State = &channeldb.InvoiceStateUpdateDesc{ - NewState: channeldb.ContractSettled, - Preimage: inv.Terms.PaymentPreimage, - } - - return &update, ctx.settleRes( - *inv.Terms.PaymentPreimage, ResultSettled, - ), nil -} - -// updateLegacy is a callback for DB.UpdateInvoice that contains the invoice -// settlement logic for legacy payments. -func updateLegacy(ctx *invoiceUpdateCtx, - inv *channeldb.Invoice) (*channeldb.InvoiceUpdateDesc, HtlcResolution, er.R) { - - // If the invoice is already canceled, there is no further - // checking to do. - if inv.State == channeldb.ContractCanceled { - return nil, ctx.failRes(ResultInvoiceAlreadyCanceled), nil - } - - // If an invoice amount is specified, check that enough is paid. Also - // check this for duplicate payments if the invoice is already settled - // or accepted. In case this is a zero-valued invoice, it will always be - // enough. - if ctx.amtPaid < inv.Terms.Value { - return nil, ctx.failRes(ResultAmountTooLow), nil - } - - // TODO(joostjager): Check invoice mpp required feature - // bit when feature becomes mandatory. - - // Don't allow settling the invoice with an old style - // htlc if we are already in the process of gathering an - // mpp set. - for _, htlc := range inv.Htlcs { - if htlc.State == channeldb.HtlcStateAccepted && - htlc.MppTotalAmt > 0 { - - return nil, ctx.failRes(ResultMppInProgress), nil - } - } - - // The invoice is still open. Check the expiry. - if ctx.expiry < uint32(ctx.currentHeight+ctx.finalCltvRejectDelta) { - return nil, ctx.failRes(ResultExpiryTooSoon), nil - } - - if ctx.expiry < uint32(ctx.currentHeight+inv.Terms.FinalCltvDelta) { - return nil, ctx.failRes(ResultExpiryTooSoon), nil - } - - // Record HTLC in the invoice database. 
- newHtlcs := map[channeldb.CircuitKey]*channeldb.HtlcAcceptDesc{ - ctx.circuitKey: { - Amt: ctx.amtPaid, - Expiry: ctx.expiry, - AcceptHeight: ctx.currentHeight, - CustomRecords: ctx.customRecords, - }, - } - - update := channeldb.InvoiceUpdateDesc{ - AddHtlcs: newHtlcs, - } - - // Don't update invoice state if we are accepting a duplicate payment. - // We do accept or settle the HTLC. - switch inv.State { - case channeldb.ContractAccepted: - return &update, ctx.acceptRes(resultDuplicateToAccepted), nil - - case channeldb.ContractSettled: - return &update, ctx.settleRes( - *inv.Terms.PaymentPreimage, ResultDuplicateToSettled, - ), nil - } - - // Check to see if we can settle or this is an hold invoice and we need - // to wait for the preimage. - if inv.HodlInvoice { - update.State = &channeldb.InvoiceStateUpdateDesc{ - NewState: channeldb.ContractAccepted, - } - - return &update, ctx.acceptRes(resultAccepted), nil - } - - update.State = &channeldb.InvoiceStateUpdateDesc{ - NewState: channeldb.ContractSettled, - Preimage: inv.Terms.PaymentPreimage, - } - - return &update, ctx.settleRes( - *inv.Terms.PaymentPreimage, ResultSettled, - ), nil -} diff --git a/lnd/keychain/btcwallet.go b/lnd/keychain/btcwallet.go deleted file mode 100644 index 8589103c..00000000 --- a/lnd/keychain/btcwallet.go +++ /dev/null @@ -1,404 +0,0 @@ -package keychain - -import ( - "crypto/sha256" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/pktwallet/waddrmgr" - "github.com/pkt-cash/pktd/pktwallet/wallet" - "github.com/pkt-cash/pktd/pktwallet/walletdb" -) - -const ( - // CoinTypeBitcoin specifies the BIP44 coin type for Bitcoin key - // derivation. - CoinTypeBitcoin uint32 = 0 - - // CoinTypeTestnet specifies the BIP44 coin type for all testnet key - // derivation. - CoinTypeTestnet = 1 - - // CoinTypeLitecoin specifies the BIP44 coin type for Litecoin key - // derivation. 
- CoinTypeLitecoin = 2 -) - -var ( - // lightningAddrSchema is the scope addr schema for all keys that we - // derive. We'll treat them all as p2wkh addresses, as atm we must - // specify a particular type. - lightningAddrSchema = waddrmgr.ScopeAddrSchema{ - ExternalAddrType: waddrmgr.WitnessPubKey, - InternalAddrType: waddrmgr.WitnessPubKey, - } - - // waddrmgrNamespaceKey is the namespace key that the waddrmgr state is - // stored within the top-level waleltdb buckets of btcwallet. - waddrmgrNamespaceKey = []byte("waddrmgr") -) - -// BtcWalletKeyRing is an implementation of both the KeyRing and SecretKeyRing -// interfaces backed by btcwallet's internal root waddrmgr. Internally, we'll -// be using a ScopedKeyManager to do all of our derivations, using the key -// scope and scope addr scehma defined above. Re-using the existing key scope -// construction means that all key derivation will be protected under the root -// seed of the wallet, making each derived key fully deterministic. -type BtcWalletKeyRing struct { - // wallet is a pointer to the active instance of the btcwallet core. - // This is required as we'll need to manually open database - // transactions in order to derive addresses and lookup relevant keys - wallet *wallet.Wallet - - // chainKeyScope defines the purpose and coin type to be used when generating - // keys for this keyring. - chainKeyScope waddrmgr.KeyScope - - // lightningScope is a pointer to the scope that we'll be using as a - // sub key manager to derive all the keys that we require. - lightningScope *waddrmgr.ScopedKeyManager -} - -// NewBtcWalletKeyRing creates a new implementation of the -// keychain.SecretKeyRing interface backed by btcwallet. -// -// NOTE: The passed waddrmgr.Manager MUST be unlocked in order for the keychain -// to function. 
-func NewBtcWalletKeyRing(w *wallet.Wallet, coinType uint32) SecretKeyRing { - // Construct the key scope that will be used within the waddrmgr to - // create an HD chain for deriving all of our required keys. A different - // scope is used for each specific coin type. - chainKeyScope := waddrmgr.KeyScope{ - Purpose: BIP0043Purpose, - Coin: coinType, - } - - return &BtcWalletKeyRing{ - wallet: w, - chainKeyScope: chainKeyScope, - } -} - -// keyScope attempts to return the key scope that we'll use to derive all of -// our keys. If the scope has already been fetched from the database, then a -// cached version will be returned. Otherwise, we'll fetch it from the database -// and cache it for subsequent accesses. -func (b *BtcWalletKeyRing) keyScope() (*waddrmgr.ScopedKeyManager, er.R) { - // If the scope has already been populated, then we'll return it - // directly. - if b.lightningScope != nil { - return b.lightningScope, nil - } - - // Otherwise, we'll first do a check to ensure that the root manager - // isn't locked, as otherwise we won't be able to *use* the scope. - if b.wallet.Manager.IsLocked() { - return nil, er.Errorf("cannot create BtcWalletKeyRing with " + - "locked waddrmgr.Manager") - } - - // If the manager is indeed unlocked, then we'll fetch the scope, cache - // it, and return to the caller. - lnScope, err := b.wallet.Manager.FetchScopedKeyManager(b.chainKeyScope) - if err != nil { - return nil, err - } - - b.lightningScope = lnScope - - return lnScope, nil -} - -// createAccountIfNotExists will create the corresponding account for a key -// family if it doesn't already exist in the database. -func (b *BtcWalletKeyRing) createAccountIfNotExists( - addrmgrNs walletdb.ReadWriteBucket, keyFam KeyFamily, - scope *waddrmgr.ScopedKeyManager) er.R { - - // If this is the multi-sig key family, then we can return early as - // this is the default account that's created. 
- if keyFam == KeyFamilyMultiSig { - return nil - } - - // Otherwise, we'll check if the account already exists, if so, we can - // once again bail early. - _, err := scope.AccountName(addrmgrNs, uint32(keyFam)) - if err == nil { - return nil - } - - // If we reach this point, then the account hasn't yet been created, so - // we'll need to create it before we can proceed. - return scope.NewRawAccount(addrmgrNs, uint32(keyFam)) -} - -// DeriveNextKey attempts to derive the *next* key within the key family -// (account in BIP43) specified. This method should return the next external -// child within this branch. -// -// NOTE: This is part of the keychain.KeyRing interface. -func (b *BtcWalletKeyRing) DeriveNextKey(keyFam KeyFamily) (KeyDescriptor, er.R) { - var ( - pubKey *btcec.PublicKey - keyLoc KeyLocator - ) - - db := b.wallet.Database() - err := walletdb.Update(db, func(tx walletdb.ReadWriteTx) er.R { - addrmgrNs := tx.ReadWriteBucket(waddrmgrNamespaceKey) - - scope, err := b.keyScope() - if err != nil { - return err - } - - // If the account doesn't exist, then we may need to create it - // for the first time in order to derive the keys that we - // require. - err = b.createAccountIfNotExists(addrmgrNs, keyFam, scope) - if err != nil { - return err - } - - addrs, err := scope.NextExternalAddresses( - addrmgrNs, uint32(keyFam), 1, - ) - if err != nil { - return err - } - - // Extract the first address, ensuring that it is of the proper - // interface type, otherwise we can't manipulate it below. 
- addr, ok := addrs[0].(waddrmgr.ManagedPubKeyAddress) - if !ok { - return er.Errorf("address is not a managed pubkey " + - "addr") - } - - pubKey = addr.PubKey() - - _, pathInfo, _ := addr.DerivationInfo() - keyLoc = KeyLocator{ - Family: keyFam, - Index: pathInfo.Index, - } - - return nil - }) - if err != nil { - return KeyDescriptor{}, err - } - - return KeyDescriptor{ - PubKey: pubKey, - KeyLocator: keyLoc, - }, nil -} - -// DeriveKey attempts to derive an arbitrary key specified by the passed -// KeyLocator. This may be used in several recovery scenarios, or when manually -// rotating something like our current default node key. -// -// NOTE: This is part of the keychain.KeyRing interface. -func (b *BtcWalletKeyRing) DeriveKey(keyLoc KeyLocator) (KeyDescriptor, er.R) { - var keyDesc KeyDescriptor - - db := b.wallet.Database() - err := walletdb.Update(db, func(tx walletdb.ReadWriteTx) er.R { - addrmgrNs := tx.ReadWriteBucket(waddrmgrNamespaceKey) - - scope, err := b.keyScope() - if err != nil { - return err - } - - // If the account doesn't exist, then we may need to create it - // for the first time in order to derive the keys that we - // require. - err = b.createAccountIfNotExists(addrmgrNs, keyLoc.Family, scope) - if err != nil { - return err - } - - path := waddrmgr.DerivationPath{ - Account: uint32(keyLoc.Family), - Branch: 0, - Index: uint32(keyLoc.Index), - } - addr, err := scope.DeriveFromKeyPath(addrmgrNs, path) - if err != nil { - return err - } - - keyDesc.KeyLocator = keyLoc - keyDesc.PubKey = addr.(waddrmgr.ManagedPubKeyAddress).PubKey() - - return nil - }) - if err != nil { - return keyDesc, err - } - - return keyDesc, nil -} - -// DerivePrivKey attempts to derive the private key that corresponds to the -// passed key descriptor. -// -// NOTE: This is part of the keychain.SecretKeyRing interface. 
-func (b *BtcWalletKeyRing) DerivePrivKey(keyDesc KeyDescriptor) ( - *btcec.PrivateKey, er.R) { - - var key *btcec.PrivateKey - - db := b.wallet.Database() - err := walletdb.Update(db, func(tx walletdb.ReadWriteTx) er.R { - addrmgrNs := tx.ReadWriteBucket(waddrmgrNamespaceKey) - - scope, err := b.keyScope() - if err != nil { - return err - } - - // If the account doesn't exist, then we may need to create it - // for the first time in order to derive the keys that we - // require. - err = b.createAccountIfNotExists( - addrmgrNs, keyDesc.Family, scope, - ) - if err != nil { - return err - } - - // If the public key isn't set or they have a non-zero index, - // then we know that the caller instead knows the derivation - // path for a key. - if keyDesc.PubKey == nil || keyDesc.Index > 0 { - // Now that we know the account exists, we can safely - // derive the full private key from the given path. - path := waddrmgr.DerivationPath{ - Account: uint32(keyDesc.Family), - Branch: 0, - Index: uint32(keyDesc.Index), - } - addr, err := scope.DeriveFromKeyPath(addrmgrNs, path) - if err != nil { - return err - } - - key, err = addr.(waddrmgr.ManagedPubKeyAddress).PrivKey() - if err != nil { - return err - } - - return nil - } - - // If the public key isn't nil, then this indicates that we - // need to scan for the private key, assuming that we know the - // valid key family. - nextPath := waddrmgr.DerivationPath{ - Account: uint32(keyDesc.Family), - Branch: 0, - Index: 0, - } - - // We'll now iterate through our key range in an attempt to - // find the target public key. - // - // TODO(roasbeef): possibly move scanning into wallet to allow - // to be parallelized - for i := 0; i < MaxKeyRangeScan; i++ { - // Derive the next key in the range and fetch its - // managed address. 
- addr, err := scope.DeriveFromKeyPath( - addrmgrNs, nextPath, - ) - if err != nil { - return err - } - managedAddr := addr.(waddrmgr.ManagedPubKeyAddress) - - // If this is the target public key, then we'll return - // it directly back to the caller. - if managedAddr.PubKey().IsEqual(keyDesc.PubKey) { - key, err = managedAddr.PrivKey() - if err != nil { - return err - } - - return nil - } - - // This wasn't the target key, so roll forward and try - // the next one. - nextPath.Index++ - } - - // If we reach this point, then we we're unable to derive the - // private key, so return an error back to the user. - return ErrCannotDerivePrivKey.Default() - }) - if err != nil { - return nil, err - } - - return key, nil -} - -// ECDH performs a scalar multiplication (ECDH-like operation) between the -// target key descriptor and remote public key. The output returned will be -// the sha256 of the resulting shared point serialized in compressed format. If -// k is our private key, and P is the public key, we perform the following -// operation: -// -// sx := k*P s := sha256(sx.SerializeCompressed()) -// -// NOTE: This is part of the keychain.ECDHRing interface. -func (b *BtcWalletKeyRing) ECDH(keyDesc KeyDescriptor, - pub *btcec.PublicKey) ([32]byte, er.R) { - - privKey, err := b.DerivePrivKey(keyDesc) - if err != nil { - return [32]byte{}, err - } - - s := &btcec.PublicKey{} - x, y := btcec.S256().ScalarMult(pub.X, pub.Y, privKey.D.Bytes()) - s.X = x - s.Y = y - - h := sha256.Sum256(s.SerializeCompressed()) - - return h, nil -} - -// SignDigest signs the given SHA256 message digest with the private key -// described in the key descriptor. -// -// NOTE: This is part of the keychain.DigestSignerRing interface. 
-func (b *BtcWalletKeyRing) SignDigest(keyDesc KeyDescriptor, - digest [32]byte) (*btcec.Signature, er.R) { - - privKey, err := b.DerivePrivKey(keyDesc) - if err != nil { - return nil, err - } - return privKey.Sign(digest[:]) -} - -// SignDigestCompact signs the given SHA256 message digest with the private key -// described in the key descriptor and returns the signature in the compact, -// public key recoverable format. -// -// NOTE: This is part of the keychain.DigestSignerRing interface. -func (b *BtcWalletKeyRing) SignDigestCompact(keyDesc KeyDescriptor, - digest [32]byte) ([]byte, er.R) { - - privKey, err := b.DerivePrivKey(keyDesc) - if err != nil { - return nil, err - } - return btcec.SignCompact(btcec.S256(), privKey, digest[:], true) -} diff --git a/lnd/keychain/derivation.go b/lnd/keychain/derivation.go deleted file mode 100644 index b09451f8..00000000 --- a/lnd/keychain/derivation.go +++ /dev/null @@ -1,250 +0,0 @@ -package keychain - -import ( - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil/er" -) - -const ( - // KeyDerivationVersion is the version of the key derivation schema - // defined below. We use a version as this means that we'll be able to - // accept new seed in the future and be able to discern if the software - // is compatible with the version of the seed. - KeyDerivationVersion = 0 - - // BIP0043Purpose is the "purpose" value that we'll use for the first - // version or our key derivation scheme. All keys are expected to be - // derived from this purpose, then the particular coin type of the - // chain where the keys are to be used. Slightly adhering to BIP0043 - // allows us to not deviate too far from a widely used standard, and - // also fits into existing implementations of the BIP's template. - // - // NOTE: BRICK SQUUUUUAD. 
- BIP0043Purpose = 1017 -) - -var ( - // MaxKeyRangeScan is the maximum number of keys that we'll attempt to - // scan with if a caller knows the public key, but not the KeyLocator - // and wishes to derive a private key. - MaxKeyRangeScan = 100000 - - // ErrCannotDerivePrivKey is returned when DerivePrivKey is unable to - // derive a private key given only the public key and target key - // family. - ErrCannotDerivePrivKey = er.GenericErrorType.CodeWithDetail("ErrCannotDerivePrivKey", - "unable to derive private key") -) - -// KeyFamily represents a "family" of keys that will be used within various -// contracts created by lnd. These families are meant to be distinct branches -// within the HD key chain of the backing wallet. Usage of key families within -// the interface below are strict in order to promote integrability and the -// ability to restore all keys given a user master seed backup. -// -// The key derivation in this file follows the following hierarchy based on -// BIP43: -// -// * m/1017'/coinType'/keyFamily'/0/index -type KeyFamily uint32 - -const ( - // KeyFamilyMultiSig are keys to be used within multi-sig scripts. - KeyFamilyMultiSig KeyFamily = 0 - - // KeyFamilyRevocationBase are keys that are used within channels to - // create revocation basepoints that the remote party will use to - // create revocation keys for us. - KeyFamilyRevocationBase KeyFamily = 1 - - // KeyFamilyHtlcBase are keys used within channels that will be - // combined with per-state randomness to produce public keys that will - // be used in HTLC scripts. - KeyFamilyHtlcBase KeyFamily = 2 - - // KeyFamilyPaymentBase are keys used within channels that will be - // combined with per-state randomness to produce public keys that will - // be used in scripts that pay directly to us without any delay. 
- KeyFamilyPaymentBase KeyFamily = 3 - - // KeyFamilyDelayBase are keys used within channels that will be - // combined with per-state randomness to produce public keys that will - // be used in scripts that pay to us, but require a CSV delay before we - // can sweep the funds. - KeyFamilyDelayBase KeyFamily = 4 - - // KeyFamilyRevocationRoot is a family of keys which will be used to - // derive the root of a revocation tree for a particular channel. - KeyFamilyRevocationRoot KeyFamily = 5 - - // KeyFamilyNodeKey is a family of keys that will be used to derive - // keys that will be advertised on the network to represent our current - // "identity" within the network. Peers will need our latest node key - // in order to establish a transport session with us on the Lightning - // p2p level (BOLT-0008). - KeyFamilyNodeKey KeyFamily = 6 - - // KeyFamilyStaticBackup is the family of keys that will be used to - // derive keys that we use to encrypt and decrypt our set of static - // backups. These backups may either be stored within watch towers for - // a payment, or self stored on disk in a single file containing all - // the static channel backups. - KeyFamilyStaticBackup KeyFamily = 7 - - // KeyFamilyTowerSession is the family of keys that will be used to - // derive session keys when negotiating sessions with watchtowers. The - // session keys are limited to the lifetime of the session and are used - // to increase privacy in the watchtower protocol. - KeyFamilyTowerSession KeyFamily = 8 - - // KeyFamilyTowerID is the family of keys used to derive the public key - // of a watchtower. This made distinct from the node key to offer a form - // of rudimentary whitelisting, i.e. via knowledge of the pubkey, - // preventing others from having full access to the tower just as a - // result of knowing the node key. 
- KeyFamilyTowerID KeyFamily = 9 -) - -// KeyLocator is a two-tuple that can be used to derive *any* key that has ever -// been used under the key derivation mechanisms described in this file. -// Version 0 of our key derivation schema uses the following BIP43-like -// derivation: -// -// * m/1017'/coinType'/keyFamily'/0/index -// -// Our purpose is 1017 (chosen arbitrary for now), and the coin type will vary -// based on which coin/chain the channels are being created on. The key family -// are actually just individual "accounts" in the nomenclature of BIP43. By -// default we assume a branch of 0 (external). Finally, the key index (which -// will vary per channel and use case) is the final element which allows us to -// deterministically derive keys. -type KeyLocator struct { - // TODO(roasbeef): add the key scope as well?? - - // Family is the family of key being identified. - Family KeyFamily - - // Index is the precise index of the key being identified. - Index uint32 -} - -// IsEmpty returns true if a KeyLocator is "empty". This may be the case where -// we learn of a key from a remote party for a contract, but don't know the -// precise details of its derivation (as we don't know the private key!). -func (k KeyLocator) IsEmpty() bool { - return k.Family == 0 && k.Index == 0 -} - -// KeyDescriptor wraps a KeyLocator and also optionally includes a public key. -// Either the KeyLocator must be non-empty, or the public key pointer be -// non-nil. This will be used by the KeyRing interface to lookup arbitrary -// private keys, and also within the SignDescriptor struct to locate precisely -// which keys should be used for signing. -type KeyDescriptor struct { - // KeyLocator is the internal KeyLocator of the descriptor. - KeyLocator - - // PubKey is an optional public key that fully describes a target key. - // If this is nil, the KeyLocator MUST NOT be empty. 
- PubKey *btcec.PublicKey -} - -// KeyRing is the primary interface that will be used to perform public -// derivation of various keys used within the peer-to-peer network, and also -// within any created contracts. All derivation required by the KeyRing is -// based off of public derivation, so a system with only an extended public key -// (for the particular purpose+family) can derive this set of keys. -type KeyRing interface { - // DeriveNextKey attempts to derive the *next* key within the key - // family (account in BIP43) specified. This method should return the - // next external child within this branch. - DeriveNextKey(keyFam KeyFamily) (KeyDescriptor, er.R) - - // DeriveKey attempts to derive an arbitrary key specified by the - // passed KeyLocator. This may be used in several recovery scenarios, - // or when manually rotating something like our current default node - // key. - DeriveKey(keyLoc KeyLocator) (KeyDescriptor, er.R) -} - -// SecretKeyRing is a ring similar to the regular KeyRing interface, but it is -// also able to derive *private keys*. As this is a super-set of the regular -// KeyRing, we also expect the SecretKeyRing to implement the fully KeyRing -// interface. The methods in this struct may be used to extract the node key in -// order to accept inbound network connections, or to do manual signing for -// recovery purposes. -type SecretKeyRing interface { - KeyRing - - ECDHRing - - DigestSignerRing - - // DerivePrivKey attempts to derive the private key that corresponds to - // the passed key descriptor. If the public key is set, then this - // method will perform an in-order scan over the key set, with a max of - // MaxKeyRangeScan keys. In order for this to work, the caller MUST set - // the KeyFamily within the partially populated KeyLocator. - DerivePrivKey(keyDesc KeyDescriptor) (*btcec.PrivateKey, er.R) -} - -// DigestSignerRing is an interface that abstracts away basic low-level ECDSA -// signing on keys within a key ring. 
-type DigestSignerRing interface { - // SignDigest signs the given SHA256 message digest with the private key - // described in the key descriptor. - SignDigest(keyDesc KeyDescriptor, digest [32]byte) (*btcec.Signature, - er.R) - - // SignDigestCompact signs the given SHA256 message digest with the - // private key described in the key descriptor and returns the signature - // in the compact, public key recoverable format. - SignDigestCompact(keyDesc KeyDescriptor, digest [32]byte) ([]byte, er.R) -} - -// SingleKeyDigestSigner is an abstraction interface that hides the -// implementation of the low-level ECDSA signing operations by wrapping a -// single, specific private key. -type SingleKeyDigestSigner interface { - // PubKey returns the public key of the wrapped private key. - PubKey() *btcec.PublicKey - - // SignDigest signs the given SHA256 message digest with the wrapped - // private key. - SignDigest(digest [32]byte) (*btcec.Signature, er.R) - - // SignDigestCompact signs the given SHA256 message digest with the - // wrapped private key and returns the signature in the compact, public - // key recoverable format. - SignDigestCompact(digest [32]byte) ([]byte, er.R) -} - -// ECDHRing is an interface that abstracts away basic low-level ECDH shared key -// generation on keys within a key ring. -type ECDHRing interface { - // ECDH performs a scalar multiplication (ECDH-like operation) between - // the target key descriptor and remote public key. The output - // returned will be the sha256 of the resulting shared point serialized - // in compressed format. If k is our private key, and P is the public - // key, we perform the following operation: - // - // sx := k*P - // s := sha256(sx.SerializeCompressed()) - ECDH(keyDesc KeyDescriptor, pubKey *btcec.PublicKey) ([32]byte, er.R) -} - -// SingleKeyECDH is an abstraction interface that hides the implementation of an -// ECDH operation by wrapping a single, specific private key. 
-type SingleKeyECDH interface { - // PubKey returns the public key of the wrapped private key. - PubKey() *btcec.PublicKey - - // ECDH performs a scalar multiplication (ECDH-like operation) between - // the wrapped private key and remote public key. The output returned - // will be the sha256 of the resulting shared point serialized in - // compressed format. - ECDH(pubKey *btcec.PublicKey) ([32]byte, er.R) -} - -// TODO(roasbeef): extend to actually support scalar mult of key? -// * would allow to push in initial handshake auth into interface as well diff --git a/lnd/keychain/ecdh.go b/lnd/keychain/ecdh.go deleted file mode 100644 index c061e261..00000000 --- a/lnd/keychain/ecdh.go +++ /dev/null @@ -1,83 +0,0 @@ -package keychain - -import ( - "crypto/sha256" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil/er" -) - -// NewPubKeyECDH wraps the given key of the key ring so it adheres to the -// SingleKeyECDH interface. -func NewPubKeyECDH(keyDesc KeyDescriptor, ecdh ECDHRing) *PubKeyECDH { - return &PubKeyECDH{ - keyDesc: keyDesc, - ecdh: ecdh, - } -} - -// PubKeyECDH is an implementation of the SingleKeyECDH interface. It wraps an -// ECDH key ring so it can perform ECDH shared key generation against a single -// abstracted away private key. -type PubKeyECDH struct { - keyDesc KeyDescriptor - ecdh ECDHRing -} - -// PubKey returns the public key of the private key that is abstracted away by -// the interface. -// -// NOTE: This is part of the SingleKeyECDH interface. -func (p *PubKeyECDH) PubKey() *btcec.PublicKey { - return p.keyDesc.PubKey -} - -// ECDH performs a scalar multiplication (ECDH-like operation) between the -// abstracted private key and a remote public key. The output returned will be -// the sha256 of the resulting shared point serialized in compressed format. 
If -// k is our private key, and P is the public key, we perform the following -// operation: -// -// sx := k*P -// s := sha256(sx.SerializeCompressed()) -// -// NOTE: This is part of the SingleKeyECDH interface. -func (p *PubKeyECDH) ECDH(pubKey *btcec.PublicKey) ([32]byte, er.R) { - return p.ecdh.ECDH(p.keyDesc, pubKey) -} - -// PrivKeyECDH is an implementation of the SingleKeyECDH in which we do have the -// full private key. This can be used to wrap a temporary key to conform to the -// SingleKeyECDH interface. -type PrivKeyECDH struct { - // PrivKey is the private key that is used for the ECDH operation. - PrivKey *btcec.PrivateKey -} - -// PubKey returns the public key of the private key that is abstracted away by -// the interface. -// -// NOTE: This is part of the SingleKeyECDH interface. -func (p *PrivKeyECDH) PubKey() *btcec.PublicKey { - return p.PrivKey.PubKey() -} - -// ECDH performs a scalar multiplication (ECDH-like operation) between the -// abstracted private key and a remote public key. The output returned will be -// the sha256 of the resulting shared point serialized in compressed format. If -// k is our private key, and P is the public key, we perform the following -// operation: -// -// sx := k*P -// s := sha256(sx.SerializeCompressed()) -// -// NOTE: This is part of the SingleKeyECDH interface. 
-func (p *PrivKeyECDH) ECDH(pub *btcec.PublicKey) ([32]byte, er.R) { - s := &btcec.PublicKey{} - s.X, s.Y = btcec.S256().ScalarMult(pub.X, pub.Y, p.PrivKey.D.Bytes()) - - return sha256.Sum256(s.SerializeCompressed()), nil -} - -var _ SingleKeyECDH = (*PubKeyECDH)(nil) -var _ SingleKeyECDH = (*PrivKeyECDH)(nil) diff --git a/lnd/keychain/interface_test.go b/lnd/keychain/interface_test.go deleted file mode 100644 index 16cb6508..00000000 --- a/lnd/keychain/interface_test.go +++ /dev/null @@ -1,443 +0,0 @@ -package keychain - -import ( - "encoding/hex" - "fmt" - "io/ioutil" - "math/rand" - "os" - "testing" - "time" - - "github.com/davecgh/go-spew/spew" - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/chaincfg" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/pktwallet/snacl" - "github.com/pkt-cash/pktd/pktwallet/waddrmgr" - "github.com/pkt-cash/pktd/pktwallet/wallet" - "github.com/pkt-cash/pktd/pktwallet/walletdb" - - _ "github.com/pkt-cash/pktd/pktwallet/walletdb/bdb" // Required in order to create the default database. -) - -// versionZeroKeyFamilies is a slice of all the known key families for first -// version of the key derivation schema defined in this package. -var versionZeroKeyFamilies = []KeyFamily{ - KeyFamilyMultiSig, - KeyFamilyRevocationBase, - KeyFamilyHtlcBase, - KeyFamilyPaymentBase, - KeyFamilyDelayBase, - KeyFamilyRevocationRoot, - KeyFamilyNodeKey, - KeyFamilyStaticBackup, - KeyFamilyTowerSession, - KeyFamilyTowerID, -} - -var ( - testHDSeed = chainhash.Hash{ - 0xb7, 0x94, 0x38, 0x5f, 0x2d, 0x1e, 0xf7, 0xab, - 0x4d, 0x92, 0x73, 0xd1, 0x90, 0x63, 0x81, 0xb4, - 0x4f, 0x2f, 0x6f, 0x25, 0x98, 0xa3, 0xef, 0xb9, - 0x69, 0x49, 0x18, 0x83, 0x31, 0x98, 0x47, 0x53, - } -) - -func createTestBtcWallet(coinType uint32) (func(), *wallet.Wallet, er.R) { - // Instruct waddrmgr to use the cranked down scrypt parameters when - // creating new wallet encryption keys. 
- fastScrypt := waddrmgr.FastScryptOptions - keyGen := func(passphrase *[]byte, config *waddrmgr.ScryptOptions) ( - *snacl.SecretKey, er.R) { - - return snacl.NewSecretKey( - passphrase, fastScrypt.N, fastScrypt.R, fastScrypt.P, - ) - } - waddrmgr.SetSecretKeyGen(keyGen) - - // Create a new test wallet that uses fast scrypt as KDF. - tempDir, errr := ioutil.TempDir("", "keyring-lnwallet") - if errr != nil { - return nil, nil, er.E(errr) - } - loader := wallet.NewLoader(&chaincfg.SimNetParams, tempDir, "wallet.db", true, 0) - - pass := []byte("test") - - baseWallet, err := loader.CreateNewWallet( - pass, pass, []byte(hex.EncodeToString(testHDSeed[:])), time.Time{}, nil, - ) - if err != nil { - return nil, nil, err - } - - if err := baseWallet.Unlock(pass, nil); err != nil { - return nil, nil, err - } - - // Construct the key scope required to derive keys for the chose - // coinType. - chainKeyScope := waddrmgr.KeyScope{ - Purpose: BIP0043Purpose, - Coin: coinType, - } - - // We'll now ensure that the KeyScope: (1017, coinType) exists within - // the internal waddrmgr. We'll need this in order to properly generate - // the keys required for signing various contracts. 
- _, err = baseWallet.Manager.FetchScopedKeyManager(chainKeyScope) - if err != nil { - err := walletdb.Update(baseWallet.Database(), func(tx walletdb.ReadWriteTx) er.R { - addrmgrNs := tx.ReadWriteBucket(waddrmgrNamespaceKey) - - _, err := baseWallet.Manager.NewScopedKeyManager( - addrmgrNs, chainKeyScope, lightningAddrSchema, - ) - return err - }) - if err != nil { - return nil, nil, err - } - } - - cleanUp := func() { - baseWallet.Lock() - os.RemoveAll(tempDir) - } - - return cleanUp, baseWallet, nil -} - -func assertEqualKeyLocator(t *testing.T, a, b KeyLocator) { - t.Helper() - if a != b { - t.Fatalf("mismatched key locators: expected %v, "+ - "got %v", spew.Sdump(a), spew.Sdump(b)) - } -} - -// secretKeyRingConstructor is a function signature that's used as a generic -// constructor for various implementations of the KeyRing interface. A string -// naming the returned interface, a function closure that cleans up any -// resources, and the clean up interface itself are to be returned. -type keyRingConstructor func() (string, func(), KeyRing, er.R) - -// TestKeyRingDerivation tests that each known KeyRing implementation properly -// adheres to the expected behavior of the set of interfaces. 
-func TestKeyRingDerivation(t *testing.T) { - t.Parallel() - - keyRingImplementations := []keyRingConstructor{ - func() (string, func(), KeyRing, er.R) { - cleanUp, wallet, err := createTestBtcWallet( - CoinTypeBitcoin, - ) - if err != nil { - t.Fatalf("unable to create wallet: %v", err) - } - - keyRing := NewBtcWalletKeyRing(wallet, CoinTypeBitcoin) - - return "btcwallet", cleanUp, keyRing, nil - }, - func() (string, func(), KeyRing, er.R) { - cleanUp, wallet, err := createTestBtcWallet( - CoinTypeLitecoin, - ) - if err != nil { - t.Fatalf("unable to create wallet: %v", err) - } - - keyRing := NewBtcWalletKeyRing(wallet, CoinTypeLitecoin) - - return "ltcwallet", cleanUp, keyRing, nil - }, - func() (string, func(), KeyRing, er.R) { - cleanUp, wallet, err := createTestBtcWallet( - CoinTypeTestnet, - ) - if err != nil { - t.Fatalf("unable to create wallet: %v", err) - } - - keyRing := NewBtcWalletKeyRing(wallet, CoinTypeTestnet) - - return "testwallet", cleanUp, keyRing, nil - }, - } - - const numKeysToDerive = 10 - - // For each implementation constructor registered above, we'll execute - // an identical set of tests in order to ensure that the interface - // adheres to our nominal specification. - for _, keyRingConstructor := range keyRingImplementations { - keyRingName, cleanUp, keyRing, err := keyRingConstructor() - if err != nil { - t.Fatalf("unable to create key ring %v: %v", keyRingName, - err) - } - defer cleanUp() - - success := t.Run(fmt.Sprintf("%v", keyRingName), func(t *testing.T) { - // First, we'll ensure that we're able to derive keys - // from each of the known key families. - for _, keyFam := range versionZeroKeyFamilies { - // First, we'll ensure that we can derive the - // *next* key in the keychain. 
- keyDesc, err := keyRing.DeriveNextKey(keyFam) - if err != nil { - t.Fatalf("unable to derive next for "+ - "keyFam=%v: %v", keyFam, err) - } - assertEqualKeyLocator(t, - KeyLocator{ - Family: keyFam, - Index: 0, - }, keyDesc.KeyLocator, - ) - - // We'll now re-derive that key to ensure that - // we're able to properly access the key via - // the random access derivation methods. - keyLoc := KeyLocator{ - Family: keyFam, - Index: 0, - } - firstKeyDesc, err := keyRing.DeriveKey(keyLoc) - if err != nil { - t.Fatalf("unable to derive first key for "+ - "keyFam=%v: %v", keyFam, err) - } - if !keyDesc.PubKey.IsEqual(firstKeyDesc.PubKey) { - t.Fatalf("mismatched keys: expected %x, "+ - "got %x", - keyDesc.PubKey.SerializeCompressed(), - firstKeyDesc.PubKey.SerializeCompressed()) - } - assertEqualKeyLocator(t, - KeyLocator{ - Family: keyFam, - Index: 0, - }, firstKeyDesc.KeyLocator, - ) - - // If we now try to manually derive the next 10 - // keys (including the original key), then we - // should get an identical public key back and - // their KeyLocator information - // should be set properly. - for i := 0; i < numKeysToDerive+1; i++ { - keyLoc := KeyLocator{ - Family: keyFam, - Index: uint32(i), - } - keyDesc, err := keyRing.DeriveKey(keyLoc) - if err != nil { - t.Fatalf("unable to derive first key for "+ - "keyFam=%v: %v", keyFam, err) - } - - // Ensure that the key locator matches - // up as well. - assertEqualKeyLocator( - t, keyLoc, keyDesc.KeyLocator, - ) - } - - // If this succeeds, then we'll also try to - // derive a random index within the range. 
- randKeyIndex := uint32(rand.Int31()) - keyLoc = KeyLocator{ - Family: keyFam, - Index: randKeyIndex, - } - keyDesc, err = keyRing.DeriveKey(keyLoc) - if err != nil { - t.Fatalf("unable to derive key_index=%v "+ - "for keyFam=%v: %v", - randKeyIndex, keyFam, err) - } - assertEqualKeyLocator( - t, keyLoc, keyDesc.KeyLocator, - ) - } - }) - if !success { - break - } - } -} - -// secretKeyRingConstructor is a function signature that's used as a generic -// constructor for various implementations of the SecretKeyRing interface. A -// string naming the returned interface, a function closure that cleans up any -// resources, and the clean up interface itself are to be returned. -type secretKeyRingConstructor func() (string, func(), SecretKeyRing, er.R) - -// TestSecretKeyRingDerivation tests that each known SecretKeyRing -// implementation properly adheres to the expected behavior of the set of -// interface. -func TestSecretKeyRingDerivation(t *testing.T) { - t.Parallel() - - secretKeyRingImplementations := []secretKeyRingConstructor{ - func() (string, func(), SecretKeyRing, er.R) { - cleanUp, wallet, err := createTestBtcWallet( - CoinTypeBitcoin, - ) - if err != nil { - t.Fatalf("unable to create wallet: %v", err) - } - - keyRing := NewBtcWalletKeyRing(wallet, CoinTypeBitcoin) - - return "btcwallet", cleanUp, keyRing, nil - }, - func() (string, func(), SecretKeyRing, er.R) { - cleanUp, wallet, err := createTestBtcWallet( - CoinTypeLitecoin, - ) - if err != nil { - t.Fatalf("unable to create wallet: %v", err) - } - - keyRing := NewBtcWalletKeyRing(wallet, CoinTypeLitecoin) - - return "ltcwallet", cleanUp, keyRing, nil - }, - func() (string, func(), SecretKeyRing, er.R) { - cleanUp, wallet, err := createTestBtcWallet( - CoinTypeTestnet, - ) - if err != nil { - t.Fatalf("unable to create wallet: %v", err) - } - - keyRing := NewBtcWalletKeyRing(wallet, CoinTypeTestnet) - - return "testwallet", cleanUp, keyRing, nil - }, - } - - // For each implementation constructor 
registered above, we'll execute - // an identical set of tests in order to ensure that the interface - // adheres to our nominal specification. - for _, secretKeyRingConstructor := range secretKeyRingImplementations { - keyRingName, cleanUp, secretKeyRing, err := secretKeyRingConstructor() - if err != nil { - t.Fatalf("unable to create secret key ring %v: %v", - keyRingName, err) - } - defer cleanUp() - - success := t.Run(fmt.Sprintf("%v", keyRingName), func(t *testing.T) { - // For, each key family, we'll ensure that we're able - // to obtain the private key of a randomly select child - // index within the key family. - for _, keyFam := range versionZeroKeyFamilies { - randKeyIndex := uint32(rand.Int31()) - keyLoc := KeyLocator{ - Family: keyFam, - Index: randKeyIndex, - } - - // First, we'll query for the public key for - // this target key locator. - pubKeyDesc, err := secretKeyRing.DeriveKey(keyLoc) - if err != nil { - t.Fatalf("unable to derive pubkey "+ - "(fam=%v, index=%v): %v", - keyLoc.Family, - keyLoc.Index, err) - } - - // With the public key derive, ensure that - // we're able to obtain the corresponding - // private key correctly. - privKey, err := secretKeyRing.DerivePrivKey(KeyDescriptor{ - KeyLocator: keyLoc, - }) - if err != nil { - t.Fatalf("unable to derive priv "+ - "(fam=%v, index=%v): %v", keyLoc.Family, - keyLoc.Index, err) - } - - // Finally, ensure that the keys match up - // properly. - if !pubKeyDesc.PubKey.IsEqual(privKey.PubKey()) { - t.Fatalf("pubkeys mismatched: expected %x, got %x", - pubKeyDesc.PubKey.SerializeCompressed(), - privKey.PubKey().SerializeCompressed()) - } - - // Next, we'll test that we're able to derive a - // key given only the public key and key - // family. - // - // Derive a new key from the key ring. 
- keyDesc, err := secretKeyRing.DeriveNextKey(keyFam) - if err != nil { - t.Fatalf("unable to derive key: %v", err) - } - - // We'll now construct a key descriptor that - // requires us to scan the key range, and query - // for the key, we should be able to find it as - // it's valid. - keyDesc = KeyDescriptor{ - PubKey: keyDesc.PubKey, - KeyLocator: KeyLocator{ - Family: keyFam, - }, - } - privKey, err = secretKeyRing.DerivePrivKey(keyDesc) - if err != nil { - t.Fatalf("unable to derive priv key "+ - "via scanning: %v", err) - } - - // Having to resort to scanning, we should be - // able to find the target public key. - if !keyDesc.PubKey.IsEqual(privKey.PubKey()) { - t.Fatalf("pubkeys mismatched: expected %x, got %x", - pubKeyDesc.PubKey.SerializeCompressed(), - privKey.PubKey().SerializeCompressed()) - } - - // We'll try again, but this time with an - // unknown public key. - _, pub := btcec.PrivKeyFromBytes( - btcec.S256(), testHDSeed[:], - ) - keyDesc.PubKey = pub - - // If we attempt to query for this key, then we - // should get ErrCannotDerivePrivKey. - privKey, err = secretKeyRing.DerivePrivKey( - keyDesc, - ) - if !ErrCannotDerivePrivKey.Is(err) { - t.Fatalf("expected %T, instead got %v", - ErrCannotDerivePrivKey, err) - } - - // TODO(roasbeef): scalar mult once integrated - } - }) - if !success { - break - } - } -} - -func init() { - // We'll clamp the max range scan to constrain the run time of the - // private key scan test. 
- MaxKeyRangeScan = 3 -} diff --git a/lnd/keychain/signer.go b/lnd/keychain/signer.go deleted file mode 100644 index dfc33737..00000000 --- a/lnd/keychain/signer.go +++ /dev/null @@ -1,59 +0,0 @@ -package keychain - -import ( - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil/er" -) - -func NewPubKeyDigestSigner(keyDesc KeyDescriptor, - signer DigestSignerRing) *PubKeyDigestSigner { - - return &PubKeyDigestSigner{ - keyDesc: keyDesc, - digestSigner: signer, - } -} - -type PubKeyDigestSigner struct { - keyDesc KeyDescriptor - digestSigner DigestSignerRing -} - -func (p *PubKeyDigestSigner) PubKey() *btcec.PublicKey { - return p.keyDesc.PubKey -} - -func (p *PubKeyDigestSigner) SignDigest(digest [32]byte) (*btcec.Signature, - er.R) { - - return p.digestSigner.SignDigest(p.keyDesc, digest) -} - -func (p *PubKeyDigestSigner) SignDigestCompact(digest [32]byte) ([]byte, - er.R) { - - return p.digestSigner.SignDigestCompact(p.keyDesc, digest) -} - -type PrivKeyDigestSigner struct { - PrivKey *btcec.PrivateKey -} - -func (p *PrivKeyDigestSigner) PubKey() *btcec.PublicKey { - return p.PrivKey.PubKey() -} - -func (p *PrivKeyDigestSigner) SignDigest(digest [32]byte) (*btcec.Signature, - er.R) { - - return p.PrivKey.Sign(digest[:]) -} - -func (p *PrivKeyDigestSigner) SignDigestCompact(digest [32]byte) ([]byte, - er.R) { - - return btcec.SignCompact(btcec.S256(), p.PrivKey, digest[:], true) -} - -var _ SingleKeyDigestSigner = (*PubKeyDigestSigner)(nil) -var _ SingleKeyDigestSigner = (*PrivKeyDigestSigner)(nil) diff --git a/lnd/labels/labels.go b/lnd/labels/labels.go deleted file mode 100644 index 7d905f56..00000000 --- a/lnd/labels/labels.go +++ /dev/null @@ -1,93 +0,0 @@ -// Package labels contains labels used to label transactions broadcast by lnd. -// These labels are used across packages, so they are declared in a separate -// package to avoid dependency issues. 
-// -// Labels for transactions broadcast by lnd have two set fields followed by an -// optional set labelled data values, all separated by colons. -// - Label version: an integer that indicates the version lnd used -// - Label type: the type of transaction we are labelling -// - {field name}-{value}: a named field followed by its value, these items are -// optional, and there may be more than field present. -// -// For version 0 we have the following optional data fields defined: -// - shortchanid: the short channel ID that a transaction is associated with, -// with its value set to the uint64 short channel id. -package labels - -import ( - "fmt" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/pktwallet/wtxmgr" -) - -// External labels a transaction as user initiated via the api. This -// label is only used when a custom user provided label is not given. -const External = "external" - -// ValidateAPI returns the generic api label if the label provided is empty. -// This allows us to label all transactions published by the api, even if -// no label is provided. If a label is provided, it is validated against -// the known restrictions. -func ValidateAPI(label string) (string, er.R) { - if len(label) > wtxmgr.TxLabelLimit { - return "", er.Errorf("label length: %v exceeds "+ - "limit of %v", len(label), wtxmgr.TxLabelLimit) - } - - // If no label was provided by the user, add the generic user - // send label. - if len(label) == 0 { - return External, nil - } - - return label, nil -} - -// LabelVersion versions our labels so they can be easily update to contain -// new data while still easily string matched. -type LabelVersion uint8 - -// LabelVersionZero is the label version for labels that contain label type and -// channel ID (where available). -const LabelVersionZero LabelVersion = iota - -// LabelType indicates the type of label we are creating. 
It is a string rather -// than an int for easy string matching and human-readability. -type LabelType string - -const ( - // LabelTypeChannelOpen is used to label channel opens. - LabelTypeChannelOpen LabelType = "openchannel" - - // LabelTypeChannelClose is used to label channel closes. - LabelTypeChannelClose LabelType = "closechannel" - - // LabelTypeJusticeTransaction is used to label justice transactions. - LabelTypeJusticeTransaction LabelType = "justicetx" - - // LabelTypeSweepTransaction is used to label sweeps. - LabelTypeSweepTransaction LabelType = "sweep" -) - -// LabelField is used to tag a value within a label. -type LabelField string - -const ( - // ShortChanID is used to tag short channel id values in our labels. - ShortChanID LabelField = "shortchanid" -) - -// MakeLabel creates a label with the provided type and short channel id. If -// our short channel ID is not known, we simply return version:label_type. If -// we do have a short channel ID set, the label will also contain its value: -// shortchanid-{int64 chan ID}. -func MakeLabel(labelType LabelType, channelID *lnwire.ShortChannelID) string { - if channelID == nil { - return fmt.Sprintf("%v:%v", LabelVersionZero, labelType) - } - - return fmt.Sprintf("%v:%v:%v-%v", LabelVersionZero, labelType, - ShortChanID, channelID.ToUint64()) -} diff --git a/lnd/lncfg/address.go b/lnd/lncfg/address.go deleted file mode 100644 index a28dcf95..00000000 --- a/lnd/lncfg/address.go +++ /dev/null @@ -1,343 +0,0 @@ -package lncfg - -import ( - "context" - "crypto/tls" - "net" - "strconv" - "strings" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/tor" -) - -var ( - loopBackAddrs = []string{"localhost", "127.0.0.1", "[::1]"} -) - -// TCPResolver is a function signature that resolves an address on a given -// network. 
-type TCPResolver = func(network, addr string) (*net.TCPAddr, er.R) - -// NormalizeAddresses returns a new slice with all the passed addresses -// normalized with the given default port and all duplicates removed. -func NormalizeAddresses(addrs []string, defaultPort string, - tcpResolver TCPResolver) ([]net.Addr, er.R) { - - result := make([]net.Addr, 0, len(addrs)) - seen := map[string]struct{}{} - - for _, addr := range addrs { - parsedAddr, err := ParseAddressString( - addr, defaultPort, tcpResolver, - ) - if err != nil { - return nil, err - } - - if _, ok := seen[parsedAddr.String()]; !ok { - result = append(result, parsedAddr) - seen[parsedAddr.String()] = struct{}{} - } - } - - return result, nil -} - -// EnforceSafeAuthentication enforces "safe" authentication taking into account -// the interfaces that the RPC servers are listening on, and if macaroons and -// TLS is activated or not. To protect users from using dangerous config -// combinations, we'll prevent disabling authentication if the server is -// listening on a public interface. -func EnforceSafeAuthentication(addrs []net.Addr, macaroonsActive, - tlsActive bool) er.R { - - // We'll now examine all addresses that this RPC server is listening - // on. If it's a localhost address or a private address, we'll skip it, - // otherwise, we'll return an error if macaroons are inactive. - for _, addr := range addrs { - if IsLoopback(addr.String()) || IsUnix(addr) || IsPrivate(addr) { - continue - } - - if !macaroonsActive { - return er.Errorf("detected RPC server listening on "+ - "publicly reachable interface %v with "+ - "authentication disabled! Refusing to start "+ - "with --no-macaroons specified", addr) - } - - if !tlsActive { - return er.Errorf("detected RPC server listening on "+ - "publicly reachable interface %v with "+ - "encryption disabled! Refusing to start "+ - "with --notls specified", addr) - } - } - - return nil -} - -// parseNetwork parses the network type of the given address. 
-func parseNetwork(addr net.Addr) string { - switch addr := addr.(type) { - // TCP addresses resolved through net.ResolveTCPAddr give a default - // network of "tcp", so we'll map back the correct network for the given - // address. This ensures that we can listen on the correct interface - // (IPv4 vs IPv6). - case *net.TCPAddr: - if addr.IP.To4() != nil { - return "tcp4" - } - return "tcp6" - - default: - return addr.Network() - } -} - -// ListenOnAddress creates a listener that listens on the given address. -func ListenOnAddress(addr net.Addr) (net.Listener, er.R) { - l, e := net.Listen(parseNetwork(addr), addr.String()) - return l, er.E(e) -} - -// TLSListenOnAddress creates a TLS listener that listens on the given address. -func TLSListenOnAddress(addr net.Addr, - config *tls.Config) (net.Listener, er.R) { - l, e := tls.Listen(parseNetwork(addr), addr.String(), config) - return l, er.E(e) -} - -// IsLoopback returns true if an address describes a loopback interface. -func IsLoopback(addr string) bool { - for _, loopback := range loopBackAddrs { - if strings.Contains(addr, loopback) { - return true - } - } - - return false -} - -// IsUnix returns true if an address describes an Unix socket address. -func IsUnix(addr net.Addr) bool { - return strings.HasPrefix(addr.Network(), "unix") -} - -// IsPrivate returns true if the address is private. The definitions are, -// https://en.wikipedia.org/wiki/Link-local_address -// https://en.wikipedia.org/wiki/Multicast_address -// Local IPv4 addresses, https://tools.ietf.org/html/rfc1918 -// Local IPv6 addresses, https://tools.ietf.org/html/rfc4193 -func IsPrivate(addr net.Addr) bool { - switch addr := addr.(type) { - case *net.TCPAddr: - // Check 169.254.0.0/16 and fe80::/10. - if addr.IP.IsLinkLocalUnicast() { - return true - } - - // Check 224.0.0.0/4 and ff00::/8. - if addr.IP.IsLinkLocalMulticast() { - return true - } - - // Check 10.0.0.0/8, 172.16.0.0/12 and 192.168.0.0/16. 
- if ip4 := addr.IP.To4(); ip4 != nil { - return ip4[0] == 10 || - (ip4[0] == 172 && ip4[1]&0xf0 == 16) || - (ip4[0] == 192 && ip4[1] == 168) - } - - // Check fc00::/7. - return len(addr.IP) == net.IPv6len && addr.IP[0]&0xfe == 0xfc - - default: - return false - } -} - -// ParseAddressString converts an address in string format to a net.Addr that is -// compatible with lnd. UDP is not supported because lnd needs reliable -// connections. We accept a custom function to resolve any TCP addresses so -// that caller is able control exactly how resolution is performed. -func ParseAddressString(strAddress string, defaultPort string, - tcpResolver TCPResolver) (net.Addr, er.R) { - - var parsedNetwork, parsedAddr string - - // Addresses can either be in network://address:port format, - // network:address:port, address:port, or just port. We want to support - // all possible types. - if strings.Contains(strAddress, "://") { - parts := strings.Split(strAddress, "://") - parsedNetwork, parsedAddr = parts[0], parts[1] - } else if strings.Contains(strAddress, ":") { - parts := strings.Split(strAddress, ":") - parsedNetwork = parts[0] - parsedAddr = strings.Join(parts[1:], ":") - } - - // Only TCP and Unix socket addresses are valid. We can't use IP or - // UDP only connections for anything we do in lnd. - switch parsedNetwork { - case "unix", "unixpacket": - a, e := net.ResolveUnixAddr(parsedNetwork, parsedAddr) - return a, er.E(e) - - case "tcp", "tcp4", "tcp6": - return tcpResolver( - parsedNetwork, verifyPort(parsedAddr, defaultPort), - ) - - case "ip", "ip4", "ip6", "udp", "udp4", "udp6", "unixgram": - return nil, er.Errorf("only TCP or unix socket "+ - "addresses are supported: %s", parsedAddr) - - default: - // We'll now possibly apply the default port, use the local - // host short circuit, or parse out an all interfaces listen. 
- addrWithPort := verifyPort(strAddress, defaultPort) - rawHost, rawPort, _ := net.SplitHostPort(addrWithPort) - - // If we reach this point, then we'll check to see if we have - // an onion addresses, if so, we can directly pass the raw - // address and port to create the proper address. - if tor.IsOnionHost(rawHost) { - portNum, err := strconv.Atoi(rawPort) - if err != nil { - return nil, er.E(err) - } - - return &tor.OnionAddr{ - OnionService: rawHost, - Port: portNum, - }, nil - } - - // Otherwise, we'll attempt the resolve the host. The Tor - // resolver is unable to resolve local addresses, so we'll use - // the system resolver instead. - if rawHost == "" || IsLoopback(rawHost) { - a, e := net.ResolveTCPAddr("tcp", addrWithPort) - return a, er.E(e) - } - - return tcpResolver("tcp", addrWithPort) - } -} - -// ParseLNAddressString converts a string of the form @ into an -// lnwire.NetAddress. The must be presented in hex, and result in a -// 33-byte, compressed public key that lies on the secp256k1 curve. The -// may be any address supported by ParseAddressString. If no port is specified, -// the defaultPort will be used. Any tcp addresses that need resolving will be -// resolved using the custom TCPResolver. -func ParseLNAddressString(strAddress string, defaultPort string, - tcpResolver TCPResolver) (*lnwire.NetAddress, er.R) { - - // Split the address string around the @ sign. - parts := strings.Split(strAddress, "@") - - // The string is malformed if there are not exactly two parts. - if len(parts) != 2 { - return nil, er.Errorf("invalid lightning address %s: "+ - "must be of the form @", strAddress) - } - - // Now, take the first portion as the hex pubkey, and the latter as the - // address string. - parsedPubKey, parsedAddr := parts[0], parts[1] - - // Decode the hex pubkey to get the raw compressed pubkey bytes. 
- pubKeyBytes, err := util.DecodeHex(parsedPubKey) - if err != nil { - return nil, er.Errorf("invalid lightning address pubkey: %v", err) - } - - // The compressed pubkey should have a length of exactly 33 bytes. - if len(pubKeyBytes) != 33 { - return nil, er.Errorf("invalid lightning address pubkey: "+ - "length must be 33 bytes, found %d", len(pubKeyBytes)) - } - - // Parse the pubkey bytes to verify that it corresponds to valid public - // key on the secp256k1 curve. - pubKey, err := btcec.ParsePubKey(pubKeyBytes, btcec.S256()) - if err != nil { - return nil, er.Errorf("invalid lightning address pubkey: %v", err) - } - - // Finally, parse the address string using our generic address parser. - addr, err := ParseAddressString(parsedAddr, defaultPort, tcpResolver) - if err != nil { - return nil, er.Errorf("invalid lightning address address: %v", err) - } - - return &lnwire.NetAddress{ - IdentityKey: pubKey, - Address: addr, - }, nil -} - -// verifyPort makes sure that an address string has both a host and a port. If -// there is no port found, the default port is appended. If the address is just -// a port, then we'll assume that the user is using the short cut to specify a -// localhost:port address. -func verifyPort(address string, defaultPort string) string { - host, port, err := net.SplitHostPort(address) - if err != nil { - // If the address itself is just an integer, then we'll assume - // that we're mapping this directly to a localhost:port pair. - // This ensures we maintain the legacy behavior. - if _, err := strconv.Atoi(address); err == nil { - return net.JoinHostPort("localhost", address) - } - - // Otherwise, we'll assume that the address just failed to - // attach its own port, so we'll use the default port. In the - // case of IPv6 addresses, if the host is already surrounded by - // brackets, then we'll avoid using the JoinHostPort function, - // since it will always add a pair of brackets. 
- if strings.HasPrefix(address, "[") { - return address + ":" + defaultPort - } - return net.JoinHostPort(address, defaultPort) - } - - // In the case that both the host and port are empty, we'll use the - // default port. - if host == "" && port == "" { - return ":" + defaultPort - } - - return address -} - -func ResolveTCPAddr(n, a string) (*net.TCPAddr, er.R) { - addr, e := net.ResolveTCPAddr(n, a) - return addr, er.E(e) -} - -// ClientAddressDialer creates a gRPC dialer that can also dial unix socket -// addresses instead of just TCP addresses. -func ClientAddressDialer(defaultPort string) func(context.Context, string) (net.Conn, error) { - - return func(ctx context.Context, addr string) (net.Conn, error) { - parsedAddr, err := ParseAddressString( - addr, defaultPort, ResolveTCPAddr, - ) - if err != nil { - return nil, er.Native(err) - } - - d := net.Dialer{} - return d.DialContext( - ctx, parsedAddr.Network(), parsedAddr.String(), - ) - } -} diff --git a/lnd/lncfg/address_test.go b/lnd/lncfg/address_test.go deleted file mode 100644 index cbe2a9af..00000000 --- a/lnd/lncfg/address_test.go +++ /dev/null @@ -1,322 +0,0 @@ -// +build !rpctest - -package lncfg - -import ( - "bytes" - "encoding/hex" - "net" - "testing" - - "github.com/pkt-cash/pktd/btcec" - "github.com/stretchr/testify/require" -) - -// addressTest defines a test vector for an address that contains the non- -// normalized input and the expected normalized output. 
-type addressTest struct { - address string - expectedNetwork string - expectedAddress string - isLoopback bool - isUnix bool -} - -var ( - defaultTestPort = "1234" - addressTestVectors = []addressTest{ - {"tcp://127.0.0.1:9735", "tcp", "127.0.0.1:9735", true, false}, - {"tcp:127.0.0.1:9735", "tcp", "127.0.0.1:9735", true, false}, - {"127.0.0.1:9735", "tcp", "127.0.0.1:9735", true, false}, - {":9735", "tcp", ":9735", false, false}, - {"", "tcp", ":1234", false, false}, - {":", "tcp", ":1234", false, false}, - {"tcp4://127.0.0.1:9735", "tcp", "127.0.0.1:9735", true, false}, - {"tcp4:127.0.0.1:9735", "tcp", "127.0.0.1:9735", true, false}, - {"127.0.0.1", "tcp", "127.0.0.1:1234", true, false}, - {"[::1]", "tcp", "[::1]:1234", true, false}, - {"::1", "tcp", "[::1]:1234", true, false}, - {"tcp6://::1", "tcp", "[::1]:1234", true, false}, - {"tcp6:::1", "tcp", "[::1]:1234", true, false}, - {"localhost:9735", "tcp", "127.0.0.1:9735", true, false}, - {"localhost", "tcp", "127.0.0.1:1234", true, false}, - {"unix:///tmp/lnd.sock", "unix", "/tmp/lnd.sock", false, true}, - {"unix:/tmp/lnd.sock", "unix", "/tmp/lnd.sock", false, true}, - {"123", "tcp", "127.0.0.1:123", true, false}, - { - "4acth47i6kxnvkewtm6q7ib2s3ufpo5sqbsnzjpbi7utijcltosqemad.onion", - "tcp", - "4acth47i6kxnvkewtm6q7ib2s3ufpo5sqbsnzjpbi7utijcltosqemad.onion:1234", - false, - false, - }, - { - "4acth47i6kxnvkewtm6q7ib2s3ufpo5sqbsnzjpbi7utijcltosqemad.onion:9735", - "tcp", - "4acth47i6kxnvkewtm6q7ib2s3ufpo5sqbsnzjpbi7utijcltosqemad.onion:9735", - false, - false, - }, - { - "3g2upl4pq6kufc4m.onion", - "tcp", - "3g2upl4pq6kufc4m.onion:1234", - false, - false, - }, - { - "3g2upl4pq6kufc4m.onion:9735", - "tcp", - "3g2upl4pq6kufc4m.onion:9735", - false, - false, - }, - } - invalidTestVectors = []string{ - "some string", - "://", - "12.12.12.12.12", - } -) - -// TestAddresses ensures that all supported address formats can be parsed and -// normalized correctly. 
-func TestAddresses(t *testing.T) { - // First, test all correct addresses. - for _, test := range addressTestVectors { - t.Run(test.address, func(t *testing.T) { - testAddress(t, test) - }) - } - - // Finally, test invalid inputs to see if they are handled correctly. - for _, invalidAddr := range invalidTestVectors { - t.Run(invalidAddr, func(t *testing.T) { - testInvalidAddress(t, invalidAddr) - }) - } -} - -// testAddress parses an address from its string representation, and -// asserts that the parsed net.Addr is correct against the given test case. -func testAddress(t *testing.T, test addressTest) { - addr := []string{test.address} - normalized, err := NormalizeAddresses( - addr, defaultTestPort, ResolveTCPAddr, - ) - if err != nil { - t.Fatalf("unable to normalize address %s: %v", - test.address, err) - } - - if len(addr) == 0 { - t.Fatalf("no normalized addresses returned") - } - - netAddr := normalized[0] - validateAddr(t, netAddr, test) -} - -// testInvalidAddress asserts that parsing the invalidAddr string using -// NormalizeAddresses results in an error. -func testInvalidAddress(t *testing.T, invalidAddr string) { - addr := []string{invalidAddr} - _, err := NormalizeAddresses( - addr, defaultTestPort, ResolveTCPAddr, - ) - if err == nil { - t.Fatalf("expected error when parsing %v", invalidAddr) - } -} - -var ( - pubKeyBytes = []byte{0x03, - 0xc7, 0x82, 0x86, 0xd0, 0xbf, 0xe0, 0xb2, 0x33, - 0x77, 0xe3, 0x47, 0xd7, 0xd9, 0x63, 0x94, 0x3c, - 0x4f, 0x57, 0x5d, 0xdd, 0xd5, 0x7e, 0x2f, 0x1d, - 0x52, 0xa5, 0xbe, 0x1e, 0xb7, 0xf6, 0x25, 0xa4, - } - - pubKeyHex = hex.EncodeToString(pubKeyBytes) - - pubKey, _ = btcec.ParsePubKey(pubKeyBytes, btcec.S256()) -) - -type lnAddressCase struct { - lnAddress string - expectedPubKey *btcec.PublicKey - - addressTest -} - -// lnAddressTests constructs valid LNAddress test vectors from the existing set -// of valid address test vectors. All addresses will use the same public key for -// the positive tests. 
-var lnAddressTests = func() []lnAddressCase { - var cases []lnAddressCase - for _, addrTest := range addressTestVectors { - cases = append(cases, lnAddressCase{ - lnAddress: pubKeyHex + "@" + addrTest.address, - expectedPubKey: pubKey, - addressTest: addrTest, - }) - } - - return cases -}() - -var invalidLNAddressTests = []string{ - "", // empty string - "@", // empty pubkey - "nonhexpubkey@", // non-hex public key - pubKeyHex[:len(pubKeyHex)-2] + "@", // pubkey too short - pubKeyHex + "aa@", // pubkey too long - pubKeyHex[:len(pubKeyHex)-1] + "7@", // pubkey not on curve - pubKeyHex + "@some string", // invalid address - pubKeyHex + "@://", // invalid address - pubKeyHex + "@21.21.21.21.21", // invalid address -} - -// TestLNAddresses performs both positive and negative tests against -// ParseLNAddressString. -func TestLNAddresses(t *testing.T) { - for _, test := range lnAddressTests { - t.Run(test.lnAddress, func(t *testing.T) { - testLNAddress(t, test) - }) - } - - for _, invalidAddr := range invalidLNAddressTests { - t.Run(invalidAddr, func(t *testing.T) { - testInvalidLNAddress(t, invalidAddr) - }) - } -} - -// testLNAddress parses an LNAddress from its string representation, and asserts -// that the parsed IdentityKey and Address are correct according to its test -// case. -func testLNAddress(t *testing.T, test lnAddressCase) { - // Parse the LNAddress using the default port and TCP resolver. - lnAddr, err := ParseLNAddressString( - test.lnAddress, defaultTestPort, ResolveTCPAddr, - ) - if err != nil { - t.Fatalf("unable to parse ln address: %v", err) - } - - // Assert that the public key matches the expected public key. - pkBytes := lnAddr.IdentityKey.SerializeCompressed() - if !bytes.Equal(pkBytes, pubKeyBytes) { - t.Fatalf("mismatched pubkey, want: %x, got: %v", - pubKeyBytes, pkBytes) - } - - // Assert that the address after the @ is parsed properly, as if it were - // just a standalone address parsed by ParseAddressString. 
- validateAddr(t, lnAddr.Address, test.addressTest) -} - -// testLNAddressCase asserts that parsing the given invalidAddr string results -// in an error when parsed with ParseLNAddressString. -func testInvalidLNAddress(t *testing.T, invalidAddr string) { - _, err := ParseLNAddressString( - invalidAddr, defaultTestPort, ResolveTCPAddr, - ) - if err == nil { - t.Fatalf("expected error when parsing invalid lnaddress: %v", - invalidAddr) - } -} - -// validateAddr asserts that an addr parsed by ParseAddressString matches the -// properties expected by its addressTest. In particular, it validates that the -// Network() and String() methods match the expectedNetwork and expectedAddress, -// respectively. Further, we test the IsLoopback and IsUnix detection methods -// against addr and assert that they match the expected values in the test case. -func validateAddr(t *testing.T, addr net.Addr, test addressTest) { - - t.Helper() - - // Assert that the parsed network and address match what we expect. - if addr.Network() != test.expectedNetwork || - addr.String() != test.expectedAddress { - t.Fatalf("mismatched address: expected %s://%s, "+ - "got %s://%s", - test.expectedNetwork, test.expectedAddress, - addr.Network(), addr.String(), - ) - } - - // Assert whether we expect this address to be a loopback address. - isAddrLoopback := IsLoopback(addr.String()) - if test.isLoopback != isAddrLoopback { - t.Fatalf("mismatched loopback detection: expected "+ - "%v, got %v for addr %s", - test.isLoopback, isAddrLoopback, test.address, - ) - } - - // Assert whether we expect this address to be a unix address. 
- isAddrUnix := IsUnix(addr) - if test.isUnix != isAddrUnix { - t.Fatalf("mismatched unix detection: expected "+ - "%v, got %v for addr %s", - test.isUnix, isAddrUnix, test.address, - ) - } -} - -func TestIsPrivate(t *testing.T) { - nonPrivateIPList := []net.IP{ - net.IPv4(169, 255, 0, 0), - {0xfe, 0x79, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - net.IPv4(225, 0, 0, 0), - {0xff, 0x01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - net.IPv4(11, 0, 0, 0), - net.IPv4(172, 15, 0, 0), - net.IPv4(192, 169, 0, 0), - {0xfe, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - net.IPv4(8, 8, 8, 8), - {2, 0, 0, 1, 4, 8, 6, 0, 4, 8, 6, 0, 8, 8, 8, 8}, - } - privateIPList := []net.IP{ - net.IPv4(169, 254, 0, 0), - {0xfe, 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - net.IPv4(224, 0, 0, 0), - {0xff, 0x02, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - net.IPv4(10, 0, 0, 1), - net.IPv4(172, 16, 0, 1), - net.IPv4(172, 31, 255, 255), - net.IPv4(192, 168, 0, 1), - {0xfc, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - } - - testParams := []struct { - name string - ipList []net.IP - private bool - }{ - { - "Non-private addresses should return false", - nonPrivateIPList, false, - }, - { - "Private addresses should return true", - privateIPList, true, - }, - } - - for _, tt := range testParams { - test := tt - t.Run(test.name, func(t *testing.T) { - for _, ip := range test.ipList { - addr := &net.TCPAddr{IP: ip} - require.Equal( - t, test.private, IsPrivate(addr), - "expected IP: %s to be %v", ip, test.private, - ) - } - }) - } -} diff --git a/lnd/lncfg/autopilot.go b/lnd/lncfg/autopilot.go deleted file mode 100644 index 65d24287..00000000 --- a/lnd/lncfg/autopilot.go +++ /dev/null @@ -1,14 +0,0 @@ -package lncfg - -// AutoPilot holds the configuration options for the daemon's autopilot. 
-type AutoPilot struct { - Active bool `long:"active" description:"If the autopilot agent should be active or not."` - Heuristic map[string]float64 `long:"heuristic" description:"Heuristic to activate, and the weight to give it during scoring."` - MaxChannels int `long:"maxchannels" description:"The maximum number of channels that should be created"` - Allocation float64 `long:"allocation" description:"The percentage of total funds that should be committed to automatic channel establishment"` - MinChannelSize int64 `long:"minchansize" description:"The smallest channel that the autopilot agent should create"` - MaxChannelSize int64 `long:"maxchansize" description:"The largest channel that the autopilot agent should create"` - Private bool `long:"private" description:"Whether the channels created by the autopilot agent should be private or not. Private channels won't be announced to the network."` - MinConfs int32 `long:"minconfs" description:"The minimum number of confirmations each of your inputs in funding transactions created by the autopilot agent must have."` - ConfTarget uint32 `long:"conftarget" description:"The confirmation target (in blocks) for channels opened by autopilot."` -} diff --git a/lnd/lncfg/bitcoind.go b/lnd/lncfg/bitcoind.go deleted file mode 100644 index 6076a3b1..00000000 --- a/lnd/lncfg/bitcoind.go +++ /dev/null @@ -1,13 +0,0 @@ -package lncfg - -// Bitcoind holds the configuration options for the daemon's connection to -// bitcoind. -type Bitcoind struct { - Dir string `long:"dir" description:"The base directory that contains the node's data, logs, configuration file, etc."` - RPCHost string `long:"rpchost" description:"The daemon's rpc listening address. 
If a port is omitted, then the default port for the selected chain parameters will be used."` - RPCUser string `long:"rpcuser" description:"Username for RPC connections"` - RPCPass string `long:"rpcpass" default-mask:"-" description:"Password for RPC connections"` - ZMQPubRawBlock string `long:"zmqpubrawblock" description:"The address listening for ZMQ connections to deliver raw block notifications"` - ZMQPubRawTx string `long:"zmqpubrawtx" description:"The address listening for ZMQ connections to deliver raw transaction notifications"` - EstimateMode string `long:"estimatemode" description:"The fee estimate mode. Must be either ECONOMICAL or CONSERVATIVE."` -} diff --git a/lnd/lncfg/btcd.go b/lnd/lncfg/btcd.go deleted file mode 100644 index f214bc88..00000000 --- a/lnd/lncfg/btcd.go +++ /dev/null @@ -1,11 +0,0 @@ -package lncfg - -// Btcd holds the configuration options for the daemon's connection to btcd. -type Btcd struct { - Dir string `long:"dir" description:"The base directory that contains the node's data, logs, configuration file, etc."` - RPCHost string `long:"rpchost" description:"The daemon's rpc listening address. 
If a port is omitted, then the default port for the selected chain parameters will be used."` - RPCUser string `long:"rpcuser" description:"Username for RPC connections"` - RPCPass string `long:"rpcpass" default-mask:"-" description:"Password for RPC connections"` - RPCCert string `long:"rpccert" description:"File containing the daemon's certificate file"` - RawRPCCert string `long:"rawrpccert" description:"The raw bytes of the daemon's PEM-encoded certificate chain which will be used to authenticate the RPC connection."` -} diff --git a/lnd/lncfg/caches.go b/lnd/lncfg/caches.go deleted file mode 100644 index 1b507a5f..00000000 --- a/lnd/lncfg/caches.go +++ /dev/null @@ -1,45 +0,0 @@ -package lncfg - -import "github.com/pkt-cash/pktd/btcutil/er" - -const ( - // MinRejectCacheSize is a floor on the maximum capacity allowed for - // channeldb's reject cache. This amounts to roughly 125 KB when full. - MinRejectCacheSize = 5000 - - // MinChannelCacheSize is a floor on the maximum capacity allowed for - // channeldb's channel cache. This amounts to roughly 2 MB when full. - MinChannelCacheSize = 1000 -) - -// Caches holds the configuration for various caches within lnd. -type Caches struct { - // RejectCacheSize is the maximum number of entries stored in lnd's - // reject cache, which is used for efficiently rejecting gossip updates. - // Memory usage is roughly 25b per entry. - RejectCacheSize int `long:"reject-cache-size" description:"Maximum number of entries contained in the reject cache, which is used to speed up filtering of new channel announcements and channel updates from peers. Each entry requires 25 bytes."` - - // ChannelCacheSize is the maximum number of entries stored in lnd's - // channel cache, which is used reduce memory allocations in reply to - // peers querying for gossip traffic. Memory usage is roughly 2Kb per - // entry. 
- ChannelCacheSize int `long:"channel-cache-size" description:"Maximum number of entries contained in the channel cache, which is used to reduce memory allocations from gossip queries from peers. Each entry requires roughly 2Kb."` -} - -// Validate checks the Caches configuration for values that are too small to be -// sane. -func (c *Caches) Validate() er.R { - if c.RejectCacheSize < MinRejectCacheSize { - return er.Errorf("reject cache size %d is less than min: %d", - c.RejectCacheSize, MinRejectCacheSize) - } - if c.ChannelCacheSize < MinChannelCacheSize { - return er.Errorf("channel cache size %d is less than min: %d", - c.ChannelCacheSize, MinChannelCacheSize) - } - - return nil -} - -// Compile-time constraint to ensure Caches implements the Validator interface. -var _ Validator = (*Caches)(nil) diff --git a/lnd/lncfg/chain.go b/lnd/lncfg/chain.go deleted file mode 100644 index 02fb6890..00000000 --- a/lnd/lncfg/chain.go +++ /dev/null @@ -1,47 +0,0 @@ -package lncfg - -import ( - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -// Chain holds the configuration options for the daemon's chain settings. -type Chain struct { - Active bool `long:"active" description:"If the chain should be active or not."` - ChainDir string `long:"chaindir" description:"The directory to store the chain's data within."` - - Node string `long:"node" description:"The blockchain interface to use." choice:"btcd" choice:"bitcoind" choice:"neutrino" choice:"ltcd" choice:"litecoind"` - - MainNet bool `long:"mainnet" description:"Use the main network"` - TestNet3 bool `long:"testnet" description:"Use the test network"` - SimNet bool `long:"simnet" description:"Use the simulation test network"` - RegTest bool `long:"regtest" description:"Use the regression test network"` - - DefaultNumChanConfs int `long:"defaultchanconfs" description:"The default number of confirmations a channel must have before it's considered open. 
If this is not set, we will scale the value according to the channel size."` - DefaultRemoteDelay int `long:"defaultremotedelay" description:"The default number of blocks we will require our channel counterparty to wait before accessing its funds in case of unilateral close. If this is not set, we will scale the value according to the channel size."` - MaxLocalDelay uint16 `long:"maxlocaldelay" description:"The maximum blocks we will allow our funds to be timelocked before accessing its funds in case of unilateral close. If a peer proposes a value greater than this, we will reject the channel."` - MinHTLCIn lnwire.MilliSatoshi `long:"minhtlc" description:"The smallest HTLC we are willing to accept on our channels, in millisatoshi"` - MinHTLCOut lnwire.MilliSatoshi `long:"minhtlcout" description:"The smallest HTLC we are willing to send out on our channels, in millisatoshi"` - BaseFee lnwire.MilliSatoshi `long:"basefee" description:"The base fee in millisatoshi we will charge for forwarding payments on our channels"` - FeeRate lnwire.MilliSatoshi `long:"feerate" description:"The fee rate used when forwarding payments on our channels. The total fee charged is basefee + (amount * feerate / 1000000), where amount is the forwarded amount."` - TimeLockDelta uint32 `long:"timelockdelta" description:"The CLTV delta we will subtract from a forwarded HTLC's timelock value"` - DNSSeeds []string `long:"dnsseed" description:"The seed DNS server(s) to use for initial peer discovery. Must be specified as a '[,]' tuple where the SOA address is needed for DNS resolution through Tor but is optional for clearnet users. Multiple tuples can be specified, will overwrite the default seed servers."` -} - -// Validate performs validation on our chain config. 
-func (c *Chain) Validate(minTimeLockDelta uint32, minDelay uint16) er.R { - if c.TimeLockDelta < minTimeLockDelta { - return er.Errorf("timelockdelta must be at least %v", - minTimeLockDelta) - } - - // Check that our max local delay isn't set below some reasonable - // minimum value. We do this to prevent setting an unreasonably low - // delay, which would mean that the node would accept no channels. - if c.MaxLocalDelay < minDelay { - return er.Errorf("MaxLocalDelay must be at least: %v", - minDelay) - } - - return nil -} diff --git a/lnd/lncfg/config.go b/lnd/lncfg/config.go deleted file mode 100644 index bc1b9ff5..00000000 --- a/lnd/lncfg/config.go +++ /dev/null @@ -1,99 +0,0 @@ -package lncfg - -import ( - "os" - "os/user" - "path/filepath" - "strings" -) - -const ( - // DefaultConfigFilename is the default configuration file name lnd - // tries to load. - DefaultConfigFilename = "lnd.conf" - - // DefaultMaxPendingChannels is the default maximum number of incoming - // pending channels permitted per peer. - DefaultMaxPendingChannels = 1 - - // DefaultIncomingBroadcastDelta defines the number of blocks before the - // expiry of an incoming htlc at which we force close the channel. We - // only go to chain if we also have the preimage to actually pull in the - // htlc. BOLT #2 suggests 7 blocks. We use a few more for extra safety. - // Within this window we need to get our sweep or 2nd level success tx - // confirmed, because after that the remote party is also able to claim - // the htlc using the timeout path. - DefaultIncomingBroadcastDelta = 10 - - // DefaultFinalCltvRejectDelta defines the number of blocks before the - // expiry of an incoming exit hop htlc at which we cancel it back - // immediately. It is an extra safety measure over the final cltv - // requirement as it is defined in the invoice. It ensures that we - // cancel back htlcs that, when held on to, may cause us to force close - // the channel because we enter the incoming broadcast window. 
Bolt #11 - // suggests 9 blocks here. We use a few more for additional safety. - // - // There is still a small gap that remains between receiving the - // RevokeAndAck and canceling back. If a new block arrives within that - // window, we may still force close the channel. There is currently no - // way to reject an UpdateAddHtlc of which we already know that it will - // push us in the broadcast window. - DefaultFinalCltvRejectDelta = DefaultIncomingBroadcastDelta + 3 - - // DefaultOutgoingBroadcastDelta defines the number of blocks before the - // expiry of an outgoing htlc at which we force close the channel. We - // are not in a hurry to force close, because there is nothing to claim - // for us. We do need to time the htlc out, because there may be an - // incoming htlc that will time out too (albeit later). Bolt #2 suggests - // a value of -1 here, but we allow one block less to prevent potential - // confusion around the negative value. It means we force close the - // channel at exactly the htlc expiry height. - DefaultOutgoingBroadcastDelta = 0 - - // DefaultOutgoingCltvRejectDelta defines the number of blocks before - // the expiry of an outgoing htlc at which we don't want to offer it to - // the next peer anymore. If that happens, we cancel back the incoming - // htlc. This is to prevent the situation where we have an outstanding - // htlc that brings or will soon bring us inside the outgoing broadcast - // window and trigger us to force close the channel. Bolt #2 suggests a - // value of 0. We pad it a bit, to prevent a slow round trip to the next - // peer and a block arriving during that round trip to trigger force - // closure. - DefaultOutgoingCltvRejectDelta = DefaultOutgoingBroadcastDelta + 3 -) - -// CleanAndExpandPath expands environment variables and leading ~ in the -// passed path, cleans the result, and returns it. 
-// This function is taken from https://github.com/btcsuite/btcd -func CleanAndExpandPath(path string) string { - if path == "" { - return "" - } - - // Expand initial ~ to OS specific home directory. - if strings.HasPrefix(path, "~") { - var homeDir string - u, err := user.Current() - if err == nil { - homeDir = u.HomeDir - } else { - homeDir = os.Getenv("HOME") - } - - path = strings.Replace(path, "~", homeDir, 1) - } - - // NOTE: The os.ExpandEnv doesn't work with Windows-style %VARIABLE%, - // but the variables can still be expanded via POSIX-style $VARIABLE. - return filepath.Clean(os.ExpandEnv(path)) -} - -// NormalizeNetwork returns the common name of a network type used to create -// file paths. This allows differently versioned networks to use the same path. -func NormalizeNetwork(network string) string { - if strings.HasPrefix(network, "testnet") { - return "testnet" - } - - return network -} diff --git a/lnd/lncfg/db.go b/lnd/lncfg/db.go deleted file mode 100644 index 18cb7af0..00000000 --- a/lnd/lncfg/db.go +++ /dev/null @@ -1,108 +0,0 @@ -package lncfg - -import ( - "context" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/channeldb/kvdb" -) - -const ( - dbName = "channel.db" - BoltBackend = "bolt" - EtcdBackend = "etcd" -) - -// DB holds database configuration for LND. -type DB struct { - Backend string `long:"backend" description:"The selected database backend."` - - Etcd *kvdb.EtcdConfig `group:"etcd" namespace:"etcd" description:"Etcd settings."` - - Bolt *kvdb.BoltConfig `group:"bolt" namespace:"bolt" description:"Bolt settings."` -} - -// NewDB creates and returns a new default DB config. -func DefaultDB() *DB { - return &DB{ - Backend: BoltBackend, - Bolt: &kvdb.BoltConfig{ - AutoCompactMinAge: kvdb.DefaultBoltAutoCompactMinAge, - }, - } -} - -// Validate validates the DB config. 
-func (db *DB) Validate() er.R { - switch db.Backend { - case BoltBackend: - - case EtcdBackend: - if !db.Etcd.Embedded && db.Etcd.Host == "" { - return er.Errorf("etcd host must be set") - } - - default: - return er.Errorf("unknown backend, must be either \"%v\" or \"%v\"", - BoltBackend, EtcdBackend) - } - - return nil -} - -// DatabaseBackends is a two-tuple that holds the set of active database -// backends for the daemon. The two backends we expose are the local database -// backend, and the remote backend. The LocalDB attribute will always be -// populated. However, the remote DB will only be set if a replicated database -// is active. -type DatabaseBackends struct { - // LocalDB points to the local non-replicated backend. - LocalDB kvdb.Backend - - // RemoteDB points to a possibly networked replicated backend. If no - // replicated backend is active, then this pointer will be nil. - RemoteDB kvdb.Backend -} - -// GetBackends returns a set of kvdb.Backends as set in the DB config. The -// local database will ALWAYS be non-nil, while the remote database will only -// be populated if etcd is specified. -func (db *DB) GetBackends(ctx context.Context, dbPath string, - networkName string) (*DatabaseBackends, er.R) { - - var ( - localDB, remoteDB kvdb.Backend - err er.R - ) - - if db.Backend == EtcdBackend { - if db.Etcd.Embedded { - remoteDB, _, err = kvdb.GetEtcdTestBackend(dbPath, dbName) - } else { - // Prefix will separate key/values in the db. 
- remoteDB, err = kvdb.GetEtcdBackend(ctx, networkName, db.Etcd) - } - if err != nil { - return nil, err - } - } - - localDB, err = kvdb.GetBoltBackend(&kvdb.BoltBackendConfig{ - DBPath: dbPath, - DBFileName: dbName, - NoFreelistSync: !db.Bolt.SyncFreelist, - AutoCompact: db.Bolt.AutoCompact, - AutoCompactMinAge: db.Bolt.AutoCompactMinAge, - }) - if err != nil { - return nil, err - } - - return &DatabaseBackends{ - LocalDB: localDB, - RemoteDB: remoteDB, - }, nil -} - -// Compile-time constraint to ensure Workers implements the Validator interface. -var _ Validator = (*DB)(nil) diff --git a/lnd/lncfg/healthcheck.go b/lnd/lncfg/healthcheck.go deleted file mode 100644 index 1e5e03de..00000000 --- a/lnd/lncfg/healthcheck.go +++ /dev/null @@ -1,90 +0,0 @@ -package lncfg - -import ( - "time" - - "github.com/pkt-cash/pktd/btcutil/er" -) - -var ( - // MinHealthCheckInterval is the minimum interval we allow between - // health checks. - MinHealthCheckInterval = time.Minute - - // MinHealthCheckTimeout is the minimum timeout we allow for health - // check calls. - MinHealthCheckTimeout = time.Second - - // MinHealthCheckBackoff is the minimum back off we allow between health - // check retries. - MinHealthCheckBackoff = time.Second -) - -// HealthCheckConfig contains the configuration for the different health checks -// the lnd runs. -type HealthCheckConfig struct { - ChainCheck *CheckConfig `group:"chainbackend" namespace:"chainbackend"` - - DiskCheck *DiskCheckConfig `group:"diskspace" namespace:"diskspace"` -} - -// Validate checks the values configured for our health checks. 
-func (h *HealthCheckConfig) Validate() er.R { - if err := h.ChainCheck.validate("chain backend"); err != nil { - return err - } - - if err := h.DiskCheck.validate("disk space"); err != nil { - return err - } - - if h.DiskCheck.RequiredRemaining < 0 || - h.DiskCheck.RequiredRemaining >= 1 { - - return er.New("disk required ratio must be in [0:1)") - } - - return nil -} - -type CheckConfig struct { - Interval time.Duration `long:"interval" description:"How often to run a health check."` - - Attempts int `long:"attempts" description:"The number of calls we will make for the check before failing. Set this value to 0 to disable a check."` - - Timeout time.Duration `long:"timeout" description:"The amount of time we allow the health check to take before failing due to timeout."` - - Backoff time.Duration `long:"backoff" description:"The amount of time to back-off between failed health checks."` -} - -// validate checks the values in a health check config entry if it is enabled. -func (c *CheckConfig) validate(name string) er.R { - if c.Attempts == 0 { - return nil - } - - if c.Backoff < MinHealthCheckBackoff { - return er.Errorf("%v backoff: %v below minimum: %v", name, - c.Backoff, MinHealthCheckBackoff) - } - - if c.Timeout < MinHealthCheckTimeout { - return er.Errorf("%v timeout: %v below minimum: %v", name, - c.Timeout, MinHealthCheckTimeout) - } - - if c.Interval < MinHealthCheckInterval { - return er.Errorf("%v interval: %v below minimum: %v", name, - c.Interval, MinHealthCheckInterval) - } - - return nil -} - -// DiskCheckConfig contains configuration for ensuring that our node has -// sufficient disk space. 
-type DiskCheckConfig struct { - RequiredRemaining float64 `long:"diskrequired" description:"The minimum ratio of free disk space to total capacity that we allow before shutting lnd down safely."` - - *CheckConfig -} diff --git a/lnd/lncfg/interface.go b/lnd/lncfg/interface.go deleted file mode 100644 index d0e64817..00000000 --- a/lnd/lncfg/interface.go +++ /dev/null @@ -1,23 +0,0 @@ -package lncfg - -import "github.com/pkt-cash/pktd/btcutil/er" - -// Validator is a generic interface for validating sub configurations. -type Validator interface { - // Validate returns an error if a particular configuration is invalid or - // insane. - Validate() er.R -} - -// Validate accepts a variadic list of Validators and checks that each one -// passes its Validate method. An error is returned from the first Validator -// that fails. -func Validate(validators ...Validator) er.R { - for _, validator := range validators { - if err := validator.Validate(); err != nil { - return err - } - } - - return nil -} diff --git a/lnd/lncfg/monitoring_off.go b/lnd/lncfg/monitoring_off.go deleted file mode 100644 index 5ee21a12..00000000 --- a/lnd/lncfg/monitoring_off.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build !monitoring - -package lncfg - -// Prometheus configures the Prometheus exporter when monitoring is enabled. -// Monitoring is currently disabled. -type Prometheus struct{} - -// DefaultPrometheus is the default configuration for the Prometheus metrics -// exporter when monitoring is enabled. Monitoring is currently disabled. -func DefaultPrometheus() Prometheus { - return Prometheus{} -} - -// Enabled returns whether or not Prometheus monitoring is enabled. Monitoring -// is currently disabled, so Enabled will always return false. 
-func (p *Prometheus) Enabled() bool { - return false -} diff --git a/lnd/lncfg/monitoring_on.go b/lnd/lncfg/monitoring_on.go deleted file mode 100644 index dc31e51d..00000000 --- a/lnd/lncfg/monitoring_on.go +++ /dev/null @@ -1,30 +0,0 @@ -// +build monitoring - -package lncfg - -// Prometheus is the set of configuration data that specifies the listening -// address of the Prometheus exporter. -type Prometheus struct { - // Listen is the listening address that we should use to allow the main - // Prometheus server to scrape our metrics. - Listen string `long:"listen" description:"the interface we should listen on for Prometheus"` - - // Enable indicates whether to export lnd gRPC performance metrics to - // Prometheus. Default is false. - Enable bool `long:"enable" description:"enable Prometheus exporting of lnd gRPC performance metrics."` -} - -// DefaultPrometheus is the default configuration for the Prometheus metrics -// exporter. -func DefaultPrometheus() Prometheus { - return Prometheus{ - Listen: "127.0.0.1:8989", - Enable: false, - } -} - -// Enabled returns whether or not Prometheus monitoring is enabled. Monitoring -// is disabled by default, but may be enabled by the user. -func (p *Prometheus) Enabled() bool { - return p.Enable -} diff --git a/lnd/lncfg/neutrino.go b/lnd/lncfg/neutrino.go deleted file mode 100644 index dc7bb581..00000000 --- a/lnd/lncfg/neutrino.go +++ /dev/null @@ -1,17 +0,0 @@ -package lncfg - -import "time" - -// Neutrino holds the configuration options for the daemon's connection to -// neutrino. -type Neutrino struct { - AddPeers []string `short:"a" long:"addpeer" description:"Add a peer to connect with at startup"` - ConnectPeers []string `long:"connect" description:"Connect only to the specified peers at startup"` - MaxPeers int `long:"maxpeers" description:"Max number of inbound and outbound peers"` - BanDuration time.Duration `long:"banduration" description:"How long to ban misbehaving peers. Valid time units are {s, m, h}. 
Minimum 1 second"` - BanThreshold uint32 `long:"banthreshold" description:"Maximum allowed ban score before disconnecting and banning misbehaving peers."` - FeeURL string `long:"feeurl" description:"DEPRECATED: Optional URL for fee estimation. If a URL is not specified, static fees will be used for estimation."` - AssertFilterHeader string `long:"assertfilterheader" description:"Optional filter header in height:hash format to assert the state of neutrino's filter header chain on startup. If the assertion does not hold, then the filter header chain will be re-synced from the genesis block."` - UserAgentName string `long:"useragentname" description:"Used to help identify ourselves to other bitcoin peers"` - UserAgentVersion string `long:"useragentversion" description:"Used to help identify ourselves to other bitcoin peers"` -} diff --git a/lnd/lncfg/protocol.go b/lnd/lncfg/protocol.go deleted file mode 100644 index 5e1be5e6..00000000 --- a/lnd/lncfg/protocol.go +++ /dev/null @@ -1,26 +0,0 @@ -package lncfg - -// ProtocolOptions is a struct that we use to be able to test backwards -// compatibility of protocol additions, while defaulting to the latest within -// lnd, or to enable experimental protocol changes. -type ProtocolOptions struct { - // LegacyProtocol is a sub-config that houses all the legacy protocol - // options. These are mostly used for integration tests as most modern - // nodes shuld always run with them on by default. - LegacyProtocol `group:"legacy" namespace:"legacy"` - - // ExperimentalProtocol is a sub-config that houses any experimental - // protocol features that also require a build-tag to activate. - ExperimentalProtocol - - // WumboChans should be set if we want to enable support for wumbo - // (channels larger than 0.16 BTC) channels, which is the opposite of - // mini. 
- WumboChans bool `long:"wumbo-channels" description:"if set, then lnd will create and accept requests for channels larger chan 0.16 BTC"` -} - -// Wumbo returns true if lnd should permit the creation and acceptance of wumbo -// channels. -func (l *ProtocolOptions) Wumbo() bool { - return l.WumboChans -} diff --git a/lnd/lncfg/protocol_experimental_off.go b/lnd/lncfg/protocol_experimental_off.go deleted file mode 100644 index 20d1ce48..00000000 --- a/lnd/lncfg/protocol_experimental_off.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build !dev - -package lncfg - -// ExperimentalProtocol is a sub-config that houses any experimental protocol -// features that also require a build-tag to activate. -type ExperimentalProtocol struct { -} - -// AnchorCommitments returns true if support for the anchor commitment type -// should be signaled. -func (l *ExperimentalProtocol) AnchorCommitments() bool { - return false -} diff --git a/lnd/lncfg/protocol_experimental_on.go b/lnd/lncfg/protocol_experimental_on.go deleted file mode 100644 index dac8cfea..00000000 --- a/lnd/lncfg/protocol_experimental_on.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build dev - -package lncfg - -// ExperimentalProtocol is a sub-config that houses any experimental protocol -// features that also require a build-tag to activate. -type ExperimentalProtocol struct { - // Anchors should be set if we want to support opening or accepting - // channels having the anchor commitment type. - Anchors bool `long:"anchors" description:"EXPERIMENTAL: enable experimental support for anchor commitments, won't work with watchtowers"` -} - -// AnchorCommitments returns true if support for the anchor commitment type -// should be signaled. 
-func (l *ExperimentalProtocol) AnchorCommitments() bool { - return l.Anchors -} diff --git a/lnd/lncfg/protocol_legacy_off.go b/lnd/lncfg/protocol_legacy_off.go deleted file mode 100644 index 060569d8..00000000 --- a/lnd/lncfg/protocol_legacy_off.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build !dev - -package lncfg - -// Legacy is a sub-config that houses all the legacy protocol options. These -// are mostly used for integration tests as most modern nodes shuld always run -// with them on by default. -type LegacyProtocol struct { -} - -// LegacyOnion returns true if the old legacy onion format should be used when -// we're an intermediate or final hop. This controls if we set the -// TLVOnionPayloadOptional bit or not. -func (l *LegacyProtocol) LegacyOnion() bool { - return false -} - -// NoStaticRemoteKey returns true if the old commitment format with a tweaked -// remote key should be used for new funded channels. -func (l *LegacyProtocol) NoStaticRemoteKey() bool { - return false -} diff --git a/lnd/lncfg/protocol_legacy_on.go b/lnd/lncfg/protocol_legacy_on.go deleted file mode 100644 index 712d5fed..00000000 --- a/lnd/lncfg/protocol_legacy_on.go +++ /dev/null @@ -1,32 +0,0 @@ -// +build dev - -package lncfg - -// Legacy is a sub-config that houses all the legacy protocol options. These -// are mostly used for integration tests as most modern nodes shuld always run -// with them on by default. -type LegacyProtocol struct { - // LegacyOnionFormat if set to true, then we won't signal - // TLVOnionPayloadOptional. As a result, nodes that include us in the - // route won't use the new modern onion framing. - LegacyOnionFormat bool `long:"onion" description:"force node to not advertise the new modern TLV onion format"` - - // CommitmentTweak guards if we should use the old legacy commitment - // protocol, or the newer variant that doesn't have a tweak for the - // remote party's output in the commitment. 
If set to true, then we - // won't signal StaticRemoteKeyOptional. - CommitmentTweak bool `long:"committweak" description:"force node to not advertise the new commitment format"` -} - -// LegacyOnion returns true if the old legacy onion format should be used when -// we're an intermediate or final hop. This controls if we set the -// TLVOnionPayloadOptional bit or not. -func (l *LegacyProtocol) LegacyOnion() bool { - return l.LegacyOnionFormat -} - -// NoStaticRemoteKey returns true if the old commitment format with a tweaked -// remote key should be used for new funded channels. -func (l *LegacyProtocol) NoStaticRemoteKey() bool { - return l.CommitmentTweak -} diff --git a/lnd/lncfg/routing.go b/lnd/lncfg/routing.go deleted file mode 100644 index 2eabb38e..00000000 --- a/lnd/lncfg/routing.go +++ /dev/null @@ -1,6 +0,0 @@ -package lncfg - -// Routing holds the configuration options for routing. -type Routing struct { - AssumeChannelValid bool `long:"assumechanvalid" description:"Skip checking channel spentness during graph validation. This speedup comes at the risk of using an unvalidated view of the network for routing. (default: false)"` -} diff --git a/lnd/lncfg/tor.go b/lnd/lncfg/tor.go deleted file mode 100644 index e7070c38..00000000 --- a/lnd/lncfg/tor.go +++ /dev/null @@ -1,16 +0,0 @@ -package lncfg - -// Tor holds the configuration options for the daemon's connection to tor. 
-type Tor struct { - Active bool `long:"active" description:"Allow outbound and inbound connections to be routed through Tor"` - SOCKS string `long:"socks" description:"The host:port that Tor's exposed SOCKS5 proxy is listening on"` - DNS string `long:"dns" description:"The DNS server as host:port that Tor will use for SRV queries - NOTE must have TCP resolution enabled"` - StreamIsolation bool `long:"streamisolation" description:"Enable Tor stream isolation by randomizing user credentials for each connection."` - Control string `long:"control" description:"The host:port that Tor is listening on for Tor control connections"` - TargetIPAddress string `long:"targetipaddress" description:"IP address that Tor should use as the target of the hidden service"` - Password string `long:"password" description:"The password used to arrive at the HashedControlPassword for the control port. If provided, the HASHEDPASSWORD authentication method will be used instead of the SAFECOOKIE one."` - V2 bool `long:"v2" description:"Automatically set up a v2 onion service to listen for inbound connections"` - V3 bool `long:"v3" description:"Automatically set up a v3 onion service to listen for inbound connections"` - PrivateKeyPath string `long:"privatekeypath" description:"The path to the private key of the onion service being created"` - WatchtowerKeyPath string `long:"watchtowerkeypath" description:"The path to the private key of the watchtower onion service being created"` -} diff --git a/lnd/lncfg/watchtower.go b/lnd/lncfg/watchtower.go deleted file mode 100644 index 05f22952..00000000 --- a/lnd/lncfg/watchtower.go +++ /dev/null @@ -1,13 +0,0 @@ -package lncfg - -import "github.com/pkt-cash/pktd/lnd/watchtower" - -// Watchtower holds the daemon specific configuration parameters for running a -// watchtower that shares resources with the daemon. 
-type Watchtower struct { - Active bool `long:"active" description:"If the watchtower should be active or not"` - - TowerDir string `long:"towerdir" description:"Directory of the watchtower.db"` - - watchtower.Conf -} diff --git a/lnd/lncfg/workers.go b/lnd/lncfg/workers.go deleted file mode 100644 index 5bcd88dd..00000000 --- a/lnd/lncfg/workers.go +++ /dev/null @@ -1,52 +0,0 @@ -package lncfg - -import "github.com/pkt-cash/pktd/btcutil/er" - -const ( - // DefaultReadWorkers is the default maximum number of concurrent - // workers used by the daemon's read pool. - DefaultReadWorkers = 100 - - // DefaultWriteWorkers is the default maximum number of concurrent - // workers used by the daemon's write pool. - DefaultWriteWorkers = 8 - - // DefaultSigWorkers is the default maximum number of concurrent workers - // used by the daemon's sig pool. - DefaultSigWorkers = 8 -) - -// Workers exposes CLI configuration for turning resources consumed by worker -// pools. -type Workers struct { - // Read is the maximum number of concurrent read pool workers. - Read int `long:"read" description:"Maximum number of concurrent read pool workers. This number should be proportional to the number of peers."` - - // Write is the maximum number of concurrent write pool workers. - Write int `long:"write" description:"Maximum number of concurrent write pool workers. This number should be proportional to the number of CPUs on the host. "` - - // Sig is the maximum number of concurrent sig pool workers. - Sig int `long:"sig" description:"Maximum number of concurrent sig pool workers. This number should be proportional to the number of CPUs on the host."` -} - -// Validate checks the Workers configuration to ensure that the input values are -// sane. 
-func (w *Workers) Validate() er.R { - if w.Read <= 0 { - return er.Errorf("number of read workers (%d) must be "+ - "positive", w.Read) - } - if w.Write <= 0 { - return er.Errorf("number of write workers (%d) must be "+ - "positive", w.Write) - } - if w.Sig <= 0 { - return er.Errorf("number of sig workers (%d) must be "+ - "positive", w.Sig) - } - - return nil -} - -// Compile-time constraint to ensure Workers implements the Validator interface. -var _ Validator = (*Workers)(nil) diff --git a/lnd/lncfg/workers_test.go b/lnd/lncfg/workers_test.go deleted file mode 100644 index 8bd7df7b..00000000 --- a/lnd/lncfg/workers_test.go +++ /dev/null @@ -1,102 +0,0 @@ -package lncfg_test - -import ( - "testing" - - "github.com/pkt-cash/pktd/lnd/lncfg" -) - -const ( - maxUint = ^uint(0) - maxInt = int(maxUint >> 1) - minInt = -maxInt - 1 -) - -// TestValidateWorkers asserts that validating the Workers config only succeeds -// if all fields specify a positive number of workers. -func TestValidateWorkers(t *testing.T) { - tests := []struct { - name string - cfg *lncfg.Workers - valid bool - }{ - { - name: "min valid", - cfg: &lncfg.Workers{ - Read: 1, - Write: 1, - Sig: 1, - }, - valid: true, - }, - { - name: "max valid", - cfg: &lncfg.Workers{ - Read: maxInt, - Write: maxInt, - Sig: maxInt, - }, - valid: true, - }, - { - name: "read max invalid", - cfg: &lncfg.Workers{ - Read: 0, - Write: 1, - Sig: 1, - }, - }, - { - name: "write max invalid", - cfg: &lncfg.Workers{ - Read: 1, - Write: 0, - Sig: 1, - }, - }, - { - name: "sig max invalid", - cfg: &lncfg.Workers{ - Read: 1, - Write: 1, - Sig: 0, - }, - }, - { - name: "read min invalid", - cfg: &lncfg.Workers{ - Read: minInt, - Write: 1, - Sig: 1, - }, - }, - { - name: "write min invalid", - cfg: &lncfg.Workers{ - Read: 1, - Write: minInt, - Sig: 1, - }, - }, - { - name: "sig min invalid", - cfg: &lncfg.Workers{ - Read: 1, - Write: 1, - Sig: minInt, - }, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t 
*testing.T) { - err := test.cfg.Validate() - switch { - case test.valid && err != nil: - t.Fatalf("valid config was invalid: %v", err) - case !test.valid && err == nil: - t.Fatalf("invalid config was valid") - } - }) - } -} diff --git a/lnd/lncfg/wtclient.go b/lnd/lncfg/wtclient.go deleted file mode 100644 index c6f5029c..00000000 --- a/lnd/lncfg/wtclient.go +++ /dev/null @@ -1,42 +0,0 @@ -package lncfg - -import ( - "fmt" - - "github.com/pkt-cash/pktd/btcutil/er" -) - -// WtClient holds the configuration options for the daemon's watchtower client. -type WtClient struct { - // Active determines whether a watchtower client should be created to - // back up channel states with registered watchtowers. - Active bool `long:"active" description:"Whether the daemon should use private watchtowers to back up revoked channel states."` - - // PrivateTowerURIs specifies the lightning URIs of the towers the - // watchtower client should send new backups to. - PrivateTowerURIs []string `long:"private-tower-uris" description:"(Deprecated) Specifies the URIs of private watchtowers to use in backing up revoked states. URIs must be of the form @. Only 1 URI is supported at this time, if none are provided the tower will not be enabled."` - - // SweepFeeRate specifies the fee rate in sat/byte to be used when - // constructing justice transactions sent to the tower. - SweepFeeRate uint64 `long:"sweep-fee-rate" description:"Specifies the fee rate in sat/byte to be used when constructing justice transactions sent to the watchtower."` -} - -// Validate ensures the user has provided a valid configuration. -// -// NOTE: Part of the Validator interface. -func (c *WtClient) Validate() er.R { - // TODO(wilmer): remove in v0.9.0 release. - if len(c.PrivateTowerURIs) > 0 { - fmt.Println("The `wtclient.private-tower-uris` option has " + - "been deprecated as of v0.8.0-beta and will be " + - "removed in v0.9.0-beta. 
To setup watchtowers for " + - "the client, set `wtclient.active` and run " + - "`lncli wtclient -h` for more information.") - } - - return nil -} - -// Compile-time constraint to ensure WtClient implements the Validator -// interface. -var _ Validator = (*WtClient)(nil) diff --git a/lnd/lnd.go b/lnd/lnd.go deleted file mode 100644 index 83308a62..00000000 --- a/lnd/lnd.go +++ /dev/null @@ -1,1638 +0,0 @@ -// Copyright (c) 2013-2017 The btcsuite developers -// Copyright (c) 2015-2016 The Decred developers -// Copyright (C) 2015-2017 The Lightning Network Developers - -package lnd - -import ( - "context" - "crypto/tls" - "fmt" - "io/ioutil" - "net" - "net/http" - _ "net/http/pprof" // Blank import to set up profiling HTTP handlers. - "os" - "path/filepath" - "runtime/pprof" - "strconv" - "strings" - "sync" - "time" - - proxy "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/neutrino" - "github.com/pkt-cash/pktd/neutrino/headerfs" - "github.com/pkt-cash/pktd/pktconfig/version" - "github.com/pkt-cash/pktd/pktlog/log" - "github.com/pkt-cash/pktd/pktwallet/wallet" - "github.com/pkt-cash/pktd/pktwallet/walletdb" - "golang.org/x/crypto/acme/autocert" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" - "gopkg.in/macaroon-bakery.v2/bakery" - "gopkg.in/macaroon.v2" - - "github.com/pkt-cash/pktd/lnd/autopilot" - "github.com/pkt-cash/pktd/lnd/cert" - "github.com/pkt-cash/pktd/lnd/chainreg" - "github.com/pkt-cash/pktd/lnd/chanacceptor" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/keychain" - "github.com/pkt-cash/pktd/lnd/lncfg" - "github.com/pkt-cash/pktd/lnd/lnrpc" - "github.com/pkt-cash/pktd/lnd/lnwallet" - "github.com/pkt-cash/pktd/lnd/lnwallet/btcwallet" - "github.com/pkt-cash/pktd/lnd/macaroons" - "github.com/pkt-cash/pktd/lnd/metaservice" - "github.com/pkt-cash/pktd/lnd/signal" 
- "github.com/pkt-cash/pktd/lnd/tor" - "github.com/pkt-cash/pktd/lnd/walletunlocker" - "github.com/pkt-cash/pktd/lnd/watchtower" - "github.com/pkt-cash/pktd/lnd/watchtower/wtdb" -) - -// WalletUnlockerAuthOptions returns a list of DialOptions that can be used to -// authenticate with the wallet unlocker service. -// -// NOTE: This should only be called after the WalletUnlocker listener has -// signaled it is ready. -func WalletUnlockerAuthOptions(cfg *Config) ([]grpc.DialOption, er.R) { - creds, err := credentials.NewClientTLSFromFile(cfg.TLSCertPath, "") - if err != nil { - return nil, er.Errorf("unable to read TLS cert: %v", err) - } - - // Create a dial options array with the TLS credentials. - opts := []grpc.DialOption{ - grpc.WithTransportCredentials(creds), - } - - return opts, nil -} - -// AdminAuthOptions returns a list of DialOptions that can be used to -// authenticate with the RPC server with admin capabilities. -// -// NOTE: This should only be called after the RPCListener has signaled it is -// ready. -func AdminAuthOptions(cfg *Config) ([]grpc.DialOption, er.R) { - creds, err := credentials.NewClientTLSFromFile(cfg.TLSCertPath, "") - if err != nil { - return nil, er.Errorf("unable to read TLS cert: %v", err) - } - - // Create a dial options array. - opts := []grpc.DialOption{ - grpc.WithTransportCredentials(creds), - } - - // Get the admin macaroon if macaroons are active. - if !cfg.NoMacaroons { - // Load the adming macaroon file. - macBytes, err := ioutil.ReadFile(cfg.AdminMacPath) - if err != nil { - return nil, er.Errorf("unable to read macaroon "+ - "path (check the network setting!): %v", err) - } - - mac := &macaroon.Macaroon{} - if err = mac.UnmarshalBinary(macBytes); err != nil { - return nil, er.Errorf("unable to decode macaroon: %v", - err) - } - - // Now we append the macaroon credentials to the dial options. 
- cred := macaroons.NewMacaroonCredential(mac) - opts = append(opts, grpc.WithPerRPCCredentials(cred)) - } - - return opts, nil -} - -// GrpcRegistrar is an interface that must be satisfied by an external subserver -// that wants to be able to register its own gRPC server onto lnd's main -// grpc.Server instance. -type GrpcRegistrar interface { - // RegisterGrpcSubserver is called for each net.Listener on which lnd - // creates a grpc.Server instance. External subservers implementing this - // method can then register their own gRPC server structs to the main - // server instance. - RegisterGrpcSubserver(*grpc.Server) er.R -} - -// RestRegistrar is an interface that must be satisfied by an external subserver -// that wants to be able to register its own REST mux onto lnd's main -// proxy.ServeMux instance. -type RestRegistrar interface { - // RegisterRestSubserver is called after lnd creates the main - // proxy.ServeMux instance. External subservers implementing this method - // can then register their own REST proxy stubs to the main server - // instance. - RegisterRestSubserver(context.Context, *proxy.ServeMux, string, - []grpc.DialOption) er.R -} - -// RPCSubserverConfig is a struct that can be used to register an external -// subserver with the custom permissions that map to the gRPC server that is -// going to be registered with the GrpcRegistrar. -type RPCSubserverConfig struct { - // Registrar is a callback that is invoked for each net.Listener on - // which lnd creates a grpc.Server instance. - Registrar GrpcRegistrar - - // Permissions is the permissions required for the external subserver. - // It is a map between the full HTTP URI of each RPC and its required - // macaroon permissions. If multiple action/entity tuples are specified - // per URI, they are all required. See rpcserver.go for a list of valid - // action and entity values. 
- Permissions map[string][]bakery.Op - - // MacaroonValidator is a custom macaroon validator that should be used - // instead of the default lnd validator. If specified, the custom - // validator is used for all URIs specified in the above Permissions - // map. - MacaroonValidator macaroons.MacaroonValidator -} - -// ListenerWithSignal is a net.Listener that has an additional Ready channel that -// will be closed when a server starts listening. -type ListenerWithSignal struct { - net.Listener - - // Ready will be closed by the server listening on Listener. - Ready chan struct{} - - // ExternalRPCSubserverCfg is optional and specifies the registration - // callback and permissions to register external gRPC subservers. - ExternalRPCSubserverCfg *RPCSubserverConfig - - // ExternalRestRegistrar is optional and specifies the registration - // callback to register external REST subservers. - ExternalRestRegistrar RestRegistrar -} - -// ListenerCfg is a wrapper around custom listeners that can be passed to lnd -// when calling its main method. -type ListenerCfg struct { - // WalletUnlocker can be set to the listener to use for the wallet - // unlocker. If nil a regular network listener will be created. - WalletUnlocker *ListenerWithSignal - - // RPCListener can be set to the listener to use for the RPC server. If - // nil a regular network listener will be created. - RPCListener *ListenerWithSignal -} - -// rpcListeners is a function type used for closures that fetches a set of RPC -// listeners for the current configuration. If no custom listeners are present, -// this should return normal listeners from the RPC endpoints defined in the -// config. The second return value us a closure that will close the fetched -// listeners. -type rpcListeners func() ([]*ListenerWithSignal, func(), er.R) - -// Main is the true entry point for lnd. It accepts a fully populated and -// validated main configuration struct and an optional listener config struct. 
-// This function starts all main system components then blocks until a signal -// is received on the shutdownChan at which point everything is shut down again. -func Main(cfg *Config, lisCfg ListenerCfg, shutdownChan <-chan struct{}) er.R { - // Show version at startup. - log.Infof("Version: %s debuglevel=%s", - version.Version(), cfg.DebugLevel) - - var network string - switch { - case cfg.Bitcoin.TestNet3 || cfg.Litecoin.TestNet3: - network = "testnet" - - case cfg.Bitcoin.MainNet || cfg.Litecoin.MainNet || cfg.Pkt.MainNet: - network = "mainnet" - - case cfg.Bitcoin.SimNet || cfg.Litecoin.SimNet: - network = "simnet" - - case cfg.Bitcoin.RegTest || cfg.Litecoin.RegTest: - network = "regtest" - } - - log.Infof("Active chain: %v (network=%v)", - strings.Title(cfg.registeredChains.PrimaryChain().String()), - network, - ) - - // Enable http profiling server if requested. - if cfg.Profile != "" { - go func() { - listenAddr := net.JoinHostPort("", cfg.Profile) - profileRedirect := http.RedirectHandler("/debug/pprof", - http.StatusSeeOther) - http.Handle("/", profileRedirect) - fmt.Println(http.ListenAndServe(listenAddr, nil)) - }() - } - - // Write cpu profile if requested. - if cfg.CPUProfile != "" { - f, err := os.Create(cfg.CPUProfile) - if err != nil { - err := er.Errorf("unable to create CPU profile: %v", - err) - log.Error(err) - return err - } - pprof.StartCPUProfile(f) - defer f.Close() - defer pprof.StopCPUProfile() - } - - ctx := context.Background() - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - localChanDB, remoteChanDB, cleanUp, err := initializeDatabases(ctx, cfg) - switch { - case channeldb.ErrDryRunMigrationOK.Is(err): - log.Infof("%v, exiting", err) - return nil - case err != nil: - return er.Errorf("unable to open databases: %v", err) - } - - defer cleanUp() - - // Only process macaroons if --no-macaroons isn't set. 
- serverOpts, restDialOpts, restListen, cleanUp, err := getTLSConfig(cfg) - if err != nil { - err := er.Errorf("unable to load TLS credentials: %v", err) - log.Error(err) - return err - } - - defer cleanUp() - - // We use the first RPC listener as the destination for our REST proxy. - // If the listener is set to listen on all interfaces, we replace it - // with localhost, as we cannot dial it directly. - restProxyDest := cfg.RPCListeners[0].String() - switch { - case strings.Contains(restProxyDest, "0.0.0.0"): - restProxyDest = strings.Replace( - restProxyDest, "0.0.0.0", "127.0.0.1", 1, - ) - - case strings.Contains(restProxyDest, "[::]"): - restProxyDest = strings.Replace( - restProxyDest, "[::]", "[::1]", 1, - ) - } - - // Before starting the wallet, we'll create and start our Neutrino - // light client instance, if enabled, in order to allow it to sync - // while the rest of the daemon continues startup. - mainChain := cfg.Bitcoin - if cfg.registeredChains.PrimaryChain() == chainreg.LitecoinChain { - mainChain = cfg.Litecoin - } - if cfg.registeredChains.PrimaryChain() == chainreg.PktChain { - mainChain = cfg.Pkt - } - var neutrinoCS *neutrino.ChainService - if mainChain.Node == "neutrino" { - neutrinoBackend, neutrinoCleanUp, err := initNeutrinoBackend( - cfg, mainChain.ChainDir, - ) - if err != nil { - err := er.Errorf("unable to initialize neutrino "+ - "backend: %v", err) - log.Error(err) - return err - } - defer neutrinoCleanUp() - neutrinoCS = neutrinoBackend - } - - var ( - walletInitParams WalletUnlockParams - shutdownUnlocker = func() {} - privateWalletPw = lnwallet.DefaultPrivatePassphrase - publicWalletPw = lnwallet.DefaultPublicPassphrase - ) - - // If the user didn't request a seed, then we'll manually assume a - // wallet birthday of now, as otherwise the seed would've specified - // this information. - walletInitParams.Birthday = time.Now() - - // getListeners is a closure that creates listeners from the - // RPCListeners defined in the config. 
It also returns a cleanup - // closure and the server options to use for the GRPC server. - getListeners := func() ([]*ListenerWithSignal, func(), er.R) { - var grpcListeners []*ListenerWithSignal - for _, grpcEndpoint := range cfg.RPCListeners { - // Start a gRPC server listening for HTTP/2 - // connections. - lis, err := lncfg.ListenOnAddress(grpcEndpoint) - if err != nil { - log.Errorf("unable to listen on %s", - grpcEndpoint) - return nil, nil, err - } - grpcListeners = append( - grpcListeners, &ListenerWithSignal{ - Listener: lis, - Ready: make(chan struct{}), - }) - } - - cleanup := func() { - for _, lis := range grpcListeners { - lis.Close() - } - } - return grpcListeners, cleanup, nil - } - - // walletUnlockerListeners is a closure we'll hand to the wallet - // unlocker, that will be called when it needs listeners for its GPRC - // server. - walletUnlockerListeners := func() ([]*ListenerWithSignal, func(), - er.R) { - - // If we have chosen to start with a dedicated listener for the - // wallet unlocker, we return it directly. - if lisCfg.WalletUnlocker != nil { - return []*ListenerWithSignal{lisCfg.WalletUnlocker}, - func() {}, nil - } - - // Otherwise we'll return the regular listeners. - return getListeners() - } - - // Set up meta Service pass neutrino for getinfo - metaService := metaservice.NewMetaService(neutrinoCS) - - // We wait until the user provides a password over RPC. In case lnd is - // started with the --noseedbackup flag, we use the default password - // for wallet encryption. 
- - if !cfg.NoSeedBackup { - params, shutdown, err := waitForWalletPassword( - cfg, cfg.RESTListeners, serverOpts, restDialOpts, - restProxyDest, restListen, walletUnlockerListeners, metaService, - ) - if err != nil { - err := er.Errorf("unable to set up wallet password "+ - "listeners: %v", err) - log.Error(err) - return err - } - - walletInitParams = *params - shutdownUnlocker = shutdown - privateWalletPw = walletInitParams.Password - publicWalletPw = walletInitParams.Password - //Pass wallet to metaservice for getinfo2 - metaService.SetWallet(walletInitParams.Wallet) - defer func() { - if err := walletInitParams.UnloadWallet(); err != nil { - log.Errorf("Could not unload wallet: %v", err) - } - }() - - if walletInitParams.RecoveryWindow > 0 { - log.Infof("Wallet recovery mode enabled with "+ - "address lookahead of %d addresses", - walletInitParams.RecoveryWindow) - } - } - - var macaroonService *macaroons.Service - if !cfg.NoMacaroons { - // Create the macaroon authentication/authorization service. - macaroonService, err = macaroons.NewService( - cfg.networkDir, "lnd", walletInitParams.StatelessInit, - macaroons.IPLockChecker, - ) - if err != nil { - err := er.Errorf("unable to set up macaroon "+ - "authentication: %v", err) - log.Error(err) - return err - } - defer macaroonService.Close() - - // Try to unlock the macaroon store with the private password. - // Ignore ErrAlreadyUnlocked since it could be unlocked by the - // wallet unlocker. - err = macaroonService.CreateUnlock(&privateWalletPw) - if err != nil && !macaroons.ErrAlreadyUnlocked.Is(err) { - err := er.Errorf("unable to unlock macaroons: %v", err) - log.Error(err) - return err - } - - // In case we actually needed to unlock the wallet, we now need - // to create an instance of the admin macaroon and send it to - // the unlocker so it can forward it to the user. In no seed - // backup mode, there's nobody listening on the channel and we'd - // block here forever. 
- if !cfg.NoSeedBackup { - adminMacBytes, err := bakeMacaroon( - ctx, macaroonService, adminPermissions(), - ) - if err != nil { - return err - } - - // The channel is buffered by one element so writing - // should not block here. - walletInitParams.MacResponseChan <- adminMacBytes - } - - // If the user requested a stateless initialization, no macaroon - // files should be created. - if !walletInitParams.StatelessInit && - !fileExists(cfg.AdminMacPath) && - !fileExists(cfg.ReadMacPath) && - !fileExists(cfg.InvoiceMacPath) { - - // Create macaroon files for lncli to use if they don't - // exist. - err = genMacaroons( - ctx, macaroonService, cfg.AdminMacPath, - cfg.ReadMacPath, cfg.InvoiceMacPath, - ) - if err != nil { - err := er.Errorf("unable to create macaroons "+ - "%v", err) - log.Error(err) - return err - } - } - - // As a security service to the user, if they requested - // stateless initialization and there are macaroon files on disk - // we log a warning. - if walletInitParams.StatelessInit { - msg := "Found %s macaroon on disk (%s) even though " + - "--stateless_init was requested. Unencrypted " + - "state is accessible by the host system. You " + - "should change the password and use " + - "--new_mac_root_key with --stateless_init to " + - "clean up and invalidate old macaroons." - - if fileExists(cfg.AdminMacPath) { - log.Warnf(msg, "admin", cfg.AdminMacPath) - } - if fileExists(cfg.ReadMacPath) { - log.Warnf(msg, "readonly", cfg.ReadMacPath) - } - if fileExists(cfg.InvoiceMacPath) { - log.Warnf(msg, "invoice", cfg.InvoiceMacPath) - } - } - } - - // Now we're definitely done with the unlocker, shut it down so we can - // start the main RPC service later. - shutdownUnlocker() - - // With the information parsed from the configuration, create valid - // instances of the pertinent interfaces required to operate the - // Lightning Network Daemon. 
- // - // When we create the chain control, we need storage for the height - // hints and also the wallet itself, for these two we want them to be - // replicated, so we'll pass in the remote channel DB instance. - chainControlCfg := &chainreg.Config{ - Bitcoin: cfg.Bitcoin, - Litecoin: cfg.Litecoin, - Pkt: cfg.Pkt, - PrimaryChain: cfg.registeredChains.PrimaryChain, - HeightHintCacheQueryDisable: cfg.HeightHintCacheQueryDisable, - NeutrinoMode: cfg.NeutrinoMode, - BitcoindMode: cfg.BitcoindMode, - LitecoindMode: cfg.LitecoindMode, - BtcdMode: cfg.BtcdMode, - LtcdMode: cfg.LtcdMode, - LocalChanDB: localChanDB, - RemoteChanDB: remoteChanDB, - PrivateWalletPw: privateWalletPw, - PublicWalletPw: publicWalletPw, - Birthday: walletInitParams.Birthday, - RecoveryWindow: walletInitParams.RecoveryWindow, - Wallet: walletInitParams.Wallet, - NeutrinoCS: neutrinoCS, - ActiveNetParams: cfg.ActiveNetParams, - FeeURL: cfg.FeeURL, - } - - activeChainControl, err := chainreg.NewChainControl(chainControlCfg) - if err != nil { - err := er.Errorf("unable to create chain control: %v", err) - log.Error(err) - return err - } - - // Finally before we start the server, we'll register the "holy - // trinity" of interface for our current "home chain" with the active - // chainRegistry interface. - primaryChain := cfg.registeredChains.PrimaryChain() - cfg.registeredChains.RegisterChain(primaryChain, activeChainControl) - - // TODO(roasbeef): add rotation - idKeyDesc, err := activeChainControl.KeyRing.DeriveKey( - keychain.KeyLocator{ - Family: keychain.KeyFamilyNodeKey, - Index: 0, - }, - ) - if err != nil { - err := er.Errorf("error deriving node key: %v", err) - log.Error(err) - return err - } - - if cfg.Tor.Active { - log.Infof("Proxying all network traffic via Tor "+ - "(stream_isolation=%v)! NOTE: Ensure the backend node "+ - "is proxying over Tor as well", cfg.Tor.StreamIsolation) - } - - // If the watchtower client should be active, open the client database. 
- // This is done here so that Close always executes when lndMain returns. - var towerClientDB *wtdb.ClientDB - if cfg.WtClient.Active { - var err er.R - towerClientDB, err = wtdb.OpenClientDB(cfg.localDatabaseDir()) - if err != nil { - err := er.Errorf("unable to open watchtower client "+ - "database: %v", err) - log.Error(err) - return err - } - defer towerClientDB.Close() - } - - // If tor is active and either v2 or v3 onion services have been specified, - // make a tor controller and pass it into both the watchtower server and - // the regular lnd server. - var torController *tor.Controller - if cfg.Tor.Active && (cfg.Tor.V2 || cfg.Tor.V3) { - torController = tor.NewController( - cfg.Tor.Control, cfg.Tor.TargetIPAddress, cfg.Tor.Password, - ) - - // Start the tor controller before giving it to any other subsystems. - if err := torController.Start(); err != nil { - err := er.Errorf("unable to initialize tor controller: %v", err) - log.Error(err) - return err - } - defer func() { - if err := torController.Stop(); err != nil { - log.Errorf("error stopping tor controller: %v", err) - } - }() - } - - var tower *watchtower.Standalone - if cfg.Watchtower.Active { - // Segment the watchtower directory by chain and network. 
- towerDBDir := filepath.Join( - cfg.Watchtower.TowerDir, - cfg.registeredChains.PrimaryChain().String(), - lncfg.NormalizeNetwork(cfg.ActiveNetParams.Name), - ) - - towerDB, err := wtdb.OpenTowerDB(towerDBDir) - if err != nil { - err := er.Errorf("unable to open watchtower "+ - "database: %v", err) - log.Error(err) - return err - } - defer towerDB.Close() - - towerKeyDesc, err := activeChainControl.KeyRing.DeriveKey( - keychain.KeyLocator{ - Family: keychain.KeyFamilyTowerID, - Index: 0, - }, - ) - if err != nil { - err := er.Errorf("error deriving tower key: %v", err) - log.Error(err) - return err - } - - wtCfg := &watchtower.Config{ - BlockFetcher: activeChainControl.ChainIO, - DB: towerDB, - EpochRegistrar: activeChainControl.ChainNotifier, - Net: cfg.net, - NewAddress: func() (btcutil.Address, er.R) { - return activeChainControl.Wallet.NewAddress( - lnwallet.WitnessPubKey, false, - ) - }, - NodeKeyECDH: keychain.NewPubKeyECDH( - towerKeyDesc, activeChainControl.KeyRing, - ), - PublishTx: activeChainControl.Wallet.PublishTransaction, - ChainHash: *cfg.ActiveNetParams.GenesisHash, - } - - // If there is a tor controller (user wants auto hidden services), then - // store a pointer in the watchtower config. - if torController != nil { - wtCfg.TorController = torController - wtCfg.WatchtowerKeyPath = cfg.Tor.WatchtowerKeyPath - - switch { - case cfg.Tor.V2: - wtCfg.Type = tor.V2 - case cfg.Tor.V3: - wtCfg.Type = tor.V3 - } - } - - wtConfig, err := cfg.Watchtower.Apply(wtCfg, lncfg.NormalizeAddresses) - if err != nil { - err := er.Errorf("unable to configure watchtower: %v", - err) - log.Error(err) - return err - } - - tower, err = watchtower.New(wtConfig) - if err != nil { - err := er.Errorf("unable to create watchtower: %v", err) - log.Error(err) - return err - } - } - - // Initialize the ChainedAcceptor. - chainedAcceptor := chanacceptor.NewChainedAcceptor() - - // Set up the core server which will listen for incoming peer - // connections. 
- server, err := newServer( - cfg, cfg.Listeners, localChanDB, remoteChanDB, towerClientDB, - activeChainControl, &idKeyDesc, walletInitParams.ChansToRestore, - chainedAcceptor, torController, - ) - if err != nil { - err := er.Errorf("unable to create server: %v", err) - log.Error(err) - return err - } - - // Set up an autopilot manager from the current config. This will be - // used to manage the underlying autopilot agent, starting and stopping - // it at will. - atplCfg, err := initAutoPilot(server, cfg.Autopilot, mainChain, cfg.ActiveNetParams) - if err != nil { - err := er.Errorf("unable to initialize autopilot: %v", err) - log.Error(err) - return err - } - - atplManager, err := autopilot.NewManager(atplCfg) - if err != nil { - err := er.Errorf("unable to create autopilot manager: %v", err) - log.Error(err) - return err - } - if err := atplManager.Start(); err != nil { - err := er.Errorf("unable to start autopilot manager: %v", err) - log.Error(err) - return err - } - defer atplManager.Stop() - - // rpcListeners is a closure we'll hand to the rpc server, that will be - // called when it needs listeners for its GPRC server. - rpcListeners := func() ([]*ListenerWithSignal, func(), er.R) { - // If we have chosen to start with a dedicated listener for the - // rpc server, we return it directly. - if lisCfg.RPCListener != nil { - return []*ListenerWithSignal{lisCfg.RPCListener}, - func() {}, nil - } - - // Otherwise we'll return the regular listeners. - return getListeners() - } - - // Initialize, and register our implementation of the gRPC interface - // exported by the rpcServer. 
- rpcServer, err := newRPCServer( - cfg, server, macaroonService, cfg.SubRPCServers, serverOpts, - restDialOpts, restProxyDest, atplManager, server.invoices, - tower, restListen, rpcListeners, chainedAcceptor, metaService, - ) - if err != nil { - err := er.Errorf("unable to create RPC server: %v", err) - log.Error(err) - return err - } - if err := rpcServer.Start(); err != nil { - err := er.Errorf("unable to start RPC server: %v", err) - log.Error(err) - return err - } - defer rpcServer.Stop() - - // If we're not in regtest or simnet mode, We'll wait until we're fully - // synced to continue the start up of the remainder of the daemon. This - // ensures that we don't accept any possibly invalid state transitions, or - // accept channels with spent funds. - if !(cfg.Bitcoin.RegTest || cfg.Bitcoin.SimNet || - cfg.Litecoin.RegTest || cfg.Litecoin.SimNet) { - - _, bestHeight, err := activeChainControl.ChainIO.GetBestBlock() - if err != nil { - err := er.Errorf("unable to determine chain tip: %v", - err) - log.Error(err) - return err - } - - log.Infof("Waiting for chain backend to finish sync, "+ - "start_height=%v", bestHeight) - - for { - if !signal.Alive() { - return nil - } - - synced, _, err := activeChainControl.Wallet.IsSynced() - if err != nil { - err := er.Errorf("unable to determine if "+ - "wallet is synced: %v", err) - log.Error(err) - return err - } - - if synced { - break - } - - time.Sleep(time.Second * 1) - } - - _, bestHeight, err = activeChainControl.ChainIO.GetBestBlock() - if err != nil { - err := er.Errorf("unable to determine chain tip: %v", - err) - log.Error(err) - return err - } - - log.Infof("Chain backend is fully synced (end_height=%v)!", - bestHeight) - } - - // With all the relevant chains initialized, we can finally start the - // server itself. 
- if err := server.Start(); err != nil { - err := er.Errorf("unable to start server: %v", err) - log.Error(err) - return err - } - defer server.Stop() - - // Now that the server has started, if the autopilot mode is currently - // active, then we'll start the autopilot agent immediately. It will be - // stopped together with the autopilot service. - if cfg.Autopilot.Active { - if err := atplManager.StartAgent(); err != nil { - err := er.Errorf("unable to start autopilot agent: %v", - err) - log.Error(err) - return err - } - } - - if cfg.Watchtower.Active { - if err := tower.Start(); err != nil { - err := er.Errorf("unable to start watchtower: %v", err) - log.Error(err) - return err - } - defer tower.Stop() - } - - // Wait for shutdown signal from either a graceful server stop or from - // the interrupt handler. - <-shutdownChan - return nil -} - -// getTLSConfig1 returns a TLS configuration for the gRPC server and credentials -// and a proxy destination for the REST reverse proxy. -func getTLSConfig1(cfg *Config) ( - []grpc.ServerOption, - *tls.Config, - func(), - er.R, -) { - - // Ensure we create TLS key and certificate if they don't exist. - if !fileExists(cfg.TLSCertPath) && !fileExists(cfg.TLSKeyPath) { - log.Infof("Generating TLS certificates...") - err := cert.GenCertPair( - "lnd autogenerated cert", cfg.TLSCertPath, - cfg.TLSKeyPath, cfg.TLSExtraIPs, cfg.TLSExtraDomains, - cfg.TLSDisableAutofill, cert.DefaultAutogenValidity, - ) - if err != nil { - return nil, nil, nil, err - } - log.Infof("Done generating TLS certificates") - } - - certData, parsedCert, errr := cert.LoadCert( - cfg.TLSCertPath, cfg.TLSKeyPath, - ) - if errr != nil { - return nil, nil, nil, er.E(errr) - } - - // We check whether the certifcate we have on disk match the IPs and - // domains specified by the config. If the extra IPs or domains have - // changed from when the certificate was created, we will refresh the - // certificate if auto refresh is active. 
- refresh := false - var err er.R - if cfg.TLSAutoRefresh { - refresh, err = cert.IsOutdated( - parsedCert, cfg.TLSExtraIPs, - cfg.TLSExtraDomains, cfg.TLSDisableAutofill, - ) - if err != nil { - return nil, nil, nil, err - } - } - - // If the certificate expired or it was outdated, delete it and the TLS - // key and generate a new pair. - if time.Now().After(parsedCert.NotAfter) || refresh { - log.Info("TLS certificate is expired or outdated, " + - "generating a new one") - - errr := os.Remove(cfg.TLSCertPath) - if errr != nil { - return nil, nil, nil, er.E(errr) - } - - errr = os.Remove(cfg.TLSKeyPath) - if errr != nil { - return nil, nil, nil, er.E(errr) - } - - log.Infof("Renewing TLS certificates...") - err = cert.GenCertPair( - "lnd autogenerated cert", cfg.TLSCertPath, - cfg.TLSKeyPath, cfg.TLSExtraIPs, cfg.TLSExtraDomains, - cfg.TLSDisableAutofill, cert.DefaultAutogenValidity, - ) - if err != nil { - return nil, nil, nil, err - } - log.Infof("Done renewing TLS certificates") - - // Reload the certificate data. - certData, _, errr = cert.LoadCert( - cfg.TLSCertPath, cfg.TLSKeyPath, - ) - if errr != nil { - return nil, nil, nil, er.E(errr) - } - } - - tlsCfg := cert.TLSConfFromCert(certData) - - // If Let's Encrypt is enabled, instantiate autocert to request/renew - // the certificates. 
- cleanUp := func() {} - if cfg.LetsEncryptDomain != "" { - log.Infof("Using Let's Encrypt certificate for domain %v", - cfg.LetsEncryptDomain) - - manager := autocert.Manager{ - Cache: autocert.DirCache(cfg.LetsEncryptDir), - Prompt: autocert.AcceptTOS, - HostPolicy: autocert.HostWhitelist(cfg.LetsEncryptDomain), - } - - srv := &http.Server{ - Addr: cfg.LetsEncryptListen, - Handler: manager.HTTPHandler(nil), - } - shutdownCompleted := make(chan struct{}) - cleanUp = func() { - err := srv.Shutdown(context.Background()) - if err != nil { - log.Errorf("Autocert listener shutdown "+ - " error: %v", err) - - return - } - <-shutdownCompleted - log.Infof("Autocert challenge listener stopped") - } - - go func() { - log.Infof("Autocert challenge listener started "+ - "at %v", cfg.LetsEncryptListen) - - err := srv.ListenAndServe() - if err != http.ErrServerClosed { - log.Errorf("autocert http: %v", err) - } - close(shutdownCompleted) - }() - - getCertificate := func(h *tls.ClientHelloInfo) ( - *tls.Certificate, error) { - - lecert, err := manager.GetCertificate(h) - if err != nil { - log.Errorf("GetCertificate: %v", err) - return &certData, nil - } - - return lecert, err - } - - // The self-signed tls.cert remains available as fallback. - tlsCfg.GetCertificate = getCertificate - } - - serverCreds := credentials.NewTLS(tlsCfg) - serverOpts := []grpc.ServerOption{grpc.Creds(serverCreds)} - - return serverOpts, tlsCfg, cleanUp, nil - - // // For our REST dial options, we'll still use TLS, but also increase - // // the max message size that we'll decode to allow clients to hit - // // endpoints which return more data such as the DescribeGraph call. - // // We set this to 200MiB atm. Should be the same value as maxMsgRecvSize - // // in cmd/lncli/main.go. 
- // restDialOpts := []grpc.DialOption{ - // grpc.WithTransportCredentials(restCreds), - // grpc.WithDefaultCallOptions( - // grpc.MaxCallRecvMsgSize(1 * 1024 * 1024 * 200), - // ), - // } - - // // Return a function closure that can be used to listen on a given - // // address with the current TLS config. - // restListen := func(addr net.Addr) (net.Listener, er.R) { - // // For restListen we will call ListenOnAddress if TLS is - // // disabled. - // if cfg.DisableRestTLS { - // return lncfg.ListenOnAddress(addr) - // } - - // return lncfg.TLSListenOnAddress(addr, tlsCfg) - // } - - // return serverOpts, restDialOpts, restListen, cleanUp, nil -} - -// getTLSConfig1 returns a TLS configuration for the gRPC server and credentials -// and a proxy destination for the REST reverse proxy. -func getTLSConfig(cfg *Config) ( - []grpc.ServerOption, - []grpc.DialOption, - func(net.Addr) (net.Listener, er.R), - func(), er.R, -) { - restCreds, errr := credentials.NewClientTLSFromFile(cfg.TLSCertPath, "") - if errr != nil { - return nil, nil, nil, nil, er.E(errr) - } - - // For our REST dial options, we'll still use TLS, but also increase - // the max message size that we'll decode to allow clients to hit - // endpoints which return more data such as the DescribeGraph call. - // We set this to 200MiB atm. Should be the same value as maxMsgRecvSize - // in cmd/lncli/main.go. - restDialOpts := []grpc.DialOption{ - grpc.WithTransportCredentials(restCreds), - grpc.WithDefaultCallOptions( - grpc.MaxCallRecvMsgSize(1 * 1024 * 1024 * 200), - ), - } - - cleanUp := func() {} - var serverOpts []grpc.ServerOption - var tlsCfg *tls.Config - - if !cfg.NoTLS { - var err er.R - serverOpts, tlsCfg, cleanUp, err = getTLSConfig1(cfg) - if err != nil { - return nil, nil, nil, nil, err - } - } - - // Return a function closure that can be used to listen on a given - // address with the current TLS config. 
- restListen := func(addr net.Addr) (net.Listener, er.R) { - // For restListen we will call ListenOnAddress if TLS is - // disabled. - if cfg.DisableRestTLS || tlsCfg == nil { - return lncfg.ListenOnAddress(addr) - } - - return lncfg.TLSListenOnAddress(addr, tlsCfg) - } - - return serverOpts, restDialOpts, restListen, cleanUp, nil -} - -// fileExists reports whether the named file or directory exists. -// This function is taken from https://github.com/btcsuite/btcd -func fileExists(name string) bool { - if _, err := os.Stat(name); err != nil { - if os.IsNotExist(err) { - return false - } - } - return true -} - -// bakeMacaroon creates a new macaroon with newest version and the given -// permissions then returns it binary serialized. -func bakeMacaroon(ctx context.Context, svc *macaroons.Service, - permissions []bakery.Op) ([]byte, er.R) { - - mac, err := svc.NewMacaroon( - ctx, macaroons.DefaultRootKeyID, permissions..., - ) - if err != nil { - return nil, err - } - - b, e := mac.M().MarshalBinary() - return b, er.E(e) -} - -// genMacaroons generates three macaroon files; one admin-level, one for -// invoice access and one read-only. These can also be used to generate more -// granular macaroons. -func genMacaroons(ctx context.Context, svc *macaroons.Service, - admFile, roFile, invoiceFile string) er.R { - - // First, we'll generate a macaroon that only allows the caller to - // access invoice related calls. This is useful for merchants and other - // services to allow an isolated instance that can only query and - // modify invoices. - invoiceMacBytes, err := bakeMacaroon(ctx, svc, invoicePermissions) - if err != nil { - return err - } - errr := ioutil.WriteFile(invoiceFile, invoiceMacBytes, 0644) - if errr != nil { - _ = os.Remove(invoiceFile) - return er.E(errr) - } - - // Generate the read-only macaroon and write it to a file. 
- roBytes, err := bakeMacaroon(ctx, svc, readPermissions) - if err != nil { - return err - } - if errr = ioutil.WriteFile(roFile, roBytes, 0644); errr != nil { - _ = os.Remove(roFile) - return er.E(errr) - } - - // Generate the admin macaroon and write it to a file. - admBytes, err := bakeMacaroon(ctx, svc, adminPermissions()) - if err != nil { - return err - } - if errr = ioutil.WriteFile(admFile, admBytes, 0600); errr != nil { - _ = os.Remove(admFile) - return er.E(errr) - } - - return nil -} - -// adminPermissions returns a list of all permissions in a safe way that doesn't -// modify any of the source lists. -func adminPermissions() []bakery.Op { - admin := make([]bakery.Op, len(readPermissions)+len(writePermissions)) - copy(admin[:len(readPermissions)], readPermissions) - copy(admin[len(readPermissions):], writePermissions) - return admin -} - -// WalletUnlockParams holds the variables used to parameterize the unlocking of -// lnd's wallet after it has already been created. -type WalletUnlockParams struct { - // Password is the public and private wallet passphrase. - Password []byte - - // Birthday specifies the approximate time that this wallet was created. - // This is used to bound any rescans on startup. - Birthday time.Time - - // RecoveryWindow specifies the address lookahead when entering recovery - // mode. A recovery will be attempted if this value is non-zero. - RecoveryWindow uint32 - - // Wallet is the loaded and unlocked Wallet. This is returned - // from the unlocker service to avoid it being unlocked twice (once in - // the unlocker service to check if the password is correct and again - // later when lnd actually uses it). Because unlocking involves scrypt - // which is resource intensive, we want to avoid doing it twice. - Wallet *wallet.Wallet - - // ChansToRestore a set of static channel backups that should be - // restored before the main server instance starts up. 
- ChansToRestore walletunlocker.ChannelsToRecover - - // UnloadWallet is a function for unloading the wallet, which should - // be called on shutdown. - UnloadWallet func() er.R - - // StatelessInit signals that the user requested the daemon to be - // initialized stateless, which means no unencrypted macaroons should be - // written to disk. - StatelessInit bool - - // MacResponseChan is the channel for sending back the admin macaroon to - // the WalletUnlocker service. - MacResponseChan chan []byte -} - -// waitForWalletPassword will spin up gRPC and REST endpoints for the -// WalletUnlocker server, and block until a password is provided by -// the user to this RPC server. -func waitForWalletPassword(cfg *Config, restEndpoints []net.Addr, - serverOpts []grpc.ServerOption, restDialOpts []grpc.DialOption, - restProxyDest string, restListen func(net.Addr) (net.Listener, er.R), - getListeners rpcListeners, metaService *metaservice.MetaService) (*WalletUnlockParams, func(), er.R) { - - chainConfig := cfg.Bitcoin - if cfg.registeredChains.PrimaryChain() == chainreg.LitecoinChain { - chainConfig = cfg.Litecoin - } else if cfg.registeredChains.PrimaryChain() == chainreg.PktChain { - chainConfig = cfg.Pkt - } - - // The macaroonFiles are passed to the wallet unlocker so they can be - // deleted and recreated in case the root macaroon key is also changed - // during the change password operation. - macaroonFiles := []string{ - cfg.AdminMacPath, cfg.ReadMacPath, cfg.InvoiceMacPath, - } - - pwService := walletunlocker.New( - chainConfig.ChainDir, cfg.ActiveNetParams.Params, - !cfg.SyncFreelist, macaroonFiles, - ) - - // Set up a new PasswordService, which will listen for passwords - // provided over RPC. - grpcServer := grpc.NewServer(serverOpts...) 
- lnrpc.RegisterWalletUnlockerServer(grpcServer, pwService) - // Set up metaservice allowing getinfo to work even when wallet is locked - lnrpc.RegisterMetaServiceServer(grpcServer, metaService) - - var shutdownFuncs []func() - shutdown := func() { - // Make sure nothing blocks on reading on the macaroon channel, - // otherwise the GracefulStop below will never return. - close(pwService.MacResponseChan) - - for _, shutdownFn := range shutdownFuncs { - shutdownFn() - } - } - shutdownFuncs = append(shutdownFuncs, grpcServer.GracefulStop) - - // Start a gRPC server listening for HTTP/2 connections, solely used - // for getting the encryption password from the client. - listeners, cleanup, err := getListeners() - if err != nil { - return nil, shutdown, err - } - shutdownFuncs = append(shutdownFuncs, cleanup) - - // Use a WaitGroup so we can be sure the instructions on how to input the - // password is the last thing to be printed to the console. - var wg sync.WaitGroup - - for _, lis := range listeners { - wg.Add(1) - go func(lis *ListenerWithSignal) { - log.Infof("Password RPC server listening on %s", - lis.Addr()) - - // Close the ready chan to indicate we are listening. - close(lis.Ready) - - wg.Done() - _ = grpcServer.Serve(lis) - }(lis) - } - - // Start a REST proxy for our gRPC server above. 
- ctx := context.Background() - ctx, cancel := context.WithCancel(ctx) - shutdownFuncs = append(shutdownFuncs, cancel) - - mux := proxy.NewServeMux() - - errr := lnrpc.RegisterWalletUnlockerHandlerFromEndpoint( - ctx, mux, restProxyDest, restDialOpts, - ) - if errr != nil { - return nil, shutdown, er.E(errr) - } - - srv := &http.Server{Handler: allowCORS(mux, cfg.RestCORS)} - - for _, restEndpoint := range restEndpoints { - lis, err := restListen(restEndpoint) - if err != nil { - log.Errorf("Password gRPC proxy unable to listen "+ - "on %s", restEndpoint) - return nil, shutdown, err - } - shutdownFuncs = append(shutdownFuncs, func() { - err := lis.Close() - if err != nil { - log.Errorf("Error closing listener: %v", - err) - } - }) - - wg.Add(1) - go func() { - log.Infof("Password gRPC proxy started at %s", - lis.Addr()) - wg.Done() - _ = srv.Serve(lis) - }() - } - - // Wait for gRPC and REST servers to be up running. - wg.Wait() - - // Wait for user to provide the password. - log.Infof("Waiting for wallet encryption password. Use `lncli " + - "create` to create a wallet, `lncli unlock` to unlock an " + - "existing wallet, or `lncli changepassword` to change the " + - "password of an existing wallet and unlock it.") - - // We currently don't distinguish between getting a password to be used - // for creation or unlocking, as a new wallet db will be created if - // none exists when creating the chain control. - select { - - // The wallet is being created for the first time, we'll check to see - // if the user provided any entropy for seed creation. If so, then - // we'll create the wallet early to load the seed. 
- case initMsg := <-pwService.InitMsgs: - password := initMsg.Passphrase - cipherSeed := initMsg.Seed - recoveryWindow := initMsg.RecoveryWindow - - netDir := btcwallet.NetworkDir( - chainConfig.ChainDir, cfg.ActiveNetParams.Params, - ) - loader := wallet.NewLoader( - cfg.ActiveNetParams.Params, netDir, "wallet.db", !cfg.SyncFreelist, - recoveryWindow, - ) - - newWallet, err := loader.CreateNewWallet( - password, password, nil, time.Time{}, cipherSeed, - ) - if err != nil { - // Don't leave the file open in case the new wallet - // could not be created for whatever reason. - if err := loader.UnloadWallet(); err != nil { - log.Errorf("Could not unload new "+ - "wallet: %v", err) - } - return nil, shutdown, err - } - - // For new wallets, the ResetWalletTransactions flag is a no-op. - if cfg.ResetWalletTransactions { - log.Warnf("Ignoring reset-wallet-transactions " + - "flag for new wallet as it has no effect") - } - - return &WalletUnlockParams{ - Password: password, - Birthday: cipherSeed.Birthday(), - RecoveryWindow: recoveryWindow, - Wallet: newWallet, - ChansToRestore: initMsg.ChanBackups, - UnloadWallet: loader.UnloadWallet, - StatelessInit: initMsg.StatelessInit, - MacResponseChan: pwService.MacResponseChan, - }, shutdown, nil - - // The wallet has already been created in the past, and is simply being - // unlocked. So we'll just return these passphrases. - case unlockMsg := <-pwService.UnlockMsgs: - // Resetting the transactions is something the user likely only - // wants to do once so we add a prominent warning to the log to - // remind the user to turn off the setting again after - // successful completion. - if cfg.ResetWalletTransactions { - log.Warnf("Dropping all transaction history from " + - "on-chain wallet. 
Remember to disable " + - "reset-wallet-transactions flag for next " + - "start of lnd") - - err := wallet.DropTransactionHistory( - unlockMsg.Wallet.Database(), true, - ) - if err != nil { - if err := unlockMsg.UnloadWallet(); err != nil { - log.Errorf("Could not unload "+ - "wallet: %v", err) - } - return nil, shutdown, err - } - } - - return &WalletUnlockParams{ - Password: unlockMsg.Passphrase, - RecoveryWindow: unlockMsg.RecoveryWindow, - Wallet: unlockMsg.Wallet, - ChansToRestore: unlockMsg.ChanBackups, - UnloadWallet: unlockMsg.UnloadWallet, - StatelessInit: unlockMsg.StatelessInit, - MacResponseChan: pwService.MacResponseChan, - }, shutdown, nil - - case <-signal.ShutdownChannel(): - return nil, shutdown, er.Errorf("shutting down") - } -} - -// initializeDatabases extracts the current databases that we'll use for normal -// operation in the daemon. Two databases are returned: one remote and one -// local. However, only if the replicated database is active will the remote -// database point to a unique database. Otherwise, the local and remote DB will -// both point to the same local database. A function closure that closes all -// opened databases is also returned. -func initializeDatabases(ctx context.Context, - cfg *Config) (*channeldb.DB, *channeldb.DB, func(), er.R) { - - log.Infof("Opening the main database, this might take a few " + - "minutes...") - - if cfg.DB.Backend == lncfg.BoltBackend { - log.Infof("Opening bbolt database, sync_freelist=%v, "+ - "auto_compact=%v", cfg.DB.Bolt.SyncFreelist, - cfg.DB.Bolt.AutoCompact) - } - - startOpenTime := time.Now() - - databaseBackends, err := cfg.DB.GetBackends( - ctx, cfg.localDatabaseDir(), cfg.networkName(), - ) - if err != nil { - return nil, nil, nil, er.Errorf("unable to obtain database "+ - "backends: %v", err) - } - - // If the remoteDB is nil, then we'll just open a local DB as normal, - // having the remote and local pointer be the exact same instance. 
- var ( - localChanDB, remoteChanDB *channeldb.DB - closeFuncs []func() - ) - if databaseBackends.RemoteDB == nil { - // Open the channeldb, which is dedicated to storing channel, - // and network related metadata. - localChanDB, err = channeldb.CreateWithBackend( - databaseBackends.LocalDB, - channeldb.OptionSetRejectCacheSize(cfg.Caches.RejectCacheSize), - channeldb.OptionSetChannelCacheSize(cfg.Caches.ChannelCacheSize), - channeldb.OptionDryRunMigration(cfg.DryRunMigration), - ) - switch { - case channeldb.ErrDryRunMigrationOK.Is(err): - return nil, nil, nil, err - - case err != nil: - err := er.Errorf("unable to open local channeldb: %v", err) - log.Error(err) - return nil, nil, nil, err - } - - closeFuncs = append(closeFuncs, func() { - localChanDB.Close() - }) - - remoteChanDB = localChanDB - } else { - log.Infof("Database replication is available! Creating " + - "local and remote channeldb instances") - - // Otherwise, we'll open two instances, one for the state we - // only need locally, and the other for things we want to - // ensure are replicated. - localChanDB, err = channeldb.CreateWithBackend( - databaseBackends.LocalDB, - channeldb.OptionSetRejectCacheSize(cfg.Caches.RejectCacheSize), - channeldb.OptionSetChannelCacheSize(cfg.Caches.ChannelCacheSize), - channeldb.OptionDryRunMigration(cfg.DryRunMigration), - ) - switch { - // As we want to allow both versions to get thru the dry run - // migration, we'll only exit the second time here once the - // remote instance has had a time to migrate as well. 
- case channeldb.ErrDryRunMigrationOK.Is(err): - log.Infof("Local DB dry run migration successful") - - case err != nil: - err := er.Errorf("unable to open local channeldb: %v", err) - log.Error(err) - return nil, nil, nil, err - } - - closeFuncs = append(closeFuncs, func() { - localChanDB.Close() - }) - - log.Infof("Opening replicated database instance...") - - remoteChanDB, err = channeldb.CreateWithBackend( - databaseBackends.RemoteDB, - channeldb.OptionDryRunMigration(cfg.DryRunMigration), - ) - switch { - case channeldb.ErrDryRunMigrationOK.Is(err): - return nil, nil, nil, err - - case err != nil: - localChanDB.Close() - - err := er.Errorf("unable to open remote channeldb: %v", err) - log.Error(err) - return nil, nil, nil, err - } - - closeFuncs = append(closeFuncs, func() { - remoteChanDB.Close() - }) - } - - openTime := time.Since(startOpenTime) - log.Infof("Database now open (time_to_open=%v)!", openTime) - - cleanUp := func() { - for _, closeFunc := range closeFuncs { - closeFunc() - } - } - - return localChanDB, remoteChanDB, cleanUp, nil -} - -// initNeutrinoBackend inits a new instance of the neutrino light client -// backend given a target chain directory to store the chain state. -func initNeutrinoBackend(cfg *Config, chainDir string) (*neutrino.ChainService, - func(), er.R) { - - // First we'll open the database file for neutrino, creating the - // database if needed. We append the normalized network name here to - // match the behavior of btcwallet. - dbPath := filepath.Join( - chainDir, lncfg.NormalizeNetwork(cfg.ActiveNetParams.Name), - ) - - // Ensure that the neutrino db path exists. 
- if errr := os.MkdirAll(dbPath, 0700); errr != nil { - return nil, nil, er.E(errr) - } - - dbName := filepath.Join(dbPath, "neutrino.db") - db, err := walletdb.Create("bdb", dbName, !cfg.SyncFreelist) - if err != nil { - return nil, nil, er.Errorf("unable to create neutrino "+ - "database: %v", err) - } - - headerStateAssertion, errr := parseHeaderStateAssertion( - cfg.NeutrinoMode.AssertFilterHeader, - ) - if errr != nil { - db.Close() - return nil, nil, errr - } - - // With the database open, we can now create an instance of the - // neutrino light client. We pass in relevant configuration parameters - // required. - config := neutrino.Config{ - DataDir: dbPath, - Database: db, - ChainParams: *cfg.ActiveNetParams.Params, - AddPeers: cfg.NeutrinoMode.AddPeers, - ConnectPeers: cfg.NeutrinoMode.ConnectPeers, - Dialer: func(addr net.Addr) (net.Conn, er.R) { - return cfg.net.Dial( - addr.Network(), addr.String(), - cfg.ConnectionTimeout, - ) - }, - NameResolver: func(host string) ([]net.IP, er.R) { - addrs, err := cfg.net.LookupHost(host) - if err != nil { - return nil, err - } - - ips := make([]net.IP, 0, len(addrs)) - for _, strIP := range addrs { - ip := net.ParseIP(strIP) - if ip == nil { - continue - } - - ips = append(ips, ip) - } - - return ips, nil - }, - AssertFilterHeader: headerStateAssertion, - } - - neutrino.MaxPeers = 8 - neutrino.BanDuration = time.Hour * 48 - neutrino.UserAgentName = cfg.NeutrinoMode.UserAgentName - neutrino.UserAgentVersion = cfg.NeutrinoMode.UserAgentVersion - - neutrinoCS, err := neutrino.NewChainService(config) - if err != nil { - db.Close() - return nil, nil, er.Errorf("unable to create neutrino light "+ - "client: %v", err) - } - - if err := neutrinoCS.Start(); err != nil { - db.Close() - return nil, nil, err - } - - cleanUp := func() { - if err := neutrinoCS.Stop(); err != nil { - log.Infof("Unable to stop neutrino light client: %v", err) - } - db.Close() - } - - return neutrinoCS, cleanUp, nil -} - -// 
parseHeaderStateAssertion parses the user-specified neutrino header state -// into a headerfs.FilterHeader. -func parseHeaderStateAssertion(state string) (*headerfs.FilterHeader, er.R) { - if len(state) == 0 { - return nil, nil - } - - split := strings.Split(state, ":") - if len(split) != 2 { - return nil, er.Errorf("header state assertion %v in "+ - "unexpected format, expected format height:hash", state) - } - - height, errr := strconv.ParseUint(split[0], 10, 32) - if errr != nil { - return nil, er.Errorf("invalid filter header height: %v", errr) - } - - hash, err := chainhash.NewHashFromStr(split[1]) - if err != nil { - return nil, er.Errorf("invalid filter header hash: %v", err) - } - - return &headerfs.FilterHeader{ - Height: uint32(height), - FilterHash: *hash, - }, nil -} diff --git a/lnd/lnpeer/errors.go b/lnd/lnpeer/errors.go deleted file mode 100644 index 37796bb8..00000000 --- a/lnd/lnpeer/errors.go +++ /dev/null @@ -1,9 +0,0 @@ -package lnpeer - -import "github.com/pkt-cash/pktd/btcutil/er" - -var ( - // ErrPeerExiting signals that the peer received a disconnect request. - ErrPeerExiting = er.GenericErrorType.CodeWithDetail("ErrPeerExiting", - "peer exiting") -) diff --git a/lnd/lnpeer/peer.go b/lnd/lnpeer/peer.go deleted file mode 100644 index b1b296f4..00000000 --- a/lnd/lnpeer/peer.go +++ /dev/null @@ -1,60 +0,0 @@ -package lnpeer - -import ( - "net" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/wire" -) - -// Peer is an interface which represents a remote lightning node. -type Peer interface { - // SendMessage sends a variadic number of high-priority message to - // remote peer. The first argument denotes if the method should block - // until the messages have been sent to the remote peer or an error is - // returned, otherwise it returns immediately after queuing. 
- SendMessage(sync bool, msgs ...lnwire.Message) er.R - - // SendMessageLazy sends a variadic number of low-priority message to - // remote peer. The first argument denotes if the method should block - // until the messages have been sent to the remote peer or an error is - // returned, otherwise it returns immediately after queueing. - SendMessageLazy(sync bool, msgs ...lnwire.Message) er.R - - // AddNewChannel adds a new channel to the peer. The channel should fail - // to be added if the cancel channel is closed. - AddNewChannel(channel *channeldb.OpenChannel, cancel <-chan struct{}) er.R - - // WipeChannel removes the channel uniquely identified by its channel - // point from all indexes associated with the peer. - WipeChannel(*wire.OutPoint) - - // PubKey returns the serialized public key of the remote peer. - PubKey() [33]byte - - // IdentityKey returns the public key of the remote peer. - IdentityKey() *btcec.PublicKey - - // Address returns the network address of the remote peer. - Address() net.Addr - - // QuitSignal is a method that should return a channel which will be - // sent upon or closed once the backing peer exits. This allows callers - // using the interface to cancel any processing in the event the backing - // implementation exits. - QuitSignal() <-chan struct{} - - // LocalFeatures returns the set of features that has been advertised by - // the us to the remote peer. This allows sub-systems that use this - // interface to gate their behavior off the set of negotiated feature - // bits. - LocalFeatures() *lnwire.FeatureVector - - // RemoteFeatures returns the set of features that has been advertised - // by the remote peer. This allows sub-systems that use this interface - // to gate their behavior off the set of negotiated feature bits. 
- RemoteFeatures() *lnwire.FeatureVector -} diff --git a/lnd/lnrpc/.clang-format b/lnd/lnrpc/.clang-format deleted file mode 100644 index f1914278..00000000 --- a/lnd/lnrpc/.clang-format +++ /dev/null @@ -1,7 +0,0 @@ ---- -Language: Proto -BasedOnStyle: Google -IndentWidth: 4 -AllowShortFunctionsOnASingleLine: None -SpaceBeforeParens: Always -CompactNamespaces: false diff --git a/lnd/lnrpc/README.md b/lnd/lnrpc/README.md deleted file mode 100644 index a6cf1514..00000000 --- a/lnd/lnrpc/README.md +++ /dev/null @@ -1,219 +0,0 @@ -lnrpc -===== - -[![Build Status](http://img.shields.io/travis/lightningnetwork/lnd.svg)](https://travis-ci.org/lightningnetwork/lnd) -[![MIT licensed](https://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/lightningnetwork/lnd/blob/master/LICENSE) -[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/lightningnetwork/lnd/lnrpc) - -This lnrpc package implements both a client and server for `lnd`s RPC system -which is based off of the high-performance cross-platform -[gRPC](http://www.grpc.io/) RPC framework. By default, only the Go -client+server libraries are compiled within the package. In order to compile -the client side libraries for other supported languages, the `protoc` tool will -need to be used to generate the compiled protos for a specific language. - -The following languages are supported as clients to `lnrpc`: C++, Go, Node.js, -Java, Ruby, Android Java, PHP, Python, C#, Objective-C. - -## Service: Lightning - -The list of defined RPCs on the service `Lightning` are the following (with a brief -description): - - * WalletBalance - * Returns the wallet's current confirmed balance in BTC. - * ChannelBalance - * Returns the daemons' available aggregate channel balance in BTC. - * GetTransactions - * Returns a list of on-chain transactions that pay to or are spends from - `lnd`. - * SendCoins - * Sends an amount of satoshis to a specific address. 
- * ListUnspent - * Lists available utxos within a range of confirmations. - * SubscribeTransactions - * Returns a stream which sends async notifications each time a transaction - is created or one is received that pays to us. - * SendMany - * Allows the caller to create a transaction with an arbitrary fan-out - (many outputs). - * NewAddress - * Returns a new address, the following address types are supported: - pay-to-witness-key-hash (p2wkh) and nested-pay-to-witness-key-hash - (np2wkh). - * SignMessage - * Signs a message with the node's identity key and returns a - zbase32 encoded signature. - * VerifyMessage - * Verifies a signature signed by another node on a message. The other node - must be an active node in the channel database. - * ConnectPeer - * Connects to a peer identified by a public key and host. - * DisconnectPeer - * Disconnects a peer identified by a public key. - * ListPeers - * Lists all available connected peers. - * GetInfo - * Returns basic data concerning the daemon. - * GetRecoveryInfo - * Returns information about recovery process. - * PendingChannels - * List the number of pending (not fully confirmed) channels. - * ListChannels - * List all active channels the daemon manages. - * OpenChannelSync - * OpenChannelSync is a synchronous version of the OpenChannel RPC call. - * OpenChannel - * Attempts to open a channel to a target peer with a specific amount and - push amount. - * CloseChannel - * Attempts to close a target channel. A channel can either be closed - cooperatively if the channel peer is online, or using a "force" close to - broadcast the latest channel state. - * SendPayment - * Send a payment over Lightning to a target peer. - * SendPaymentSync - * SendPaymentSync is the synchronous non-streaming version of SendPayment. - * SendToRoute - * Send a payment over Lightning to a target peer through a route explicitly - defined by the user. 
- * SendToRouteSync - * SendToRouteSync is the synchronous non-streaming version of SendToRoute. - * AddInvoice - * Adds an invoice to the daemon. Invoices are automatically settled once - seen as an incoming HTLC. - * ListInvoices - * Lists all stored invoices. - * LookupInvoice - * Attempts to look up an invoice by payment hash (r-hash). - * SubscribeInvoices - * Creates a uni-directional stream which receives async notifications as - the daemon settles invoices - * DecodePayReq - * Decode a payment request, returning a full description of the conditions - encoded within the payment request. - * ListPayments - * List all outgoing Lightning payments the daemon has made. - * DeleteAllPayments - * Deletes all outgoing payments from DB. - * DescribeGraph - * Returns a description of the known channel graph from the PoV of the - node. - * GetChanInfo - * Returns information for a specific channel identified by channel ID. - * GetNodeInfo - * Returns information for a particular node identified by its identity - public key. - * QueryRoutes - * Queries for a possible route to a target peer which can carry a certain - amount of payment. - * GetNetworkInfo - * Returns some network level statistics. - * StopDaemon - * Sends a shutdown request to the interrupt handler, triggering a graceful - shutdown of the daemon. - * SubscribeChannelGraph - * Creates a stream which receives async notifications upon any changes to the - channel graph topology from the point of view of the responding node. - * DebugLevel - * Set logging verbosity of lnd programmatically - * FeeReport - * Allows the caller to obtain a report detailing the current fee schedule - enforced by the node globally for each channel. - * UpdateChannelPolicy - * Allows the caller to update the fee schedule and channel policies for all channels - globally, or a particular channel. - * ForwardingHistory - * ForwardingHistory allows the caller to query the htlcswitch for a - record of all HTLCs forwarded. 
- * BakeMacaroon - * Bakes a new macaroon with the provided list of permissions and - restrictions - * ListMacaroonIDs - * List all the macaroon root key IDs that are in use. - * DeleteMacaroonID - * Remove a specific macaroon root key ID from the database and invalidates - all macaroons derived from the key with that ID. - -## Service: WalletUnlocker - -The list of defined RPCs on the service `WalletUnlocker` are the following (with a brief -description): - - * CreateWallet - * Set encryption password for the wallet database. - * UnlockWallet - * Provide a password to unlock the wallet database. - -## Installation and Updating - -```bash -$ go get -u github.com/lightningnetwork/lnd/lnrpc -``` - -## Generate protobuf definitions - -### Linux - -For linux there is an easy install script that is also used for the Travis CI -build. Just run the following command (requires `sudo` permissions and the tools -`make`, `go`, `wget` and `unzip` to be installed) from the repository's root -folder: - -`./scripts/install_travis_proto.sh` - -### MacOS / Unix like systems - -1. Download [v.3.4.0](https://github.com/google/protobuf/releases/tag/v3.4.0) of -`protoc` for your operating system and add it to your `PATH`. -For example, if using macOS: -```bash -$ curl -LO https://github.com/google/protobuf/releases/download/v3.4.0/protoc-3.4.0-osx-x86_64.zip -$ unzip protoc-3.4.0-osx-x86_64.zip -d protoc -$ export PATH=$PWD/protoc/bin:$PATH -``` - -2. Install `golang/protobuf` at version `v1.3.2`. -```bash -$ git clone https://github.com/golang/protobuf $GOPATH/src/github.com/golang/protobuf -$ cd $GOPATH/src/github.com/golang/protobuf -$ git reset --hard v1.3.2 -$ make -``` - -3. Install 'genproto' at commit `20e1ac93f88cf06d2b1defb90b9e9e126c7dfff6`. -```bash -$ go get google.golang.org/genproto -$ cd $GOPATH/src/google.golang.org/genproto -$ git reset --hard 20e1ac93f88cf06d2b1defb90b9e9e126c7dfff6 -``` - -4. Install `grpc-ecosystem/grpc-gateway` at version `v1.14.3`. 
-```bash -$ git clone https://github.com/grpc-ecosystem/grpc-gateway $GOPATH/src/github.com/grpc-ecosystem/grpc-gateway -$ cd $GOPATH/src/github.com/grpc-ecosystem/grpc-gateway -$ git reset --hard v1.14.3 -$ go install ./protoc-gen-grpc-gateway ./protoc-gen-swagger -``` - -5. Run [`gen_protos.sh`](https://github.com/lightningnetwork/lnd/blob/master/lnrpc/gen_protos.sh) -or `make rpc` to generate new protobuf definitions. - -## Format .proto files - -We use `clang-format` to make sure the `.proto` files are formatted correctly. -You can install the formatter on Ubuntu by running `apt install clang-format`. - -Consult [this page](http://releases.llvm.org/download.html) to find binaries -for other operating systems or distributions. - -## Makefile commands - -The following commands are available with `make`: - -* `rpc`: Compile `.proto` files (calls `lnrpc/gen_protos.sh`). -* `rpc-format`: Formats all `.proto` files according to our formatting rules. - Requires `clang-format`, see previous chapter. -* `rpc-check`: Runs both previous commands and makes sure the git work tree is - not dirty. This can be used to check that the `.proto` files are formatted - and compiled properly. diff --git a/lnd/lnrpc/autopilotrpc/autopilot.pb.go b/lnd/lnrpc/autopilotrpc/autopilot.pb.go deleted file mode 100644 index c0ffe362..00000000 --- a/lnd/lnrpc/autopilotrpc/autopilot.pb.go +++ /dev/null @@ -1,644 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: autopilotrpc/autopilot.proto - -package autopilotrpc - -import ( - context "context" - fmt "fmt" - proto "github.com/golang/protobuf/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -type StatusRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StatusRequest) Reset() { *m = StatusRequest{} } -func (m *StatusRequest) String() string { return proto.CompactTextString(m) } -func (*StatusRequest) ProtoMessage() {} -func (*StatusRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_e0b9dc347a92e084, []int{0} -} - -func (m *StatusRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StatusRequest.Unmarshal(m, b) -} -func (m *StatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StatusRequest.Marshal(b, m, deterministic) -} -func (m *StatusRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_StatusRequest.Merge(m, src) -} -func (m *StatusRequest) XXX_Size() int { - return xxx_messageInfo_StatusRequest.Size(m) -} -func (m *StatusRequest) XXX_DiscardUnknown() { - xxx_messageInfo_StatusRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_StatusRequest proto.InternalMessageInfo - -type StatusResponse struct { - // Indicates whether the autopilot is active or not. 
- Active bool `protobuf:"varint,1,opt,name=active,proto3" json:"active,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StatusResponse) Reset() { *m = StatusResponse{} } -func (m *StatusResponse) String() string { return proto.CompactTextString(m) } -func (*StatusResponse) ProtoMessage() {} -func (*StatusResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_e0b9dc347a92e084, []int{1} -} - -func (m *StatusResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StatusResponse.Unmarshal(m, b) -} -func (m *StatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StatusResponse.Marshal(b, m, deterministic) -} -func (m *StatusResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_StatusResponse.Merge(m, src) -} -func (m *StatusResponse) XXX_Size() int { - return xxx_messageInfo_StatusResponse.Size(m) -} -func (m *StatusResponse) XXX_DiscardUnknown() { - xxx_messageInfo_StatusResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_StatusResponse proto.InternalMessageInfo - -func (m *StatusResponse) GetActive() bool { - if m != nil { - return m.Active - } - return false -} - -type ModifyStatusRequest struct { - // Whether the autopilot agent should be enabled or not. 
- Enable bool `protobuf:"varint,1,opt,name=enable,proto3" json:"enable,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ModifyStatusRequest) Reset() { *m = ModifyStatusRequest{} } -func (m *ModifyStatusRequest) String() string { return proto.CompactTextString(m) } -func (*ModifyStatusRequest) ProtoMessage() {} -func (*ModifyStatusRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_e0b9dc347a92e084, []int{2} -} - -func (m *ModifyStatusRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ModifyStatusRequest.Unmarshal(m, b) -} -func (m *ModifyStatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ModifyStatusRequest.Marshal(b, m, deterministic) -} -func (m *ModifyStatusRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ModifyStatusRequest.Merge(m, src) -} -func (m *ModifyStatusRequest) XXX_Size() int { - return xxx_messageInfo_ModifyStatusRequest.Size(m) -} -func (m *ModifyStatusRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ModifyStatusRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ModifyStatusRequest proto.InternalMessageInfo - -func (m *ModifyStatusRequest) GetEnable() bool { - if m != nil { - return m.Enable - } - return false -} - -type ModifyStatusResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ModifyStatusResponse) Reset() { *m = ModifyStatusResponse{} } -func (m *ModifyStatusResponse) String() string { return proto.CompactTextString(m) } -func (*ModifyStatusResponse) ProtoMessage() {} -func (*ModifyStatusResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_e0b9dc347a92e084, []int{3} -} - -func (m *ModifyStatusResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ModifyStatusResponse.Unmarshal(m, b) -} -func (m *ModifyStatusResponse) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { - return xxx_messageInfo_ModifyStatusResponse.Marshal(b, m, deterministic) -} -func (m *ModifyStatusResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ModifyStatusResponse.Merge(m, src) -} -func (m *ModifyStatusResponse) XXX_Size() int { - return xxx_messageInfo_ModifyStatusResponse.Size(m) -} -func (m *ModifyStatusResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ModifyStatusResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ModifyStatusResponse proto.InternalMessageInfo - -type QueryScoresRequest struct { - Pubkeys []string `protobuf:"bytes,1,rep,name=pubkeys,proto3" json:"pubkeys,omitempty"` - // If set, we will ignore the local channel state when calculating scores. - IgnoreLocalState bool `protobuf:"varint,2,opt,name=ignore_local_state,json=ignoreLocalState,proto3" json:"ignore_local_state,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *QueryScoresRequest) Reset() { *m = QueryScoresRequest{} } -func (m *QueryScoresRequest) String() string { return proto.CompactTextString(m) } -func (*QueryScoresRequest) ProtoMessage() {} -func (*QueryScoresRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_e0b9dc347a92e084, []int{4} -} - -func (m *QueryScoresRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_QueryScoresRequest.Unmarshal(m, b) -} -func (m *QueryScoresRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_QueryScoresRequest.Marshal(b, m, deterministic) -} -func (m *QueryScoresRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryScoresRequest.Merge(m, src) -} -func (m *QueryScoresRequest) XXX_Size() int { - return xxx_messageInfo_QueryScoresRequest.Size(m) -} -func (m *QueryScoresRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryScoresRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryScoresRequest proto.InternalMessageInfo - -func (m 
*QueryScoresRequest) GetPubkeys() []string { - if m != nil { - return m.Pubkeys - } - return nil -} - -func (m *QueryScoresRequest) GetIgnoreLocalState() bool { - if m != nil { - return m.IgnoreLocalState - } - return false -} - -type QueryScoresResponse struct { - Results []*QueryScoresResponse_HeuristicResult `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *QueryScoresResponse) Reset() { *m = QueryScoresResponse{} } -func (m *QueryScoresResponse) String() string { return proto.CompactTextString(m) } -func (*QueryScoresResponse) ProtoMessage() {} -func (*QueryScoresResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_e0b9dc347a92e084, []int{5} -} - -func (m *QueryScoresResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_QueryScoresResponse.Unmarshal(m, b) -} -func (m *QueryScoresResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_QueryScoresResponse.Marshal(b, m, deterministic) -} -func (m *QueryScoresResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryScoresResponse.Merge(m, src) -} -func (m *QueryScoresResponse) XXX_Size() int { - return xxx_messageInfo_QueryScoresResponse.Size(m) -} -func (m *QueryScoresResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryScoresResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryScoresResponse proto.InternalMessageInfo - -func (m *QueryScoresResponse) GetResults() []*QueryScoresResponse_HeuristicResult { - if m != nil { - return m.Results - } - return nil -} - -type QueryScoresResponse_HeuristicResult struct { - Heuristic string `protobuf:"bytes,1,opt,name=heuristic,proto3" json:"heuristic,omitempty"` - Scores map[string]float64 `protobuf:"bytes,2,rep,name=scores,proto3" json:"scores,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"fixed64,2,opt,name=value,proto3"` - 
XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *QueryScoresResponse_HeuristicResult) Reset() { *m = QueryScoresResponse_HeuristicResult{} } -func (m *QueryScoresResponse_HeuristicResult) String() string { return proto.CompactTextString(m) } -func (*QueryScoresResponse_HeuristicResult) ProtoMessage() {} -func (*QueryScoresResponse_HeuristicResult) Descriptor() ([]byte, []int) { - return fileDescriptor_e0b9dc347a92e084, []int{5, 0} -} - -func (m *QueryScoresResponse_HeuristicResult) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_QueryScoresResponse_HeuristicResult.Unmarshal(m, b) -} -func (m *QueryScoresResponse_HeuristicResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_QueryScoresResponse_HeuristicResult.Marshal(b, m, deterministic) -} -func (m *QueryScoresResponse_HeuristicResult) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryScoresResponse_HeuristicResult.Merge(m, src) -} -func (m *QueryScoresResponse_HeuristicResult) XXX_Size() int { - return xxx_messageInfo_QueryScoresResponse_HeuristicResult.Size(m) -} -func (m *QueryScoresResponse_HeuristicResult) XXX_DiscardUnknown() { - xxx_messageInfo_QueryScoresResponse_HeuristicResult.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryScoresResponse_HeuristicResult proto.InternalMessageInfo - -func (m *QueryScoresResponse_HeuristicResult) GetHeuristic() string { - if m != nil { - return m.Heuristic - } - return "" -} - -func (m *QueryScoresResponse_HeuristicResult) GetScores() map[string]float64 { - if m != nil { - return m.Scores - } - return nil -} - -type SetScoresRequest struct { - // The name of the heuristic to provide scores to. - Heuristic string `protobuf:"bytes,1,opt,name=heuristic,proto3" json:"heuristic,omitempty"` - // - //A map from hex-encoded public keys to scores. Scores must be in the range - //[0.0, 1.0]. 
- Scores map[string]float64 `protobuf:"bytes,2,rep,name=scores,proto3" json:"scores,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"fixed64,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SetScoresRequest) Reset() { *m = SetScoresRequest{} } -func (m *SetScoresRequest) String() string { return proto.CompactTextString(m) } -func (*SetScoresRequest) ProtoMessage() {} -func (*SetScoresRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_e0b9dc347a92e084, []int{6} -} - -func (m *SetScoresRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SetScoresRequest.Unmarshal(m, b) -} -func (m *SetScoresRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SetScoresRequest.Marshal(b, m, deterministic) -} -func (m *SetScoresRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SetScoresRequest.Merge(m, src) -} -func (m *SetScoresRequest) XXX_Size() int { - return xxx_messageInfo_SetScoresRequest.Size(m) -} -func (m *SetScoresRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SetScoresRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_SetScoresRequest proto.InternalMessageInfo - -func (m *SetScoresRequest) GetHeuristic() string { - if m != nil { - return m.Heuristic - } - return "" -} - -func (m *SetScoresRequest) GetScores() map[string]float64 { - if m != nil { - return m.Scores - } - return nil -} - -type SetScoresResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SetScoresResponse) Reset() { *m = SetScoresResponse{} } -func (m *SetScoresResponse) String() string { return proto.CompactTextString(m) } -func (*SetScoresResponse) ProtoMessage() {} -func (*SetScoresResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_e0b9dc347a92e084, []int{7} -} - -func (m *SetScoresResponse) 
XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SetScoresResponse.Unmarshal(m, b) -} -func (m *SetScoresResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SetScoresResponse.Marshal(b, m, deterministic) -} -func (m *SetScoresResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_SetScoresResponse.Merge(m, src) -} -func (m *SetScoresResponse) XXX_Size() int { - return xxx_messageInfo_SetScoresResponse.Size(m) -} -func (m *SetScoresResponse) XXX_DiscardUnknown() { - xxx_messageInfo_SetScoresResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_SetScoresResponse proto.InternalMessageInfo - -func init() { - proto.RegisterType((*StatusRequest)(nil), "autopilotrpc.StatusRequest") - proto.RegisterType((*StatusResponse)(nil), "autopilotrpc.StatusResponse") - proto.RegisterType((*ModifyStatusRequest)(nil), "autopilotrpc.ModifyStatusRequest") - proto.RegisterType((*ModifyStatusResponse)(nil), "autopilotrpc.ModifyStatusResponse") - proto.RegisterType((*QueryScoresRequest)(nil), "autopilotrpc.QueryScoresRequest") - proto.RegisterType((*QueryScoresResponse)(nil), "autopilotrpc.QueryScoresResponse") - proto.RegisterType((*QueryScoresResponse_HeuristicResult)(nil), "autopilotrpc.QueryScoresResponse.HeuristicResult") - proto.RegisterMapType((map[string]float64)(nil), "autopilotrpc.QueryScoresResponse.HeuristicResult.ScoresEntry") - proto.RegisterType((*SetScoresRequest)(nil), "autopilotrpc.SetScoresRequest") - proto.RegisterMapType((map[string]float64)(nil), "autopilotrpc.SetScoresRequest.ScoresEntry") - proto.RegisterType((*SetScoresResponse)(nil), "autopilotrpc.SetScoresResponse") -} - -func init() { proto.RegisterFile("autopilotrpc/autopilot.proto", fileDescriptor_e0b9dc347a92e084) } - -var fileDescriptor_e0b9dc347a92e084 = []byte{ - // 464 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0xcf, 0x6b, 0xd4, 0x40, - 0x14, 0xc7, 0x49, 0x8a, 0x5b, 0xf3, 0xb6, 0xda, 
0x75, 0xb6, 0x94, 0x10, 0x17, 0xdd, 0xce, 0x69, - 0x11, 0x9b, 0xa5, 0xf5, 0xa2, 0x82, 0x07, 0x2b, 0x82, 0x60, 0x3d, 0x38, 0x4b, 0x2f, 0x22, 0x94, - 0xc9, 0x74, 0xec, 0x86, 0x8d, 0x99, 0x38, 0x3f, 0x0a, 0xf9, 0x87, 0xbc, 0xfa, 0x37, 0x78, 0xf4, - 0xbf, 0x92, 0x64, 0x92, 0x98, 0x84, 0x35, 0x22, 0xf4, 0x10, 0x98, 0xf7, 0xbe, 0x6f, 0x3e, 0x6f, - 0xde, 0x9b, 0x97, 0x81, 0x19, 0x35, 0x5a, 0x64, 0x71, 0x22, 0xb4, 0xcc, 0xd8, 0xb2, 0x31, 0xc2, - 0x4c, 0x0a, 0x2d, 0xd0, 0x5e, 0x5b, 0xc5, 0xfb, 0x70, 0x6f, 0xa5, 0xa9, 0x36, 0x8a, 0xf0, 0x6f, - 0x86, 0x2b, 0x8d, 0x17, 0x70, 0xbf, 0x76, 0xa8, 0x4c, 0xa4, 0x8a, 0xa3, 0x43, 0x18, 0x51, 0xa6, - 0xe3, 0x1b, 0xee, 0x3b, 0x73, 0x67, 0x71, 0x97, 0x54, 0x16, 0x3e, 0x86, 0xe9, 0x07, 0x71, 0x15, - 0x7f, 0xc9, 0x3b, 0x80, 0x22, 0x9c, 0xa7, 0x34, 0x4a, 0x9a, 0x70, 0x6b, 0xe1, 0x43, 0x38, 0xe8, - 0x86, 0x5b, 0x3c, 0xfe, 0x0c, 0xe8, 0xa3, 0xe1, 0x32, 0x5f, 0x31, 0x21, 0x79, 0x43, 0xf1, 0x61, - 0x37, 0x33, 0xd1, 0x86, 0xe7, 0xca, 0x77, 0xe6, 0x3b, 0x0b, 0x8f, 0xd4, 0x26, 0x7a, 0x0a, 0x28, - 0xbe, 0x4e, 0x85, 0xe4, 0x97, 0x89, 0x60, 0x34, 0xb9, 0x54, 0x9a, 0x6a, 0xee, 0xbb, 0x65, 0xae, - 0x89, 0x55, 0xce, 0x0b, 0xa1, 0x48, 0xc3, 0xf1, 0x77, 0x17, 0xa6, 0x1d, 0x7c, 0x55, 0xd4, 0x7b, - 0xd8, 0x95, 0x5c, 0x99, 0x44, 0x5b, 0xfe, 0xf8, 0xf4, 0x24, 0x6c, 0xf7, 0x25, 0xdc, 0xb2, 0x27, - 0x7c, 0xc7, 0x8d, 0x8c, 0x95, 0x8e, 0x19, 0x29, 0x77, 0x92, 0x9a, 0x10, 0xfc, 0x74, 0x60, 0xbf, - 0x27, 0xa2, 0x19, 0x78, 0xeb, 0xda, 0x55, 0x76, 0xc2, 0x23, 0x7f, 0x1c, 0xe8, 0x02, 0x46, 0xaa, - 0x84, 0xfb, 0x6e, 0x99, 0xfd, 0xd5, 0x7f, 0x67, 0x0f, 0xad, 0xfc, 0x36, 0xd5, 0x32, 0x27, 0x15, - 0x2c, 0x78, 0x01, 0xe3, 0x96, 0x1b, 0x4d, 0x60, 0x67, 0xc3, 0xf3, 0x2a, 0x7b, 0xb1, 0x44, 0x07, - 0x70, 0xe7, 0x86, 0x26, 0xc6, 0xf6, 0xcb, 0x21, 0xd6, 0x78, 0xe9, 0x3e, 0x77, 0xf0, 0x0f, 0x07, - 0x26, 0x2b, 0xae, 0xbb, 0xb7, 0x30, 0x5c, 0xc4, 0x59, 0xaf, 0x88, 0x27, 0xdd, 0x22, 0xfa, 0xb4, - 0xdb, 0x3e, 0xf1, 0x14, 0x1e, 0xb4, 0x52, 0xd8, 0x2e, 0x9d, 0xfe, 0x72, 0xc1, 0x7b, 
0x5d, 0x9f, - 0x02, 0xbd, 0x81, 0x91, 0x9d, 0x36, 0xf4, 0xb0, 0x77, 0xb6, 0xf6, 0xc8, 0x06, 0xb3, 0xed, 0x62, - 0x35, 0x2a, 0x17, 0xb0, 0xd7, 0x1e, 0x5c, 0x74, 0xd4, 0x8d, 0xde, 0xf2, 0x0f, 0x04, 0x78, 0x28, - 0xa4, 0xc2, 0x12, 0x18, 0xb7, 0xae, 0x19, 0xcd, 0x07, 0x26, 0xc0, 0x42, 0x8f, 0xfe, 0x39, 0x23, - 0xe8, 0x1c, 0xbc, 0xa6, 0x25, 0xe8, 0xd1, 0xf0, 0x75, 0x04, 0x8f, 0xff, 0xaa, 0x5b, 0xda, 0xd9, - 0xc9, 0xa7, 0xe5, 0x75, 0xac, 0xd7, 0x26, 0x0a, 0x99, 0xf8, 0xba, 0xcc, 0x36, 0xfa, 0x98, 0x51, - 0xb5, 0x2e, 0x16, 0x57, 0xcb, 0x24, 0x2d, 0xbe, 0xce, 0xfb, 0x22, 0x33, 0x16, 0x8d, 0xca, 0x37, - 0xe6, 0xd9, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x08, 0x70, 0x2a, 0x77, 0x83, 0x04, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// AutopilotClient is the client API for Autopilot service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type AutopilotClient interface { - // - //Status returns whether the daemon's autopilot agent is active. - Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) - // - //ModifyStatus is used to modify the status of the autopilot agent, like - //enabling or disabling it. - ModifyStatus(ctx context.Context, in *ModifyStatusRequest, opts ...grpc.CallOption) (*ModifyStatusResponse, error) - // - //QueryScores queries all available autopilot heuristics, in addition to any - //active combination of these heruristics, for the scores they would give to - //the given nodes. 
- QueryScores(ctx context.Context, in *QueryScoresRequest, opts ...grpc.CallOption) (*QueryScoresResponse, error) - // - //SetScores attempts to set the scores used by the running autopilot agent, - //if the external scoring heuristic is enabled. - SetScores(ctx context.Context, in *SetScoresRequest, opts ...grpc.CallOption) (*SetScoresResponse, error) -} - -type autopilotClient struct { - cc *grpc.ClientConn -} - -func NewAutopilotClient(cc *grpc.ClientConn) AutopilotClient { - return &autopilotClient{cc} -} - -func (c *autopilotClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) { - out := new(StatusResponse) - err := c.cc.Invoke(ctx, "/autopilotrpc.Autopilot/Status", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *autopilotClient) ModifyStatus(ctx context.Context, in *ModifyStatusRequest, opts ...grpc.CallOption) (*ModifyStatusResponse, error) { - out := new(ModifyStatusResponse) - err := c.cc.Invoke(ctx, "/autopilotrpc.Autopilot/ModifyStatus", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *autopilotClient) QueryScores(ctx context.Context, in *QueryScoresRequest, opts ...grpc.CallOption) (*QueryScoresResponse, error) { - out := new(QueryScoresResponse) - err := c.cc.Invoke(ctx, "/autopilotrpc.Autopilot/QueryScores", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *autopilotClient) SetScores(ctx context.Context, in *SetScoresRequest, opts ...grpc.CallOption) (*SetScoresResponse, error) { - out := new(SetScoresResponse) - err := c.cc.Invoke(ctx, "/autopilotrpc.Autopilot/SetScores", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// AutopilotServer is the server API for Autopilot service. -type AutopilotServer interface { - // - //Status returns whether the daemon's autopilot agent is active. 
- Status(context.Context, *StatusRequest) (*StatusResponse, error) - // - //ModifyStatus is used to modify the status of the autopilot agent, like - //enabling or disabling it. - ModifyStatus(context.Context, *ModifyStatusRequest) (*ModifyStatusResponse, error) - // - //QueryScores queries all available autopilot heuristics, in addition to any - //active combination of these heruristics, for the scores they would give to - //the given nodes. - QueryScores(context.Context, *QueryScoresRequest) (*QueryScoresResponse, error) - // - //SetScores attempts to set the scores used by the running autopilot agent, - //if the external scoring heuristic is enabled. - SetScores(context.Context, *SetScoresRequest) (*SetScoresResponse, error) -} - -// UnimplementedAutopilotServer can be embedded to have forward compatible implementations. -type UnimplementedAutopilotServer struct { -} - -func (*UnimplementedAutopilotServer) Status(ctx context.Context, req *StatusRequest) (*StatusResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Status not implemented") -} -func (*UnimplementedAutopilotServer) ModifyStatus(ctx context.Context, req *ModifyStatusRequest) (*ModifyStatusResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ModifyStatus not implemented") -} -func (*UnimplementedAutopilotServer) QueryScores(ctx context.Context, req *QueryScoresRequest) (*QueryScoresResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method QueryScores not implemented") -} -func (*UnimplementedAutopilotServer) SetScores(ctx context.Context, req *SetScoresRequest) (*SetScoresResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method SetScores not implemented") -} - -func RegisterAutopilotServer(s *grpc.Server, srv AutopilotServer) { - s.RegisterService(&_Autopilot_serviceDesc, srv) -} - -func _Autopilot_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) 
(interface{}, error) { - in := new(StatusRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AutopilotServer).Status(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/autopilotrpc.Autopilot/Status", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AutopilotServer).Status(ctx, req.(*StatusRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Autopilot_ModifyStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ModifyStatusRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AutopilotServer).ModifyStatus(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/autopilotrpc.Autopilot/ModifyStatus", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AutopilotServer).ModifyStatus(ctx, req.(*ModifyStatusRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Autopilot_QueryScores_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryScoresRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AutopilotServer).QueryScores(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/autopilotrpc.Autopilot/QueryScores", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AutopilotServer).QueryScores(ctx, req.(*QueryScoresRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Autopilot_SetScores_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SetScoresRequest) - if err := 
dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AutopilotServer).SetScores(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/autopilotrpc.Autopilot/SetScores", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AutopilotServer).SetScores(ctx, req.(*SetScoresRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Autopilot_serviceDesc = grpc.ServiceDesc{ - ServiceName: "autopilotrpc.Autopilot", - HandlerType: (*AutopilotServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Status", - Handler: _Autopilot_Status_Handler, - }, - { - MethodName: "ModifyStatus", - Handler: _Autopilot_ModifyStatus_Handler, - }, - { - MethodName: "QueryScores", - Handler: _Autopilot_QueryScores_Handler, - }, - { - MethodName: "SetScores", - Handler: _Autopilot_SetScores_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "autopilotrpc/autopilot.proto", -} diff --git a/lnd/lnrpc/autopilotrpc/autopilot.pb.gw.go b/lnd/lnrpc/autopilotrpc/autopilot.pb.gw.go deleted file mode 100644 index 8ec688a4..00000000 --- a/lnd/lnrpc/autopilotrpc/autopilot.pb.gw.go +++ /dev/null @@ -1,380 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. -// source: autopilotrpc/autopilot.proto - -/* -Package autopilotrpc is a reverse proxy. - -It translates gRPC into RESTful JSON APIs. 
-*/ -package autopilotrpc - -import ( - "context" - "io" - "net/http" - - "github.com/golang/protobuf/descriptor" - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/status" -) - -// Suppress "imported and not used" errors -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray -var _ = descriptor.ForMessage - -func request_Autopilot_Status_0(ctx context.Context, marshaler runtime.Marshaler, client AutopilotClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq StatusRequest - var metadata runtime.ServerMetadata - - msg, err := client.Status(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Autopilot_Status_0(ctx context.Context, marshaler runtime.Marshaler, server AutopilotServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq StatusRequest - var metadata runtime.ServerMetadata - - msg, err := server.Status(ctx, &protoReq) - return msg, metadata, err - -} - -func request_Autopilot_ModifyStatus_0(ctx context.Context, marshaler runtime.Marshaler, client AutopilotClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ModifyStatusRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := 
client.ModifyStatus(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Autopilot_ModifyStatus_0(ctx context.Context, marshaler runtime.Marshaler, server AutopilotServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ModifyStatusRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.ModifyStatus(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Autopilot_QueryScores_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Autopilot_QueryScores_0(ctx context.Context, marshaler runtime.Marshaler, client AutopilotClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryScoresRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Autopilot_QueryScores_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.QueryScores(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Autopilot_QueryScores_0(ctx context.Context, marshaler runtime.Marshaler, server AutopilotServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryScoresRequest - var metadata 
runtime.ServerMetadata - - if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_Autopilot_QueryScores_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.QueryScores(ctx, &protoReq) - return msg, metadata, err - -} - -func request_Autopilot_SetScores_0(ctx context.Context, marshaler runtime.Marshaler, client AutopilotClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq SetScoresRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.SetScores(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Autopilot_SetScores_0(ctx context.Context, marshaler runtime.Marshaler, server AutopilotServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq SetScoresRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.SetScores(ctx, &protoReq) - return msg, metadata, err - -} - -// RegisterAutopilotHandlerServer registers the http handlers for service Autopilot to "mux". -// UnaryRPC :call AutopilotServer directly. -// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. 
-func RegisterAutopilotHandlerServer(ctx context.Context, mux *runtime.ServeMux, server AutopilotServer) error { - - mux.Handle("GET", pattern_Autopilot_Status_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Autopilot_Status_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Autopilot_Status_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Autopilot_ModifyStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Autopilot_ModifyStatus_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Autopilot_ModifyStatus_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Autopilot_QueryScores_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Autopilot_QueryScores_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Autopilot_QueryScores_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Autopilot_SetScores_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Autopilot_SetScores_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Autopilot_SetScores_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -// RegisterAutopilotHandlerFromEndpoint is same as RegisterAutopilotHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterAutopilotHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) 
- if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterAutopilotHandler(ctx, mux, conn) -} - -// RegisterAutopilotHandler registers the http handlers for service Autopilot to "mux". -// The handlers forward requests to the grpc endpoint over "conn". -func RegisterAutopilotHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterAutopilotHandlerClient(ctx, mux, NewAutopilotClient(conn)) -} - -// RegisterAutopilotHandlerClient registers the http handlers for service Autopilot -// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "AutopilotClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "AutopilotClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "AutopilotClient" to call the correct interceptors. 
-func RegisterAutopilotHandlerClient(ctx context.Context, mux *runtime.ServeMux, client AutopilotClient) error { - - mux.Handle("GET", pattern_Autopilot_Status_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Autopilot_Status_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Autopilot_Status_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Autopilot_ModifyStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Autopilot_ModifyStatus_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Autopilot_ModifyStatus_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Autopilot_QueryScores_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Autopilot_QueryScores_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Autopilot_QueryScores_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Autopilot_SetScores_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Autopilot_SetScores_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Autopilot_SetScores_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - return nil -} - -var ( - pattern_Autopilot_Status_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v2", "autopilot", "status"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Autopilot_ModifyStatus_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v2", "autopilot", "modify"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Autopilot_QueryScores_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v2", "autopilot", "scores"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Autopilot_SetScores_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v2", "autopilot", "scores"}, "", runtime.AssumeColonVerbOpt(true))) -) - -var ( - forward_Autopilot_Status_0 = runtime.ForwardResponseMessage - - forward_Autopilot_ModifyStatus_0 = runtime.ForwardResponseMessage - - forward_Autopilot_QueryScores_0 = runtime.ForwardResponseMessage - - forward_Autopilot_SetScores_0 = runtime.ForwardResponseMessage -) diff --git a/lnd/lnrpc/autopilotrpc/autopilot.proto b/lnd/lnrpc/autopilotrpc/autopilot.proto deleted file mode 100644 index e43a5c56..00000000 --- a/lnd/lnrpc/autopilotrpc/autopilot.proto +++ /dev/null @@ -1,80 +0,0 @@ -syntax = "proto3"; - -package autopilotrpc; - -option go_package = "github.com/pkt-cash/pktd/lnd/lnrpc/autopilotrpc"; - -// Autopilot is a service that can be used to get information about the current -// state of the daemon's autopilot agent, and also supply it with information -// that can be used when deciding where to open channels. -service Autopilot { - /* - Status returns whether the daemon's autopilot agent is active. - */ - rpc Status (StatusRequest) returns (StatusResponse); - - /* - ModifyStatus is used to modify the status of the autopilot agent, like - enabling or disabling it. 
- */ - rpc ModifyStatus (ModifyStatusRequest) returns (ModifyStatusResponse); - - /* - QueryScores queries all available autopilot heuristics, in addition to any - active combination of these heruristics, for the scores they would give to - the given nodes. - */ - rpc QueryScores (QueryScoresRequest) returns (QueryScoresResponse); - - /* - SetScores attempts to set the scores used by the running autopilot agent, - if the external scoring heuristic is enabled. - */ - rpc SetScores (SetScoresRequest) returns (SetScoresResponse); -} - -message StatusRequest { -} - -message StatusResponse { - // Indicates whether the autopilot is active or not. - bool active = 1; -} - -message ModifyStatusRequest { - // Whether the autopilot agent should be enabled or not. - bool enable = 1; -} - -message ModifyStatusResponse { -} - -message QueryScoresRequest { - repeated string pubkeys = 1; - - // If set, we will ignore the local channel state when calculating scores. - bool ignore_local_state = 2; -} - -message QueryScoresResponse { - message HeuristicResult { - string heuristic = 1; - map scores = 2; - } - - repeated HeuristicResult results = 1; -} - -message SetScoresRequest { - // The name of the heuristic to provide scores to. - string heuristic = 1; - - /* - A map from hex-encoded public keys to scores. Scores must be in the range - [0.0, 1.0]. 
- */ - map scores = 2; -} - -message SetScoresResponse { -} diff --git a/lnd/lnrpc/autopilotrpc/autopilot.swagger.json b/lnd/lnrpc/autopilotrpc/autopilot.swagger.json deleted file mode 100644 index 665b1ebc..00000000 --- a/lnd/lnrpc/autopilotrpc/autopilot.swagger.json +++ /dev/null @@ -1,246 +0,0 @@ -{ - "swagger": "2.0", - "info": { - "title": "autopilotrpc/autopilot.proto", - "version": "version not set" - }, - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "paths": { - "/v2/autopilot/modify": { - "post": { - "summary": "ModifyStatus is used to modify the status of the autopilot agent, like\nenabling or disabling it.", - "operationId": "Autopilot_ModifyStatus", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/autopilotrpcModifyStatusResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/autopilotrpcModifyStatusRequest" - } - } - ], - "tags": [ - "Autopilot" - ] - } - }, - "/v2/autopilot/scores": { - "get": { - "summary": "QueryScores queries all available autopilot heuristics, in addition to any\nactive combination of these heruristics, for the scores they would give to\nthe given nodes.", - "operationId": "Autopilot_QueryScores", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/autopilotrpcQueryScoresResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "pubkeys", - "in": "query", - "required": false, - "type": "array", - "items": { - "type": "string" - }, - "collectionFormat": "multi" - }, - { - "name": "ignore_local_state", - "description": "If set, we will ignore the 
local channel state when calculating scores.", - "in": "query", - "required": false, - "type": "boolean" - } - ], - "tags": [ - "Autopilot" - ] - }, - "post": { - "summary": "SetScores attempts to set the scores used by the running autopilot agent,\nif the external scoring heuristic is enabled.", - "operationId": "Autopilot_SetScores", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/autopilotrpcSetScoresResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/autopilotrpcSetScoresRequest" - } - } - ], - "tags": [ - "Autopilot" - ] - } - }, - "/v2/autopilot/status": { - "get": { - "summary": "Status returns whether the daemon's autopilot agent is active.", - "operationId": "Autopilot_Status", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/autopilotrpcStatusResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "tags": [ - "Autopilot" - ] - } - } - }, - "definitions": { - "QueryScoresResponseHeuristicResult": { - "type": "object", - "properties": { - "heuristic": { - "type": "string" - }, - "scores": { - "type": "object", - "additionalProperties": { - "type": "number", - "format": "double" - } - } - } - }, - "autopilotrpcModifyStatusRequest": { - "type": "object", - "properties": { - "enable": { - "type": "boolean", - "description": "Whether the autopilot agent should be enabled or not." 
- } - } - }, - "autopilotrpcModifyStatusResponse": { - "type": "object" - }, - "autopilotrpcQueryScoresResponse": { - "type": "object", - "properties": { - "results": { - "type": "array", - "items": { - "$ref": "#/definitions/QueryScoresResponseHeuristicResult" - } - } - } - }, - "autopilotrpcSetScoresRequest": { - "type": "object", - "properties": { - "heuristic": { - "type": "string", - "description": "The name of the heuristic to provide scores to." - }, - "scores": { - "type": "object", - "additionalProperties": { - "type": "number", - "format": "double" - }, - "description": "A map from hex-encoded public keys to scores. Scores must be in the range\n[0.0, 1.0]." - } - } - }, - "autopilotrpcSetScoresResponse": { - "type": "object" - }, - "autopilotrpcStatusResponse": { - "type": "object", - "properties": { - "active": { - "type": "boolean", - "description": "Indicates whether the autopilot is active or not." - } - } - }, - "protobufAny": { - "type": "object", - "properties": { - "type_url": { - "type": "string" - }, - "value": { - "type": "string", - "format": "byte" - } - } - }, - "runtimeError": { - "type": "object", - "properties": { - "error": { - "type": "string" - }, - "code": { - "type": "integer", - "format": "int32" - }, - "message": { - "type": "string" - }, - "details": { - "type": "array", - "items": { - "$ref": "#/definitions/protobufAny" - } - } - } - } - } -} diff --git a/lnd/lnrpc/autopilotrpc/autopilot_server.go b/lnd/lnrpc/autopilotrpc/autopilot_server.go deleted file mode 100644 index 448c58db..00000000 --- a/lnd/lnrpc/autopilotrpc/autopilot_server.go +++ /dev/null @@ -1,267 +0,0 @@ -// +build autopilotrpc - -package autopilotrpc - -import ( - "context" - "encoding/hex" - "sync/atomic" - - "github.com/pkt-cash/pktd/btcec" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/pkt-cash/pktd/lnd/autopilot" - "github.com/pkt-cash/pktd/lnd/lnrpc" - "google.golang.org/grpc" - "gopkg.in/macaroon-bakery.v2/bakery" -) - -const ( - // 
subServerName is the name of the sub rpc server. We'll use this name - // to register ourselves, and we also require that the main - // SubServerConfigDispatcher instance recognize tt as the name of our - // RPC service. - subServerName = "AutopilotRPC" -) - -var ( - // macPermissions maps RPC calls to the permissions they require. - macPermissions = map[string][]bakery.Op{ - "/autopilotrpc.Autopilot/Status": {{ - Entity: "info", - Action: "read", - }}, - "/autopilotrpc.Autopilot/ModifyStatus": {{ - Entity: "onchain", - Action: "write", - }, { - Entity: "offchain", - Action: "write", - }}, - "/autopilotrpc.Autopilot/QueryScores": {{ - Entity: "info", - Action: "read", - }}, - "/autopilotrpc.Autopilot/SetScores": {{ - Entity: "onchain", - Action: "write", - }, { - Entity: "offchain", - Action: "write", - }}, - } -) - -// Server is a sub-server of the main RPC server: the autopilot RPC. This sub -// RPC server allows external callers to access the status of the autopilot -// currently active within lnd, as well as configuring it at runtime. -type Server struct { - started int32 // To be used atomically. - shutdown int32 // To be used atomically. - - cfg *Config - - manager *autopilot.Manager -} - -// A compile time check to ensure that Server fully implements the -// AutopilotServer gRPC service. -var _ AutopilotServer = (*Server)(nil) - -// New returns a new instance of the autopilotrpc Autopilot sub-server. We also -// return the set of permissions for the macaroons that we may create within -// this method. If the macaroons we need aren't found in the filepath, then -// we'll create them on start up. If we're unable to locate, or create the -// macaroons we need, then we'll return with an error. -func New(cfg *Config) (*Server, lnrpc.MacaroonPerms, er.R) { - // We don't create any new macaroons for this subserver, instead reuse - // existing onchain/offchain permissions. 
- server := &Server{ - cfg: cfg, - manager: cfg.Manager, - } - - return server, macPermissions, nil -} - -// Start launches any helper goroutines required for the Server to function. -// -// NOTE: This is part of the lnrpc.SubServer interface. -func (s *Server) Start() er.R { - if atomic.AddInt32(&s.started, 1) != 1 { - return nil - } - - return s.manager.Start() -} - -// Stop signals any active goroutines for a graceful closure. -// -// NOTE: This is part of the lnrpc.SubServer interface. -func (s *Server) Stop() er.R { - if atomic.AddInt32(&s.shutdown, 1) != 1 { - return nil - } - - return s.manager.Stop() -} - -// Name returns a unique string representation of the sub-server. This can be -// used to identify the sub-server and also de-duplicate them. -// -// NOTE: This is part of the lnrpc.SubServer interface. -func (s *Server) Name() string { - return subServerName -} - -// RegisterWithRootServer will be called by the root gRPC server to direct a -// sub RPC server to register itself with the main gRPC root server. Until this -// is called, each sub-server won't be able to have -// requests routed towards it. -// -// NOTE: This is part of the lnrpc.SubServer interface. -func (s *Server) RegisterWithRootServer(grpcServer *grpc.Server) er.R { - // We make sure that we register it with the main gRPC server to ensure - // all our methods are routed properly. - RegisterAutopilotServer(grpcServer, s) - - log.Debugf("Autopilot RPC server successfully register with root " + - "gRPC server") - - return nil -} - -// RegisterWithRestServer will be called by the root REST mux to direct a sub -// RPC server to register itself with the main REST mux server. Until this is -// called, each sub-server won't be able to have requests routed towards it. -// -// NOTE: This is part of the lnrpc.SubServer interface. 
-func (s *Server) RegisterWithRestServer(ctx context.Context, - mux *runtime.ServeMux, dest string, opts []grpc.DialOption) er.R { - - // We make sure that we register it with the main REST server to ensure - // all our methods are routed properly. - err := RegisterAutopilotHandlerFromEndpoint(ctx, mux, dest, opts) - if err != nil { - log.Errorf("Could not register Autopilot REST server "+ - "with root REST server: %v", err) - return err - } - - log.Debugf("Autopilot REST server successfully registered with " + - "root REST server") - return nil -} - -// Status returns the current status of the autopilot agent. -// -// NOTE: Part of the AutopilotServer interface. -func (s *Server) Status(ctx context.Context, - in *StatusRequest) (*StatusResponse, er.R) { - - return &StatusResponse{ - Active: s.manager.IsActive(), - }, nil -} - -// ModifyStatus activates the current autopilot agent, if active. -// -// NOTE: Part of the AutopilotServer interface. -func (s *Server) ModifyStatus(ctx context.Context, - in *ModifyStatusRequest) (*ModifyStatusResponse, er.R) { - - log.Debugf("Setting agent enabled=%v", in.Enable) - - var err error - if in.Enable { - err = s.manager.StartAgent() - } else { - err = s.manager.StopAgent() - } - return &ModifyStatusResponse{}, err -} - -// QueryScores queries all available autopilot heuristics, in addition to any -// active combination of these heruristics, for the scores they would give to -// the given nodes. -// -// NOTE: Part of the AutopilotServer interface. -func (s *Server) QueryScores(ctx context.Context, in *QueryScoresRequest) ( - *QueryScoresResponse, er.R) { - - var nodes []autopilot.NodeID - for _, pubStr := range in.Pubkeys { - pubHex, err := util.DecodeHex(pubStr) - if err != nil { - return nil, err - } - pubKey, err := btcec.ParsePubKey(pubHex, btcec.S256()) - if err != nil { - return nil, err - } - nID := autopilot.NewNodeID(pubKey) - nodes = append(nodes, nID) - } - - // Query the heuristics. 
- heuristicScores, err := s.manager.QueryHeuristics( - nodes, !in.IgnoreLocalState, - ) - if err != nil { - return nil, err - } - - resp := &QueryScoresResponse{} - for heuristic, scores := range heuristicScores { - result := &QueryScoresResponse_HeuristicResult{ - Heuristic: heuristic, - Scores: make(map[string]float64), - } - - for pub, score := range scores { - pubkeyHex := hex.EncodeToString(pub[:]) - result.Scores[pubkeyHex] = score - } - - // Since a node not being part of the internally returned - // scores imply a zero score, we add these before we return the - // RPC results. - for _, node := range nodes { - if _, ok := scores[node]; ok { - continue - } - pubkeyHex := hex.EncodeToString(node[:]) - result.Scores[pubkeyHex] = 0.0 - } - - resp.Results = append(resp.Results, result) - } - - return resp, nil -} - -// SetScores sets the scores of the external score heuristic, if active. -// -// NOTE: Part of the AutopilotServer interface. -func (s *Server) SetScores(ctx context.Context, - in *SetScoresRequest) (*SetScoresResponse, er.R) { - - scores := make(map[autopilot.NodeID]float64) - for pubStr, score := range in.Scores { - pubHex, err := util.DecodeHex(pubStr) - if err != nil { - return nil, err - } - pubKey, err := btcec.ParsePubKey(pubHex, btcec.S256()) - if err != nil { - return nil, err - } - nID := autopilot.NewNodeID(pubKey) - scores[nID] = score - } - - if err := s.manager.SetNodeScores(in.Heuristic, scores); err != nil { - return nil, err - } - - return &SetScoresResponse{}, nil -} diff --git a/lnd/lnrpc/autopilotrpc/config_active.go b/lnd/lnrpc/autopilotrpc/config_active.go deleted file mode 100644 index fb59942f..00000000 --- a/lnd/lnrpc/autopilotrpc/config_active.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build autopilotrpc - -package autopilotrpc - -import ( - "github.com/pkt-cash/pktd/lnd/autopilot" -) - -// Config is the primary configuration struct for the autopilot RPC server. 
It -// contains all the items required for the rpc server to carry out its -// duties. The fields with struct tags are meant to be parsed as normal -// configuration options, while if able to be populated, the latter fields MUST -// also be specified. -type Config struct { - // Manager is the running autopilot manager. - Manager *autopilot.Manager -} diff --git a/lnd/lnrpc/autopilotrpc/config_default.go b/lnd/lnrpc/autopilotrpc/config_default.go deleted file mode 100644 index 2d42ab51..00000000 --- a/lnd/lnrpc/autopilotrpc/config_default.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build !autopilotrpc - -package autopilotrpc - -// Config is empty for non-autopilotrpc builds. -type Config struct{} diff --git a/lnd/lnrpc/autopilotrpc/driver.go b/lnd/lnrpc/autopilotrpc/driver.go deleted file mode 100644 index 86ae587f..00000000 --- a/lnd/lnrpc/autopilotrpc/driver.go +++ /dev/null @@ -1,63 +0,0 @@ -// +build autopilotrpc - -package autopilotrpc - -import ( - "fmt" - - "github.com/pkt-cash/pktd/lnd/lnrpc" -) - -// createNewSubServer is a helper method that will create the new sub server -// given the main config dispatcher method. If we're unable to find the config -// that is meant for us in the config dispatcher, then we'll exit with an -// error. -func createNewSubServer(configRegistry lnrpc.SubServerConfigDispatcher) ( - lnrpc.SubServer, lnrpc.MacaroonPerms, er.R) { - - // We'll attempt to look up the config that we expect, according to our - // subServerName name. If we can't find this, then we'll exit with an - // error, as we're unable to properly initialize ourselves without this - // config. - subServerConf, ok := configRegistry.FetchConfig(subServerName) - if !ok { - return nil, nil, er.Errorf("unable to find config for "+ - "subserver type %s", subServerName) - } - - // Now that we've found an object mapping to our service name, we'll - // ensure that it's the type we need. 
- config, ok := subServerConf.(*Config) - if !ok { - return nil, nil, er.Errorf("wrong type of config for "+ - "subserver %s, expected %T got %T", subServerName, - &Config{}, subServerConf) - } - - // Before we try to make the new service instance, we'll perform - // some sanity checks on the arguments to ensure that they're useable. - switch { - case config.Manager == nil: - return nil, nil, er.Errorf("Manager must be set to create " + - "Autopilotrpc") - } - - return New(config) -} - -func init() { - subServer := &lnrpc.SubServerDriver{ - SubServerName: subServerName, - New: func(c lnrpc.SubServerConfigDispatcher) (lnrpc.SubServer, - lnrpc.MacaroonPerms, er.R) { - return createNewSubServer(c) - }, - } - - // If the build tag is active, then we'll register ourselves as a - // sub-RPC server within the global lnrpc package namespace. - if err := lnrpc.RegisterSubServer(subServer); err != nil { - panic(fmt.Sprintf("failed to register sub server driver "+ - "'%s': %v", subServerName, err)) - } -} diff --git a/lnd/lnrpc/chainrpc/chainnotifier.pb.go b/lnd/lnrpc/chainrpc/chainnotifier.pb.go deleted file mode 100644 index 746dab25..00000000 --- a/lnd/lnrpc/chainrpc/chainnotifier.pb.go +++ /dev/null @@ -1,941 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: chainrpc/chainnotifier.proto - -package chainrpc - -import ( - context "context" - fmt "fmt" - proto "github.com/golang/protobuf/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -type ConfRequest struct { - // - //The transaction hash for which we should request a confirmation notification - //for. If set to a hash of all zeros, then the confirmation notification will - //be requested for the script instead. - Txid []byte `protobuf:"bytes,1,opt,name=txid,proto3" json:"txid,omitempty"` - // - //An output script within a transaction with the hash above which will be used - //by light clients to match block filters. If the transaction hash is set to a - //hash of all zeros, then a confirmation notification will be requested for - //this script instead. - Script []byte `protobuf:"bytes,2,opt,name=script,proto3" json:"script,omitempty"` - // - //The number of desired confirmations the transaction/output script should - //reach before dispatching a confirmation notification. - NumConfs uint32 `protobuf:"varint,3,opt,name=num_confs,json=numConfs,proto3" json:"num_confs,omitempty"` - // - //The earliest height in the chain for which the transaction/output script - //could have been included in a block. This should in most cases be set to the - //broadcast height of the transaction/output script. 
- HeightHint uint32 `protobuf:"varint,4,opt,name=height_hint,json=heightHint,proto3" json:"height_hint,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ConfRequest) Reset() { *m = ConfRequest{} } -func (m *ConfRequest) String() string { return proto.CompactTextString(m) } -func (*ConfRequest) ProtoMessage() {} -func (*ConfRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b10e6f8a1c9d2638, []int{0} -} - -func (m *ConfRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ConfRequest.Unmarshal(m, b) -} -func (m *ConfRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ConfRequest.Marshal(b, m, deterministic) -} -func (m *ConfRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConfRequest.Merge(m, src) -} -func (m *ConfRequest) XXX_Size() int { - return xxx_messageInfo_ConfRequest.Size(m) -} -func (m *ConfRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ConfRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ConfRequest proto.InternalMessageInfo - -func (m *ConfRequest) GetTxid() []byte { - if m != nil { - return m.Txid - } - return nil -} - -func (m *ConfRequest) GetScript() []byte { - if m != nil { - return m.Script - } - return nil -} - -func (m *ConfRequest) GetNumConfs() uint32 { - if m != nil { - return m.NumConfs - } - return 0 -} - -func (m *ConfRequest) GetHeightHint() uint32 { - if m != nil { - return m.HeightHint - } - return 0 -} - -type ConfDetails struct { - // The raw bytes of the confirmed transaction. - RawTx []byte `protobuf:"bytes,1,opt,name=raw_tx,json=rawTx,proto3" json:"raw_tx,omitempty"` - // The hash of the block in which the confirmed transaction was included in. - BlockHash []byte `protobuf:"bytes,2,opt,name=block_hash,json=blockHash,proto3" json:"block_hash,omitempty"` - // The height of the block in which the confirmed transaction was included - // in. 
- BlockHeight uint32 `protobuf:"varint,3,opt,name=block_height,json=blockHeight,proto3" json:"block_height,omitempty"` - // The index of the confirmed transaction within the transaction. - TxIndex uint32 `protobuf:"varint,4,opt,name=tx_index,json=txIndex,proto3" json:"tx_index,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ConfDetails) Reset() { *m = ConfDetails{} } -func (m *ConfDetails) String() string { return proto.CompactTextString(m) } -func (*ConfDetails) ProtoMessage() {} -func (*ConfDetails) Descriptor() ([]byte, []int) { - return fileDescriptor_b10e6f8a1c9d2638, []int{1} -} - -func (m *ConfDetails) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ConfDetails.Unmarshal(m, b) -} -func (m *ConfDetails) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ConfDetails.Marshal(b, m, deterministic) -} -func (m *ConfDetails) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConfDetails.Merge(m, src) -} -func (m *ConfDetails) XXX_Size() int { - return xxx_messageInfo_ConfDetails.Size(m) -} -func (m *ConfDetails) XXX_DiscardUnknown() { - xxx_messageInfo_ConfDetails.DiscardUnknown(m) -} - -var xxx_messageInfo_ConfDetails proto.InternalMessageInfo - -func (m *ConfDetails) GetRawTx() []byte { - if m != nil { - return m.RawTx - } - return nil -} - -func (m *ConfDetails) GetBlockHash() []byte { - if m != nil { - return m.BlockHash - } - return nil -} - -func (m *ConfDetails) GetBlockHeight() uint32 { - if m != nil { - return m.BlockHeight - } - return 0 -} - -func (m *ConfDetails) GetTxIndex() uint32 { - if m != nil { - return m.TxIndex - } - return 0 -} - -type Reorg struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Reorg) Reset() { *m = Reorg{} } -func (m *Reorg) String() string { return proto.CompactTextString(m) } -func (*Reorg) ProtoMessage() {} 
-func (*Reorg) Descriptor() ([]byte, []int) { - return fileDescriptor_b10e6f8a1c9d2638, []int{2} -} - -func (m *Reorg) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Reorg.Unmarshal(m, b) -} -func (m *Reorg) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Reorg.Marshal(b, m, deterministic) -} -func (m *Reorg) XXX_Merge(src proto.Message) { - xxx_messageInfo_Reorg.Merge(m, src) -} -func (m *Reorg) XXX_Size() int { - return xxx_messageInfo_Reorg.Size(m) -} -func (m *Reorg) XXX_DiscardUnknown() { - xxx_messageInfo_Reorg.DiscardUnknown(m) -} - -var xxx_messageInfo_Reorg proto.InternalMessageInfo - -type ConfEvent struct { - // Types that are valid to be assigned to Event: - // *ConfEvent_Conf - // *ConfEvent_Reorg - Event isConfEvent_Event `protobuf_oneof:"event"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ConfEvent) Reset() { *m = ConfEvent{} } -func (m *ConfEvent) String() string { return proto.CompactTextString(m) } -func (*ConfEvent) ProtoMessage() {} -func (*ConfEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_b10e6f8a1c9d2638, []int{3} -} - -func (m *ConfEvent) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ConfEvent.Unmarshal(m, b) -} -func (m *ConfEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ConfEvent.Marshal(b, m, deterministic) -} -func (m *ConfEvent) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConfEvent.Merge(m, src) -} -func (m *ConfEvent) XXX_Size() int { - return xxx_messageInfo_ConfEvent.Size(m) -} -func (m *ConfEvent) XXX_DiscardUnknown() { - xxx_messageInfo_ConfEvent.DiscardUnknown(m) -} - -var xxx_messageInfo_ConfEvent proto.InternalMessageInfo - -type isConfEvent_Event interface { - isConfEvent_Event() -} - -type ConfEvent_Conf struct { - Conf *ConfDetails `protobuf:"bytes,1,opt,name=conf,proto3,oneof"` -} - -type ConfEvent_Reorg struct { - Reorg 
*Reorg `protobuf:"bytes,2,opt,name=reorg,proto3,oneof"` -} - -func (*ConfEvent_Conf) isConfEvent_Event() {} - -func (*ConfEvent_Reorg) isConfEvent_Event() {} - -func (m *ConfEvent) GetEvent() isConfEvent_Event { - if m != nil { - return m.Event - } - return nil -} - -func (m *ConfEvent) GetConf() *ConfDetails { - if x, ok := m.GetEvent().(*ConfEvent_Conf); ok { - return x.Conf - } - return nil -} - -func (m *ConfEvent) GetReorg() *Reorg { - if x, ok := m.GetEvent().(*ConfEvent_Reorg); ok { - return x.Reorg - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*ConfEvent) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*ConfEvent_Conf)(nil), - (*ConfEvent_Reorg)(nil), - } -} - -type Outpoint struct { - // The hash of the transaction. - Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` - // The index of the output within the transaction. - Index uint32 `protobuf:"varint,2,opt,name=index,proto3" json:"index,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Outpoint) Reset() { *m = Outpoint{} } -func (m *Outpoint) String() string { return proto.CompactTextString(m) } -func (*Outpoint) ProtoMessage() {} -func (*Outpoint) Descriptor() ([]byte, []int) { - return fileDescriptor_b10e6f8a1c9d2638, []int{4} -} - -func (m *Outpoint) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Outpoint.Unmarshal(m, b) -} -func (m *Outpoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Outpoint.Marshal(b, m, deterministic) -} -func (m *Outpoint) XXX_Merge(src proto.Message) { - xxx_messageInfo_Outpoint.Merge(m, src) -} -func (m *Outpoint) XXX_Size() int { - return xxx_messageInfo_Outpoint.Size(m) -} -func (m *Outpoint) XXX_DiscardUnknown() { - xxx_messageInfo_Outpoint.DiscardUnknown(m) -} - -var xxx_messageInfo_Outpoint proto.InternalMessageInfo - -func (m *Outpoint) 
GetHash() []byte { - if m != nil { - return m.Hash - } - return nil -} - -func (m *Outpoint) GetIndex() uint32 { - if m != nil { - return m.Index - } - return 0 -} - -type SpendRequest struct { - // - //The outpoint for which we should request a spend notification for. If set to - //a zero outpoint, then the spend notification will be requested for the - //script instead. - Outpoint *Outpoint `protobuf:"bytes,1,opt,name=outpoint,proto3" json:"outpoint,omitempty"` - // - //The output script for the outpoint above. This will be used by light clients - //to match block filters. If the outpoint is set to a zero outpoint, then a - //spend notification will be requested for this script instead. - Script []byte `protobuf:"bytes,2,opt,name=script,proto3" json:"script,omitempty"` - // - //The earliest height in the chain for which the outpoint/output script could - //have been spent. This should in most cases be set to the broadcast height of - //the outpoint/output script. - HeightHint uint32 `protobuf:"varint,3,opt,name=height_hint,json=heightHint,proto3" json:"height_hint,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SpendRequest) Reset() { *m = SpendRequest{} } -func (m *SpendRequest) String() string { return proto.CompactTextString(m) } -func (*SpendRequest) ProtoMessage() {} -func (*SpendRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b10e6f8a1c9d2638, []int{5} -} - -func (m *SpendRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SpendRequest.Unmarshal(m, b) -} -func (m *SpendRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SpendRequest.Marshal(b, m, deterministic) -} -func (m *SpendRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SpendRequest.Merge(m, src) -} -func (m *SpendRequest) XXX_Size() int { - return xxx_messageInfo_SpendRequest.Size(m) -} -func (m *SpendRequest) 
XXX_DiscardUnknown() { - xxx_messageInfo_SpendRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_SpendRequest proto.InternalMessageInfo - -func (m *SpendRequest) GetOutpoint() *Outpoint { - if m != nil { - return m.Outpoint - } - return nil -} - -func (m *SpendRequest) GetScript() []byte { - if m != nil { - return m.Script - } - return nil -} - -func (m *SpendRequest) GetHeightHint() uint32 { - if m != nil { - return m.HeightHint - } - return 0 -} - -type SpendDetails struct { - // The outpoint was that spent. - SpendingOutpoint *Outpoint `protobuf:"bytes,1,opt,name=spending_outpoint,json=spendingOutpoint,proto3" json:"spending_outpoint,omitempty"` - // The raw bytes of the spending transaction. - RawSpendingTx []byte `protobuf:"bytes,2,opt,name=raw_spending_tx,json=rawSpendingTx,proto3" json:"raw_spending_tx,omitempty"` - // The hash of the spending transaction. - SpendingTxHash []byte `protobuf:"bytes,3,opt,name=spending_tx_hash,json=spendingTxHash,proto3" json:"spending_tx_hash,omitempty"` - // The input of the spending transaction that fulfilled the spend request. - SpendingInputIndex uint32 `protobuf:"varint,4,opt,name=spending_input_index,json=spendingInputIndex,proto3" json:"spending_input_index,omitempty"` - // The height at which the spending transaction was included in a block. 
- SpendingHeight uint32 `protobuf:"varint,5,opt,name=spending_height,json=spendingHeight,proto3" json:"spending_height,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SpendDetails) Reset() { *m = SpendDetails{} } -func (m *SpendDetails) String() string { return proto.CompactTextString(m) } -func (*SpendDetails) ProtoMessage() {} -func (*SpendDetails) Descriptor() ([]byte, []int) { - return fileDescriptor_b10e6f8a1c9d2638, []int{6} -} - -func (m *SpendDetails) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SpendDetails.Unmarshal(m, b) -} -func (m *SpendDetails) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SpendDetails.Marshal(b, m, deterministic) -} -func (m *SpendDetails) XXX_Merge(src proto.Message) { - xxx_messageInfo_SpendDetails.Merge(m, src) -} -func (m *SpendDetails) XXX_Size() int { - return xxx_messageInfo_SpendDetails.Size(m) -} -func (m *SpendDetails) XXX_DiscardUnknown() { - xxx_messageInfo_SpendDetails.DiscardUnknown(m) -} - -var xxx_messageInfo_SpendDetails proto.InternalMessageInfo - -func (m *SpendDetails) GetSpendingOutpoint() *Outpoint { - if m != nil { - return m.SpendingOutpoint - } - return nil -} - -func (m *SpendDetails) GetRawSpendingTx() []byte { - if m != nil { - return m.RawSpendingTx - } - return nil -} - -func (m *SpendDetails) GetSpendingTxHash() []byte { - if m != nil { - return m.SpendingTxHash - } - return nil -} - -func (m *SpendDetails) GetSpendingInputIndex() uint32 { - if m != nil { - return m.SpendingInputIndex - } - return 0 -} - -func (m *SpendDetails) GetSpendingHeight() uint32 { - if m != nil { - return m.SpendingHeight - } - return 0 -} - -type SpendEvent struct { - // Types that are valid to be assigned to Event: - // *SpendEvent_Spend - // *SpendEvent_Reorg - Event isSpendEvent_Event `protobuf_oneof:"event"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte 
`json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SpendEvent) Reset() { *m = SpendEvent{} } -func (m *SpendEvent) String() string { return proto.CompactTextString(m) } -func (*SpendEvent) ProtoMessage() {} -func (*SpendEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_b10e6f8a1c9d2638, []int{7} -} - -func (m *SpendEvent) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SpendEvent.Unmarshal(m, b) -} -func (m *SpendEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SpendEvent.Marshal(b, m, deterministic) -} -func (m *SpendEvent) XXX_Merge(src proto.Message) { - xxx_messageInfo_SpendEvent.Merge(m, src) -} -func (m *SpendEvent) XXX_Size() int { - return xxx_messageInfo_SpendEvent.Size(m) -} -func (m *SpendEvent) XXX_DiscardUnknown() { - xxx_messageInfo_SpendEvent.DiscardUnknown(m) -} - -var xxx_messageInfo_SpendEvent proto.InternalMessageInfo - -type isSpendEvent_Event interface { - isSpendEvent_Event() -} - -type SpendEvent_Spend struct { - Spend *SpendDetails `protobuf:"bytes,1,opt,name=spend,proto3,oneof"` -} - -type SpendEvent_Reorg struct { - Reorg *Reorg `protobuf:"bytes,2,opt,name=reorg,proto3,oneof"` -} - -func (*SpendEvent_Spend) isSpendEvent_Event() {} - -func (*SpendEvent_Reorg) isSpendEvent_Event() {} - -func (m *SpendEvent) GetEvent() isSpendEvent_Event { - if m != nil { - return m.Event - } - return nil -} - -func (m *SpendEvent) GetSpend() *SpendDetails { - if x, ok := m.GetEvent().(*SpendEvent_Spend); ok { - return x.Spend - } - return nil -} - -func (m *SpendEvent) GetReorg() *Reorg { - if x, ok := m.GetEvent().(*SpendEvent_Reorg); ok { - return x.Reorg - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*SpendEvent) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*SpendEvent_Spend)(nil), - (*SpendEvent_Reorg)(nil), - } -} - -type BlockEpoch struct { - // The hash of the block. 
- Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` - // The height of the block. - Height uint32 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BlockEpoch) Reset() { *m = BlockEpoch{} } -func (m *BlockEpoch) String() string { return proto.CompactTextString(m) } -func (*BlockEpoch) ProtoMessage() {} -func (*BlockEpoch) Descriptor() ([]byte, []int) { - return fileDescriptor_b10e6f8a1c9d2638, []int{8} -} - -func (m *BlockEpoch) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BlockEpoch.Unmarshal(m, b) -} -func (m *BlockEpoch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BlockEpoch.Marshal(b, m, deterministic) -} -func (m *BlockEpoch) XXX_Merge(src proto.Message) { - xxx_messageInfo_BlockEpoch.Merge(m, src) -} -func (m *BlockEpoch) XXX_Size() int { - return xxx_messageInfo_BlockEpoch.Size(m) -} -func (m *BlockEpoch) XXX_DiscardUnknown() { - xxx_messageInfo_BlockEpoch.DiscardUnknown(m) -} - -var xxx_messageInfo_BlockEpoch proto.InternalMessageInfo - -func (m *BlockEpoch) GetHash() []byte { - if m != nil { - return m.Hash - } - return nil -} - -func (m *BlockEpoch) GetHeight() uint32 { - if m != nil { - return m.Height - } - return 0 -} - -func init() { - proto.RegisterType((*ConfRequest)(nil), "chainrpc.ConfRequest") - proto.RegisterType((*ConfDetails)(nil), "chainrpc.ConfDetails") - proto.RegisterType((*Reorg)(nil), "chainrpc.Reorg") - proto.RegisterType((*ConfEvent)(nil), "chainrpc.ConfEvent") - proto.RegisterType((*Outpoint)(nil), "chainrpc.Outpoint") - proto.RegisterType((*SpendRequest)(nil), "chainrpc.SpendRequest") - proto.RegisterType((*SpendDetails)(nil), "chainrpc.SpendDetails") - proto.RegisterType((*SpendEvent)(nil), "chainrpc.SpendEvent") - proto.RegisterType((*BlockEpoch)(nil), "chainrpc.BlockEpoch") -} - -func init() { 
proto.RegisterFile("chainrpc/chainnotifier.proto", fileDescriptor_b10e6f8a1c9d2638) } - -var fileDescriptor_b10e6f8a1c9d2638 = []byte{ - // 574 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0x4f, 0x6f, 0x13, 0x3f, - 0x10, 0xed, 0xb6, 0xdd, 0xfc, 0x99, 0x24, 0xbf, 0xb4, 0xfe, 0xa5, 0x51, 0x5a, 0x40, 0x94, 0x3d, - 0xd0, 0x48, 0x48, 0x21, 0x0a, 0x1c, 0xb8, 0x21, 0x35, 0x14, 0x25, 0x97, 0x22, 0x6d, 0x7b, 0x5f, - 0x6d, 0x37, 0x4e, 0xd6, 0xd0, 0xd8, 0x8b, 0xed, 0x90, 0xbd, 0xf2, 0x69, 0xf9, 0x12, 0x1c, 0x90, - 0x67, 0xed, 0x4d, 0x1a, 0x8a, 0x84, 0xb8, 0x79, 0x66, 0xde, 0x3e, 0xbf, 0xf1, 0x7b, 0x09, 0x3c, - 0x4d, 0xd2, 0x98, 0x71, 0x99, 0x25, 0xaf, 0xf1, 0xc0, 0x85, 0x66, 0x73, 0x46, 0xe5, 0x20, 0x93, - 0x42, 0x0b, 0x52, 0x73, 0xd3, 0x60, 0x0d, 0x8d, 0xb1, 0xe0, 0xf3, 0x90, 0x7e, 0x5d, 0x51, 0xa5, - 0x09, 0x81, 0x43, 0x9d, 0xb3, 0x59, 0xcf, 0x3b, 0xf7, 0xfa, 0xcd, 0x10, 0xcf, 0xa4, 0x0b, 0x15, - 0x95, 0x48, 0x96, 0xe9, 0xde, 0x3e, 0x76, 0x6d, 0x45, 0x9e, 0x40, 0x9d, 0xaf, 0x96, 0x51, 0x22, - 0xf8, 0x5c, 0xf5, 0x0e, 0xce, 0xbd, 0x7e, 0x2b, 0xac, 0xf1, 0xd5, 0xd2, 0xd0, 0x29, 0xf2, 0x1c, - 0x1a, 0x29, 0x65, 0x8b, 0x54, 0x47, 0x29, 0xe3, 0xba, 0x77, 0x88, 0x63, 0x28, 0x5a, 0x13, 0xc6, - 0x75, 0xf0, 0xdd, 0x2b, 0x6e, 0xfe, 0x40, 0x75, 0xcc, 0xee, 0x15, 0x39, 0x81, 0x8a, 0x8c, 0xd7, - 0x91, 0xce, 0xed, 0xdd, 0xbe, 0x8c, 0xd7, 0xb7, 0x39, 0x79, 0x06, 0x70, 0x77, 0x2f, 0x92, 0x2f, - 0x51, 0x1a, 0xab, 0xd4, 0x0a, 0xa8, 0x63, 0x67, 0x12, 0xab, 0x94, 0xbc, 0x80, 0xa6, 0x1d, 0x23, - 0xb3, 0x95, 0xd1, 0x28, 0x00, 0xd8, 0x22, 0xa7, 0x50, 0xd3, 0x79, 0xc4, 0xf8, 0x8c, 0xe6, 0x56, - 0x46, 0x55, 0xe7, 0x53, 0x53, 0x06, 0x55, 0xf0, 0x43, 0x2a, 0xe4, 0x22, 0xf8, 0x0c, 0x75, 0xa3, - 0xe5, 0xea, 0x1b, 0xe5, 0x9a, 0xbc, 0x82, 0x43, 0xb3, 0x13, 0xea, 0x68, 0x8c, 0x4e, 0x06, 0xee, - 0xad, 0x06, 0x5b, 0x72, 0x27, 0x7b, 0x21, 0x82, 0xc8, 0x05, 0xf8, 0xd2, 0x50, 0xa0, 0xb4, 0xc6, - 0xa8, 0xbd, 0x41, 0x23, 0xf3, 0x64, 0x2f, 0x2c, 
0xe6, 0x97, 0x55, 0xf0, 0xa9, 0xa1, 0x0f, 0xde, - 0x42, 0xed, 0xd3, 0x4a, 0x67, 0x82, 0x71, 0x7c, 0x6e, 0xdc, 0xcb, 0x3e, 0xb7, 0x39, 0x93, 0x0e, - 0xf8, 0x85, 0xd8, 0x7d, 0x14, 0x5b, 0x14, 0xc1, 0x1a, 0x9a, 0x37, 0x19, 0xe5, 0x33, 0x67, 0xd4, - 0x00, 0x6a, 0xc2, 0xb2, 0x58, 0xa1, 0x64, 0x73, 0xb5, 0xe3, 0x0f, 0x4b, 0xcc, 0x1f, 0x4d, 0xdc, - 0xf1, 0xe9, 0xe0, 0x37, 0x9f, 0x7e, 0x7a, 0xf6, 0x66, 0x67, 0xd4, 0x7b, 0x38, 0x56, 0xa6, 0x66, - 0x7c, 0x11, 0xfd, 0x85, 0x84, 0x23, 0x07, 0x2e, 0x97, 0x7e, 0x09, 0x6d, 0xe3, 0x74, 0x49, 0xa2, - 0x73, 0xab, 0xa9, 0x25, 0xe3, 0xf5, 0x8d, 0xed, 0xde, 0xe6, 0xa4, 0x0f, 0x47, 0x5b, 0x98, 0x22, - 0x00, 0x07, 0x08, 0xfc, 0x4f, 0x95, 0x28, 0x4c, 0xc1, 0x10, 0x3a, 0x25, 0x92, 0xf1, 0x6c, 0xa5, - 0x1f, 0xd8, 0x4d, 0xdc, 0x6c, 0x6a, 0x46, 0xe8, 0x3c, 0xb9, 0x80, 0x76, 0xf9, 0x85, 0x8d, 0x8e, - 0x8f, 0xe0, 0x92, 0xba, 0x48, 0x4f, 0xc0, 0x01, 0x50, 0x52, 0x11, 0x8d, 0x01, 0xf8, 0x38, 0xb7, - 0xfb, 0x76, 0x37, 0xfb, 0x6e, 0x3f, 0x91, 0x31, 0x1d, 0x61, 0xff, 0x90, 0x8e, 0x77, 0x00, 0x97, - 0x26, 0xbc, 0x57, 0x99, 0x48, 0xd2, 0x47, 0xf3, 0xd1, 0x85, 0x8a, 0x55, 0x5c, 0x04, 0xc4, 0x56, - 0xa3, 0x1f, 0x1e, 0xb4, 0xc6, 0x86, 0xfe, 0xda, 0xfe, 0xd6, 0xc9, 0x14, 0x4e, 0x43, 0xba, 0x60, - 0x4a, 0x53, 0x69, 0xa2, 0xcb, 0xe4, 0x32, 0xd6, 0x4c, 0x70, 0x75, 0xad, 0xe7, 0x9c, 0xec, 0xe4, - 0xda, 0xe6, 0xea, 0xec, 0xff, 0x87, 0x6d, 0x5c, 0x7b, 0xe8, 0x91, 0x31, 0x1c, 0x3b, 0x2a, 0xdc, - 0x14, 0x29, 0x76, 0xd7, 0x77, 0x1c, 0x9d, 0x9d, 0xbe, 0x23, 0xf9, 0x08, 0x5d, 0x47, 0xb2, 0xd9, - 0x11, 0x99, 0xb6, 0xbe, 0xd8, 0x4c, 0xce, 0x1e, 0xed, 0x0e, 0xbd, 0xbb, 0x0a, 0xfe, 0x89, 0xbd, - 0xf9, 0x15, 0x00, 0x00, 0xff, 0xff, 0x65, 0xe6, 0xc2, 0xe4, 0xe4, 0x04, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. 
-const _ = grpc.SupportPackageIsVersion4 - -// ChainNotifierClient is the client API for ChainNotifier service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type ChainNotifierClient interface { - // - //RegisterConfirmationsNtfn is a synchronous response-streaming RPC that - //registers an intent for a client to be notified once a confirmation request - //has reached its required number of confirmations on-chain. - // - //A client can specify whether the confirmation request should be for a - //particular transaction by its hash or for an output script by specifying a - //zero hash. - RegisterConfirmationsNtfn(ctx context.Context, in *ConfRequest, opts ...grpc.CallOption) (ChainNotifier_RegisterConfirmationsNtfnClient, error) - // - //RegisterSpendNtfn is a synchronous response-streaming RPC that registers an - //intent for a client to be notification once a spend request has been spent - //by a transaction that has confirmed on-chain. - // - //A client can specify whether the spend request should be for a particular - //outpoint or for an output script by specifying a zero outpoint. - RegisterSpendNtfn(ctx context.Context, in *SpendRequest, opts ...grpc.CallOption) (ChainNotifier_RegisterSpendNtfnClient, error) - // - //RegisterBlockEpochNtfn is a synchronous response-streaming RPC that - //registers an intent for a client to be notified of blocks in the chain. The - //stream will return a hash and height tuple of a block for each new/stale - //block in the chain. It is the client's responsibility to determine whether - //the tuple returned is for a new or stale block in the chain. - // - //A client can also request a historical backlog of blocks from a particular - //point. This allows clients to be idempotent by ensuring that they do not - //missing processing a single block within the chain. 
- RegisterBlockEpochNtfn(ctx context.Context, in *BlockEpoch, opts ...grpc.CallOption) (ChainNotifier_RegisterBlockEpochNtfnClient, error) -} - -type chainNotifierClient struct { - cc *grpc.ClientConn -} - -func NewChainNotifierClient(cc *grpc.ClientConn) ChainNotifierClient { - return &chainNotifierClient{cc} -} - -func (c *chainNotifierClient) RegisterConfirmationsNtfn(ctx context.Context, in *ConfRequest, opts ...grpc.CallOption) (ChainNotifier_RegisterConfirmationsNtfnClient, error) { - stream, err := c.cc.NewStream(ctx, &_ChainNotifier_serviceDesc.Streams[0], "/chainrpc.ChainNotifier/RegisterConfirmationsNtfn", opts...) - if err != nil { - return nil, err - } - x := &chainNotifierRegisterConfirmationsNtfnClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type ChainNotifier_RegisterConfirmationsNtfnClient interface { - Recv() (*ConfEvent, error) - grpc.ClientStream -} - -type chainNotifierRegisterConfirmationsNtfnClient struct { - grpc.ClientStream -} - -func (x *chainNotifierRegisterConfirmationsNtfnClient) Recv() (*ConfEvent, error) { - m := new(ConfEvent) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *chainNotifierClient) RegisterSpendNtfn(ctx context.Context, in *SpendRequest, opts ...grpc.CallOption) (ChainNotifier_RegisterSpendNtfnClient, error) { - stream, err := c.cc.NewStream(ctx, &_ChainNotifier_serviceDesc.Streams[1], "/chainrpc.ChainNotifier/RegisterSpendNtfn", opts...) 
- if err != nil { - return nil, err - } - x := &chainNotifierRegisterSpendNtfnClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type ChainNotifier_RegisterSpendNtfnClient interface { - Recv() (*SpendEvent, error) - grpc.ClientStream -} - -type chainNotifierRegisterSpendNtfnClient struct { - grpc.ClientStream -} - -func (x *chainNotifierRegisterSpendNtfnClient) Recv() (*SpendEvent, error) { - m := new(SpendEvent) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *chainNotifierClient) RegisterBlockEpochNtfn(ctx context.Context, in *BlockEpoch, opts ...grpc.CallOption) (ChainNotifier_RegisterBlockEpochNtfnClient, error) { - stream, err := c.cc.NewStream(ctx, &_ChainNotifier_serviceDesc.Streams[2], "/chainrpc.ChainNotifier/RegisterBlockEpochNtfn", opts...) - if err != nil { - return nil, err - } - x := &chainNotifierRegisterBlockEpochNtfnClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type ChainNotifier_RegisterBlockEpochNtfnClient interface { - Recv() (*BlockEpoch, error) - grpc.ClientStream -} - -type chainNotifierRegisterBlockEpochNtfnClient struct { - grpc.ClientStream -} - -func (x *chainNotifierRegisterBlockEpochNtfnClient) Recv() (*BlockEpoch, error) { - m := new(BlockEpoch) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// ChainNotifierServer is the server API for ChainNotifier service. -type ChainNotifierServer interface { - // - //RegisterConfirmationsNtfn is a synchronous response-streaming RPC that - //registers an intent for a client to be notified once a confirmation request - //has reached its required number of confirmations on-chain. 
- // - //A client can specify whether the confirmation request should be for a - //particular transaction by its hash or for an output script by specifying a - //zero hash. - RegisterConfirmationsNtfn(*ConfRequest, ChainNotifier_RegisterConfirmationsNtfnServer) error - // - //RegisterSpendNtfn is a synchronous response-streaming RPC that registers an - //intent for a client to be notification once a spend request has been spent - //by a transaction that has confirmed on-chain. - // - //A client can specify whether the spend request should be for a particular - //outpoint or for an output script by specifying a zero outpoint. - RegisterSpendNtfn(*SpendRequest, ChainNotifier_RegisterSpendNtfnServer) error - // - //RegisterBlockEpochNtfn is a synchronous response-streaming RPC that - //registers an intent for a client to be notified of blocks in the chain. The - //stream will return a hash and height tuple of a block for each new/stale - //block in the chain. It is the client's responsibility to determine whether - //the tuple returned is for a new or stale block in the chain. - // - //A client can also request a historical backlog of blocks from a particular - //point. This allows clients to be idempotent by ensuring that they do not - //missing processing a single block within the chain. - RegisterBlockEpochNtfn(*BlockEpoch, ChainNotifier_RegisterBlockEpochNtfnServer) error -} - -// UnimplementedChainNotifierServer can be embedded to have forward compatible implementations. 
-type UnimplementedChainNotifierServer struct { -} - -func (*UnimplementedChainNotifierServer) RegisterConfirmationsNtfn(req *ConfRequest, srv ChainNotifier_RegisterConfirmationsNtfnServer) error { - return status.Errorf(codes.Unimplemented, "method RegisterConfirmationsNtfn not implemented") -} -func (*UnimplementedChainNotifierServer) RegisterSpendNtfn(req *SpendRequest, srv ChainNotifier_RegisterSpendNtfnServer) error { - return status.Errorf(codes.Unimplemented, "method RegisterSpendNtfn not implemented") -} -func (*UnimplementedChainNotifierServer) RegisterBlockEpochNtfn(req *BlockEpoch, srv ChainNotifier_RegisterBlockEpochNtfnServer) error { - return status.Errorf(codes.Unimplemented, "method RegisterBlockEpochNtfn not implemented") -} - -func RegisterChainNotifierServer(s *grpc.Server, srv ChainNotifierServer) { - s.RegisterService(&_ChainNotifier_serviceDesc, srv) -} - -func _ChainNotifier_RegisterConfirmationsNtfn_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(ConfRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(ChainNotifierServer).RegisterConfirmationsNtfn(m, &chainNotifierRegisterConfirmationsNtfnServer{stream}) -} - -type ChainNotifier_RegisterConfirmationsNtfnServer interface { - Send(*ConfEvent) error - grpc.ServerStream -} - -type chainNotifierRegisterConfirmationsNtfnServer struct { - grpc.ServerStream -} - -func (x *chainNotifierRegisterConfirmationsNtfnServer) Send(m *ConfEvent) error { - return x.ServerStream.SendMsg(m) -} - -func _ChainNotifier_RegisterSpendNtfn_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(SpendRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(ChainNotifierServer).RegisterSpendNtfn(m, &chainNotifierRegisterSpendNtfnServer{stream}) -} - -type ChainNotifier_RegisterSpendNtfnServer interface { - Send(*SpendEvent) error - grpc.ServerStream -} - -type chainNotifierRegisterSpendNtfnServer struct { - grpc.ServerStream 
-} - -func (x *chainNotifierRegisterSpendNtfnServer) Send(m *SpendEvent) error { - return x.ServerStream.SendMsg(m) -} - -func _ChainNotifier_RegisterBlockEpochNtfn_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(BlockEpoch) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(ChainNotifierServer).RegisterBlockEpochNtfn(m, &chainNotifierRegisterBlockEpochNtfnServer{stream}) -} - -type ChainNotifier_RegisterBlockEpochNtfnServer interface { - Send(*BlockEpoch) error - grpc.ServerStream -} - -type chainNotifierRegisterBlockEpochNtfnServer struct { - grpc.ServerStream -} - -func (x *chainNotifierRegisterBlockEpochNtfnServer) Send(m *BlockEpoch) error { - return x.ServerStream.SendMsg(m) -} - -var _ChainNotifier_serviceDesc = grpc.ServiceDesc{ - ServiceName: "chainrpc.ChainNotifier", - HandlerType: (*ChainNotifierServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{ - { - StreamName: "RegisterConfirmationsNtfn", - Handler: _ChainNotifier_RegisterConfirmationsNtfn_Handler, - ServerStreams: true, - }, - { - StreamName: "RegisterSpendNtfn", - Handler: _ChainNotifier_RegisterSpendNtfn_Handler, - ServerStreams: true, - }, - { - StreamName: "RegisterBlockEpochNtfn", - Handler: _ChainNotifier_RegisterBlockEpochNtfn_Handler, - ServerStreams: true, - }, - }, - Metadata: "chainrpc/chainnotifier.proto", -} diff --git a/lnd/lnrpc/chainrpc/chainnotifier.pb.gw.go b/lnd/lnrpc/chainrpc/chainnotifier.pb.gw.go deleted file mode 100644 index 1d6e496a..00000000 --- a/lnd/lnrpc/chainrpc/chainnotifier.pb.gw.go +++ /dev/null @@ -1,253 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. -// source: chainrpc/chainnotifier.proto - -/* -Package chainrpc is a reverse proxy. - -It translates gRPC into RESTful JSON APIs. 
-*/ -package chainrpc - -import ( - "context" - "io" - "net/http" - - "github.com/golang/protobuf/descriptor" - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/status" -) - -// Suppress "imported and not used" errors -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray -var _ = descriptor.ForMessage - -func request_ChainNotifier_RegisterConfirmationsNtfn_0(ctx context.Context, marshaler runtime.Marshaler, client ChainNotifierClient, req *http.Request, pathParams map[string]string) (ChainNotifier_RegisterConfirmationsNtfnClient, runtime.ServerMetadata, error) { - var protoReq ConfRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - stream, err := client.RegisterConfirmationsNtfn(ctx, &protoReq) - if err != nil { - return nil, metadata, err - } - header, err := stream.Header() - if err != nil { - return nil, metadata, err - } - metadata.HeaderMD = header - return stream, metadata, nil - -} - -func request_ChainNotifier_RegisterSpendNtfn_0(ctx context.Context, marshaler runtime.Marshaler, client ChainNotifierClient, req *http.Request, pathParams map[string]string) (ChainNotifier_RegisterSpendNtfnClient, runtime.ServerMetadata, error) { - var protoReq SpendRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := 
marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - stream, err := client.RegisterSpendNtfn(ctx, &protoReq) - if err != nil { - return nil, metadata, err - } - header, err := stream.Header() - if err != nil { - return nil, metadata, err - } - metadata.HeaderMD = header - return stream, metadata, nil - -} - -func request_ChainNotifier_RegisterBlockEpochNtfn_0(ctx context.Context, marshaler runtime.Marshaler, client ChainNotifierClient, req *http.Request, pathParams map[string]string) (ChainNotifier_RegisterBlockEpochNtfnClient, runtime.ServerMetadata, error) { - var protoReq BlockEpoch - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - stream, err := client.RegisterBlockEpochNtfn(ctx, &protoReq) - if err != nil { - return nil, metadata, err - } - header, err := stream.Header() - if err != nil { - return nil, metadata, err - } - metadata.HeaderMD = header - return stream, metadata, nil - -} - -// RegisterChainNotifierHandlerServer registers the http handlers for service ChainNotifier to "mux". -// UnaryRPC :call ChainNotifierServer directly. -// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. 
-func RegisterChainNotifierHandlerServer(ctx context.Context, mux *runtime.ServeMux, server ChainNotifierServer) error { - - mux.Handle("POST", pattern_ChainNotifier_RegisterConfirmationsNtfn_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - err := status.Error(codes.Unimplemented, "streaming calls are not yet supported in the in-process transport") - _, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - }) - - mux.Handle("POST", pattern_ChainNotifier_RegisterSpendNtfn_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - err := status.Error(codes.Unimplemented, "streaming calls are not yet supported in the in-process transport") - _, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - }) - - mux.Handle("POST", pattern_ChainNotifier_RegisterBlockEpochNtfn_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - err := status.Error(codes.Unimplemented, "streaming calls are not yet supported in the in-process transport") - _, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - }) - - return nil -} - -// RegisterChainNotifierHandlerFromEndpoint is same as RegisterChainNotifierHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterChainNotifierHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) 
- if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterChainNotifierHandler(ctx, mux, conn) -} - -// RegisterChainNotifierHandler registers the http handlers for service ChainNotifier to "mux". -// The handlers forward requests to the grpc endpoint over "conn". -func RegisterChainNotifierHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterChainNotifierHandlerClient(ctx, mux, NewChainNotifierClient(conn)) -} - -// RegisterChainNotifierHandlerClient registers the http handlers for service ChainNotifier -// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "ChainNotifierClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "ChainNotifierClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "ChainNotifierClient" to call the correct interceptors. 
-func RegisterChainNotifierHandlerClient(ctx context.Context, mux *runtime.ServeMux, client ChainNotifierClient) error { - - mux.Handle("POST", pattern_ChainNotifier_RegisterConfirmationsNtfn_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_ChainNotifier_RegisterConfirmationsNtfn_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_ChainNotifier_RegisterConfirmationsNtfn_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_ChainNotifier_RegisterSpendNtfn_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_ChainNotifier_RegisterSpendNtfn_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_ChainNotifier_RegisterSpendNtfn_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_ChainNotifier_RegisterBlockEpochNtfn_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_ChainNotifier_RegisterBlockEpochNtfn_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_ChainNotifier_RegisterBlockEpochNtfn_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -var ( - pattern_ChainNotifier_RegisterConfirmationsNtfn_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v2", "chainnotifier", "register", "confirmations"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_ChainNotifier_RegisterSpendNtfn_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v2", "chainnotifier", "register", "spends"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_ChainNotifier_RegisterBlockEpochNtfn_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v2", "chainnotifier", "register", "blocks"}, "", runtime.AssumeColonVerbOpt(true))) -) - -var ( - forward_ChainNotifier_RegisterConfirmationsNtfn_0 = runtime.ForwardResponseStream - - forward_ChainNotifier_RegisterSpendNtfn_0 = runtime.ForwardResponseStream - - forward_ChainNotifier_RegisterBlockEpochNtfn_0 = runtime.ForwardResponseStream -) diff --git a/lnd/lnrpc/chainrpc/chainnotifier.proto b/lnd/lnrpc/chainrpc/chainnotifier.proto deleted file mode 100644 index e8fa0377..00000000 --- 
a/lnd/lnrpc/chainrpc/chainnotifier.proto +++ /dev/null @@ -1,180 +0,0 @@ -syntax = "proto3"; - -package chainrpc; - -// ChainNotifier is a service that can be used to get information about the -// chain backend by registering notifiers for chain events. -service ChainNotifier { - /* - RegisterConfirmationsNtfn is a synchronous response-streaming RPC that - registers an intent for a client to be notified once a confirmation request - has reached its required number of confirmations on-chain. - - A client can specify whether the confirmation request should be for a - particular transaction by its hash or for an output script by specifying a - zero hash. - */ - rpc RegisterConfirmationsNtfn (ConfRequest) returns (stream ConfEvent); - - /* - RegisterSpendNtfn is a synchronous response-streaming RPC that registers an - intent for a client to be notification once a spend request has been spent - by a transaction that has confirmed on-chain. - - A client can specify whether the spend request should be for a particular - outpoint or for an output script by specifying a zero outpoint. - */ - rpc RegisterSpendNtfn (SpendRequest) returns (stream SpendEvent); - - /* - RegisterBlockEpochNtfn is a synchronous response-streaming RPC that - registers an intent for a client to be notified of blocks in the chain. The - stream will return a hash and height tuple of a block for each new/stale - block in the chain. It is the client's responsibility to determine whether - the tuple returned is for a new or stale block in the chain. - - A client can also request a historical backlog of blocks from a particular - point. This allows clients to be idempotent by ensuring that they do not - missing processing a single block within the chain. - */ - rpc RegisterBlockEpochNtfn (BlockEpoch) returns (stream BlockEpoch); -} - -message ConfRequest { - /* - The transaction hash for which we should request a confirmation notification - for. 
If set to a hash of all zeros, then the confirmation notification will - be requested for the script instead. - */ - bytes txid = 1; - - /* - An output script within a transaction with the hash above which will be used - by light clients to match block filters. If the transaction hash is set to a - hash of all zeros, then a confirmation notification will be requested for - this script instead. - */ - bytes script = 2; - - /* - The number of desired confirmations the transaction/output script should - reach before dispatching a confirmation notification. - */ - uint32 num_confs = 3; - - /* - The earliest height in the chain for which the transaction/output script - could have been included in a block. This should in most cases be set to the - broadcast height of the transaction/output script. - */ - uint32 height_hint = 4; -} - -message ConfDetails { - // The raw bytes of the confirmed transaction. - bytes raw_tx = 1; - - // The hash of the block in which the confirmed transaction was included in. - bytes block_hash = 2; - - // The height of the block in which the confirmed transaction was included - // in. - uint32 block_height = 3; - - // The index of the confirmed transaction within the transaction. - uint32 tx_index = 4; -} - -message Reorg { - // TODO(wilmer): need to know how the client will use this first. -} - -message ConfEvent { - oneof event { - /* - An event that includes the confirmation details of the request - (txid/ouput script). - */ - ConfDetails conf = 1; - - /* - An event send when the transaction of the request is reorged out of the - chain. - */ - Reorg reorg = 2; - } -} - -message Outpoint { - // The hash of the transaction. - bytes hash = 1; - - // The index of the output within the transaction. - uint32 index = 2; -} - -message SpendRequest { - /* - The outpoint for which we should request a spend notification for. If set to - a zero outpoint, then the spend notification will be requested for the - script instead. 
- */ - Outpoint outpoint = 1; - - /* - The output script for the outpoint above. This will be used by light clients - to match block filters. If the outpoint is set to a zero outpoint, then a - spend notification will be requested for this script instead. - */ - bytes script = 2; - - /* - The earliest height in the chain for which the outpoint/output script could - have been spent. This should in most cases be set to the broadcast height of - the outpoint/output script. - */ - uint32 height_hint = 3; - - // TODO(wilmer): extend to support num confs on spending tx. -} - -message SpendDetails { - // The outpoint was that spent. - Outpoint spending_outpoint = 1; - - // The raw bytes of the spending transaction. - bytes raw_spending_tx = 2; - - // The hash of the spending transaction. - bytes spending_tx_hash = 3; - - // The input of the spending transaction that fulfilled the spend request. - uint32 spending_input_index = 4; - - // The height at which the spending transaction was included in a block. - uint32 spending_height = 5; -} - -message SpendEvent { - oneof event { - /* - An event that includes the details of the spending transaction of the - request (outpoint/output script). - */ - SpendDetails spend = 1; - - /* - An event sent when the spending transaction of the request was - reorged out of the chain. - */ - Reorg reorg = 2; - } -} - -message BlockEpoch { - // The hash of the block. - bytes hash = 1; - - // The height of the block. 
- uint32 height = 2; -} diff --git a/lnd/lnrpc/chainrpc/chainnotifier.swagger.json b/lnd/lnrpc/chainrpc/chainnotifier.swagger.json deleted file mode 100644 index 64332bb5..00000000 --- a/lnd/lnrpc/chainrpc/chainnotifier.swagger.json +++ /dev/null @@ -1,361 +0,0 @@ -{ - "swagger": "2.0", - "info": { - "title": "chainrpc/chainnotifier.proto", - "version": "version not set" - }, - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "paths": { - "/v2/chainnotifier/register/blocks": { - "post": { - "summary": "RegisterBlockEpochNtfn is a synchronous response-streaming RPC that\nregisters an intent for a client to be notified of blocks in the chain. The\nstream will return a hash and height tuple of a block for each new/stale\nblock in the chain. It is the client's responsibility to determine whether\nthe tuple returned is for a new or stale block in the chain.", - "description": "A client can also request a historical backlog of blocks from a particular\npoint. 
This allows clients to be idempotent by ensuring that they do not\nmissing processing a single block within the chain.", - "operationId": "ChainNotifier_RegisterBlockEpochNtfn", - "responses": { - "200": { - "description": "A successful response.(streaming responses)", - "schema": { - "type": "object", - "properties": { - "result": { - "$ref": "#/definitions/chainrpcBlockEpoch" - }, - "error": { - "$ref": "#/definitions/runtimeStreamError" - } - }, - "title": "Stream result of chainrpcBlockEpoch" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/chainrpcBlockEpoch" - } - } - ], - "tags": [ - "ChainNotifier" - ] - } - }, - "/v2/chainnotifier/register/confirmations": { - "post": { - "summary": "RegisterConfirmationsNtfn is a synchronous response-streaming RPC that\nregisters an intent for a client to be notified once a confirmation request\nhas reached its required number of confirmations on-chain.", - "description": "A client can specify whether the confirmation request should be for a\nparticular transaction by its hash or for an output script by specifying a\nzero hash.", - "operationId": "ChainNotifier_RegisterConfirmationsNtfn", - "responses": { - "200": { - "description": "A successful response.(streaming responses)", - "schema": { - "type": "object", - "properties": { - "result": { - "$ref": "#/definitions/chainrpcConfEvent" - }, - "error": { - "$ref": "#/definitions/runtimeStreamError" - } - }, - "title": "Stream result of chainrpcConfEvent" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/chainrpcConfRequest" - } - } - ], - "tags": [ - "ChainNotifier" - ] 
- } - }, - "/v2/chainnotifier/register/spends": { - "post": { - "summary": "RegisterSpendNtfn is a synchronous response-streaming RPC that registers an\nintent for a client to be notification once a spend request has been spent\nby a transaction that has confirmed on-chain.", - "description": "A client can specify whether the spend request should be for a particular\noutpoint or for an output script by specifying a zero outpoint.", - "operationId": "ChainNotifier_RegisterSpendNtfn", - "responses": { - "200": { - "description": "A successful response.(streaming responses)", - "schema": { - "type": "object", - "properties": { - "result": { - "$ref": "#/definitions/chainrpcSpendEvent" - }, - "error": { - "$ref": "#/definitions/runtimeStreamError" - } - }, - "title": "Stream result of chainrpcSpendEvent" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/chainrpcSpendRequest" - } - } - ], - "tags": [ - "ChainNotifier" - ] - } - } - }, - "definitions": { - "chainrpcBlockEpoch": { - "type": "object", - "properties": { - "hash": { - "type": "string", - "format": "byte", - "description": "The hash of the block." - }, - "height": { - "type": "integer", - "format": "int64", - "description": "The height of the block." - } - } - }, - "chainrpcConfDetails": { - "type": "object", - "properties": { - "raw_tx": { - "type": "string", - "format": "byte", - "description": "The raw bytes of the confirmed transaction." - }, - "block_hash": { - "type": "string", - "format": "byte", - "description": "The hash of the block in which the confirmed transaction was included in." - }, - "block_height": { - "type": "integer", - "format": "int64", - "description": "The height of the block in which the confirmed transaction was included\nin." 
- }, - "tx_index": { - "type": "integer", - "format": "int64", - "description": "The index of the confirmed transaction within the transaction." - } - } - }, - "chainrpcConfEvent": { - "type": "object", - "properties": { - "conf": { - "$ref": "#/definitions/chainrpcConfDetails", - "description": "An event that includes the confirmation details of the request\n(txid/ouput script)." - }, - "reorg": { - "$ref": "#/definitions/chainrpcReorg", - "description": "An event send when the transaction of the request is reorged out of the\nchain." - } - } - }, - "chainrpcConfRequest": { - "type": "object", - "properties": { - "txid": { - "type": "string", - "format": "byte", - "description": "The transaction hash for which we should request a confirmation notification\nfor. If set to a hash of all zeros, then the confirmation notification will\nbe requested for the script instead." - }, - "script": { - "type": "string", - "format": "byte", - "description": "An output script within a transaction with the hash above which will be used\nby light clients to match block filters. If the transaction hash is set to a\nhash of all zeros, then a confirmation notification will be requested for\nthis script instead." - }, - "num_confs": { - "type": "integer", - "format": "int64", - "description": "The number of desired confirmations the transaction/output script should\nreach before dispatching a confirmation notification." - }, - "height_hint": { - "type": "integer", - "format": "int64", - "description": "The earliest height in the chain for which the transaction/output script\ncould have been included in a block. This should in most cases be set to the\nbroadcast height of the transaction/output script." - } - } - }, - "chainrpcOutpoint": { - "type": "object", - "properties": { - "hash": { - "type": "string", - "format": "byte", - "description": "The hash of the transaction." 
- }, - "index": { - "type": "integer", - "format": "int64", - "description": "The index of the output within the transaction." - } - } - }, - "chainrpcReorg": { - "type": "object" - }, - "chainrpcSpendDetails": { - "type": "object", - "properties": { - "spending_outpoint": { - "$ref": "#/definitions/chainrpcOutpoint", - "description": "The outpoint was that spent." - }, - "raw_spending_tx": { - "type": "string", - "format": "byte", - "description": "The raw bytes of the spending transaction." - }, - "spending_tx_hash": { - "type": "string", - "format": "byte", - "description": "The hash of the spending transaction." - }, - "spending_input_index": { - "type": "integer", - "format": "int64", - "description": "The input of the spending transaction that fulfilled the spend request." - }, - "spending_height": { - "type": "integer", - "format": "int64", - "description": "The height at which the spending transaction was included in a block." - } - } - }, - "chainrpcSpendEvent": { - "type": "object", - "properties": { - "spend": { - "$ref": "#/definitions/chainrpcSpendDetails", - "description": "An event that includes the details of the spending transaction of the\nrequest (outpoint/output script)." - }, - "reorg": { - "$ref": "#/definitions/chainrpcReorg", - "description": "An event sent when the spending transaction of the request was\nreorged out of the chain." - } - } - }, - "chainrpcSpendRequest": { - "type": "object", - "properties": { - "outpoint": { - "$ref": "#/definitions/chainrpcOutpoint", - "description": "The outpoint for which we should request a spend notification for. If set to\na zero outpoint, then the spend notification will be requested for the\nscript instead." - }, - "script": { - "type": "string", - "format": "byte", - "description": "The output script for the outpoint above. This will be used by light clients\nto match block filters. If the outpoint is set to a zero outpoint, then a\nspend notification will be requested for this script instead." 
- }, - "height_hint": { - "type": "integer", - "format": "int64", - "description": "The earliest height in the chain for which the outpoint/output script could\nhave been spent. This should in most cases be set to the broadcast height of\nthe outpoint/output script." - } - } - }, - "protobufAny": { - "type": "object", - "properties": { - "type_url": { - "type": "string" - }, - "value": { - "type": "string", - "format": "byte" - } - } - }, - "runtimeError": { - "type": "object", - "properties": { - "error": { - "type": "string" - }, - "code": { - "type": "integer", - "format": "int32" - }, - "message": { - "type": "string" - }, - "details": { - "type": "array", - "items": { - "$ref": "#/definitions/protobufAny" - } - } - } - }, - "runtimeStreamError": { - "type": "object", - "properties": { - "grpc_code": { - "type": "integer", - "format": "int32" - }, - "http_code": { - "type": "integer", - "format": "int32" - }, - "message": { - "type": "string" - }, - "http_status": { - "type": "string" - }, - "details": { - "type": "array", - "items": { - "$ref": "#/definitions/protobufAny" - } - } - } - } - } -} diff --git a/lnd/lnrpc/chainrpc/chainnotifier_server.go b/lnd/lnrpc/chainrpc/chainnotifier_server.go deleted file mode 100644 index 8a688e40..00000000 --- a/lnd/lnrpc/chainrpc/chainnotifier_server.go +++ /dev/null @@ -1,487 +0,0 @@ -// +build chainrpc - -package chainrpc - -import ( - "bytes" - "context" - "io/ioutil" - "os" - "path/filepath" - "sync" - - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/lnd/chainntnfs" - "github.com/pkt-cash/pktd/lnd/lnrpc" - "github.com/pkt-cash/pktd/lnd/macaroons" - "github.com/pkt-cash/pktd/wire" - "google.golang.org/grpc" - "gopkg.in/macaroon-bakery.v2/bakery" -) - -const ( - // subServerName is the name of the RPC sub-server. 
We'll use this name - // to register ourselves, and we also require that the main - // SubServerConfigDispatcher instance recognize this as the name of the - // config file that we need. - subServerName = "ChainRPC" -) - -var ( - // macaroonOps are the set of capabilities that our minted macaroon (if - // it doesn't already exist) will have. - macaroonOps = []bakery.Op{ - { - Entity: "onchain", - Action: "read", - }, - } - - // macPermissions maps RPC calls to the permissions they require. - macPermissions = map[string][]bakery.Op{ - "/chainrpc.ChainNotifier/RegisterConfirmationsNtfn": {{ - Entity: "onchain", - Action: "read", - }}, - "/chainrpc.ChainNotifier/RegisterSpendNtfn": {{ - Entity: "onchain", - Action: "read", - }}, - "/chainrpc.ChainNotifier/RegisterBlockEpochNtfn": {{ - Entity: "onchain", - Action: "read", - }}, - } - - // DefaultChainNotifierMacFilename is the default name of the chain - // notifier macaroon that we expect to find via a file handle within the - // main configuration file in this package. - DefaultChainNotifierMacFilename = "chainnotifier.macaroon" - - // ErrChainNotifierServerShuttingDown is an error returned when we are - // waiting for a notification to arrive but the chain notifier server - // has been shut down. - ErrChainNotifierServerShuttingDown = Err.CodeWithDetail("ErrChainNotifierServerShuttingDown", "chain notifier RPC "+ - "subserver shutting down") - - // ErrChainNotifierServerNotActive indicates that the chain notifier hasn't - // finished the startup process. - ErrChainNotifierServerNotActive = Err.CodeWithDetail("ErrChainNotifierServerNotActive", "chain notifier RPC is "+ - "still in the process of starting") -) - -// Server is a sub-server of the main RPC server: the chain notifier RPC. This -// RPC sub-server allows external callers to access the full chain notifier -// capabilities of lnd. 
This allows callers to create custom protocols, external -// to lnd, even backed by multiple distinct lnd across independent failure -// domains. -type Server struct { - started sync.Once - stopped sync.Once - - cfg Config - - quit chan struct{} -} - -// New returns a new instance of the chainrpc ChainNotifier sub-server. We also -// return the set of permissions for the macaroons that we may create within -// this method. If the macaroons we need aren't found in the filepath, then -// we'll create them on start up. If we're unable to locate, or create the -// macaroons we need, then we'll return with an error. -func New(cfg *Config) (*Server, lnrpc.MacaroonPerms, er.R) { - // If the path of the chain notifier macaroon wasn't generated, then - // we'll assume that it's found at the default network directory. - if cfg.ChainNotifierMacPath == "" { - cfg.ChainNotifierMacPath = filepath.Join( - cfg.NetworkDir, DefaultChainNotifierMacFilename, - ) - } - - // Now that we know the full path of the chain notifier macaroon, we can - // check to see if we need to create it or not. If stateless_init is set - // then we don't write the macaroons. - macFilePath := cfg.ChainNotifierMacPath - if cfg.MacService != nil && !cfg.MacService.StatelessInit && - !lnrpc.FileExists(macFilePath) { - - log.Infof("Baking macaroons for ChainNotifier RPC Server at: %v", - macFilePath) - - // At this point, we know that the chain notifier macaroon - // doesn't yet, exist, so we need to create it with the help of - // the main macaroon service. 
- chainNotifierMac, err := cfg.MacService.NewMacaroon( - context.Background(), macaroons.DefaultRootKeyID, - macaroonOps..., - ) - if err != nil { - return nil, nil, err - } - chainNotifierMacBytes, err := chainNotifierMac.M().MarshalBinary() - if err != nil { - return nil, nil, err - } - err = ioutil.WriteFile(macFilePath, chainNotifierMacBytes, 0644) - if err != nil { - _ = os.Remove(macFilePath) - return nil, nil, err - } - } - - return &Server{ - cfg: *cfg, - quit: make(chan struct{}), - }, macPermissions, nil -} - -// Compile-time checks to ensure that Server fully implements the -// ChainNotifierServer gRPC service and lnrpc.SubServer interface. -var _ ChainNotifierServer = (*Server)(nil) -var _ lnrpc.SubServer = (*Server)(nil) - -// Start launches any helper goroutines required for the server to function. -// -// NOTE: This is part of the lnrpc.SubServer interface. -func (s *Server) Start() er.R { - s.started.Do(func() {}) - return nil -} - -// Stop signals any active goroutines for a graceful closure. -// -// NOTE: This is part of the lnrpc.SubServer interface. -func (s *Server) Stop() er.R { - s.stopped.Do(func() { - close(s.quit) - }) - return nil -} - -// Name returns a unique string representation of the sub-server. This can be -// used to identify the sub-server and also de-duplicate them. -// -// NOTE: This is part of the lnrpc.SubServer interface. -func (s *Server) Name() string { - return subServerName -} - -// RegisterWithRootServer will be called by the root gRPC server to direct a RPC -// sub-server to register itself with the main gRPC root server. Until this is -// called, each sub-server won't be able to have requests routed towards it. -// -// NOTE: This is part of the lnrpc.SubServer interface. -func (s *Server) RegisterWithRootServer(grpcServer *grpc.Server) er.R { - // We make sure that we register it with the main gRPC server to ensure - // all our methods are routed properly. 
- RegisterChainNotifierServer(grpcServer, s) - - log.Debug("ChainNotifier RPC server successfully register with root " + - "gRPC server") - - return nil -} - -// RegisterWithRestServer will be called by the root REST mux to direct a sub -// RPC server to register itself with the main REST mux server. Until this is -// called, each sub-server won't be able to have requests routed towards it. -// -// NOTE: This is part of the lnrpc.SubServer interface. -func (s *Server) RegisterWithRestServer(ctx context.Context, - mux *runtime.ServeMux, dest string, opts []grpc.DialOption) er.R { - - // We make sure that we register it with the main REST server to ensure - // all our methods are routed properly. - err := RegisterChainNotifierHandlerFromEndpoint(ctx, mux, dest, opts) - if err != nil { - log.Errorf("Could not register ChainNotifier REST server "+ - "with root REST server: %v", err) - return err - } - - log.Debugf("ChainNotifier REST server successfully registered with " + - "root REST server") - return nil -} - -// RegisterConfirmationsNtfn is a synchronous response-streaming RPC that -// registers an intent for a client to be notified once a confirmation request -// has reached its required number of confirmations on-chain. -// -// A client can specify whether the confirmation request should be for a -// particular transaction by its hash or for an output script by specifying a -// zero hash. -// -// NOTE: This is part of the chainrpc.ChainNotifierService interface. -func (s *Server) RegisterConfirmationsNtfn(in *ConfRequest, - confStream ChainNotifier_RegisterConfirmationsNtfnServer) er.R { - - if !s.cfg.ChainNotifier.Started() { - return ErrChainNotifierServerNotActive.Default() - } - - // We'll start by reconstructing the RPC request into what the - // underlying ChainNotifier expects. - var txid chainhash.Hash - copy(txid[:], in.Txid) - - // We'll then register for the spend notification of the request. 
- confEvent, err := s.cfg.ChainNotifier.RegisterConfirmationsNtfn( - &txid, in.Script, in.NumConfs, in.HeightHint, - ) - if err != nil { - return err - } - defer confEvent.Cancel() - - // With the request registered, we'll wait for its spend notification to - // be dispatched. - for { - select { - // The transaction satisfying the request has confirmed on-chain - // and reached its required number of confirmations. We'll - // dispatch an event to the caller indicating so. - case details, ok := <-confEvent.Confirmed: - if !ok { - return chainntnfs.ErrChainNotifierShuttingDown.Default() - } - - var rawTxBuf bytes.Buffer - err := details.Tx.Serialize(&rawTxBuf) - if err != nil { - return err - } - - rpcConfDetails := &ConfDetails{ - RawTx: rawTxBuf.Bytes(), - BlockHash: details.BlockHash[:], - BlockHeight: details.BlockHeight, - TxIndex: details.TxIndex, - } - - conf := &ConfEvent{ - Event: &ConfEvent_Conf{ - Conf: rpcConfDetails, - }, - } - if err := confStream.Send(conf); err != nil { - return err - } - - // The transaction satisfying the request has been reorged out - // of the chain, so we'll send an event describing so. - case _, ok := <-confEvent.NegativeConf: - if !ok { - return chainntnfs.ErrChainNotifierShuttingDown.Default() - } - - reorg := &ConfEvent{ - Event: &ConfEvent_Reorg{Reorg: &Reorg{}}, - } - if err := confStream.Send(reorg); err != nil { - return err - } - - // The transaction satisfying the request has confirmed and is - // no longer under the risk of being reorged out of the chain, - // so we can safely exit. - case _, ok := <-confEvent.Done: - if !ok { - return chainntnfs.ErrChainNotifierShuttingDown.Default() - } - - return nil - - // The response stream's context for whatever reason has been - // closed. We'll return the error indicated by the context - // itself to the caller. - case <-confStream.Context().Done(): - return confStream.Context().Err() - - // The server has been requested to shut down. 
- case <-s.quit: - return ErrChainNotifierServerShuttingDown.Default() - } - } -} - -// RegisterSpendNtfn is a synchronous response-streaming RPC that registers an -// intent for a client to be notification once a spend request has been spent by -// a transaction that has confirmed on-chain. -// -// A client can specify whether the spend request should be for a particular -// outpoint or for an output script by specifying a zero outpoint. -// -// NOTE: This is part of the chainrpc.ChainNotifierService interface. -func (s *Server) RegisterSpendNtfn(in *SpendRequest, - spendStream ChainNotifier_RegisterSpendNtfnServer) er.R { - - if !s.cfg.ChainNotifier.Started() { - return ErrChainNotifierServerNotActive.Default() - } - - // We'll start by reconstructing the RPC request into what the - // underlying ChainNotifier expects. - var op *wire.OutPoint - if in.Outpoint != nil { - var txid chainhash.Hash - copy(txid[:], in.Outpoint.Hash) - op = &wire.OutPoint{Hash: txid, Index: in.Outpoint.Index} - } - - // We'll then register for the spend notification of the request. - spendEvent, err := s.cfg.ChainNotifier.RegisterSpendNtfn( - op, in.Script, in.HeightHint, - ) - if err != nil { - return err - } - defer spendEvent.Cancel() - - // With the request registered, we'll wait for its spend notification to - // be dispatched. - for { - select { - // A transaction that spends the given has confirmed on-chain. - // We'll return an event to the caller indicating so that - // includes the details of the spending transaction. 
- case details, ok := <-spendEvent.Spend: - if !ok { - return chainntnfs.ErrChainNotifierShuttingDown.Default() - } - - var rawSpendingTxBuf bytes.Buffer - err := details.SpendingTx.Serialize(&rawSpendingTxBuf) - if err != nil { - return err - } - - rpcSpendDetails := &SpendDetails{ - SpendingOutpoint: &Outpoint{ - Hash: details.SpentOutPoint.Hash[:], - Index: details.SpentOutPoint.Index, - }, - RawSpendingTx: rawSpendingTxBuf.Bytes(), - SpendingTxHash: details.SpenderTxHash[:], - SpendingInputIndex: details.SpenderInputIndex, - SpendingHeight: uint32(details.SpendingHeight), - } - - spend := &SpendEvent{ - Event: &SpendEvent_Spend{ - Spend: rpcSpendDetails, - }, - } - if err := spendStream.Send(spend); err != nil { - return err - } - - // The spending transaction of the request has been reorged of - // the chain. We'll return an event to the caller indicating so. - case _, ok := <-spendEvent.Reorg: - if !ok { - return chainntnfs.ErrChainNotifierShuttingDown.Default() - } - - reorg := &SpendEvent{ - Event: &SpendEvent_Reorg{Reorg: &Reorg{}}, - } - if err := spendStream.Send(reorg); err != nil { - return err - } - - // The spending transaction of the requests has confirmed - // on-chain and is no longer under the risk of being reorged out - // of the chain, so we can safely exit. - case _, ok := <-spendEvent.Done: - if !ok { - return chainntnfs.ErrChainNotifierShuttingDown.Default() - } - - return nil - - // The response stream's context for whatever reason has been - // closed. We'll return the error indicated by the context - // itself to the caller. - case <-spendStream.Context().Done(): - return spendStream.Context().Err() - - // The server has been requested to shut down. - case <-s.quit: - return ErrChainNotifierServerShuttingDown.Default() - } - } -} - -// RegisterBlockEpochNtfn is a synchronous response-streaming RPC that registers -// an intent for a client to be notified of blocks in the chain. 
The stream will -// return a hash and height tuple of a block for each new/stale block in the -// chain. It is the client's responsibility to determine whether the tuple -// returned is for a new or stale block in the chain. -// -// A client can also request a historical backlog of blocks from a particular -// point. This allows clients to be idempotent by ensuring that they do not -// missing processing a single block within the chain. -// -// NOTE: This is part of the chainrpc.ChainNotifierService interface. -func (s *Server) RegisterBlockEpochNtfn(in *BlockEpoch, - epochStream ChainNotifier_RegisterBlockEpochNtfnServer) er.R { - - if !s.cfg.ChainNotifier.Started() { - return ErrChainNotifierServerNotActive.Default() - } - - // We'll start by reconstructing the RPC request into what the - // underlying ChainNotifier expects. - var hash chainhash.Hash - copy(hash[:], in.Hash) - - // If the request isn't for a zero hash and a zero height, then we - // should deliver a backlog of notifications from the given block - // (hash/height tuple) until tip, and continue delivering epochs for - // new blocks. - var blockEpoch *chainntnfs.BlockEpoch - if hash != chainntnfs.ZeroHash && in.Height != 0 { - blockEpoch = &chainntnfs.BlockEpoch{ - Hash: &hash, - Height: int32(in.Height), - } - } - - epochEvent, err := s.cfg.ChainNotifier.RegisterBlockEpochNtfn(blockEpoch) - if err != nil { - return err - } - defer epochEvent.Cancel() - - for { - select { - // A notification for a block has been received. This block can - // either be a new block or stale. - case blockEpoch, ok := <-epochEvent.Epochs: - if !ok { - return chainntnfs.ErrChainNotifierShuttingDown.Default() - } - - epoch := &BlockEpoch{ - Hash: blockEpoch.Hash[:], - Height: uint32(blockEpoch.Height), - } - if err := epochStream.Send(epoch); err != nil { - return err - } - - // The response stream's context for whatever reason has been - // closed. 
We'll return the error indicated by the context - // itself to the caller. - case <-epochStream.Context().Done(): - return epochStream.Context().Err() - - // The server has been requested to shut down. - case <-s.quit: - return ErrChainNotifierServerShuttingDown.Default() - } - } -} diff --git a/lnd/lnrpc/chainrpc/config_active.go b/lnd/lnrpc/chainrpc/config_active.go deleted file mode 100644 index 37c377db..00000000 --- a/lnd/lnrpc/chainrpc/config_active.go +++ /dev/null @@ -1,34 +0,0 @@ -// +build chainrpc - -package chainrpc - -import ( - "github.com/pkt-cash/pktd/lnd/chainntnfs" - "github.com/pkt-cash/pktd/lnd/macaroons" -) - -// Config is the primary configuration struct for the chain notifier RPC server. -// It contains all the items required for the server to carry out its duties. -// The fields with struct tags are meant to be parsed as normal configuration -// options, while if able to be populated, the latter fields MUST also be -// specified. -type Config struct { - // ChainNotifierMacPath is the path for the chain notifier macaroon. If - // unspecified then we assume that the macaroon will be found under the - // network directory, named DefaultChainNotifierMacFilename. - ChainNotifierMacPath string `long:"notifiermacaroonpath" description:"Path to the chain notifier macaroon"` - - // NetworkDir is the main network directory wherein the chain notifier - // RPC server will find the macaroon named - // DefaultChainNotifierMacFilename. - NetworkDir string - - // MacService is the main macaroon service that we'll use to handle - // authentication for the chain notifier RPC server. - MacService *macaroons.Service - - // ChainNotifier is the chain notifier instance that backs the chain - // notifier RPC server. The job of the chain notifier RPC server is - // simply to proxy valid requests to the active chain notifier instance. 
- ChainNotifier chainntnfs.ChainNotifier -} diff --git a/lnd/lnrpc/chainrpc/config_default.go b/lnd/lnrpc/chainrpc/config_default.go deleted file mode 100644 index 53bb97a1..00000000 --- a/lnd/lnrpc/chainrpc/config_default.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build !chainrpc - -package chainrpc - -// Config is empty for non-chainrpc builds. -type Config struct{} diff --git a/lnd/lnrpc/chainrpc/driver.go b/lnd/lnrpc/chainrpc/driver.go deleted file mode 100644 index cea9566e..00000000 --- a/lnd/lnrpc/chainrpc/driver.go +++ /dev/null @@ -1,71 +0,0 @@ -// +build chainrpc - -package chainrpc - -import ( - "fmt" - - "github.com/pkt-cash/pktd/lnd/lnrpc" -) - -// createNewSubServer is a helper method that will create the new chain notifier -// sub server given the main config dispatcher method. If we're unable to find -// the config that is meant for us in the config dispatcher, then we'll exit -// with an error. -func createNewSubServer(configRegistry lnrpc.SubServerConfigDispatcher) ( - lnrpc.SubServer, lnrpc.MacaroonPerms, er.R) { - - // We'll attempt to look up the config that we expect, according to our - // subServerName name. If we can't find this, then we'll exit with an - // error, as we're unable to properly initialize ourselves without this - // config. - chainNotifierServerConf, ok := configRegistry.FetchConfig(subServerName) - if !ok { - return nil, nil, er.Errorf("unable to find config for "+ - "subserver type %s", subServerName) - } - - // Now that we've found an object mapping to our service name, we'll - // ensure that it's the type we need. - config, ok := chainNotifierServerConf.(*Config) - if !ok { - return nil, nil, er.Errorf("wrong type of config for "+ - "subserver %s, expected %T got %T", subServerName, - &Config{}, chainNotifierServerConf) - } - - // Before we try to make the new chain notifier service instance, we'll - // perform some sanity checks on the arguments to ensure that they're - // usable. 
- switch { - // If the macaroon service is set (we should use macaroons), then - // ensure that we know where to look for them, or create them if not - // found. - case config.MacService != nil && config.NetworkDir == "": - return nil, nil, er.Errorf("NetworkDir must be set to create " + - "chainrpc") - case config.ChainNotifier == nil: - return nil, nil, er.Errorf("ChainNotifier must be set to " + - "create chainrpc") - } - - return New(config) -} - -func init() { - subServer := &lnrpc.SubServerDriver{ - SubServerName: subServerName, - New: func(c lnrpc.SubServerConfigDispatcher) ( - lnrpc.SubServer, lnrpc.MacaroonPerms, er.R) { - - return createNewSubServer(c) - }, - } - - // If the build tag is active, then we'll register ourselves as a - // sub-RPC server within the global lnrpc package namespace. - if err := lnrpc.RegisterSubServer(subServer); err != nil { - panic(fmt.Sprintf("failed to register subserver driver %s: %v", - subServerName, err)) - } -} diff --git a/lnd/lnrpc/file_utils.go b/lnd/lnrpc/file_utils.go deleted file mode 100644 index ffb83fbf..00000000 --- a/lnd/lnrpc/file_utils.go +++ /dev/null @@ -1,15 +0,0 @@ -package lnrpc - -import ( - "os" -) - -// FileExists reports whether the named file or directory exists. -func FileExists(name string) bool { - if _, err := os.Stat(name); err != nil { - if os.IsNotExist(err) { - return false - } - } - return true -} diff --git a/lnd/lnrpc/gen_protos.sh b/lnd/lnrpc/gen_protos.sh deleted file mode 100755 index 42b42651..00000000 --- a/lnd/lnrpc/gen_protos.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/sh - -echo "Generating root gRPC server protos" - -PROTOS="rpc.proto walletunlocker.proto metaservice.proto pkt.proto **/*.proto" - -# For each of the sub-servers, we then generate their protos, but a restricted -# set as they don't yet require REST proxies, or swagger docs. -for file in $PROTOS; do - DIRECTORY=$(dirname "${file}") - echo "Generating protos from ${file}, into ${DIRECTORY}" - - # Generate the protos. 
- protoc -I/usr/local/include -I. \ - --go_out=plugins=grpc,paths=source_relative:. \ - "${file}" - - # Generate the REST reverse proxy. - protoc -I/usr/local/include -I. \ - --grpc-gateway_out=logtostderr=true,paths=source_relative,grpc_api_configuration=rest-annotations.yaml:. \ - "${file}" - - - # Finally, generate the swagger file which describes the REST API in detail. - protoc -I/usr/local/include -I. \ - --swagger_out=logtostderr=true,grpc_api_configuration=rest-annotations.yaml:. \ - "${file}" -done diff --git a/lnd/lnrpc/invoicesrpc/addinvoice.go b/lnd/lnrpc/invoicesrpc/addinvoice.go deleted file mode 100644 index e0472bf5..00000000 --- a/lnd/lnrpc/invoicesrpc/addinvoice.go +++ /dev/null @@ -1,542 +0,0 @@ -package invoicesrpc - -import ( - "bytes" - "context" - "crypto/rand" - "math" - "time" - - "github.com/davecgh/go-spew/spew" - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/chaincfg" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/pktlog/log" - "github.com/pkt-cash/pktd/wire" - - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/lntypes" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/netann" - "github.com/pkt-cash/pktd/lnd/routing" - "github.com/pkt-cash/pktd/lnd/zpay32" -) - -// AddInvoiceConfig contains dependencies for invoice creation. -type AddInvoiceConfig struct { - // AddInvoice is called to add the invoice to the registry. - AddInvoice func(invoice *channeldb.Invoice, paymentHash lntypes.Hash) ( - uint64, er.R) - - // IsChannelActive is used to generate valid hop hints. - IsChannelActive func(chanID lnwire.ChannelID) bool - - // ChainParams are required to properly decode invoice payment requests - // that are marshalled over rpc. - ChainParams *chaincfg.Params - - // NodeSigner is an implementation of the MessageSigner implementation - // that's backed by the identity private key of the running lnd node. 
- NodeSigner *netann.NodeSigner - - // DefaultCLTVExpiry is the default invoice expiry if no values is - // specified. - DefaultCLTVExpiry uint32 - - // ChanDB is a global boltdb instance which is needed to access the - // channel graph. - ChanDB *channeldb.DB - - // Graph holds a reference to the ChannelGraph database. - Graph *channeldb.ChannelGraph - - // GenInvoiceFeatures returns a feature containing feature bits that - // should be advertised on freshly generated invoices. - GenInvoiceFeatures func() *lnwire.FeatureVector -} - -// AddInvoiceData contains the required data to create a new invoice. -type AddInvoiceData struct { - // An optional memo to attach along with the invoice. Used for record - // keeping purposes for the invoice's creator, and will also be set in - // the description field of the encoded payment request if the - // description_hash field is not being used. - Memo string - - // The preimage which will allow settling an incoming HTLC payable to - // this preimage. If Preimage is set, Hash should be nil. If both - // Preimage and Hash are nil, a random preimage is generated. - Preimage *lntypes.Preimage - - // The hash of the preimage. If Hash is set, Preimage should be nil. - // This condition indicates that we have a 'hold invoice' for which the - // htlc will be accepted and held until the preimage becomes known. - Hash *lntypes.Hash - - // The value of this invoice in millisatoshis. - Value lnwire.MilliSatoshi - - // Hash (SHA-256) of a description of the payment. Used if the - // description of payment (memo) is too long to naturally fit within the - // description field of an encoded payment request. - DescriptionHash []byte - - // Payment request expiry time in seconds. Default is 3600 (1 hour). - Expiry int64 - - // Fallback on-chain address. - FallbackAddr string - - // Delta to use for the time-lock of the CLTV extended to the final hop. 
- CltvExpiry uint64 - - // Whether this invoice should include routing hints for private - // channels. - Private bool - - // HodlInvoice signals that this invoice shouldn't be settled - // immediately upon receiving the payment. - HodlInvoice bool - - // RouteHints are optional route hints that can each be individually used - // to assist in reaching the invoice's destination. - RouteHints [][]zpay32.HopHint -} - -// AddInvoice attempts to add a new invoice to the invoice database. Any -// duplicated invoices are rejected, therefore all invoices *must* have a -// unique payment preimage. -func AddInvoice(ctx context.Context, cfg *AddInvoiceConfig, - invoice *AddInvoiceData) (*lntypes.Hash, *channeldb.Invoice, er.R) { - - var ( - paymentPreimage *lntypes.Preimage - paymentHash lntypes.Hash - ) - - switch { - - // Only either preimage or hash can be set. - case invoice.Preimage != nil && invoice.Hash != nil: - return nil, nil, - er.New("preimage and hash both set") - - // If no hash or preimage is given, generate a random preimage. - case invoice.Preimage == nil && invoice.Hash == nil: - paymentPreimage = &lntypes.Preimage{} - if _, err := rand.Read(paymentPreimage[:]); err != nil { - return nil, nil, er.E(err) - } - paymentHash = paymentPreimage.Hash() - - // If just a hash is given, we create a hold invoice by setting the - // preimage to unknown. - case invoice.Preimage == nil && invoice.Hash != nil: - paymentHash = *invoice.Hash - - // A specific preimage was supplied. Use that for the invoice. - case invoice.Preimage != nil && invoice.Hash == nil: - preimage := *invoice.Preimage - paymentPreimage = &preimage - paymentHash = invoice.Preimage.Hash() - } - - // The size of the memo, receipt and description hash attached must not - // exceed the maximum values for either of the fields. 
- if len(invoice.Memo) > channeldb.MaxMemoSize { - return nil, nil, er.Errorf("memo too large: %v bytes "+ - "(maxsize=%v)", len(invoice.Memo), channeldb.MaxMemoSize) - } - if len(invoice.DescriptionHash) > 0 && len(invoice.DescriptionHash) != 32 { - return nil, nil, er.Errorf("description hash is %v bytes, must be 32", - len(invoice.DescriptionHash)) - } - - // We set the max invoice amount to 100k BTC, which itself is several - // multiples off the current block reward. - maxInvoiceAmt := btcutil.Amount(btcutil.UnitsPerCoin() * 100000) - - switch { - // The value of the invoice must not be negative. - case int64(invoice.Value) < 0: - return nil, nil, er.Errorf("payments of negative value "+ - "are not allowed, value is %v", int64(invoice.Value)) - - // Also ensure that the invoice is actually realistic, while preventing - // any issues due to underflow. - case invoice.Value.ToSatoshis() > maxInvoiceAmt: - return nil, nil, er.Errorf("invoice amount %v is "+ - "too large, max is %v", invoice.Value.ToSatoshis(), - maxInvoiceAmt) - } - - amtMSat := invoice.Value - - // We also create an encoded payment request which allows the - // caller to compactly send the invoice to the payer. We'll create a - // list of options to be added to the encoded payment request. For now - // we only support the required fields description/description_hash, - // expiry, fallback address, and the amount field. - var options []func(*zpay32.Invoice) - - // We only include the amount in the invoice if it is greater than 0. - // By not including the amount, we enable the creation of invoices that - // allow the payee to specify the amount of satoshis they wish to send. - if amtMSat > 0 { - options = append(options, zpay32.Amount(amtMSat)) - } - - // If specified, add a fallback address to the payment request. 
- if len(invoice.FallbackAddr) > 0 { - addr, err := btcutil.DecodeAddress(invoice.FallbackAddr, - cfg.ChainParams) - if err != nil { - return nil, nil, er.Errorf("invalid fallback address: %v", - err) - } - options = append(options, zpay32.FallbackAddr(addr)) - } - - // If expiry is set, specify it. If it is not provided, no expiry time - // will be explicitly added to this payment request, which will imply - // the default 3600 seconds. - if invoice.Expiry > 0 { - - // We'll ensure that the specified expiry is restricted to sane - // number of seconds. As a result, we'll reject an invoice with - // an expiry greater than 1 year. - maxExpiry := time.Hour * 24 * 365 - expSeconds := invoice.Expiry - - if float64(expSeconds) > maxExpiry.Seconds() { - return nil, nil, er.Errorf("expiry of %v seconds "+ - "greater than max expiry of %v seconds", - float64(expSeconds), maxExpiry.Seconds()) - } - - expiry := time.Duration(invoice.Expiry) * time.Second - options = append(options, zpay32.Expiry(expiry)) - } - - // If the description hash is set, then we add it do the list of options. - // If not, use the memo field as the payment request description. - if len(invoice.DescriptionHash) > 0 { - var descHash [32]byte - copy(descHash[:], invoice.DescriptionHash[:]) - options = append(options, zpay32.DescriptionHash(descHash)) - } else { - // Use the memo field as the description. If this is not set - // this will just be an empty string. - options = append(options, zpay32.Description(invoice.Memo)) - } - - // We'll use our current default CLTV value unless one was specified as - // an option on the command line when creating an invoice. - switch { - case invoice.CltvExpiry > math.MaxUint16: - return nil, nil, er.Errorf("CLTV delta of %v is too large, max "+ - "accepted is: %v", invoice.CltvExpiry, math.MaxUint16) - case invoice.CltvExpiry != 0: - // Disallow user-chosen final CLTV deltas below the required - // minimum. 
- if invoice.CltvExpiry < routing.MinCLTVDelta { - return nil, nil, er.Errorf("CLTV delta of %v must be "+ - "greater than minimum of %v", - routing.MinCLTVDelta, invoice.CltvExpiry) - } - - options = append(options, - zpay32.CLTVExpiry(invoice.CltvExpiry)) - default: - // TODO(roasbeef): assumes set delta between versions - defaultDelta := cfg.DefaultCLTVExpiry - options = append(options, zpay32.CLTVExpiry(uint64(defaultDelta))) - } - - // We make sure that the given invoice routing hints number is within the - // valid range - if len(invoice.RouteHints) > 20 { - return nil, nil, er.Errorf("number of routing hints must not exceed " + - "maximum of 20") - } - - // We continue by populating the requested routing hints indexing their - // corresponding channels so we won't duplicate them. - forcedHints := make(map[uint64]struct{}) - for _, h := range invoice.RouteHints { - if len(h) == 0 { - return nil, nil, er.Errorf("number of hop hint within a route must " + - "be positive") - } - options = append(options, zpay32.RouteHint(h)) - - // Only this first hop is our direct channel. - forcedHints[h[0].ChannelID] = struct{}{} - } - - // If we were requested to include routing hints in the invoice, then - // we'll fetch all of our available private channels and create routing - // hints for them. - if invoice.Private { - openChannels, err := cfg.ChanDB.FetchAllChannels() - if err != nil { - return nil, nil, er.Errorf("could not fetch all channels") - } - - if len(openChannels) > 0 { - // We filter the channels by excluding the ones that were specified by - // the caller and were already added. - var filteredChannels []*channeldb.OpenChannel - for _, c := range openChannels { - if _, ok := forcedHints[c.ShortChanID().ToUint64()]; ok { - continue - } - filteredChannels = append(filteredChannels, c) - } - - // We'll restrict the number of individual route hints - // to 20 to avoid creating overly large invoices. 
- numMaxHophints := 20 - len(forcedHints) - hopHints := selectHopHints( - amtMSat, cfg, filteredChannels, numMaxHophints, - ) - - options = append(options, hopHints...) - } - } - - // Set our desired invoice features and add them to our list of options. - invoiceFeatures := cfg.GenInvoiceFeatures() - options = append(options, zpay32.Features(invoiceFeatures)) - - // Generate and set a random payment address for this invoice. If the - // sender understands payment addresses, this can be used to avoid - // intermediaries probing the receiver. - var paymentAddr [32]byte - if _, err := rand.Read(paymentAddr[:]); err != nil { - return nil, nil, er.E(err) - } - options = append(options, zpay32.PaymentAddr(paymentAddr)) - - // Create and encode the payment request as a bech32 (zpay32) string. - creationDate := time.Now() - payReq, err := zpay32.NewInvoice( - cfg.ChainParams, paymentHash, creationDate, options..., - ) - if err != nil { - return nil, nil, err - } - - payReqString, err := payReq.Encode( - zpay32.MessageSigner{ - SignCompact: func(msg []byte) ([]byte, er.R) { - hash := chainhash.HashB(msg) - return cfg.NodeSigner.SignDigestCompact(hash) - }, - }, - ) - if err != nil { - return nil, nil, err - } - - newInvoice := &channeldb.Invoice{ - CreationDate: creationDate, - Memo: []byte(invoice.Memo), - PaymentRequest: []byte(payReqString), - Terms: channeldb.ContractTerm{ - FinalCltvDelta: int32(payReq.MinFinalCLTVExpiry()), - Expiry: payReq.Expiry(), - Value: amtMSat, - PaymentPreimage: paymentPreimage, - PaymentAddr: paymentAddr, - Features: invoiceFeatures, - }, - HodlInvoice: invoice.HodlInvoice, - } - - log.Tracef("[addinvoice] adding new invoice %v", - log.C(func() string { - return spew.Sdump(newInvoice) - }), - ) - - // With all sanity checks passed, write the invoice to the database. 
- _, err = cfg.AddInvoice(newInvoice, paymentHash) - if err != nil { - return nil, nil, err - } - - return &paymentHash, newInvoice, nil -} - -// chanCanBeHopHint returns true if the target channel is eligible to be a hop -// hint. -func chanCanBeHopHint(channel *channeldb.OpenChannel, cfg *AddInvoiceConfig) ( - *channeldb.ChannelEdgePolicy, bool) { - - // Since we're only interested in our private channels, we'll skip - // public ones. - isPublic := channel.ChannelFlags&lnwire.FFAnnounceChannel != 0 - if isPublic { - return nil, false - } - - // Make sure the channel is active. - chanPoint := lnwire.NewChanIDFromOutPoint( - &channel.FundingOutpoint, - ) - if !cfg.IsChannelActive(chanPoint) { - log.Debugf("Skipping channel %v due to not "+ - "being eligible to forward payments", - chanPoint) - return nil, false - } - - // To ensure we don't leak unadvertised nodes, we'll make sure our - // counterparty is publicly advertised within the network. Otherwise, - // we'll end up leaking information about nodes that intend to stay - // unadvertised, like in the case of a node only having private - // channels. - var remotePub [33]byte - copy(remotePub[:], channel.IdentityPub.SerializeCompressed()) - isRemoteNodePublic, err := cfg.Graph.IsPublicNode(remotePub) - if err != nil { - log.Errorf("Unable to determine if node %x "+ - "is advertised: %v", remotePub, err) - return nil, false - } - - if !isRemoteNodePublic { - log.Debugf("Skipping channel %v due to "+ - "counterparty %x being unadvertised", - chanPoint, remotePub) - return nil, false - } - - // Fetch the policies for each end of the channel. - chanID := channel.ShortChanID().ToUint64() - info, p1, p2, err := cfg.Graph.FetchChannelEdgesByID(chanID) - if err != nil { - log.Errorf("Unable to fetch the routing "+ - "policies for the edges of the channel "+ - "%v: %v", chanPoint, err) - return nil, false - } - - // Now, we'll need to determine which is the correct policy for HTLCs - // being sent from the remote node. 
- var remotePolicy *channeldb.ChannelEdgePolicy - if bytes.Equal(remotePub[:], info.NodeKey1Bytes[:]) { - remotePolicy = p1 - } else { - remotePolicy = p2 - } - - return remotePolicy, true -} - -// addHopHint creates a hop hint out of the passed channel and channel policy. -// The new hop hint is appended to the passed slice. -func addHopHint(hopHints *[]func(*zpay32.Invoice), - channel *channeldb.OpenChannel, chanPolicy *channeldb.ChannelEdgePolicy) { - - hopHint := zpay32.HopHint{ - NodeID: channel.IdentityPub, - ChannelID: channel.ShortChanID().ToUint64(), - FeeBaseMSat: uint32(chanPolicy.FeeBaseMSat), - FeeProportionalMillionths: uint32( - chanPolicy.FeeProportionalMillionths, - ), - CLTVExpiryDelta: chanPolicy.TimeLockDelta, - } - *hopHints = append( - *hopHints, zpay32.RouteHint([]zpay32.HopHint{hopHint}), - ) -} - -// selectHopHints will select up to numMaxHophints from the set of passed open -// channels. The set of hop hints will be returned as a slice of functional -// options that'll append the route hint to the set of all route hints. -// -// TODO(roasbeef): do proper sub-set sum max hints usually << numChans -func selectHopHints(amtMSat lnwire.MilliSatoshi, cfg *AddInvoiceConfig, - openChannels []*channeldb.OpenChannel, - numMaxHophints int) []func(*zpay32.Invoice) { - - // We'll add our hop hints in two passes, first we'll add all channels - // that are eligible to be hop hints, and also have a local balance - // above the payment amount. - var totalHintBandwidth lnwire.MilliSatoshi - hopHintChans := make(map[wire.OutPoint]struct{}) - hopHints := make([]func(*zpay32.Invoice), 0, numMaxHophints) - for _, channel := range openChannels { - // If this channel can't be a hop hint, then skip it. - edgePolicy, canBeHopHint := chanCanBeHopHint(channel, cfg) - if edgePolicy == nil || !canBeHopHint { - continue - } - - // Similarly, in this first pass, we'll ignore all channels in - // isolation can't satisfy this payment. 
- if channel.LocalCommitment.RemoteBalance < amtMSat { - continue - } - - // Now that we now this channel use usable, add it as a hop - // hint and the indexes we'll use later. - addHopHint(&hopHints, channel, edgePolicy) - - hopHintChans[channel.FundingOutpoint] = struct{}{} - totalHintBandwidth += channel.LocalCommitment.RemoteBalance - } - - // If we have enough hop hints at this point, then we'll exit early. - // Otherwise, we'll continue to add more that may help out mpp users. - if len(hopHints) >= numMaxHophints { - return hopHints - } - - // In this second pass we'll add channels, and we'll either stop when - // we have 20 hop hints, we've run through all the available channels, - // or if the sum of available bandwidth in the routing hints exceeds 2x - // the payment amount. We do 2x here to account for a margin of error - // if some of the selected channels no longer become operable. - hopHintFactor := lnwire.MilliSatoshi(2) - for i := 0; i < len(openChannels); i++ { - // If we hit either of our early termination conditions, then - // we'll break the loop here. - if totalHintBandwidth > amtMSat*hopHintFactor || - len(hopHints) >= numMaxHophints { - - break - } - - channel := openChannels[i] - - // Skip the channel if we already selected it. - if _, ok := hopHintChans[channel.FundingOutpoint]; ok { - continue - } - - // If the channel can't be a hop hint, then we'll skip it. - // Otherwise, we'll use the policy information to populate the - // hop hint. - remotePolicy, canBeHopHint := chanCanBeHopHint(channel, cfg) - if !canBeHopHint || remotePolicy == nil { - continue - } - - // Include the route hint in our set of options that will be - // used when creating the invoice. - addHopHint(&hopHints, channel, remotePolicy) - - // As we've just added a new hop hint, we'll accumulate it's - // available balance now to update our tally. - // - // TODO(roasbeef): have a cut off based on min bandwidth? 
- totalHintBandwidth += channel.LocalCommitment.RemoteBalance - } - - return hopHints -} diff --git a/lnd/lnrpc/invoicesrpc/config_active.go b/lnd/lnrpc/invoicesrpc/config_active.go deleted file mode 100644 index 220457e8..00000000 --- a/lnd/lnrpc/invoicesrpc/config_active.go +++ /dev/null @@ -1,54 +0,0 @@ -// +build invoicesrpc - -package invoicesrpc - -import ( - "github.com/pkt-cash/pktd/chaincfg" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/invoices" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/macaroons" - "github.com/pkt-cash/pktd/lnd/netann" -) - -// Config is the primary configuration struct for the invoices RPC server. It -// contains all the items required for the rpc server to carry out its -// duties. The fields with struct tags are meant to be parsed as normal -// configuration options, while if able to be populated, the latter fields MUST -// also be specified. -type Config struct { - // NetworkDir is the main network directory wherein the invoices rpc - // server will find the macaroon named DefaultInvoicesMacFilename. - NetworkDir string - - // MacService is the main macaroon service that we'll use to handle - // authentication for the invoices rpc server. - MacService *macaroons.Service - - // InvoiceRegistry is a central registry of all the outstanding invoices - // created by the daemon. - InvoiceRegistry *invoices.InvoiceRegistry - - // IsChannelActive is used to generate valid hop hints. - IsChannelActive func(chanID lnwire.ChannelID) bool - - // ChainParams are required to properly decode invoice payment requests - // that are marshalled over rpc. - ChainParams *chaincfg.Params - - // NodeSigner is an implementation of the MessageSigner implementation - // that's backed by the identity private key of the running lnd node. - NodeSigner *netann.NodeSigner - - // DefaultCLTVExpiry is the default invoice expiry if no values is - // specified. 
- DefaultCLTVExpiry uint32 - - // ChanDB is a global boltdb instance which is needed to access the - // channel graph. - ChanDB *channeldb.DB - - // GenInvoiceFeatures returns a feature containing feature bits that - // should be advertised on freshly generated invoices. - GenInvoiceFeatures func() *lnwire.FeatureVector -} diff --git a/lnd/lnrpc/invoicesrpc/config_default.go b/lnd/lnrpc/invoicesrpc/config_default.go deleted file mode 100644 index bb40c480..00000000 --- a/lnd/lnrpc/invoicesrpc/config_default.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build !invoicesrpc - -package invoicesrpc - -// Config is empty for non-invoicesrpc builds. -type Config struct{} diff --git a/lnd/lnrpc/invoicesrpc/driver.go b/lnd/lnrpc/invoicesrpc/driver.go deleted file mode 100644 index b7cfcc1b..00000000 --- a/lnd/lnrpc/invoicesrpc/driver.go +++ /dev/null @@ -1,55 +0,0 @@ -// +build invoicesrpc - -package invoicesrpc - -import ( - "fmt" - - "github.com/pkt-cash/pktd/lnd/lnrpc" -) - -// createNewSubServer is a helper method that will create the new sub server -// given the main config dispatcher method. If we're unable to find the config -// that is meant for us in the config dispatcher, then we'll exit with an -// error. -func createNewSubServer(configRegistry lnrpc.SubServerConfigDispatcher) ( - lnrpc.SubServer, lnrpc.MacaroonPerms, er.R) { - - // We'll attempt to look up the config that we expect, according to our - // subServerName name. If we can't find this, then we'll exit with an - // error, as we're unable to properly initialize ourselves without this - // config. - subServerConf, ok := configRegistry.FetchConfig(subServerName) - if !ok { - return nil, nil, er.Errorf("unable to find config for "+ - "subserver type %s", subServerName) - } - - // Now that we've found an object mapping to our service name, we'll - // ensure that it's the type we need. 
- config, ok := subServerConf.(*Config) - if !ok { - return nil, nil, er.Errorf("wrong type of config for "+ - "subserver %s, expected %T got %T", subServerName, - &Config{}, subServerConf) - } - - return New(config) -} - -func init() { - subServer := &lnrpc.SubServerDriver{ - SubServerName: subServerName, - New: func(c lnrpc.SubServerConfigDispatcher) (lnrpc.SubServer, - lnrpc.MacaroonPerms, er.R) { - return createNewSubServer(c) - }, - } - - // If the build tag is active, then we'll register ourselves as a - // sub-RPC server within the global lnrpc package namespace. - if err := lnrpc.RegisterSubServer(subServer); err != nil { - panic(fmt.Sprintf("failed to register sub server driver "+ - "'%s': %v", subServerName, err)) - } -} diff --git a/lnd/lnrpc/invoicesrpc/invoices.pb.go b/lnd/lnrpc/invoicesrpc/invoices.pb.go deleted file mode 100644 index 12da9eb3..00000000 --- a/lnd/lnrpc/invoicesrpc/invoices.pb.go +++ /dev/null @@ -1,681 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: invoicesrpc/invoices.proto - -package invoicesrpc - -import ( - context "context" - fmt "fmt" - proto "github.com/golang/protobuf/proto" - lnrpc "github.com/pkt-cash/pktd/lnd/lnrpc" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -type CancelInvoiceMsg struct { - // Hash corresponding to the (hold) invoice to cancel. 
- PaymentHash []byte `protobuf:"bytes,1,opt,name=payment_hash,json=paymentHash,proto3" json:"payment_hash,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CancelInvoiceMsg) Reset() { *m = CancelInvoiceMsg{} } -func (m *CancelInvoiceMsg) String() string { return proto.CompactTextString(m) } -func (*CancelInvoiceMsg) ProtoMessage() {} -func (*CancelInvoiceMsg) Descriptor() ([]byte, []int) { - return fileDescriptor_090ab9c4958b987d, []int{0} -} - -func (m *CancelInvoiceMsg) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CancelInvoiceMsg.Unmarshal(m, b) -} -func (m *CancelInvoiceMsg) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CancelInvoiceMsg.Marshal(b, m, deterministic) -} -func (m *CancelInvoiceMsg) XXX_Merge(src proto.Message) { - xxx_messageInfo_CancelInvoiceMsg.Merge(m, src) -} -func (m *CancelInvoiceMsg) XXX_Size() int { - return xxx_messageInfo_CancelInvoiceMsg.Size(m) -} -func (m *CancelInvoiceMsg) XXX_DiscardUnknown() { - xxx_messageInfo_CancelInvoiceMsg.DiscardUnknown(m) -} - -var xxx_messageInfo_CancelInvoiceMsg proto.InternalMessageInfo - -func (m *CancelInvoiceMsg) GetPaymentHash() []byte { - if m != nil { - return m.PaymentHash - } - return nil -} - -type CancelInvoiceResp struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CancelInvoiceResp) Reset() { *m = CancelInvoiceResp{} } -func (m *CancelInvoiceResp) String() string { return proto.CompactTextString(m) } -func (*CancelInvoiceResp) ProtoMessage() {} -func (*CancelInvoiceResp) Descriptor() ([]byte, []int) { - return fileDescriptor_090ab9c4958b987d, []int{1} -} - -func (m *CancelInvoiceResp) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CancelInvoiceResp.Unmarshal(m, b) -} -func (m *CancelInvoiceResp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - 
return xxx_messageInfo_CancelInvoiceResp.Marshal(b, m, deterministic) -} -func (m *CancelInvoiceResp) XXX_Merge(src proto.Message) { - xxx_messageInfo_CancelInvoiceResp.Merge(m, src) -} -func (m *CancelInvoiceResp) XXX_Size() int { - return xxx_messageInfo_CancelInvoiceResp.Size(m) -} -func (m *CancelInvoiceResp) XXX_DiscardUnknown() { - xxx_messageInfo_CancelInvoiceResp.DiscardUnknown(m) -} - -var xxx_messageInfo_CancelInvoiceResp proto.InternalMessageInfo - -type AddHoldInvoiceRequest struct { - // - //An optional memo to attach along with the invoice. Used for record keeping - //purposes for the invoice's creator, and will also be set in the description - //field of the encoded payment request if the description_hash field is not - //being used. - Memo string `protobuf:"bytes,1,opt,name=memo,proto3" json:"memo,omitempty"` - // The hash of the preimage - Hash []byte `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"` - // - //The value of this invoice in satoshis - // - //The fields value and value_msat are mutually exclusive. - Value int64 `protobuf:"varint,3,opt,name=value,proto3" json:"value,omitempty"` - // - //The value of this invoice in millisatoshis - // - //The fields value and value_msat are mutually exclusive. - ValueMsat int64 `protobuf:"varint,10,opt,name=value_msat,json=valueMsat,proto3" json:"value_msat,omitempty"` - // - //Hash (SHA-256) of a description of the payment. Used if the description of - //payment (memo) is too long to naturally fit within the description field - //of an encoded payment request. - DescriptionHash []byte `protobuf:"bytes,4,opt,name=description_hash,json=descriptionHash,proto3" json:"description_hash,omitempty"` - // Payment request expiry time in seconds. Default is 3600 (1 hour). - Expiry int64 `protobuf:"varint,5,opt,name=expiry,proto3" json:"expiry,omitempty"` - // Fallback on-chain address. 
- FallbackAddr string `protobuf:"bytes,6,opt,name=fallback_addr,json=fallbackAddr,proto3" json:"fallback_addr,omitempty"` - // Delta to use for the time-lock of the CLTV extended to the final hop. - CltvExpiry uint64 `protobuf:"varint,7,opt,name=cltv_expiry,json=cltvExpiry,proto3" json:"cltv_expiry,omitempty"` - // - //Route hints that can each be individually used to assist in reaching the - //invoice's destination. - RouteHints []*lnrpc.RouteHint `protobuf:"bytes,8,rep,name=route_hints,json=routeHints,proto3" json:"route_hints,omitempty"` - // Whether this invoice should include routing hints for private channels. - Private bool `protobuf:"varint,9,opt,name=private,proto3" json:"private,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AddHoldInvoiceRequest) Reset() { *m = AddHoldInvoiceRequest{} } -func (m *AddHoldInvoiceRequest) String() string { return proto.CompactTextString(m) } -func (*AddHoldInvoiceRequest) ProtoMessage() {} -func (*AddHoldInvoiceRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_090ab9c4958b987d, []int{2} -} - -func (m *AddHoldInvoiceRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_AddHoldInvoiceRequest.Unmarshal(m, b) -} -func (m *AddHoldInvoiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AddHoldInvoiceRequest.Marshal(b, m, deterministic) -} -func (m *AddHoldInvoiceRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AddHoldInvoiceRequest.Merge(m, src) -} -func (m *AddHoldInvoiceRequest) XXX_Size() int { - return xxx_messageInfo_AddHoldInvoiceRequest.Size(m) -} -func (m *AddHoldInvoiceRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AddHoldInvoiceRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_AddHoldInvoiceRequest proto.InternalMessageInfo - -func (m *AddHoldInvoiceRequest) GetMemo() string { - if m != nil { - return m.Memo - } - return "" -} - -func (m 
*AddHoldInvoiceRequest) GetHash() []byte { - if m != nil { - return m.Hash - } - return nil -} - -func (m *AddHoldInvoiceRequest) GetValue() int64 { - if m != nil { - return m.Value - } - return 0 -} - -func (m *AddHoldInvoiceRequest) GetValueMsat() int64 { - if m != nil { - return m.ValueMsat - } - return 0 -} - -func (m *AddHoldInvoiceRequest) GetDescriptionHash() []byte { - if m != nil { - return m.DescriptionHash - } - return nil -} - -func (m *AddHoldInvoiceRequest) GetExpiry() int64 { - if m != nil { - return m.Expiry - } - return 0 -} - -func (m *AddHoldInvoiceRequest) GetFallbackAddr() string { - if m != nil { - return m.FallbackAddr - } - return "" -} - -func (m *AddHoldInvoiceRequest) GetCltvExpiry() uint64 { - if m != nil { - return m.CltvExpiry - } - return 0 -} - -func (m *AddHoldInvoiceRequest) GetRouteHints() []*lnrpc.RouteHint { - if m != nil { - return m.RouteHints - } - return nil -} - -func (m *AddHoldInvoiceRequest) GetPrivate() bool { - if m != nil { - return m.Private - } - return false -} - -type AddHoldInvoiceResp struct { - // - //A bare-bones invoice for a payment within the Lightning Network. With the - //details of the invoice, the sender has all the data necessary to send a - //payment to the recipient. 
- PaymentRequest string `protobuf:"bytes,1,opt,name=payment_request,json=paymentRequest,proto3" json:"payment_request,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AddHoldInvoiceResp) Reset() { *m = AddHoldInvoiceResp{} } -func (m *AddHoldInvoiceResp) String() string { return proto.CompactTextString(m) } -func (*AddHoldInvoiceResp) ProtoMessage() {} -func (*AddHoldInvoiceResp) Descriptor() ([]byte, []int) { - return fileDescriptor_090ab9c4958b987d, []int{3} -} - -func (m *AddHoldInvoiceResp) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_AddHoldInvoiceResp.Unmarshal(m, b) -} -func (m *AddHoldInvoiceResp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AddHoldInvoiceResp.Marshal(b, m, deterministic) -} -func (m *AddHoldInvoiceResp) XXX_Merge(src proto.Message) { - xxx_messageInfo_AddHoldInvoiceResp.Merge(m, src) -} -func (m *AddHoldInvoiceResp) XXX_Size() int { - return xxx_messageInfo_AddHoldInvoiceResp.Size(m) -} -func (m *AddHoldInvoiceResp) XXX_DiscardUnknown() { - xxx_messageInfo_AddHoldInvoiceResp.DiscardUnknown(m) -} - -var xxx_messageInfo_AddHoldInvoiceResp proto.InternalMessageInfo - -func (m *AddHoldInvoiceResp) GetPaymentRequest() string { - if m != nil { - return m.PaymentRequest - } - return "" -} - -type SettleInvoiceMsg struct { - // Externally discovered pre-image that should be used to settle the hold - // invoice. 
- Preimage []byte `protobuf:"bytes,1,opt,name=preimage,proto3" json:"preimage,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SettleInvoiceMsg) Reset() { *m = SettleInvoiceMsg{} } -func (m *SettleInvoiceMsg) String() string { return proto.CompactTextString(m) } -func (*SettleInvoiceMsg) ProtoMessage() {} -func (*SettleInvoiceMsg) Descriptor() ([]byte, []int) { - return fileDescriptor_090ab9c4958b987d, []int{4} -} - -func (m *SettleInvoiceMsg) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SettleInvoiceMsg.Unmarshal(m, b) -} -func (m *SettleInvoiceMsg) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SettleInvoiceMsg.Marshal(b, m, deterministic) -} -func (m *SettleInvoiceMsg) XXX_Merge(src proto.Message) { - xxx_messageInfo_SettleInvoiceMsg.Merge(m, src) -} -func (m *SettleInvoiceMsg) XXX_Size() int { - return xxx_messageInfo_SettleInvoiceMsg.Size(m) -} -func (m *SettleInvoiceMsg) XXX_DiscardUnknown() { - xxx_messageInfo_SettleInvoiceMsg.DiscardUnknown(m) -} - -var xxx_messageInfo_SettleInvoiceMsg proto.InternalMessageInfo - -func (m *SettleInvoiceMsg) GetPreimage() []byte { - if m != nil { - return m.Preimage - } - return nil -} - -type SettleInvoiceResp struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SettleInvoiceResp) Reset() { *m = SettleInvoiceResp{} } -func (m *SettleInvoiceResp) String() string { return proto.CompactTextString(m) } -func (*SettleInvoiceResp) ProtoMessage() {} -func (*SettleInvoiceResp) Descriptor() ([]byte, []int) { - return fileDescriptor_090ab9c4958b987d, []int{5} -} - -func (m *SettleInvoiceResp) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SettleInvoiceResp.Unmarshal(m, b) -} -func (m *SettleInvoiceResp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return 
xxx_messageInfo_SettleInvoiceResp.Marshal(b, m, deterministic) -} -func (m *SettleInvoiceResp) XXX_Merge(src proto.Message) { - xxx_messageInfo_SettleInvoiceResp.Merge(m, src) -} -func (m *SettleInvoiceResp) XXX_Size() int { - return xxx_messageInfo_SettleInvoiceResp.Size(m) -} -func (m *SettleInvoiceResp) XXX_DiscardUnknown() { - xxx_messageInfo_SettleInvoiceResp.DiscardUnknown(m) -} - -var xxx_messageInfo_SettleInvoiceResp proto.InternalMessageInfo - -type SubscribeSingleInvoiceRequest struct { - // Hash corresponding to the (hold) invoice to subscribe to. - RHash []byte `protobuf:"bytes,2,opt,name=r_hash,json=rHash,proto3" json:"r_hash,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SubscribeSingleInvoiceRequest) Reset() { *m = SubscribeSingleInvoiceRequest{} } -func (m *SubscribeSingleInvoiceRequest) String() string { return proto.CompactTextString(m) } -func (*SubscribeSingleInvoiceRequest) ProtoMessage() {} -func (*SubscribeSingleInvoiceRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_090ab9c4958b987d, []int{6} -} - -func (m *SubscribeSingleInvoiceRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SubscribeSingleInvoiceRequest.Unmarshal(m, b) -} -func (m *SubscribeSingleInvoiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SubscribeSingleInvoiceRequest.Marshal(b, m, deterministic) -} -func (m *SubscribeSingleInvoiceRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SubscribeSingleInvoiceRequest.Merge(m, src) -} -func (m *SubscribeSingleInvoiceRequest) XXX_Size() int { - return xxx_messageInfo_SubscribeSingleInvoiceRequest.Size(m) -} -func (m *SubscribeSingleInvoiceRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SubscribeSingleInvoiceRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_SubscribeSingleInvoiceRequest proto.InternalMessageInfo - -func (m *SubscribeSingleInvoiceRequest) 
GetRHash() []byte { - if m != nil { - return m.RHash - } - return nil -} - -func init() { - proto.RegisterType((*CancelInvoiceMsg)(nil), "invoicesrpc.CancelInvoiceMsg") - proto.RegisterType((*CancelInvoiceResp)(nil), "invoicesrpc.CancelInvoiceResp") - proto.RegisterType((*AddHoldInvoiceRequest)(nil), "invoicesrpc.AddHoldInvoiceRequest") - proto.RegisterType((*AddHoldInvoiceResp)(nil), "invoicesrpc.AddHoldInvoiceResp") - proto.RegisterType((*SettleInvoiceMsg)(nil), "invoicesrpc.SettleInvoiceMsg") - proto.RegisterType((*SettleInvoiceResp)(nil), "invoicesrpc.SettleInvoiceResp") - proto.RegisterType((*SubscribeSingleInvoiceRequest)(nil), "invoicesrpc.SubscribeSingleInvoiceRequest") -} - -func init() { proto.RegisterFile("invoicesrpc/invoices.proto", fileDescriptor_090ab9c4958b987d) } - -var fileDescriptor_090ab9c4958b987d = []byte{ - // 522 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x54, 0x5d, 0x6f, 0xd3, 0x30, - 0x14, 0x55, 0xba, 0xb6, 0x6b, 0x6f, 0xbb, 0xae, 0x18, 0x36, 0x45, 0x91, 0xca, 0x4a, 0x78, 0xa0, - 0x20, 0x91, 0x8e, 0x21, 0xde, 0xe0, 0x61, 0x20, 0xa4, 0x82, 0x34, 0x1e, 0x52, 0xc1, 0x03, 0x2f, - 0x91, 0xeb, 0x98, 0xc6, 0x5a, 0x3e, 0x8c, 0xed, 0x56, 0xec, 0x2f, 0xf2, 0x0f, 0xf8, 0x37, 0xc8, - 0x8e, 0x33, 0x25, 0x61, 0xec, 0x21, 0xd2, 0xbd, 0xe7, 0xda, 0x27, 0x47, 0xe7, 0xdc, 0x04, 0x3c, - 0x96, 0xef, 0x0b, 0x46, 0xa8, 0x14, 0x9c, 0x2c, 0xab, 0x3a, 0xe0, 0xa2, 0x50, 0x05, 0x1a, 0xd5, - 0x66, 0xde, 0x50, 0x70, 0x52, 0xe2, 0xfe, 0x1b, 0x98, 0x7e, 0xc0, 0x39, 0xa1, 0xe9, 0xa7, 0x72, - 0x7e, 0x25, 0xb7, 0xe8, 0x09, 0x8c, 0x39, 0xbe, 0xc9, 0x68, 0xae, 0xa2, 0x04, 0xcb, 0xc4, 0x75, - 0xe6, 0xce, 0x62, 0x1c, 0x8e, 0x2c, 0xb6, 0xc2, 0x32, 0xf1, 0x1f, 0xc2, 0x83, 0xc6, 0xb5, 0x90, - 0x4a, 0xee, 0xff, 0xee, 0xc0, 0xc9, 0x65, 0x1c, 0xaf, 0x8a, 0x34, 0xbe, 0x85, 0x7f, 0xee, 0xa8, - 0x54, 0x08, 0x41, 0x37, 0xa3, 0x59, 0x61, 0x98, 0x86, 0xa1, 0xa9, 0x35, 0x66, 0xd8, 0x3b, 0x86, - 0xdd, 0xd4, 0xe8, 0x11, 0xf4, 
0xf6, 0x38, 0xdd, 0x51, 0xf7, 0x60, 0xee, 0x2c, 0x0e, 0xc2, 0xb2, - 0x41, 0x33, 0x00, 0x53, 0x44, 0x99, 0xc4, 0xca, 0x05, 0x33, 0x1a, 0x1a, 0xe4, 0x4a, 0x62, 0x85, - 0x9e, 0xc3, 0x34, 0xa6, 0x92, 0x08, 0xc6, 0x15, 0x2b, 0xf2, 0x52, 0x72, 0xd7, 0x90, 0x1e, 0xd7, - 0x70, 0x2d, 0x1b, 0x9d, 0x42, 0x9f, 0xfe, 0xe2, 0x4c, 0xdc, 0xb8, 0x3d, 0xc3, 0x62, 0x3b, 0xf4, - 0x14, 0x8e, 0x7e, 0xe0, 0x34, 0xdd, 0x60, 0x72, 0x1d, 0xe1, 0x38, 0x16, 0x6e, 0xdf, 0x08, 0x1d, - 0x57, 0xe0, 0x65, 0x1c, 0x0b, 0x74, 0x06, 0x23, 0x92, 0xaa, 0x7d, 0x64, 0x19, 0x0e, 0xe7, 0xce, - 0xa2, 0x1b, 0x82, 0x86, 0x3e, 0x96, 0x2c, 0xaf, 0x60, 0x24, 0x8a, 0x9d, 0xa2, 0x51, 0xc2, 0x72, - 0x25, 0xdd, 0xc1, 0xfc, 0x60, 0x31, 0xba, 0x98, 0x06, 0x69, 0xae, 0xed, 0x0e, 0xf5, 0x64, 0xc5, - 0x72, 0x15, 0x82, 0xa8, 0x4a, 0x89, 0x5c, 0x38, 0xe4, 0x82, 0xed, 0xb1, 0xa2, 0xee, 0x70, 0xee, - 0x2c, 0x06, 0x61, 0xd5, 0xfa, 0xef, 0x00, 0xb5, 0xbd, 0x94, 0x1c, 0x3d, 0x83, 0xe3, 0x2a, 0x1a, - 0x51, 0x7a, 0x6b, 0x3d, 0x9d, 0x58, 0xd8, 0x3a, 0xee, 0x07, 0x30, 0x5d, 0x53, 0xa5, 0x52, 0x5a, - 0xcb, 0xd5, 0x83, 0x01, 0x17, 0x94, 0x65, 0x78, 0x4b, 0x6d, 0xa6, 0xb7, 0xbd, 0x0e, 0xb4, 0x71, - 0xde, 0x04, 0xfa, 0x16, 0x66, 0xeb, 0xdd, 0x46, 0x5b, 0xb8, 0xa1, 0x6b, 0x96, 0x6f, 0x6b, 0xd3, - 0x32, 0xd7, 0x13, 0xe8, 0x8b, 0xa8, 0x96, 0x62, 0x4f, 0x68, 0x9b, 0x3f, 0x77, 0x07, 0xce, 0xb4, - 0x73, 0xf1, 0xa7, 0x03, 0x03, 0x7b, 0x5e, 0xa2, 0x6f, 0x70, 0x7a, 0x37, 0x15, 0x7a, 0x11, 0xd4, - 0x56, 0x33, 0xb8, 0xf7, 0x7d, 0xde, 0xc4, 0x9a, 0x69, 0xe1, 0x73, 0x07, 0x7d, 0x81, 0xa3, 0xc6, - 0x22, 0xa2, 0x59, 0x83, 0xae, 0xbd, 0xdb, 0xde, 0xe3, 0xff, 0x8f, 0x8d, 0xc1, 0x5f, 0x61, 0xd2, - 0xb4, 0x1d, 0xf9, 0x8d, 0x1b, 0x77, 0xee, 0xb7, 0x77, 0x76, 0xef, 0x19, 0xc9, 0xb5, 0xcc, 0x86, - 0xbd, 0x2d, 0x99, 0xed, 0xa8, 0x5a, 0x32, 0xff, 0x49, 0xe6, 0xfd, 0xf9, 0xf7, 0x60, 0xcb, 0x54, - 0xb2, 0xdb, 0x04, 0xa4, 0xc8, 0x96, 0xfc, 0x5a, 0xbd, 0x24, 0x58, 0x26, 0xba, 0x88, 0x97, 0x69, - 0xae, 0x9f, 0xfa, 0x3f, 0x40, 0x70, 0xb2, 0xe9, 0x9b, 0xef, 0xfd, 
0xf5, 0xdf, 0x00, 0x00, 0x00, - 0xff, 0xff, 0x0d, 0x90, 0xe8, 0xb1, 0x25, 0x04, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// InvoicesClient is the client API for Invoices service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type InvoicesClient interface { - // - //SubscribeSingleInvoice returns a uni-directional stream (server -> client) - //to notify the client of state transitions of the specified invoice. - //Initially the current invoice state is always sent out. - SubscribeSingleInvoice(ctx context.Context, in *SubscribeSingleInvoiceRequest, opts ...grpc.CallOption) (Invoices_SubscribeSingleInvoiceClient, error) - // - //CancelInvoice cancels a currently open invoice. If the invoice is already - //canceled, this call will succeed. If the invoice is already settled, it will - //fail. - CancelInvoice(ctx context.Context, in *CancelInvoiceMsg, opts ...grpc.CallOption) (*CancelInvoiceResp, error) - // - //AddHoldInvoice creates a hold invoice. It ties the invoice to the hash - //supplied in the request. - AddHoldInvoice(ctx context.Context, in *AddHoldInvoiceRequest, opts ...grpc.CallOption) (*AddHoldInvoiceResp, error) - // - //SettleInvoice settles an accepted invoice. If the invoice is already - //settled, this call will succeed. 
- SettleInvoice(ctx context.Context, in *SettleInvoiceMsg, opts ...grpc.CallOption) (*SettleInvoiceResp, error) -} - -type invoicesClient struct { - cc *grpc.ClientConn -} - -func NewInvoicesClient(cc *grpc.ClientConn) InvoicesClient { - return &invoicesClient{cc} -} - -func (c *invoicesClient) SubscribeSingleInvoice(ctx context.Context, in *SubscribeSingleInvoiceRequest, opts ...grpc.CallOption) (Invoices_SubscribeSingleInvoiceClient, error) { - stream, err := c.cc.NewStream(ctx, &_Invoices_serviceDesc.Streams[0], "/invoicesrpc.Invoices/SubscribeSingleInvoice", opts...) - if err != nil { - return nil, err - } - x := &invoicesSubscribeSingleInvoiceClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Invoices_SubscribeSingleInvoiceClient interface { - Recv() (*lnrpc.Invoice, error) - grpc.ClientStream -} - -type invoicesSubscribeSingleInvoiceClient struct { - grpc.ClientStream -} - -func (x *invoicesSubscribeSingleInvoiceClient) Recv() (*lnrpc.Invoice, error) { - m := new(lnrpc.Invoice) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *invoicesClient) CancelInvoice(ctx context.Context, in *CancelInvoiceMsg, opts ...grpc.CallOption) (*CancelInvoiceResp, error) { - out := new(CancelInvoiceResp) - err := c.cc.Invoke(ctx, "/invoicesrpc.Invoices/CancelInvoice", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *invoicesClient) AddHoldInvoice(ctx context.Context, in *AddHoldInvoiceRequest, opts ...grpc.CallOption) (*AddHoldInvoiceResp, error) { - out := new(AddHoldInvoiceResp) - err := c.cc.Invoke(ctx, "/invoicesrpc.Invoices/AddHoldInvoice", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *invoicesClient) SettleInvoice(ctx context.Context, in *SettleInvoiceMsg, opts ...grpc.CallOption) (*SettleInvoiceResp, error) { - out := new(SettleInvoiceResp) - err := c.cc.Invoke(ctx, "/invoicesrpc.Invoices/SettleInvoice", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// InvoicesServer is the server API for Invoices service. -type InvoicesServer interface { - // - //SubscribeSingleInvoice returns a uni-directional stream (server -> client) - //to notify the client of state transitions of the specified invoice. - //Initially the current invoice state is always sent out. - SubscribeSingleInvoice(*SubscribeSingleInvoiceRequest, Invoices_SubscribeSingleInvoiceServer) error - // - //CancelInvoice cancels a currently open invoice. If the invoice is already - //canceled, this call will succeed. If the invoice is already settled, it will - //fail. - CancelInvoice(context.Context, *CancelInvoiceMsg) (*CancelInvoiceResp, error) - // - //AddHoldInvoice creates a hold invoice. It ties the invoice to the hash - //supplied in the request. - AddHoldInvoice(context.Context, *AddHoldInvoiceRequest) (*AddHoldInvoiceResp, error) - // - //SettleInvoice settles an accepted invoice. If the invoice is already - //settled, this call will succeed. - SettleInvoice(context.Context, *SettleInvoiceMsg) (*SettleInvoiceResp, error) -} - -// UnimplementedInvoicesServer can be embedded to have forward compatible implementations. 
-type UnimplementedInvoicesServer struct { -} - -func (*UnimplementedInvoicesServer) SubscribeSingleInvoice(req *SubscribeSingleInvoiceRequest, srv Invoices_SubscribeSingleInvoiceServer) error { - return status.Errorf(codes.Unimplemented, "method SubscribeSingleInvoice not implemented") -} -func (*UnimplementedInvoicesServer) CancelInvoice(ctx context.Context, req *CancelInvoiceMsg) (*CancelInvoiceResp, error) { - return nil, status.Errorf(codes.Unimplemented, "method CancelInvoice not implemented") -} -func (*UnimplementedInvoicesServer) AddHoldInvoice(ctx context.Context, req *AddHoldInvoiceRequest) (*AddHoldInvoiceResp, error) { - return nil, status.Errorf(codes.Unimplemented, "method AddHoldInvoice not implemented") -} -func (*UnimplementedInvoicesServer) SettleInvoice(ctx context.Context, req *SettleInvoiceMsg) (*SettleInvoiceResp, error) { - return nil, status.Errorf(codes.Unimplemented, "method SettleInvoice not implemented") -} - -func RegisterInvoicesServer(s *grpc.Server, srv InvoicesServer) { - s.RegisterService(&_Invoices_serviceDesc, srv) -} - -func _Invoices_SubscribeSingleInvoice_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(SubscribeSingleInvoiceRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(InvoicesServer).SubscribeSingleInvoice(m, &invoicesSubscribeSingleInvoiceServer{stream}) -} - -type Invoices_SubscribeSingleInvoiceServer interface { - Send(*lnrpc.Invoice) error - grpc.ServerStream -} - -type invoicesSubscribeSingleInvoiceServer struct { - grpc.ServerStream -} - -func (x *invoicesSubscribeSingleInvoiceServer) Send(m *lnrpc.Invoice) error { - return x.ServerStream.SendMsg(m) -} - -func _Invoices_CancelInvoice_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CancelInvoiceMsg) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return 
srv.(InvoicesServer).CancelInvoice(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/invoicesrpc.Invoices/CancelInvoice", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(InvoicesServer).CancelInvoice(ctx, req.(*CancelInvoiceMsg)) - } - return interceptor(ctx, in, info, handler) -} - -func _Invoices_AddHoldInvoice_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AddHoldInvoiceRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(InvoicesServer).AddHoldInvoice(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/invoicesrpc.Invoices/AddHoldInvoice", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(InvoicesServer).AddHoldInvoice(ctx, req.(*AddHoldInvoiceRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Invoices_SettleInvoice_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SettleInvoiceMsg) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(InvoicesServer).SettleInvoice(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/invoicesrpc.Invoices/SettleInvoice", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(InvoicesServer).SettleInvoice(ctx, req.(*SettleInvoiceMsg)) - } - return interceptor(ctx, in, info, handler) -} - -var _Invoices_serviceDesc = grpc.ServiceDesc{ - ServiceName: "invoicesrpc.Invoices", - HandlerType: (*InvoicesServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "CancelInvoice", - Handler: _Invoices_CancelInvoice_Handler, - }, - { - MethodName: "AddHoldInvoice", - Handler: _Invoices_AddHoldInvoice_Handler, - }, 
- { - MethodName: "SettleInvoice", - Handler: _Invoices_SettleInvoice_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "SubscribeSingleInvoice", - Handler: _Invoices_SubscribeSingleInvoice_Handler, - ServerStreams: true, - }, - }, - Metadata: "invoicesrpc/invoices.proto", -} diff --git a/lnd/lnrpc/invoicesrpc/invoices.pb.gw.go b/lnd/lnrpc/invoicesrpc/invoices.pb.gw.go deleted file mode 100644 index 13988265..00000000 --- a/lnd/lnrpc/invoicesrpc/invoices.pb.gw.go +++ /dev/null @@ -1,385 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. -// source: invoicesrpc/invoices.proto - -/* -Package invoicesrpc is a reverse proxy. - -It translates gRPC into RESTful JSON APIs. -*/ -package invoicesrpc - -import ( - "context" - "io" - "net/http" - - "github.com/golang/protobuf/descriptor" - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/status" -) - -// Suppress "imported and not used" errors -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray -var _ = descriptor.ForMessage - -func request_Invoices_SubscribeSingleInvoice_0(ctx context.Context, marshaler runtime.Marshaler, client InvoicesClient, req *http.Request, pathParams map[string]string) (Invoices_SubscribeSingleInvoiceClient, runtime.ServerMetadata, error) { - var protoReq SubscribeSingleInvoiceRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["r_hash"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "r_hash") - } - - protoReq.RHash, err = runtime.Bytes(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "r_hash", 
err) - } - - stream, err := client.SubscribeSingleInvoice(ctx, &protoReq) - if err != nil { - return nil, metadata, err - } - header, err := stream.Header() - if err != nil { - return nil, metadata, err - } - metadata.HeaderMD = header - return stream, metadata, nil - -} - -func request_Invoices_CancelInvoice_0(ctx context.Context, marshaler runtime.Marshaler, client InvoicesClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq CancelInvoiceMsg - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.CancelInvoice(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Invoices_CancelInvoice_0(ctx context.Context, marshaler runtime.Marshaler, server InvoicesServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq CancelInvoiceMsg - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.CancelInvoice(ctx, &protoReq) - return msg, metadata, err - -} - -func request_Invoices_AddHoldInvoice_0(ctx context.Context, marshaler runtime.Marshaler, client InvoicesClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AddHoldInvoiceRequest - var metadata 
runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.AddHoldInvoice(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Invoices_AddHoldInvoice_0(ctx context.Context, marshaler runtime.Marshaler, server InvoicesServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AddHoldInvoiceRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.AddHoldInvoice(ctx, &protoReq) - return msg, metadata, err - -} - -func request_Invoices_SettleInvoice_0(ctx context.Context, marshaler runtime.Marshaler, client InvoicesClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq SettleInvoiceMsg - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.SettleInvoice(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func 
local_request_Invoices_SettleInvoice_0(ctx context.Context, marshaler runtime.Marshaler, server InvoicesServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq SettleInvoiceMsg - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.SettleInvoice(ctx, &protoReq) - return msg, metadata, err - -} - -// RegisterInvoicesHandlerServer registers the http handlers for service Invoices to "mux". -// UnaryRPC :call InvoicesServer directly. -// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -func RegisterInvoicesHandlerServer(ctx context.Context, mux *runtime.ServeMux, server InvoicesServer) error { - - mux.Handle("GET", pattern_Invoices_SubscribeSingleInvoice_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - err := status.Error(codes.Unimplemented, "streaming calls are not yet supported in the in-process transport") - _, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - }) - - mux.Handle("POST", pattern_Invoices_CancelInvoice_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Invoices_CancelInvoice_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = 
runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Invoices_CancelInvoice_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Invoices_AddHoldInvoice_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Invoices_AddHoldInvoice_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Invoices_AddHoldInvoice_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Invoices_SettleInvoice_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Invoices_SettleInvoice_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Invoices_SettleInvoice_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - return nil -} - -// RegisterInvoicesHandlerFromEndpoint is same as RegisterInvoicesHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterInvoicesHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) - if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterInvoicesHandler(ctx, mux, conn) -} - -// RegisterInvoicesHandler registers the http handlers for service Invoices to "mux". -// The handlers forward requests to the grpc endpoint over "conn". -func RegisterInvoicesHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterInvoicesHandlerClient(ctx, mux, NewInvoicesClient(conn)) -} - -// RegisterInvoicesHandlerClient registers the http handlers for service Invoices -// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "InvoicesClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "InvoicesClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "InvoicesClient" to call the correct interceptors. 
-func RegisterInvoicesHandlerClient(ctx context.Context, mux *runtime.ServeMux, client InvoicesClient) error { - - mux.Handle("GET", pattern_Invoices_SubscribeSingleInvoice_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Invoices_SubscribeSingleInvoice_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Invoices_SubscribeSingleInvoice_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Invoices_CancelInvoice_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Invoices_CancelInvoice_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Invoices_CancelInvoice_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_Invoices_AddHoldInvoice_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Invoices_AddHoldInvoice_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Invoices_AddHoldInvoice_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Invoices_SettleInvoice_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Invoices_SettleInvoice_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Invoices_SettleInvoice_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - return nil -} - -var ( - pattern_Invoices_SubscribeSingleInvoice_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"v2", "invoices", "subscribe", "r_hash"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Invoices_CancelInvoice_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v2", "invoices", "cancel"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Invoices_AddHoldInvoice_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v2", "invoices", "hodl"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Invoices_SettleInvoice_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v2", "invoices", "settle"}, "", runtime.AssumeColonVerbOpt(true))) -) - -var ( - forward_Invoices_SubscribeSingleInvoice_0 = runtime.ForwardResponseStream - - forward_Invoices_CancelInvoice_0 = runtime.ForwardResponseMessage - - forward_Invoices_AddHoldInvoice_0 = runtime.ForwardResponseMessage - - forward_Invoices_SettleInvoice_0 = runtime.ForwardResponseMessage -) diff --git a/lnd/lnrpc/invoicesrpc/invoices.proto b/lnd/lnrpc/invoicesrpc/invoices.proto deleted file mode 100644 index 368e5b63..00000000 --- a/lnd/lnrpc/invoicesrpc/invoices.proto +++ /dev/null @@ -1,122 +0,0 @@ -syntax = "proto3"; - -import "rpc.proto"; - -package invoicesrpc; - -option go_package = "github.com/pkt-cash/pktd/lnd/lnrpc/invoicesrpc"; - -// Invoices is a service that can be used to create, accept, settle and cancel -// invoices. -service Invoices { - /* - SubscribeSingleInvoice returns a uni-directional stream (server -> client) - to notify the client of state transitions of the specified invoice. - Initially the current invoice state is always sent out. - */ - rpc SubscribeSingleInvoice (SubscribeSingleInvoiceRequest) - returns (stream lnrpc.Invoice); - - /* - CancelInvoice cancels a currently open invoice. 
If the invoice is already - canceled, this call will succeed. If the invoice is already settled, it will - fail. - */ - rpc CancelInvoice (CancelInvoiceMsg) returns (CancelInvoiceResp); - - /* - AddHoldInvoice creates a hold invoice. It ties the invoice to the hash - supplied in the request. - */ - rpc AddHoldInvoice (AddHoldInvoiceRequest) returns (AddHoldInvoiceResp); - - /* - SettleInvoice settles an accepted invoice. If the invoice is already - settled, this call will succeed. - */ - rpc SettleInvoice (SettleInvoiceMsg) returns (SettleInvoiceResp); -} - -message CancelInvoiceMsg { - // Hash corresponding to the (hold) invoice to cancel. - bytes payment_hash = 1; -} -message CancelInvoiceResp { -} - -message AddHoldInvoiceRequest { - /* - An optional memo to attach along with the invoice. Used for record keeping - purposes for the invoice's creator, and will also be set in the description - field of the encoded payment request if the description_hash field is not - being used. - */ - string memo = 1; - - // The hash of the preimage - bytes hash = 2; - - /* - The value of this invoice in satoshis - - The fields value and value_msat are mutually exclusive. - */ - int64 value = 3; - - /* - The value of this invoice in millisatoshis - - The fields value and value_msat are mutually exclusive. - */ - int64 value_msat = 10; - - /* - Hash (SHA-256) of a description of the payment. Used if the description of - payment (memo) is too long to naturally fit within the description field - of an encoded payment request. - */ - bytes description_hash = 4; - - // Payment request expiry time in seconds. Default is 3600 (1 hour). - int64 expiry = 5; - - // Fallback on-chain address. - string fallback_addr = 6; - - // Delta to use for the time-lock of the CLTV extended to the final hop. - uint64 cltv_expiry = 7; - - /* - Route hints that can each be individually used to assist in reaching the - invoice's destination. 
- */ - repeated lnrpc.RouteHint route_hints = 8; - - // Whether this invoice should include routing hints for private channels. - bool private = 9; -} - -message AddHoldInvoiceResp { - /* - A bare-bones invoice for a payment within the Lightning Network. With the - details of the invoice, the sender has all the data necessary to send a - payment to the recipient. - */ - string payment_request = 1; -} - -message SettleInvoiceMsg { - // Externally discovered pre-image that should be used to settle the hold - // invoice. - bytes preimage = 1; -} - -message SettleInvoiceResp { -} - -message SubscribeSingleInvoiceRequest { - reserved 1; - - // Hash corresponding to the (hold) invoice to subscribe to. - bytes r_hash = 2; -} diff --git a/lnd/lnrpc/invoicesrpc/invoices.swagger.json b/lnd/lnrpc/invoicesrpc/invoices.swagger.json deleted file mode 100644 index 846be0e9..00000000 --- a/lnd/lnrpc/invoicesrpc/invoices.swagger.json +++ /dev/null @@ -1,565 +0,0 @@ -{ - "swagger": "2.0", - "info": { - "title": "invoicesrpc/invoices.proto", - "version": "version not set" - }, - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "paths": { - "/v2/invoices/cancel": { - "post": { - "summary": "CancelInvoice cancels a currently open invoice. If the invoice is already\ncanceled, this call will succeed. If the invoice is already settled, it will\nfail.", - "operationId": "Invoices_CancelInvoice", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/invoicesrpcCancelInvoiceResp" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/invoicesrpcCancelInvoiceMsg" - } - } - ], - "tags": [ - "Invoices" - ] - } - }, - "/v2/invoices/hodl": { - "post": { - "summary": "AddHoldInvoice creates a hold invoice. 
It ties the invoice to the hash\nsupplied in the request.", - "operationId": "Invoices_AddHoldInvoice", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/invoicesrpcAddHoldInvoiceResp" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/invoicesrpcAddHoldInvoiceRequest" - } - } - ], - "tags": [ - "Invoices" - ] - } - }, - "/v2/invoices/settle": { - "post": { - "summary": "SettleInvoice settles an accepted invoice. If the invoice is already\nsettled, this call will succeed.", - "operationId": "Invoices_SettleInvoice", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/invoicesrpcSettleInvoiceResp" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/invoicesrpcSettleInvoiceMsg" - } - } - ], - "tags": [ - "Invoices" - ] - } - }, - "/v2/invoices/subscribe/{r_hash}": { - "get": { - "summary": "SubscribeSingleInvoice returns a uni-directional stream (server -\u003e client)\nto notify the client of state transitions of the specified invoice.\nInitially the current invoice state is always sent out.", - "operationId": "Invoices_SubscribeSingleInvoice", - "responses": { - "200": { - "description": "A successful response.(streaming responses)", - "schema": { - "type": "object", - "properties": { - "result": { - "$ref": "#/definitions/lnrpcInvoice" - }, - "error": { - "$ref": "#/definitions/runtimeStreamError" - } - }, - "title": "Stream result of lnrpcInvoice" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": 
"#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "r_hash", - "description": "Hash corresponding to the (hold) invoice to subscribe to.", - "in": "path", - "required": true, - "type": "string", - "format": "byte" - } - ], - "tags": [ - "Invoices" - ] - } - } - }, - "definitions": { - "InvoiceInvoiceState": { - "type": "string", - "enum": [ - "OPEN", - "SETTLED", - "CANCELED", - "ACCEPTED" - ], - "default": "OPEN" - }, - "invoicesrpcAddHoldInvoiceRequest": { - "type": "object", - "properties": { - "memo": { - "type": "string", - "description": "An optional memo to attach along with the invoice. Used for record keeping\npurposes for the invoice's creator, and will also be set in the description\nfield of the encoded payment request if the description_hash field is not\nbeing used." - }, - "hash": { - "type": "string", - "format": "byte", - "title": "The hash of the preimage" - }, - "value": { - "type": "string", - "format": "int64", - "description": "The fields value and value_msat are mutually exclusive.", - "title": "The value of this invoice in satoshis" - }, - "value_msat": { - "type": "string", - "format": "int64", - "description": "The fields value and value_msat are mutually exclusive.", - "title": "The value of this invoice in millisatoshis" - }, - "description_hash": { - "type": "string", - "format": "byte", - "description": "Hash (SHA-256) of a description of the payment. Used if the description of\npayment (memo) is too long to naturally fit within the description field\nof an encoded payment request." - }, - "expiry": { - "type": "string", - "format": "int64", - "description": "Payment request expiry time in seconds. Default is 3600 (1 hour)." - }, - "fallback_addr": { - "type": "string", - "description": "Fallback on-chain address." - }, - "cltv_expiry": { - "type": "string", - "format": "uint64", - "description": "Delta to use for the time-lock of the CLTV extended to the final hop." 
- }, - "route_hints": { - "type": "array", - "items": { - "$ref": "#/definitions/lnrpcRouteHint" - }, - "description": "Route hints that can each be individually used to assist in reaching the\ninvoice's destination." - }, - "private": { - "type": "boolean", - "description": "Whether this invoice should include routing hints for private channels." - } - } - }, - "invoicesrpcAddHoldInvoiceResp": { - "type": "object", - "properties": { - "payment_request": { - "type": "string", - "description": "A bare-bones invoice for a payment within the Lightning Network. With the\ndetails of the invoice, the sender has all the data necessary to send a\npayment to the recipient." - } - } - }, - "invoicesrpcCancelInvoiceMsg": { - "type": "object", - "properties": { - "payment_hash": { - "type": "string", - "format": "byte", - "description": "Hash corresponding to the (hold) invoice to cancel." - } - } - }, - "invoicesrpcCancelInvoiceResp": { - "type": "object" - }, - "invoicesrpcSettleInvoiceMsg": { - "type": "object", - "properties": { - "preimage": { - "type": "string", - "format": "byte", - "description": "Externally discovered pre-image that should be used to settle the hold\ninvoice." - } - } - }, - "invoicesrpcSettleInvoiceResp": { - "type": "object" - }, - "lnrpcFeature": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "is_required": { - "type": "boolean" - }, - "is_known": { - "type": "boolean" - } - } - }, - "lnrpcHopHint": { - "type": "object", - "properties": { - "node_id": { - "type": "string", - "description": "The public key of the node at the start of the channel." - }, - "chan_id": { - "type": "string", - "format": "uint64", - "description": "The unique identifier of the channel." - }, - "fee_base_msat": { - "type": "integer", - "format": "int64", - "description": "The base fee of the channel denominated in millisatoshis." 
- }, - "fee_proportional_millionths": { - "type": "integer", - "format": "int64", - "description": "The fee rate of the channel for sending one satoshi across it denominated in\nmillionths of a satoshi." - }, - "cltv_expiry_delta": { - "type": "integer", - "format": "int64", - "description": "The time-lock delta of the channel." - } - } - }, - "lnrpcInvoice": { - "type": "object", - "properties": { - "memo": { - "type": "string", - "description": "An optional memo to attach along with the invoice. Used for record keeping\npurposes for the invoice's creator, and will also be set in the description\nfield of the encoded payment request if the description_hash field is not\nbeing used." - }, - "r_preimage": { - "type": "string", - "format": "byte", - "description": "The hex-encoded preimage (32 byte) which will allow settling an incoming\nHTLC payable to this preimage. When using REST, this field must be encoded\nas base64." - }, - "r_hash": { - "type": "string", - "format": "byte", - "description": "The hash of the preimage. When using REST, this field must be encoded as\nbase64." - }, - "value": { - "type": "string", - "format": "int64", - "description": "The fields value and value_msat are mutually exclusive.", - "title": "The value of this invoice in satoshis" - }, - "value_msat": { - "type": "string", - "format": "int64", - "description": "The fields value and value_msat are mutually exclusive.", - "title": "The value of this invoice in millisatoshis" - }, - "settled": { - "type": "boolean", - "title": "Whether this invoice has been fulfilled" - }, - "creation_date": { - "type": "string", - "format": "int64", - "title": "When this invoice was created" - }, - "settle_date": { - "type": "string", - "format": "int64", - "title": "When this invoice was settled" - }, - "payment_request": { - "type": "string", - "description": "A bare-bones invoice for a payment within the Lightning Network. 
With the\ndetails of the invoice, the sender has all the data necessary to send a\npayment to the recipient." - }, - "description_hash": { - "type": "string", - "format": "byte", - "description": "Hash (SHA-256) of a description of the payment. Used if the description of\npayment (memo) is too long to naturally fit within the description field\nof an encoded payment request. When using REST, this field must be encoded\nas base64." - }, - "expiry": { - "type": "string", - "format": "int64", - "description": "Payment request expiry time in seconds. Default is 3600 (1 hour)." - }, - "fallback_addr": { - "type": "string", - "description": "Fallback on-chain address." - }, - "cltv_expiry": { - "type": "string", - "format": "uint64", - "description": "Delta to use for the time-lock of the CLTV extended to the final hop." - }, - "route_hints": { - "type": "array", - "items": { - "$ref": "#/definitions/lnrpcRouteHint" - }, - "description": "Route hints that can each be individually used to assist in reaching the\ninvoice's destination." - }, - "private": { - "type": "boolean", - "description": "Whether this invoice should include routing hints for private channels." - }, - "add_index": { - "type": "string", - "format": "uint64", - "description": "The \"add\" index of this invoice. Each newly created invoice will increment\nthis index making it monotonically increasing. Callers to the\nSubscribeInvoices call can use this to instantly get notified of all added\ninvoices with an add_index greater than this one." - }, - "settle_index": { - "type": "string", - "format": "uint64", - "description": "The \"settle\" index of this invoice. Each newly settled invoice will\nincrement this index making it monotonically increasing. Callers to the\nSubscribeInvoices call can use this to instantly get notified of all\nsettled invoices with an settle_index greater than this one." 
- }, - "amt_paid": { - "type": "string", - "format": "int64", - "description": "Deprecated, use amt_paid_sat or amt_paid_msat." - }, - "amt_paid_sat": { - "type": "string", - "format": "int64", - "description": "The amount that was accepted for this invoice, in satoshis. This will ONLY\nbe set if this invoice has been settled. We provide this field as if the\ninvoice was created with a zero value, then we need to record what amount\nwas ultimately accepted. Additionally, it's possible that the sender paid\nMORE that was specified in the original invoice. So we'll record that here\nas well." - }, - "amt_paid_msat": { - "type": "string", - "format": "int64", - "description": "The amount that was accepted for this invoice, in millisatoshis. This will\nONLY be set if this invoice has been settled. We provide this field as if\nthe invoice was created with a zero value, then we need to record what\namount was ultimately accepted. Additionally, it's possible that the sender\npaid MORE that was specified in the original invoice. So we'll record that\nhere as well." - }, - "state": { - "$ref": "#/definitions/InvoiceInvoiceState", - "description": "The state the invoice is in." - }, - "htlcs": { - "type": "array", - "items": { - "$ref": "#/definitions/lnrpcInvoiceHTLC" - }, - "description": "List of HTLCs paying to this invoice [EXPERIMENTAL]." - }, - "features": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/lnrpcFeature" - }, - "description": "List of features advertised on the invoice." - }, - "is_keysend": { - "type": "boolean", - "description": "Indicates if this invoice was a spontaneous payment that arrived via keysend\n[EXPERIMENTAL]." - } - } - }, - "lnrpcInvoiceHTLC": { - "type": "object", - "properties": { - "chan_id": { - "type": "string", - "format": "uint64", - "description": "Short channel id over which the htlc was received." 
- }, - "htlc_index": { - "type": "string", - "format": "uint64", - "description": "Index identifying the htlc on the channel." - }, - "amt_msat": { - "type": "string", - "format": "uint64", - "description": "The amount of the htlc in msat." - }, - "accept_height": { - "type": "integer", - "format": "int32", - "description": "Block height at which this htlc was accepted." - }, - "accept_time": { - "type": "string", - "format": "int64", - "description": "Time at which this htlc was accepted." - }, - "resolve_time": { - "type": "string", - "format": "int64", - "description": "Time at which this htlc was settled or canceled." - }, - "expiry_height": { - "type": "integer", - "format": "int32", - "description": "Block height at which this htlc expires." - }, - "state": { - "$ref": "#/definitions/lnrpcInvoiceHTLCState", - "description": "Current state the htlc is in." - }, - "custom_records": { - "type": "object", - "additionalProperties": { - "type": "string", - "format": "byte" - }, - "description": "Custom tlv records." - }, - "mpp_total_amt_msat": { - "type": "string", - "format": "uint64", - "description": "The total amount of the mpp payment in msat." - } - }, - "title": "Details of an HTLC that paid to an invoice" - }, - "lnrpcInvoiceHTLCState": { - "type": "string", - "enum": [ - "ACCEPTED", - "SETTLED", - "CANCELED" - ], - "default": "ACCEPTED" - }, - "lnrpcRouteHint": { - "type": "object", - "properties": { - "hop_hints": { - "type": "array", - "items": { - "$ref": "#/definitions/lnrpcHopHint" - }, - "description": "A list of hop hints that when chained together can assist in reaching a\nspecific destination." 
- } - } - }, - "protobufAny": { - "type": "object", - "properties": { - "type_url": { - "type": "string" - }, - "value": { - "type": "string", - "format": "byte" - } - } - }, - "runtimeError": { - "type": "object", - "properties": { - "error": { - "type": "string" - }, - "code": { - "type": "integer", - "format": "int32" - }, - "message": { - "type": "string" - }, - "details": { - "type": "array", - "items": { - "$ref": "#/definitions/protobufAny" - } - } - } - }, - "runtimeStreamError": { - "type": "object", - "properties": { - "grpc_code": { - "type": "integer", - "format": "int32" - }, - "http_code": { - "type": "integer", - "format": "int32" - }, - "message": { - "type": "string" - }, - "http_status": { - "type": "string" - }, - "details": { - "type": "array", - "items": { - "$ref": "#/definitions/protobufAny" - } - } - } - } - } -} diff --git a/lnd/lnrpc/invoicesrpc/invoices_server.go b/lnd/lnrpc/invoicesrpc/invoices_server.go deleted file mode 100644 index d23190df..00000000 --- a/lnd/lnrpc/invoicesrpc/invoices_server.go +++ /dev/null @@ -1,322 +0,0 @@ -// +build invoicesrpc - -package invoicesrpc - -import ( - "context" - "io/ioutil" - "os" - "path/filepath" - - "google.golang.org/grpc" - "gopkg.in/macaroon-bakery.v2/bakery" - - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/lnrpc" - "github.com/pkt-cash/pktd/lnd/lntypes" - "github.com/pkt-cash/pktd/lnd/macaroons" -) - -const ( - // subServerName is the name of the sub rpc server. We'll use this name - // to register ourselves, and we also require that the main - // SubServerConfigDispatcher instance recognize it as the name of our - // RPC service. - subServerName = "InvoicesRPC" -) - -var ( - // macaroonOps are the set of capabilities that our minted macaroon (if - // it doesn't already exist) will have. 
- macaroonOps = []bakery.Op{ - { - Entity: "invoices", - Action: "write", - }, - { - Entity: "invoices", - Action: "read", - }, - } - - // macPermissions maps RPC calls to the permissions they require. - macPermissions = map[string][]bakery.Op{ - "/invoicesrpc.Invoices/SubscribeSingleInvoice": {{ - Entity: "invoices", - Action: "read", - }}, - "/invoicesrpc.Invoices/SettleInvoice": {{ - Entity: "invoices", - Action: "write", - }}, - "/invoicesrpc.Invoices/CancelInvoice": {{ - Entity: "invoices", - Action: "write", - }}, - "/invoicesrpc.Invoices/AddHoldInvoice": {{ - Entity: "invoices", - Action: "write", - }}, - } - - // DefaultInvoicesMacFilename is the default name of the invoices - // macaroon that we expect to find via a file handle within the main - // configuration file in this package. - DefaultInvoicesMacFilename = "invoices.macaroon" -) - -// Server is a sub-server of the main RPC server: the invoices RPC. This sub -// RPC server allows external callers to access the status of the invoices -// currently active within lnd, as well as configuring it at runtime. -type Server struct { - quit chan struct{} - - cfg *Config -} - -// A compile time check to ensure that Server fully implements the -// InvoicesServer gRPC service. -var _ InvoicesServer = (*Server)(nil) - -// New returns a new instance of the invoicesrpc Invoices sub-server. We also -// return the set of permissions for the macaroons that we may create within -// this method. If the macaroons we need aren't found in the filepath, then -// we'll create them on start up. If we're unable to locate, or create the -// macaroons we need, then we'll return with an error. -func New(cfg *Config) (*Server, lnrpc.MacaroonPerms, er.R) { - // If the path of the invoices macaroon wasn't specified, then we'll - // assume that it's found at the default network directory. 
- macFilePath := filepath.Join( - cfg.NetworkDir, DefaultInvoicesMacFilename, - ) - - // Now that we know the full path of the invoices macaroon, we can - // check to see if we need to create it or not. If stateless_init is set - // then we don't write the macaroons. - if cfg.MacService != nil && !cfg.MacService.StatelessInit && - !lnrpc.FileExists(macFilePath) { - - log.Infof("Baking macaroons for invoices RPC Server at: %v", - macFilePath) - - // At this point, we know that the invoices macaroon doesn't - // yet, exist, so we need to create it with the help of the - // main macaroon service. - invoicesMac, err := cfg.MacService.NewMacaroon( - context.Background(), macaroons.DefaultRootKeyID, - macaroonOps..., - ) - if err != nil { - return nil, nil, err - } - invoicesMacBytes, err := invoicesMac.M().MarshalBinary() - if err != nil { - return nil, nil, err - } - err = ioutil.WriteFile(macFilePath, invoicesMacBytes, 0644) - if err != nil { - _ = os.Remove(macFilePath) - return nil, nil, err - } - } - - server := &Server{ - cfg: cfg, - quit: make(chan struct{}, 1), - } - - return server, macPermissions, nil -} - -// Start launches any helper goroutines required for the Server to function. -// -// NOTE: This is part of the lnrpc.SubServer interface. -func (s *Server) Start() er.R { - return nil -} - -// Stop signals any active goroutines for a graceful closure. -// -// NOTE: This is part of the lnrpc.SubServer interface. -func (s *Server) Stop() er.R { - close(s.quit) - - return nil -} - -// Name returns a unique string representation of the sub-server. This can be -// used to identify the sub-server and also de-duplicate them. -// -// NOTE: This is part of the lnrpc.SubServer interface. -func (s *Server) Name() string { - return subServerName -} - -// RegisterWithRootServer will be called by the root gRPC server to direct a sub -// RPC server to register itself with the main gRPC root server. 
Until this is -// called, each sub-server won't be able to have requests routed towards it. -// -// NOTE: This is part of the lnrpc.SubServer interface. -func (s *Server) RegisterWithRootServer(grpcServer *grpc.Server) er.R { - // We make sure that we register it with the main gRPC server to ensure - // all our methods are routed properly. - RegisterInvoicesServer(grpcServer, s) - - log.Debugf("Invoices RPC server successfully registered with root " + - "gRPC server") - - return nil -} - -// RegisterWithRestServer will be called by the root REST mux to direct a sub -// RPC server to register itself with the main REST mux server. Until this is -// called, each sub-server won't be able to have requests routed towards it. -// -// NOTE: This is part of the lnrpc.SubServer interface. -func (s *Server) RegisterWithRestServer(ctx context.Context, - mux *runtime.ServeMux, dest string, opts []grpc.DialOption) er.R { - - // We make sure that we register it with the main REST server to ensure - // all our methods are routed properly. - err := RegisterInvoicesHandlerFromEndpoint(ctx, mux, dest, opts) - if err != nil { - log.Errorf("Could not register Invoices REST server "+ - "with root REST server: %v", err) - return err - } - - log.Debugf("Invoices REST server successfully registered with " + - "root REST server") - return nil -} - -// SubscribeSingleInvoice returns a uni-directional stream (server -> client) -// for notifying the client of state changes for a specified invoice. 
-func (s *Server) SubscribeSingleInvoice(req *SubscribeSingleInvoiceRequest, - updateStream Invoices_SubscribeSingleInvoiceServer) er.R { - - hash, err := lntypes.MakeHash(req.RHash) - if err != nil { - return err - } - - invoiceClient, err := s.cfg.InvoiceRegistry.SubscribeSingleInvoice(hash) - if err != nil { - return err - } - defer invoiceClient.Cancel() - - for { - select { - case newInvoice := <-invoiceClient.Updates: - rpcInvoice, err := CreateRPCInvoice( - newInvoice, s.cfg.ChainParams, - ) - if err != nil { - return err - } - - if err := updateStream.Send(rpcInvoice); err != nil { - return err - } - - case <-s.quit: - return nil - } - } -} - -// SettleInvoice settles an accepted invoice. If the invoice is already settled, -// this call will succeed. -func (s *Server) SettleInvoice(ctx context.Context, - in *SettleInvoiceMsg) (*SettleInvoiceResp, er.R) { - - preimage, err := lntypes.MakePreimage(in.Preimage) - if err != nil { - return nil, err - } - - err = s.cfg.InvoiceRegistry.SettleHodlInvoice(preimage) - if err != nil && err != channeldb.ErrInvoiceAlreadySettled { - return nil, err - } - - return &SettleInvoiceResp{}, nil -} - -// CancelInvoice cancels a currently open invoice. If the invoice is already -// canceled, this call will succeed. If the invoice is already settled, it will -// fail. -func (s *Server) CancelInvoice(ctx context.Context, - in *CancelInvoiceMsg) (*CancelInvoiceResp, er.R) { - - paymentHash, err := lntypes.MakeHash(in.PaymentHash) - if err != nil { - return nil, err - } - - err = s.cfg.InvoiceRegistry.CancelInvoice(paymentHash) - if err != nil { - return nil, err - } - - log.Infof("Canceled invoice %v", paymentHash) - - return &CancelInvoiceResp{}, nil -} - -// AddHoldInvoice attempts to add a new hold invoice to the invoice database. -// Any duplicated invoices are rejected, therefore all invoices *must* have a -// unique payment hash. 
-func (s *Server) AddHoldInvoice(ctx context.Context, - invoice *AddHoldInvoiceRequest) (*AddHoldInvoiceResp, er.R) { - - addInvoiceCfg := &AddInvoiceConfig{ - AddInvoice: s.cfg.InvoiceRegistry.AddInvoice, - IsChannelActive: s.cfg.IsChannelActive, - ChainParams: s.cfg.ChainParams, - NodeSigner: s.cfg.NodeSigner, - DefaultCLTVExpiry: s.cfg.DefaultCLTVExpiry, - ChanDB: s.cfg.ChanDB, - GenInvoiceFeatures: s.cfg.GenInvoiceFeatures, - } - - hash, err := lntypes.MakeHash(invoice.Hash) - if err != nil { - return nil, err - } - - value, err := lnrpc.UnmarshallAmt(invoice.Value, invoice.ValueMsat) - if err != nil { - return nil, err - } - - // Convert the passed routing hints to the required format. - routeHints, err := CreateZpay32HopHints(invoice.RouteHints) - if err != nil { - return nil, err - } - addInvoiceData := &AddInvoiceData{ - Memo: invoice.Memo, - Hash: &hash, - Value: value, - DescriptionHash: invoice.DescriptionHash, - Expiry: invoice.Expiry, - FallbackAddr: invoice.FallbackAddr, - CltvExpiry: invoice.CltvExpiry, - Private: invoice.Private, - HodlInvoice: true, - Preimage: nil, - RouteHints: routeHints, - } - - _, dbInvoice, err := AddInvoice(ctx, addInvoiceCfg, addInvoiceData) - if err != nil { - return nil, err - } - - return &AddHoldInvoiceResp{ - PaymentRequest: string(dbInvoice.PaymentRequest), - }, nil -} diff --git a/lnd/lnrpc/invoicesrpc/utils.go b/lnd/lnrpc/invoicesrpc/utils.go deleted file mode 100644 index 05484404..00000000 --- a/lnd/lnrpc/invoicesrpc/utils.go +++ /dev/null @@ -1,234 +0,0 @@ -package invoicesrpc - -import ( - "encoding/hex" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/chaincfg" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/lnrpc" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/zpay32" -) - -// decodePayReq decodes the invoice payment request if present. 
This is needed, -// because not all information is stored in dedicated invoice fields. If there -// is no payment request present, a dummy request will be returned. This can -// happen with just-in-time inserted keysend invoices. -func decodePayReq(invoice *channeldb.Invoice, - activeNetParams *chaincfg.Params) (*zpay32.Invoice, er.R) { - - paymentRequest := string(invoice.PaymentRequest) - if paymentRequest == "" { - preimage := invoice.Terms.PaymentPreimage - if preimage == nil { - return nil, er.New("cannot reconstruct pay req") - } - hash := [32]byte(preimage.Hash()) - return &zpay32.Invoice{ - PaymentHash: &hash, - }, nil - } - - decoded, err := zpay32.Decode(paymentRequest, activeNetParams) - if err != nil { - return nil, er.Errorf("unable to decode payment "+ - "request: %v", err) - } - return decoded, nil - -} - -// CreateRPCInvoice creates an *lnrpc.Invoice from the *channeldb.Invoice. -func CreateRPCInvoice(invoice *channeldb.Invoice, - activeNetParams *chaincfg.Params) (*lnrpc.Invoice, er.R) { - - decoded, err := decodePayReq(invoice, activeNetParams) - if err != nil { - return nil, err - } - - var descHash []byte - if decoded.DescriptionHash != nil { - descHash = decoded.DescriptionHash[:] - } - - fallbackAddr := "" - if decoded.FallbackAddr != nil { - fallbackAddr = decoded.FallbackAddr.String() - } - - settleDate := int64(0) - if !invoice.SettleDate.IsZero() { - settleDate = invoice.SettleDate.Unix() - } - - // Convert between the `lnrpc` and `routing` types. 
- routeHints := CreateRPCRouteHints(decoded.RouteHints) - - preimage := invoice.Terms.PaymentPreimage - satAmt := invoice.Terms.Value.ToSatoshis() - satAmtPaid := invoice.AmtPaid.ToSatoshis() - - isSettled := invoice.State == channeldb.ContractSettled - - var state lnrpc.Invoice_InvoiceState - switch invoice.State { - case channeldb.ContractOpen: - state = lnrpc.Invoice_OPEN - case channeldb.ContractSettled: - state = lnrpc.Invoice_SETTLED - case channeldb.ContractCanceled: - state = lnrpc.Invoice_CANCELED - case channeldb.ContractAccepted: - state = lnrpc.Invoice_ACCEPTED - default: - return nil, er.Errorf("unknown invoice state %v", - invoice.State) - } - - rpcHtlcs := make([]*lnrpc.InvoiceHTLC, 0, len(invoice.Htlcs)) - for key, htlc := range invoice.Htlcs { - var state lnrpc.InvoiceHTLCState - switch htlc.State { - case channeldb.HtlcStateAccepted: - state = lnrpc.InvoiceHTLCState_ACCEPTED - case channeldb.HtlcStateSettled: - state = lnrpc.InvoiceHTLCState_SETTLED - case channeldb.HtlcStateCanceled: - state = lnrpc.InvoiceHTLCState_CANCELED - default: - return nil, er.Errorf("unknown state %v", htlc.State) - } - - rpcHtlc := lnrpc.InvoiceHTLC{ - ChanId: key.ChanID.ToUint64(), - HtlcIndex: key.HtlcID, - AcceptHeight: int32(htlc.AcceptHeight), - AcceptTime: htlc.AcceptTime.Unix(), - ExpiryHeight: int32(htlc.Expiry), - AmtMsat: uint64(htlc.Amt), - State: state, - CustomRecords: htlc.CustomRecords, - MppTotalAmtMsat: uint64(htlc.MppTotalAmt), - } - - // Only report resolved times if htlc is resolved. 
- if htlc.State != channeldb.HtlcStateAccepted { - rpcHtlc.ResolveTime = htlc.ResolveTime.Unix() - } - - rpcHtlcs = append(rpcHtlcs, &rpcHtlc) - } - - rpcInvoice := &lnrpc.Invoice{ - Memo: string(invoice.Memo[:]), - RHash: decoded.PaymentHash[:], - Value: int64(satAmt), - ValueMsat: int64(invoice.Terms.Value), - CreationDate: invoice.CreationDate.Unix(), - SettleDate: settleDate, - Settled: isSettled, - PaymentRequest: string(invoice.PaymentRequest), - DescriptionHash: descHash, - Expiry: int64(invoice.Terms.Expiry.Seconds()), - CltvExpiry: uint64(invoice.Terms.FinalCltvDelta), - FallbackAddr: fallbackAddr, - RouteHints: routeHints, - AddIndex: invoice.AddIndex, - Private: len(routeHints) > 0, - SettleIndex: invoice.SettleIndex, - AmtPaidSat: int64(satAmtPaid), - AmtPaidMsat: int64(invoice.AmtPaid), - AmtPaid: int64(invoice.AmtPaid), - State: state, - Htlcs: rpcHtlcs, - Features: CreateRPCFeatures(invoice.Terms.Features), - IsKeysend: len(invoice.PaymentRequest) == 0, - } - - if preimage != nil { - rpcInvoice.RPreimage = preimage[:] - } - - return rpcInvoice, nil -} - -// CreateRPCFeatures maps a feature vector into a list of lnrpc.Features. -func CreateRPCFeatures(fv *lnwire.FeatureVector) map[uint32]*lnrpc.Feature { - if fv == nil { - return nil - } - - features := fv.Features() - rpcFeatures := make(map[uint32]*lnrpc.Feature, len(features)) - for bit := range features { - rpcFeatures[uint32(bit)] = &lnrpc.Feature{ - Name: fv.Name(bit), - IsRequired: bit.IsRequired(), - IsKnown: fv.IsKnown(bit), - } - } - - return rpcFeatures -} - -// CreateRPCRouteHints takes in the decoded form of an invoice's route hints -// and converts them into the lnrpc type. 
-func CreateRPCRouteHints(routeHints [][]zpay32.HopHint) []*lnrpc.RouteHint { - var res []*lnrpc.RouteHint - - for _, route := range routeHints { - hopHints := make([]*lnrpc.HopHint, 0, len(route)) - for _, hop := range route { - pubKey := hex.EncodeToString( - hop.NodeID.SerializeCompressed(), - ) - - hint := &lnrpc.HopHint{ - NodeId: pubKey, - ChanId: hop.ChannelID, - FeeBaseMsat: hop.FeeBaseMSat, - FeeProportionalMillionths: hop.FeeProportionalMillionths, - CltvExpiryDelta: uint32(hop.CLTVExpiryDelta), - } - - hopHints = append(hopHints, hint) - } - - routeHint := &lnrpc.RouteHint{HopHints: hopHints} - res = append(res, routeHint) - } - - return res -} - -// CreateZpay32HopHints takes in the lnrpc form of route hints and converts them -// into an invoice decoded form. -func CreateZpay32HopHints(routeHints []*lnrpc.RouteHint) ([][]zpay32.HopHint, er.R) { - var res [][]zpay32.HopHint - for _, route := range routeHints { - hopHints := make([]zpay32.HopHint, 0, len(route.HopHints)) - for _, hop := range route.HopHints { - pubKeyBytes, err := util.DecodeHex(hop.NodeId) - if err != nil { - return nil, err - } - p, err := btcec.ParsePubKey(pubKeyBytes, btcec.S256()) - if err != nil { - return nil, err - } - hopHints = append(hopHints, zpay32.HopHint{ - NodeID: p, - ChannelID: hop.ChanId, - FeeBaseMSat: hop.FeeBaseMsat, - FeeProportionalMillionths: hop.FeeProportionalMillionths, - CLTVExpiryDelta: uint16(hop.CltvExpiryDelta), - }) - } - res = append(res, hopHints) - } - return res, nil -} diff --git a/lnd/lnrpc/lnclipb/lncli.pb.go b/lnd/lnrpc/lnclipb/lncli.pb.go deleted file mode 100644 index 9fc57521..00000000 --- a/lnd/lnrpc/lnclipb/lncli.pb.go +++ /dev/null @@ -1,91 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// source: lnclipb/lncli.proto - -package lnclipb - -import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - verrpc "github.com/pkt-cash/pktd/lnd/lnrpc/verrpc" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -type VersionResponse struct { - // The version information for lncli. - Lncli *verrpc.Version `protobuf:"bytes,1,opt,name=lncli,proto3" json:"lncli,omitempty"` - // The version information for lnd. - Lnd *verrpc.Version `protobuf:"bytes,2,opt,name=lnd,proto3" json:"lnd,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *VersionResponse) Reset() { *m = VersionResponse{} } -func (m *VersionResponse) String() string { return proto.CompactTextString(m) } -func (*VersionResponse) ProtoMessage() {} -func (*VersionResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_88b54c9c61b986c4, []int{0} -} - -func (m *VersionResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VersionResponse.Unmarshal(m, b) -} -func (m *VersionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VersionResponse.Marshal(b, m, deterministic) -} -func (m *VersionResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_VersionResponse.Merge(m, src) -} -func (m *VersionResponse) XXX_Size() int { - return xxx_messageInfo_VersionResponse.Size(m) -} -func (m *VersionResponse) XXX_DiscardUnknown() { - xxx_messageInfo_VersionResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_VersionResponse 
proto.InternalMessageInfo - -func (m *VersionResponse) GetLncli() *verrpc.Version { - if m != nil { - return m.Lncli - } - return nil -} - -func (m *VersionResponse) GetLnd() *verrpc.Version { - if m != nil { - return m.Lnd - } - return nil -} - -func init() { - proto.RegisterType((*VersionResponse)(nil), "lnclipb.VersionResponse") -} - -func init() { proto.RegisterFile("lnclipb/lncli.proto", fileDescriptor_88b54c9c61b986c4) } - -var fileDescriptor_88b54c9c61b986c4 = []byte{ - // 151 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xce, 0xc9, 0x4b, 0xce, - 0xc9, 0x2c, 0x48, 0xd2, 0x07, 0xd3, 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, 0x42, 0xec, 0x50, 0x41, - 0x29, 0xe1, 0xb2, 0xd4, 0xa2, 0xa2, 0x82, 0x64, 0x7d, 0x08, 0x05, 0x91, 0x55, 0x8a, 0xe6, 0xe2, - 0x0f, 0x4b, 0x2d, 0x2a, 0xce, 0xcc, 0xcf, 0x0b, 0x4a, 0x2d, 0x2e, 0xc8, 0xcf, 0x2b, 0x4e, 0x15, - 0x52, 0xe5, 0x62, 0x05, 0x6b, 0x91, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x36, 0xe2, 0xd7, 0x83, 0x6a, - 0x80, 0xa9, 0x83, 0xc8, 0x0a, 0x29, 0x72, 0x31, 0xe7, 0xe4, 0xa5, 0x48, 0x30, 0x61, 0x57, 0x04, - 0x92, 0x73, 0xd2, 0x89, 0xd2, 0x4a, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, - 0x2f, 0xc8, 0x2e, 0xd1, 0x4d, 0x4e, 0x2c, 0xce, 0x00, 0x31, 0x52, 0xf4, 0x73, 0xf2, 0x40, 0x18, - 0xe4, 0x1e, 0xa8, 0xfb, 0x92, 0xd8, 0xc0, 0x2e, 0x32, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x18, - 0x78, 0xc1, 0x0b, 0xc6, 0x00, 0x00, 0x00, -} diff --git a/lnd/lnrpc/lnclipb/lncli.proto b/lnd/lnrpc/lnclipb/lncli.proto deleted file mode 100644 index 92bde21e..00000000 --- a/lnd/lnrpc/lnclipb/lncli.proto +++ /dev/null @@ -1,15 +0,0 @@ -syntax = "proto3"; - -import "verrpc/verrpc.proto"; - -package lnclipb; - -option go_package = "github.com/pkt-cash/pktd/lnd/lnrpc/lnclipb"; - -message VersionResponse { - // The version information for lncli. - verrpc.Version lncli = 1; - - // The version information for lnd. 
- verrpc.Version lnd = 2; -}; diff --git a/lnd/lnrpc/lnclipb/lncli.swagger.json b/lnd/lnrpc/lnclipb/lncli.swagger.json deleted file mode 100644 index eb88452f..00000000 --- a/lnd/lnrpc/lnclipb/lncli.swagger.json +++ /dev/null @@ -1,49 +0,0 @@ -{ - "swagger": "2.0", - "info": { - "title": "lnclipb/lncli.proto", - "version": "version not set" - }, - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "paths": {}, - "definitions": { - "protobufAny": { - "type": "object", - "properties": { - "type_url": { - "type": "string" - }, - "value": { - "type": "string", - "format": "byte" - } - } - }, - "runtimeError": { - "type": "object", - "properties": { - "error": { - "type": "string" - }, - "code": { - "type": "integer", - "format": "int32" - }, - "message": { - "type": "string" - }, - "details": { - "type": "array", - "items": { - "$ref": "#/definitions/protobufAny" - } - } - } - } - } -} diff --git a/lnd/lnrpc/marshall_utils.go b/lnd/lnrpc/marshall_utils.go deleted file mode 100644 index b24e83ac..00000000 --- a/lnd/lnrpc/marshall_utils.go +++ /dev/null @@ -1,146 +0,0 @@ -package lnrpc - -import ( - "encoding/hex" - - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/chaincfg" - "github.com/pkt-cash/pktd/lnd/lnwallet" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/txscript" -) - -var ( - Err = er.NewErrorType("lnd.lnrpc") - // ErrSatMsatMutualExclusive is returned when both a sat and an msat - // amount are set. - ErrSatMsatMutualExclusive = Err.CodeWithDetail("ErrSatMsatMutualExclusive", - "sat and msat arguments are mutually exclusive", - ) -) - -// CalculateFeeLimit returns the fee limit in millisatoshis. If a percentage -// based fee limit has been requested, we'll factor in the ratio provided with -// the amount of the payment. 
-func CalculateFeeLimit(feeLimit *FeeLimit, - amount lnwire.MilliSatoshi) lnwire.MilliSatoshi { - - switch feeLimit.GetLimit().(type) { - - case *FeeLimit_Fixed: - return lnwire.NewMSatFromSatoshis( - btcutil.Amount(feeLimit.GetFixed()), - ) - - case *FeeLimit_FixedMsat: - return lnwire.MilliSatoshi(feeLimit.GetFixedMsat()) - - case *FeeLimit_Percent: - return amount * lnwire.MilliSatoshi(feeLimit.GetPercent()) / 100 - - default: - // If a fee limit was not specified, we'll use the payment's - // amount as an upper bound in order to avoid payment attempts - // from incurring fees higher than the payment amount itself. - return amount - } -} - -// UnmarshallAmt returns a strong msat type for a sat/msat pair of rpc fields. -func UnmarshallAmt(amtSat, amtMsat int64) (lnwire.MilliSatoshi, er.R) { - if amtSat != 0 && amtMsat != 0 { - return 0, ErrSatMsatMutualExclusive.Default() - } - - if amtSat != 0 { - return lnwire.NewMSatFromSatoshis(btcutil.Amount(amtSat)), nil - } - - return lnwire.MilliSatoshi(amtMsat), nil -} - -// ParseConfs validates the minimum and maximum confirmation arguments of a -// ListUnspent request. -func ParseConfs(min, max int32) (int32, int32, er.R) { - switch { - // Ensure that the user didn't attempt to specify a negative number of - // confirmations, as that isn't possible. - case min < 0: - return 0, 0, er.Errorf("min confirmations must be >= 0") - - // We'll also ensure that the min number of confs is strictly less than - // or equal to the max number of confs for sanity. - case min > max: - return 0, 0, er.Errorf("max confirmations must be >= min " + - "confirmations") - - default: - return min, max, nil - } -} - -// MarshalUtxos translates a []*lnwallet.Utxo into a []*lnrpc.Utxo. -func MarshalUtxos(utxos []*lnwallet.Utxo, activeNetParams *chaincfg.Params) ( - []*Utxo, er.R) { - - res := make([]*Utxo, 0, len(utxos)) - for _, utxo := range utxos { - // Translate lnwallet address type to the proper gRPC proto - // address type. 
- var addrType AddressType - switch utxo.AddressType { - - case lnwallet.WitnessPubKey: - addrType = AddressType_WITNESS_PUBKEY_HASH - - case lnwallet.NestedWitnessPubKey: - addrType = AddressType_NESTED_PUBKEY_HASH - - case lnwallet.UnknownAddressType: - continue - - default: - return nil, er.Errorf("invalid utxo address type") - } - - // Now that we know we have a proper mapping to an address, - // we'll convert the regular outpoint to an lnrpc variant. - outpoint := &OutPoint{ - TxidBytes: utxo.OutPoint.Hash[:], - TxidStr: utxo.OutPoint.Hash.String(), - OutputIndex: utxo.OutPoint.Index, - } - - utxoResp := Utxo{ - AddressType: addrType, - AmountSat: int64(utxo.Value), - PkScript: hex.EncodeToString(utxo.PkScript), - Outpoint: outpoint, - Confirmations: utxo.Confirmations, - } - - // Finally, we'll attempt to extract the raw address from the - // script so we can display a human friendly address to the end - // user. - _, outAddresses, _, err := txscript.ExtractPkScriptAddrs( - utxo.PkScript, activeNetParams, - ) - if err != nil { - return nil, err - } - - // If we can't properly locate a single address, then this was - // an error in our mapping, and we'll return an error back to - // the user. - if len(outAddresses) != 1 { - return nil, er.Errorf("an output was unexpectedly " + - "multisig") - } - utxoResp.Address = outAddresses[0].String() - - res = append(res, &utxoResp) - } - - return res, nil -} diff --git a/lnd/lnrpc/metaservice.pb.go b/lnd/lnrpc/metaservice.pb.go deleted file mode 100644 index 90e55cbf..00000000 --- a/lnd/lnrpc/metaservice.pb.go +++ /dev/null @@ -1,227 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// source: metaservice.proto - -package lnrpc - -import ( - context "context" - fmt "fmt" - proto "github.com/golang/protobuf/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -type GetInfo2Request struct { - InfoResponse *GetInfoResponse `protobuf:"bytes,1,opt,name=InfoResponse,proto3" json:"InfoResponse,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetInfo2Request) Reset() { *m = GetInfo2Request{} } -func (m *GetInfo2Request) String() string { return proto.CompactTextString(m) } -func (*GetInfo2Request) ProtoMessage() {} -func (*GetInfo2Request) Descriptor() ([]byte, []int) { - return fileDescriptor_b3fb5294949b9545, []int{0} -} - -func (m *GetInfo2Request) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetInfo2Request.Unmarshal(m, b) -} -func (m *GetInfo2Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetInfo2Request.Marshal(b, m, deterministic) -} -func (m *GetInfo2Request) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetInfo2Request.Merge(m, src) -} -func (m *GetInfo2Request) XXX_Size() int { - return xxx_messageInfo_GetInfo2Request.Size(m) -} -func (m *GetInfo2Request) XXX_DiscardUnknown() { - xxx_messageInfo_GetInfo2Request.DiscardUnknown(m) -} - -var xxx_messageInfo_GetInfo2Request proto.InternalMessageInfo - -func (m *GetInfo2Request) GetInfoResponse() 
*GetInfoResponse { - if m != nil { - return m.InfoResponse - } - return nil -} - -type GetInfo2Response struct { - Neutrino *NeutrinoInfo `protobuf:"bytes,1,opt,name=neutrino,proto3" json:"neutrino,omitempty"` - Wallet *WalletInfo `protobuf:"bytes,2,opt,name=wallet,proto3" json:"wallet,omitempty"` - Lightning *GetInfoResponse `protobuf:"bytes,3,opt,name=lightning,proto3" json:"lightning,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetInfo2Response) Reset() { *m = GetInfo2Response{} } -func (m *GetInfo2Response) String() string { return proto.CompactTextString(m) } -func (*GetInfo2Response) ProtoMessage() {} -func (*GetInfo2Response) Descriptor() ([]byte, []int) { - return fileDescriptor_b3fb5294949b9545, []int{1} -} - -func (m *GetInfo2Response) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetInfo2Response.Unmarshal(m, b) -} -func (m *GetInfo2Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetInfo2Response.Marshal(b, m, deterministic) -} -func (m *GetInfo2Response) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetInfo2Response.Merge(m, src) -} -func (m *GetInfo2Response) XXX_Size() int { - return xxx_messageInfo_GetInfo2Response.Size(m) -} -func (m *GetInfo2Response) XXX_DiscardUnknown() { - xxx_messageInfo_GetInfo2Response.DiscardUnknown(m) -} - -var xxx_messageInfo_GetInfo2Response proto.InternalMessageInfo - -func (m *GetInfo2Response) GetNeutrino() *NeutrinoInfo { - if m != nil { - return m.Neutrino - } - return nil -} - -func (m *GetInfo2Response) GetWallet() *WalletInfo { - if m != nil { - return m.Wallet - } - return nil -} - -func (m *GetInfo2Response) GetLightning() *GetInfoResponse { - if m != nil { - return m.Lightning - } - return nil -} - -func init() { - proto.RegisterType((*GetInfo2Request)(nil), "lnrpc.GetInfo2Request") - proto.RegisterType((*GetInfo2Response)(nil), "lnrpc.GetInfo2Response") -} 
- -func init() { proto.RegisterFile("metaservice.proto", fileDescriptor_b3fb5294949b9545) } - -var fileDescriptor_b3fb5294949b9545 = []byte{ - // 257 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x90, 0xc1, 0x4a, 0xc4, 0x30, - 0x10, 0x86, 0xa9, 0xe2, 0xb2, 0x9d, 0x15, 0x74, 0x23, 0x68, 0xe9, 0x49, 0x8a, 0x07, 0x3d, 0xd8, - 0x42, 0xf5, 0xa4, 0x37, 0x2f, 0xa2, 0xb0, 0x1e, 0xea, 0x41, 0xf0, 0xd6, 0x8d, 0x63, 0x5b, 0x36, - 0x9b, 0xc4, 0x64, 0xaa, 0x2f, 0xe4, 0x83, 0x4a, 0x93, 0xd4, 0x45, 0x05, 0x0f, 0x81, 0xc9, 0x9f, - 0x2f, 0x1f, 0xc3, 0x0f, 0xf3, 0x35, 0x52, 0x6d, 0xd1, 0xbc, 0x77, 0x1c, 0x73, 0x6d, 0x14, 0x29, - 0xb6, 0x23, 0xa4, 0xd1, 0x3c, 0x8d, 0xf5, 0x8a, 0x7c, 0x92, 0xc6, 0x46, 0x73, 0x3f, 0x66, 0x0b, - 0xd8, 0xbb, 0x45, 0xba, 0x93, 0xaf, 0xaa, 0xac, 0xf0, 0xad, 0x47, 0x4b, 0xec, 0x0a, 0x76, 0x87, - 0x7b, 0x85, 0x56, 0x2b, 0x69, 0x31, 0x89, 0x8e, 0xa3, 0xd3, 0x59, 0x79, 0x98, 0x3b, 0x4d, 0x1e, - 0xe8, 0xf1, 0xb5, 0xfa, 0xc1, 0x66, 0x9f, 0x11, 0xec, 0x6f, 0x7c, 0x3e, 0x64, 0x05, 0x4c, 0x25, - 0xf6, 0x64, 0x3a, 0xa9, 0x82, 0xec, 0x20, 0xc8, 0x1e, 0x42, 0xec, 0x1c, 0xdf, 0x10, 0x3b, 0x83, - 0xc9, 0x47, 0x2d, 0x04, 0x52, 0xb2, 0xe5, 0xf0, 0x79, 0xc0, 0x9f, 0x5c, 0xe8, 0xe0, 0x00, 0xb0, - 0x4b, 0x88, 0x45, 0xd7, 0xb4, 0x24, 0x3b, 0xd9, 0x24, 0xdb, 0xff, 0x6e, 0xba, 0x01, 0xcb, 0x7b, - 0x98, 0x2d, 0x90, 0xea, 0x47, 0xdf, 0x13, 0xbb, 0x86, 0xe9, 0xb8, 0x34, 0xfb, 0xf5, 0x7b, 0x6c, - 0x25, 0x3d, 0xfa, 0x93, 0x7b, 0xed, 0xcd, 0xc9, 0x73, 0xd6, 0x74, 0xd4, 0xf6, 0xcb, 0x9c, 0xab, - 0x75, 0xa1, 0x57, 0x74, 0xce, 0x6b, 0xdb, 0x0e, 0xc3, 0x4b, 0x21, 0xe4, 0x70, 0x8c, 0xe6, 0xcb, - 0x89, 0xab, 0xfb, 0xe2, 0x2b, 0x00, 0x00, 0xff, 0xff, 0x9e, 0xbb, 0xf6, 0xcc, 0xa0, 0x01, 0x00, - 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// MetaServiceClient is the client API for MetaService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type MetaServiceClient interface { - GetInfo2(ctx context.Context, in *GetInfo2Request, opts ...grpc.CallOption) (*GetInfo2Response, error) -} - -type metaServiceClient struct { - cc *grpc.ClientConn -} - -func NewMetaServiceClient(cc *grpc.ClientConn) MetaServiceClient { - return &metaServiceClient{cc} -} - -func (c *metaServiceClient) GetInfo2(ctx context.Context, in *GetInfo2Request, opts ...grpc.CallOption) (*GetInfo2Response, error) { - out := new(GetInfo2Response) - err := c.cc.Invoke(ctx, "/lnrpc.MetaService/GetInfo2", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// MetaServiceServer is the server API for MetaService service. -type MetaServiceServer interface { - GetInfo2(context.Context, *GetInfo2Request) (*GetInfo2Response, error) -} - -// UnimplementedMetaServiceServer can be embedded to have forward compatible implementations. 
-type UnimplementedMetaServiceServer struct { -} - -func (*UnimplementedMetaServiceServer) GetInfo2(ctx context.Context, req *GetInfo2Request) (*GetInfo2Response, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetInfo2 not implemented") -} - -func RegisterMetaServiceServer(s *grpc.Server, srv MetaServiceServer) { - s.RegisterService(&_MetaService_serviceDesc, srv) -} - -func _MetaService_GetInfo2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetInfo2Request) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MetaServiceServer).GetInfo2(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lnrpc.MetaService/GetInfo2", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MetaServiceServer).GetInfo2(ctx, req.(*GetInfo2Request)) - } - return interceptor(ctx, in, info, handler) -} - -var _MetaService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "lnrpc.MetaService", - HandlerType: (*MetaServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "GetInfo2", - Handler: _MetaService_GetInfo2_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "metaservice.proto", -} diff --git a/lnd/lnrpc/metaservice.proto b/lnd/lnrpc/metaservice.proto deleted file mode 100644 index 0c9cc8b1..00000000 --- a/lnd/lnrpc/metaservice.proto +++ /dev/null @@ -1,22 +0,0 @@ -syntax = "proto3"; - -package lnrpc; - -import "pkt.proto"; -import "rpc.proto"; - -option go_package = "github.com/pkt-cash/pktd/lnd/lnrpc"; - -service MetaService { - rpc GetInfo2 (GetInfo2Request) returns (GetInfo2Response); -} - -message GetInfo2Request { - GetInfoResponse InfoResponse = 1; -} - -message GetInfo2Response { - NeutrinoInfo neutrino = 1; - WalletInfo wallet = 2; - GetInfoResponse lightning = 3; -} \ No newline at end of file diff --git 
a/lnd/lnrpc/metaservice.swagger.json b/lnd/lnrpc/metaservice.swagger.json deleted file mode 100644 index 7dc1c4ea..00000000 --- a/lnd/lnrpc/metaservice.swagger.json +++ /dev/null @@ -1,437 +0,0 @@ -{ - "swagger": "2.0", - "info": { - "title": "metaservice.proto", - "version": "version not set" - }, - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "paths": {}, - "definitions": { - "lnrpcChain": { - "type": "object", - "properties": { - "chain": { - "type": "string", - "title": "The blockchain the node is on (eg bitcoin, litecoin)" - }, - "network": { - "type": "string", - "title": "The network the node is on (eg regtest, testnet, mainnet)" - } - } - }, - "lnrpcFeature": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "is_required": { - "type": "boolean", - "format": "boolean" - }, - "is_known": { - "type": "boolean", - "format": "boolean" - } - } - }, - "lnrpcGetInfo2Response": { - "type": "object", - "properties": { - "neutrino": { - "$ref": "#/definitions/lnrpcNeutrinoInfo" - }, - "wallet": { - "$ref": "#/definitions/lnrpcWalletInfo" - }, - "lightning": { - "$ref": "#/definitions/lnrpcGetInfoResponse" - } - } - }, - "lnrpcGetInfoResponse": { - "type": "object", - "properties": { - "version": { - "type": "string", - "description": "The version of the LND software that the node is running." - }, - "commit_hash": { - "type": "string", - "description": "The SHA1 commit hash that the daemon is compiled with." - }, - "identity_pubkey": { - "type": "string", - "description": "The identity pubkey of the current node." - }, - "alias": { - "type": "string", - "title": "If applicable, the alias of the current node, e.g. 
\"bob\"" - }, - "color": { - "type": "string", - "title": "The color of the current node in hex code format" - }, - "num_pending_channels": { - "type": "integer", - "format": "int64", - "title": "Number of pending channels" - }, - "num_active_channels": { - "type": "integer", - "format": "int64", - "title": "Number of active channels" - }, - "num_inactive_channels": { - "type": "integer", - "format": "int64", - "title": "Number of inactive channels" - }, - "num_peers": { - "type": "integer", - "format": "int64", - "title": "Number of peers" - }, - "block_height": { - "type": "integer", - "format": "int64", - "title": "The node's current view of the height of the best block" - }, - "block_hash": { - "type": "string", - "title": "The node's current view of the hash of the best block" - }, - "best_header_timestamp": { - "type": "string", - "format": "int64", - "title": "Timestamp of the block best known to the wallet" - }, - "synced_to_chain": { - "type": "boolean", - "format": "boolean", - "title": "Whether the wallet's view is synced to the main chain" - }, - "synced_to_graph": { - "type": "boolean", - "format": "boolean", - "description": "Whether we consider ourselves synced with the public channel graph." - }, - "testnet": { - "type": "boolean", - "format": "boolean", - "title": "Whether the current node is connected to testnet. This field is\ndeprecated and the network field should be used instead" - }, - "chains": { - "type": "array", - "items": { - "$ref": "#/definitions/lnrpcChain" - }, - "title": "A list of active chains the node is connected to" - }, - "uris": { - "type": "array", - "items": { - "type": "string" - }, - "description": "The URIs of the current node." - }, - "features": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/lnrpcFeature" - }, - "description": "Features that our node has advertised in our init message, node\nannouncements and invoices." 
- } - } - }, - "lnrpcNeutrinoBan": { - "type": "object", - "properties": { - "addr": { - "type": "string" - }, - "reason": { - "type": "string" - }, - "end_time": { - "type": "string" - } - } - }, - "lnrpcNeutrinoInfo": { - "type": "object", - "properties": { - "peers": { - "type": "array", - "items": { - "$ref": "#/definitions/lnrpcPeerDesc" - } - }, - "bans": { - "type": "array", - "items": { - "$ref": "#/definitions/lnrpcNeutrinoBan" - } - }, - "queries": { - "type": "array", - "items": { - "$ref": "#/definitions/lnrpcNeutrinoQuery" - } - }, - "block_hash": { - "type": "string" - }, - "height": { - "type": "integer", - "format": "int32" - }, - "block_timestamp": { - "type": "string" - }, - "is_syncing": { - "type": "boolean", - "format": "boolean" - } - } - }, - "lnrpcNeutrinoQuery": { - "type": "object", - "properties": { - "peer": { - "type": "string" - }, - "command": { - "type": "string" - }, - "req_num": { - "type": "integer", - "format": "int64" - }, - "create_time": { - "type": "integer", - "format": "int64" - }, - "last_request_time": { - "type": "integer", - "format": "int64" - }, - "last_response_time": { - "type": "integer", - "format": "int64" - } - } - }, - "lnrpcPeerDesc": { - "type": "object", - "properties": { - "bytes_received": { - "type": "string", - "format": "uint64" - }, - "bytes_sent": { - "type": "string", - "format": "uint64" - }, - "last_recv": { - "type": "string" - }, - "last_send": { - "type": "string" - }, - "connected": { - "type": "boolean", - "format": "boolean" - }, - "addr": { - "type": "string" - }, - "inbound": { - "type": "boolean", - "format": "boolean" - }, - "na": { - "type": "string" - }, - "id": { - "type": "integer", - "format": "int32" - }, - "user_agent": { - "type": "string" - }, - "services": { - "type": "string" - }, - "version_known": { - "type": "boolean", - "format": "boolean" - }, - "advertised_proto_ver": { - "type": "integer", - "format": "int64" - }, - "protocol_version": { - "type": "integer", - "format": 
"int64" - }, - "send_headers_preferred": { - "type": "boolean", - "format": "boolean" - }, - "ver_ack_received": { - "type": "boolean", - "format": "boolean" - }, - "witness_enabled": { - "type": "boolean", - "format": "boolean" - }, - "wire_encoding": { - "type": "string" - }, - "time_offset": { - "type": "string", - "format": "int64" - }, - "time_connected": { - "type": "string" - }, - "starting_height": { - "type": "integer", - "format": "int32" - }, - "last_block": { - "type": "integer", - "format": "int32" - }, - "last_announced_block": { - "type": "string", - "format": "byte" - }, - "last_ping_nonce": { - "type": "string", - "format": "uint64" - }, - "last_ping_time": { - "type": "string" - }, - "last_ping_micros": { - "type": "string", - "format": "int64" - } - } - }, - "lnrpcWalletInfo": { - "type": "object", - "properties": { - "current_block_hash": { - "type": "string" - }, - "current_height": { - "type": "integer", - "format": "int32" - }, - "current_block_timestamp": { - "type": "string" - }, - "wallet_version": { - "type": "integer", - "format": "int32" - }, - "wallet_stats": { - "$ref": "#/definitions/lnrpcWalletStats" - } - } - }, - "lnrpcWalletStats": { - "type": "object", - "properties": { - "maintenance_in_progress": { - "type": "boolean", - "format": "boolean" - }, - "maintenance_name": { - "type": "string" - }, - "maintenance_cycles": { - "type": "integer", - "format": "int32" - }, - "maintenance_last_block_visited": { - "type": "integer", - "format": "int32" - }, - "time_of_last_maintenance": { - "type": "string" - }, - "syncing": { - "type": "boolean", - "format": "boolean" - }, - "sync_started": { - "type": "string" - }, - "sync_remaining_seconds": { - "type": "string", - "format": "int64" - }, - "sync_current_block": { - "type": "integer", - "format": "int32" - }, - "sync_from": { - "type": "integer", - "format": "int32" - }, - "sync_to": { - "type": "integer", - "format": "int32" - }, - "birthday_block": { - "type": "integer", - "format": 
"int32" - } - } - }, - "protobufAny": { - "type": "object", - "properties": { - "type_url": { - "type": "string" - }, - "value": { - "type": "string", - "format": "byte" - } - } - }, - "runtimeError": { - "type": "object", - "properties": { - "error": { - "type": "string" - }, - "code": { - "type": "integer", - "format": "int32" - }, - "message": { - "type": "string" - }, - "details": { - "type": "array", - "items": { - "$ref": "#/definitions/protobufAny" - } - } - } - } - } -} diff --git a/lnd/lnrpc/pkt.pb.go b/lnd/lnrpc/pkt.pb.go deleted file mode 100644 index a5fe87b0..00000000 --- a/lnd/lnrpc/pkt.pb.go +++ /dev/null @@ -1,767 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: pkt.proto - -package lnrpc - -import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -type NeutrinoBan struct { - Addr string `protobuf:"bytes,1,opt,name=addr,proto3" json:"addr,omitempty"` - Reason string `protobuf:"bytes,2,opt,name=reason,proto3" json:"reason,omitempty"` - EndTime string `protobuf:"bytes,3,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NeutrinoBan) Reset() { *m = NeutrinoBan{} } -func (m *NeutrinoBan) String() string { return proto.CompactTextString(m) } -func (*NeutrinoBan) ProtoMessage() {} -func (*NeutrinoBan) Descriptor() ([]byte, []int) { - return fileDescriptor_3c5f63a845a51abc, []int{0} -} - -func (m *NeutrinoBan) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NeutrinoBan.Unmarshal(m, b) -} -func (m *NeutrinoBan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NeutrinoBan.Marshal(b, m, deterministic) -} -func (m *NeutrinoBan) XXX_Merge(src proto.Message) { - xxx_messageInfo_NeutrinoBan.Merge(m, src) -} -func (m *NeutrinoBan) XXX_Size() int { - return xxx_messageInfo_NeutrinoBan.Size(m) -} -func (m *NeutrinoBan) XXX_DiscardUnknown() { - xxx_messageInfo_NeutrinoBan.DiscardUnknown(m) -} - -var xxx_messageInfo_NeutrinoBan proto.InternalMessageInfo - -func (m *NeutrinoBan) GetAddr() string { - if m != nil { - return m.Addr - } - return "" -} - -func (m *NeutrinoBan) GetReason() string { - if m != nil { - return m.Reason - } - return "" -} - -func (m *NeutrinoBan) GetEndTime() string { - if m != nil { - return m.EndTime - } - return "" -} - -type NeutrinoQuery struct { - Peer string `protobuf:"bytes,1,opt,name=peer,proto3" json:"peer,omitempty"` - Command string `protobuf:"bytes,2,opt,name=command,proto3" json:"command,omitempty"` - ReqNum uint32 `protobuf:"varint,3,opt,name=req_num,json=reqNum,proto3" json:"req_num,omitempty"` - CreateTime uint32 
`protobuf:"varint,4,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` - LastRequestTime uint32 `protobuf:"varint,5,opt,name=last_request_time,json=lastRequestTime,proto3" json:"last_request_time,omitempty"` - LastResponseTime uint32 `protobuf:"varint,6,opt,name=last_response_time,json=lastResponseTime,proto3" json:"last_response_time,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NeutrinoQuery) Reset() { *m = NeutrinoQuery{} } -func (m *NeutrinoQuery) String() string { return proto.CompactTextString(m) } -func (*NeutrinoQuery) ProtoMessage() {} -func (*NeutrinoQuery) Descriptor() ([]byte, []int) { - return fileDescriptor_3c5f63a845a51abc, []int{1} -} - -func (m *NeutrinoQuery) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NeutrinoQuery.Unmarshal(m, b) -} -func (m *NeutrinoQuery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NeutrinoQuery.Marshal(b, m, deterministic) -} -func (m *NeutrinoQuery) XXX_Merge(src proto.Message) { - xxx_messageInfo_NeutrinoQuery.Merge(m, src) -} -func (m *NeutrinoQuery) XXX_Size() int { - return xxx_messageInfo_NeutrinoQuery.Size(m) -} -func (m *NeutrinoQuery) XXX_DiscardUnknown() { - xxx_messageInfo_NeutrinoQuery.DiscardUnknown(m) -} - -var xxx_messageInfo_NeutrinoQuery proto.InternalMessageInfo - -func (m *NeutrinoQuery) GetPeer() string { - if m != nil { - return m.Peer - } - return "" -} - -func (m *NeutrinoQuery) GetCommand() string { - if m != nil { - return m.Command - } - return "" -} - -func (m *NeutrinoQuery) GetReqNum() uint32 { - if m != nil { - return m.ReqNum - } - return 0 -} - -func (m *NeutrinoQuery) GetCreateTime() uint32 { - if m != nil { - return m.CreateTime - } - return 0 -} - -func (m *NeutrinoQuery) GetLastRequestTime() uint32 { - if m != nil { - return m.LastRequestTime - } - return 0 -} - -func (m *NeutrinoQuery) GetLastResponseTime() uint32 { - if 
m != nil { - return m.LastResponseTime - } - return 0 -} - -type NeutrinoInfo struct { - Peers []*PeerDesc `protobuf:"bytes,1,rep,name=peers,proto3" json:"peers,omitempty"` - Bans []*NeutrinoBan `protobuf:"bytes,2,rep,name=bans,proto3" json:"bans,omitempty"` - Queries []*NeutrinoQuery `protobuf:"bytes,3,rep,name=queries,proto3" json:"queries,omitempty"` - BlockHash string `protobuf:"bytes,4,opt,name=block_hash,json=blockHash,proto3" json:"block_hash,omitempty"` - Height int32 `protobuf:"varint,5,opt,name=height,proto3" json:"height,omitempty"` - BlockTimestamp string `protobuf:"bytes,6,opt,name=block_timestamp,json=blockTimestamp,proto3" json:"block_timestamp,omitempty"` - IsSyncing bool `protobuf:"varint,7,opt,name=is_syncing,json=isSyncing,proto3" json:"is_syncing,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NeutrinoInfo) Reset() { *m = NeutrinoInfo{} } -func (m *NeutrinoInfo) String() string { return proto.CompactTextString(m) } -func (*NeutrinoInfo) ProtoMessage() {} -func (*NeutrinoInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_3c5f63a845a51abc, []int{2} -} - -func (m *NeutrinoInfo) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NeutrinoInfo.Unmarshal(m, b) -} -func (m *NeutrinoInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NeutrinoInfo.Marshal(b, m, deterministic) -} -func (m *NeutrinoInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_NeutrinoInfo.Merge(m, src) -} -func (m *NeutrinoInfo) XXX_Size() int { - return xxx_messageInfo_NeutrinoInfo.Size(m) -} -func (m *NeutrinoInfo) XXX_DiscardUnknown() { - xxx_messageInfo_NeutrinoInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_NeutrinoInfo proto.InternalMessageInfo - -func (m *NeutrinoInfo) GetPeers() []*PeerDesc { - if m != nil { - return m.Peers - } - return nil -} - -func (m *NeutrinoInfo) GetBans() []*NeutrinoBan { - if m != nil { - return m.Bans 
- } - return nil -} - -func (m *NeutrinoInfo) GetQueries() []*NeutrinoQuery { - if m != nil { - return m.Queries - } - return nil -} - -func (m *NeutrinoInfo) GetBlockHash() string { - if m != nil { - return m.BlockHash - } - return "" -} - -func (m *NeutrinoInfo) GetHeight() int32 { - if m != nil { - return m.Height - } - return 0 -} - -func (m *NeutrinoInfo) GetBlockTimestamp() string { - if m != nil { - return m.BlockTimestamp - } - return "" -} - -func (m *NeutrinoInfo) GetIsSyncing() bool { - if m != nil { - return m.IsSyncing - } - return false -} - -type WalletInfo struct { - CurrentBlockHash string `protobuf:"bytes,1,opt,name=current_block_hash,json=currentBlockHash,proto3" json:"current_block_hash,omitempty"` - CurrentHeight int32 `protobuf:"varint,2,opt,name=current_height,json=currentHeight,proto3" json:"current_height,omitempty"` - CurrentBlockTimestamp string `protobuf:"bytes,3,opt,name=current_block_timestamp,json=currentBlockTimestamp,proto3" json:"current_block_timestamp,omitempty"` - WalletVersion int32 `protobuf:"varint,4,opt,name=wallet_version,json=walletVersion,proto3" json:"wallet_version,omitempty"` - WalletStats *WalletStats `protobuf:"bytes,5,opt,name=wallet_stats,json=walletStats,proto3" json:"wallet_stats,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *WalletInfo) Reset() { *m = WalletInfo{} } -func (m *WalletInfo) String() string { return proto.CompactTextString(m) } -func (*WalletInfo) ProtoMessage() {} -func (*WalletInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_3c5f63a845a51abc, []int{3} -} - -func (m *WalletInfo) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_WalletInfo.Unmarshal(m, b) -} -func (m *WalletInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_WalletInfo.Marshal(b, m, deterministic) -} -func (m *WalletInfo) XXX_Merge(src proto.Message) { - 
xxx_messageInfo_WalletInfo.Merge(m, src) -} -func (m *WalletInfo) XXX_Size() int { - return xxx_messageInfo_WalletInfo.Size(m) -} -func (m *WalletInfo) XXX_DiscardUnknown() { - xxx_messageInfo_WalletInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_WalletInfo proto.InternalMessageInfo - -func (m *WalletInfo) GetCurrentBlockHash() string { - if m != nil { - return m.CurrentBlockHash - } - return "" -} - -func (m *WalletInfo) GetCurrentHeight() int32 { - if m != nil { - return m.CurrentHeight - } - return 0 -} - -func (m *WalletInfo) GetCurrentBlockTimestamp() string { - if m != nil { - return m.CurrentBlockTimestamp - } - return "" -} - -func (m *WalletInfo) GetWalletVersion() int32 { - if m != nil { - return m.WalletVersion - } - return 0 -} - -func (m *WalletInfo) GetWalletStats() *WalletStats { - if m != nil { - return m.WalletStats - } - return nil -} - -type PeerDesc struct { - BytesReceived uint64 `protobuf:"varint,1,opt,name=bytes_received,json=bytesReceived,proto3" json:"bytes_received,omitempty"` - BytesSent uint64 `protobuf:"varint,2,opt,name=bytes_sent,json=bytesSent,proto3" json:"bytes_sent,omitempty"` - LastRecv string `protobuf:"bytes,3,opt,name=last_recv,json=lastRecv,proto3" json:"last_recv,omitempty"` - LastSend string `protobuf:"bytes,4,opt,name=last_send,json=lastSend,proto3" json:"last_send,omitempty"` - Connected bool `protobuf:"varint,5,opt,name=connected,proto3" json:"connected,omitempty"` - Addr string `protobuf:"bytes,6,opt,name=addr,proto3" json:"addr,omitempty"` - Inbound bool `protobuf:"varint,7,opt,name=inbound,proto3" json:"inbound,omitempty"` - Na string `protobuf:"bytes,8,opt,name=na,proto3" json:"na,omitempty"` - Id int32 `protobuf:"varint,9,opt,name=id,proto3" json:"id,omitempty"` - UserAgent string `protobuf:"bytes,10,opt,name=user_agent,json=userAgent,proto3" json:"user_agent,omitempty"` - Services string `protobuf:"bytes,11,opt,name=services,proto3" json:"services,omitempty"` - VersionKnown bool 
`protobuf:"varint,12,opt,name=version_known,json=versionKnown,proto3" json:"version_known,omitempty"` - AdvertisedProtoVer uint32 `protobuf:"varint,13,opt,name=advertised_proto_ver,json=advertisedProtoVer,proto3" json:"advertised_proto_ver,omitempty"` - ProtocolVersion uint32 `protobuf:"varint,14,opt,name=protocol_version,json=protocolVersion,proto3" json:"protocol_version,omitempty"` - SendHeadersPreferred bool `protobuf:"varint,15,opt,name=send_headers_preferred,json=sendHeadersPreferred,proto3" json:"send_headers_preferred,omitempty"` - VerAckReceived bool `protobuf:"varint,16,opt,name=ver_ack_received,json=verAckReceived,proto3" json:"ver_ack_received,omitempty"` - WitnessEnabled bool `protobuf:"varint,17,opt,name=witness_enabled,json=witnessEnabled,proto3" json:"witness_enabled,omitempty"` - WireEncoding string `protobuf:"bytes,18,opt,name=wire_encoding,json=wireEncoding,proto3" json:"wire_encoding,omitempty"` - TimeOffset int64 `protobuf:"varint,19,opt,name=time_offset,json=timeOffset,proto3" json:"time_offset,omitempty"` - TimeConnected string `protobuf:"bytes,20,opt,name=time_connected,json=timeConnected,proto3" json:"time_connected,omitempty"` - StartingHeight int32 `protobuf:"varint,21,opt,name=starting_height,json=startingHeight,proto3" json:"starting_height,omitempty"` - LastBlock int32 `protobuf:"varint,22,opt,name=last_block,json=lastBlock,proto3" json:"last_block,omitempty"` - LastAnnouncedBlock []byte `protobuf:"bytes,23,opt,name=last_announced_block,json=lastAnnouncedBlock,proto3" json:"last_announced_block,omitempty"` - LastPingNonce uint64 `protobuf:"varint,24,opt,name=last_ping_nonce,json=lastPingNonce,proto3" json:"last_ping_nonce,omitempty"` - LastPingTime string `protobuf:"bytes,25,opt,name=last_ping_time,json=lastPingTime,proto3" json:"last_ping_time,omitempty"` - LastPingMicros int64 `protobuf:"varint,26,opt,name=last_ping_micros,json=lastPingMicros,proto3" json:"last_ping_micros,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - 
XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PeerDesc) Reset() { *m = PeerDesc{} } -func (m *PeerDesc) String() string { return proto.CompactTextString(m) } -func (*PeerDesc) ProtoMessage() {} -func (*PeerDesc) Descriptor() ([]byte, []int) { - return fileDescriptor_3c5f63a845a51abc, []int{4} -} - -func (m *PeerDesc) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PeerDesc.Unmarshal(m, b) -} -func (m *PeerDesc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PeerDesc.Marshal(b, m, deterministic) -} -func (m *PeerDesc) XXX_Merge(src proto.Message) { - xxx_messageInfo_PeerDesc.Merge(m, src) -} -func (m *PeerDesc) XXX_Size() int { - return xxx_messageInfo_PeerDesc.Size(m) -} -func (m *PeerDesc) XXX_DiscardUnknown() { - xxx_messageInfo_PeerDesc.DiscardUnknown(m) -} - -var xxx_messageInfo_PeerDesc proto.InternalMessageInfo - -func (m *PeerDesc) GetBytesReceived() uint64 { - if m != nil { - return m.BytesReceived - } - return 0 -} - -func (m *PeerDesc) GetBytesSent() uint64 { - if m != nil { - return m.BytesSent - } - return 0 -} - -func (m *PeerDesc) GetLastRecv() string { - if m != nil { - return m.LastRecv - } - return "" -} - -func (m *PeerDesc) GetLastSend() string { - if m != nil { - return m.LastSend - } - return "" -} - -func (m *PeerDesc) GetConnected() bool { - if m != nil { - return m.Connected - } - return false -} - -func (m *PeerDesc) GetAddr() string { - if m != nil { - return m.Addr - } - return "" -} - -func (m *PeerDesc) GetInbound() bool { - if m != nil { - return m.Inbound - } - return false -} - -func (m *PeerDesc) GetNa() string { - if m != nil { - return m.Na - } - return "" -} - -func (m *PeerDesc) GetId() int32 { - if m != nil { - return m.Id - } - return 0 -} - -func (m *PeerDesc) GetUserAgent() string { - if m != nil { - return m.UserAgent - } - return "" -} - -func (m *PeerDesc) GetServices() string { - if m != nil { - return m.Services - } - return "" -} - 
-func (m *PeerDesc) GetVersionKnown() bool { - if m != nil { - return m.VersionKnown - } - return false -} - -func (m *PeerDesc) GetAdvertisedProtoVer() uint32 { - if m != nil { - return m.AdvertisedProtoVer - } - return 0 -} - -func (m *PeerDesc) GetProtocolVersion() uint32 { - if m != nil { - return m.ProtocolVersion - } - return 0 -} - -func (m *PeerDesc) GetSendHeadersPreferred() bool { - if m != nil { - return m.SendHeadersPreferred - } - return false -} - -func (m *PeerDesc) GetVerAckReceived() bool { - if m != nil { - return m.VerAckReceived - } - return false -} - -func (m *PeerDesc) GetWitnessEnabled() bool { - if m != nil { - return m.WitnessEnabled - } - return false -} - -func (m *PeerDesc) GetWireEncoding() string { - if m != nil { - return m.WireEncoding - } - return "" -} - -func (m *PeerDesc) GetTimeOffset() int64 { - if m != nil { - return m.TimeOffset - } - return 0 -} - -func (m *PeerDesc) GetTimeConnected() string { - if m != nil { - return m.TimeConnected - } - return "" -} - -func (m *PeerDesc) GetStartingHeight() int32 { - if m != nil { - return m.StartingHeight - } - return 0 -} - -func (m *PeerDesc) GetLastBlock() int32 { - if m != nil { - return m.LastBlock - } - return 0 -} - -func (m *PeerDesc) GetLastAnnouncedBlock() []byte { - if m != nil { - return m.LastAnnouncedBlock - } - return nil -} - -func (m *PeerDesc) GetLastPingNonce() uint64 { - if m != nil { - return m.LastPingNonce - } - return 0 -} - -func (m *PeerDesc) GetLastPingTime() string { - if m != nil { - return m.LastPingTime - } - return "" -} - -func (m *PeerDesc) GetLastPingMicros() int64 { - if m != nil { - return m.LastPingMicros - } - return 0 -} - -type WalletStats struct { - MaintenanceInProgress bool `protobuf:"varint,1,opt,name=maintenance_in_progress,json=maintenanceInProgress,proto3" json:"maintenance_in_progress,omitempty"` - MaintenanceName string `protobuf:"bytes,2,opt,name=maintenance_name,json=maintenanceName,proto3" json:"maintenance_name,omitempty"` - 
MaintenanceCycles int32 `protobuf:"varint,3,opt,name=maintenance_cycles,json=maintenanceCycles,proto3" json:"maintenance_cycles,omitempty"` - MaintenanceLastBlockVisited int32 `protobuf:"varint,4,opt,name=maintenance_last_block_visited,json=maintenanceLastBlockVisited,proto3" json:"maintenance_last_block_visited,omitempty"` - TimeOfLastMaintenance string `protobuf:"bytes,5,opt,name=time_of_last_maintenance,json=timeOfLastMaintenance,proto3" json:"time_of_last_maintenance,omitempty"` - Syncing bool `protobuf:"varint,6,opt,name=syncing,proto3" json:"syncing,omitempty"` - SyncStarted string `protobuf:"bytes,7,opt,name=sync_started,json=syncStarted,proto3" json:"sync_started,omitempty"` - SyncRemainingSeconds int64 `protobuf:"varint,8,opt,name=sync_remaining_seconds,json=syncRemainingSeconds,proto3" json:"sync_remaining_seconds,omitempty"` - SyncCurrentBlock int32 `protobuf:"varint,9,opt,name=sync_current_block,json=syncCurrentBlock,proto3" json:"sync_current_block,omitempty"` - SyncFrom int32 `protobuf:"varint,10,opt,name=sync_from,json=syncFrom,proto3" json:"sync_from,omitempty"` - SyncTo int32 `protobuf:"varint,11,opt,name=sync_to,json=syncTo,proto3" json:"sync_to,omitempty"` - BirthdayBlock int32 `protobuf:"varint,12,opt,name=birthday_block,json=birthdayBlock,proto3" json:"birthday_block,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *WalletStats) Reset() { *m = WalletStats{} } -func (m *WalletStats) String() string { return proto.CompactTextString(m) } -func (*WalletStats) ProtoMessage() {} -func (*WalletStats) Descriptor() ([]byte, []int) { - return fileDescriptor_3c5f63a845a51abc, []int{5} -} - -func (m *WalletStats) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_WalletStats.Unmarshal(m, b) -} -func (m *WalletStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_WalletStats.Marshal(b, m, deterministic) -} -func (m 
*WalletStats) XXX_Merge(src proto.Message) { - xxx_messageInfo_WalletStats.Merge(m, src) -} -func (m *WalletStats) XXX_Size() int { - return xxx_messageInfo_WalletStats.Size(m) -} -func (m *WalletStats) XXX_DiscardUnknown() { - xxx_messageInfo_WalletStats.DiscardUnknown(m) -} - -var xxx_messageInfo_WalletStats proto.InternalMessageInfo - -func (m *WalletStats) GetMaintenanceInProgress() bool { - if m != nil { - return m.MaintenanceInProgress - } - return false -} - -func (m *WalletStats) GetMaintenanceName() string { - if m != nil { - return m.MaintenanceName - } - return "" -} - -func (m *WalletStats) GetMaintenanceCycles() int32 { - if m != nil { - return m.MaintenanceCycles - } - return 0 -} - -func (m *WalletStats) GetMaintenanceLastBlockVisited() int32 { - if m != nil { - return m.MaintenanceLastBlockVisited - } - return 0 -} - -func (m *WalletStats) GetTimeOfLastMaintenance() string { - if m != nil { - return m.TimeOfLastMaintenance - } - return "" -} - -func (m *WalletStats) GetSyncing() bool { - if m != nil { - return m.Syncing - } - return false -} - -func (m *WalletStats) GetSyncStarted() string { - if m != nil { - return m.SyncStarted - } - return "" -} - -func (m *WalletStats) GetSyncRemainingSeconds() int64 { - if m != nil { - return m.SyncRemainingSeconds - } - return 0 -} - -func (m *WalletStats) GetSyncCurrentBlock() int32 { - if m != nil { - return m.SyncCurrentBlock - } - return 0 -} - -func (m *WalletStats) GetSyncFrom() int32 { - if m != nil { - return m.SyncFrom - } - return 0 -} - -func (m *WalletStats) GetSyncTo() int32 { - if m != nil { - return m.SyncTo - } - return 0 -} - -func (m *WalletStats) GetBirthdayBlock() int32 { - if m != nil { - return m.BirthdayBlock - } - return 0 -} - -func init() { - proto.RegisterType((*NeutrinoBan)(nil), "lnrpc.NeutrinoBan") - proto.RegisterType((*NeutrinoQuery)(nil), "lnrpc.NeutrinoQuery") - proto.RegisterType((*NeutrinoInfo)(nil), "lnrpc.NeutrinoInfo") - proto.RegisterType((*WalletInfo)(nil), 
"lnrpc.WalletInfo") - proto.RegisterType((*PeerDesc)(nil), "lnrpc.PeerDesc") - proto.RegisterType((*WalletStats)(nil), "lnrpc.WalletStats") -} - -func init() { proto.RegisterFile("pkt.proto", fileDescriptor_3c5f63a845a51abc) } - -var fileDescriptor_3c5f63a845a51abc = []byte{ - // 1162 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x56, 0xdb, 0x6e, 0x1b, 0xb7, - 0x16, 0x85, 0x64, 0xcb, 0x92, 0xb6, 0xae, 0xe1, 0xb1, 0x93, 0x49, 0x72, 0x72, 0x8e, 0xeb, 0xe6, - 0xa2, 0x16, 0x89, 0x53, 0xf4, 0xfa, 0x9c, 0xb8, 0x29, 0x12, 0xb4, 0x71, 0xdd, 0xb1, 0x91, 0x02, - 0x7d, 0x19, 0x50, 0x9c, 0x6d, 0x89, 0xb0, 0x86, 0x23, 0x93, 0x94, 0x0c, 0xff, 0x43, 0xff, 0x20, - 0x5f, 0xd5, 0xaf, 0xe9, 0x6b, 0xb1, 0x37, 0x39, 0xd2, 0xa4, 0x0f, 0x06, 0x86, 0x6b, 0xad, 0xd9, - 0xc3, 0x7d, 0x5b, 0x32, 0x74, 0x97, 0x57, 0xfe, 0x78, 0x69, 0x4b, 0x5f, 0x8a, 0xd6, 0xc2, 0xd8, - 0xa5, 0x3a, 0xba, 0x80, 0xde, 0x29, 0xae, 0xbc, 0xd5, 0xa6, 0x7c, 0x2d, 0x8d, 0x10, 0xb0, 0x2b, - 0xf3, 0xdc, 0x26, 0x8d, 0xc3, 0xc6, 0xa4, 0x9b, 0xf2, 0xb3, 0xb8, 0x0b, 0x7b, 0x16, 0xa5, 0x2b, - 0x4d, 0xd2, 0x64, 0x34, 0x9e, 0xc4, 0x7d, 0xe8, 0xa0, 0xc9, 0x33, 0xaf, 0x0b, 0x4c, 0x76, 0x98, - 0x69, 0xa3, 0xc9, 0x2f, 0x74, 0x81, 0x47, 0x7f, 0x35, 0x60, 0x50, 0x85, 0xfd, 0x6d, 0x85, 0xf6, - 0x96, 0x02, 0x2f, 0x11, 0x37, 0x81, 0xe9, 0x59, 0x24, 0xd0, 0x56, 0x65, 0x51, 0x48, 0x93, 0xc7, - 0xc8, 0xd5, 0x51, 0xdc, 0x83, 0xb6, 0xc5, 0xeb, 0xcc, 0xac, 0x0a, 0x8e, 0x3c, 0xa0, 0x6f, 0x5e, - 0x9f, 0xae, 0x0a, 0xf1, 0x7f, 0xe8, 0x29, 0x8b, 0xd2, 0x63, 0xf8, 0xec, 0x2e, 0x93, 0x10, 0x20, - 0xfa, 0xb2, 0xf8, 0x12, 0xee, 0x2c, 0xa4, 0xf3, 0x99, 0xc5, 0xeb, 0x15, 0x3a, 0x1f, 0x64, 0x2d, - 0x96, 0x8d, 0x88, 0x48, 0x03, 0xce, 0xda, 0xe7, 0x20, 0xa2, 0xd6, 0x2d, 0x4b, 0xe3, 0x62, 0xcc, - 0x3d, 0x16, 0x8f, 0x83, 0x38, 0x10, 0x9c, 0xd3, 0x9f, 0x4d, 0xe8, 0x57, 0x39, 0xbd, 0x33, 0x97, - 0xa5, 0x78, 0x02, 0x2d, 0x4a, 0xc3, 0x25, 0x8d, 0xc3, 0x9d, 0x49, 0xef, 0xeb, 0xd1, 0x31, 0x57, - 0xf4, 
0xf8, 0x0c, 0xd1, 0xfe, 0x88, 0x4e, 0xa5, 0x81, 0x15, 0x4f, 0x61, 0x77, 0x2a, 0x8d, 0x4b, - 0x9a, 0xac, 0x12, 0x51, 0x55, 0x2b, 0x7a, 0xca, 0xbc, 0x38, 0x86, 0xf6, 0xf5, 0x0a, 0xad, 0x46, - 0x97, 0xec, 0xb0, 0x74, 0xff, 0x5f, 0x52, 0x2e, 0x64, 0x5a, 0x89, 0xc4, 0x23, 0x80, 0xe9, 0xa2, - 0x54, 0x57, 0xd9, 0x5c, 0xba, 0x39, 0x57, 0xa2, 0x9b, 0x76, 0x19, 0x79, 0x2b, 0xdd, 0x9c, 0xba, - 0x36, 0x47, 0x3d, 0x9b, 0x7b, 0xce, 0xbe, 0x95, 0xc6, 0x93, 0x78, 0x06, 0xa3, 0xf0, 0x1a, 0x25, - 0xeb, 0xbc, 0x2c, 0x96, 0x9c, 0x71, 0x37, 0x1d, 0x32, 0x7c, 0x51, 0xa1, 0x14, 0x5f, 0xbb, 0xcc, - 0xdd, 0x1a, 0xa5, 0xcd, 0x2c, 0x69, 0x1f, 0x36, 0x26, 0x9d, 0xb4, 0xab, 0xdd, 0x79, 0x00, 0x8e, - 0xfe, 0x6e, 0x00, 0xfc, 0x2e, 0x17, 0x0b, 0xf4, 0x5c, 0x8c, 0xe7, 0x20, 0xd4, 0xca, 0x5a, 0x34, - 0x3e, 0xab, 0xdd, 0x2a, 0x74, 0x7b, 0x1c, 0x99, 0xd7, 0x9b, 0xcb, 0x3d, 0x81, 0x61, 0xa5, 0x8e, - 0x97, 0x6c, 0xf2, 0x25, 0x07, 0x11, 0x7d, 0x1b, 0xee, 0xfa, 0x3d, 0xdc, 0xfb, 0x34, 0xe8, 0xf6, - 0xce, 0x61, 0xe0, 0x0e, 0xea, 0x91, 0xb7, 0x57, 0x7f, 0x02, 0xc3, 0x1b, 0xbe, 0x5a, 0xb6, 0x46, - 0xeb, 0x74, 0x69, 0xb8, 0x3c, 0xad, 0x74, 0x10, 0xd0, 0x0f, 0x01, 0x14, 0xdf, 0x41, 0x3f, 0xca, - 0x9c, 0x97, 0xde, 0x71, 0xa1, 0xb6, 0x1d, 0x0a, 0xc9, 0x9d, 0x13, 0x93, 0xf6, 0x6e, 0xb6, 0x87, - 0xa3, 0x8f, 0x6d, 0xe8, 0x54, 0x4d, 0xa6, 0x4f, 0x4d, 0x6f, 0x3d, 0xba, 0xcc, 0xa2, 0x42, 0xbd, - 0xc6, 0x9c, 0x73, 0xde, 0x4d, 0x07, 0x8c, 0xa6, 0x11, 0xe4, 0x66, 0xb1, 0xcc, 0xa1, 0x09, 0xc9, - 0xee, 0xa6, 0x5d, 0x46, 0xce, 0xd1, 0x78, 0xf1, 0x10, 0xba, 0x71, 0x12, 0xd5, 0x3a, 0xa6, 0xd6, - 0x09, 0x03, 0xa8, 0xd6, 0x1b, 0xd2, 0xa1, 0xc9, 0x63, 0x9f, 0x99, 0x3c, 0x47, 0x93, 0x8b, 0xff, - 0x42, 0x57, 0x95, 0xc6, 0xa0, 0xf2, 0x98, 0x73, 0x02, 0x9d, 0x74, 0x0b, 0x6c, 0xd6, 0x79, 0xaf, - 0xb6, 0xce, 0x09, 0xb4, 0xb5, 0x99, 0x96, 0x2b, 0x93, 0xc7, 0xa6, 0x56, 0x47, 0x31, 0x84, 0xa6, - 0x91, 0x49, 0x87, 0xb5, 0x4d, 0x23, 0xe9, 0xac, 0xf3, 0xa4, 0xcb, 0xa5, 0x6b, 0x6a, 0x4e, 0x62, - 0xe5, 0xd0, 0x66, 0x72, 0x46, 0x49, 0x40, 
0x98, 0x38, 0x42, 0x5e, 0x11, 0x20, 0x1e, 0x40, 0xc7, - 0xa1, 0x5d, 0x6b, 0x85, 0x2e, 0xe9, 0x85, 0x6b, 0x56, 0x67, 0xf1, 0x39, 0x0c, 0x62, 0x2b, 0xb2, - 0x2b, 0x53, 0xde, 0x98, 0xa4, 0xcf, 0x9f, 0xee, 0x47, 0xf0, 0x67, 0xc2, 0xc4, 0x57, 0xb0, 0x2f, - 0xf3, 0x35, 0x5a, 0xaf, 0x1d, 0xe6, 0x19, 0xdb, 0x14, 0x35, 0x30, 0x19, 0xf0, 0x46, 0x8a, 0x2d, - 0x77, 0x46, 0xd4, 0x07, 0xb4, 0xe2, 0x0b, 0x18, 0xb3, 0x4c, 0x95, 0x8b, 0x4d, 0xab, 0x87, 0x61, - 0xd9, 0x2b, 0xbc, 0x6a, 0xf6, 0xb7, 0x70, 0x97, 0x0a, 0x98, 0xcd, 0x51, 0xe6, 0x68, 0x5d, 0xb6, - 0xb4, 0x78, 0x89, 0xd6, 0x62, 0x9e, 0x8c, 0xf8, 0x2a, 0xfb, 0xc4, 0xbe, 0x0d, 0xe4, 0x59, 0xc5, - 0x89, 0x09, 0x8c, 0xd7, 0x94, 0xb1, 0xba, 0xda, 0x36, 0x78, 0xcc, 0xfa, 0xe1, 0x1a, 0xed, 0x2b, - 0x75, 0xb5, 0xe9, 0xf0, 0x33, 0x18, 0xdd, 0x68, 0x6f, 0xd0, 0xb9, 0x0c, 0x8d, 0x9c, 0x2e, 0x30, - 0x4f, 0xee, 0x04, 0x61, 0x84, 0xdf, 0x04, 0x94, 0x4a, 0x71, 0xa3, 0x2d, 0x66, 0x68, 0x54, 0x99, - 0xd3, 0x6a, 0x09, 0xae, 0x55, 0x9f, 0xc0, 0x37, 0x11, 0x23, 0x9f, 0xa3, 0x59, 0xcf, 0xca, 0xcb, - 0x4b, 0x87, 0x3e, 0xf9, 0xcf, 0x61, 0x63, 0xb2, 0x93, 0x02, 0x41, 0xbf, 0x32, 0x42, 0x73, 0xc7, - 0x82, 0x6d, 0xf3, 0xf7, 0x39, 0xcc, 0x80, 0xd0, 0x93, 0xcd, 0x00, 0x3c, 0x83, 0x91, 0xf3, 0xd2, - 0x7a, 0x6d, 0x66, 0xd5, 0xa6, 0x1d, 0x70, 0x3f, 0x87, 0x15, 0x1c, 0x57, 0xed, 0x11, 0x00, 0x0f, - 0x19, 0xef, 0x59, 0x72, 0x97, 0x35, 0x3c, 0x76, 0xbc, 0x5a, 0xd4, 0x1a, 0xa6, 0xa5, 0x31, 0xe5, - 0xca, 0x28, 0xcc, 0xa3, 0xf0, 0xde, 0x61, 0x63, 0xd2, 0x4f, 0xd9, 0x46, 0x5f, 0x55, 0x54, 0x78, - 0xe3, 0x29, 0xb0, 0xdf, 0x66, 0x4b, 0xfa, 0xb4, 0x29, 0x8d, 0xc2, 0x24, 0x09, 0x9b, 0x41, 0xf0, - 0x99, 0x36, 0xb3, 0x53, 0x02, 0xc5, 0x63, 0x18, 0x6e, 0x75, 0x6c, 0xc0, 0xf7, 0x43, 0x3d, 0x2a, - 0x19, 0x5b, 0xf5, 0x04, 0xc6, 0x5b, 0x55, 0xa1, 0x95, 0x2d, 0x5d, 0xf2, 0x80, 0x8b, 0x32, 0xac, - 0x74, 0xef, 0x19, 0x3d, 0xfa, 0xb8, 0x0b, 0xbd, 0xda, 0xea, 0x92, 0x87, 0x14, 0x52, 0x1b, 0x8f, - 0x46, 0x1a, 0x85, 0x99, 0x36, 0x34, 0x58, 0x33, 0x8b, 0xce, 0xf1, 0xa6, 0x76, 
0xd2, 0x83, 0x1a, - 0xfd, 0xce, 0x9c, 0x45, 0x92, 0x46, 0xab, 0xfe, 0x9e, 0x91, 0x05, 0xc6, 0x5f, 0xa9, 0x51, 0x0d, - 0x3f, 0x95, 0x05, 0x8a, 0x17, 0x20, 0xea, 0x52, 0x75, 0xab, 0x16, 0x6c, 0xe2, 0x54, 0xc3, 0x3b, - 0x35, 0xe6, 0x84, 0x09, 0x71, 0x02, 0xff, 0xab, 0xcb, 0xb7, 0x65, 0xcf, 0xd6, 0xda, 0x69, 0x6a, - 0x65, 0x70, 0xab, 0x87, 0x35, 0xd5, 0x2f, 0x55, 0x27, 0x3e, 0x04, 0x89, 0xf8, 0x01, 0x92, 0x38, - 0x20, 0x21, 0x40, 0x4d, 0xcb, 0x36, 0xd0, 0x4d, 0x0f, 0xc2, 0xb4, 0xd0, 0x9b, 0xef, 0xb7, 0x24, - 0xad, 0x7f, 0xe5, 0xe9, 0x7b, 0x61, 0xfd, 0xe3, 0x51, 0x7c, 0x06, 0x7d, 0x7a, 0xcc, 0x78, 0x32, - 0x30, 0xb8, 0x43, 0x37, 0xed, 0x11, 0x76, 0x1e, 0x20, 0x5e, 0x22, 0x92, 0x58, 0xa4, 0xef, 0x51, - 0x2f, 0x1c, 0xaa, 0xd2, 0xe4, 0x8e, 0x5d, 0x63, 0x27, 0xdd, 0x27, 0x36, 0xad, 0xc8, 0xf3, 0xc0, - 0xd1, 0x6f, 0x03, 0xbf, 0xf5, 0x89, 0x97, 0x47, 0x5f, 0x19, 0x13, 0x73, 0x52, 0x73, 0x71, 0xb2, - 0x3b, 0x56, 0x5f, 0xda, 0xb2, 0x60, 0x93, 0x69, 0xa5, 0x1d, 0x02, 0x7e, 0xb2, 0x65, 0x41, 0xff, - 0x18, 0x30, 0xe9, 0x4b, 0xb6, 0x98, 0x56, 0xba, 0x47, 0xc7, 0x8b, 0x92, 0x7d, 0x58, 0x5b, 0x3f, - 0xcf, 0xe5, 0x6d, 0x8c, 0xdf, 0x0f, 0x96, 0x5f, 0xa1, 0x1c, 0xfc, 0xf5, 0xe3, 0x3f, 0x8e, 0x66, - 0xda, 0xcf, 0x57, 0xd3, 0x63, 0x55, 0x16, 0x2f, 0x97, 0x57, 0xfe, 0x85, 0x92, 0x6e, 0x4e, 0x0f, - 0xf9, 0xcb, 0x85, 0xa1, 0x3f, 0xbb, 0x54, 0xd3, 0x3d, 0x36, 0x8f, 0x6f, 0xfe, 0x09, 0x00, 0x00, - 0xff, 0xff, 0xad, 0x58, 0x54, 0xb5, 0x2f, 0x09, 0x00, 0x00, -} diff --git a/lnd/lnrpc/pkt.proto b/lnd/lnrpc/pkt.proto deleted file mode 100644 index 4547f86b..00000000 --- a/lnd/lnrpc/pkt.proto +++ /dev/null @@ -1,82 +0,0 @@ -syntax = "proto3"; - -package lnrpc; - -option go_package = "github.com/pkt-cash/pktd/lnd/lnrpc"; - -message NeutrinoBan { - string addr = 1; - string reason = 2; - string end_time = 3; -} - -message NeutrinoQuery { - string peer = 1; - string command = 2; - uint32 req_num = 3; - uint32 create_time = 4; - uint32 last_request_time = 5; - uint32 last_response_time = 6; -} - 
-message NeutrinoInfo { - repeated PeerDesc peers = 1; - repeated NeutrinoBan bans = 2; - repeated NeutrinoQuery queries = 3; - string block_hash = 4; - int32 height = 5; - string block_timestamp = 6; - bool is_syncing = 7; -} - -message WalletInfo { - string current_block_hash = 1; - int32 current_height = 2; - string current_block_timestamp = 3; - int32 wallet_version = 4; - WalletStats wallet_stats = 5; -} - -message PeerDesc { - uint64 bytes_received = 1; - uint64 bytes_sent = 2; - string last_recv = 3; - string last_send = 4; - bool connected = 5; - string addr = 6; - bool inbound = 7; - string na = 8; //netaddress address:port - int32 id = 9; - string user_agent = 10; - string services = 11; - bool version_known = 12; - uint32 advertised_proto_ver = 13; - uint32 protocol_version = 14; - bool send_headers_preferred = 15; - bool ver_ack_received = 16; - bool witness_enabled = 17; - string wire_encoding = 18; - int64 time_offset = 19; - string time_connected = 20; - int32 starting_height = 21; - int32 last_block = 22; - bytes last_announced_block = 23; - uint64 last_ping_nonce = 24; - string last_ping_time = 25; - int64 last_ping_micros = 26; -} - -message WalletStats { - bool maintenance_in_progress = 1; - string maintenance_name = 2; - int32 maintenance_cycles = 3; - int32 maintenance_last_block_visited = 4; - string time_of_last_maintenance = 5; - bool syncing = 6; - string sync_started = 7; - int64 sync_remaining_seconds = 8; - int32 sync_current_block = 9; - int32 sync_from = 10; - int32 sync_to = 11; - int32 birthday_block = 12; -} \ No newline at end of file diff --git a/lnd/lnrpc/pkt.swagger.json b/lnd/lnrpc/pkt.swagger.json deleted file mode 100644 index 24d1a74f..00000000 --- a/lnd/lnrpc/pkt.swagger.json +++ /dev/null @@ -1,49 +0,0 @@ -{ - "swagger": "2.0", - "info": { - "title": "pkt.proto", - "version": "version not set" - }, - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "paths": {}, - "definitions": { - 
"protobufAny": { - "type": "object", - "properties": { - "type_url": { - "type": "string" - }, - "value": { - "type": "string", - "format": "byte" - } - } - }, - "runtimeError": { - "type": "object", - "properties": { - "error": { - "type": "string" - }, - "code": { - "type": "integer", - "format": "int32" - }, - "message": { - "type": "string" - }, - "details": { - "type": "array", - "items": { - "$ref": "#/definitions/protobufAny" - } - } - } - } - } -} diff --git a/lnd/lnrpc/rest-annotations.yaml b/lnd/lnrpc/rest-annotations.yaml deleted file mode 100644 index fae8d577..00000000 --- a/lnd/lnrpc/rest-annotations.yaml +++ /dev/null @@ -1,306 +0,0 @@ -type: google.api.Service -config_version: 3 - -http: - rules: - # rpc.proto - - selector: lnrpc.Lightning.WalletBalance - get: "/v1/balance/blockchain" - - selector: lnrpc.Lightning.ChannelBalance - get: "/v1/balance/channels" - - selector: lnrpc.Lightning.GetTransactions - get: "/v1/transactions" - - selector: lnrpc.Lightning.EstimateFee - get: "/v1/transactions/fee" - - selector: lnrpc.Lightning.SendCoins - post: "/v1/transactions" - body: "*" - - selector: lnrpc.Lightning.ListUnspent - get: "/v1/utxos" - - selector: lnrpc.Lightning.SubscribeTransactions - get: "/v1/transactions/subscribe" - - selector: lnrpc.Lightning.SendMany - post: "/v1/transactions/many" - body: "*" - - selector: lnrpc.Lightning.NewAddress - get: "/v1/newaddress" - - selector: lnrpc.Lightning.SignMessage - post: "/v1/signmessage" - body: "*" - - selector: lnrpc.Lightning.VerifyMessage - post: "/v1/verifymessage" - body: "*" - - selector: lnrpc.Lightning.ConnectPeer - post: "/v1/peers" - body: "*" - - selector: lnrpc.Lightning.DisconnectPeer - delete: "/v1/peers/{pub_key}" - - selector: lnrpc.Lightning.ListPeers - get: "/v1/peers" - - selector: lnrpc.Lightning.SubscribePeerEvents - get: "/v1/peers/subscribe" - - selector: lnrpc.Lightning.GetInfo - get: "/v1/getinfo" - - selector: lnrpc.Lightning.GetRecoveryInfo - get: "/v1/getrecoveryinfo" - - 
selector: lnrpc.Lightning.PendingChannels - get: "/v1/channels/pending" - - selector: lnrpc.Lightning.ListChannels - get: "/v1/channels" - - selector: lnrpc.Lightning.SubscribeChannelEvents - get: "/v1/channels/subscribe" - - selector: lnrpc.Lightning.ClosedChannels - get: "/v1/channels/closed" - - selector: lnrpc.Lightning.OpenChannelSync - post: "/v1/channels" - body: "*" - - selector: lnrpc.Lightning.OpenChannel - post: "/v1/channels/stream" - body: "*" - - selector: lnrpc.Lightning.FundingStateStep - post: "/v1/funding/step" - body: "*" - - selector: lnrpc.Lightning.ChannelAcceptor - # request streaming RPC, REST not supported - - selector: lnrpc.Lightning.CloseChannel - delete: "/v1/channels/{channel_point.funding_txid_str}/{channel_point.output_index}" - - selector: lnrpc.Lightning.AbandonChannel - delete: "/v1/channels/abandon/{channel_point.funding_txid_str}/{channel_point.output_index}" - - selector: lnrpc.Lightning.SendPayment - - selector: lnrpc.Lightning.SendPaymentSync - post: "/v1/channels/transactions" - body: "*" - - selector: lnrpc.Lightning.SendToRoute - # deprecated, no REST endpoint - - selector: lnrpc.Lightning.SendToRouteSync - post: "/v1/channels/transactions/route" - body: "*" - - selector: lnrpc.Lightning.AddInvoice - post: "/v1/invoices" - body: "*" - - selector: lnrpc.Lightning.ListInvoices - get: "/v1/invoices" - - selector: lnrpc.Lightning.LookupInvoice - get: "/v1/invoice/{r_hash_str}" - - selector: lnrpc.Lightning.SubscribeInvoices - get: "/v1/invoices/subscribe" - - selector: lnrpc.Lightning.DecodePayReq - get: "/v1/payreq/{pay_req}" - - selector: lnrpc.Lightning.ListPayments - get: "/v1/payments" - - selector: lnrpc.Lightning.DeleteAllPayments - delete: "/v1/payments" - - selector: lnrpc.Lightning.DescribeGraph - get: "/v1/graph" - - selector: lnrpc.Lightning.GetNodeMetrics - get: "/v1/graph/nodemetrics" - - selector: lnrpc.Lightning.GetChanInfo - get: "/v1/graph/edge/{chan_id}" - - selector: lnrpc.Lightning.GetNodeInfo - get: 
"/v1/graph/node/{pub_key}" - - selector: lnrpc.Lightning.QueryRoutes - get: "/v1/graph/routes/{pub_key}/{amt}" - - selector: lnrpc.Lightning.GetNetworkInfo - get: "/v1/graph/info" - - selector: lnrpc.Lightning.StopDaemon - post: "/v1/stop" - body: "*" - - selector: lnrpc.Lightning.SubscribeChannelGraph - get: "/v1/graph/subscribe" - - selector: lnrpc.Lightning.DebugLevel - post: "/v1/debuglevel" - body: "*" - - selector: lnrpc.Lightning.FeeReport - get: "/v1/fees" - - selector: lnrpc.Lightning.UpdateChannelPolicy - post: "/v1/chanpolicy" - body: "*" - - selector: lnrpc.Lightning.ForwardingHistory - post: "/v1/switch" - body: "*" - - selector: lnrpc.Lightning.ExportChannelBackup - get: "/v1/channels/backup/{chan_point.funding_txid_str}/{chan_point.output_index}" - - selector: lnrpc.Lightning.ExportAllChannelBackups - get: "/v1/channels/backup" - - selector: lnrpc.Lightning.VerifyChanBackup - post: "/v1/channels/backup/verify" - body: "*" - - selector: lnrpc.Lightning.RestoreChannelBackups - post: "/v1/channels/backup/restore" - body: "*" - - selector: lnrpc.Lightning.SubscribeChannelBackups - get: "/v1/channels/backup/subscribe" - - selector: lnrpc.Lightning.BakeMacaroon - post: "/v1/macaroon" - body: "*" - - selector: lnrpc.Lightning.ListMacaroonIDs - get: "/v1/macaroon/ids" - - selector: lnrpc.Lightning.DeleteMacaroonID - delete: "/v1/macaroon/{root_key_id}" - - selector: lnrpc.Lightning.ListPermissions - get: "/v1/macaroon/permissions" - - # walletunlocker.proto - - selector: lnrpc.WalletUnlocker.GenSeed - get: "/v1/genseed" - - selector: lnrpc.WalletUnlocker.InitWallet - post: "/v1/initwallet" - body: "*" - - selector: lnrpc.WalletUnlocker.UnlockWallet - post: "/v1/unlockwallet" - body: "*" - - selector: lnrpc.WalletUnlocker.ChangePassword - post: "/v1/changepassword" - body: "*" - - # autopilotrpc/autopilot.proto - - selector: autopilotrpc.Autopilot.Status - get: "/v2/autopilot/status" - - selector: autopilotrpc.Autopilot.ModifyStatus - post: 
"/v2/autopilot/modify" - body: "*" - - selector: autopilotrpc.Autopilot.QueryScores - get: "/v2/autopilot/scores" - - selector: autopilotrpc.Autopilot.SetScores - post: "/v2/autopilot/scores" - body: "*" - - # chainrpc/chainnotifier.proto - - selector: chainrpc.ChainNotifier.RegisterConfirmationsNtfn - post: "/v2/chainnotifier/register/confirmations" - body: "*" - - selector: chainrpc.ChainNotifier.RegisterSpendNtfn - post: "/v2/chainnotifier/register/spends" - body: "*" - - selector: chainrpc.ChainNotifier.RegisterBlockEpochNtfn - post: "/v2/chainnotifier/register/blocks" - body: "*" - - # invoicesrpc/invoices.proto - - selector: invoicesrpc.Invoices.SubscribeSingleInvoice - get: "/v2/invoices/subscribe/{r_hash}" - - selector: invoicesrpc.Invoices.CancelInvoice - post: "/v2/invoices/cancel" - body: "*" - - selector: invoicesrpc.Invoices.AddHoldInvoice - post: "/v2/invoices/hodl" - body: "*" - - selector: invoicesrpc.Invoices.SettleInvoice - post: "/v2/invoices/settle" - body: "*" - - # routerrpc/router.proto - - selector: routerrpc.Router.SendPaymentV2 - post: "/v2/router/send" - body: "*" - - selector: routerrpc.Router.TrackPaymentV2 - get: "/v2/router/track/{payment_hash}" - - selector: routerrpc.Router.EstimateRouteFee - post: "/v2/router/route/estimatefee" - body: "*" - - selector: routerrpc.Router.SendToRoute - # deprecated, no REST endpoint - - selector: routerrpc.Router.SendToRouteV2 - post: "/v2/router/route/send" - body: "*" - - selector: routerrpc.Router.ResetMissionControl - post: "/v2/router/mc/reset" - body: "*" - - selector: routerrpc.Router.QueryMissionControl - get: "/v2/router/mc" - - selector: routerrpc.Router.QueryProbability - get: "/v2/router/mc/probability/{from_node}/{to_node}/{amt_msat}" - - selector: routerrpc.Router.BuildRoute - post: "/v2/router/route" - body: "*" - - selector: routerrpc.Router.SubscribeHtlcEvents - get: "/v2/router/htlcevents" - - selector: routerrpc.Router.SendPayment - # deprecated, no REST endpoint - - selector: 
routerrpc.Router.TrackPayment - # deprecated, no REST endpoint - - selector: routerrpc.HtlcInterceptor - # request streaming RPC, REST not supported - - # signrpc/signer.proto - - selector: signrpc.Signer.SignOutputRaw - post: "/v2/signer/signraw" - body: "*" - - selector: signrpc.Signer.ComputeInputScript - post: "/v2/signer/inputscript" - body: "*" - - selector: signrpc.Signer.SignMessage - post: "/v2/signer/signmessage" - body: "*" - - selector: signrpc.Signer.VerifyMessage - post: "/v2/signer/verifymessage" - body: "*" - - selector: signrpc.Signer.DeriveSharedKey - post: "/v2/signer/sharedkey" - body: "*" - - # verrpc/verrpc.proto - - selector: verrpc.Versioner.GetVersion - get: "/v2/versioner/version" - - # walletrpc/walletkit.proto - - selector: walletrpc.WalletKit.ListUnspent - post: "/v2/wallet/utxos" - - selector: walletrpc.WalletKit.LeaseOutput - post: "/v2/wallet/utxos/lease" - body: "*" - - selector: walletrpc.WalletKit.ReleaseOutput - post: "/v2/wallet/utxos/release" - body: "*" - - selector: walletrpc.WalletKit.DeriveNextKey - post: "/v2/wallet/key/next" - body: "*" - - selector: walletrpc.WalletKit.DeriveKey - post: "/v2/wallet/key" - body: "*" - - selector: walletrpc.WalletKit.NextAddr - post: "/v2/wallet/address/next" - body: "*" - - selector: walletrpc.WalletKit.PublishTransaction - post: "/v2/wallet/tx" - body: "*" - - selector: walletrpc.WalletKit.SendOutputs - post: "/v2/wallet/send" - body: "*" - - selector: walletrpc.WalletKit.EstimateFee - get: "/v2/wallet/estimatefee/{conf_target}" - - selector: walletrpc.WalletKit.PendingSweeps - get: "/v2/wallet/sweeps/pending" - - selector: walletrpc.WalletKit.BumpFee - post: "/v2/wallet/bumpfee" - body: "*" - - selector: walletrpc.WalletKit.ListSweeps - get: "/v2/wallet/sweeps" - - selector: walletrpc.WalletKit.LabelTransaction - post: "/v2/wallet/tx/label" - body: "*" - - selector: walletrpc.WalletKit.FundPsbt - post: "/v2/wallet/psbt/fund" - body: "*" - - selector: walletrpc.WalletKit.FinalizePsbt - 
post: "/v2/wallet/psbt/finalize" - body: "*" - - # watchtowerrpc/watchtower.proto - - selector: watchtowerrpc.Watchtower.GetInfo - get: "/v2/watchtower/server" - - # wtclientrpc/wtclient.proto - - selector: wtclientrpc.WatchtowerClient.AddTower - post: "/v2/watchtower/client" - body: "*" - - selector: wtclientrpc.WatchtowerClient.RemoveTower - delete: "/v2/watchtower/client/{pubkey}" - - selector: wtclientrpc.WatchtowerClient.ListTowers - get: "/v2/watchtower/client" - - selector: wtclientrpc.WatchtowerClient.GetTowerInfo - get: "/v2/watchtower/client/info/{pubkey}" - - selector: wtclientrpc.WatchtowerClient.Stats - get: "/v2/watchtower/client/stats" - - selector: wtclientrpc.WatchtowerClient.Policy - get: "/v2/watchtower/client/policy" diff --git a/lnd/lnrpc/routerrpc/config.go b/lnd/lnrpc/routerrpc/config.go deleted file mode 100644 index cf6a2469..00000000 --- a/lnd/lnrpc/routerrpc/config.go +++ /dev/null @@ -1,70 +0,0 @@ -package routerrpc - -import ( - "github.com/pkt-cash/pktd/lnd/macaroons" - "github.com/pkt-cash/pktd/lnd/routing" -) - -// Config is the main configuration file for the router RPC server. It contains -// all the items required for the router RPC server to carry out its duties. -// The fields with struct tags are meant to be parsed as normal configuration -// options, while if able to be populated, the latter fields MUST also be -// specified. -type Config struct { - RoutingConfig - - // RouterMacPath is the path for the router macaroon. If unspecified - // then we assume that the macaroon will be found under the network - // directory, named DefaultRouterMacFilename. - RouterMacPath string `long:"routermacaroonpath" description:"Path to the router macaroon"` - - // NetworkDir is the main network directory wherein the router rpc - // server will find the macaroon named DefaultRouterMacFilename. - NetworkDir string - - // MacService is the main macaroon service that we'll use to handle - // authentication for the Router rpc server. 
- MacService *macaroons.Service - - // Router is the main channel router instance that backs this RPC - // server. - // - // TODO(roasbeef): make into pkg lvl interface? - // - // TODO(roasbeef): assumes router handles saving payment state - Router *routing.ChannelRouter - - // RouterBackend contains shared logic between this sub server and the - // main rpc server. - RouterBackend *RouterBackend -} - -// DefaultConfig defines the config defaults. -func DefaultConfig() *Config { - defaultRoutingConfig := RoutingConfig{ - AprioriHopProbability: routing.DefaultAprioriHopProbability, - AprioriWeight: routing.DefaultAprioriWeight, - MinRouteProbability: routing.DefaultMinRouteProbability, - PenaltyHalfLife: routing.DefaultPenaltyHalfLife, - AttemptCost: routing.DefaultAttemptCost.ToSatoshis(), - AttemptCostPPM: routing.DefaultAttemptCostPPM, - MaxMcHistory: routing.DefaultMaxMcHistory, - } - - return &Config{ - RoutingConfig: defaultRoutingConfig, - } -} - -// GetRoutingConfig returns the routing config based on this sub server config. -func GetRoutingConfig(cfg *Config) *RoutingConfig { - return &RoutingConfig{ - AprioriHopProbability: cfg.AprioriHopProbability, - AprioriWeight: cfg.AprioriWeight, - MinRouteProbability: cfg.MinRouteProbability, - AttemptCost: cfg.AttemptCost, - AttemptCostPPM: cfg.AttemptCostPPM, - PenaltyHalfLife: cfg.PenaltyHalfLife, - MaxMcHistory: cfg.MaxMcHistory, - } -} diff --git a/lnd/lnrpc/routerrpc/driver.go b/lnd/lnrpc/routerrpc/driver.go deleted file mode 100644 index 6453727f..00000000 --- a/lnd/lnrpc/routerrpc/driver.go +++ /dev/null @@ -1,61 +0,0 @@ -package routerrpc - -import ( - "fmt" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/lnrpc" -) - -// createNewSubServer is a helper method that will create the new router sub -// server given the main config dispatcher method. If we're unable to find the -// config that is meant for us in the config dispatcher, then we'll exit with -// an error. 
-func createNewSubServer(configRegistry lnrpc.SubServerConfigDispatcher) ( - lnrpc.SubServer, lnrpc.MacaroonPerms, er.R) { - - // We'll attempt to look up the config that we expect, according to our - // subServerName name. If we can't find this, then we'll exit with an - // error, as we're unable to properly initialize ourselves without this - // config. - routeServerConf, ok := configRegistry.FetchConfig(subServerName) - if !ok { - return nil, nil, er.Errorf("unable to find config for "+ - "subserver type %s", subServerName) - } - - // Now that we've found an object mapping to our service name, we'll - // ensure that it's the type we need. - config, ok := routeServerConf.(*Config) - if !ok { - return nil, nil, er.Errorf("wrong type of config for "+ - "subserver %s, expected %T got %T", subServerName, - &Config{}, routeServerConf) - } - - // Before we try to make the new router service instance, we'll perform - // some sanity checks on the arguments to ensure that they're useable. - switch { - case config.Router == nil: - return nil, nil, er.Errorf("Router must be set to create " + - "Routerpc") - } - - return New(config) -} - -func init() { - subServer := &lnrpc.SubServerDriver{ - SubServerName: subServerName, - New: func(c lnrpc.SubServerConfigDispatcher) (lnrpc.SubServer, lnrpc.MacaroonPerms, er.R) { - return createNewSubServer(c) - }, - } - - // If the build tag is active, then we'll register ourselves as a - // sub-RPC server within the global lnrpc package namespace. 
- if err := lnrpc.RegisterSubServer(subServer); err != nil { - panic(fmt.Sprintf("failed to register sub server driver '%s': %v", - subServerName, err)) - } -} diff --git a/lnd/lnrpc/routerrpc/forward_interceptor.go b/lnd/lnrpc/routerrpc/forward_interceptor.go deleted file mode 100644 index 9981bbc1..00000000 --- a/lnd/lnrpc/routerrpc/forward_interceptor.go +++ /dev/null @@ -1,223 +0,0 @@ -package routerrpc - -import ( - "sync" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/htlcswitch" - "github.com/pkt-cash/pktd/lnd/lntypes" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/pktlog/log" -) - -var ( - Err = er.NewErrorType("lnd.routerrpc") - // ErrFwdNotExists is an error returned when the caller tries to resolve - // a forward that doesn't exist anymore. - ErrFwdNotExists = Err.CodeWithDetail("ErrFwdNotExists", "forward does not exist") - - // ErrMissingPreimage is an error returned when the caller tries to settle - // a forward and doesn't provide a preimage. - ErrMissingPreimage = Err.CodeWithDetail("ErrMissingPreimage", "missing preimage") -) - -// forwardInterceptor is a helper struct that handles the lifecycle of an rpc -// interceptor streaming session. -// It is created when the stream opens and disconnects when the stream closes. -type forwardInterceptor struct { - // server is the Server reference - server *Server - - // holdForwards is a map of current hold forwards and their corresponding - // ForwardResolver. - holdForwards map[channeldb.CircuitKey]htlcswitch.InterceptedForward - - // stream is the bidirectional RPC stream - stream Router_HtlcInterceptorServer - - // quit is a channel that is closed when this forwardInterceptor is shutting - // down. - quit chan struct{} - - // intercepted is where we stream all intercepted packets coming from - // the switch. 
- intercepted chan htlcswitch.InterceptedForward - - wg sync.WaitGroup -} - -// newForwardInterceptor creates a new forwardInterceptor. -func newForwardInterceptor(server *Server, stream Router_HtlcInterceptorServer) *forwardInterceptor { - return &forwardInterceptor{ - server: server, - stream: stream, - holdForwards: make( - map[channeldb.CircuitKey]htlcswitch.InterceptedForward), - quit: make(chan struct{}), - intercepted: make(chan htlcswitch.InterceptedForward), - } -} - -// run sends the intercepted packets to the client and receives the -// corersponding responses. On one hand it regsitered itself as an interceptor -// that receives the switch packets and on the other hand launches a go routine -// to read from the client stream. -// To coordinate all this and make sure it is safe for concurrent access all -// packets are sent to the main where they are handled. -func (r *forwardInterceptor) run() er.R { - // make sure we disconnect and resolves all remaining packets if any. - defer r.onDisconnect() - - // Register our interceptor so we receive all forwarded packets. - interceptableForwarder := r.server.cfg.RouterBackend.InterceptableForwarder - interceptableForwarder.SetInterceptor(r.onIntercept) - defer interceptableForwarder.SetInterceptor(nil) - - // start a go routine that reads client resolutions. - errChan := make(chan er.R) - resolutionRequests := make(chan *ForwardHtlcInterceptResponse) - r.wg.Add(1) - go r.readClientResponses(resolutionRequests, errChan) - - // run the main loop that synchronizes both sides input into one go routine. - for { - select { - case intercepted := <-r.intercepted: - log.Tracef("sending intercepted packet to client %v", intercepted) - // in case we couldn't forward we exit the loop and drain the - // current interceptor as this indicates on a connection problem. 
- if err := r.holdAndForwardToClient(intercepted); err != nil { - return err - } - case resolution := <-resolutionRequests: - log.Tracef("resolving intercepted packet %v", resolution) - // in case we couldn't resolve we just add a log line since this - // does not indicate on any connection problem. - if err := r.resolveFromClient(resolution); err != nil { - log.Warnf("client resolution of intercepted "+ - "packet failed %v", err) - } - case err := <-errChan: - return err - case <-r.server.quit: - return nil - } - } -} - -// onIntercept is the function that is called by the switch for every forwarded -// packet. Our interceptor makes sure we hold the packet and then signal to the -// main loop to handle the packet. We only return true if we were able -// to deliver the packet to the main loop. -func (r *forwardInterceptor) onIntercept(p htlcswitch.InterceptedForward) bool { - select { - case r.intercepted <- p: - return true - case <-r.quit: - return false - case <-r.server.quit: - return false - } -} - -func (r *forwardInterceptor) readClientResponses( - resolutionChan chan *ForwardHtlcInterceptResponse, errChan chan er.R) { - - defer r.wg.Done() - for { - resp, err := r.stream.Recv() - if err != nil { - errChan <- er.E(err) - return - } - - // Now that we have the response from the RPC client, send it to - // the responses chan. - select { - case resolutionChan <- resp: - case <-r.quit: - return - case <-r.server.quit: - return - } - } -} - -// holdAndForwardToClient forwards the intercepted htlc to the client. -func (r *forwardInterceptor) holdAndForwardToClient( - forward htlcswitch.InterceptedForward) er.R { - - htlc := forward.Packet() - inKey := htlc.IncomingCircuit - - // First hold the forward, then send to client. 
- r.holdForwards[inKey] = forward - interceptionRequest := &ForwardHtlcInterceptRequest{ - IncomingCircuitKey: &CircuitKey{ - ChanId: inKey.ChanID.ToUint64(), - HtlcId: inKey.HtlcID, - }, - OutgoingRequestedChanId: htlc.OutgoingChanID.ToUint64(), - PaymentHash: htlc.Hash[:], - OutgoingAmountMsat: uint64(htlc.OutgoingAmount), - OutgoingExpiry: htlc.OutgoingExpiry, - IncomingAmountMsat: uint64(htlc.IncomingAmount), - IncomingExpiry: htlc.IncomingExpiry, - CustomRecords: htlc.CustomRecords, - OnionBlob: htlc.OnionBlob[:], - } - - return er.E(r.stream.Send(interceptionRequest)) -} - -// resolveFromClient handles a resolution arrived from the client. -func (r *forwardInterceptor) resolveFromClient( - in *ForwardHtlcInterceptResponse) er.R { - - circuitKey := channeldb.CircuitKey{ - ChanID: lnwire.NewShortChanIDFromInt(in.IncomingCircuitKey.ChanId), - HtlcID: in.IncomingCircuitKey.HtlcId, - } - var interceptedForward htlcswitch.InterceptedForward - interceptedForward, ok := r.holdForwards[circuitKey] - if !ok { - return ErrFwdNotExists.Default() - } - delete(r.holdForwards, circuitKey) - - switch in.Action { - case ResolveHoldForwardAction_RESUME: - return interceptedForward.Resume() - case ResolveHoldForwardAction_FAIL: - return interceptedForward.Fail() - case ResolveHoldForwardAction_SETTLE: - if in.Preimage == nil { - return ErrMissingPreimage.Default() - } - preimage, err := lntypes.MakePreimage(in.Preimage) - if err != nil { - return err - } - return interceptedForward.Settle(preimage) - default: - return er.Errorf("unrecognized resolve action %v", in.Action) - } -} - -// onDisconnect removes all previousely held forwards from -// the store. Before they are removed it ensure to resume as the default -// behavior. -func (r *forwardInterceptor) onDisconnect() { - // Then close the channel so all go routine will exit. 
- close(r.quit) - - log.Infof("RPC interceptor disconnected, resolving held packets") - for key, forward := range r.holdForwards { - if err := forward.Resume(); err != nil { - log.Errorf("failed to resume hold forward %v", err) - } - delete(r.holdForwards, key) - } - r.wg.Wait() -} diff --git a/lnd/lnrpc/routerrpc/router.pb.go b/lnd/lnrpc/routerrpc/router.pb.go deleted file mode 100644 index d872076b..00000000 --- a/lnd/lnrpc/routerrpc/router.pb.go +++ /dev/null @@ -1,2978 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: routerrpc/router.proto - -package routerrpc - -import ( - context "context" - fmt "fmt" - proto "github.com/golang/protobuf/proto" - lnrpc "github.com/pkt-cash/pktd/lnd/lnrpc" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -type FailureDetail int32 - -const ( - FailureDetail_UNKNOWN FailureDetail = 0 - FailureDetail_NO_DETAIL FailureDetail = 1 - FailureDetail_ONION_DECODE FailureDetail = 2 - FailureDetail_LINK_NOT_ELIGIBLE FailureDetail = 3 - FailureDetail_ON_CHAIN_TIMEOUT FailureDetail = 4 - FailureDetail_HTLC_EXCEEDS_MAX FailureDetail = 5 - FailureDetail_INSUFFICIENT_BALANCE FailureDetail = 6 - FailureDetail_INCOMPLETE_FORWARD FailureDetail = 7 - FailureDetail_HTLC_ADD_FAILED FailureDetail = 8 - FailureDetail_FORWARDS_DISABLED FailureDetail = 9 - FailureDetail_INVOICE_CANCELED FailureDetail = 10 - FailureDetail_INVOICE_UNDERPAID FailureDetail = 11 - FailureDetail_INVOICE_EXPIRY_TOO_SOON FailureDetail = 12 - FailureDetail_INVOICE_NOT_OPEN FailureDetail = 13 - FailureDetail_MPP_INVOICE_TIMEOUT FailureDetail = 14 - FailureDetail_ADDRESS_MISMATCH FailureDetail = 15 - FailureDetail_SET_TOTAL_MISMATCH FailureDetail = 16 - FailureDetail_SET_TOTAL_TOO_LOW FailureDetail = 17 - FailureDetail_SET_OVERPAID FailureDetail = 18 - FailureDetail_UNKNOWN_INVOICE FailureDetail = 19 - FailureDetail_INVALID_KEYSEND FailureDetail = 20 - FailureDetail_MPP_IN_PROGRESS FailureDetail = 21 - FailureDetail_CIRCULAR_ROUTE FailureDetail = 22 -) - -var FailureDetail_name = map[int32]string{ - 0: "UNKNOWN", - 1: "NO_DETAIL", - 2: "ONION_DECODE", - 3: "LINK_NOT_ELIGIBLE", - 4: "ON_CHAIN_TIMEOUT", - 5: "HTLC_EXCEEDS_MAX", - 6: "INSUFFICIENT_BALANCE", - 7: "INCOMPLETE_FORWARD", - 8: "HTLC_ADD_FAILED", - 9: "FORWARDS_DISABLED", - 10: "INVOICE_CANCELED", - 11: "INVOICE_UNDERPAID", - 12: "INVOICE_EXPIRY_TOO_SOON", - 13: "INVOICE_NOT_OPEN", - 14: "MPP_INVOICE_TIMEOUT", - 15: "ADDRESS_MISMATCH", - 16: "SET_TOTAL_MISMATCH", - 17: "SET_TOTAL_TOO_LOW", - 18: "SET_OVERPAID", - 19: "UNKNOWN_INVOICE", - 20: "INVALID_KEYSEND", - 21: "MPP_IN_PROGRESS", - 22: "CIRCULAR_ROUTE", -} - -var FailureDetail_value = map[string]int32{ - "UNKNOWN": 0, - 
"NO_DETAIL": 1, - "ONION_DECODE": 2, - "LINK_NOT_ELIGIBLE": 3, - "ON_CHAIN_TIMEOUT": 4, - "HTLC_EXCEEDS_MAX": 5, - "INSUFFICIENT_BALANCE": 6, - "INCOMPLETE_FORWARD": 7, - "HTLC_ADD_FAILED": 8, - "FORWARDS_DISABLED": 9, - "INVOICE_CANCELED": 10, - "INVOICE_UNDERPAID": 11, - "INVOICE_EXPIRY_TOO_SOON": 12, - "INVOICE_NOT_OPEN": 13, - "MPP_INVOICE_TIMEOUT": 14, - "ADDRESS_MISMATCH": 15, - "SET_TOTAL_MISMATCH": 16, - "SET_TOTAL_TOO_LOW": 17, - "SET_OVERPAID": 18, - "UNKNOWN_INVOICE": 19, - "INVALID_KEYSEND": 20, - "MPP_IN_PROGRESS": 21, - "CIRCULAR_ROUTE": 22, -} - -func (x FailureDetail) String() string { - return proto.EnumName(FailureDetail_name, int32(x)) -} - -func (FailureDetail) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_7a0613f69d37b0a5, []int{0} -} - -type PaymentState int32 - -const ( - // - //Payment is still in flight. - PaymentState_IN_FLIGHT PaymentState = 0 - // - //Payment completed successfully. - PaymentState_SUCCEEDED PaymentState = 1 - // - //There are more routes to try, but the payment timeout was exceeded. - PaymentState_FAILED_TIMEOUT PaymentState = 2 - // - //All possible routes were tried and failed permanently. Or were no - //routes to the destination at all. - PaymentState_FAILED_NO_ROUTE PaymentState = 3 - // - //A non-recoverable error has occured. - PaymentState_FAILED_ERROR PaymentState = 4 - // - //Payment details incorrect (unknown hash, invalid amt or - //invalid final cltv delta) - PaymentState_FAILED_INCORRECT_PAYMENT_DETAILS PaymentState = 5 - // - //Insufficient local balance. 
- PaymentState_FAILED_INSUFFICIENT_BALANCE PaymentState = 6 -) - -var PaymentState_name = map[int32]string{ - 0: "IN_FLIGHT", - 1: "SUCCEEDED", - 2: "FAILED_TIMEOUT", - 3: "FAILED_NO_ROUTE", - 4: "FAILED_ERROR", - 5: "FAILED_INCORRECT_PAYMENT_DETAILS", - 6: "FAILED_INSUFFICIENT_BALANCE", -} - -var PaymentState_value = map[string]int32{ - "IN_FLIGHT": 0, - "SUCCEEDED": 1, - "FAILED_TIMEOUT": 2, - "FAILED_NO_ROUTE": 3, - "FAILED_ERROR": 4, - "FAILED_INCORRECT_PAYMENT_DETAILS": 5, - "FAILED_INSUFFICIENT_BALANCE": 6, -} - -func (x PaymentState) String() string { - return proto.EnumName(PaymentState_name, int32(x)) -} - -func (PaymentState) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_7a0613f69d37b0a5, []int{1} -} - -type ResolveHoldForwardAction int32 - -const ( - ResolveHoldForwardAction_SETTLE ResolveHoldForwardAction = 0 - ResolveHoldForwardAction_FAIL ResolveHoldForwardAction = 1 - ResolveHoldForwardAction_RESUME ResolveHoldForwardAction = 2 -) - -var ResolveHoldForwardAction_name = map[int32]string{ - 0: "SETTLE", - 1: "FAIL", - 2: "RESUME", -} - -var ResolveHoldForwardAction_value = map[string]int32{ - "SETTLE": 0, - "FAIL": 1, - "RESUME": 2, -} - -func (x ResolveHoldForwardAction) String() string { - return proto.EnumName(ResolveHoldForwardAction_name, int32(x)) -} - -func (ResolveHoldForwardAction) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_7a0613f69d37b0a5, []int{2} -} - -type HtlcEvent_EventType int32 - -const ( - HtlcEvent_UNKNOWN HtlcEvent_EventType = 0 - HtlcEvent_SEND HtlcEvent_EventType = 1 - HtlcEvent_RECEIVE HtlcEvent_EventType = 2 - HtlcEvent_FORWARD HtlcEvent_EventType = 3 -) - -var HtlcEvent_EventType_name = map[int32]string{ - 0: "UNKNOWN", - 1: "SEND", - 2: "RECEIVE", - 3: "FORWARD", -} - -var HtlcEvent_EventType_value = map[string]int32{ - "UNKNOWN": 0, - "SEND": 1, - "RECEIVE": 2, - "FORWARD": 3, -} - -func (x HtlcEvent_EventType) String() string { - return proto.EnumName(HtlcEvent_EventType_name, int32(x)) -} - 
-func (HtlcEvent_EventType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_7a0613f69d37b0a5, []int{17, 0} -} - -type SendPaymentRequest struct { - // The identity pubkey of the payment recipient - Dest []byte `protobuf:"bytes,1,opt,name=dest,proto3" json:"dest,omitempty"` - // - //Number of satoshis to send. - // - //The fields amt and amt_msat are mutually exclusive. - Amt int64 `protobuf:"varint,2,opt,name=amt,proto3" json:"amt,omitempty"` - // - //Number of millisatoshis to send. - // - //The fields amt and amt_msat are mutually exclusive. - AmtMsat int64 `protobuf:"varint,12,opt,name=amt_msat,json=amtMsat,proto3" json:"amt_msat,omitempty"` - // The hash to use within the payment's HTLC - PaymentHash []byte `protobuf:"bytes,3,opt,name=payment_hash,json=paymentHash,proto3" json:"payment_hash,omitempty"` - // - //The CLTV delta from the current height that should be used to set the - //timelock for the final hop. - FinalCltvDelta int32 `protobuf:"varint,4,opt,name=final_cltv_delta,json=finalCltvDelta,proto3" json:"final_cltv_delta,omitempty"` - // - //A bare-bones invoice for a payment within the Lightning Network. With the - //details of the invoice, the sender has all the data necessary to send a - //payment to the recipient. The amount in the payment request may be zero. In - //that case it is required to set the amt field as well. If no payment request - //is specified, the following fields are required: dest, amt and payment_hash. - PaymentRequest string `protobuf:"bytes,5,opt,name=payment_request,json=paymentRequest,proto3" json:"payment_request,omitempty"` - // - //An upper limit on the amount of time we should spend when attempting to - //fulfill the payment. This is expressed in seconds. If we cannot make a - //successful payment within this time frame, an error will be returned. - //This field must be non-zero. 
- TimeoutSeconds int32 `protobuf:"varint,6,opt,name=timeout_seconds,json=timeoutSeconds,proto3" json:"timeout_seconds,omitempty"` - // - //The maximum number of satoshis that will be paid as a fee of the payment. - //If this field is left to the default value of 0, only zero-fee routes will - //be considered. This usually means single hop routes connecting directly to - //the destination. To send the payment without a fee limit, use max int here. - // - //The fields fee_limit_sat and fee_limit_msat are mutually exclusive. - FeeLimitSat int64 `protobuf:"varint,7,opt,name=fee_limit_sat,json=feeLimitSat,proto3" json:"fee_limit_sat,omitempty"` - // - //The maximum number of millisatoshis that will be paid as a fee of the - //payment. If this field is left to the default value of 0, only zero-fee - //routes will be considered. This usually means single hop routes connecting - //directly to the destination. To send the payment without a fee limit, use - //max int here. - // - //The fields fee_limit_sat and fee_limit_msat are mutually exclusive. - FeeLimitMsat int64 `protobuf:"varint,13,opt,name=fee_limit_msat,json=feeLimitMsat,proto3" json:"fee_limit_msat,omitempty"` - // - //Deprecated, use outgoing_chan_ids. The channel id of the channel that must - //be taken to the first hop. If zero, any channel may be used (unless - //outgoing_chan_ids are set). - OutgoingChanId uint64 `protobuf:"varint,8,opt,name=outgoing_chan_id,json=outgoingChanId,proto3" json:"outgoing_chan_id,omitempty"` // Deprecated: Do not use. - // - //The channel ids of the channels are allowed for the first hop. If empty, - //any channel may be used. - OutgoingChanIds []uint64 `protobuf:"varint,19,rep,packed,name=outgoing_chan_ids,json=outgoingChanIds,proto3" json:"outgoing_chan_ids,omitempty"` - // - //The pubkey of the last hop of the route. If empty, any hop may be used. 
- LastHopPubkey []byte `protobuf:"bytes,14,opt,name=last_hop_pubkey,json=lastHopPubkey,proto3" json:"last_hop_pubkey,omitempty"` - // - //An optional maximum total time lock for the route. This should not exceed - //lnd's `--max-cltv-expiry` setting. If zero, then the value of - //`--max-cltv-expiry` is enforced. - CltvLimit int32 `protobuf:"varint,9,opt,name=cltv_limit,json=cltvLimit,proto3" json:"cltv_limit,omitempty"` - // - //Optional route hints to reach the destination through private channels. - RouteHints []*lnrpc.RouteHint `protobuf:"bytes,10,rep,name=route_hints,json=routeHints,proto3" json:"route_hints,omitempty"` - // - //An optional field that can be used to pass an arbitrary set of TLV records - //to a peer which understands the new records. This can be used to pass - //application specific data during the payment attempt. Record types are - //required to be in the custom range >= 65536. When using REST, the values - //must be encoded as base64. - DestCustomRecords map[uint64][]byte `protobuf:"bytes,11,rep,name=dest_custom_records,json=destCustomRecords,proto3" json:"dest_custom_records,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // If set, circular payments to self are permitted. - AllowSelfPayment bool `protobuf:"varint,15,opt,name=allow_self_payment,json=allowSelfPayment,proto3" json:"allow_self_payment,omitempty"` - // - //Features assumed to be supported by the final node. All transitive feature - //dependencies must also be set properly. For a given feature bit pair, either - //optional or remote may be set, but not both. If this field is nil or empty, - //the router will try to load destination features from the graph as a - //fallback. 
- DestFeatures []lnrpc.FeatureBit `protobuf:"varint,16,rep,packed,name=dest_features,json=destFeatures,proto3,enum=lnrpc.FeatureBit" json:"dest_features,omitempty"` - // - //The maximum number of partial payments that may be use to complete the full - //amount. - MaxParts uint32 `protobuf:"varint,17,opt,name=max_parts,json=maxParts,proto3" json:"max_parts,omitempty"` - // - //If set, only the final payment update is streamed back. Intermediate updates - //that show which htlcs are still in flight are suppressed. - NoInflightUpdates bool `protobuf:"varint,18,opt,name=no_inflight_updates,json=noInflightUpdates,proto3" json:"no_inflight_updates,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SendPaymentRequest) Reset() { *m = SendPaymentRequest{} } -func (m *SendPaymentRequest) String() string { return proto.CompactTextString(m) } -func (*SendPaymentRequest) ProtoMessage() {} -func (*SendPaymentRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7a0613f69d37b0a5, []int{0} -} - -func (m *SendPaymentRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SendPaymentRequest.Unmarshal(m, b) -} -func (m *SendPaymentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SendPaymentRequest.Marshal(b, m, deterministic) -} -func (m *SendPaymentRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SendPaymentRequest.Merge(m, src) -} -func (m *SendPaymentRequest) XXX_Size() int { - return xxx_messageInfo_SendPaymentRequest.Size(m) -} -func (m *SendPaymentRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SendPaymentRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_SendPaymentRequest proto.InternalMessageInfo - -func (m *SendPaymentRequest) GetDest() []byte { - if m != nil { - return m.Dest - } - return nil -} - -func (m *SendPaymentRequest) GetAmt() int64 { - if m != nil { - return m.Amt - } - return 0 -} - -func (m 
*SendPaymentRequest) GetAmtMsat() int64 { - if m != nil { - return m.AmtMsat - } - return 0 -} - -func (m *SendPaymentRequest) GetPaymentHash() []byte { - if m != nil { - return m.PaymentHash - } - return nil -} - -func (m *SendPaymentRequest) GetFinalCltvDelta() int32 { - if m != nil { - return m.FinalCltvDelta - } - return 0 -} - -func (m *SendPaymentRequest) GetPaymentRequest() string { - if m != nil { - return m.PaymentRequest - } - return "" -} - -func (m *SendPaymentRequest) GetTimeoutSeconds() int32 { - if m != nil { - return m.TimeoutSeconds - } - return 0 -} - -func (m *SendPaymentRequest) GetFeeLimitSat() int64 { - if m != nil { - return m.FeeLimitSat - } - return 0 -} - -func (m *SendPaymentRequest) GetFeeLimitMsat() int64 { - if m != nil { - return m.FeeLimitMsat - } - return 0 -} - -// Deprecated: Do not use. -func (m *SendPaymentRequest) GetOutgoingChanId() uint64 { - if m != nil { - return m.OutgoingChanId - } - return 0 -} - -func (m *SendPaymentRequest) GetOutgoingChanIds() []uint64 { - if m != nil { - return m.OutgoingChanIds - } - return nil -} - -func (m *SendPaymentRequest) GetLastHopPubkey() []byte { - if m != nil { - return m.LastHopPubkey - } - return nil -} - -func (m *SendPaymentRequest) GetCltvLimit() int32 { - if m != nil { - return m.CltvLimit - } - return 0 -} - -func (m *SendPaymentRequest) GetRouteHints() []*lnrpc.RouteHint { - if m != nil { - return m.RouteHints - } - return nil -} - -func (m *SendPaymentRequest) GetDestCustomRecords() map[uint64][]byte { - if m != nil { - return m.DestCustomRecords - } - return nil -} - -func (m *SendPaymentRequest) GetAllowSelfPayment() bool { - if m != nil { - return m.AllowSelfPayment - } - return false -} - -func (m *SendPaymentRequest) GetDestFeatures() []lnrpc.FeatureBit { - if m != nil { - return m.DestFeatures - } - return nil -} - -func (m *SendPaymentRequest) GetMaxParts() uint32 { - if m != nil { - return m.MaxParts - } - return 0 -} - -func (m *SendPaymentRequest) GetNoInflightUpdates() 
bool { - if m != nil { - return m.NoInflightUpdates - } - return false -} - -type TrackPaymentRequest struct { - // The hash of the payment to look up. - PaymentHash []byte `protobuf:"bytes,1,opt,name=payment_hash,json=paymentHash,proto3" json:"payment_hash,omitempty"` - // - //If set, only the final payment update is streamed back. Intermediate updates - //that show which htlcs are still in flight are suppressed. - NoInflightUpdates bool `protobuf:"varint,2,opt,name=no_inflight_updates,json=noInflightUpdates,proto3" json:"no_inflight_updates,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *TrackPaymentRequest) Reset() { *m = TrackPaymentRequest{} } -func (m *TrackPaymentRequest) String() string { return proto.CompactTextString(m) } -func (*TrackPaymentRequest) ProtoMessage() {} -func (*TrackPaymentRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7a0613f69d37b0a5, []int{1} -} - -func (m *TrackPaymentRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_TrackPaymentRequest.Unmarshal(m, b) -} -func (m *TrackPaymentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_TrackPaymentRequest.Marshal(b, m, deterministic) -} -func (m *TrackPaymentRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_TrackPaymentRequest.Merge(m, src) -} -func (m *TrackPaymentRequest) XXX_Size() int { - return xxx_messageInfo_TrackPaymentRequest.Size(m) -} -func (m *TrackPaymentRequest) XXX_DiscardUnknown() { - xxx_messageInfo_TrackPaymentRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_TrackPaymentRequest proto.InternalMessageInfo - -func (m *TrackPaymentRequest) GetPaymentHash() []byte { - if m != nil { - return m.PaymentHash - } - return nil -} - -func (m *TrackPaymentRequest) GetNoInflightUpdates() bool { - if m != nil { - return m.NoInflightUpdates - } - return false -} - -type RouteFeeRequest struct { - // - //The 
destination once wishes to obtain a routing fee quote to. - Dest []byte `protobuf:"bytes,1,opt,name=dest,proto3" json:"dest,omitempty"` - // - //The amount one wishes to send to the target destination. - AmtSat int64 `protobuf:"varint,2,opt,name=amt_sat,json=amtSat,proto3" json:"amt_sat,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RouteFeeRequest) Reset() { *m = RouteFeeRequest{} } -func (m *RouteFeeRequest) String() string { return proto.CompactTextString(m) } -func (*RouteFeeRequest) ProtoMessage() {} -func (*RouteFeeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7a0613f69d37b0a5, []int{2} -} - -func (m *RouteFeeRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RouteFeeRequest.Unmarshal(m, b) -} -func (m *RouteFeeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RouteFeeRequest.Marshal(b, m, deterministic) -} -func (m *RouteFeeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_RouteFeeRequest.Merge(m, src) -} -func (m *RouteFeeRequest) XXX_Size() int { - return xxx_messageInfo_RouteFeeRequest.Size(m) -} -func (m *RouteFeeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_RouteFeeRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_RouteFeeRequest proto.InternalMessageInfo - -func (m *RouteFeeRequest) GetDest() []byte { - if m != nil { - return m.Dest - } - return nil -} - -func (m *RouteFeeRequest) GetAmtSat() int64 { - if m != nil { - return m.AmtSat - } - return 0 -} - -type RouteFeeResponse struct { - // - //A lower bound of the estimated fee to the target destination within the - //network, expressed in milli-satoshis. - RoutingFeeMsat int64 `protobuf:"varint,1,opt,name=routing_fee_msat,json=routingFeeMsat,proto3" json:"routing_fee_msat,omitempty"` - // - //An estimate of the worst case time delay that can occur. 
Note that callers - //will still need to factor in the final CLTV delta of the last hop into this - //value. - TimeLockDelay int64 `protobuf:"varint,2,opt,name=time_lock_delay,json=timeLockDelay,proto3" json:"time_lock_delay,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RouteFeeResponse) Reset() { *m = RouteFeeResponse{} } -func (m *RouteFeeResponse) String() string { return proto.CompactTextString(m) } -func (*RouteFeeResponse) ProtoMessage() {} -func (*RouteFeeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7a0613f69d37b0a5, []int{3} -} - -func (m *RouteFeeResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RouteFeeResponse.Unmarshal(m, b) -} -func (m *RouteFeeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RouteFeeResponse.Marshal(b, m, deterministic) -} -func (m *RouteFeeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_RouteFeeResponse.Merge(m, src) -} -func (m *RouteFeeResponse) XXX_Size() int { - return xxx_messageInfo_RouteFeeResponse.Size(m) -} -func (m *RouteFeeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_RouteFeeResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_RouteFeeResponse proto.InternalMessageInfo - -func (m *RouteFeeResponse) GetRoutingFeeMsat() int64 { - if m != nil { - return m.RoutingFeeMsat - } - return 0 -} - -func (m *RouteFeeResponse) GetTimeLockDelay() int64 { - if m != nil { - return m.TimeLockDelay - } - return 0 -} - -type SendToRouteRequest struct { - // The payment hash to use for the HTLC. - PaymentHash []byte `protobuf:"bytes,1,opt,name=payment_hash,json=paymentHash,proto3" json:"payment_hash,omitempty"` - // Route that should be used to attempt to complete the payment. 
- Route *lnrpc.Route `protobuf:"bytes,2,opt,name=route,proto3" json:"route,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SendToRouteRequest) Reset() { *m = SendToRouteRequest{} } -func (m *SendToRouteRequest) String() string { return proto.CompactTextString(m) } -func (*SendToRouteRequest) ProtoMessage() {} -func (*SendToRouteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7a0613f69d37b0a5, []int{4} -} - -func (m *SendToRouteRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SendToRouteRequest.Unmarshal(m, b) -} -func (m *SendToRouteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SendToRouteRequest.Marshal(b, m, deterministic) -} -func (m *SendToRouteRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SendToRouteRequest.Merge(m, src) -} -func (m *SendToRouteRequest) XXX_Size() int { - return xxx_messageInfo_SendToRouteRequest.Size(m) -} -func (m *SendToRouteRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SendToRouteRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_SendToRouteRequest proto.InternalMessageInfo - -func (m *SendToRouteRequest) GetPaymentHash() []byte { - if m != nil { - return m.PaymentHash - } - return nil -} - -func (m *SendToRouteRequest) GetRoute() *lnrpc.Route { - if m != nil { - return m.Route - } - return nil -} - -type SendToRouteResponse struct { - // The preimage obtained by making the payment. - Preimage []byte `protobuf:"bytes,1,opt,name=preimage,proto3" json:"preimage,omitempty"` - // The failure message in case the payment failed. 
- Failure *lnrpc.Failure `protobuf:"bytes,2,opt,name=failure,proto3" json:"failure,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SendToRouteResponse) Reset() { *m = SendToRouteResponse{} } -func (m *SendToRouteResponse) String() string { return proto.CompactTextString(m) } -func (*SendToRouteResponse) ProtoMessage() {} -func (*SendToRouteResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7a0613f69d37b0a5, []int{5} -} - -func (m *SendToRouteResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SendToRouteResponse.Unmarshal(m, b) -} -func (m *SendToRouteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SendToRouteResponse.Marshal(b, m, deterministic) -} -func (m *SendToRouteResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_SendToRouteResponse.Merge(m, src) -} -func (m *SendToRouteResponse) XXX_Size() int { - return xxx_messageInfo_SendToRouteResponse.Size(m) -} -func (m *SendToRouteResponse) XXX_DiscardUnknown() { - xxx_messageInfo_SendToRouteResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_SendToRouteResponse proto.InternalMessageInfo - -func (m *SendToRouteResponse) GetPreimage() []byte { - if m != nil { - return m.Preimage - } - return nil -} - -func (m *SendToRouteResponse) GetFailure() *lnrpc.Failure { - if m != nil { - return m.Failure - } - return nil -} - -type ResetMissionControlRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ResetMissionControlRequest) Reset() { *m = ResetMissionControlRequest{} } -func (m *ResetMissionControlRequest) String() string { return proto.CompactTextString(m) } -func (*ResetMissionControlRequest) ProtoMessage() {} -func (*ResetMissionControlRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7a0613f69d37b0a5, []int{6} -} - -func (m 
*ResetMissionControlRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ResetMissionControlRequest.Unmarshal(m, b) -} -func (m *ResetMissionControlRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ResetMissionControlRequest.Marshal(b, m, deterministic) -} -func (m *ResetMissionControlRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResetMissionControlRequest.Merge(m, src) -} -func (m *ResetMissionControlRequest) XXX_Size() int { - return xxx_messageInfo_ResetMissionControlRequest.Size(m) -} -func (m *ResetMissionControlRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ResetMissionControlRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ResetMissionControlRequest proto.InternalMessageInfo - -type ResetMissionControlResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ResetMissionControlResponse) Reset() { *m = ResetMissionControlResponse{} } -func (m *ResetMissionControlResponse) String() string { return proto.CompactTextString(m) } -func (*ResetMissionControlResponse) ProtoMessage() {} -func (*ResetMissionControlResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7a0613f69d37b0a5, []int{7} -} - -func (m *ResetMissionControlResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ResetMissionControlResponse.Unmarshal(m, b) -} -func (m *ResetMissionControlResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ResetMissionControlResponse.Marshal(b, m, deterministic) -} -func (m *ResetMissionControlResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResetMissionControlResponse.Merge(m, src) -} -func (m *ResetMissionControlResponse) XXX_Size() int { - return xxx_messageInfo_ResetMissionControlResponse.Size(m) -} -func (m *ResetMissionControlResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ResetMissionControlResponse.DiscardUnknown(m) -} - 
-var xxx_messageInfo_ResetMissionControlResponse proto.InternalMessageInfo - -type QueryMissionControlRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *QueryMissionControlRequest) Reset() { *m = QueryMissionControlRequest{} } -func (m *QueryMissionControlRequest) String() string { return proto.CompactTextString(m) } -func (*QueryMissionControlRequest) ProtoMessage() {} -func (*QueryMissionControlRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7a0613f69d37b0a5, []int{8} -} - -func (m *QueryMissionControlRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_QueryMissionControlRequest.Unmarshal(m, b) -} -func (m *QueryMissionControlRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_QueryMissionControlRequest.Marshal(b, m, deterministic) -} -func (m *QueryMissionControlRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryMissionControlRequest.Merge(m, src) -} -func (m *QueryMissionControlRequest) XXX_Size() int { - return xxx_messageInfo_QueryMissionControlRequest.Size(m) -} -func (m *QueryMissionControlRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryMissionControlRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryMissionControlRequest proto.InternalMessageInfo - -// QueryMissionControlResponse contains mission control state. -type QueryMissionControlResponse struct { - // Node pair-level mission control state. 
- Pairs []*PairHistory `protobuf:"bytes,2,rep,name=pairs,proto3" json:"pairs,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *QueryMissionControlResponse) Reset() { *m = QueryMissionControlResponse{} } -func (m *QueryMissionControlResponse) String() string { return proto.CompactTextString(m) } -func (*QueryMissionControlResponse) ProtoMessage() {} -func (*QueryMissionControlResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7a0613f69d37b0a5, []int{9} -} - -func (m *QueryMissionControlResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_QueryMissionControlResponse.Unmarshal(m, b) -} -func (m *QueryMissionControlResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_QueryMissionControlResponse.Marshal(b, m, deterministic) -} -func (m *QueryMissionControlResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryMissionControlResponse.Merge(m, src) -} -func (m *QueryMissionControlResponse) XXX_Size() int { - return xxx_messageInfo_QueryMissionControlResponse.Size(m) -} -func (m *QueryMissionControlResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryMissionControlResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryMissionControlResponse proto.InternalMessageInfo - -func (m *QueryMissionControlResponse) GetPairs() []*PairHistory { - if m != nil { - return m.Pairs - } - return nil -} - -// PairHistory contains the mission control state for a particular node pair. -type PairHistory struct { - // The source node pubkey of the pair. - NodeFrom []byte `protobuf:"bytes,1,opt,name=node_from,json=nodeFrom,proto3" json:"node_from,omitempty"` - // The destination node pubkey of the pair. 
- NodeTo []byte `protobuf:"bytes,2,opt,name=node_to,json=nodeTo,proto3" json:"node_to,omitempty"` - History *PairData `protobuf:"bytes,7,opt,name=history,proto3" json:"history,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PairHistory) Reset() { *m = PairHistory{} } -func (m *PairHistory) String() string { return proto.CompactTextString(m) } -func (*PairHistory) ProtoMessage() {} -func (*PairHistory) Descriptor() ([]byte, []int) { - return fileDescriptor_7a0613f69d37b0a5, []int{10} -} - -func (m *PairHistory) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PairHistory.Unmarshal(m, b) -} -func (m *PairHistory) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PairHistory.Marshal(b, m, deterministic) -} -func (m *PairHistory) XXX_Merge(src proto.Message) { - xxx_messageInfo_PairHistory.Merge(m, src) -} -func (m *PairHistory) XXX_Size() int { - return xxx_messageInfo_PairHistory.Size(m) -} -func (m *PairHistory) XXX_DiscardUnknown() { - xxx_messageInfo_PairHistory.DiscardUnknown(m) -} - -var xxx_messageInfo_PairHistory proto.InternalMessageInfo - -func (m *PairHistory) GetNodeFrom() []byte { - if m != nil { - return m.NodeFrom - } - return nil -} - -func (m *PairHistory) GetNodeTo() []byte { - if m != nil { - return m.NodeTo - } - return nil -} - -func (m *PairHistory) GetHistory() *PairData { - if m != nil { - return m.History - } - return nil -} - -type PairData struct { - // Time of last failure. - FailTime int64 `protobuf:"varint,1,opt,name=fail_time,json=failTime,proto3" json:"fail_time,omitempty"` - // - //Lowest amount that failed to forward rounded to whole sats. This may be - //set to zero if the failure is independent of amount. - FailAmtSat int64 `protobuf:"varint,2,opt,name=fail_amt_sat,json=failAmtSat,proto3" json:"fail_amt_sat,omitempty"` - // - //Lowest amount that failed to forward in millisats. 
This may be - //set to zero if the failure is independent of amount. - FailAmtMsat int64 `protobuf:"varint,4,opt,name=fail_amt_msat,json=failAmtMsat,proto3" json:"fail_amt_msat,omitempty"` - // Time of last success. - SuccessTime int64 `protobuf:"varint,5,opt,name=success_time,json=successTime,proto3" json:"success_time,omitempty"` - // Highest amount that we could successfully forward rounded to whole sats. - SuccessAmtSat int64 `protobuf:"varint,6,opt,name=success_amt_sat,json=successAmtSat,proto3" json:"success_amt_sat,omitempty"` - // Highest amount that we could successfully forward in millisats. - SuccessAmtMsat int64 `protobuf:"varint,7,opt,name=success_amt_msat,json=successAmtMsat,proto3" json:"success_amt_msat,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PairData) Reset() { *m = PairData{} } -func (m *PairData) String() string { return proto.CompactTextString(m) } -func (*PairData) ProtoMessage() {} -func (*PairData) Descriptor() ([]byte, []int) { - return fileDescriptor_7a0613f69d37b0a5, []int{11} -} - -func (m *PairData) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PairData.Unmarshal(m, b) -} -func (m *PairData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PairData.Marshal(b, m, deterministic) -} -func (m *PairData) XXX_Merge(src proto.Message) { - xxx_messageInfo_PairData.Merge(m, src) -} -func (m *PairData) XXX_Size() int { - return xxx_messageInfo_PairData.Size(m) -} -func (m *PairData) XXX_DiscardUnknown() { - xxx_messageInfo_PairData.DiscardUnknown(m) -} - -var xxx_messageInfo_PairData proto.InternalMessageInfo - -func (m *PairData) GetFailTime() int64 { - if m != nil { - return m.FailTime - } - return 0 -} - -func (m *PairData) GetFailAmtSat() int64 { - if m != nil { - return m.FailAmtSat - } - return 0 -} - -func (m *PairData) GetFailAmtMsat() int64 { - if m != nil { - return m.FailAmtMsat - } - return 
0 -} - -func (m *PairData) GetSuccessTime() int64 { - if m != nil { - return m.SuccessTime - } - return 0 -} - -func (m *PairData) GetSuccessAmtSat() int64 { - if m != nil { - return m.SuccessAmtSat - } - return 0 -} - -func (m *PairData) GetSuccessAmtMsat() int64 { - if m != nil { - return m.SuccessAmtMsat - } - return 0 -} - -type QueryProbabilityRequest struct { - // The source node pubkey of the pair. - FromNode []byte `protobuf:"bytes,1,opt,name=from_node,json=fromNode,proto3" json:"from_node,omitempty"` - // The destination node pubkey of the pair. - ToNode []byte `protobuf:"bytes,2,opt,name=to_node,json=toNode,proto3" json:"to_node,omitempty"` - // The amount for which to calculate a probability. - AmtMsat int64 `protobuf:"varint,3,opt,name=amt_msat,json=amtMsat,proto3" json:"amt_msat,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *QueryProbabilityRequest) Reset() { *m = QueryProbabilityRequest{} } -func (m *QueryProbabilityRequest) String() string { return proto.CompactTextString(m) } -func (*QueryProbabilityRequest) ProtoMessage() {} -func (*QueryProbabilityRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7a0613f69d37b0a5, []int{12} -} - -func (m *QueryProbabilityRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_QueryProbabilityRequest.Unmarshal(m, b) -} -func (m *QueryProbabilityRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_QueryProbabilityRequest.Marshal(b, m, deterministic) -} -func (m *QueryProbabilityRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryProbabilityRequest.Merge(m, src) -} -func (m *QueryProbabilityRequest) XXX_Size() int { - return xxx_messageInfo_QueryProbabilityRequest.Size(m) -} -func (m *QueryProbabilityRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryProbabilityRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryProbabilityRequest 
proto.InternalMessageInfo - -func (m *QueryProbabilityRequest) GetFromNode() []byte { - if m != nil { - return m.FromNode - } - return nil -} - -func (m *QueryProbabilityRequest) GetToNode() []byte { - if m != nil { - return m.ToNode - } - return nil -} - -func (m *QueryProbabilityRequest) GetAmtMsat() int64 { - if m != nil { - return m.AmtMsat - } - return 0 -} - -type QueryProbabilityResponse struct { - // The success probability for the requested pair. - Probability float64 `protobuf:"fixed64,1,opt,name=probability,proto3" json:"probability,omitempty"` - // The historical data for the requested pair. - History *PairData `protobuf:"bytes,2,opt,name=history,proto3" json:"history,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *QueryProbabilityResponse) Reset() { *m = QueryProbabilityResponse{} } -func (m *QueryProbabilityResponse) String() string { return proto.CompactTextString(m) } -func (*QueryProbabilityResponse) ProtoMessage() {} -func (*QueryProbabilityResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7a0613f69d37b0a5, []int{13} -} - -func (m *QueryProbabilityResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_QueryProbabilityResponse.Unmarshal(m, b) -} -func (m *QueryProbabilityResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_QueryProbabilityResponse.Marshal(b, m, deterministic) -} -func (m *QueryProbabilityResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryProbabilityResponse.Merge(m, src) -} -func (m *QueryProbabilityResponse) XXX_Size() int { - return xxx_messageInfo_QueryProbabilityResponse.Size(m) -} -func (m *QueryProbabilityResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryProbabilityResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryProbabilityResponse proto.InternalMessageInfo - -func (m *QueryProbabilityResponse) GetProbability() float64 { - if m != nil { - 
return m.Probability - } - return 0 -} - -func (m *QueryProbabilityResponse) GetHistory() *PairData { - if m != nil { - return m.History - } - return nil -} - -type BuildRouteRequest struct { - // - //The amount to send expressed in msat. If set to zero, the minimum routable - //amount is used. - AmtMsat int64 `protobuf:"varint,1,opt,name=amt_msat,json=amtMsat,proto3" json:"amt_msat,omitempty"` - // - //CLTV delta from the current height that should be used for the timelock - //of the final hop - FinalCltvDelta int32 `protobuf:"varint,2,opt,name=final_cltv_delta,json=finalCltvDelta,proto3" json:"final_cltv_delta,omitempty"` - // - //The channel id of the channel that must be taken to the first hop. If zero, - //any channel may be used. - OutgoingChanId uint64 `protobuf:"varint,3,opt,name=outgoing_chan_id,json=outgoingChanId,proto3" json:"outgoing_chan_id,omitempty"` - // - //A list of hops that defines the route. This does not include the source hop - //pubkey. - HopPubkeys [][]byte `protobuf:"bytes,4,rep,name=hop_pubkeys,json=hopPubkeys,proto3" json:"hop_pubkeys,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BuildRouteRequest) Reset() { *m = BuildRouteRequest{} } -func (m *BuildRouteRequest) String() string { return proto.CompactTextString(m) } -func (*BuildRouteRequest) ProtoMessage() {} -func (*BuildRouteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7a0613f69d37b0a5, []int{14} -} - -func (m *BuildRouteRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BuildRouteRequest.Unmarshal(m, b) -} -func (m *BuildRouteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BuildRouteRequest.Marshal(b, m, deterministic) -} -func (m *BuildRouteRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_BuildRouteRequest.Merge(m, src) -} -func (m *BuildRouteRequest) XXX_Size() int { - return 
xxx_messageInfo_BuildRouteRequest.Size(m) -} -func (m *BuildRouteRequest) XXX_DiscardUnknown() { - xxx_messageInfo_BuildRouteRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_BuildRouteRequest proto.InternalMessageInfo - -func (m *BuildRouteRequest) GetAmtMsat() int64 { - if m != nil { - return m.AmtMsat - } - return 0 -} - -func (m *BuildRouteRequest) GetFinalCltvDelta() int32 { - if m != nil { - return m.FinalCltvDelta - } - return 0 -} - -func (m *BuildRouteRequest) GetOutgoingChanId() uint64 { - if m != nil { - return m.OutgoingChanId - } - return 0 -} - -func (m *BuildRouteRequest) GetHopPubkeys() [][]byte { - if m != nil { - return m.HopPubkeys - } - return nil -} - -type BuildRouteResponse struct { - // - //Fully specified route that can be used to execute the payment. - Route *lnrpc.Route `protobuf:"bytes,1,opt,name=route,proto3" json:"route,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BuildRouteResponse) Reset() { *m = BuildRouteResponse{} } -func (m *BuildRouteResponse) String() string { return proto.CompactTextString(m) } -func (*BuildRouteResponse) ProtoMessage() {} -func (*BuildRouteResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7a0613f69d37b0a5, []int{15} -} - -func (m *BuildRouteResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BuildRouteResponse.Unmarshal(m, b) -} -func (m *BuildRouteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BuildRouteResponse.Marshal(b, m, deterministic) -} -func (m *BuildRouteResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_BuildRouteResponse.Merge(m, src) -} -func (m *BuildRouteResponse) XXX_Size() int { - return xxx_messageInfo_BuildRouteResponse.Size(m) -} -func (m *BuildRouteResponse) XXX_DiscardUnknown() { - xxx_messageInfo_BuildRouteResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_BuildRouteResponse proto.InternalMessageInfo - 
-func (m *BuildRouteResponse) GetRoute() *lnrpc.Route { - if m != nil { - return m.Route - } - return nil -} - -type SubscribeHtlcEventsRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SubscribeHtlcEventsRequest) Reset() { *m = SubscribeHtlcEventsRequest{} } -func (m *SubscribeHtlcEventsRequest) String() string { return proto.CompactTextString(m) } -func (*SubscribeHtlcEventsRequest) ProtoMessage() {} -func (*SubscribeHtlcEventsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7a0613f69d37b0a5, []int{16} -} - -func (m *SubscribeHtlcEventsRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SubscribeHtlcEventsRequest.Unmarshal(m, b) -} -func (m *SubscribeHtlcEventsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SubscribeHtlcEventsRequest.Marshal(b, m, deterministic) -} -func (m *SubscribeHtlcEventsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SubscribeHtlcEventsRequest.Merge(m, src) -} -func (m *SubscribeHtlcEventsRequest) XXX_Size() int { - return xxx_messageInfo_SubscribeHtlcEventsRequest.Size(m) -} -func (m *SubscribeHtlcEventsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SubscribeHtlcEventsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_SubscribeHtlcEventsRequest proto.InternalMessageInfo - -// -//HtlcEvent contains the htlc event that was processed. These are served on a -//best-effort basis; events are not persisted, delivery is not guaranteed -//(in the event of a crash in the switch, forward events may be lost) and -//some events may be replayed upon restart. Events consumed from this package -//should be de-duplicated by the htlc's unique combination of incoming and -//outgoing channel id and htlc id. [EXPERIMENTAL] -type HtlcEvent struct { - // - //The short channel id that the incoming htlc arrived at our node on. This - //value is zero for sends. 
- IncomingChannelId uint64 `protobuf:"varint,1,opt,name=incoming_channel_id,json=incomingChannelId,proto3" json:"incoming_channel_id,omitempty"` - // - //The short channel id that the outgoing htlc left our node on. This value - //is zero for receives. - OutgoingChannelId uint64 `protobuf:"varint,2,opt,name=outgoing_channel_id,json=outgoingChannelId,proto3" json:"outgoing_channel_id,omitempty"` - // - //Incoming id is the index of the incoming htlc in the incoming channel. - //This value is zero for sends. - IncomingHtlcId uint64 `protobuf:"varint,3,opt,name=incoming_htlc_id,json=incomingHtlcId,proto3" json:"incoming_htlc_id,omitempty"` - // - //Outgoing id is the index of the outgoing htlc in the outgoing channel. - //This value is zero for receives. - OutgoingHtlcId uint64 `protobuf:"varint,4,opt,name=outgoing_htlc_id,json=outgoingHtlcId,proto3" json:"outgoing_htlc_id,omitempty"` - // - //The time in unix nanoseconds that the event occurred. - TimestampNs uint64 `protobuf:"varint,5,opt,name=timestamp_ns,json=timestampNs,proto3" json:"timestamp_ns,omitempty"` - // - //The event type indicates whether the htlc was part of a send, receive or - //forward. 
- EventType HtlcEvent_EventType `protobuf:"varint,6,opt,name=event_type,json=eventType,proto3,enum=routerrpc.HtlcEvent_EventType" json:"event_type,omitempty"` - // Types that are valid to be assigned to Event: - // *HtlcEvent_ForwardEvent - // *HtlcEvent_ForwardFailEvent - // *HtlcEvent_SettleEvent - // *HtlcEvent_LinkFailEvent - Event isHtlcEvent_Event `protobuf_oneof:"event"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *HtlcEvent) Reset() { *m = HtlcEvent{} } -func (m *HtlcEvent) String() string { return proto.CompactTextString(m) } -func (*HtlcEvent) ProtoMessage() {} -func (*HtlcEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_7a0613f69d37b0a5, []int{17} -} - -func (m *HtlcEvent) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_HtlcEvent.Unmarshal(m, b) -} -func (m *HtlcEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_HtlcEvent.Marshal(b, m, deterministic) -} -func (m *HtlcEvent) XXX_Merge(src proto.Message) { - xxx_messageInfo_HtlcEvent.Merge(m, src) -} -func (m *HtlcEvent) XXX_Size() int { - return xxx_messageInfo_HtlcEvent.Size(m) -} -func (m *HtlcEvent) XXX_DiscardUnknown() { - xxx_messageInfo_HtlcEvent.DiscardUnknown(m) -} - -var xxx_messageInfo_HtlcEvent proto.InternalMessageInfo - -func (m *HtlcEvent) GetIncomingChannelId() uint64 { - if m != nil { - return m.IncomingChannelId - } - return 0 -} - -func (m *HtlcEvent) GetOutgoingChannelId() uint64 { - if m != nil { - return m.OutgoingChannelId - } - return 0 -} - -func (m *HtlcEvent) GetIncomingHtlcId() uint64 { - if m != nil { - return m.IncomingHtlcId - } - return 0 -} - -func (m *HtlcEvent) GetOutgoingHtlcId() uint64 { - if m != nil { - return m.OutgoingHtlcId - } - return 0 -} - -func (m *HtlcEvent) GetTimestampNs() uint64 { - if m != nil { - return m.TimestampNs - } - return 0 -} - -func (m *HtlcEvent) GetEventType() HtlcEvent_EventType { - if m != nil { - 
return m.EventType - } - return HtlcEvent_UNKNOWN -} - -type isHtlcEvent_Event interface { - isHtlcEvent_Event() -} - -type HtlcEvent_ForwardEvent struct { - ForwardEvent *ForwardEvent `protobuf:"bytes,7,opt,name=forward_event,json=forwardEvent,proto3,oneof"` -} - -type HtlcEvent_ForwardFailEvent struct { - ForwardFailEvent *ForwardFailEvent `protobuf:"bytes,8,opt,name=forward_fail_event,json=forwardFailEvent,proto3,oneof"` -} - -type HtlcEvent_SettleEvent struct { - SettleEvent *SettleEvent `protobuf:"bytes,9,opt,name=settle_event,json=settleEvent,proto3,oneof"` -} - -type HtlcEvent_LinkFailEvent struct { - LinkFailEvent *LinkFailEvent `protobuf:"bytes,10,opt,name=link_fail_event,json=linkFailEvent,proto3,oneof"` -} - -func (*HtlcEvent_ForwardEvent) isHtlcEvent_Event() {} - -func (*HtlcEvent_ForwardFailEvent) isHtlcEvent_Event() {} - -func (*HtlcEvent_SettleEvent) isHtlcEvent_Event() {} - -func (*HtlcEvent_LinkFailEvent) isHtlcEvent_Event() {} - -func (m *HtlcEvent) GetEvent() isHtlcEvent_Event { - if m != nil { - return m.Event - } - return nil -} - -func (m *HtlcEvent) GetForwardEvent() *ForwardEvent { - if x, ok := m.GetEvent().(*HtlcEvent_ForwardEvent); ok { - return x.ForwardEvent - } - return nil -} - -func (m *HtlcEvent) GetForwardFailEvent() *ForwardFailEvent { - if x, ok := m.GetEvent().(*HtlcEvent_ForwardFailEvent); ok { - return x.ForwardFailEvent - } - return nil -} - -func (m *HtlcEvent) GetSettleEvent() *SettleEvent { - if x, ok := m.GetEvent().(*HtlcEvent_SettleEvent); ok { - return x.SettleEvent - } - return nil -} - -func (m *HtlcEvent) GetLinkFailEvent() *LinkFailEvent { - if x, ok := m.GetEvent().(*HtlcEvent_LinkFailEvent); ok { - return x.LinkFailEvent - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. 
-func (*HtlcEvent) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*HtlcEvent_ForwardEvent)(nil), - (*HtlcEvent_ForwardFailEvent)(nil), - (*HtlcEvent_SettleEvent)(nil), - (*HtlcEvent_LinkFailEvent)(nil), - } -} - -type HtlcInfo struct { - // The timelock on the incoming htlc. - IncomingTimelock uint32 `protobuf:"varint,1,opt,name=incoming_timelock,json=incomingTimelock,proto3" json:"incoming_timelock,omitempty"` - // The timelock on the outgoing htlc. - OutgoingTimelock uint32 `protobuf:"varint,2,opt,name=outgoing_timelock,json=outgoingTimelock,proto3" json:"outgoing_timelock,omitempty"` - // The amount of the incoming htlc. - IncomingAmtMsat uint64 `protobuf:"varint,3,opt,name=incoming_amt_msat,json=incomingAmtMsat,proto3" json:"incoming_amt_msat,omitempty"` - // The amount of the outgoing htlc. - OutgoingAmtMsat uint64 `protobuf:"varint,4,opt,name=outgoing_amt_msat,json=outgoingAmtMsat,proto3" json:"outgoing_amt_msat,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *HtlcInfo) Reset() { *m = HtlcInfo{} } -func (m *HtlcInfo) String() string { return proto.CompactTextString(m) } -func (*HtlcInfo) ProtoMessage() {} -func (*HtlcInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_7a0613f69d37b0a5, []int{18} -} - -func (m *HtlcInfo) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_HtlcInfo.Unmarshal(m, b) -} -func (m *HtlcInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_HtlcInfo.Marshal(b, m, deterministic) -} -func (m *HtlcInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_HtlcInfo.Merge(m, src) -} -func (m *HtlcInfo) XXX_Size() int { - return xxx_messageInfo_HtlcInfo.Size(m) -} -func (m *HtlcInfo) XXX_DiscardUnknown() { - xxx_messageInfo_HtlcInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_HtlcInfo proto.InternalMessageInfo - -func (m *HtlcInfo) GetIncomingTimelock() uint32 { - if m != nil { - return 
m.IncomingTimelock - } - return 0 -} - -func (m *HtlcInfo) GetOutgoingTimelock() uint32 { - if m != nil { - return m.OutgoingTimelock - } - return 0 -} - -func (m *HtlcInfo) GetIncomingAmtMsat() uint64 { - if m != nil { - return m.IncomingAmtMsat - } - return 0 -} - -func (m *HtlcInfo) GetOutgoingAmtMsat() uint64 { - if m != nil { - return m.OutgoingAmtMsat - } - return 0 -} - -type ForwardEvent struct { - // Info contains details about the htlc that was forwarded. - Info *HtlcInfo `protobuf:"bytes,1,opt,name=info,proto3" json:"info,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ForwardEvent) Reset() { *m = ForwardEvent{} } -func (m *ForwardEvent) String() string { return proto.CompactTextString(m) } -func (*ForwardEvent) ProtoMessage() {} -func (*ForwardEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_7a0613f69d37b0a5, []int{19} -} - -func (m *ForwardEvent) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ForwardEvent.Unmarshal(m, b) -} -func (m *ForwardEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ForwardEvent.Marshal(b, m, deterministic) -} -func (m *ForwardEvent) XXX_Merge(src proto.Message) { - xxx_messageInfo_ForwardEvent.Merge(m, src) -} -func (m *ForwardEvent) XXX_Size() int { - return xxx_messageInfo_ForwardEvent.Size(m) -} -func (m *ForwardEvent) XXX_DiscardUnknown() { - xxx_messageInfo_ForwardEvent.DiscardUnknown(m) -} - -var xxx_messageInfo_ForwardEvent proto.InternalMessageInfo - -func (m *ForwardEvent) GetInfo() *HtlcInfo { - if m != nil { - return m.Info - } - return nil -} - -type ForwardFailEvent struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ForwardFailEvent) Reset() { *m = ForwardFailEvent{} } -func (m *ForwardFailEvent) String() string { return proto.CompactTextString(m) } -func (*ForwardFailEvent) 
ProtoMessage() {} -func (*ForwardFailEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_7a0613f69d37b0a5, []int{20} -} - -func (m *ForwardFailEvent) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ForwardFailEvent.Unmarshal(m, b) -} -func (m *ForwardFailEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ForwardFailEvent.Marshal(b, m, deterministic) -} -func (m *ForwardFailEvent) XXX_Merge(src proto.Message) { - xxx_messageInfo_ForwardFailEvent.Merge(m, src) -} -func (m *ForwardFailEvent) XXX_Size() int { - return xxx_messageInfo_ForwardFailEvent.Size(m) -} -func (m *ForwardFailEvent) XXX_DiscardUnknown() { - xxx_messageInfo_ForwardFailEvent.DiscardUnknown(m) -} - -var xxx_messageInfo_ForwardFailEvent proto.InternalMessageInfo - -type SettleEvent struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SettleEvent) Reset() { *m = SettleEvent{} } -func (m *SettleEvent) String() string { return proto.CompactTextString(m) } -func (*SettleEvent) ProtoMessage() {} -func (*SettleEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_7a0613f69d37b0a5, []int{21} -} - -func (m *SettleEvent) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SettleEvent.Unmarshal(m, b) -} -func (m *SettleEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SettleEvent.Marshal(b, m, deterministic) -} -func (m *SettleEvent) XXX_Merge(src proto.Message) { - xxx_messageInfo_SettleEvent.Merge(m, src) -} -func (m *SettleEvent) XXX_Size() int { - return xxx_messageInfo_SettleEvent.Size(m) -} -func (m *SettleEvent) XXX_DiscardUnknown() { - xxx_messageInfo_SettleEvent.DiscardUnknown(m) -} - -var xxx_messageInfo_SettleEvent proto.InternalMessageInfo - -type LinkFailEvent struct { - // Info contains details about the htlc that we failed. 
- Info *HtlcInfo `protobuf:"bytes,1,opt,name=info,proto3" json:"info,omitempty"` - // FailureCode is the BOLT error code for the failure. - WireFailure lnrpc.Failure_FailureCode `protobuf:"varint,2,opt,name=wire_failure,json=wireFailure,proto3,enum=lnrpc.Failure_FailureCode" json:"wire_failure,omitempty"` - // - //FailureDetail provides additional information about the reason for the - //failure. This detail enriches the information provided by the wire message - //and may be 'no detail' if the wire message requires no additional metadata. - FailureDetail FailureDetail `protobuf:"varint,3,opt,name=failure_detail,json=failureDetail,proto3,enum=routerrpc.FailureDetail" json:"failure_detail,omitempty"` - // A string representation of the link failure. - FailureString string `protobuf:"bytes,4,opt,name=failure_string,json=failureString,proto3" json:"failure_string,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LinkFailEvent) Reset() { *m = LinkFailEvent{} } -func (m *LinkFailEvent) String() string { return proto.CompactTextString(m) } -func (*LinkFailEvent) ProtoMessage() {} -func (*LinkFailEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_7a0613f69d37b0a5, []int{22} -} - -func (m *LinkFailEvent) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LinkFailEvent.Unmarshal(m, b) -} -func (m *LinkFailEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LinkFailEvent.Marshal(b, m, deterministic) -} -func (m *LinkFailEvent) XXX_Merge(src proto.Message) { - xxx_messageInfo_LinkFailEvent.Merge(m, src) -} -func (m *LinkFailEvent) XXX_Size() int { - return xxx_messageInfo_LinkFailEvent.Size(m) -} -func (m *LinkFailEvent) XXX_DiscardUnknown() { - xxx_messageInfo_LinkFailEvent.DiscardUnknown(m) -} - -var xxx_messageInfo_LinkFailEvent proto.InternalMessageInfo - -func (m *LinkFailEvent) GetInfo() *HtlcInfo { - if m != nil { - return 
m.Info - } - return nil -} - -func (m *LinkFailEvent) GetWireFailure() lnrpc.Failure_FailureCode { - if m != nil { - return m.WireFailure - } - return lnrpc.Failure_RESERVED -} - -func (m *LinkFailEvent) GetFailureDetail() FailureDetail { - if m != nil { - return m.FailureDetail - } - return FailureDetail_UNKNOWN -} - -func (m *LinkFailEvent) GetFailureString() string { - if m != nil { - return m.FailureString - } - return "" -} - -type PaymentStatus struct { - // Current state the payment is in. - State PaymentState `protobuf:"varint,1,opt,name=state,proto3,enum=routerrpc.PaymentState" json:"state,omitempty"` - // - //The pre-image of the payment when state is SUCCEEDED. - Preimage []byte `protobuf:"bytes,2,opt,name=preimage,proto3" json:"preimage,omitempty"` - // - //The HTLCs made in attempt to settle the payment [EXPERIMENTAL]. - Htlcs []*lnrpc.HTLCAttempt `protobuf:"bytes,4,rep,name=htlcs,proto3" json:"htlcs,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PaymentStatus) Reset() { *m = PaymentStatus{} } -func (m *PaymentStatus) String() string { return proto.CompactTextString(m) } -func (*PaymentStatus) ProtoMessage() {} -func (*PaymentStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_7a0613f69d37b0a5, []int{23} -} - -func (m *PaymentStatus) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PaymentStatus.Unmarshal(m, b) -} -func (m *PaymentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PaymentStatus.Marshal(b, m, deterministic) -} -func (m *PaymentStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_PaymentStatus.Merge(m, src) -} -func (m *PaymentStatus) XXX_Size() int { - return xxx_messageInfo_PaymentStatus.Size(m) -} -func (m *PaymentStatus) XXX_DiscardUnknown() { - xxx_messageInfo_PaymentStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_PaymentStatus proto.InternalMessageInfo - -func (m 
*PaymentStatus) GetState() PaymentState { - if m != nil { - return m.State - } - return PaymentState_IN_FLIGHT -} - -func (m *PaymentStatus) GetPreimage() []byte { - if m != nil { - return m.Preimage - } - return nil -} - -func (m *PaymentStatus) GetHtlcs() []*lnrpc.HTLCAttempt { - if m != nil { - return m.Htlcs - } - return nil -} - -type CircuitKey struct { - /// The id of the channel that the is part of this circuit. - ChanId uint64 `protobuf:"varint,1,opt,name=chan_id,json=chanId,proto3" json:"chan_id,omitempty"` - /// The index of the incoming htlc in the incoming channel. - HtlcId uint64 `protobuf:"varint,2,opt,name=htlc_id,json=htlcId,proto3" json:"htlc_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CircuitKey) Reset() { *m = CircuitKey{} } -func (m *CircuitKey) String() string { return proto.CompactTextString(m) } -func (*CircuitKey) ProtoMessage() {} -func (*CircuitKey) Descriptor() ([]byte, []int) { - return fileDescriptor_7a0613f69d37b0a5, []int{24} -} - -func (m *CircuitKey) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CircuitKey.Unmarshal(m, b) -} -func (m *CircuitKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CircuitKey.Marshal(b, m, deterministic) -} -func (m *CircuitKey) XXX_Merge(src proto.Message) { - xxx_messageInfo_CircuitKey.Merge(m, src) -} -func (m *CircuitKey) XXX_Size() int { - return xxx_messageInfo_CircuitKey.Size(m) -} -func (m *CircuitKey) XXX_DiscardUnknown() { - xxx_messageInfo_CircuitKey.DiscardUnknown(m) -} - -var xxx_messageInfo_CircuitKey proto.InternalMessageInfo - -func (m *CircuitKey) GetChanId() uint64 { - if m != nil { - return m.ChanId - } - return 0 -} - -func (m *CircuitKey) GetHtlcId() uint64 { - if m != nil { - return m.HtlcId - } - return 0 -} - -type ForwardHtlcInterceptRequest struct { - // - //The key of this forwarded htlc. 
It defines the incoming channel id and - //the index in this channel. - IncomingCircuitKey *CircuitKey `protobuf:"bytes,1,opt,name=incoming_circuit_key,json=incomingCircuitKey,proto3" json:"incoming_circuit_key,omitempty"` - // The incoming htlc amount. - IncomingAmountMsat uint64 `protobuf:"varint,5,opt,name=incoming_amount_msat,json=incomingAmountMsat,proto3" json:"incoming_amount_msat,omitempty"` - // The incoming htlc expiry. - IncomingExpiry uint32 `protobuf:"varint,6,opt,name=incoming_expiry,json=incomingExpiry,proto3" json:"incoming_expiry,omitempty"` - // - //The htlc payment hash. This value is not guaranteed to be unique per - //request. - PaymentHash []byte `protobuf:"bytes,2,opt,name=payment_hash,json=paymentHash,proto3" json:"payment_hash,omitempty"` - // The requested outgoing channel id for this forwarded htlc. Because of - // non-strict forwarding, this isn't necessarily the channel over which the - // packet will be forwarded eventually. A different channel to the same peer - // may be selected as well. - OutgoingRequestedChanId uint64 `protobuf:"varint,7,opt,name=outgoing_requested_chan_id,json=outgoingRequestedChanId,proto3" json:"outgoing_requested_chan_id,omitempty"` - // The outgoing htlc amount. - OutgoingAmountMsat uint64 `protobuf:"varint,3,opt,name=outgoing_amount_msat,json=outgoingAmountMsat,proto3" json:"outgoing_amount_msat,omitempty"` - // The outgoing htlc expiry. - OutgoingExpiry uint32 `protobuf:"varint,4,opt,name=outgoing_expiry,json=outgoingExpiry,proto3" json:"outgoing_expiry,omitempty"` - // Any custom records that were present in the payload. 
- CustomRecords map[uint64][]byte `protobuf:"bytes,8,rep,name=custom_records,json=customRecords,proto3" json:"custom_records,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // The onion blob for the next hop - OnionBlob []byte `protobuf:"bytes,9,opt,name=onion_blob,json=onionBlob,proto3" json:"onion_blob,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ForwardHtlcInterceptRequest) Reset() { *m = ForwardHtlcInterceptRequest{} } -func (m *ForwardHtlcInterceptRequest) String() string { return proto.CompactTextString(m) } -func (*ForwardHtlcInterceptRequest) ProtoMessage() {} -func (*ForwardHtlcInterceptRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7a0613f69d37b0a5, []int{25} -} - -func (m *ForwardHtlcInterceptRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ForwardHtlcInterceptRequest.Unmarshal(m, b) -} -func (m *ForwardHtlcInterceptRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ForwardHtlcInterceptRequest.Marshal(b, m, deterministic) -} -func (m *ForwardHtlcInterceptRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ForwardHtlcInterceptRequest.Merge(m, src) -} -func (m *ForwardHtlcInterceptRequest) XXX_Size() int { - return xxx_messageInfo_ForwardHtlcInterceptRequest.Size(m) -} -func (m *ForwardHtlcInterceptRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ForwardHtlcInterceptRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ForwardHtlcInterceptRequest proto.InternalMessageInfo - -func (m *ForwardHtlcInterceptRequest) GetIncomingCircuitKey() *CircuitKey { - if m != nil { - return m.IncomingCircuitKey - } - return nil -} - -func (m *ForwardHtlcInterceptRequest) GetIncomingAmountMsat() uint64 { - if m != nil { - return m.IncomingAmountMsat - } - return 0 -} - -func (m *ForwardHtlcInterceptRequest) GetIncomingExpiry() uint32 { - if m != 
nil { - return m.IncomingExpiry - } - return 0 -} - -func (m *ForwardHtlcInterceptRequest) GetPaymentHash() []byte { - if m != nil { - return m.PaymentHash - } - return nil -} - -func (m *ForwardHtlcInterceptRequest) GetOutgoingRequestedChanId() uint64 { - if m != nil { - return m.OutgoingRequestedChanId - } - return 0 -} - -func (m *ForwardHtlcInterceptRequest) GetOutgoingAmountMsat() uint64 { - if m != nil { - return m.OutgoingAmountMsat - } - return 0 -} - -func (m *ForwardHtlcInterceptRequest) GetOutgoingExpiry() uint32 { - if m != nil { - return m.OutgoingExpiry - } - return 0 -} - -func (m *ForwardHtlcInterceptRequest) GetCustomRecords() map[uint64][]byte { - if m != nil { - return m.CustomRecords - } - return nil -} - -func (m *ForwardHtlcInterceptRequest) GetOnionBlob() []byte { - if m != nil { - return m.OnionBlob - } - return nil -} - -//* -//ForwardHtlcInterceptResponse enables the caller to resolve a previously hold -//forward. The caller can choose either to: -//- `Resume`: Execute the default behavior (usually forward). -//- `Reject`: Fail the htlc backwards. -//- `Settle`: Settle this htlc with a given preimage. -type ForwardHtlcInterceptResponse struct { - //* - //The key of this forwarded htlc. It defines the incoming channel id and - //the index in this channel. - IncomingCircuitKey *CircuitKey `protobuf:"bytes,1,opt,name=incoming_circuit_key,json=incomingCircuitKey,proto3" json:"incoming_circuit_key,omitempty"` - // The resolve action for this intercepted htlc. - Action ResolveHoldForwardAction `protobuf:"varint,2,opt,name=action,proto3,enum=routerrpc.ResolveHoldForwardAction" json:"action,omitempty"` - // The preimage in case the resolve action is Settle. 
- Preimage []byte `protobuf:"bytes,3,opt,name=preimage,proto3" json:"preimage,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ForwardHtlcInterceptResponse) Reset() { *m = ForwardHtlcInterceptResponse{} } -func (m *ForwardHtlcInterceptResponse) String() string { return proto.CompactTextString(m) } -func (*ForwardHtlcInterceptResponse) ProtoMessage() {} -func (*ForwardHtlcInterceptResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7a0613f69d37b0a5, []int{26} -} - -func (m *ForwardHtlcInterceptResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ForwardHtlcInterceptResponse.Unmarshal(m, b) -} -func (m *ForwardHtlcInterceptResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ForwardHtlcInterceptResponse.Marshal(b, m, deterministic) -} -func (m *ForwardHtlcInterceptResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ForwardHtlcInterceptResponse.Merge(m, src) -} -func (m *ForwardHtlcInterceptResponse) XXX_Size() int { - return xxx_messageInfo_ForwardHtlcInterceptResponse.Size(m) -} -func (m *ForwardHtlcInterceptResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ForwardHtlcInterceptResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ForwardHtlcInterceptResponse proto.InternalMessageInfo - -func (m *ForwardHtlcInterceptResponse) GetIncomingCircuitKey() *CircuitKey { - if m != nil { - return m.IncomingCircuitKey - } - return nil -} - -func (m *ForwardHtlcInterceptResponse) GetAction() ResolveHoldForwardAction { - if m != nil { - return m.Action - } - return ResolveHoldForwardAction_SETTLE -} - -func (m *ForwardHtlcInterceptResponse) GetPreimage() []byte { - if m != nil { - return m.Preimage - } - return nil -} - -func init() { - proto.RegisterEnum("routerrpc.FailureDetail", FailureDetail_name, FailureDetail_value) - proto.RegisterEnum("routerrpc.PaymentState", PaymentState_name, PaymentState_value) - 
proto.RegisterEnum("routerrpc.ResolveHoldForwardAction", ResolveHoldForwardAction_name, ResolveHoldForwardAction_value) - proto.RegisterEnum("routerrpc.HtlcEvent_EventType", HtlcEvent_EventType_name, HtlcEvent_EventType_value) - proto.RegisterType((*SendPaymentRequest)(nil), "routerrpc.SendPaymentRequest") - proto.RegisterMapType((map[uint64][]byte)(nil), "routerrpc.SendPaymentRequest.DestCustomRecordsEntry") - proto.RegisterType((*TrackPaymentRequest)(nil), "routerrpc.TrackPaymentRequest") - proto.RegisterType((*RouteFeeRequest)(nil), "routerrpc.RouteFeeRequest") - proto.RegisterType((*RouteFeeResponse)(nil), "routerrpc.RouteFeeResponse") - proto.RegisterType((*SendToRouteRequest)(nil), "routerrpc.SendToRouteRequest") - proto.RegisterType((*SendToRouteResponse)(nil), "routerrpc.SendToRouteResponse") - proto.RegisterType((*ResetMissionControlRequest)(nil), "routerrpc.ResetMissionControlRequest") - proto.RegisterType((*ResetMissionControlResponse)(nil), "routerrpc.ResetMissionControlResponse") - proto.RegisterType((*QueryMissionControlRequest)(nil), "routerrpc.QueryMissionControlRequest") - proto.RegisterType((*QueryMissionControlResponse)(nil), "routerrpc.QueryMissionControlResponse") - proto.RegisterType((*PairHistory)(nil), "routerrpc.PairHistory") - proto.RegisterType((*PairData)(nil), "routerrpc.PairData") - proto.RegisterType((*QueryProbabilityRequest)(nil), "routerrpc.QueryProbabilityRequest") - proto.RegisterType((*QueryProbabilityResponse)(nil), "routerrpc.QueryProbabilityResponse") - proto.RegisterType((*BuildRouteRequest)(nil), "routerrpc.BuildRouteRequest") - proto.RegisterType((*BuildRouteResponse)(nil), "routerrpc.BuildRouteResponse") - proto.RegisterType((*SubscribeHtlcEventsRequest)(nil), "routerrpc.SubscribeHtlcEventsRequest") - proto.RegisterType((*HtlcEvent)(nil), "routerrpc.HtlcEvent") - proto.RegisterType((*HtlcInfo)(nil), "routerrpc.HtlcInfo") - proto.RegisterType((*ForwardEvent)(nil), "routerrpc.ForwardEvent") - 
proto.RegisterType((*ForwardFailEvent)(nil), "routerrpc.ForwardFailEvent") - proto.RegisterType((*SettleEvent)(nil), "routerrpc.SettleEvent") - proto.RegisterType((*LinkFailEvent)(nil), "routerrpc.LinkFailEvent") - proto.RegisterType((*PaymentStatus)(nil), "routerrpc.PaymentStatus") - proto.RegisterType((*CircuitKey)(nil), "routerrpc.CircuitKey") - proto.RegisterType((*ForwardHtlcInterceptRequest)(nil), "routerrpc.ForwardHtlcInterceptRequest") - proto.RegisterMapType((map[uint64][]byte)(nil), "routerrpc.ForwardHtlcInterceptRequest.CustomRecordsEntry") - proto.RegisterType((*ForwardHtlcInterceptResponse)(nil), "routerrpc.ForwardHtlcInterceptResponse") -} - -func init() { proto.RegisterFile("routerrpc/router.proto", fileDescriptor_7a0613f69d37b0a5) } - -var fileDescriptor_7a0613f69d37b0a5 = []byte{ - // 2600 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x59, 0x4b, 0x73, 0xe3, 0xc6, - 0xb5, 0x36, 0x48, 0x88, 0x22, 0x0f, 0x1f, 0x82, 0x9a, 0xf2, 0x88, 0x97, 0x9a, 0xb1, 0x69, 0xda, - 0x9e, 0x61, 0xcd, 0x1d, 0xcb, 0xba, 0xba, 0xa9, 0xc4, 0x89, 0x1f, 0x31, 0x45, 0x42, 0x23, 0xcc, - 0x50, 0x24, 0xdd, 0xa4, 0xc6, 0x76, 0xbc, 0xe8, 0x40, 0x64, 0x73, 0x88, 0x08, 0x04, 0x18, 0xa0, - 0x39, 0xb6, 0x96, 0xd9, 0xa5, 0xf2, 0x47, 0xb2, 0xcb, 0x2f, 0x48, 0x55, 0xb2, 0xc8, 0xff, 0xc8, - 0x36, 0xfb, 0x54, 0x65, 0x9d, 0xea, 0x07, 0x40, 0x40, 0xa2, 0x66, 0x92, 0x4a, 0x16, 0xf6, 0x10, - 0xdf, 0xf9, 0xfa, 0xf4, 0xe9, 0x3e, 0xaf, 0xee, 0x16, 0xdc, 0x0b, 0xfc, 0x15, 0xa3, 0x41, 0xb0, - 0x9c, 0x7c, 0x2c, 0x7f, 0x1d, 0x2e, 0x03, 0x9f, 0xf9, 0xa8, 0x10, 0xe3, 0xf5, 0x42, 0xb0, 0x9c, - 0x48, 0xb4, 0xf9, 0xf7, 0x1c, 0xa0, 0x11, 0xf5, 0xa6, 0x43, 0xfb, 0x7a, 0x41, 0x3d, 0x86, 0xe9, - 0xaf, 0x57, 0x34, 0x64, 0x08, 0x81, 0x3e, 0xa5, 0x21, 0xab, 0x69, 0x0d, 0xad, 0x55, 0xc2, 0xe2, - 0x37, 0x32, 0x20, 0x6b, 0x2f, 0x58, 0x2d, 0xd3, 0xd0, 0x5a, 0x59, 0xcc, 0x7f, 0xa2, 0xff, 0x81, - 0xbc, 0xbd, 0x60, 0x64, 0x11, 0xda, 0xac, 0x56, 0x12, 0xf0, 
0xb6, 0xbd, 0x60, 0xe7, 0xa1, 0xcd, - 0xd0, 0x7b, 0x50, 0x5a, 0x4a, 0x95, 0x64, 0x6e, 0x87, 0xf3, 0x5a, 0x56, 0x28, 0x2a, 0x2a, 0xec, - 0xcc, 0x0e, 0xe7, 0xa8, 0x05, 0xc6, 0xcc, 0xf1, 0x6c, 0x97, 0x4c, 0x5c, 0xf6, 0x8a, 0x4c, 0xa9, - 0xcb, 0xec, 0x9a, 0xde, 0xd0, 0x5a, 0x5b, 0xb8, 0x22, 0xf0, 0x8e, 0xcb, 0x5e, 0x75, 0x39, 0x8a, - 0x1e, 0xc1, 0x4e, 0xa4, 0x2c, 0x90, 0x06, 0xd6, 0xb6, 0x1a, 0x5a, 0xab, 0x80, 0x2b, 0xcb, 0xb4, - 0xd9, 0x8f, 0x60, 0x87, 0x39, 0x0b, 0xea, 0xaf, 0x18, 0x09, 0xe9, 0xc4, 0xf7, 0xa6, 0x61, 0x2d, - 0x27, 0x35, 0x2a, 0x78, 0x24, 0x51, 0xd4, 0x84, 0xf2, 0x8c, 0x52, 0xe2, 0x3a, 0x0b, 0x87, 0x11, - 0x6e, 0xfe, 0xb6, 0x30, 0xbf, 0x38, 0xa3, 0xb4, 0xc7, 0xb1, 0x91, 0xcd, 0xd0, 0x07, 0x50, 0x59, - 0x73, 0xc4, 0x1a, 0xcb, 0x82, 0x54, 0x8a, 0x48, 0x62, 0xa1, 0x87, 0x60, 0xf8, 0x2b, 0xf6, 0xd2, - 0x77, 0xbc, 0x97, 0x64, 0x32, 0xb7, 0x3d, 0xe2, 0x4c, 0x6b, 0xf9, 0x86, 0xd6, 0xd2, 0x4f, 0xf4, - 0x9a, 0x76, 0xa4, 0xe1, 0x4a, 0x24, 0xed, 0xcc, 0x6d, 0xcf, 0x9a, 0xa2, 0xc7, 0xb0, 0x7b, 0x93, - 0x1f, 0xd6, 0xaa, 0x8d, 0x6c, 0x4b, 0xc7, 0x3b, 0x69, 0x6a, 0x88, 0x1e, 0xc2, 0x8e, 0x6b, 0x87, - 0x8c, 0xcc, 0xfd, 0x25, 0x59, 0xae, 0x2e, 0xaf, 0xe8, 0x75, 0xad, 0x22, 0xf6, 0xb1, 0xcc, 0xe1, - 0x33, 0x7f, 0x39, 0x14, 0x20, 0x7a, 0x00, 0x20, 0xf6, 0x50, 0x98, 0x5a, 0x2b, 0x88, 0x15, 0x17, - 0x38, 0x22, 0xcc, 0x44, 0xff, 0x07, 0x45, 0xe1, 0x7b, 0x32, 0x77, 0x3c, 0x16, 0xd6, 0xa0, 0x91, - 0x6d, 0x15, 0x8f, 0x8d, 0x43, 0xd7, 0xe3, 0x61, 0x80, 0xb9, 0xe4, 0xcc, 0xf1, 0x18, 0x86, 0x20, - 0xfa, 0x19, 0xa2, 0x29, 0x54, 0xb9, 0xcf, 0xc9, 0x64, 0x15, 0x32, 0x7f, 0x41, 0x02, 0x3a, 0xf1, - 0x83, 0x69, 0x58, 0x2b, 0x8a, 0xa1, 0x3f, 0x3a, 0x8c, 0x43, 0xe9, 0xf0, 0x76, 0xec, 0x1c, 0x76, - 0x69, 0xc8, 0x3a, 0x62, 0x1c, 0x96, 0xc3, 0x4c, 0x8f, 0x05, 0xd7, 0x78, 0x77, 0x7a, 0x13, 0x47, - 0x4f, 0x00, 0xd9, 0xae, 0xeb, 0x7f, 0x4f, 0x42, 0xea, 0xce, 0x88, 0xf2, 0x65, 0x6d, 0xa7, 0xa1, - 0xb5, 0xf2, 0xd8, 0x10, 0x92, 0x11, 0x75, 0x67, 0x4a, 0x3d, 0xfa, 0x31, 0x94, 0x85, 0x4d, 0x33, - 
0x6a, 0xb3, 0x55, 0x40, 0xc3, 0x9a, 0xd1, 0xc8, 0xb6, 0x2a, 0xc7, 0xbb, 0x6a, 0x21, 0xa7, 0x12, - 0x3e, 0x71, 0x18, 0x2e, 0x71, 0x9e, 0xfa, 0x0e, 0xd1, 0x01, 0x14, 0x16, 0xf6, 0x0f, 0x64, 0x69, - 0x07, 0x2c, 0xac, 0xed, 0x36, 0xb4, 0x56, 0x19, 0xe7, 0x17, 0xf6, 0x0f, 0x43, 0xfe, 0x8d, 0x0e, - 0xa1, 0xea, 0xf9, 0xc4, 0xf1, 0x66, 0xae, 0xf3, 0x72, 0xce, 0xc8, 0x6a, 0x39, 0xb5, 0x19, 0x0d, - 0x6b, 0x48, 0xd8, 0xb0, 0xeb, 0xf9, 0x96, 0x92, 0x5c, 0x48, 0x41, 0xbd, 0x0b, 0xf7, 0x36, 0xaf, - 0x8f, 0xa7, 0x07, 0x77, 0x10, 0xcf, 0x18, 0x1d, 0xf3, 0x9f, 0x68, 0x0f, 0xb6, 0x5e, 0xd9, 0xee, - 0x8a, 0x8a, 0x94, 0x29, 0x61, 0xf9, 0xf1, 0xb3, 0xcc, 0x27, 0x5a, 0x73, 0x0e, 0xd5, 0x71, 0x60, - 0x4f, 0xae, 0x6e, 0x64, 0xdd, 0xcd, 0xa4, 0xd1, 0x6e, 0x27, 0xcd, 0x1d, 0xf6, 0x66, 0xee, 0xb0, - 0xb7, 0xf9, 0x05, 0xec, 0x08, 0x0f, 0x9f, 0x52, 0xfa, 0xba, 0xdc, 0xde, 0x07, 0x9e, 0xb9, 0x22, - 0x13, 0x64, 0x7e, 0xe7, 0xec, 0x05, 0x4f, 0x82, 0xe6, 0x14, 0x8c, 0xf5, 0xf8, 0x70, 0xe9, 0x7b, - 0x21, 0xe5, 0x89, 0xcb, 0x03, 0x80, 0x47, 0x30, 0x4f, 0x10, 0x91, 0x1a, 0x9a, 0x18, 0x55, 0x51, - 0xf8, 0x29, 0xa5, 0x22, 0x39, 0x1e, 0xca, 0x7c, 0x24, 0xae, 0x3f, 0xb9, 0xe2, 0x19, 0x6e, 0x5f, - 0x2b, 0xf5, 0x65, 0x0e, 0xf7, 0xfc, 0xc9, 0x55, 0x97, 0x83, 0xcd, 0xef, 0x64, 0x11, 0x1a, 0xfb, - 0x62, 0xae, 0x7f, 0x63, 0x3b, 0x9a, 0xb0, 0x25, 0x62, 0x51, 0xa8, 0x2d, 0x1e, 0x97, 0x92, 0x41, - 0x8d, 0xa5, 0xa8, 0xf9, 0x1d, 0x54, 0x53, 0xca, 0xd5, 0x2a, 0xea, 0x90, 0x5f, 0x06, 0xd4, 0x59, - 0xd8, 0x2f, 0xa9, 0xd2, 0x1c, 0x7f, 0xa3, 0x16, 0x6c, 0xcf, 0x6c, 0xc7, 0x5d, 0x05, 0x91, 0xe2, - 0x4a, 0x14, 0x64, 0x12, 0xc5, 0x91, 0xb8, 0x79, 0x1f, 0xea, 0x98, 0x86, 0x94, 0x9d, 0x3b, 0x61, - 0xe8, 0xf8, 0x5e, 0xc7, 0xf7, 0x58, 0xe0, 0xbb, 0x6a, 0x05, 0xcd, 0x07, 0x70, 0xb0, 0x51, 0x2a, - 0x4d, 0xe0, 0x83, 0xbf, 0x5a, 0xd1, 0xe0, 0x7a, 0xf3, 0xe0, 0xaf, 0xe0, 0x60, 0xa3, 0x54, 0xd9, - 0xff, 0x04, 0xb6, 0x96, 0xb6, 0x13, 0x70, 0xdf, 0xf3, 0xa4, 0xbc, 0x97, 0x48, 0xca, 0xa1, 0xed, - 0x04, 0x67, 0x4e, 0xc8, 0xfc, 0xe0, 
0x1a, 0x4b, 0xd2, 0x33, 0x3d, 0xaf, 0x19, 0x99, 0xe6, 0xef, - 0x34, 0x28, 0x26, 0x84, 0x3c, 0x35, 0x3c, 0x7f, 0x4a, 0xc9, 0x2c, 0xf0, 0x17, 0xd1, 0x26, 0x70, - 0xe0, 0x34, 0xf0, 0x17, 0x3c, 0x26, 0x84, 0x90, 0xf9, 0x2a, 0x80, 0x73, 0xfc, 0x73, 0xec, 0xa3, - 0x8f, 0x60, 0x7b, 0x2e, 0x15, 0x88, 0xb2, 0x59, 0x3c, 0xae, 0xde, 0x98, 0xbb, 0x6b, 0x33, 0x1b, - 0x47, 0x9c, 0x67, 0x7a, 0x3e, 0x6b, 0xe8, 0xcf, 0xf4, 0xbc, 0x6e, 0x6c, 0x3d, 0xd3, 0xf3, 0x5b, - 0x46, 0xee, 0x99, 0x9e, 0xcf, 0x19, 0xdb, 0xcd, 0xbf, 0x69, 0x90, 0x8f, 0xd8, 0xdc, 0x12, 0xbe, - 0xa5, 0x84, 0xc7, 0x85, 0x0a, 0xa6, 0x3c, 0x07, 0xc6, 0xce, 0x82, 0xa2, 0x06, 0x94, 0x84, 0x30, - 0x1d, 0xa2, 0xc0, 0xb1, 0xb6, 0x08, 0x53, 0x51, 0xcf, 0x23, 0x86, 0x88, 0x47, 0x5d, 0xd5, 0x73, - 0x49, 0x89, 0x5a, 0x52, 0xb8, 0x9a, 0x4c, 0x68, 0x18, 0xca, 0x59, 0xb6, 0x24, 0x45, 0x61, 0x62, - 0xa2, 0x87, 0xb0, 0x13, 0x51, 0xa2, 0xb9, 0x72, 0x32, 0x5e, 0x15, 0xac, 0xa6, 0x6b, 0x81, 0x91, - 0xe4, 0x2d, 0xd6, 0x1d, 0xa4, 0xb2, 0x26, 0xf2, 0x49, 0xe5, 0xe2, 0x9b, 0xbf, 0x82, 0x7d, 0xe1, - 0xca, 0x61, 0xe0, 0x5f, 0xda, 0x97, 0x8e, 0xeb, 0xb0, 0xeb, 0x28, 0xc8, 0xf9, 0xc2, 0x03, 0x7f, - 0x41, 0xf8, 0xde, 0x46, 0x2e, 0xe0, 0x40, 0xdf, 0x9f, 0x52, 0xee, 0x02, 0xe6, 0x4b, 0x91, 0x72, - 0x01, 0xf3, 0x85, 0x20, 0xd9, 0x79, 0xb3, 0xa9, 0xce, 0xdb, 0xbc, 0x82, 0xda, 0xed, 0xb9, 0x54, - 0xcc, 0x34, 0xa0, 0xb8, 0x5c, 0xc3, 0x62, 0x3a, 0x0d, 0x27, 0xa1, 0xa4, 0x6f, 0x33, 0x6f, 0xf6, - 0x6d, 0xf3, 0xf7, 0x1a, 0xec, 0x9e, 0xac, 0x1c, 0x77, 0x9a, 0x4a, 0xdc, 0xa4, 0x75, 0x5a, 0xfa, - 0x5c, 0xb0, 0xa9, 0xe9, 0x67, 0x36, 0x36, 0xfd, 0x27, 0x1b, 0x1a, 0x6b, 0x56, 0x34, 0xd6, 0xcc, - 0x86, 0xb6, 0xfa, 0x2e, 0x14, 0xd7, 0x5d, 0x32, 0xac, 0xe9, 0x8d, 0x6c, 0xab, 0x84, 0x61, 0x1e, - 0xb5, 0xc8, 0xb0, 0xf9, 0x09, 0xa0, 0xa4, 0xa1, 0x6a, 0x43, 0xe2, 0xfa, 0xa1, 0xdd, 0x5d, 0x3f, - 0xee, 0x43, 0x7d, 0xb4, 0xba, 0x0c, 0x27, 0x81, 0x73, 0x49, 0xcf, 0x98, 0x3b, 0x31, 0x5f, 0x51, - 0x8f, 0x85, 0x51, 0x96, 0xfe, 0x43, 0x87, 0x42, 0x8c, 0xf2, 0xf2, 0xec, 
0x78, 0x13, 0x7f, 0x11, - 0x19, 0xed, 0x51, 0x97, 0xdb, 0x2d, 0x9b, 0xc2, 0x6e, 0x24, 0xea, 0x48, 0x89, 0x35, 0xe5, 0xfc, - 0xd4, 0x22, 0x15, 0x3f, 0x23, 0xf9, 0xc9, 0x35, 0x4a, 0x7e, 0x0b, 0x8c, 0x58, 0xff, 0x9c, 0xb9, - 0x93, 0x78, 0x53, 0x70, 0x25, 0xc2, 0xb9, 0x31, 0x92, 0x19, 0x6b, 0x8e, 0x98, 0xba, 0x64, 0x46, - 0xb8, 0x62, 0xbe, 0x07, 0x25, 0x9e, 0x0f, 0x21, 0xb3, 0x17, 0x4b, 0xe2, 0x85, 0x22, 0x2f, 0x74, - 0x5c, 0x8c, 0xb1, 0x7e, 0x88, 0x3e, 0x07, 0xa0, 0x7c, 0x7d, 0x84, 0x5d, 0x2f, 0xa9, 0x48, 0x89, - 0xca, 0xf1, 0x3b, 0x89, 0xc0, 0x88, 0x37, 0xe0, 0x50, 0xfc, 0x7f, 0x7c, 0xbd, 0xa4, 0xb8, 0x40, - 0xa3, 0x9f, 0xe8, 0x0b, 0x28, 0xcf, 0xfc, 0xe0, 0x7b, 0x3b, 0x98, 0x12, 0x01, 0xaa, 0xb2, 0xb1, - 0x9f, 0xd0, 0x70, 0x2a, 0xe5, 0x62, 0xf8, 0xd9, 0x5b, 0xb8, 0x34, 0x4b, 0x7c, 0xa3, 0xe7, 0x80, - 0xa2, 0xf1, 0x22, 0xcb, 0xa5, 0x92, 0xbc, 0x50, 0x72, 0x70, 0x5b, 0x09, 0x2f, 0xd2, 0x91, 0x22, - 0x63, 0x76, 0x03, 0x43, 0x9f, 0x42, 0x29, 0xa4, 0x8c, 0xb9, 0x54, 0xa9, 0x29, 0x08, 0x35, 0xf7, - 0x52, 0x67, 0x1a, 0x2e, 0x8e, 0x34, 0x14, 0xc3, 0xf5, 0x27, 0x3a, 0x81, 0x1d, 0xd7, 0xf1, 0xae, - 0x92, 0x66, 0x80, 0x18, 0x5f, 0x4b, 0x8c, 0xef, 0x39, 0xde, 0x55, 0xd2, 0x86, 0xb2, 0x9b, 0x04, - 0x9a, 0x9f, 0x41, 0x21, 0xde, 0x25, 0x54, 0x84, 0xed, 0x8b, 0xfe, 0xf3, 0xfe, 0xe0, 0xeb, 0xbe, - 0xf1, 0x16, 0xca, 0x83, 0x3e, 0x32, 0xfb, 0x5d, 0x43, 0xe3, 0x30, 0x36, 0x3b, 0xa6, 0xf5, 0xc2, - 0x34, 0x32, 0xfc, 0xe3, 0x74, 0x80, 0xbf, 0x6e, 0xe3, 0xae, 0x91, 0x3d, 0xd9, 0x86, 0x2d, 0x31, - 0x6f, 0xf3, 0x8f, 0x1a, 0xe4, 0x85, 0x07, 0xbd, 0x99, 0x8f, 0xfe, 0x17, 0xe2, 0xe0, 0x12, 0xc5, - 0x8d, 0x37, 0x5c, 0x11, 0x75, 0x65, 0x1c, 0x07, 0xcc, 0x58, 0xe1, 0x9c, 0x1c, 0x87, 0x46, 0x4c, - 0xce, 0x48, 0x72, 0x24, 0x88, 0xc9, 0x8f, 0x13, 0x9a, 0x53, 0x25, 0x47, 0xc7, 0x3b, 0x91, 0x20, - 0xaa, 0xb0, 0xc9, 0xb3, 0x6d, 0xaa, 0x12, 0x27, 0xce, 0xb6, 0x8a, 0xdb, 0xfc, 0x09, 0x94, 0x92, - 0x3e, 0x47, 0x8f, 0x40, 0x77, 0xbc, 0x99, 0xaf, 0x12, 0xb1, 0x7a, 0x23, 0xb8, 0xf8, 0x22, 0xb1, - 0x20, 0x34, 
0x11, 0x18, 0x37, 0xfd, 0xdc, 0x2c, 0x43, 0x31, 0xe1, 0xb4, 0xe6, 0x5f, 0x35, 0x28, - 0xa7, 0x9c, 0xf0, 0x2f, 0x6b, 0x47, 0x9f, 0x43, 0xe9, 0x7b, 0x27, 0xa0, 0x24, 0xd9, 0xfe, 0x2b, - 0xc7, 0xf5, 0x74, 0xfb, 0x8f, 0xfe, 0xed, 0xf8, 0x53, 0x8a, 0x8b, 0x9c, 0xaf, 0x00, 0xf4, 0x73, - 0xa8, 0xa8, 0x91, 0x64, 0x4a, 0x99, 0xed, 0xb8, 0x62, 0xab, 0x2a, 0xa9, 0xf0, 0x50, 0xdc, 0xae, - 0x90, 0xe3, 0xf2, 0x2c, 0xf9, 0x89, 0x3e, 0x5c, 0x2b, 0x08, 0x59, 0xe0, 0x78, 0x2f, 0xc5, 0xfe, - 0x15, 0x62, 0xda, 0x48, 0x80, 0xbc, 0x91, 0x97, 0xd5, 0xe1, 0x71, 0xc4, 0x6c, 0xb6, 0x0a, 0xd1, - 0x47, 0xb0, 0x15, 0x32, 0x5b, 0x55, 0xb2, 0x4a, 0x2a, 0xb7, 0x12, 0x44, 0x8a, 0x25, 0x2b, 0x75, - 0xfa, 0xc9, 0xdc, 0x3a, 0xfd, 0x6c, 0xf1, 0x8a, 0x21, 0xab, 0x68, 0xf1, 0x18, 0xa9, 0xc5, 0x9f, - 0x8d, 0x7b, 0x9d, 0x36, 0x63, 0x74, 0xb1, 0x64, 0x58, 0x12, 0x54, 0x77, 0xfb, 0x02, 0xa0, 0xe3, - 0x04, 0x93, 0x95, 0xc3, 0x9e, 0xd3, 0x6b, 0xde, 0xb3, 0xa2, 0x72, 0x2d, 0xcb, 0x5e, 0x6e, 0x22, - 0x4b, 0xf4, 0x3e, 0x6c, 0x47, 0x85, 0x48, 0xd6, 0xb7, 0xdc, 0x5c, 0x14, 0xa0, 0xe6, 0x9f, 0x74, - 0x38, 0x50, 0x2e, 0x95, 0xde, 0x60, 0x34, 0x98, 0xd0, 0x65, 0x7c, 0x2c, 0x7e, 0x0a, 0x7b, 0xeb, - 0xa2, 0x2a, 0x27, 0x22, 0xd1, 0x51, 0xbb, 0x78, 0xfc, 0x76, 0x62, 0xa5, 0x6b, 0x33, 0x30, 0x8a, - 0x8b, 0xed, 0xda, 0xb4, 0xa3, 0x84, 0x22, 0x7b, 0xe1, 0xaf, 0x3c, 0x15, 0xa2, 0xb2, 0xe2, 0xa1, - 0x75, 0x38, 0x73, 0x91, 0x88, 0xe8, 0x47, 0x10, 0x07, 0x39, 0xa1, 0x3f, 0x2c, 0x9d, 0xe0, 0x5a, - 0x54, 0xbf, 0xf2, 0xba, 0xdc, 0x9a, 0x02, 0xbd, 0x75, 0x56, 0xcd, 0xdc, 0x3e, 0xab, 0x7e, 0x0a, - 0xf5, 0x38, 0x3b, 0xd4, 0x35, 0x96, 0x4e, 0xe3, 0xd6, 0xb6, 0x2d, 0x6c, 0xd8, 0x8f, 0x18, 0x38, - 0x22, 0xa8, 0xfe, 0x76, 0x04, 0x7b, 0x89, 0xd4, 0x5a, 0x9b, 0x2e, 0x33, 0x11, 0xad, 0xb3, 0x2b, - 0x69, 0x7a, 0x3c, 0x42, 0x99, 0xae, 0x4b, 0xd3, 0x23, 0x58, 0x99, 0xfe, 0x4b, 0xa8, 0xdc, 0xb8, - 0xe6, 0xe5, 0x85, 0xdf, 0x7f, 0x7a, 0xbb, 0xb2, 0x6e, 0x72, 0xcf, 0xe1, 0x86, 0xbb, 0x5e, 0x79, - 0x92, 0xba, 0xe7, 0x3d, 0x00, 0xf0, 0x3d, 0xc7, 
0xf7, 0xc8, 0xa5, 0xeb, 0x5f, 0x8a, 0x82, 0x5b, - 0xc2, 0x05, 0x81, 0x9c, 0xb8, 0xfe, 0x65, 0xfd, 0x4b, 0x40, 0xff, 0xe1, 0x7d, 0xea, 0xcf, 0x1a, - 0xdc, 0xdf, 0x6c, 0xa2, 0xea, 0xf3, 0xff, 0xb5, 0x10, 0xfa, 0x14, 0x72, 0xf6, 0x84, 0x39, 0xbe, - 0xa7, 0x2a, 0xc3, 0xfb, 0x89, 0xa1, 0x98, 0x86, 0xbe, 0xfb, 0x8a, 0x9e, 0xf9, 0xee, 0x54, 0x19, - 0xd3, 0x16, 0x54, 0xac, 0x86, 0xa4, 0x92, 0x2e, 0x9b, 0x4e, 0xba, 0xc7, 0xbf, 0xd1, 0xa1, 0x9c, - 0xaa, 0x0c, 0xe9, 0xd6, 0x50, 0x86, 0x42, 0x7f, 0x40, 0xba, 0xe6, 0xb8, 0x6d, 0xf5, 0x0c, 0x0d, - 0x19, 0x50, 0x1a, 0xf4, 0xad, 0x41, 0x9f, 0x74, 0xcd, 0xce, 0xa0, 0xcb, 0x9b, 0xc4, 0xdb, 0xb0, - 0xdb, 0xb3, 0xfa, 0xcf, 0x49, 0x7f, 0x30, 0x26, 0x66, 0xcf, 0x7a, 0x6a, 0x9d, 0xf4, 0x4c, 0x23, - 0x8b, 0xf6, 0xc0, 0x18, 0xf4, 0x49, 0xe7, 0xac, 0x6d, 0xf5, 0xc9, 0xd8, 0x3a, 0x37, 0x07, 0x17, - 0x63, 0x43, 0xe7, 0x28, 0xcf, 0x66, 0x62, 0x7e, 0xd3, 0x31, 0xcd, 0xee, 0x88, 0x9c, 0xb7, 0xbf, - 0x31, 0xb6, 0x50, 0x0d, 0xf6, 0xac, 0xfe, 0xe8, 0xe2, 0xf4, 0xd4, 0xea, 0x58, 0x66, 0x7f, 0x4c, - 0x4e, 0xda, 0xbd, 0x76, 0xbf, 0x63, 0x1a, 0x39, 0x74, 0x0f, 0x90, 0xd5, 0xef, 0x0c, 0xce, 0x87, - 0x3d, 0x73, 0x6c, 0x92, 0xa8, 0x19, 0x6d, 0xa3, 0x2a, 0xec, 0x08, 0x3d, 0xed, 0x6e, 0x97, 0x9c, - 0xb6, 0xad, 0x9e, 0xd9, 0x35, 0xf2, 0xdc, 0x12, 0xc5, 0x18, 0x91, 0xae, 0x35, 0x6a, 0x9f, 0x70, - 0xb8, 0xc0, 0xe7, 0xb4, 0xfa, 0x2f, 0x06, 0x56, 0xc7, 0x24, 0x1d, 0xae, 0x96, 0xa3, 0xc0, 0xc9, - 0x11, 0x7a, 0xd1, 0xef, 0x9a, 0x78, 0xd8, 0xb6, 0xba, 0x46, 0x11, 0x1d, 0xc0, 0x7e, 0x04, 0x9b, - 0xdf, 0x0c, 0x2d, 0xfc, 0x2d, 0x19, 0x0f, 0x06, 0x64, 0x34, 0x18, 0xf4, 0x8d, 0x52, 0x52, 0x13, - 0x5f, 0xed, 0x60, 0x68, 0xf6, 0x8d, 0x32, 0xda, 0x87, 0xea, 0xf9, 0x70, 0x48, 0x22, 0x49, 0xb4, - 0xd8, 0x0a, 0xa7, 0xb7, 0xbb, 0x5d, 0x6c, 0x8e, 0x46, 0xe4, 0xdc, 0x1a, 0x9d, 0xb7, 0xc7, 0x9d, - 0x33, 0x63, 0x87, 0x2f, 0x69, 0x64, 0x8e, 0xc9, 0x78, 0x30, 0x6e, 0xf7, 0xd6, 0xb8, 0xc1, 0x0d, - 0x5a, 0xe3, 0x7c, 0xd2, 0xde, 0xe0, 0x6b, 0x63, 0x97, 0x6f, 0x38, 0x87, 0x07, 0x2f, 
0x94, 0x89, - 0x88, 0xaf, 0x5d, 0xb9, 0x27, 0x9a, 0xd3, 0xa8, 0x72, 0xd0, 0xea, 0xbf, 0x68, 0xf7, 0xac, 0x2e, - 0x79, 0x6e, 0x7e, 0x2b, 0x9a, 0xf9, 0x1e, 0x07, 0xa5, 0x65, 0x64, 0x88, 0x07, 0x4f, 0xb9, 0x21, - 0xc6, 0xdb, 0x08, 0x41, 0xa5, 0x63, 0xe1, 0xce, 0x45, 0xaf, 0x8d, 0x09, 0x1e, 0x5c, 0x8c, 0x4d, - 0xe3, 0xde, 0xe3, 0x3f, 0x68, 0x50, 0x4a, 0x16, 0x6b, 0xee, 0x75, 0xab, 0x4f, 0x4e, 0x7b, 0xd6, - 0xd3, 0xb3, 0xb1, 0x0c, 0x82, 0xd1, 0x45, 0x87, 0xbb, 0xcc, 0xe4, 0x87, 0x04, 0x04, 0x15, 0xb9, - 0xe9, 0xf1, 0x62, 0x33, 0x7c, 0x2e, 0x85, 0xf5, 0x07, 0x4a, 0x6f, 0x96, 0x1b, 0xaf, 0x40, 0x13, - 0xe3, 0x01, 0x36, 0x74, 0xf4, 0x01, 0x34, 0x14, 0xc2, 0xfd, 0x8a, 0xb1, 0xd9, 0x19, 0x93, 0x61, - 0xfb, 0xdb, 0x73, 0xee, 0x76, 0x19, 0x64, 0x23, 0x63, 0x0b, 0xbd, 0x0b, 0x07, 0x31, 0x6b, 0x53, - 0x5c, 0x3c, 0xfe, 0x0c, 0x6a, 0x77, 0x05, 0x3d, 0x02, 0xc8, 0x8d, 0xcc, 0xf1, 0xb8, 0x67, 0xca, - 0x83, 0xcd, 0xa9, 0x0c, 0x5c, 0x80, 0x1c, 0x36, 0x47, 0x17, 0xe7, 0xa6, 0x91, 0x39, 0xfe, 0x4b, - 0x1e, 0x72, 0xe2, 0xa4, 0x1d, 0xa0, 0x2f, 0xa1, 0x9c, 0x78, 0x49, 0x7a, 0x71, 0x8c, 0x1e, 0xbc, - 0xf6, 0x8d, 0xa9, 0x1e, 0xdd, 0xc7, 0x15, 0x7c, 0xa4, 0xa1, 0x13, 0xa8, 0x24, 0x9f, 0x54, 0x5e, - 0x1c, 0xa3, 0xe4, 0x01, 0x75, 0xc3, 0x6b, 0xcb, 0x06, 0x1d, 0xcf, 0xc1, 0x30, 0x43, 0xe6, 0x2c, - 0x78, 0x9f, 0x54, 0x8f, 0x1e, 0xa8, 0x9e, 0x4c, 0xf0, 0xf4, 0x4b, 0x4a, 0xfd, 0x60, 0xa3, 0x4c, - 0x95, 0x9c, 0xaf, 0xf8, 0x99, 0x24, 0x7e, 0x76, 0xb8, 0xb5, 0xa0, 0xf4, 0x5b, 0x47, 0xfd, 0x9d, - 0xbb, 0xc4, 0xea, 0xa9, 0x20, 0xfb, 0xdb, 0x0c, 0x5f, 0x63, 0x39, 0x21, 0xdb, 0xb0, 0x4b, 0x37, - 0x94, 0x6e, 0xe8, 0xdc, 0x68, 0x0a, 0xd5, 0x0d, 0x4f, 0x12, 0xe8, 0xc3, 0x74, 0x1d, 0xbb, 0xe3, - 0x41, 0xa3, 0xfe, 0xf0, 0x4d, 0x34, 0xb5, 0xf8, 0x29, 0x54, 0x37, 0xbc, 0x5d, 0xa4, 0x66, 0xb9, - 0xfb, 0xe5, 0x23, 0x35, 0xcb, 0xeb, 0x9e, 0x40, 0xbe, 0x03, 0xe3, 0xe6, 0x55, 0x17, 0x35, 0x6f, - 0x8e, 0xbd, 0x7d, 0xe7, 0xae, 0xbf, 0xff, 0x5a, 0x8e, 0x52, 0x6e, 0x01, 0xac, 0x2f, 0x8c, 0xe8, - 0x7e, 0x62, 0xc8, 0xad, 
0x0b, 0x6f, 0xfd, 0xc1, 0x1d, 0x52, 0xa5, 0x6a, 0x0c, 0xd5, 0x0d, 0x37, - 0xc8, 0xd4, 0x6e, 0xdc, 0x7d, 0xc3, 0xac, 0xef, 0x6d, 0xba, 0x68, 0x1d, 0x69, 0xe8, 0x5c, 0x06, - 0x58, 0xf4, 0x3c, 0xfa, 0x86, 0x8c, 0xa9, 0x6d, 0x3e, 0x10, 0xae, 0x42, 0x11, 0x5a, 0x47, 0x1a, - 0x1a, 0x40, 0x29, 0x99, 0x25, 0x6f, 0x4c, 0x9f, 0x37, 0x2a, 0x9c, 0xc1, 0x4e, 0xaa, 0x19, 0xfb, - 0x01, 0x7a, 0xf4, 0xc6, 0x23, 0x85, 0xdc, 0xb1, 0x54, 0x04, 0xbc, 0xe6, 0xec, 0xd1, 0xd2, 0x8e, - 0xb4, 0x93, 0xc3, 0x5f, 0x3c, 0x79, 0xe9, 0xb0, 0xf9, 0xea, 0xf2, 0x70, 0xe2, 0x2f, 0x3e, 0x5e, - 0x5e, 0xb1, 0x8f, 0x26, 0x76, 0x38, 0xe7, 0x3f, 0xa6, 0x1f, 0xbb, 0x1e, 0xff, 0x6f, 0xfd, 0xb7, - 0x90, 0x60, 0x39, 0xb9, 0xcc, 0x89, 0xbf, 0x7c, 0xfc, 0xff, 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff, - 0x71, 0x72, 0x24, 0x87, 0x29, 0x19, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// RouterClient is the client API for Router service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type RouterClient interface { - // - //SendPaymentV2 attempts to route a payment described by the passed - //PaymentRequest to the final destination. The call returns a stream of - //payment updates. - SendPaymentV2(ctx context.Context, in *SendPaymentRequest, opts ...grpc.CallOption) (Router_SendPaymentV2Client, error) - // - //TrackPaymentV2 returns an update stream for the payment identified by the - //payment hash. 
- TrackPaymentV2(ctx context.Context, in *TrackPaymentRequest, opts ...grpc.CallOption) (Router_TrackPaymentV2Client, error) - // - //EstimateRouteFee allows callers to obtain a lower bound w.r.t how much it - //may cost to send an HTLC to the target end destination. - EstimateRouteFee(ctx context.Context, in *RouteFeeRequest, opts ...grpc.CallOption) (*RouteFeeResponse, error) - // - //Deprecated, use SendToRouteV2. SendToRoute attempts to make a payment via - //the specified route. This method differs from SendPayment in that it - //allows users to specify a full route manually. This can be used for - //things like rebalancing, and atomic swaps. It differs from the newer - //SendToRouteV2 in that it doesn't return the full HTLC information. - SendToRoute(ctx context.Context, in *SendToRouteRequest, opts ...grpc.CallOption) (*SendToRouteResponse, error) - // - //SendToRouteV2 attempts to make a payment via the specified route. This - //method differs from SendPayment in that it allows users to specify a full - //route manually. This can be used for things like rebalancing, and atomic - //swaps. - SendToRouteV2(ctx context.Context, in *SendToRouteRequest, opts ...grpc.CallOption) (*lnrpc.HTLCAttempt, error) - // - //ResetMissionControl clears all mission control state and starts with a clean - //slate. - ResetMissionControl(ctx context.Context, in *ResetMissionControlRequest, opts ...grpc.CallOption) (*ResetMissionControlResponse, error) - // - //QueryMissionControl exposes the internal mission control state to callers. - //It is a development feature. - QueryMissionControl(ctx context.Context, in *QueryMissionControlRequest, opts ...grpc.CallOption) (*QueryMissionControlResponse, error) - // - //QueryProbability returns the current success probability estimate for a - //given node pair and amount. 
- QueryProbability(ctx context.Context, in *QueryProbabilityRequest, opts ...grpc.CallOption) (*QueryProbabilityResponse, error) - // - //BuildRoute builds a fully specified route based on a list of hop public - //keys. It retrieves the relevant channel policies from the graph in order to - //calculate the correct fees and time locks. - BuildRoute(ctx context.Context, in *BuildRouteRequest, opts ...grpc.CallOption) (*BuildRouteResponse, error) - // - //SubscribeHtlcEvents creates a uni-directional stream from the server to - //the client which delivers a stream of htlc events. - SubscribeHtlcEvents(ctx context.Context, in *SubscribeHtlcEventsRequest, opts ...grpc.CallOption) (Router_SubscribeHtlcEventsClient, error) - // - //Deprecated, use SendPaymentV2. SendPayment attempts to route a payment - //described by the passed PaymentRequest to the final destination. The call - //returns a stream of payment status updates. - SendPayment(ctx context.Context, in *SendPaymentRequest, opts ...grpc.CallOption) (Router_SendPaymentClient, error) - // - //Deprecated, use TrackPaymentV2. TrackPayment returns an update stream for - //the payment identified by the payment hash. - TrackPayment(ctx context.Context, in *TrackPaymentRequest, opts ...grpc.CallOption) (Router_TrackPaymentClient, error) - //* - //HtlcInterceptor dispatches a bi-directional streaming RPC in which - //Forwarded HTLC requests are sent to the client and the client responds with - //a boolean that tells LND if this htlc should be intercepted. - //In case of interception, the htlc can be either settled, cancelled or - //resumed later by using the ResolveHoldForward endpoint. 
- HtlcInterceptor(ctx context.Context, opts ...grpc.CallOption) (Router_HtlcInterceptorClient, error) -} - -type routerClient struct { - cc *grpc.ClientConn -} - -func NewRouterClient(cc *grpc.ClientConn) RouterClient { - return &routerClient{cc} -} - -func (c *routerClient) SendPaymentV2(ctx context.Context, in *SendPaymentRequest, opts ...grpc.CallOption) (Router_SendPaymentV2Client, error) { - stream, err := c.cc.NewStream(ctx, &_Router_serviceDesc.Streams[0], "/routerrpc.Router/SendPaymentV2", opts...) - if err != nil { - return nil, err - } - x := &routerSendPaymentV2Client{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Router_SendPaymentV2Client interface { - Recv() (*lnrpc.Payment, error) - grpc.ClientStream -} - -type routerSendPaymentV2Client struct { - grpc.ClientStream -} - -func (x *routerSendPaymentV2Client) Recv() (*lnrpc.Payment, error) { - m := new(lnrpc.Payment) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *routerClient) TrackPaymentV2(ctx context.Context, in *TrackPaymentRequest, opts ...grpc.CallOption) (Router_TrackPaymentV2Client, error) { - stream, err := c.cc.NewStream(ctx, &_Router_serviceDesc.Streams[1], "/routerrpc.Router/TrackPaymentV2", opts...) 
- if err != nil { - return nil, err - } - x := &routerTrackPaymentV2Client{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Router_TrackPaymentV2Client interface { - Recv() (*lnrpc.Payment, error) - grpc.ClientStream -} - -type routerTrackPaymentV2Client struct { - grpc.ClientStream -} - -func (x *routerTrackPaymentV2Client) Recv() (*lnrpc.Payment, error) { - m := new(lnrpc.Payment) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *routerClient) EstimateRouteFee(ctx context.Context, in *RouteFeeRequest, opts ...grpc.CallOption) (*RouteFeeResponse, error) { - out := new(RouteFeeResponse) - err := c.cc.Invoke(ctx, "/routerrpc.Router/EstimateRouteFee", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// Deprecated: Do not use. -func (c *routerClient) SendToRoute(ctx context.Context, in *SendToRouteRequest, opts ...grpc.CallOption) (*SendToRouteResponse, error) { - out := new(SendToRouteResponse) - err := c.cc.Invoke(ctx, "/routerrpc.Router/SendToRoute", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *routerClient) SendToRouteV2(ctx context.Context, in *SendToRouteRequest, opts ...grpc.CallOption) (*lnrpc.HTLCAttempt, error) { - out := new(lnrpc.HTLCAttempt) - err := c.cc.Invoke(ctx, "/routerrpc.Router/SendToRouteV2", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *routerClient) ResetMissionControl(ctx context.Context, in *ResetMissionControlRequest, opts ...grpc.CallOption) (*ResetMissionControlResponse, error) { - out := new(ResetMissionControlResponse) - err := c.cc.Invoke(ctx, "/routerrpc.Router/ResetMissionControl", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *routerClient) QueryMissionControl(ctx context.Context, in *QueryMissionControlRequest, opts ...grpc.CallOption) (*QueryMissionControlResponse, error) { - out := new(QueryMissionControlResponse) - err := c.cc.Invoke(ctx, "/routerrpc.Router/QueryMissionControl", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *routerClient) QueryProbability(ctx context.Context, in *QueryProbabilityRequest, opts ...grpc.CallOption) (*QueryProbabilityResponse, error) { - out := new(QueryProbabilityResponse) - err := c.cc.Invoke(ctx, "/routerrpc.Router/QueryProbability", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *routerClient) BuildRoute(ctx context.Context, in *BuildRouteRequest, opts ...grpc.CallOption) (*BuildRouteResponse, error) { - out := new(BuildRouteResponse) - err := c.cc.Invoke(ctx, "/routerrpc.Router/BuildRoute", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *routerClient) SubscribeHtlcEvents(ctx context.Context, in *SubscribeHtlcEventsRequest, opts ...grpc.CallOption) (Router_SubscribeHtlcEventsClient, error) { - stream, err := c.cc.NewStream(ctx, &_Router_serviceDesc.Streams[2], "/routerrpc.Router/SubscribeHtlcEvents", opts...) - if err != nil { - return nil, err - } - x := &routerSubscribeHtlcEventsClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Router_SubscribeHtlcEventsClient interface { - Recv() (*HtlcEvent, error) - grpc.ClientStream -} - -type routerSubscribeHtlcEventsClient struct { - grpc.ClientStream -} - -func (x *routerSubscribeHtlcEventsClient) Recv() (*HtlcEvent, error) { - m := new(HtlcEvent) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// Deprecated: Do not use. 
-func (c *routerClient) SendPayment(ctx context.Context, in *SendPaymentRequest, opts ...grpc.CallOption) (Router_SendPaymentClient, error) { - stream, err := c.cc.NewStream(ctx, &_Router_serviceDesc.Streams[3], "/routerrpc.Router/SendPayment", opts...) - if err != nil { - return nil, err - } - x := &routerSendPaymentClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Router_SendPaymentClient interface { - Recv() (*PaymentStatus, error) - grpc.ClientStream -} - -type routerSendPaymentClient struct { - grpc.ClientStream -} - -func (x *routerSendPaymentClient) Recv() (*PaymentStatus, error) { - m := new(PaymentStatus) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// Deprecated: Do not use. -func (c *routerClient) TrackPayment(ctx context.Context, in *TrackPaymentRequest, opts ...grpc.CallOption) (Router_TrackPaymentClient, error) { - stream, err := c.cc.NewStream(ctx, &_Router_serviceDesc.Streams[4], "/routerrpc.Router/TrackPayment", opts...) - if err != nil { - return nil, err - } - x := &routerTrackPaymentClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Router_TrackPaymentClient interface { - Recv() (*PaymentStatus, error) - grpc.ClientStream -} - -type routerTrackPaymentClient struct { - grpc.ClientStream -} - -func (x *routerTrackPaymentClient) Recv() (*PaymentStatus, error) { - m := new(PaymentStatus) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *routerClient) HtlcInterceptor(ctx context.Context, opts ...grpc.CallOption) (Router_HtlcInterceptorClient, error) { - stream, err := c.cc.NewStream(ctx, &_Router_serviceDesc.Streams[5], "/routerrpc.Router/HtlcInterceptor", opts...) 
- if err != nil { - return nil, err - } - x := &routerHtlcInterceptorClient{stream} - return x, nil -} - -type Router_HtlcInterceptorClient interface { - Send(*ForwardHtlcInterceptResponse) error - Recv() (*ForwardHtlcInterceptRequest, error) - grpc.ClientStream -} - -type routerHtlcInterceptorClient struct { - grpc.ClientStream -} - -func (x *routerHtlcInterceptorClient) Send(m *ForwardHtlcInterceptResponse) error { - return x.ClientStream.SendMsg(m) -} - -func (x *routerHtlcInterceptorClient) Recv() (*ForwardHtlcInterceptRequest, error) { - m := new(ForwardHtlcInterceptRequest) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// RouterServer is the server API for Router service. -type RouterServer interface { - // - //SendPaymentV2 attempts to route a payment described by the passed - //PaymentRequest to the final destination. The call returns a stream of - //payment updates. - SendPaymentV2(*SendPaymentRequest, Router_SendPaymentV2Server) error - // - //TrackPaymentV2 returns an update stream for the payment identified by the - //payment hash. - TrackPaymentV2(*TrackPaymentRequest, Router_TrackPaymentV2Server) error - // - //EstimateRouteFee allows callers to obtain a lower bound w.r.t how much it - //may cost to send an HTLC to the target end destination. - EstimateRouteFee(context.Context, *RouteFeeRequest) (*RouteFeeResponse, error) - // - //Deprecated, use SendToRouteV2. SendToRoute attempts to make a payment via - //the specified route. This method differs from SendPayment in that it - //allows users to specify a full route manually. This can be used for - //things like rebalancing, and atomic swaps. It differs from the newer - //SendToRouteV2 in that it doesn't return the full HTLC information. - SendToRoute(context.Context, *SendToRouteRequest) (*SendToRouteResponse, error) - // - //SendToRouteV2 attempts to make a payment via the specified route. 
This - //method differs from SendPayment in that it allows users to specify a full - //route manually. This can be used for things like rebalancing, and atomic - //swaps. - SendToRouteV2(context.Context, *SendToRouteRequest) (*lnrpc.HTLCAttempt, error) - // - //ResetMissionControl clears all mission control state and starts with a clean - //slate. - ResetMissionControl(context.Context, *ResetMissionControlRequest) (*ResetMissionControlResponse, error) - // - //QueryMissionControl exposes the internal mission control state to callers. - //It is a development feature. - QueryMissionControl(context.Context, *QueryMissionControlRequest) (*QueryMissionControlResponse, error) - // - //QueryProbability returns the current success probability estimate for a - //given node pair and amount. - QueryProbability(context.Context, *QueryProbabilityRequest) (*QueryProbabilityResponse, error) - // - //BuildRoute builds a fully specified route based on a list of hop public - //keys. It retrieves the relevant channel policies from the graph in order to - //calculate the correct fees and time locks. - BuildRoute(context.Context, *BuildRouteRequest) (*BuildRouteResponse, error) - // - //SubscribeHtlcEvents creates a uni-directional stream from the server to - //the client which delivers a stream of htlc events. - SubscribeHtlcEvents(*SubscribeHtlcEventsRequest, Router_SubscribeHtlcEventsServer) error - // - //Deprecated, use SendPaymentV2. SendPayment attempts to route a payment - //described by the passed PaymentRequest to the final destination. The call - //returns a stream of payment status updates. - SendPayment(*SendPaymentRequest, Router_SendPaymentServer) error - // - //Deprecated, use TrackPaymentV2. TrackPayment returns an update stream for - //the payment identified by the payment hash. 
- TrackPayment(*TrackPaymentRequest, Router_TrackPaymentServer) error - //* - //HtlcInterceptor dispatches a bi-directional streaming RPC in which - //Forwarded HTLC requests are sent to the client and the client responds with - //a boolean that tells LND if this htlc should be intercepted. - //In case of interception, the htlc can be either settled, cancelled or - //resumed later by using the ResolveHoldForward endpoint. - HtlcInterceptor(Router_HtlcInterceptorServer) error -} - -// UnimplementedRouterServer can be embedded to have forward compatible implementations. -type UnimplementedRouterServer struct { -} - -func (*UnimplementedRouterServer) SendPaymentV2(req *SendPaymentRequest, srv Router_SendPaymentV2Server) error { - return status.Errorf(codes.Unimplemented, "method SendPaymentV2 not implemented") -} -func (*UnimplementedRouterServer) TrackPaymentV2(req *TrackPaymentRequest, srv Router_TrackPaymentV2Server) error { - return status.Errorf(codes.Unimplemented, "method TrackPaymentV2 not implemented") -} -func (*UnimplementedRouterServer) EstimateRouteFee(ctx context.Context, req *RouteFeeRequest) (*RouteFeeResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method EstimateRouteFee not implemented") -} -func (*UnimplementedRouterServer) SendToRoute(ctx context.Context, req *SendToRouteRequest) (*SendToRouteResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method SendToRoute not implemented") -} -func (*UnimplementedRouterServer) SendToRouteV2(ctx context.Context, req *SendToRouteRequest) (*lnrpc.HTLCAttempt, error) { - return nil, status.Errorf(codes.Unimplemented, "method SendToRouteV2 not implemented") -} -func (*UnimplementedRouterServer) ResetMissionControl(ctx context.Context, req *ResetMissionControlRequest) (*ResetMissionControlResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ResetMissionControl not implemented") -} -func (*UnimplementedRouterServer) QueryMissionControl(ctx 
context.Context, req *QueryMissionControlRequest) (*QueryMissionControlResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method QueryMissionControl not implemented") -} -func (*UnimplementedRouterServer) QueryProbability(ctx context.Context, req *QueryProbabilityRequest) (*QueryProbabilityResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method QueryProbability not implemented") -} -func (*UnimplementedRouterServer) BuildRoute(ctx context.Context, req *BuildRouteRequest) (*BuildRouteResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method BuildRoute not implemented") -} -func (*UnimplementedRouterServer) SubscribeHtlcEvents(req *SubscribeHtlcEventsRequest, srv Router_SubscribeHtlcEventsServer) error { - return status.Errorf(codes.Unimplemented, "method SubscribeHtlcEvents not implemented") -} -func (*UnimplementedRouterServer) SendPayment(req *SendPaymentRequest, srv Router_SendPaymentServer) error { - return status.Errorf(codes.Unimplemented, "method SendPayment not implemented") -} -func (*UnimplementedRouterServer) TrackPayment(req *TrackPaymentRequest, srv Router_TrackPaymentServer) error { - return status.Errorf(codes.Unimplemented, "method TrackPayment not implemented") -} -func (*UnimplementedRouterServer) HtlcInterceptor(srv Router_HtlcInterceptorServer) error { - return status.Errorf(codes.Unimplemented, "method HtlcInterceptor not implemented") -} - -func RegisterRouterServer(s *grpc.Server, srv RouterServer) { - s.RegisterService(&_Router_serviceDesc, srv) -} - -func _Router_SendPaymentV2_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(SendPaymentRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(RouterServer).SendPaymentV2(m, &routerSendPaymentV2Server{stream}) -} - -type Router_SendPaymentV2Server interface { - Send(*lnrpc.Payment) error - grpc.ServerStream -} - -type routerSendPaymentV2Server struct { - grpc.ServerStream -} - -func (x 
*routerSendPaymentV2Server) Send(m *lnrpc.Payment) error { - return x.ServerStream.SendMsg(m) -} - -func _Router_TrackPaymentV2_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(TrackPaymentRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(RouterServer).TrackPaymentV2(m, &routerTrackPaymentV2Server{stream}) -} - -type Router_TrackPaymentV2Server interface { - Send(*lnrpc.Payment) error - grpc.ServerStream -} - -type routerTrackPaymentV2Server struct { - grpc.ServerStream -} - -func (x *routerTrackPaymentV2Server) Send(m *lnrpc.Payment) error { - return x.ServerStream.SendMsg(m) -} - -func _Router_EstimateRouteFee_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RouteFeeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(RouterServer).EstimateRouteFee(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/routerrpc.Router/EstimateRouteFee", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(RouterServer).EstimateRouteFee(ctx, req.(*RouteFeeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Router_SendToRoute_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SendToRouteRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(RouterServer).SendToRoute(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/routerrpc.Router/SendToRoute", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(RouterServer).SendToRoute(ctx, req.(*SendToRouteRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Router_SendToRouteV2_Handler(srv interface{}, ctx 
context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SendToRouteRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(RouterServer).SendToRouteV2(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/routerrpc.Router/SendToRouteV2", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(RouterServer).SendToRouteV2(ctx, req.(*SendToRouteRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Router_ResetMissionControl_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ResetMissionControlRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(RouterServer).ResetMissionControl(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/routerrpc.Router/ResetMissionControl", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(RouterServer).ResetMissionControl(ctx, req.(*ResetMissionControlRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Router_QueryMissionControl_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryMissionControlRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(RouterServer).QueryMissionControl(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/routerrpc.Router/QueryMissionControl", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(RouterServer).QueryMissionControl(ctx, req.(*QueryMissionControlRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Router_QueryProbability_Handler(srv 
interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryProbabilityRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(RouterServer).QueryProbability(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/routerrpc.Router/QueryProbability", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(RouterServer).QueryProbability(ctx, req.(*QueryProbabilityRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Router_BuildRoute_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(BuildRouteRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(RouterServer).BuildRoute(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/routerrpc.Router/BuildRoute", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(RouterServer).BuildRoute(ctx, req.(*BuildRouteRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Router_SubscribeHtlcEvents_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(SubscribeHtlcEventsRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(RouterServer).SubscribeHtlcEvents(m, &routerSubscribeHtlcEventsServer{stream}) -} - -type Router_SubscribeHtlcEventsServer interface { - Send(*HtlcEvent) error - grpc.ServerStream -} - -type routerSubscribeHtlcEventsServer struct { - grpc.ServerStream -} - -func (x *routerSubscribeHtlcEventsServer) Send(m *HtlcEvent) error { - return x.ServerStream.SendMsg(m) -} - -func _Router_SendPayment_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(SendPaymentRequest) - if err := stream.RecvMsg(m); err != nil { - return err 
- } - return srv.(RouterServer).SendPayment(m, &routerSendPaymentServer{stream}) -} - -type Router_SendPaymentServer interface { - Send(*PaymentStatus) error - grpc.ServerStream -} - -type routerSendPaymentServer struct { - grpc.ServerStream -} - -func (x *routerSendPaymentServer) Send(m *PaymentStatus) error { - return x.ServerStream.SendMsg(m) -} - -func _Router_TrackPayment_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(TrackPaymentRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(RouterServer).TrackPayment(m, &routerTrackPaymentServer{stream}) -} - -type Router_TrackPaymentServer interface { - Send(*PaymentStatus) error - grpc.ServerStream -} - -type routerTrackPaymentServer struct { - grpc.ServerStream -} - -func (x *routerTrackPaymentServer) Send(m *PaymentStatus) error { - return x.ServerStream.SendMsg(m) -} - -func _Router_HtlcInterceptor_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(RouterServer).HtlcInterceptor(&routerHtlcInterceptorServer{stream}) -} - -type Router_HtlcInterceptorServer interface { - Send(*ForwardHtlcInterceptRequest) error - Recv() (*ForwardHtlcInterceptResponse, error) - grpc.ServerStream -} - -type routerHtlcInterceptorServer struct { - grpc.ServerStream -} - -func (x *routerHtlcInterceptorServer) Send(m *ForwardHtlcInterceptRequest) error { - return x.ServerStream.SendMsg(m) -} - -func (x *routerHtlcInterceptorServer) Recv() (*ForwardHtlcInterceptResponse, error) { - m := new(ForwardHtlcInterceptResponse) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -var _Router_serviceDesc = grpc.ServiceDesc{ - ServiceName: "routerrpc.Router", - HandlerType: (*RouterServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "EstimateRouteFee", - Handler: _Router_EstimateRouteFee_Handler, - }, - { - MethodName: "SendToRoute", - Handler: _Router_SendToRoute_Handler, - }, - { - MethodName: "SendToRouteV2", - Handler: 
_Router_SendToRouteV2_Handler, - }, - { - MethodName: "ResetMissionControl", - Handler: _Router_ResetMissionControl_Handler, - }, - { - MethodName: "QueryMissionControl", - Handler: _Router_QueryMissionControl_Handler, - }, - { - MethodName: "QueryProbability", - Handler: _Router_QueryProbability_Handler, - }, - { - MethodName: "BuildRoute", - Handler: _Router_BuildRoute_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "SendPaymentV2", - Handler: _Router_SendPaymentV2_Handler, - ServerStreams: true, - }, - { - StreamName: "TrackPaymentV2", - Handler: _Router_TrackPaymentV2_Handler, - ServerStreams: true, - }, - { - StreamName: "SubscribeHtlcEvents", - Handler: _Router_SubscribeHtlcEvents_Handler, - ServerStreams: true, - }, - { - StreamName: "SendPayment", - Handler: _Router_SendPayment_Handler, - ServerStreams: true, - }, - { - StreamName: "TrackPayment", - Handler: _Router_TrackPayment_Handler, - ServerStreams: true, - }, - { - StreamName: "HtlcInterceptor", - Handler: _Router_HtlcInterceptor_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "routerrpc/router.proto", -} diff --git a/lnd/lnrpc/routerrpc/router.pb.gw.go b/lnd/lnrpc/routerrpc/router.pb.gw.go deleted file mode 100644 index 23350311..00000000 --- a/lnd/lnrpc/routerrpc/router.pb.gw.go +++ /dev/null @@ -1,782 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. -// source: routerrpc/router.proto - -/* -Package routerrpc is a reverse proxy. - -It translates gRPC into RESTful JSON APIs. 
-*/ -package routerrpc - -import ( - "context" - "io" - "net/http" - - "github.com/golang/protobuf/descriptor" - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/status" -) - -// Suppress "imported and not used" errors -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray -var _ = descriptor.ForMessage - -func request_Router_SendPaymentV2_0(ctx context.Context, marshaler runtime.Marshaler, client RouterClient, req *http.Request, pathParams map[string]string) (Router_SendPaymentV2Client, runtime.ServerMetadata, error) { - var protoReq SendPaymentRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - stream, err := client.SendPaymentV2(ctx, &protoReq) - if err != nil { - return nil, metadata, err - } - header, err := stream.Header() - if err != nil { - return nil, metadata, err - } - metadata.HeaderMD = header - return stream, metadata, nil - -} - -var ( - filter_Router_TrackPaymentV2_0 = &utilities.DoubleArray{Encoding: map[string]int{"payment_hash": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} -) - -func request_Router_TrackPaymentV2_0(ctx context.Context, marshaler runtime.Marshaler, client RouterClient, req *http.Request, pathParams map[string]string) (Router_TrackPaymentV2Client, runtime.ServerMetadata, error) { - var protoReq TrackPaymentRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = 
pathParams["payment_hash"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "payment_hash") - } - - protoReq.PaymentHash, err = runtime.Bytes(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "payment_hash", err) - } - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Router_TrackPaymentV2_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - stream, err := client.TrackPaymentV2(ctx, &protoReq) - if err != nil { - return nil, metadata, err - } - header, err := stream.Header() - if err != nil { - return nil, metadata, err - } - metadata.HeaderMD = header - return stream, metadata, nil - -} - -func request_Router_EstimateRouteFee_0(ctx context.Context, marshaler runtime.Marshaler, client RouterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq RouteFeeRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.EstimateRouteFee(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Router_EstimateRouteFee_0(ctx context.Context, marshaler runtime.Marshaler, server RouterServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq RouteFeeRequest - var metadata runtime.ServerMetadata - - newReader, berr := 
utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.EstimateRouteFee(ctx, &protoReq) - return msg, metadata, err - -} - -func request_Router_SendToRouteV2_0(ctx context.Context, marshaler runtime.Marshaler, client RouterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq SendToRouteRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.SendToRouteV2(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Router_SendToRouteV2_0(ctx context.Context, marshaler runtime.Marshaler, server RouterServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq SendToRouteRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.SendToRouteV2(ctx, &protoReq) - return msg, metadata, err - -} - -func request_Router_ResetMissionControl_0(ctx context.Context, marshaler runtime.Marshaler, client RouterClient, req *http.Request, pathParams 
map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ResetMissionControlRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.ResetMissionControl(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Router_ResetMissionControl_0(ctx context.Context, marshaler runtime.Marshaler, server RouterServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ResetMissionControlRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.ResetMissionControl(ctx, &protoReq) - return msg, metadata, err - -} - -func request_Router_QueryMissionControl_0(ctx context.Context, marshaler runtime.Marshaler, client RouterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryMissionControlRequest - var metadata runtime.ServerMetadata - - msg, err := client.QueryMissionControl(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Router_QueryMissionControl_0(ctx context.Context, marshaler runtime.Marshaler, server RouterServer, req *http.Request, pathParams map[string]string) (proto.Message, 
runtime.ServerMetadata, error) { - var protoReq QueryMissionControlRequest - var metadata runtime.ServerMetadata - - msg, err := server.QueryMissionControl(ctx, &protoReq) - return msg, metadata, err - -} - -func request_Router_QueryProbability_0(ctx context.Context, marshaler runtime.Marshaler, client RouterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryProbabilityRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["from_node"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "from_node") - } - - protoReq.FromNode, err = runtime.Bytes(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "from_node", err) - } - - val, ok = pathParams["to_node"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "to_node") - } - - protoReq.ToNode, err = runtime.Bytes(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "to_node", err) - } - - val, ok = pathParams["amt_msat"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "amt_msat") - } - - protoReq.AmtMsat, err = runtime.Int64(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "amt_msat", err) - } - - msg, err := client.QueryProbability(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Router_QueryProbability_0(ctx context.Context, marshaler runtime.Marshaler, server RouterServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryProbabilityRequest - var 
metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["from_node"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "from_node") - } - - protoReq.FromNode, err = runtime.Bytes(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "from_node", err) - } - - val, ok = pathParams["to_node"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "to_node") - } - - protoReq.ToNode, err = runtime.Bytes(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "to_node", err) - } - - val, ok = pathParams["amt_msat"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "amt_msat") - } - - protoReq.AmtMsat, err = runtime.Int64(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "amt_msat", err) - } - - msg, err := server.QueryProbability(ctx, &protoReq) - return msg, metadata, err - -} - -func request_Router_BuildRoute_0(ctx context.Context, marshaler runtime.Marshaler, client RouterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq BuildRouteRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.BuildRoute(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func 
local_request_Router_BuildRoute_0(ctx context.Context, marshaler runtime.Marshaler, server RouterServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq BuildRouteRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.BuildRoute(ctx, &protoReq) - return msg, metadata, err - -} - -func request_Router_SubscribeHtlcEvents_0(ctx context.Context, marshaler runtime.Marshaler, client RouterClient, req *http.Request, pathParams map[string]string) (Router_SubscribeHtlcEventsClient, runtime.ServerMetadata, error) { - var protoReq SubscribeHtlcEventsRequest - var metadata runtime.ServerMetadata - - stream, err := client.SubscribeHtlcEvents(ctx, &protoReq) - if err != nil { - return nil, metadata, err - } - header, err := stream.Header() - if err != nil { - return nil, metadata, err - } - metadata.HeaderMD = header - return stream, metadata, nil - -} - -// RegisterRouterHandlerServer registers the http handlers for service Router to "mux". -// UnaryRPC :call RouterServer directly. -// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. 
-func RegisterRouterHandlerServer(ctx context.Context, mux *runtime.ServeMux, server RouterServer) error { - - mux.Handle("POST", pattern_Router_SendPaymentV2_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - err := status.Error(codes.Unimplemented, "streaming calls are not yet supported in the in-process transport") - _, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - }) - - mux.Handle("GET", pattern_Router_TrackPaymentV2_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - err := status.Error(codes.Unimplemented, "streaming calls are not yet supported in the in-process transport") - _, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - }) - - mux.Handle("POST", pattern_Router_EstimateRouteFee_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Router_EstimateRouteFee_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Router_EstimateRouteFee_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_Router_SendToRouteV2_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Router_SendToRouteV2_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Router_SendToRouteV2_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Router_ResetMissionControl_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Router_ResetMissionControl_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Router_ResetMissionControl_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Router_QueryMissionControl_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Router_QueryMissionControl_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Router_QueryMissionControl_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Router_QueryProbability_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Router_QueryProbability_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Router_QueryProbability_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_Router_BuildRoute_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Router_BuildRoute_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Router_BuildRoute_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Router_SubscribeHtlcEvents_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - err := status.Error(codes.Unimplemented, "streaming calls are not yet supported in the in-process transport") - _, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - }) - - return nil -} - -// RegisterRouterHandlerFromEndpoint is same as RegisterRouterHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterRouterHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) 
- if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterRouterHandler(ctx, mux, conn) -} - -// RegisterRouterHandler registers the http handlers for service Router to "mux". -// The handlers forward requests to the grpc endpoint over "conn". -func RegisterRouterHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterRouterHandlerClient(ctx, mux, NewRouterClient(conn)) -} - -// RegisterRouterHandlerClient registers the http handlers for service Router -// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "RouterClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "RouterClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "RouterClient" to call the correct interceptors. 
-func RegisterRouterHandlerClient(ctx context.Context, mux *runtime.ServeMux, client RouterClient) error { - - mux.Handle("POST", pattern_Router_SendPaymentV2_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Router_SendPaymentV2_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Router_SendPaymentV2_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Router_TrackPaymentV2_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Router_TrackPaymentV2_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Router_TrackPaymentV2_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_Router_EstimateRouteFee_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Router_EstimateRouteFee_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Router_EstimateRouteFee_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Router_SendToRouteV2_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Router_SendToRouteV2_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Router_SendToRouteV2_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_Router_ResetMissionControl_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Router_ResetMissionControl_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Router_ResetMissionControl_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Router_QueryMissionControl_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Router_QueryMissionControl_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Router_QueryMissionControl_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Router_QueryProbability_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Router_QueryProbability_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Router_QueryProbability_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Router_BuildRoute_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Router_BuildRoute_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Router_BuildRoute_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Router_SubscribeHtlcEvents_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Router_SubscribeHtlcEvents_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Router_SubscribeHtlcEvents_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -var ( - pattern_Router_SendPaymentV2_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v2", "router", "send"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Router_TrackPaymentV2_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"v2", "router", "track", "payment_hash"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Router_EstimateRouteFee_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v2", "router", "route", "estimatefee"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Router_SendToRouteV2_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v2", "router", "route", "send"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Router_ResetMissionControl_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v2", "router", "mc", "reset"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Router_QueryMissionControl_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v2", 
"router", "mc"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Router_QueryProbability_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4, 1, 0, 4, 1, 5, 5, 1, 0, 4, 1, 5, 6}, []string{"v2", "router", "mc", "probability", "from_node", "to_node", "amt_msat"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Router_BuildRoute_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v2", "router", "route"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Router_SubscribeHtlcEvents_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v2", "router", "htlcevents"}, "", runtime.AssumeColonVerbOpt(true))) -) - -var ( - forward_Router_SendPaymentV2_0 = runtime.ForwardResponseStream - - forward_Router_TrackPaymentV2_0 = runtime.ForwardResponseStream - - forward_Router_EstimateRouteFee_0 = runtime.ForwardResponseMessage - - forward_Router_SendToRouteV2_0 = runtime.ForwardResponseMessage - - forward_Router_ResetMissionControl_0 = runtime.ForwardResponseMessage - - forward_Router_QueryMissionControl_0 = runtime.ForwardResponseMessage - - forward_Router_QueryProbability_0 = runtime.ForwardResponseMessage - - forward_Router_BuildRoute_0 = runtime.ForwardResponseMessage - - forward_Router_SubscribeHtlcEvents_0 = runtime.ForwardResponseStream -) diff --git a/lnd/lnrpc/routerrpc/router.proto b/lnd/lnrpc/routerrpc/router.proto deleted file mode 100644 index 94daa796..00000000 --- a/lnd/lnrpc/routerrpc/router.proto +++ /dev/null @@ -1,664 +0,0 @@ -syntax = "proto3"; - -import "rpc.proto"; - -package routerrpc; - -option go_package = "github.com/pkt-cash/pktd/lnd/lnrpc/routerrpc"; - -// Router is a service that offers advanced interaction with the router -// subsystem of the daemon. -service Router { - /* - SendPaymentV2 attempts to route a payment described by the passed - PaymentRequest to the final destination. The call returns a stream of - payment updates. 
- */ - rpc SendPaymentV2 (SendPaymentRequest) returns (stream lnrpc.Payment); - - /* - TrackPaymentV2 returns an update stream for the payment identified by the - payment hash. - */ - rpc TrackPaymentV2 (TrackPaymentRequest) returns (stream lnrpc.Payment); - - /* - EstimateRouteFee allows callers to obtain a lower bound w.r.t how much it - may cost to send an HTLC to the target end destination. - */ - rpc EstimateRouteFee (RouteFeeRequest) returns (RouteFeeResponse); - - /* - Deprecated, use SendToRouteV2. SendToRoute attempts to make a payment via - the specified route. This method differs from SendPayment in that it - allows users to specify a full route manually. This can be used for - things like rebalancing, and atomic swaps. It differs from the newer - SendToRouteV2 in that it doesn't return the full HTLC information. - */ - rpc SendToRoute (SendToRouteRequest) returns (SendToRouteResponse) { - option deprecated = true; - } - - /* - SendToRouteV2 attempts to make a payment via the specified route. This - method differs from SendPayment in that it allows users to specify a full - route manually. This can be used for things like rebalancing, and atomic - swaps. - */ - rpc SendToRouteV2 (SendToRouteRequest) returns (lnrpc.HTLCAttempt); - - /* - ResetMissionControl clears all mission control state and starts with a clean - slate. - */ - rpc ResetMissionControl (ResetMissionControlRequest) - returns (ResetMissionControlResponse); - - /* - QueryMissionControl exposes the internal mission control state to callers. - It is a development feature. - */ - rpc QueryMissionControl (QueryMissionControlRequest) - returns (QueryMissionControlResponse); - - /* - QueryProbability returns the current success probability estimate for a - given node pair and amount. - */ - rpc QueryProbability (QueryProbabilityRequest) - returns (QueryProbabilityResponse); - - /* - BuildRoute builds a fully specified route based on a list of hop public - keys. 
It retrieves the relevant channel policies from the graph in order to - calculate the correct fees and time locks. - */ - rpc BuildRoute (BuildRouteRequest) returns (BuildRouteResponse); - - /* - SubscribeHtlcEvents creates a uni-directional stream from the server to - the client which delivers a stream of htlc events. - */ - rpc SubscribeHtlcEvents (SubscribeHtlcEventsRequest) - returns (stream HtlcEvent); - - /* - Deprecated, use SendPaymentV2. SendPayment attempts to route a payment - described by the passed PaymentRequest to the final destination. The call - returns a stream of payment status updates. - */ - rpc SendPayment (SendPaymentRequest) returns (stream PaymentStatus) { - option deprecated = true; - } - - /* - Deprecated, use TrackPaymentV2. TrackPayment returns an update stream for - the payment identified by the payment hash. - */ - rpc TrackPayment (TrackPaymentRequest) returns (stream PaymentStatus) { - option deprecated = true; - } - - /** - HtlcInterceptor dispatches a bi-directional streaming RPC in which - Forwarded HTLC requests are sent to the client and the client responds with - a boolean that tells LND if this htlc should be intercepted. - In case of interception, the htlc can be either settled, cancelled or - resumed later by using the ResolveHoldForward endpoint. - */ - rpc HtlcInterceptor (stream ForwardHtlcInterceptResponse) - returns (stream ForwardHtlcInterceptRequest); -} - -message SendPaymentRequest { - // The identity pubkey of the payment recipient - bytes dest = 1; - - /* - Number of satoshis to send. - - The fields amt and amt_msat are mutually exclusive. - */ - int64 amt = 2; - - /* - Number of millisatoshis to send. - - The fields amt and amt_msat are mutually exclusive. - */ - int64 amt_msat = 12; - - // The hash to use within the payment's HTLC - bytes payment_hash = 3; - - /* - The CLTV delta from the current height that should be used to set the - timelock for the final hop. 
- */ - int32 final_cltv_delta = 4; - - /* - A bare-bones invoice for a payment within the Lightning Network. With the - details of the invoice, the sender has all the data necessary to send a - payment to the recipient. The amount in the payment request may be zero. In - that case it is required to set the amt field as well. If no payment request - is specified, the following fields are required: dest, amt and payment_hash. - */ - string payment_request = 5; - - /* - An upper limit on the amount of time we should spend when attempting to - fulfill the payment. This is expressed in seconds. If we cannot make a - successful payment within this time frame, an error will be returned. - This field must be non-zero. - */ - int32 timeout_seconds = 6; - - /* - The maximum number of satoshis that will be paid as a fee of the payment. - If this field is left to the default value of 0, only zero-fee routes will - be considered. This usually means single hop routes connecting directly to - the destination. To send the payment without a fee limit, use max int here. - - The fields fee_limit_sat and fee_limit_msat are mutually exclusive. - */ - int64 fee_limit_sat = 7; - - /* - The maximum number of millisatoshis that will be paid as a fee of the - payment. If this field is left to the default value of 0, only zero-fee - routes will be considered. This usually means single hop routes connecting - directly to the destination. To send the payment without a fee limit, use - max int here. - - The fields fee_limit_sat and fee_limit_msat are mutually exclusive. - */ - int64 fee_limit_msat = 13; - - /* - Deprecated, use outgoing_chan_ids. The channel id of the channel that must - be taken to the first hop. If zero, any channel may be used (unless - outgoing_chan_ids are set). - */ - uint64 outgoing_chan_id = 8 [jstype = JS_STRING, deprecated = true]; - - /* - The channel ids of the channels are allowed for the first hop. If empty, - any channel may be used. 
- */ - repeated uint64 outgoing_chan_ids = 19; - - /* - The pubkey of the last hop of the route. If empty, any hop may be used. - */ - bytes last_hop_pubkey = 14; - - /* - An optional maximum total time lock for the route. This should not exceed - lnd's `--max-cltv-expiry` setting. If zero, then the value of - `--max-cltv-expiry` is enforced. - */ - int32 cltv_limit = 9; - - /* - Optional route hints to reach the destination through private channels. - */ - repeated lnrpc.RouteHint route_hints = 10; - - /* - An optional field that can be used to pass an arbitrary set of TLV records - to a peer which understands the new records. This can be used to pass - application specific data during the payment attempt. Record types are - required to be in the custom range >= 65536. When using REST, the values - must be encoded as base64. - */ - map dest_custom_records = 11; - - // If set, circular payments to self are permitted. - bool allow_self_payment = 15; - - /* - Features assumed to be supported by the final node. All transitive feature - dependencies must also be set properly. For a given feature bit pair, either - optional or remote may be set, but not both. If this field is nil or empty, - the router will try to load destination features from the graph as a - fallback. - */ - repeated lnrpc.FeatureBit dest_features = 16; - - /* - The maximum number of partial payments that may be use to complete the full - amount. - */ - uint32 max_parts = 17; - - /* - If set, only the final payment update is streamed back. Intermediate updates - that show which htlcs are still in flight are suppressed. - */ - bool no_inflight_updates = 18; -} - -message TrackPaymentRequest { - // The hash of the payment to look up. - bytes payment_hash = 1; - - /* - If set, only the final payment update is streamed back. Intermediate updates - that show which htlcs are still in flight are suppressed. 
- */ - bool no_inflight_updates = 2; -} - -message RouteFeeRequest { - /* - The destination once wishes to obtain a routing fee quote to. - */ - bytes dest = 1; - - /* - The amount one wishes to send to the target destination. - */ - int64 amt_sat = 2; -} - -message RouteFeeResponse { - /* - A lower bound of the estimated fee to the target destination within the - network, expressed in milli-satoshis. - */ - int64 routing_fee_msat = 1; - - /* - An estimate of the worst case time delay that can occur. Note that callers - will still need to factor in the final CLTV delta of the last hop into this - value. - */ - int64 time_lock_delay = 2; -} - -message SendToRouteRequest { - // The payment hash to use for the HTLC. - bytes payment_hash = 1; - - // Route that should be used to attempt to complete the payment. - lnrpc.Route route = 2; -} - -message SendToRouteResponse { - // The preimage obtained by making the payment. - bytes preimage = 1; - - // The failure message in case the payment failed. - lnrpc.Failure failure = 2; -} - -message ResetMissionControlRequest { -} - -message ResetMissionControlResponse { -} - -message QueryMissionControlRequest { -} - -// QueryMissionControlResponse contains mission control state. -message QueryMissionControlResponse { - reserved 1; - - // Node pair-level mission control state. - repeated PairHistory pairs = 2; -} - -// PairHistory contains the mission control state for a particular node pair. -message PairHistory { - // The source node pubkey of the pair. - bytes node_from = 1; - - // The destination node pubkey of the pair. - bytes node_to = 2; - - reserved 3, 4, 5, 6; - - PairData history = 7; -} - -message PairData { - // Time of last failure. - int64 fail_time = 1; - - /* - Lowest amount that failed to forward rounded to whole sats. This may be - set to zero if the failure is independent of amount. - */ - int64 fail_amt_sat = 2; - - /* - Lowest amount that failed to forward in millisats. 
This may be - set to zero if the failure is independent of amount. - */ - int64 fail_amt_msat = 4; - - reserved 3; - - // Time of last success. - int64 success_time = 5; - - // Highest amount that we could successfully forward rounded to whole sats. - int64 success_amt_sat = 6; - - // Highest amount that we could successfully forward in millisats. - int64 success_amt_msat = 7; -} - -message QueryProbabilityRequest { - // The source node pubkey of the pair. - bytes from_node = 1; - - // The destination node pubkey of the pair. - bytes to_node = 2; - - // The amount for which to calculate a probability. - int64 amt_msat = 3; -} - -message QueryProbabilityResponse { - // The success probability for the requested pair. - double probability = 1; - - // The historical data for the requested pair. - PairData history = 2; -} - -message BuildRouteRequest { - /* - The amount to send expressed in msat. If set to zero, the minimum routable - amount is used. - */ - int64 amt_msat = 1; - - /* - CLTV delta from the current height that should be used for the timelock - of the final hop - */ - int32 final_cltv_delta = 2; - - /* - The channel id of the channel that must be taken to the first hop. If zero, - any channel may be used. - */ - uint64 outgoing_chan_id = 3 [jstype = JS_STRING]; - - /* - A list of hops that defines the route. This does not include the source hop - pubkey. - */ - repeated bytes hop_pubkeys = 4; -} - -message BuildRouteResponse { - /* - Fully specified route that can be used to execute the payment. - */ - lnrpc.Route route = 1; -} - -message SubscribeHtlcEventsRequest { -} - -/* -HtlcEvent contains the htlc event that was processed. These are served on a -best-effort basis; events are not persisted, delivery is not guaranteed -(in the event of a crash in the switch, forward events may be lost) and -some events may be replayed upon restart. 
Events consumed from this package -should be de-duplicated by the htlc's unique combination of incoming and -outgoing channel id and htlc id. [EXPERIMENTAL] -*/ -message HtlcEvent { - /* - The short channel id that the incoming htlc arrived at our node on. This - value is zero for sends. - */ - uint64 incoming_channel_id = 1; - - /* - The short channel id that the outgoing htlc left our node on. This value - is zero for receives. - */ - uint64 outgoing_channel_id = 2; - - /* - Incoming id is the index of the incoming htlc in the incoming channel. - This value is zero for sends. - */ - uint64 incoming_htlc_id = 3; - - /* - Outgoing id is the index of the outgoing htlc in the outgoing channel. - This value is zero for receives. - */ - uint64 outgoing_htlc_id = 4; - - /* - The time in unix nanoseconds that the event occurred. - */ - uint64 timestamp_ns = 5; - - enum EventType { - UNKNOWN = 0; - SEND = 1; - RECEIVE = 2; - FORWARD = 3; - } - - /* - The event type indicates whether the htlc was part of a send, receive or - forward. - */ - EventType event_type = 6; - - oneof event { - ForwardEvent forward_event = 7; - ForwardFailEvent forward_fail_event = 8; - SettleEvent settle_event = 9; - LinkFailEvent link_fail_event = 10; - } -} - -message HtlcInfo { - // The timelock on the incoming htlc. - uint32 incoming_timelock = 1; - - // The timelock on the outgoing htlc. - uint32 outgoing_timelock = 2; - - // The amount of the incoming htlc. - uint64 incoming_amt_msat = 3; - - // The amount of the outgoing htlc. - uint64 outgoing_amt_msat = 4; -} - -message ForwardEvent { - // Info contains details about the htlc that was forwarded. - HtlcInfo info = 1; -} - -message ForwardFailEvent { -} - -message SettleEvent { -} - -message LinkFailEvent { - // Info contains details about the htlc that we failed. - HtlcInfo info = 1; - - // FailureCode is the BOLT error code for the failure. 
- lnrpc.Failure.FailureCode wire_failure = 2; - - /* - FailureDetail provides additional information about the reason for the - failure. This detail enriches the information provided by the wire message - and may be 'no detail' if the wire message requires no additional metadata. - */ - FailureDetail failure_detail = 3; - - // A string representation of the link failure. - string failure_string = 4; -} - -enum FailureDetail { - UNKNOWN = 0; - NO_DETAIL = 1; - ONION_DECODE = 2; - LINK_NOT_ELIGIBLE = 3; - ON_CHAIN_TIMEOUT = 4; - HTLC_EXCEEDS_MAX = 5; - INSUFFICIENT_BALANCE = 6; - INCOMPLETE_FORWARD = 7; - HTLC_ADD_FAILED = 8; - FORWARDS_DISABLED = 9; - INVOICE_CANCELED = 10; - INVOICE_UNDERPAID = 11; - INVOICE_EXPIRY_TOO_SOON = 12; - INVOICE_NOT_OPEN = 13; - MPP_INVOICE_TIMEOUT = 14; - ADDRESS_MISMATCH = 15; - SET_TOTAL_MISMATCH = 16; - SET_TOTAL_TOO_LOW = 17; - SET_OVERPAID = 18; - UNKNOWN_INVOICE = 19; - INVALID_KEYSEND = 20; - MPP_IN_PROGRESS = 21; - CIRCULAR_ROUTE = 22; -} - -enum PaymentState { - /* - Payment is still in flight. - */ - IN_FLIGHT = 0; - - /* - Payment completed successfully. - */ - SUCCEEDED = 1; - - /* - There are more routes to try, but the payment timeout was exceeded. - */ - FAILED_TIMEOUT = 2; - - /* - All possible routes were tried and failed permanently. Or were no - routes to the destination at all. - */ - FAILED_NO_ROUTE = 3; - - /* - A non-recoverable error has occured. - */ - FAILED_ERROR = 4; - - /* - Payment details incorrect (unknown hash, invalid amt or - invalid final cltv delta) - */ - FAILED_INCORRECT_PAYMENT_DETAILS = 5; - - /* - Insufficient local balance. - */ - FAILED_INSUFFICIENT_BALANCE = 6; -} - -message PaymentStatus { - // Current state the payment is in. - PaymentState state = 1; - - /* - The pre-image of the payment when state is SUCCEEDED. - */ - bytes preimage = 2; - - reserved 3; - - /* - The HTLCs made in attempt to settle the payment [EXPERIMENTAL]. 
- */ - repeated lnrpc.HTLCAttempt htlcs = 4; -} - -message CircuitKey { - /// The id of the channel that the is part of this circuit. - uint64 chan_id = 1; - - /// The index of the incoming htlc in the incoming channel. - uint64 htlc_id = 2; -} - -message ForwardHtlcInterceptRequest { - /* - The key of this forwarded htlc. It defines the incoming channel id and - the index in this channel. - */ - CircuitKey incoming_circuit_key = 1; - - // The incoming htlc amount. - uint64 incoming_amount_msat = 5; - - // The incoming htlc expiry. - uint32 incoming_expiry = 6; - - /* - The htlc payment hash. This value is not guaranteed to be unique per - request. - */ - bytes payment_hash = 2; - - // The requested outgoing channel id for this forwarded htlc. Because of - // non-strict forwarding, this isn't necessarily the channel over which the - // packet will be forwarded eventually. A different channel to the same peer - // may be selected as well. - uint64 outgoing_requested_chan_id = 7; - - // The outgoing htlc amount. - uint64 outgoing_amount_msat = 3; - - // The outgoing htlc expiry. - uint32 outgoing_expiry = 4; - - // Any custom records that were present in the payload. - map custom_records = 8; - - // The onion blob for the next hop - bytes onion_blob = 9; -} - -/** -ForwardHtlcInterceptResponse enables the caller to resolve a previously hold -forward. The caller can choose either to: -- `Resume`: Execute the default behavior (usually forward). -- `Reject`: Fail the htlc backwards. -- `Settle`: Settle this htlc with a given preimage. -*/ -message ForwardHtlcInterceptResponse { - /** - The key of this forwarded htlc. It defines the incoming channel id and - the index in this channel. - */ - CircuitKey incoming_circuit_key = 1; - - // The resolve action for this intercepted htlc. - ResolveHoldForwardAction action = 2; - - // The preimage in case the resolve action is Settle. 
- bytes preimage = 3; -} - -enum ResolveHoldForwardAction { - SETTLE = 0; - FAIL = 1; - RESUME = 2; -} diff --git a/lnd/lnrpc/routerrpc/router.swagger.json b/lnd/lnrpc/routerrpc/router.swagger.json deleted file mode 100644 index 17fe3598..00000000 --- a/lnd/lnrpc/routerrpc/router.swagger.json +++ /dev/null @@ -1,1402 +0,0 @@ -{ - "swagger": "2.0", - "info": { - "title": "routerrpc/router.proto", - "version": "version not set" - }, - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "paths": { - "/v2/router/htlcevents": { - "get": { - "summary": "SubscribeHtlcEvents creates a uni-directional stream from the server to\nthe client which delivers a stream of htlc events.", - "operationId": "Router_SubscribeHtlcEvents", - "responses": { - "200": { - "description": "A successful response.(streaming responses)", - "schema": { - "type": "object", - "properties": { - "result": { - "$ref": "#/definitions/routerrpcHtlcEvent" - }, - "error": { - "$ref": "#/definitions/runtimeStreamError" - } - }, - "title": "Stream result of routerrpcHtlcEvent" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "tags": [ - "Router" - ] - } - }, - "/v2/router/mc": { - "get": { - "summary": "QueryMissionControl exposes the internal mission control state to callers.\nIt is a development feature.", - "operationId": "Router_QueryMissionControl", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/routerrpcQueryMissionControlResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "tags": [ - "Router" - ] - } - }, - "/v2/router/mc/probability/{from_node}/{to_node}/{amt_msat}": { - "get": { - "summary": "QueryProbability returns the current success probability estimate for a\ngiven node pair and amount.", - "operationId": 
"Router_QueryProbability", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/routerrpcQueryProbabilityResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "from_node", - "description": "The source node pubkey of the pair.", - "in": "path", - "required": true, - "type": "string", - "format": "byte" - }, - { - "name": "to_node", - "description": "The destination node pubkey of the pair.", - "in": "path", - "required": true, - "type": "string", - "format": "byte" - }, - { - "name": "amt_msat", - "description": "The amount for which to calculate a probability.", - "in": "path", - "required": true, - "type": "string", - "format": "int64" - } - ], - "tags": [ - "Router" - ] - } - }, - "/v2/router/mc/reset": { - "post": { - "summary": "ResetMissionControl clears all mission control state and starts with a clean\nslate.", - "operationId": "Router_ResetMissionControl", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/routerrpcResetMissionControlResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/routerrpcResetMissionControlRequest" - } - } - ], - "tags": [ - "Router" - ] - } - }, - "/v2/router/route": { - "post": { - "summary": "BuildRoute builds a fully specified route based on a list of hop public\nkeys. 
It retrieves the relevant channel policies from the graph in order to\ncalculate the correct fees and time locks.", - "operationId": "Router_BuildRoute", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/routerrpcBuildRouteResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/routerrpcBuildRouteRequest" - } - } - ], - "tags": [ - "Router" - ] - } - }, - "/v2/router/route/estimatefee": { - "post": { - "summary": "EstimateRouteFee allows callers to obtain a lower bound w.r.t how much it\nmay cost to send an HTLC to the target end destination.", - "operationId": "Router_EstimateRouteFee", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/routerrpcRouteFeeResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/routerrpcRouteFeeRequest" - } - } - ], - "tags": [ - "Router" - ] - } - }, - "/v2/router/route/send": { - "post": { - "summary": "SendToRouteV2 attempts to make a payment via the specified route. This\nmethod differs from SendPayment in that it allows users to specify a full\nroute manually. 
This can be used for things like rebalancing, and atomic\nswaps.", - "operationId": "Router_SendToRouteV2", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/lnrpcHTLCAttempt" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/routerrpcSendToRouteRequest" - } - } - ], - "tags": [ - "Router" - ] - } - }, - "/v2/router/send": { - "post": { - "summary": "SendPaymentV2 attempts to route a payment described by the passed\nPaymentRequest to the final destination. The call returns a stream of\npayment updates.", - "operationId": "Router_SendPaymentV2", - "responses": { - "200": { - "description": "A successful response.(streaming responses)", - "schema": { - "type": "object", - "properties": { - "result": { - "$ref": "#/definitions/lnrpcPayment" - }, - "error": { - "$ref": "#/definitions/runtimeStreamError" - } - }, - "title": "Stream result of lnrpcPayment" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/routerrpcSendPaymentRequest" - } - } - ], - "tags": [ - "Router" - ] - } - }, - "/v2/router/track/{payment_hash}": { - "get": { - "summary": "TrackPaymentV2 returns an update stream for the payment identified by the\npayment hash.", - "operationId": "Router_TrackPaymentV2", - "responses": { - "200": { - "description": "A successful response.(streaming responses)", - "schema": { - "type": "object", - "properties": { - "result": { - "$ref": "#/definitions/lnrpcPayment" - }, - "error": { - "$ref": "#/definitions/runtimeStreamError" - } - }, - "title": "Stream result of lnrpcPayment" - } - }, - "default": { - 
"description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "payment_hash", - "description": "The hash of the payment to look up.", - "in": "path", - "required": true, - "type": "string", - "format": "byte" - }, - { - "name": "no_inflight_updates", - "description": "If set, only the final payment update is streamed back. Intermediate updates\nthat show which htlcs are still in flight are suppressed.", - "in": "query", - "required": false, - "type": "boolean" - } - ], - "tags": [ - "Router" - ] - } - } - }, - "definitions": { - "FailureFailureCode": { - "type": "string", - "enum": [ - "RESERVED", - "INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS", - "INCORRECT_PAYMENT_AMOUNT", - "FINAL_INCORRECT_CLTV_EXPIRY", - "FINAL_INCORRECT_HTLC_AMOUNT", - "FINAL_EXPIRY_TOO_SOON", - "INVALID_REALM", - "EXPIRY_TOO_SOON", - "INVALID_ONION_VERSION", - "INVALID_ONION_HMAC", - "INVALID_ONION_KEY", - "AMOUNT_BELOW_MINIMUM", - "FEE_INSUFFICIENT", - "INCORRECT_CLTV_EXPIRY", - "CHANNEL_DISABLED", - "TEMPORARY_CHANNEL_FAILURE", - "REQUIRED_NODE_FEATURE_MISSING", - "REQUIRED_CHANNEL_FEATURE_MISSING", - "UNKNOWN_NEXT_PEER", - "TEMPORARY_NODE_FAILURE", - "PERMANENT_NODE_FAILURE", - "PERMANENT_CHANNEL_FAILURE", - "EXPIRY_TOO_FAR", - "MPP_TIMEOUT", - "INTERNAL_FAILURE", - "UNKNOWN_FAILURE", - "UNREADABLE_FAILURE" - ], - "default": "RESERVED", - "description": " - RESERVED: The numbers assigned in this enumeration match the failure codes as\ndefined in BOLT #4. Because protobuf 3 requires enums to start with 0,\na RESERVED value is added.\n - INTERNAL_FAILURE: An internal error occurred.\n - UNKNOWN_FAILURE: The error source is known, but the failure itself couldn't be decoded.\n - UNREADABLE_FAILURE: An unreadable failure result is returned if the received failure message\ncannot be decrypted. In that case the error source is unknown." 
- }, - "HTLCAttemptHTLCStatus": { - "type": "string", - "enum": [ - "IN_FLIGHT", - "SUCCEEDED", - "FAILED" - ], - "default": "IN_FLIGHT" - }, - "lnrpcChannelUpdate": { - "type": "object", - "properties": { - "signature": { - "type": "string", - "format": "byte", - "description": "The signature that validates the announced data and proves the ownership\nof node id." - }, - "chain_hash": { - "type": "string", - "format": "byte", - "description": "The target chain that this channel was opened within. This value\nshould be the genesis hash of the target chain. Along with the short\nchannel ID, this uniquely identifies the channel globally in a\nblockchain." - }, - "chan_id": { - "type": "string", - "format": "uint64", - "description": "The unique description of the funding transaction." - }, - "timestamp": { - "type": "integer", - "format": "int64", - "description": "A timestamp that allows ordering in the case of multiple announcements.\nWe should ignore the message if timestamp is not greater than the\nlast-received." - }, - "message_flags": { - "type": "integer", - "format": "int64", - "description": "The bitfield that describes whether optional fields are present in this\nupdate. Currently, the least-significant bit must be set to 1 if the\noptional field MaxHtlc is present." - }, - "channel_flags": { - "type": "integer", - "format": "int64", - "description": "The bitfield that describes additional meta-data concerning how the\nupdate is to be interpreted. Currently, the least-significant bit must be\nset to 0 if the creating node corresponds to the first node in the\npreviously sent channel announcement and 1 otherwise. If the second bit\nis set, then the channel is set to be disabled." - }, - "time_lock_delta": { - "type": "integer", - "format": "int64", - "description": "The minimum number of blocks this node requires to be added to the expiry\nof HTLCs. 
This is a security parameter determined by the node operator.\nThis value represents the required gap between the time locks of the\nincoming and outgoing HTLC's set to this node." - }, - "htlc_minimum_msat": { - "type": "string", - "format": "uint64", - "description": "The minimum HTLC value which will be accepted." - }, - "base_fee": { - "type": "integer", - "format": "int64", - "description": "The base fee that must be used for incoming HTLC's to this particular\nchannel. This value will be tacked onto the required for a payment\nindependent of the size of the payment." - }, - "fee_rate": { - "type": "integer", - "format": "int64", - "description": "The fee rate that will be charged per millionth of a satoshi." - }, - "htlc_maximum_msat": { - "type": "string", - "format": "uint64", - "description": "The maximum HTLC value which will be accepted." - }, - "extra_opaque_data": { - "type": "string", - "format": "byte", - "description": "The set of data that was appended to this message, some of which we may\nnot actually know how to iterate or parse. By holding onto this data, we\nensure that we're able to properly validate the set of signatures that\ncover these new fields, and ensure we're able to make upgrades to the\nnetwork in a forwards compatible manner." - } - } - }, - "lnrpcFailure": { - "type": "object", - "properties": { - "code": { - "$ref": "#/definitions/FailureFailureCode", - "title": "Failure code as defined in the Lightning spec" - }, - "channel_update": { - "$ref": "#/definitions/lnrpcChannelUpdate", - "description": "An optional channel update message." - }, - "htlc_msat": { - "type": "string", - "format": "uint64", - "description": "A failure type-dependent htlc value." - }, - "onion_sha_256": { - "type": "string", - "format": "byte", - "description": "The sha256 sum of the onion payload." - }, - "cltv_expiry": { - "type": "integer", - "format": "int64", - "description": "A failure type-dependent cltv expiry value." 
- }, - "flags": { - "type": "integer", - "format": "int64", - "description": "A failure type-dependent flags value." - }, - "failure_source_index": { - "type": "integer", - "format": "int64", - "description": "The position in the path of the intermediate or final node that generated\nthe failure message. Position zero is the sender node." - }, - "height": { - "type": "integer", - "format": "int64", - "description": "A failure type-dependent block height." - } - } - }, - "lnrpcFeatureBit": { - "type": "string", - "enum": [ - "DATALOSS_PROTECT_REQ", - "DATALOSS_PROTECT_OPT", - "INITIAL_ROUING_SYNC", - "UPFRONT_SHUTDOWN_SCRIPT_REQ", - "UPFRONT_SHUTDOWN_SCRIPT_OPT", - "GOSSIP_QUERIES_REQ", - "GOSSIP_QUERIES_OPT", - "TLV_ONION_REQ", - "TLV_ONION_OPT", - "EXT_GOSSIP_QUERIES_REQ", - "EXT_GOSSIP_QUERIES_OPT", - "STATIC_REMOTE_KEY_REQ", - "STATIC_REMOTE_KEY_OPT", - "PAYMENT_ADDR_REQ", - "PAYMENT_ADDR_OPT", - "MPP_REQ", - "MPP_OPT" - ], - "default": "DATALOSS_PROTECT_REQ" - }, - "lnrpcHTLCAttempt": { - "type": "object", - "properties": { - "status": { - "$ref": "#/definitions/HTLCAttemptHTLCStatus", - "description": "The status of the HTLC." - }, - "route": { - "$ref": "#/definitions/lnrpcRoute", - "description": "The route taken by this HTLC." - }, - "attempt_time_ns": { - "type": "string", - "format": "int64", - "description": "The time in UNIX nanoseconds at which this HTLC was sent." - }, - "resolve_time_ns": { - "type": "string", - "format": "int64", - "description": "The time in UNIX nanoseconds at which this HTLC was settled or failed.\nThis value will not be set if the HTLC is still IN_FLIGHT." - }, - "failure": { - "$ref": "#/definitions/lnrpcFailure", - "description": "Detailed htlc failure info." - }, - "preimage": { - "type": "string", - "format": "byte", - "description": "The preimage that was used to settle the HTLC." 
- } - } - }, - "lnrpcHop": { - "type": "object", - "properties": { - "chan_id": { - "type": "string", - "format": "uint64", - "description": "The unique channel ID for the channel. The first 3 bytes are the block\nheight, the next 3 the index within the block, and the last 2 bytes are the\noutput index for the channel." - }, - "chan_capacity": { - "type": "string", - "format": "int64" - }, - "amt_to_forward": { - "type": "string", - "format": "int64" - }, - "fee": { - "type": "string", - "format": "int64" - }, - "expiry": { - "type": "integer", - "format": "int64" - }, - "amt_to_forward_msat": { - "type": "string", - "format": "int64" - }, - "fee_msat": { - "type": "string", - "format": "int64" - }, - "pub_key": { - "type": "string", - "description": "An optional public key of the hop. If the public key is given, the payment\ncan be executed without relying on a copy of the channel graph." - }, - "tlv_payload": { - "type": "boolean", - "description": "If set to true, then this hop will be encoded using the new variable length\nTLV format. Note that if any custom tlv_records below are specified, then\nthis field MUST be set to true for them to be encoded properly." - }, - "mpp_record": { - "$ref": "#/definitions/lnrpcMPPRecord", - "description": "An optional TLV record that signals the use of an MPP payment. If present,\nthe receiver will enforce that that the same mpp_record is included in the\nfinal hop payload of all non-zero payments in the HTLC set. If empty, a\nregular single-shot payment is or was attempted." - }, - "custom_records": { - "type": "object", - "additionalProperties": { - "type": "string", - "format": "byte" - }, - "description": "An optional set of key-value TLV records. This is useful within the context\nof the SendToRoute call as it allows callers to specify arbitrary K-V pairs\nto drop off at each hop within the onion." 
- } - } - }, - "lnrpcHopHint": { - "type": "object", - "properties": { - "node_id": { - "type": "string", - "description": "The public key of the node at the start of the channel." - }, - "chan_id": { - "type": "string", - "format": "uint64", - "description": "The unique identifier of the channel." - }, - "fee_base_msat": { - "type": "integer", - "format": "int64", - "description": "The base fee of the channel denominated in millisatoshis." - }, - "fee_proportional_millionths": { - "type": "integer", - "format": "int64", - "description": "The fee rate of the channel for sending one satoshi across it denominated in\nmillionths of a satoshi." - }, - "cltv_expiry_delta": { - "type": "integer", - "format": "int64", - "description": "The time-lock delta of the channel." - } - } - }, - "lnrpcMPPRecord": { - "type": "object", - "properties": { - "payment_addr": { - "type": "string", - "format": "byte", - "description": "A unique, random identifier used to authenticate the sender as the intended\npayer of a multi-path payment. The payment_addr must be the same for all\nsubpayments, and match the payment_addr provided in the receiver's invoice.\nThe same payment_addr must be used on all subpayments." - }, - "total_amt_msat": { - "type": "string", - "format": "int64", - "description": "The total amount in milli-satoshis being sent as part of a larger multi-path\npayment. The caller is responsible for ensuring subpayments to the same node\nand payment_hash sum exactly to total_amt_msat. The same\ntotal_amt_msat must be used on all subpayments." - } - } - }, - "lnrpcPayment": { - "type": "object", - "properties": { - "payment_hash": { - "type": "string", - "title": "The payment hash" - }, - "value": { - "type": "string", - "format": "int64", - "description": "Deprecated, use value_sat or value_msat." 
- }, - "creation_date": { - "type": "string", - "format": "int64", - "title": "Deprecated, use creation_time_ns" - }, - "fee": { - "type": "string", - "format": "int64", - "description": "Deprecated, use fee_sat or fee_msat." - }, - "payment_preimage": { - "type": "string", - "title": "The payment preimage" - }, - "value_sat": { - "type": "string", - "format": "int64", - "title": "The value of the payment in satoshis" - }, - "value_msat": { - "type": "string", - "format": "int64", - "title": "The value of the payment in milli-satoshis" - }, - "payment_request": { - "type": "string", - "description": "The optional payment request being fulfilled." - }, - "status": { - "$ref": "#/definitions/lnrpcPaymentPaymentStatus", - "description": "The status of the payment." - }, - "fee_sat": { - "type": "string", - "format": "int64", - "title": "The fee paid for this payment in satoshis" - }, - "fee_msat": { - "type": "string", - "format": "int64", - "title": "The fee paid for this payment in milli-satoshis" - }, - "creation_time_ns": { - "type": "string", - "format": "int64", - "description": "The time in UNIX nanoseconds at which the payment was created." - }, - "htlcs": { - "type": "array", - "items": { - "$ref": "#/definitions/lnrpcHTLCAttempt" - }, - "description": "The HTLCs made in attempt to settle the payment." - }, - "payment_index": { - "type": "string", - "format": "uint64", - "description": "The creation index of this payment. Each payment can be uniquely identified\nby this index, which may not strictly increment by 1 for payments made in\nolder versions of lnd." 
- }, - "failure_reason": { - "$ref": "#/definitions/lnrpcPaymentFailureReason" - } - } - }, - "lnrpcPaymentFailureReason": { - "type": "string", - "enum": [ - "FAILURE_REASON_NONE", - "FAILURE_REASON_TIMEOUT", - "FAILURE_REASON_NO_ROUTE", - "FAILURE_REASON_ERROR", - "FAILURE_REASON_INCORRECT_PAYMENT_DETAILS", - "FAILURE_REASON_INSUFFICIENT_BALANCE" - ], - "default": "FAILURE_REASON_NONE", - "description": " - FAILURE_REASON_NONE: Payment isn't failed (yet).\n - FAILURE_REASON_TIMEOUT: There are more routes to try, but the payment timeout was exceeded.\n - FAILURE_REASON_NO_ROUTE: All possible routes were tried and failed permanently. Or were no\nroutes to the destination at all.\n - FAILURE_REASON_ERROR: A non-recoverable error has occured.\n - FAILURE_REASON_INCORRECT_PAYMENT_DETAILS: Payment details incorrect (unknown hash, invalid amt or\ninvalid final cltv delta)\n - FAILURE_REASON_INSUFFICIENT_BALANCE: Insufficient local balance." - }, - "lnrpcPaymentPaymentStatus": { - "type": "string", - "enum": [ - "UNKNOWN", - "IN_FLIGHT", - "SUCCEEDED", - "FAILED" - ], - "default": "UNKNOWN" - }, - "lnrpcRoute": { - "type": "object", - "properties": { - "total_time_lock": { - "type": "integer", - "format": "int64", - "description": "The cumulative (final) time lock across the entire route. This is the CLTV\nvalue that should be extended to the first hop in the route. All other hops\nwill decrement the time-lock as advertised, leaving enough time for all\nhops to wait for or present the payment preimage to complete the payment." - }, - "total_fees": { - "type": "string", - "format": "int64", - "description": "The sum of the fees paid at each hop within the final route. In the case\nof a one-hop payment, this value will be zero as we don't need to pay a fee\nto ourselves." 
- }, - "total_amt": { - "type": "string", - "format": "int64", - "description": "The total amount of funds required to complete a payment over this route.\nThis value includes the cumulative fees at each hop. As a result, the HTLC\nextended to the first-hop in the route will need to have at least this many\nsatoshis, otherwise the route will fail at an intermediate node due to an\ninsufficient amount of fees." - }, - "hops": { - "type": "array", - "items": { - "$ref": "#/definitions/lnrpcHop" - }, - "description": "Contains details concerning the specific forwarding details at each hop." - }, - "total_fees_msat": { - "type": "string", - "format": "int64", - "description": "The total fees in millisatoshis." - }, - "total_amt_msat": { - "type": "string", - "format": "int64", - "description": "The total amount in millisatoshis." - } - }, - "description": "A path through the channel graph which runs over one or more channels in\nsuccession. This struct carries all the information required to craft the\nSphinx onion packet, and send the payment along the first hop in the path. A\nroute is only selected as valid if all the channels have sufficient capacity to\ncarry the initial payment amount after fees are accounted for." - }, - "lnrpcRouteHint": { - "type": "object", - "properties": { - "hop_hints": { - "type": "array", - "items": { - "$ref": "#/definitions/lnrpcHopHint" - }, - "description": "A list of hop hints that when chained together can assist in reaching a\nspecific destination." - } - } - }, - "protobufAny": { - "type": "object", - "properties": { - "type_url": { - "type": "string" - }, - "value": { - "type": "string", - "format": "byte" - } - } - }, - "routerrpcBuildRouteRequest": { - "type": "object", - "properties": { - "amt_msat": { - "type": "string", - "format": "int64", - "description": "The amount to send expressed in msat. If set to zero, the minimum routable\namount is used." 
- }, - "final_cltv_delta": { - "type": "integer", - "format": "int32", - "title": "CLTV delta from the current height that should be used for the timelock\nof the final hop" - }, - "outgoing_chan_id": { - "type": "string", - "format": "uint64", - "description": "The channel id of the channel that must be taken to the first hop. If zero,\nany channel may be used." - }, - "hop_pubkeys": { - "type": "array", - "items": { - "type": "string", - "format": "byte" - }, - "description": "A list of hops that defines the route. This does not include the source hop\npubkey." - } - } - }, - "routerrpcBuildRouteResponse": { - "type": "object", - "properties": { - "route": { - "$ref": "#/definitions/lnrpcRoute", - "description": "Fully specified route that can be used to execute the payment." - } - } - }, - "routerrpcCircuitKey": { - "type": "object", - "properties": { - "chan_id": { - "type": "string", - "format": "uint64", - "description": "/ The id of the channel that the is part of this circuit." - }, - "htlc_id": { - "type": "string", - "format": "uint64", - "description": "/ The index of the incoming htlc in the incoming channel." - } - } - }, - "routerrpcFailureDetail": { - "type": "string", - "enum": [ - "UNKNOWN", - "NO_DETAIL", - "ONION_DECODE", - "LINK_NOT_ELIGIBLE", - "ON_CHAIN_TIMEOUT", - "HTLC_EXCEEDS_MAX", - "INSUFFICIENT_BALANCE", - "INCOMPLETE_FORWARD", - "HTLC_ADD_FAILED", - "FORWARDS_DISABLED", - "INVOICE_CANCELED", - "INVOICE_UNDERPAID", - "INVOICE_EXPIRY_TOO_SOON", - "INVOICE_NOT_OPEN", - "MPP_INVOICE_TIMEOUT", - "ADDRESS_MISMATCH", - "SET_TOTAL_MISMATCH", - "SET_TOTAL_TOO_LOW", - "SET_OVERPAID", - "UNKNOWN_INVOICE", - "INVALID_KEYSEND", - "MPP_IN_PROGRESS", - "CIRCULAR_ROUTE" - ], - "default": "UNKNOWN" - }, - "routerrpcForwardEvent": { - "type": "object", - "properties": { - "info": { - "$ref": "#/definitions/routerrpcHtlcInfo", - "description": "Info contains details about the htlc that was forwarded." 
- } - } - }, - "routerrpcForwardFailEvent": { - "type": "object" - }, - "routerrpcForwardHtlcInterceptRequest": { - "type": "object", - "properties": { - "incoming_circuit_key": { - "$ref": "#/definitions/routerrpcCircuitKey", - "description": "The key of this forwarded htlc. It defines the incoming channel id and\nthe index in this channel." - }, - "incoming_amount_msat": { - "type": "string", - "format": "uint64", - "description": "The incoming htlc amount." - }, - "incoming_expiry": { - "type": "integer", - "format": "int64", - "description": "The incoming htlc expiry." - }, - "payment_hash": { - "type": "string", - "format": "byte", - "description": "The htlc payment hash. This value is not guaranteed to be unique per\nrequest." - }, - "outgoing_requested_chan_id": { - "type": "string", - "format": "uint64", - "description": "The requested outgoing channel id for this forwarded htlc. Because of\nnon-strict forwarding, this isn't necessarily the channel over which the\npacket will be forwarded eventually. A different channel to the same peer\nmay be selected as well." - }, - "outgoing_amount_msat": { - "type": "string", - "format": "uint64", - "description": "The outgoing htlc amount." - }, - "outgoing_expiry": { - "type": "integer", - "format": "int64", - "description": "The outgoing htlc expiry." - }, - "custom_records": { - "type": "object", - "additionalProperties": { - "type": "string", - "format": "byte" - }, - "description": "Any custom records that were present in the payload." - }, - "onion_blob": { - "type": "string", - "format": "byte", - "title": "The onion blob for the next hop" - } - } - }, - "routerrpcHtlcEvent": { - "type": "object", - "properties": { - "incoming_channel_id": { - "type": "string", - "format": "uint64", - "description": "The short channel id that the incoming htlc arrived at our node on. This\nvalue is zero for sends." 
- }, - "outgoing_channel_id": { - "type": "string", - "format": "uint64", - "description": "The short channel id that the outgoing htlc left our node on. This value\nis zero for receives." - }, - "incoming_htlc_id": { - "type": "string", - "format": "uint64", - "description": "Incoming id is the index of the incoming htlc in the incoming channel.\nThis value is zero for sends." - }, - "outgoing_htlc_id": { - "type": "string", - "format": "uint64", - "description": "Outgoing id is the index of the outgoing htlc in the outgoing channel.\nThis value is zero for receives." - }, - "timestamp_ns": { - "type": "string", - "format": "uint64", - "description": "The time in unix nanoseconds that the event occurred." - }, - "event_type": { - "$ref": "#/definitions/routerrpcHtlcEventEventType", - "description": "The event type indicates whether the htlc was part of a send, receive or\nforward." - }, - "forward_event": { - "$ref": "#/definitions/routerrpcForwardEvent" - }, - "forward_fail_event": { - "$ref": "#/definitions/routerrpcForwardFailEvent" - }, - "settle_event": { - "$ref": "#/definitions/routerrpcSettleEvent" - }, - "link_fail_event": { - "$ref": "#/definitions/routerrpcLinkFailEvent" - } - }, - "title": "HtlcEvent contains the htlc event that was processed. These are served on a\nbest-effort basis; events are not persisted, delivery is not guaranteed\n(in the event of a crash in the switch, forward events may be lost) and\nsome events may be replayed upon restart. Events consumed from this package\nshould be de-duplicated by the htlc's unique combination of incoming and\noutgoing channel id and htlc id. [EXPERIMENTAL]" - }, - "routerrpcHtlcEventEventType": { - "type": "string", - "enum": [ - "UNKNOWN", - "SEND", - "RECEIVE", - "FORWARD" - ], - "default": "UNKNOWN" - }, - "routerrpcHtlcInfo": { - "type": "object", - "properties": { - "incoming_timelock": { - "type": "integer", - "format": "int64", - "description": "The timelock on the incoming htlc." 
- }, - "outgoing_timelock": { - "type": "integer", - "format": "int64", - "description": "The timelock on the outgoing htlc." - }, - "incoming_amt_msat": { - "type": "string", - "format": "uint64", - "description": "The amount of the incoming htlc." - }, - "outgoing_amt_msat": { - "type": "string", - "format": "uint64", - "description": "The amount of the outgoing htlc." - } - } - }, - "routerrpcLinkFailEvent": { - "type": "object", - "properties": { - "info": { - "$ref": "#/definitions/routerrpcHtlcInfo", - "description": "Info contains details about the htlc that we failed." - }, - "wire_failure": { - "$ref": "#/definitions/FailureFailureCode", - "description": "FailureCode is the BOLT error code for the failure." - }, - "failure_detail": { - "$ref": "#/definitions/routerrpcFailureDetail", - "description": "FailureDetail provides additional information about the reason for the\nfailure. This detail enriches the information provided by the wire message\nand may be 'no detail' if the wire message requires no additional metadata." - }, - "failure_string": { - "type": "string", - "description": "A string representation of the link failure." - } - } - }, - "routerrpcPairData": { - "type": "object", - "properties": { - "fail_time": { - "type": "string", - "format": "int64", - "description": "Time of last failure." - }, - "fail_amt_sat": { - "type": "string", - "format": "int64", - "description": "Lowest amount that failed to forward rounded to whole sats. This may be\nset to zero if the failure is independent of amount." - }, - "fail_amt_msat": { - "type": "string", - "format": "int64", - "description": "Lowest amount that failed to forward in millisats. This may be\nset to zero if the failure is independent of amount." - }, - "success_time": { - "type": "string", - "format": "int64", - "description": "Time of last success." 
- }, - "success_amt_sat": { - "type": "string", - "format": "int64", - "description": "Highest amount that we could successfully forward rounded to whole sats." - }, - "success_amt_msat": { - "type": "string", - "format": "int64", - "description": "Highest amount that we could successfully forward in millisats." - } - } - }, - "routerrpcPairHistory": { - "type": "object", - "properties": { - "node_from": { - "type": "string", - "format": "byte", - "description": "The source node pubkey of the pair." - }, - "node_to": { - "type": "string", - "format": "byte", - "description": "The destination node pubkey of the pair." - }, - "history": { - "$ref": "#/definitions/routerrpcPairData" - } - }, - "description": "PairHistory contains the mission control state for a particular node pair." - }, - "routerrpcPaymentState": { - "type": "string", - "enum": [ - "IN_FLIGHT", - "SUCCEEDED", - "FAILED_TIMEOUT", - "FAILED_NO_ROUTE", - "FAILED_ERROR", - "FAILED_INCORRECT_PAYMENT_DETAILS", - "FAILED_INSUFFICIENT_BALANCE" - ], - "default": "IN_FLIGHT", - "description": " - IN_FLIGHT: Payment is still in flight.\n - SUCCEEDED: Payment completed successfully.\n - FAILED_TIMEOUT: There are more routes to try, but the payment timeout was exceeded.\n - FAILED_NO_ROUTE: All possible routes were tried and failed permanently. Or were no\nroutes to the destination at all.\n - FAILED_ERROR: A non-recoverable error has occured.\n - FAILED_INCORRECT_PAYMENT_DETAILS: Payment details incorrect (unknown hash, invalid amt or\ninvalid final cltv delta)\n - FAILED_INSUFFICIENT_BALANCE: Insufficient local balance." - }, - "routerrpcPaymentStatus": { - "type": "object", - "properties": { - "state": { - "$ref": "#/definitions/routerrpcPaymentState", - "description": "Current state the payment is in." - }, - "preimage": { - "type": "string", - "format": "byte", - "description": "The pre-image of the payment when state is SUCCEEDED." 
- }, - "htlcs": { - "type": "array", - "items": { - "$ref": "#/definitions/lnrpcHTLCAttempt" - }, - "description": "The HTLCs made in attempt to settle the payment [EXPERIMENTAL]." - } - } - }, - "routerrpcQueryMissionControlResponse": { - "type": "object", - "properties": { - "pairs": { - "type": "array", - "items": { - "$ref": "#/definitions/routerrpcPairHistory" - }, - "description": "Node pair-level mission control state." - } - }, - "description": "QueryMissionControlResponse contains mission control state." - }, - "routerrpcQueryProbabilityResponse": { - "type": "object", - "properties": { - "probability": { - "type": "number", - "format": "double", - "description": "The success probability for the requested pair." - }, - "history": { - "$ref": "#/definitions/routerrpcPairData", - "description": "The historical data for the requested pair." - } - } - }, - "routerrpcResetMissionControlRequest": { - "type": "object" - }, - "routerrpcResetMissionControlResponse": { - "type": "object" - }, - "routerrpcResolveHoldForwardAction": { - "type": "string", - "enum": [ - "SETTLE", - "FAIL", - "RESUME" - ], - "default": "SETTLE" - }, - "routerrpcRouteFeeRequest": { - "type": "object", - "properties": { - "dest": { - "type": "string", - "format": "byte", - "description": "The destination once wishes to obtain a routing fee quote to." - }, - "amt_sat": { - "type": "string", - "format": "int64", - "description": "The amount one wishes to send to the target destination." - } - } - }, - "routerrpcRouteFeeResponse": { - "type": "object", - "properties": { - "routing_fee_msat": { - "type": "string", - "format": "int64", - "description": "A lower bound of the estimated fee to the target destination within the\nnetwork, expressed in milli-satoshis." - }, - "time_lock_delay": { - "type": "string", - "format": "int64", - "description": "An estimate of the worst case time delay that can occur. 
Note that callers\nwill still need to factor in the final CLTV delta of the last hop into this\nvalue." - } - } - }, - "routerrpcSendPaymentRequest": { - "type": "object", - "properties": { - "dest": { - "type": "string", - "format": "byte", - "title": "The identity pubkey of the payment recipient" - }, - "amt": { - "type": "string", - "format": "int64", - "description": "Number of satoshis to send.\n\nThe fields amt and amt_msat are mutually exclusive." - }, - "amt_msat": { - "type": "string", - "format": "int64", - "description": "Number of millisatoshis to send.\n\nThe fields amt and amt_msat are mutually exclusive." - }, - "payment_hash": { - "type": "string", - "format": "byte", - "title": "The hash to use within the payment's HTLC" - }, - "final_cltv_delta": { - "type": "integer", - "format": "int32", - "description": "The CLTV delta from the current height that should be used to set the\ntimelock for the final hop." - }, - "payment_request": { - "type": "string", - "description": "A bare-bones invoice for a payment within the Lightning Network. With the\ndetails of the invoice, the sender has all the data necessary to send a\npayment to the recipient. The amount in the payment request may be zero. In\nthat case it is required to set the amt field as well. If no payment request\nis specified, the following fields are required: dest, amt and payment_hash." - }, - "timeout_seconds": { - "type": "integer", - "format": "int32", - "description": "An upper limit on the amount of time we should spend when attempting to\nfulfill the payment. This is expressed in seconds. If we cannot make a\nsuccessful payment within this time frame, an error will be returned.\nThis field must be non-zero." - }, - "fee_limit_sat": { - "type": "string", - "format": "int64", - "description": "The maximum number of satoshis that will be paid as a fee of the payment.\nIf this field is left to the default value of 0, only zero-fee routes will\nbe considered. 
This usually means single hop routes connecting directly to\nthe destination. To send the payment without a fee limit, use max int here.\n\nThe fields fee_limit_sat and fee_limit_msat are mutually exclusive." - }, - "fee_limit_msat": { - "type": "string", - "format": "int64", - "description": "The maximum number of millisatoshis that will be paid as a fee of the\npayment. If this field is left to the default value of 0, only zero-fee\nroutes will be considered. This usually means single hop routes connecting\ndirectly to the destination. To send the payment without a fee limit, use\nmax int here.\n\nThe fields fee_limit_sat and fee_limit_msat are mutually exclusive." - }, - "outgoing_chan_id": { - "type": "string", - "format": "uint64", - "description": "Deprecated, use outgoing_chan_ids. The channel id of the channel that must\nbe taken to the first hop. If zero, any channel may be used (unless\noutgoing_chan_ids are set)." - }, - "outgoing_chan_ids": { - "type": "array", - "items": { - "type": "string", - "format": "uint64" - }, - "description": "The channel ids of the channels are allowed for the first hop. If empty,\nany channel may be used." - }, - "last_hop_pubkey": { - "type": "string", - "format": "byte", - "description": "The pubkey of the last hop of the route. If empty, any hop may be used." - }, - "cltv_limit": { - "type": "integer", - "format": "int32", - "description": "An optional maximum total time lock for the route. This should not exceed\nlnd's `--max-cltv-expiry` setting. If zero, then the value of\n`--max-cltv-expiry` is enforced." - }, - "route_hints": { - "type": "array", - "items": { - "$ref": "#/definitions/lnrpcRouteHint" - }, - "description": "Optional route hints to reach the destination through private channels." 
- }, - "dest_custom_records": { - "type": "object", - "additionalProperties": { - "type": "string", - "format": "byte" - }, - "description": "An optional field that can be used to pass an arbitrary set of TLV records\nto a peer which understands the new records. This can be used to pass\napplication specific data during the payment attempt. Record types are\nrequired to be in the custom range \u003e= 65536. When using REST, the values\nmust be encoded as base64." - }, - "allow_self_payment": { - "type": "boolean", - "description": "If set, circular payments to self are permitted." - }, - "dest_features": { - "type": "array", - "items": { - "$ref": "#/definitions/lnrpcFeatureBit" - }, - "description": "Features assumed to be supported by the final node. All transitive feature\ndependencies must also be set properly. For a given feature bit pair, either\noptional or remote may be set, but not both. If this field is nil or empty,\nthe router will try to load destination features from the graph as a\nfallback." - }, - "max_parts": { - "type": "integer", - "format": "int64", - "description": "The maximum number of partial payments that may be use to complete the full\namount." - }, - "no_inflight_updates": { - "type": "boolean", - "description": "If set, only the final payment update is streamed back. Intermediate updates\nthat show which htlcs are still in flight are suppressed." - } - } - }, - "routerrpcSendToRouteRequest": { - "type": "object", - "properties": { - "payment_hash": { - "type": "string", - "format": "byte", - "description": "The payment hash to use for the HTLC." - }, - "route": { - "$ref": "#/definitions/lnrpcRoute", - "description": "Route that should be used to attempt to complete the payment." - } - } - }, - "routerrpcSendToRouteResponse": { - "type": "object", - "properties": { - "preimage": { - "type": "string", - "format": "byte", - "description": "The preimage obtained by making the payment." 
- }, - "failure": { - "$ref": "#/definitions/lnrpcFailure", - "description": "The failure message in case the payment failed." - } - } - }, - "routerrpcSettleEvent": { - "type": "object" - }, - "runtimeError": { - "type": "object", - "properties": { - "error": { - "type": "string" - }, - "code": { - "type": "integer", - "format": "int32" - }, - "message": { - "type": "string" - }, - "details": { - "type": "array", - "items": { - "$ref": "#/definitions/protobufAny" - } - } - } - }, - "runtimeStreamError": { - "type": "object", - "properties": { - "grpc_code": { - "type": "integer", - "format": "int32" - }, - "http_code": { - "type": "integer", - "format": "int32" - }, - "message": { - "type": "string" - }, - "http_status": { - "type": "string" - }, - "details": { - "type": "array", - "items": { - "$ref": "#/definitions/protobufAny" - } - } - } - } - } -} diff --git a/lnd/lnrpc/routerrpc/router_backend.go b/lnd/lnrpc/routerrpc/router_backend.go deleted file mode 100644 index 635df4f8..00000000 --- a/lnd/lnrpc/routerrpc/router_backend.go +++ /dev/null @@ -1,1210 +0,0 @@ -package routerrpc - -import ( - "context" - "encoding/hex" - math "math" - "time" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/pktlog/log" - - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/chaincfg" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/htlcswitch" - "github.com/pkt-cash/pktd/lnd/lnrpc" - "github.com/pkt-cash/pktd/lnd/lntypes" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/record" - "github.com/pkt-cash/pktd/lnd/routing" - "github.com/pkt-cash/pktd/lnd/routing/route" - "github.com/pkt-cash/pktd/lnd/subscribe" - "github.com/pkt-cash/pktd/lnd/zpay32" -) - -// RouterBackend contains the backend implementation of the router rpc sub -// server calls. 
-type RouterBackend struct { - // SelfNode is the vertex of the node sending the payment. - SelfNode route.Vertex - - // FetchChannelCapacity is a closure that we'll use the fetch the total - // capacity of a channel to populate in responses. - FetchChannelCapacity func(chanID uint64) (btcutil.Amount, er.R) - - // FetchChannelEndpoints returns the pubkeys of both endpoints of the - // given channel id. - FetchChannelEndpoints func(chanID uint64) (route.Vertex, - route.Vertex, er.R) - - // FindRoutes is a closure that abstracts away how we locate/query for - // routes. - FindRoute func(source, target route.Vertex, - amt lnwire.MilliSatoshi, restrictions *routing.RestrictParams, - destCustomRecords record.CustomSet, - routeHints map[route.Vertex][]*channeldb.ChannelEdgePolicy, - finalExpiry uint16) (*route.Route, er.R) - - MissionControl MissionControl - - // ActiveNetParams are the network parameters of the primary network - // that the route is operating on. This is necessary so we can ensure - // that we receive payment requests that send to destinations on our - // network. - ActiveNetParams *chaincfg.Params - - // Tower is the ControlTower instance that is used to track pending - // payments. - Tower routing.ControlTower - - // MaxTotalTimelock is the maximum total time lock a route is allowed to - // have. - MaxTotalTimelock uint32 - - // DefaultFinalCltvDelta is the default value used as final cltv delta - // when an RPC caller doesn't specify a value. - DefaultFinalCltvDelta uint16 - - // SubscribeHtlcEvents returns a subscription client for the node's - // htlc events. - SubscribeHtlcEvents func() (*subscribe.Client, er.R) - - // InterceptableForwarder exposes the ability to intercept forward events - // by letting the router register a ForwardInterceptor. - InterceptableForwarder htlcswitch.InterceptableHtlcForwarder -} - -// MissionControl defines the mission control dependencies of routerrpc. 
-type MissionControl interface { - // GetProbability is expected to return the success probability of a - // payment from fromNode to toNode. - GetProbability(fromNode, toNode route.Vertex, - amt lnwire.MilliSatoshi) float64 - - // ResetHistory resets the history of MissionControl returning it to a - // state as if no payment attempts have been made. - ResetHistory() er.R - - // GetHistorySnapshot takes a snapshot from the current mission control - // state and actual probability estimates. - GetHistorySnapshot() *routing.MissionControlSnapshot - - // GetPairHistorySnapshot returns the stored history for a given node - // pair. - GetPairHistorySnapshot(fromNode, - toNode route.Vertex) routing.TimedPairResult -} - -// QueryRoutes attempts to query the daemons' Channel Router for a possible -// route to a target destination capable of carrying a specific amount of -// satoshis within the route's flow. The retuned route contains the full -// details required to craft and send an HTLC, also including the necessary -// information that should be present within the Sphinx packet encapsulated -// within the HTLC. -// -// TODO(roasbeef): should return a slice of routes in reality * create separate -// PR to send based on well formatted route -func (r *RouterBackend) QueryRoutes(ctx context.Context, - in *lnrpc.QueryRoutesRequest) (*lnrpc.QueryRoutesResponse, er.R) { - - parsePubKey := func(key string) (route.Vertex, er.R) { - pubKeyBytes, err := util.DecodeHex(key) - if err != nil { - return route.Vertex{}, err - } - - return route.NewVertexFromBytes(pubKeyBytes) - } - - // Parse the hex-encoded source and target public keys into full public - // key objects we can properly manipulate. 
- targetPubKey, err := parsePubKey(in.PubKey) - if err != nil { - return nil, err - } - - var sourcePubKey route.Vertex - if in.SourcePubKey != "" { - var err er.R - sourcePubKey, err = parsePubKey(in.SourcePubKey) - if err != nil { - return nil, err - } - } else { - // If no source is specified, use self. - sourcePubKey = r.SelfNode - } - - // Currently, within the bootstrap phase of the network, we limit the - // largest payment size allotted to (2^32) - 1 mSAT or 4.29 million - // satoshis. - amt, err := lnrpc.UnmarshallAmt(in.Amt, in.AmtMsat) - if err != nil { - return nil, err - } - - // Unmarshall restrictions from request. - feeLimit := lnrpc.CalculateFeeLimit(in.FeeLimit, amt) - - ignoredNodes := make(map[route.Vertex]struct{}) - for _, ignorePubKey := range in.IgnoredNodes { - ignoreVertex, err := route.NewVertexFromBytes(ignorePubKey) - if err != nil { - return nil, err - } - ignoredNodes[ignoreVertex] = struct{}{} - } - - ignoredPairs := make(map[routing.DirectedNodePair]struct{}) - - // Convert deprecated ignoredEdges to pairs. - for _, ignoredEdge := range in.IgnoredEdges { - pair, err := r.rpcEdgeToPair(ignoredEdge) - if err != nil { - log.Warnf("Ignore channel %v skipped: %v", - ignoredEdge.ChannelId, err) - - continue - } - ignoredPairs[pair] = struct{}{} - } - - // Add ignored pairs to set. - for _, ignorePair := range in.IgnoredPairs { - from, err := route.NewVertexFromBytes(ignorePair.From) - if err != nil { - return nil, err - } - - to, err := route.NewVertexFromBytes(ignorePair.To) - if err != nil { - return nil, err - } - - pair := routing.NewDirectedNodePair(from, to) - ignoredPairs[pair] = struct{}{} - } - - // Since QueryRoutes allows having a different source other than - // ourselves, we'll only apply our max time lock if we are the source. 
- maxTotalTimelock := r.MaxTotalTimelock - if sourcePubKey != r.SelfNode { - maxTotalTimelock = math.MaxUint32 - } - cltvLimit, err := ValidateCLTVLimit(in.CltvLimit, maxTotalTimelock) - if err != nil { - return nil, err - } - - // We need to subtract the final delta before passing it into path - // finding. The optimal path is independent of the final cltv delta and - // the path finding algorithm is unaware of this value. - finalCLTVDelta := r.DefaultFinalCltvDelta - if in.FinalCltvDelta != 0 { - finalCLTVDelta = uint16(in.FinalCltvDelta) - } - cltvLimit -= uint32(finalCLTVDelta) - - // Parse destination feature bits. - features, err := UnmarshalFeatures(in.DestFeatures) - if err != nil { - return nil, err - } - - restrictions := &routing.RestrictParams{ - FeeLimit: feeLimit, - ProbabilitySource: func(fromNode, toNode route.Vertex, - amt lnwire.MilliSatoshi) float64 { - - if _, ok := ignoredNodes[fromNode]; ok { - return 0 - } - - pair := routing.DirectedNodePair{ - From: fromNode, - To: toNode, - } - if _, ok := ignoredPairs[pair]; ok { - return 0 - } - - if !in.UseMissionControl { - return 1 - } - - return r.MissionControl.GetProbability( - fromNode, toNode, amt, - ) - }, - DestCustomRecords: record.CustomSet(in.DestCustomRecords), - CltvLimit: cltvLimit, - DestFeatures: features, - } - - // Pass along an outgoing channel restriction if specified. - if in.OutgoingChanId != 0 { - restrictions.OutgoingChannelIDs = []uint64{in.OutgoingChanId} - } - - // Pass along a last hop restriction if specified. - if len(in.LastHopPubkey) > 0 { - lastHop, err := route.NewVertexFromBytes( - in.LastHopPubkey, - ) - if err != nil { - return nil, err - } - restrictions.LastHop = &lastHop - } - - // If we have any TLV records destined for the final hop, then we'll - // attempt to decode them now into a form that the router can more - // easily manipulate. 
- customRecords := record.CustomSet(in.DestCustomRecords) - if err := customRecords.Validate(); err != nil { - return nil, err - } - - // Convert route hints to an edge map. - routeHints, err := unmarshallRouteHints(in.RouteHints) - if err != nil { - return nil, err - } - routeHintEdges, err := routing.RouteHintsToEdges( - routeHints, targetPubKey, - ) - if err != nil { - return nil, err - } - - // Query the channel router for a possible path to the destination that - // can carry `in.Amt` satoshis _including_ the total fee required on - // the route. - route, err := r.FindRoute( - sourcePubKey, targetPubKey, amt, restrictions, - customRecords, routeHintEdges, finalCLTVDelta, - ) - if err != nil { - return nil, err - } - - // For each valid route, we'll convert the result into the format - // required by the RPC system. - rpcRoute, err := r.MarshallRoute(route) - if err != nil { - return nil, err - } - - // Calculate route success probability. Do not rely on a probability - // that could have been returned from path finding, because mission - // control may have been disabled in the provided ProbabilitySource. - successProb := r.getSuccessProbability(route) - - routeResp := &lnrpc.QueryRoutesResponse{ - Routes: []*lnrpc.Route{rpcRoute}, - SuccessProb: successProb, - } - - return routeResp, nil -} - -// getSuccessProbability returns the success probability for the given route -// based on the current state of mission control. -func (r *RouterBackend) getSuccessProbability(rt *route.Route) float64 { - fromNode := rt.SourcePubKey - amtToFwd := rt.TotalAmount - successProb := 1.0 - for _, hop := range rt.Hops { - toNode := hop.PubKeyBytes - - probability := r.MissionControl.GetProbability( - fromNode, toNode, amtToFwd, - ) - - successProb *= probability - - amtToFwd = hop.AmtToForward - fromNode = toNode - } - - return successProb -} - -// rpcEdgeToPair looks up the provided channel and returns the channel endpoints -// as a directed pair. 
-func (r *RouterBackend) rpcEdgeToPair(e *lnrpc.EdgeLocator) ( - routing.DirectedNodePair, er.R) { - - a, b, err := r.FetchChannelEndpoints(e.ChannelId) - if err != nil { - return routing.DirectedNodePair{}, err - } - - var pair routing.DirectedNodePair - if e.DirectionReverse { - pair.From, pair.To = b, a - } else { - pair.From, pair.To = a, b - } - - return pair, nil -} - -// MarshallRoute marshalls an internal route to an rpc route struct. -func (r *RouterBackend) MarshallRoute(route *route.Route) (*lnrpc.Route, er.R) { - resp := &lnrpc.Route{ - TotalTimeLock: route.TotalTimeLock, - TotalFees: int64(route.TotalFees().ToSatoshis()), - TotalFeesMsat: int64(route.TotalFees()), - TotalAmt: int64(route.TotalAmount.ToSatoshis()), - TotalAmtMsat: int64(route.TotalAmount), - Hops: make([]*lnrpc.Hop, len(route.Hops)), - } - incomingAmt := route.TotalAmount - for i, hop := range route.Hops { - fee := route.HopFee(i) - - // Channel capacity is not a defining property of a route. For - // backwards RPC compatibility, we retrieve it here from the - // graph. - chanCapacity, err := r.FetchChannelCapacity(hop.ChannelID) - if err != nil { - // If capacity cannot be retrieved, this may be a - // not-yet-received or private channel. Then report - // amount that is sent through the channel as capacity. - chanCapacity = incomingAmt.ToSatoshis() - } - - // Extract the MPP fields if present on this hop. 
- var mpp *lnrpc.MPPRecord - if hop.MPP != nil { - addr := hop.MPP.PaymentAddr() - - mpp = &lnrpc.MPPRecord{ - PaymentAddr: addr[:], - TotalAmtMsat: int64(hop.MPP.TotalMsat()), - } - } - - resp.Hops[i] = &lnrpc.Hop{ - ChanId: hop.ChannelID, - ChanCapacity: int64(chanCapacity), - AmtToForward: int64(hop.AmtToForward.ToSatoshis()), - AmtToForwardMsat: int64(hop.AmtToForward), - Fee: int64(fee.ToSatoshis()), - FeeMsat: int64(fee), - Expiry: uint32(hop.OutgoingTimeLock), - PubKey: hex.EncodeToString( - hop.PubKeyBytes[:], - ), - CustomRecords: hop.CustomRecords, - TlvPayload: !hop.LegacyPayload, - MppRecord: mpp, - } - incomingAmt = hop.AmtToForward - } - - return resp, nil -} - -// UnmarshallHopWithPubkey unmarshalls an rpc hop for which the pubkey has -// already been extracted. -func UnmarshallHopWithPubkey(rpcHop *lnrpc.Hop, pubkey route.Vertex) (*route.Hop, er.R) { - - customRecords := record.CustomSet(rpcHop.CustomRecords) - if err := customRecords.Validate(); err != nil { - return nil, err - } - - mpp, err := UnmarshalMPP(rpcHop.MppRecord) - if err != nil { - return nil, err - } - - return &route.Hop{ - OutgoingTimeLock: rpcHop.Expiry, - AmtToForward: lnwire.MilliSatoshi(rpcHop.AmtToForwardMsat), - PubKeyBytes: pubkey, - ChannelID: rpcHop.ChanId, - CustomRecords: customRecords, - LegacyPayload: !rpcHop.TlvPayload, - MPP: mpp, - }, nil -} - -// UnmarshallHop unmarshalls an rpc hop that may or may not contain a node -// pubkey. -func (r *RouterBackend) UnmarshallHop(rpcHop *lnrpc.Hop, - prevNodePubKey [33]byte) (*route.Hop, er.R) { - - var pubKeyBytes [33]byte - if rpcHop.PubKey != "" { - // Unmarshall the provided hop pubkey. - pubKey, err := util.DecodeHex(rpcHop.PubKey) - if err != nil { - return nil, er.Errorf("cannot decode pubkey %s", - rpcHop.PubKey) - } - copy(pubKeyBytes[:], pubKey) - } else { - // If no pub key is given of the hop, the local channel graph - // needs to be queried to complete the information necessary for - // routing. 
Discard edge policies, because they may be nil. - node1, node2, err := r.FetchChannelEndpoints(rpcHop.ChanId) - if err != nil { - return nil, err - } - - switch { - case prevNodePubKey == node1: - pubKeyBytes = node2 - case prevNodePubKey == node2: - pubKeyBytes = node1 - default: - return nil, er.Errorf("channel edge does not match " + - "expected node") - } - } - - return UnmarshallHopWithPubkey(rpcHop, pubKeyBytes) -} - -// UnmarshallRoute unmarshalls an rpc route. For hops that don't specify a -// pubkey, the channel graph is queried. -func (r *RouterBackend) UnmarshallRoute(rpcroute *lnrpc.Route) ( - *route.Route, er.R) { - - prevNodePubKey := r.SelfNode - - hops := make([]*route.Hop, len(rpcroute.Hops)) - for i, hop := range rpcroute.Hops { - routeHop, err := r.UnmarshallHop(hop, prevNodePubKey) - if err != nil { - return nil, err - } - - hops[i] = routeHop - - prevNodePubKey = routeHop.PubKeyBytes - } - - route, err := route.NewRouteFromHops( - lnwire.MilliSatoshi(rpcroute.TotalAmtMsat), - rpcroute.TotalTimeLock, - r.SelfNode, - hops, - ) - if err != nil { - return nil, err - } - - return route, nil -} - -// extractIntentFromSendRequest attempts to parse the SendRequest details -// required to dispatch a client from the information presented by an RPC -// client. -func (r *RouterBackend) extractIntentFromSendRequest( - rpcPayReq *SendPaymentRequest) (*routing.LightningPayment, er.R) { - - payIntent := &routing.LightningPayment{} - - // Pass along restrictions on the outgoing channels that may be used. - payIntent.OutgoingChannelIDs = rpcPayReq.OutgoingChanIds - - // Add the deprecated single outgoing channel restriction if present. 
- if rpcPayReq.OutgoingChanId != 0 { - if payIntent.OutgoingChannelIDs != nil { - return nil, er.New("outgoing_chan_id and " + - "outgoing_chan_ids are mutually exclusive") - } - - payIntent.OutgoingChannelIDs = append( - payIntent.OutgoingChannelIDs, rpcPayReq.OutgoingChanId, - ) - } - - // Pass along a last hop restriction if specified. - if len(rpcPayReq.LastHopPubkey) > 0 { - lastHop, err := route.NewVertexFromBytes( - rpcPayReq.LastHopPubkey, - ) - if err != nil { - return nil, err - } - payIntent.LastHop = &lastHop - } - - // Take the CLTV limit from the request if set, otherwise use the max. - cltvLimit, err := ValidateCLTVLimit( - uint32(rpcPayReq.CltvLimit), r.MaxTotalTimelock, - ) - if err != nil { - return nil, err - } - payIntent.CltvLimit = cltvLimit - - // Take max htlcs from the request. Map zero to one for backwards - // compatibility. - maxParts := rpcPayReq.MaxParts - if maxParts == 0 { - maxParts = 1 - } - payIntent.MaxParts = maxParts - - // Take fee limit from request. - payIntent.FeeLimit, err = lnrpc.UnmarshallAmt( - rpcPayReq.FeeLimitSat, rpcPayReq.FeeLimitMsat, - ) - if err != nil { - return nil, err - } - - // Set payment attempt timeout. - if rpcPayReq.TimeoutSeconds == 0 { - return nil, er.New("timeout_seconds must be specified") - } - - customRecords := record.CustomSet(rpcPayReq.DestCustomRecords) - if err := customRecords.Validate(); err != nil { - return nil, err - } - payIntent.DestCustomRecords = customRecords - - payIntent.PayAttemptTimeout = time.Second * - time.Duration(rpcPayReq.TimeoutSeconds) - - // Route hints. - routeHints, err := unmarshallRouteHints( - rpcPayReq.RouteHints, - ) - if err != nil { - return nil, err - } - payIntent.RouteHints = routeHints - - // Unmarshall either sat or msat amount from request. 
- reqAmt, err := lnrpc.UnmarshallAmt( - rpcPayReq.Amt, rpcPayReq.AmtMsat, - ) - if err != nil { - return nil, err - } - - // If the payment request field isn't blank, then the details of the - // invoice are encoded entirely within the encoded payReq. So we'll - // attempt to decode it, populating the payment accordingly. - if rpcPayReq.PaymentRequest != "" { - switch { - - case len(rpcPayReq.Dest) > 0: - return nil, er.New("dest and payment_request " + - "cannot appear together") - - case len(rpcPayReq.PaymentHash) > 0: - return nil, er.New("dest and payment_hash " + - "cannot appear together") - - case rpcPayReq.FinalCltvDelta != 0: - return nil, er.New("dest and final_cltv_delta " + - "cannot appear together") - } - - payReq, err := zpay32.Decode( - rpcPayReq.PaymentRequest, r.ActiveNetParams, - ) - if err != nil { - return nil, err - } - - // Next, we'll ensure that this payreq hasn't already expired. - err = ValidatePayReqExpiry(payReq) - if err != nil { - return nil, err - } - - // If the amount was not included in the invoice, then we let - // the payee specify the amount of satoshis they wish to send. - // We override the amount to pay with the amount provided from - // the payment request. 
- if payReq.MilliSat == nil { - if reqAmt == 0 { - return nil, er.New("amount must be " + - "specified when paying a zero amount " + - "invoice") - } - - payIntent.Amount = reqAmt - } else { - if reqAmt != 0 { - return nil, er.New("amount must not be " + - "specified when paying a non-zero " + - " amount invoice") - } - - payIntent.Amount = *payReq.MilliSat - } - - copy(payIntent.PaymentHash[:], payReq.PaymentHash[:]) - destKey := payReq.Destination.SerializeCompressed() - copy(payIntent.Target[:], destKey) - - payIntent.FinalCLTVDelta = uint16(payReq.MinFinalCLTVExpiry()) - payIntent.RouteHints = append( - payIntent.RouteHints, payReq.RouteHints..., - ) - payIntent.DestFeatures = payReq.Features - payIntent.PaymentAddr = payReq.PaymentAddr - payIntent.PaymentRequest = []byte(rpcPayReq.PaymentRequest) - } else { - // Otherwise, If the payment request field was not specified - // (and a custom route wasn't specified), construct the payment - // from the other fields. - - // Payment destination. - target, err := route.NewVertexFromBytes(rpcPayReq.Dest) - if err != nil { - return nil, err - } - payIntent.Target = target - - // Final payment CLTV delta. - if rpcPayReq.FinalCltvDelta != 0 { - payIntent.FinalCLTVDelta = - uint16(rpcPayReq.FinalCltvDelta) - } else { - payIntent.FinalCLTVDelta = r.DefaultFinalCltvDelta - } - - // Amount. - if reqAmt == 0 { - return nil, er.New("amount must be specified") - } - - payIntent.Amount = reqAmt - - // Payment hash. - copy(payIntent.PaymentHash[:], rpcPayReq.PaymentHash) - - // Parse destination feature bits. - features, err := UnmarshalFeatures(rpcPayReq.DestFeatures) - if err != nil { - return nil, err - } - - payIntent.DestFeatures = features - } - - // Check for disallowed payments to self. - if !rpcPayReq.AllowSelfPayment && payIntent.Target == r.SelfNode { - return nil, er.New("self-payments not allowed") - } - - return payIntent, nil -} - -// unmarshallRouteHints unmarshalls a list of route hints. 
-func unmarshallRouteHints(rpcRouteHints []*lnrpc.RouteHint) ( - [][]zpay32.HopHint, er.R) { - - routeHints := make([][]zpay32.HopHint, 0, len(rpcRouteHints)) - for _, rpcRouteHint := range rpcRouteHints { - routeHint := make( - []zpay32.HopHint, 0, len(rpcRouteHint.HopHints), - ) - for _, rpcHint := range rpcRouteHint.HopHints { - hint, err := unmarshallHopHint(rpcHint) - if err != nil { - return nil, err - } - - routeHint = append(routeHint, hint) - } - routeHints = append(routeHints, routeHint) - } - - return routeHints, nil -} - -// unmarshallHopHint unmarshalls a single hop hint. -func unmarshallHopHint(rpcHint *lnrpc.HopHint) (zpay32.HopHint, er.R) { - pubBytes, err := util.DecodeHex(rpcHint.NodeId) - if err != nil { - return zpay32.HopHint{}, err - } - - pubkey, err := btcec.ParsePubKey(pubBytes, btcec.S256()) - if err != nil { - return zpay32.HopHint{}, err - } - - return zpay32.HopHint{ - NodeID: pubkey, - ChannelID: rpcHint.ChanId, - FeeBaseMSat: rpcHint.FeeBaseMsat, - FeeProportionalMillionths: rpcHint.FeeProportionalMillionths, - CLTVExpiryDelta: uint16(rpcHint.CltvExpiryDelta), - }, nil -} - -// UnmarshalFeatures converts a list of uint32's into a valid feature vector. -// This method checks that feature bit pairs aren't assigned toegether, and -// validates transitive dependencies. -func UnmarshalFeatures( - rpcFeatures []lnrpc.FeatureBit) (*lnwire.FeatureVector, er.R) { - - // If no destination features are specified we'll return nil to signal - // that the router should try to use the graph as a fallback. - if rpcFeatures == nil { - return nil, nil - } - - raw := lnwire.NewRawFeatureVector() - for _, bit := range rpcFeatures { - err := raw.SafeSet(lnwire.FeatureBit(bit)) - if err != nil { - return nil, err - } - } - - return lnwire.NewFeatureVector(raw, lnwire.Features), nil -} - -// ValidatePayReqExpiry checks if the passed payment request has expired. In -// the case it has expired, an error will be returned. 
-func ValidatePayReqExpiry(payReq *zpay32.Invoice) er.R { - expiry := payReq.Expiry() - validUntil := payReq.Timestamp.Add(expiry) - if time.Now().After(validUntil) { - return er.Errorf("invoice expired. Valid until %v", validUntil) - } - - return nil -} - -// ValidateCLTVLimit returns a valid CLTV limit given a value and a maximum. If -// the value exceeds the maximum, then an error is returned. If the value is 0, -// then the maximum is used. -func ValidateCLTVLimit(val, max uint32) (uint32, er.R) { - switch { - case val == 0: - return max, nil - case val > max: - return 0, er.Errorf("total time lock of %v exceeds max "+ - "allowed %v", val, max) - default: - return val, nil - } -} - -// UnmarshalMPP accepts the mpp_total_amt_msat and mpp_payment_addr fields from -// an RPC request and converts into an record.MPP object. An error is returned -// if the payment address is not 0 or 32 bytes. If the total amount and payment -// address are zero-value, the return value will be nil signaling there is no -// MPP record to attach to this hop. Otherwise, a non-nil reocrd will be -// contained combining the provided values. -func UnmarshalMPP(reqMPP *lnrpc.MPPRecord) (*record.MPP, er.R) { - // If no MPP record was submitted, assume the user wants to send a - // regular payment. - if reqMPP == nil { - return nil, nil - } - - reqTotal := reqMPP.TotalAmtMsat - reqAddr := reqMPP.PaymentAddr - - switch { - - // No MPP fields were provided. - case reqTotal == 0 && len(reqAddr) == 0: - return nil, er.Errorf("missing total_msat and payment_addr") - - // Total is present, but payment address is missing. - case reqTotal > 0 && len(reqAddr) == 0: - return nil, er.Errorf("missing payment_addr") - - // Payment address is present, but total is missing. 
- case reqTotal == 0 && len(reqAddr) > 0: - return nil, er.Errorf("missing total_msat") - } - - addr, err := lntypes.MakeHash(reqAddr) - if err != nil { - return nil, er.Errorf("unable to parse "+ - "payment_addr: %v", err) - } - - total := lnwire.MilliSatoshi(reqTotal) - - return record.NewMPP(total, addr), nil -} - -// MarshalHTLCAttempt constructs an RPC HTLCAttempt from the db representation. -func (r *RouterBackend) MarshalHTLCAttempt( - htlc channeldb.HTLCAttempt) (*lnrpc.HTLCAttempt, er.R) { - - route, err := r.MarshallRoute(&htlc.Route) - if err != nil { - return nil, err - } - - rpcAttempt := &lnrpc.HTLCAttempt{ - AttemptTimeNs: MarshalTimeNano(htlc.AttemptTime), - Route: route, - } - - switch { - case htlc.Settle != nil: - rpcAttempt.Status = lnrpc.HTLCAttempt_SUCCEEDED - rpcAttempt.ResolveTimeNs = MarshalTimeNano( - htlc.Settle.SettleTime, - ) - rpcAttempt.Preimage = htlc.Settle.Preimage[:] - - case htlc.Failure != nil: - rpcAttempt.Status = lnrpc.HTLCAttempt_FAILED - rpcAttempt.ResolveTimeNs = MarshalTimeNano( - htlc.Failure.FailTime, - ) - - var err er.R - rpcAttempt.Failure, err = marshallHtlcFailure(htlc.Failure) - if err != nil { - return nil, err - } - default: - rpcAttempt.Status = lnrpc.HTLCAttempt_IN_FLIGHT - } - - return rpcAttempt, nil -} - -// marshallHtlcFailure marshalls htlc fail info from the database to its rpc -// representation. 
-func marshallHtlcFailure(failure *channeldb.HTLCFailInfo) (*lnrpc.Failure, er.R) { - - rpcFailure := &lnrpc.Failure{ - FailureSourceIndex: failure.FailureSourceIndex, - } - - switch failure.Reason { - - case channeldb.HTLCFailUnknown: - rpcFailure.Code = lnrpc.Failure_UNKNOWN_FAILURE - - case channeldb.HTLCFailUnreadable: - rpcFailure.Code = lnrpc.Failure_UNREADABLE_FAILURE - - case channeldb.HTLCFailInternal: - rpcFailure.Code = lnrpc.Failure_INTERNAL_FAILURE - - case channeldb.HTLCFailMessage: - err := marshallWireError(failure.Message, rpcFailure) - if err != nil { - return nil, err - } - - default: - return nil, er.New("unknown htlc failure reason") - } - - return rpcFailure, nil -} - -// MarshalTimeNano converts a time.Time into its nanosecond representation. If -// the time is zero, this method simply returns 0, since calling UnixNano() on a -// zero-valued time is undefined. -func MarshalTimeNano(t time.Time) int64 { - if t.IsZero() { - return 0 - } - return t.UnixNano() -} - -// marshallError marshall an error as received from the switch to rpc structs -// suitable for returning to the caller of an rpc method. -// -// Because of difficulties with using protobuf oneof constructs in some -// languages, the decision was made here to use a single message format for all -// failure messages with some fields left empty depending on the failure type. -func marshallError(sendError er.R) (*lnrpc.Failure, er.R) { - response := &lnrpc.Failure{} - - if htlcswitch.ErrUnreadableFailureMessage.Is(sendError) { - response.Code = lnrpc.Failure_UNREADABLE_FAILURE - return response, nil - } - - rtErr, ok := er.Wrapped(sendError).(htlcswitch.ClearTextError) - if !ok { - return nil, sendError - } - - err := marshallWireError(rtErr.WireMessage(), response) - if err != nil { - return nil, err - } - - // If the ClearTextError received is a ForwardingError, the error - // originated from a node along the route, not locally on our outgoing - // link. 
We set failureSourceIdx to the index of the node where the - // failure occurred. If the error is not a ForwardingError, the failure - // occurred at our node, so we leave the index as 0 to indicate that - // we failed locally. - fErr, ok := rtErr.(*htlcswitch.ForwardingError) - if ok { - response.FailureSourceIndex = uint32(fErr.FailureSourceIdx) - } - - return response, nil -} - -// marshallError marshall an error as received from the switch to rpc structs -// suitable for returning to the caller of an rpc method. -// -// Because of difficulties with using protobuf oneof constructs in some -// languages, the decision was made here to use a single message format for all -// failure messages with some fields left empty depending on the failure type. -func marshallWireError(msg lnwire.FailureMessage, - response *lnrpc.Failure) er.R { - - switch onionErr := msg.(type) { - - case *lnwire.FailIncorrectDetails: - response.Code = lnrpc.Failure_INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS - response.Height = onionErr.Height() - - case *lnwire.FailIncorrectPaymentAmount: - response.Code = lnrpc.Failure_INCORRECT_PAYMENT_AMOUNT - - case *lnwire.FailFinalIncorrectCltvExpiry: - response.Code = lnrpc.Failure_FINAL_INCORRECT_CLTV_EXPIRY - response.CltvExpiry = onionErr.CltvExpiry - - case *lnwire.FailFinalIncorrectHtlcAmount: - response.Code = lnrpc.Failure_FINAL_INCORRECT_HTLC_AMOUNT - response.HtlcMsat = uint64(onionErr.IncomingHTLCAmount) - - case *lnwire.FailFinalExpiryTooSoon: - response.Code = lnrpc.Failure_FINAL_EXPIRY_TOO_SOON - - case *lnwire.FailInvalidRealm: - response.Code = lnrpc.Failure_INVALID_REALM - - case *lnwire.FailExpiryTooSoon: - response.Code = lnrpc.Failure_EXPIRY_TOO_SOON - response.ChannelUpdate = marshallChannelUpdate(&onionErr.Update) - - case *lnwire.FailExpiryTooFar: - response.Code = lnrpc.Failure_EXPIRY_TOO_FAR - - case *lnwire.FailInvalidOnionVersion: - response.Code = lnrpc.Failure_INVALID_ONION_VERSION - response.OnionSha_256 = onionErr.OnionSHA256[:] 
- - case *lnwire.FailInvalidOnionHmac: - response.Code = lnrpc.Failure_INVALID_ONION_HMAC - response.OnionSha_256 = onionErr.OnionSHA256[:] - - case *lnwire.FailInvalidOnionKey: - response.Code = lnrpc.Failure_INVALID_ONION_KEY - response.OnionSha_256 = onionErr.OnionSHA256[:] - - case *lnwire.FailAmountBelowMinimum: - response.Code = lnrpc.Failure_AMOUNT_BELOW_MINIMUM - response.ChannelUpdate = marshallChannelUpdate(&onionErr.Update) - response.HtlcMsat = uint64(onionErr.HtlcMsat) - - case *lnwire.FailFeeInsufficient: - response.Code = lnrpc.Failure_FEE_INSUFFICIENT - response.ChannelUpdate = marshallChannelUpdate(&onionErr.Update) - response.HtlcMsat = uint64(onionErr.HtlcMsat) - - case *lnwire.FailIncorrectCltvExpiry: - response.Code = lnrpc.Failure_INCORRECT_CLTV_EXPIRY - response.ChannelUpdate = marshallChannelUpdate(&onionErr.Update) - response.CltvExpiry = onionErr.CltvExpiry - - case *lnwire.FailChannelDisabled: - response.Code = lnrpc.Failure_CHANNEL_DISABLED - response.ChannelUpdate = marshallChannelUpdate(&onionErr.Update) - response.Flags = uint32(onionErr.Flags) - - case *lnwire.FailTemporaryChannelFailure: - response.Code = lnrpc.Failure_TEMPORARY_CHANNEL_FAILURE - response.ChannelUpdate = marshallChannelUpdate(onionErr.Update) - - case *lnwire.FailRequiredNodeFeatureMissing: - response.Code = lnrpc.Failure_REQUIRED_NODE_FEATURE_MISSING - - case *lnwire.FailRequiredChannelFeatureMissing: - response.Code = lnrpc.Failure_REQUIRED_CHANNEL_FEATURE_MISSING - - case *lnwire.FailUnknownNextPeer: - response.Code = lnrpc.Failure_UNKNOWN_NEXT_PEER - - case *lnwire.FailTemporaryNodeFailure: - response.Code = lnrpc.Failure_TEMPORARY_NODE_FAILURE - - case *lnwire.FailPermanentNodeFailure: - response.Code = lnrpc.Failure_PERMANENT_NODE_FAILURE - - case *lnwire.FailPermanentChannelFailure: - response.Code = lnrpc.Failure_PERMANENT_CHANNEL_FAILURE - - case *lnwire.FailMPPTimeout: - response.Code = lnrpc.Failure_MPP_TIMEOUT - - case nil: - response.Code = 
lnrpc.Failure_UNKNOWN_FAILURE - - default: - return er.Errorf("cannot marshall failure %T", onionErr) - } - - return nil -} - -// marshallChannelUpdate marshalls a channel update as received over the wire to -// the router rpc format. -func marshallChannelUpdate(update *lnwire.ChannelUpdate) *lnrpc.ChannelUpdate { - if update == nil { - return nil - } - - return &lnrpc.ChannelUpdate{ - Signature: update.Signature[:], - ChainHash: update.ChainHash[:], - ChanId: update.ShortChannelID.ToUint64(), - Timestamp: update.Timestamp, - MessageFlags: uint32(update.MessageFlags), - ChannelFlags: uint32(update.ChannelFlags), - TimeLockDelta: uint32(update.TimeLockDelta), - HtlcMinimumMsat: uint64(update.HtlcMinimumMsat), - BaseFee: update.BaseFee, - FeeRate: update.FeeRate, - HtlcMaximumMsat: uint64(update.HtlcMaximumMsat), - ExtraOpaqueData: update.ExtraOpaqueData, - } -} - -// MarshallPayment marshall a payment to its rpc representation. -func (r *RouterBackend) MarshallPayment(payment *channeldb.MPPayment) ( - *lnrpc.Payment, er.R) { - - // Fetch the payment's preimage and the total paid in fees. - var ( - fee lnwire.MilliSatoshi - preimage lntypes.Preimage - ) - for _, htlc := range payment.HTLCs { - // If any of the htlcs have settled, extract a valid - // preimage. 
- if htlc.Settle != nil { - preimage = htlc.Settle.Preimage - fee += htlc.Route.TotalFees() - } - } - - msatValue := int64(payment.Info.Value) - satValue := int64(payment.Info.Value.ToSatoshis()) - - status, err := convertPaymentStatus(payment.Status) - if err != nil { - return nil, err - } - - htlcs := make([]*lnrpc.HTLCAttempt, 0, len(payment.HTLCs)) - for _, dbHTLC := range payment.HTLCs { - htlc, err := r.MarshalHTLCAttempt(dbHTLC) - if err != nil { - return nil, err - } - - htlcs = append(htlcs, htlc) - } - - paymentHash := payment.Info.PaymentHash - creationTimeNS := MarshalTimeNano(payment.Info.CreationTime) - - failureReason, err := marshallPaymentFailureReason( - payment.FailureReason, - ) - if err != nil { - return nil, err - } - - return &lnrpc.Payment{ - PaymentHash: hex.EncodeToString(paymentHash[:]), - Value: satValue, - ValueMsat: msatValue, - ValueSat: satValue, - CreationDate: payment.Info.CreationTime.Unix(), - CreationTimeNs: creationTimeNS, - Fee: int64(fee.ToSatoshis()), - FeeSat: int64(fee.ToSatoshis()), - FeeMsat: int64(fee), - PaymentPreimage: hex.EncodeToString(preimage[:]), - PaymentRequest: string(payment.Info.PaymentRequest), - Status: status, - Htlcs: htlcs, - PaymentIndex: payment.SequenceNum, - FailureReason: failureReason, - }, nil -} - -// convertPaymentStatus converts a channeldb.PaymentStatus to the type expected -// by the RPC. -func convertPaymentStatus(dbStatus channeldb.PaymentStatus) ( - lnrpc.Payment_PaymentStatus, er.R) { - - switch dbStatus { - case channeldb.StatusUnknown: - return lnrpc.Payment_UNKNOWN, nil - - case channeldb.StatusInFlight: - return lnrpc.Payment_IN_FLIGHT, nil - - case channeldb.StatusSucceeded: - return lnrpc.Payment_SUCCEEDED, nil - - case channeldb.StatusFailed: - return lnrpc.Payment_FAILED, nil - - default: - return 0, er.Errorf("unhandled payment status %v", dbStatus) - } -} - -// marshallPaymentFailureReason marshalls the failure reason to the corresponding rpc -// type. 
-func marshallPaymentFailureReason(reason *channeldb.FailureReason) ( - lnrpc.PaymentFailureReason, er.R) { - - if reason == nil { - return lnrpc.PaymentFailureReason_FAILURE_REASON_NONE, nil - } - - switch *reason { - - case channeldb.FailureReasonTimeout: - return lnrpc.PaymentFailureReason_FAILURE_REASON_TIMEOUT, nil - - case channeldb.FailureReasonNoRoute: - return lnrpc.PaymentFailureReason_FAILURE_REASON_NO_ROUTE, nil - - case channeldb.FailureReasonError: - return lnrpc.PaymentFailureReason_FAILURE_REASON_ERROR, nil - - case channeldb.FailureReasonPaymentDetails: - return lnrpc.PaymentFailureReason_FAILURE_REASON_INCORRECT_PAYMENT_DETAILS, nil - - case channeldb.FailureReasonInsufficientBalance: - return lnrpc.PaymentFailureReason_FAILURE_REASON_INSUFFICIENT_BALANCE, nil - } - - return 0, er.New("unknown failure reason") -} diff --git a/lnd/lnrpc/routerrpc/router_backend_test.go b/lnd/lnrpc/routerrpc/router_backend_test.go deleted file mode 100644 index 712df29b..00000000 --- a/lnd/lnrpc/routerrpc/router_backend_test.go +++ /dev/null @@ -1,360 +0,0 @@ -package routerrpc - -import ( - "bytes" - "context" - "testing" - - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/record" - "github.com/pkt-cash/pktd/lnd/routing" - "github.com/pkt-cash/pktd/lnd/routing/route" - - "github.com/pkt-cash/pktd/lnd/lnrpc" -) - -const ( - destKey = "0286098b97bc843372b4426d4b276cea9aa2f48f0428d6f5b66ae101befc14f8b4" - ignoreNodeKey = "02f274f48f3c0d590449a6776e3ce8825076ac376e470e992246eebc565ef8bb2a" - hintNodeKey = "0274e7fb33eafd74fe1acb6db7680bb4aa78e9c839a6e954e38abfad680f645ef7" - - testMissionControlProb = 0.5 -) - -var ( - sourceKey = route.Vertex{1, 2, 3} - - node1 = route.Vertex{10} - - node2 = route.Vertex{11} -) - -// TestQueryRoutes asserts that query routes rpc parameters are properly 
parsed -// and passed onto path finding. -func TestQueryRoutes(t *testing.T) { - t.Run("no mission control", func(t *testing.T) { - testQueryRoutes(t, false, false) - }) - t.Run("no mission control and msat", func(t *testing.T) { - testQueryRoutes(t, false, true) - }) - t.Run("with mission control", func(t *testing.T) { - testQueryRoutes(t, true, false) - }) -} - -func testQueryRoutes(t *testing.T, useMissionControl bool, useMsat bool) { - ignoreNodeBytes, err := util.DecodeHex(ignoreNodeKey) - if err != nil { - t.Fatal(err) - } - - var ignoreNodeVertex route.Vertex - copy(ignoreNodeVertex[:], ignoreNodeBytes) - - destNodeBytes, err := util.DecodeHex(destKey) - if err != nil { - t.Fatal(err) - } - - var ( - lastHop = route.Vertex{64} - outgoingChan = uint64(383322) - ) - - hintNode, err := route.NewVertexFromStr(hintNodeKey) - if err != nil { - t.Fatal(err) - } - - rpcRouteHints := []*lnrpc.RouteHint{ - { - HopHints: []*lnrpc.HopHint{ - { - ChanId: 38484, - NodeId: hintNodeKey, - }, - }, - }, - } - - request := &lnrpc.QueryRoutesRequest{ - PubKey: destKey, - FinalCltvDelta: 100, - IgnoredNodes: [][]byte{ignoreNodeBytes}, - IgnoredEdges: []*lnrpc.EdgeLocator{{ - ChannelId: 555, - DirectionReverse: true, - }}, - IgnoredPairs: []*lnrpc.NodePair{{ - From: node1[:], - To: node2[:], - }}, - UseMissionControl: useMissionControl, - LastHopPubkey: lastHop[:], - OutgoingChanId: outgoingChan, - DestFeatures: []lnrpc.FeatureBit{lnrpc.FeatureBit_MPP_OPT}, - RouteHints: rpcRouteHints, - } - - amtSat := int64(100000) - if useMsat { - request.AmtMsat = amtSat * 1000 - request.FeeLimit = &lnrpc.FeeLimit{ - Limit: &lnrpc.FeeLimit_FixedMsat{ - FixedMsat: 250000, - }, - } - } else { - request.Amt = amtSat - request.FeeLimit = &lnrpc.FeeLimit{ - Limit: &lnrpc.FeeLimit_Fixed{ - Fixed: 250, - }, - } - } - - findRoute := func(source, target route.Vertex, - amt lnwire.MilliSatoshi, restrictions *routing.RestrictParams, - _ record.CustomSet, - routeHints 
map[route.Vertex][]*channeldb.ChannelEdgePolicy, - finalExpiry uint16) (*route.Route, er.R) { - - if int64(amt) != amtSat*1000 { - t.Fatal("unexpected amount") - } - - if source != sourceKey { - t.Fatal("unexpected source key") - } - - if !bytes.Equal(target[:], destNodeBytes) { - t.Fatal("unexpected target key") - } - - if restrictions.FeeLimit != 250*1000 { - t.Fatal("unexpected fee limit") - } - - if restrictions.ProbabilitySource(route.Vertex{2}, - route.Vertex{1}, 0, - ) != 0 { - t.Fatal("expecting 0% probability for ignored edge") - } - - if restrictions.ProbabilitySource(ignoreNodeVertex, - route.Vertex{6}, 0, - ) != 0 { - t.Fatal("expecting 0% probability for ignored node") - } - - if restrictions.ProbabilitySource(node1, node2, 0) != 0 { - t.Fatal("expecting 0% probability for ignored pair") - } - - if *restrictions.LastHop != lastHop { - t.Fatal("unexpected last hop") - } - - if restrictions.OutgoingChannelIDs[0] != outgoingChan { - t.Fatal("unexpected outgoing channel id") - } - - if !restrictions.DestFeatures.HasFeature(lnwire.MPPOptional) { - t.Fatal("unexpected dest features") - } - - if _, ok := routeHints[hintNode]; !ok { - t.Fatal("expected route hint") - } - - expectedProb := 1.0 - if useMissionControl { - expectedProb = testMissionControlProb - } - if restrictions.ProbabilitySource(route.Vertex{4}, - route.Vertex{5}, 0, - ) != expectedProb { - t.Fatal("expecting 100% probability") - } - - hops := []*route.Hop{{}} - return route.NewRouteFromHops(amt, 144, source, hops) - } - - backend := &RouterBackend{ - FindRoute: findRoute, - SelfNode: route.Vertex{1, 2, 3}, - FetchChannelCapacity: func(chanID uint64) ( - btcutil.Amount, er.R) { - - return 1, nil - }, - MissionControl: &mockMissionControl{}, - FetchChannelEndpoints: func(chanID uint64) (route.Vertex, - route.Vertex, er.R) { - - if chanID != 555 { - t.Fatal("expected endpoints to be fetched for "+ - "channel 555, but got %v instead", - chanID) - } - return route.Vertex{1}, route.Vertex{2}, nil - 
}, - } - - resp, err := backend.QueryRoutes(context.Background(), request) - if err != nil { - t.Fatal(err) - } - if len(resp.Routes) != 1 { - t.Fatal("expected a single route response") - } -} - -type mockMissionControl struct { -} - -func (m *mockMissionControl) GetProbability(fromNode, toNode route.Vertex, - amt lnwire.MilliSatoshi) float64 { - - return testMissionControlProb -} - -func (m *mockMissionControl) ResetHistory() er.R { - return nil -} - -func (m *mockMissionControl) GetHistorySnapshot() *routing.MissionControlSnapshot { - return nil -} - -func (m *mockMissionControl) GetPairHistorySnapshot(fromNode, - toNode route.Vertex) routing.TimedPairResult { - - return routing.TimedPairResult{} -} - -type mppOutcome byte - -const ( - valid mppOutcome = iota - invalid - nompp -) - -type unmarshalMPPTest struct { - name string - mpp *lnrpc.MPPRecord - outcome mppOutcome -} - -// TestUnmarshalMPP checks both positive and negative cases of UnmarshalMPP to -// assert that an MPP record is only returned when both fields are properly -// specified. It also asserts that zero-values for both inputs is also valid, -// but returns a nil record. 
-func TestUnmarshalMPP(t *testing.T) { - tests := []unmarshalMPPTest{ - { - name: "nil record", - mpp: nil, - outcome: nompp, - }, - { - name: "invalid total or addr", - mpp: &lnrpc.MPPRecord{ - PaymentAddr: nil, - TotalAmtMsat: 0, - }, - outcome: invalid, - }, - { - name: "valid total only", - mpp: &lnrpc.MPPRecord{ - PaymentAddr: nil, - TotalAmtMsat: 8, - }, - outcome: invalid, - }, - { - name: "valid addr only", - mpp: &lnrpc.MPPRecord{ - PaymentAddr: bytes.Repeat([]byte{0x02}, 32), - TotalAmtMsat: 0, - }, - outcome: invalid, - }, - { - name: "valid total and invalid addr", - mpp: &lnrpc.MPPRecord{ - PaymentAddr: []byte{0x02}, - TotalAmtMsat: 8, - }, - outcome: invalid, - }, - { - name: "valid total and valid addr", - mpp: &lnrpc.MPPRecord{ - PaymentAddr: bytes.Repeat([]byte{0x02}, 32), - TotalAmtMsat: 8, - }, - outcome: valid, - }, - } - - for _, test := range tests { - test := test - t.Run(test.name, func(t *testing.T) { - testUnmarshalMPP(t, test) - }) - } -} - -func testUnmarshalMPP(t *testing.T, test unmarshalMPPTest) { - mpp, err := UnmarshalMPP(test.mpp) - switch test.outcome { - - // Valid arguments should result in no error, a non-nil MPP record, and - // the fields should be set correctly. - case valid: - if err != nil { - t.Fatalf("unable to parse mpp record: %v", err) - } - if mpp == nil { - t.Fatalf("mpp payload should be non-nil") - } - if int64(mpp.TotalMsat()) != test.mpp.TotalAmtMsat { - t.Fatalf("incorrect total msat") - } - addr := mpp.PaymentAddr() - if !bytes.Equal(addr[:], test.mpp.PaymentAddr) { - t.Fatalf("incorrect payment addr") - } - - // Invalid arguments should produce a failure and nil MPP record. - case invalid: - if err == nil { - t.Fatalf("expected failure for invalid mpp") - } - if mpp != nil { - t.Fatalf("mpp payload should be nil for failure") - } - - // Arguments that produce no MPP field should return no error and no MPP - // record. 
- case nompp: - if err != nil { - t.Fatalf("failure for args resulting for no-mpp") - } - if mpp != nil { - t.Fatalf("mpp payload should be nil for no-mpp") - } - - default: - t.Fatalf("test case has non-standard outcome") - } -} diff --git a/lnd/lnrpc/routerrpc/router_server.go b/lnd/lnrpc/routerrpc/router_server.go deleted file mode 100644 index bfb41da2..00000000 --- a/lnd/lnrpc/routerrpc/router_server.go +++ /dev/null @@ -1,645 +0,0 @@ -package routerrpc - -import ( - "context" - "io/ioutil" - "os" - "path/filepath" - "sync/atomic" - - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/lnrpc" - "github.com/pkt-cash/pktd/lnd/lntypes" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/macaroons" - "github.com/pkt-cash/pktd/lnd/routing" - "github.com/pkt-cash/pktd/lnd/routing/route" - "github.com/pkt-cash/pktd/pktlog/log" - - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - "gopkg.in/macaroon-bakery.v2/bakery" -) - -const ( - // subServerName is the name of the sub rpc server. We'll use this name - // to register ourselves, and we also require that the main - // SubServerConfigDispatcher instance recognize as the name of our - subServerName = "RouterRPC" -) - -var ( - errServerShuttingDown = er.GenericErrorType.CodeWithDetail("errServerShuttingDown", - "routerrpc server shutting down") - - // ErrInterceptorAlreadyExists is an error returned when the a new stream - // is opened and there is already one active interceptor. - // The user must disconnect prior to open another stream. - ErrInterceptorAlreadyExists = er.GenericErrorType.CodeWithDetail("ErrInterceptorAlreadyExists", - "interceptor already exists") - - // macaroonOps are the set of capabilities that our minted macaroon (if - // it doesn't already exist) will have. 
- macaroonOps = []bakery.Op{ - { - Entity: "offchain", - Action: "read", - }, - { - Entity: "offchain", - Action: "write", - }, - } - - // macPermissions maps RPC calls to the permissions they require. - macPermissions = map[string][]bakery.Op{ - "/routerrpc.Router/SendPaymentV2": {{ - Entity: "offchain", - Action: "write", - }}, - "/routerrpc.Router/SendToRouteV2": {{ - Entity: "offchain", - Action: "write", - }}, - "/routerrpc.Router/SendToRoute": {{ - Entity: "offchain", - Action: "write", - }}, - "/routerrpc.Router/TrackPaymentV2": {{ - Entity: "offchain", - Action: "read", - }}, - "/routerrpc.Router/EstimateRouteFee": {{ - Entity: "offchain", - Action: "read", - }}, - "/routerrpc.Router/QueryMissionControl": {{ - Entity: "offchain", - Action: "read", - }}, - "/routerrpc.Router/QueryProbability": {{ - Entity: "offchain", - Action: "read", - }}, - "/routerrpc.Router/ResetMissionControl": {{ - Entity: "offchain", - Action: "write", - }}, - "/routerrpc.Router/BuildRoute": {{ - Entity: "offchain", - Action: "read", - }}, - "/routerrpc.Router/SubscribeHtlcEvents": {{ - Entity: "offchain", - Action: "read", - }}, - "/routerrpc.Router/SendPayment": {{ - Entity: "offchain", - Action: "write", - }}, - "/routerrpc.Router/TrackPayment": {{ - Entity: "offchain", - Action: "read", - }}, - "/routerrpc.Router/HtlcInterceptor": {{ - Entity: "offchain", - Action: "write", - }}, - } - - // DefaultRouterMacFilename is the default name of the router macaroon - // that we expect to find via a file handle within the main - // configuration file in this package. - DefaultRouterMacFilename = "router.macaroon" -) - -// Server is a stand alone sub RPC server which exposes functionality that -// allows clients to route arbitrary payment through the Lightning Network. -type Server struct { - started int32 // To be used atomically. - shutdown int32 // To be used atomically. - forwardInterceptorActive int32 // To be used atomically. 
- - cfg *Config - - quit chan struct{} -} - -// A compile time check to ensure that Server fully implements the RouterServer -// gRPC service. -var _ RouterServer = (*Server)(nil) - -// New creates a new instance of the RouterServer given a configuration struct -// that contains all external dependencies. If the target macaroon exists, and -// we're unable to create it, then an error will be returned. We also return -// the set of permissions that we require as a server. At the time of writing -// of this documentation, this is the same macaroon as as the admin macaroon. -func New(cfg *Config) (*Server, lnrpc.MacaroonPerms, er.R) { - // If the path of the router macaroon wasn't generated, then we'll - // assume that it's found at the default network directory. - if cfg.RouterMacPath == "" { - cfg.RouterMacPath = filepath.Join( - cfg.NetworkDir, DefaultRouterMacFilename, - ) - } - - // Now that we know the full path of the router macaroon, we can check - // to see if we need to create it or not. If stateless_init is set - // then we don't write the macaroons. - macFilePath := cfg.RouterMacPath - if cfg.MacService != nil && !cfg.MacService.StatelessInit && - !lnrpc.FileExists(macFilePath) { - - log.Infof("Making macaroons for Router RPC Server at: %v", - macFilePath) - - // At this point, we know that the router macaroon doesn't yet, - // exist, so we need to create it with the help of the main - // macaroon service. 
- routerMac, err := cfg.MacService.NewMacaroon( - context.Background(), macaroons.DefaultRootKeyID, - macaroonOps..., - ) - if err != nil { - return nil, nil, err - } - routerMacBytes, errr := routerMac.M().MarshalBinary() - if errr != nil { - return nil, nil, er.E(errr) - } - errr = ioutil.WriteFile(macFilePath, routerMacBytes, 0644) - if errr != nil { - _ = os.Remove(macFilePath) - return nil, nil, er.E(errr) - } - } - - routerServer := &Server{ - cfg: cfg, - quit: make(chan struct{}), - } - - return routerServer, macPermissions, nil -} - -// Start launches any helper goroutines required for the rpcServer to function. -// -// NOTE: This is part of the lnrpc.SubServer interface. -func (s *Server) Start() er.R { - if atomic.AddInt32(&s.started, 1) != 1 { - return nil - } - - return nil -} - -// Stop signals any active goroutines for a graceful closure. -// -// NOTE: This is part of the lnrpc.SubServer interface. -func (s *Server) Stop() er.R { - if atomic.AddInt32(&s.shutdown, 1) != 1 { - return nil - } - - close(s.quit) - return nil -} - -// Name returns a unique string representation of the sub-server. This can be -// used to identify the sub-server and also de-duplicate them. -// -// NOTE: This is part of the lnrpc.SubServer interface. -func (s *Server) Name() string { - return subServerName -} - -// RegisterWithRootServer will be called by the root gRPC server to direct a -// sub RPC server to register itself with the main gRPC root server. Until this -// is called, each sub-server won't be able to have requests routed towards it. -// -// NOTE: This is part of the lnrpc.SubServer interface. -func (s *Server) RegisterWithRootServer(grpcServer *grpc.Server) er.R { - // We make sure that we register it with the main gRPC server to ensure - // all our methods are routed properly. 
- RegisterRouterServer(grpcServer, s) - - log.Debugf("Router RPC server successfully register with root gRPC " + - "server") - - return nil -} - -// RegisterWithRestServer will be called by the root REST mux to direct a sub -// RPC server to register itself with the main REST mux server. Until this is -// called, each sub-server won't be able to have requests routed towards it. -// -// NOTE: This is part of the lnrpc.SubServer interface. -func (s *Server) RegisterWithRestServer(ctx context.Context, - mux *runtime.ServeMux, dest string, opts []grpc.DialOption) er.R { - - // We make sure that we register it with the main REST server to ensure - // all our methods are routed properly. - errr := RegisterRouterHandlerFromEndpoint(ctx, mux, dest, opts) - if errr != nil { - log.Errorf("Could not register Router REST server "+ - "with root REST server: %v", errr) - return er.E(errr) - } - - log.Debugf("Router REST server successfully registered with " + - "root REST server") - return nil -} - -// SendPaymentV2 attempts to route a payment described by the passed -// PaymentRequest to the final destination. If we are unable to route the -// payment, or cannot find a route that satisfies the constraints in the -// PaymentRequest, then an error will be returned. Otherwise, the payment -// pre-image, along with the final route will be returned. -func (s *Server) SendPaymentV2(req *SendPaymentRequest, - stream Router_SendPaymentV2Server) error { - - payment, err := s.cfg.RouterBackend.extractIntentFromSendRequest(req) - if err != nil { - return er.Native(err) - } - - err = s.cfg.Router.SendPaymentAsync(payment) - if err != nil { - // Transform user errors to grpc code. 
- if channeldb.ErrPaymentInFlight.Is(err) || - channeldb.ErrAlreadyPaid.Is(err) { - - log.Debugf("SendPayment async result for hash %x: %v", - payment.PaymentHash, err) - - return status.Error( - codes.AlreadyExists, err.String(), - ) - } - - log.Errorf("SendPayment async error for hash %x: %v", - payment.PaymentHash, err) - - return er.Native(err) - } - - return s.trackPayment(payment.PaymentHash, stream, req.NoInflightUpdates) -} - -// EstimateRouteFee allows callers to obtain a lower bound w.r.t how much it -// may cost to send an HTLC to the target end destination. -func (s *Server) EstimateRouteFee(ctx context.Context, - req *RouteFeeRequest) (*RouteFeeResponse, error) { - - if len(req.Dest) != 33 { - return nil, er.Native(er.New("invalid length destination key")) - } - var destNode route.Vertex - copy(destNode[:], req.Dest) - - // Next, we'll convert the amount in satoshis to mSAT, which are the - // native unit of LN. - amtMsat := lnwire.NewMSatFromSatoshis(btcutil.Amount(req.AmtSat)) - - // Pick a fee limit - // - // TODO: Change this into behaviour that makes more sense. - feeLimit := lnwire.NewMSatFromSatoshis(btcutil.UnitsPerCoin()) - - // Finally, we'll query for a route to the destination that can carry - // that target amount, we'll only request a single route. Set a - // restriction for the default CLTV limit, otherwise we can find a route - // that exceeds it and is useless to us. 
- mc := s.cfg.RouterBackend.MissionControl - route, err := s.cfg.Router.FindRoute( - s.cfg.RouterBackend.SelfNode, destNode, amtMsat, - &routing.RestrictParams{ - FeeLimit: feeLimit, - CltvLimit: s.cfg.RouterBackend.MaxTotalTimelock, - ProbabilitySource: mc.GetProbability, - }, nil, nil, s.cfg.RouterBackend.DefaultFinalCltvDelta, - ) - if err != nil { - return nil, er.Native(err) - } - - return &RouteFeeResponse{ - RoutingFeeMsat: int64(route.TotalFees()), - TimeLockDelay: int64(route.TotalTimeLock), - }, nil -} - -// SendToRouteV2 sends a payment through a predefined route. The response of this -// call contains structured error information. -func (s *Server) SendToRouteV2(ctx context.Context, - req *SendToRouteRequest) (*lnrpc.HTLCAttempt, error) { - - if req.Route == nil { - return nil, er.Native(er.Errorf("unable to send, no routes provided")) - } - - route, err := s.cfg.RouterBackend.UnmarshallRoute(req.Route) - if err != nil { - return nil, er.Native(err) - } - - hash, err := lntypes.MakeHash(req.PaymentHash) - if err != nil { - return nil, er.Native(err) - } - - // Pass route to the router. This call returns the full htlc attempt - // information as it is stored in the database. It is possible that both - // the attempt return value and err are non-nil. This can happen when - // the attempt was already initiated before the error happened. In that - // case, we give precedence to the attempt information as stored in the - // db. - attempt, err := s.cfg.Router.SendToRoute(hash, route) - if attempt != nil { - rpcAttempt, err := s.cfg.RouterBackend.MarshalHTLCAttempt( - *attempt, - ) - if err != nil { - return nil, er.Native(err) - } - return rpcAttempt, nil - } - - // Transform user errors to grpc code. 
- if channeldb.ErrPaymentInFlight.Is(err) || - channeldb.ErrAlreadyPaid.Is(err) { - - return nil, status.Error(codes.AlreadyExists, err.String()) - } - - return nil, er.Native(err) -} - -// ResetMissionControl clears all mission control state and starts with a clean -// slate. -func (s *Server) ResetMissionControl(ctx context.Context, - req *ResetMissionControlRequest) (*ResetMissionControlResponse, error) { - - err := s.cfg.RouterBackend.MissionControl.ResetHistory() - if err != nil { - return nil, er.Native(err) - } - - return &ResetMissionControlResponse{}, nil -} - -// QueryMissionControl exposes the internal mission control state to callers. It -// is a development feature. -func (s *Server) QueryMissionControl(ctx context.Context, - req *QueryMissionControlRequest) (*QueryMissionControlResponse, error) { - - snapshot := s.cfg.RouterBackend.MissionControl.GetHistorySnapshot() - - rpcPairs := make([]*PairHistory, 0, len(snapshot.Pairs)) - for _, p := range snapshot.Pairs { - // Prevent binding to loop variable. - pair := p - - rpcPair := PairHistory{ - NodeFrom: pair.Pair.From[:], - NodeTo: pair.Pair.To[:], - History: toRPCPairData(&pair.TimedPairResult), - } - - rpcPairs = append(rpcPairs, &rpcPair) - } - - response := QueryMissionControlResponse{ - Pairs: rpcPairs, - } - - return &response, nil -} - -// toRPCPairData marshalls mission control pair data to the rpc struct. -func toRPCPairData(data *routing.TimedPairResult) *PairData { - rpcData := PairData{ - FailAmtSat: int64(data.FailAmt.ToSatoshis()), - FailAmtMsat: int64(data.FailAmt), - SuccessAmtSat: int64(data.SuccessAmt.ToSatoshis()), - SuccessAmtMsat: int64(data.SuccessAmt), - } - - if !data.FailTime.IsZero() { - rpcData.FailTime = data.FailTime.Unix() - } - - if !data.SuccessTime.IsZero() { - rpcData.SuccessTime = data.SuccessTime.Unix() - } - - return &rpcData -} - -// QueryProbability returns the current success probability estimate for a -// given node pair and amount. 
-func (s *Server) QueryProbability(ctx context.Context, - req *QueryProbabilityRequest) (*QueryProbabilityResponse, error) { - - fromNode, err := route.NewVertexFromBytes(req.FromNode) - if err != nil { - return nil, er.Native(err) - } - - toNode, err := route.NewVertexFromBytes(req.ToNode) - if err != nil { - return nil, er.Native(err) - } - - amt := lnwire.MilliSatoshi(req.AmtMsat) - - mc := s.cfg.RouterBackend.MissionControl - prob := mc.GetProbability(fromNode, toNode, amt) - history := mc.GetPairHistorySnapshot(fromNode, toNode) - - return &QueryProbabilityResponse{ - Probability: prob, - History: toRPCPairData(&history), - }, nil -} - -// TrackPaymentV2 returns a stream of payment state updates. The stream is -// closed when the payment completes. -func (s *Server) TrackPaymentV2(request *TrackPaymentRequest, - stream Router_TrackPaymentV2Server) error { - - paymentHash, err := lntypes.MakeHash(request.PaymentHash) - if err != nil { - return er.Native(err) - } - - log.Debugf("TrackPayment called for payment %v", paymentHash) - - return s.trackPayment(paymentHash, stream, request.NoInflightUpdates) -} - -// trackPayment writes payment status updates to the provided stream. -func (s *Server) trackPayment(paymentHash lntypes.Hash, - stream Router_TrackPaymentV2Server, noInflightUpdates bool) error { - - router := s.cfg.RouterBackend - - // Subscribe to the outcome of this payment. - subscription, err := router.Tower.SubscribePayment( - paymentHash, - ) - switch { - case channeldb.ErrPaymentNotInitiated.Is(err): - return status.Error(codes.NotFound, err.String()) - case err != nil: - return er.Native(err) - } - defer subscription.Close() - - // Stream updates back to the client. The first update is always the - // current state of the payment. - for { - select { - case item, ok := <-subscription.Updates: - if !ok { - // No more payment updates. - return nil - } - result := item.(*channeldb.MPPayment) - - // Skip in-flight updates unless requested. 
- if noInflightUpdates && - result.Status == channeldb.StatusInFlight { - - continue - } - - rpcPayment, err := router.MarshallPayment(result) - if err != nil { - return er.Native(err) - } - - // Send event to the client. - errr := stream.Send(rpcPayment) - if errr != nil { - return errr - } - - case <-s.quit: - return er.Native(errServerShuttingDown.Default()) - - case <-stream.Context().Done(): - log.Debugf("Payment status stream %v canceled", paymentHash) - return stream.Context().Err() - } - } -} - -// BuildRoute builds a route from a list of hop addresses. -func (s *Server) BuildRoute(ctx context.Context, - req *BuildRouteRequest) (*BuildRouteResponse, error) { - - // Unmarshall hop list. - hops := make([]route.Vertex, len(req.HopPubkeys)) - for i, pubkeyBytes := range req.HopPubkeys { - pubkey, err := route.NewVertexFromBytes(pubkeyBytes) - if err != nil { - return nil, er.Native(err) - } - hops[i] = pubkey - } - - // Prepare BuildRoute call parameters from rpc request. - var amt *lnwire.MilliSatoshi - if req.AmtMsat != 0 { - rpcAmt := lnwire.MilliSatoshi(req.AmtMsat) - amt = &rpcAmt - } - - var outgoingChan *uint64 - if req.OutgoingChanId != 0 { - outgoingChan = &req.OutgoingChanId - } - - // Build the route and return it to the caller. - route, err := s.cfg.Router.BuildRoute( - amt, hops, outgoingChan, req.FinalCltvDelta, - ) - if err != nil { - return nil, er.Native(err) - } - - rpcRoute, err := s.cfg.RouterBackend.MarshallRoute(route) - if err != nil { - return nil, er.Native(err) - } - - routeResp := &BuildRouteResponse{ - Route: rpcRoute, - } - - return routeResp, nil -} - -// SubscribeHtlcEvents creates a uni-directional stream from the server to -// the client which delivers a stream of htlc events. 
-func (s *Server) SubscribeHtlcEvents(req *SubscribeHtlcEventsRequest, - stream Router_SubscribeHtlcEventsServer) error { - - htlcClient, err := s.cfg.RouterBackend.SubscribeHtlcEvents() - if err != nil { - return er.Native(err) - } - defer htlcClient.Cancel() - - for { - select { - case event := <-htlcClient.Updates(): - rpcEvent, err := rpcHtlcEvent(event) - if err != nil { - return er.Native(err) - } - - if err := stream.Send(rpcEvent); err != nil { - return err - } - - // If the stream's context is cancelled, return an error. - case <-stream.Context().Done(): - log.Debugf("htlc event stream cancelled") - return stream.Context().Err() - - // If the subscribe client terminates, exit with an error. - case <-htlcClient.Quit(): - return er.Native(er.New("htlc event subscription terminated")) - - // If the server has been signalled to shut down, exit. - case <-s.quit: - return er.Native(errServerShuttingDown.Default()) - } - } -} - -// HtlcInterceptor is a bidirectional stream for streaming interception -// requests to the caller. -// Upon connection it does the following: -// 1. Check if there is already a live stream, if yes it rejects the request. -// 2. Regsitered a ForwardInterceptor -// 3. Delivers to the caller every √√ and detect his answer. -// It uses a local implementation of holdForwardsStore to keep all the hold -// forwards and find them when manual resolution is later needed. -func (s *Server) HtlcInterceptor(stream Router_HtlcInterceptorServer) error { - // We ensure there is only one interceptor at a time. - if !atomic.CompareAndSwapInt32(&s.forwardInterceptorActive, 0, 1) { - return er.Native(ErrInterceptorAlreadyExists.Default()) - } - defer atomic.CompareAndSwapInt32(&s.forwardInterceptorActive, 1, 0) - - // run the forward interceptor. 
- return er.Native(newForwardInterceptor(s, stream).run()) -} diff --git a/lnd/lnrpc/routerrpc/router_server_deprecated.go b/lnd/lnrpc/routerrpc/router_server_deprecated.go deleted file mode 100644 index 6b2e0ee1..00000000 --- a/lnd/lnrpc/routerrpc/router_server_deprecated.go +++ /dev/null @@ -1,119 +0,0 @@ -package routerrpc - -import ( - "context" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/lnd/lnrpc" -) - -// legacyTrackPaymentServer is a wrapper struct that transforms a stream of main -// rpc payment structs into the legacy PaymentStatus format. -type legacyTrackPaymentServer struct { - Router_TrackPaymentServer -} - -// Send converts a Payment object and sends it as a PaymentStatus object on the -// embedded stream. -func (i *legacyTrackPaymentServer) Send(p *lnrpc.Payment) error { - var state PaymentState - switch p.Status { - case lnrpc.Payment_IN_FLIGHT: - state = PaymentState_IN_FLIGHT - case lnrpc.Payment_SUCCEEDED: - state = PaymentState_SUCCEEDED - case lnrpc.Payment_FAILED: - switch p.FailureReason { - case lnrpc.PaymentFailureReason_FAILURE_REASON_NONE: - return er.Native(er.Errorf("expected fail reason")) - - case lnrpc.PaymentFailureReason_FAILURE_REASON_TIMEOUT: - state = PaymentState_FAILED_TIMEOUT - - case lnrpc.PaymentFailureReason_FAILURE_REASON_NO_ROUTE: - state = PaymentState_FAILED_NO_ROUTE - - case lnrpc.PaymentFailureReason_FAILURE_REASON_ERROR: - state = PaymentState_FAILED_ERROR - - case lnrpc.PaymentFailureReason_FAILURE_REASON_INCORRECT_PAYMENT_DETAILS: - state = PaymentState_FAILED_INCORRECT_PAYMENT_DETAILS - - case lnrpc.PaymentFailureReason_FAILURE_REASON_INSUFFICIENT_BALANCE: - state = PaymentState_FAILED_INSUFFICIENT_BALANCE - - default: - return er.Native(er.Errorf("unknown failure reason %v", - p.FailureReason)) - } - default: - return er.Native(er.Errorf("unknown state %v", p.Status)) - } - - preimage, err := util.DecodeHex(p.PaymentPreimage) - if err != nil { 
- return er.Native(err) - } - - legacyState := PaymentStatus{ - State: state, - Preimage: preimage, - Htlcs: p.Htlcs, - } - - return i.Router_TrackPaymentServer.Send(&legacyState) -} - -// TrackPayment returns a stream of payment state updates. The stream is -// closed when the payment completes. -func (s *Server) TrackPayment(request *TrackPaymentRequest, - stream Router_TrackPaymentServer) error { - - legacyStream := legacyTrackPaymentServer{ - Router_TrackPaymentServer: stream, - } - return s.TrackPaymentV2(request, &legacyStream) -} - -// SendPayment attempts to route a payment described by the passed -// PaymentRequest to the final destination. If we are unable to route the -// payment, or cannot find a route that satisfies the constraints in the -// PaymentRequest, then an error will be returned. Otherwise, the payment -// pre-image, along with the final route will be returned. -func (s *Server) SendPayment(request *SendPaymentRequest, - stream Router_SendPaymentServer) error { - - if request.MaxParts > 1 { - return er.Native(er.New("for multi-part payments, use SendPaymentV2")) - } - - legacyStream := legacyTrackPaymentServer{ - Router_TrackPaymentServer: stream, - } - return s.SendPaymentV2(request, &legacyStream) -} - -// SendToRoute sends a payment through a predefined route. The response of this -// call contains structured error information. -func (s *Server) SendToRoute(ctx context.Context, - req *SendToRouteRequest) (*SendToRouteResponse, error) { - - resp, err := s.SendToRouteV2(ctx, req) - if err != nil { - return nil, err - } - - if resp == nil { - return nil, nil - } - - // Need to convert to legacy response message because proto identifiers - // don't line up. 
- legacyResp := &SendToRouteResponse{ - Preimage: resp.Preimage, - Failure: resp.Failure, - } - - return legacyResp, err -} diff --git a/lnd/lnrpc/routerrpc/routing_config.go b/lnd/lnrpc/routerrpc/routing_config.go deleted file mode 100644 index ee74a0a8..00000000 --- a/lnd/lnrpc/routerrpc/routing_config.go +++ /dev/null @@ -1,46 +0,0 @@ -package routerrpc - -import ( - "time" - - "github.com/pkt-cash/pktd/btcutil" -) - -// RoutingConfig contains the configurable parameters that control routing. -type RoutingConfig struct { - // MinRouteProbability is the minimum required route success probability - // to attempt the payment. - MinRouteProbability float64 `long:"minrtprob" description:"Minimum required route success probability to attempt the payment"` - - // AprioriHopProbability is the assumed success probability of a hop in - // a route when no other information is available. - AprioriHopProbability float64 `long:"apriorihopprob" description:"Assumed success probability of a hop in a route when no other information is available."` - - // AprioriWeight is a value in the range [0, 1] that defines to what - // extent historical results should be extrapolated to untried - // connections. Setting it to one will completely ignore historical - // results and always assume the configured a priori probability for - // untried connections. A value of zero will ignore the a priori - // probability completely and only base the probability on historical - // results, unless there are none available. - AprioriWeight float64 `long:"aprioriweight" description:"Weight of the a priori probability in success probability estimation. Valid values are in [0, 1]."` - - // PenaltyHalfLife defines after how much time a penalized node or - // channel is back at 50% probability. 
- PenaltyHalfLife time.Duration `long:"penaltyhalflife" description:"Defines the duration after which a penalized node or channel is back at 50% probability"` - - // AttemptCost is the fixed virtual cost in path finding of a failed - // payment attempt. It is used to trade off potentially better routes - // against their probability of succeeding. - AttemptCost btcutil.Amount `long:"attemptcost" description:"The fixed (virtual) cost in sats of a failed payment attempt"` - - // AttemptCostPPM is the proportional virtual cost in path finding of a - // failed payment attempt. It is used to trade off potentially better - // routes against their probability of succeeding. This parameter is - // expressed in parts per million of the total payment amount. - AttemptCostPPM int64 `long:"attemptcostppm" description:"The proportional (virtual) cost in sats of a failed payment attempt expressed in parts per million of the total payment amount"` - - // MaxMcHistory defines the maximum number of payment results that - // are held on disk by mission control. - MaxMcHistory int `long:"maxmchistory" description:"the maximum number of payment results that are held on disk by mission control"` -} diff --git a/lnd/lnrpc/routerrpc/subscribe_events.go b/lnd/lnrpc/routerrpc/subscribe_events.go deleted file mode 100644 index 0e2cca8c..00000000 --- a/lnd/lnrpc/routerrpc/subscribe_events.go +++ /dev/null @@ -1,245 +0,0 @@ -package routerrpc - -import ( - "time" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/htlcswitch" - "github.com/pkt-cash/pktd/lnd/invoices" - "github.com/pkt-cash/pktd/lnd/lnrpc" -) - -// rpcHtlcEvent returns a rpc htlc event from a htlcswitch event. 
-func rpcHtlcEvent(htlcEvent interface{}) (*HtlcEvent, er.R) { - var ( - key htlcswitch.HtlcKey - timestamp time.Time - eventType htlcswitch.HtlcEventType - event isHtlcEvent_Event - ) - - switch e := htlcEvent.(type) { - case *htlcswitch.ForwardingEvent: - event = &HtlcEvent_ForwardEvent{ - ForwardEvent: &ForwardEvent{ - Info: rpcInfo(e.HtlcInfo), - }, - } - - key = e.HtlcKey - eventType = e.HtlcEventType - timestamp = e.Timestamp - - case *htlcswitch.ForwardingFailEvent: - event = &HtlcEvent_ForwardFailEvent{ - ForwardFailEvent: &ForwardFailEvent{}, - } - - key = e.HtlcKey - eventType = e.HtlcEventType - timestamp = e.Timestamp - - case *htlcswitch.LinkFailEvent: - failureCode, failReason, err := rpcFailReason( - e.LinkError, - ) - if err != nil { - return nil, err - } - - event = &HtlcEvent_LinkFailEvent{ - LinkFailEvent: &LinkFailEvent{ - Info: rpcInfo(e.HtlcInfo), - WireFailure: failureCode, - FailureDetail: failReason, - FailureString: e.LinkError.Error(), - }, - } - - key = e.HtlcKey - eventType = e.HtlcEventType - timestamp = e.Timestamp - - case *htlcswitch.SettleEvent: - event = &HtlcEvent_SettleEvent{ - SettleEvent: &SettleEvent{}, - } - - key = e.HtlcKey - eventType = e.HtlcEventType - timestamp = e.Timestamp - - default: - return nil, er.Errorf("unknown event type: %T", e) - } - - rpcEvent := &HtlcEvent{ - IncomingChannelId: key.IncomingCircuit.ChanID.ToUint64(), - OutgoingChannelId: key.OutgoingCircuit.ChanID.ToUint64(), - IncomingHtlcId: key.IncomingCircuit.HtlcID, - OutgoingHtlcId: key.OutgoingCircuit.HtlcID, - TimestampNs: uint64(timestamp.UnixNano()), - Event: event, - } - - // Convert the htlc event type to a rpc event. 
- switch eventType { - case htlcswitch.HtlcEventTypeSend: - rpcEvent.EventType = HtlcEvent_SEND - - case htlcswitch.HtlcEventTypeReceive: - rpcEvent.EventType = HtlcEvent_RECEIVE - - case htlcswitch.HtlcEventTypeForward: - rpcEvent.EventType = HtlcEvent_FORWARD - - default: - return nil, er.Errorf("unknown event type: %v", eventType) - } - - return rpcEvent, nil -} - -// rpcInfo returns a rpc struct containing the htlc information from the -// switch's htlc info struct. -func rpcInfo(info htlcswitch.HtlcInfo) *HtlcInfo { - return &HtlcInfo{ - IncomingTimelock: info.IncomingTimeLock, - OutgoingTimelock: info.OutgoingTimeLock, - IncomingAmtMsat: uint64(info.IncomingAmt), - OutgoingAmtMsat: uint64(info.OutgoingAmt), - } -} - -// rpcFailReason maps a lnwire failure message and failure detail to a rpc -// failure code and detail. -func rpcFailReason(linkErr *htlcswitch.LinkError) (lnrpc.Failure_FailureCode, - FailureDetail, er.R) { - - wireErr, err := marshallError(er.E(linkErr)) - if err != nil { - return 0, 0, err - } - wireCode := wireErr.GetCode() - - // If the link has no failure detail, return with failure detail none. - if linkErr.FailureDetail == nil { - return wireCode, FailureDetail_NO_DETAIL, nil - } - - switch failureDetail := linkErr.FailureDetail.(type) { - case invoices.FailResolutionResult: - fd, err := rpcFailureResolution(failureDetail) - return wireCode, fd, err - - case htlcswitch.OutgoingFailure: - fd, err := rpcOutgoingFailure(failureDetail) - return wireCode, fd, err - - default: - return 0, 0, er.Errorf("unknown failure "+ - "detail type: %T", linkErr.FailureDetail) - - } - -} - -// rpcFailureResolution maps an invoice failure resolution to a rpc failure -// detail. Invoice failures have no zero resolution results (every failure -// is accompanied with a result), so we error if we fail to match the result -// type. 
-func rpcFailureResolution(invoiceFailure invoices.FailResolutionResult) ( - FailureDetail, er.R) { - - switch invoiceFailure { - case invoices.ResultReplayToCanceled: - return FailureDetail_INVOICE_CANCELED, nil - - case invoices.ResultInvoiceAlreadyCanceled: - return FailureDetail_INVOICE_CANCELED, nil - - case invoices.ResultAmountTooLow: - return FailureDetail_INVOICE_UNDERPAID, nil - - case invoices.ResultExpiryTooSoon: - return FailureDetail_INVOICE_EXPIRY_TOO_SOON, nil - - case invoices.ResultCanceled: - return FailureDetail_INVOICE_CANCELED, nil - - case invoices.ResultInvoiceNotOpen: - return FailureDetail_INVOICE_NOT_OPEN, nil - - case invoices.ResultMppTimeout: - return FailureDetail_MPP_INVOICE_TIMEOUT, nil - - case invoices.ResultAddressMismatch: - return FailureDetail_ADDRESS_MISMATCH, nil - - case invoices.ResultHtlcSetTotalMismatch: - return FailureDetail_SET_TOTAL_MISMATCH, nil - - case invoices.ResultHtlcSetTotalTooLow: - return FailureDetail_SET_TOTAL_TOO_LOW, nil - - case invoices.ResultHtlcSetOverpayment: - return FailureDetail_SET_OVERPAID, nil - - case invoices.ResultInvoiceNotFound: - return FailureDetail_UNKNOWN_INVOICE, nil - - case invoices.ResultKeySendError: - return FailureDetail_INVALID_KEYSEND, nil - - case invoices.ResultMppInProgress: - return FailureDetail_MPP_IN_PROGRESS, nil - - default: - return 0, er.Errorf("unknown fail resolution: %v", - invoiceFailure.FailureString()) - } -} - -// rpcOutgoingFailure maps an outgoing failure to a rpc FailureDetail. If the -// failure detail is FailureDetailNone, which indicates that the failure was -// a wire message which required no further failure detail, we return a no -// detail failure detail to indicate that there was no additional information. 
-func rpcOutgoingFailure(failureDetail htlcswitch.OutgoingFailure) ( - FailureDetail, er.R) { - - switch failureDetail { - case htlcswitch.OutgoingFailureNone: - return FailureDetail_NO_DETAIL, nil - - case htlcswitch.OutgoingFailureDecodeError: - return FailureDetail_ONION_DECODE, nil - - case htlcswitch.OutgoingFailureLinkNotEligible: - return FailureDetail_LINK_NOT_ELIGIBLE, nil - - case htlcswitch.OutgoingFailureOnChainTimeout: - return FailureDetail_ON_CHAIN_TIMEOUT, nil - - case htlcswitch.OutgoingFailureHTLCExceedsMax: - return FailureDetail_HTLC_EXCEEDS_MAX, nil - - case htlcswitch.OutgoingFailureInsufficientBalance: - return FailureDetail_INSUFFICIENT_BALANCE, nil - - case htlcswitch.OutgoingFailureCircularRoute: - return FailureDetail_CIRCULAR_ROUTE, nil - - case htlcswitch.OutgoingFailureIncompleteForward: - return FailureDetail_INCOMPLETE_FORWARD, nil - - case htlcswitch.OutgoingFailureDownstreamHtlcAdd: - return FailureDetail_HTLC_ADD_FAILED, nil - - case htlcswitch.OutgoingFailureForwardsDisabled: - return FailureDetail_FORWARDS_DISABLED, nil - - default: - return 0, er.Errorf("unknown outgoing failure "+ - "detail: %v", failureDetail.FailureString()) - } -} diff --git a/lnd/lnrpc/rpc.pb.go b/lnd/lnrpc/rpc.pb.go deleted file mode 100644 index 9daa67d3..00000000 --- a/lnd/lnrpc/rpc.pb.go +++ /dev/null @@ -1,17164 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: rpc.proto - -package lnrpc - -import ( - context "context" - fmt "fmt" - proto "github.com/golang/protobuf/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. 
-// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -// -//`AddressType` has to be one of: -// -//- `p2wkh`: Pay to witness key hash (`WITNESS_PUBKEY_HASH` = 0) -//- `np2wkh`: Pay to nested witness key hash (`NESTED_PUBKEY_HASH` = 1) -type AddressType int32 - -const ( - AddressType_WITNESS_PUBKEY_HASH AddressType = 0 - AddressType_NESTED_PUBKEY_HASH AddressType = 1 - AddressType_UNUSED_WITNESS_PUBKEY_HASH AddressType = 2 - AddressType_UNUSED_NESTED_PUBKEY_HASH AddressType = 3 -) - -var AddressType_name = map[int32]string{ - 0: "WITNESS_PUBKEY_HASH", - 1: "NESTED_PUBKEY_HASH", - 2: "UNUSED_WITNESS_PUBKEY_HASH", - 3: "UNUSED_NESTED_PUBKEY_HASH", -} - -var AddressType_value = map[string]int32{ - "WITNESS_PUBKEY_HASH": 0, - "NESTED_PUBKEY_HASH": 1, - "UNUSED_WITNESS_PUBKEY_HASH": 2, - "UNUSED_NESTED_PUBKEY_HASH": 3, -} - -func (x AddressType) String() string { - return proto.EnumName(AddressType_name, int32(x)) -} - -func (AddressType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{0} -} - -type CommitmentType int32 - -const ( - // - //A channel using the legacy commitment format having tweaked to_remote - //keys. - CommitmentType_LEGACY CommitmentType = 0 - // - //A channel that uses the modern commitment format where the key in the - //output of the remote party does not change each state. This makes back - //up and recovery easier as when the channel is closed, the funds go - //directly to that key. - CommitmentType_STATIC_REMOTE_KEY CommitmentType = 1 - // - //A channel that uses a commitment format that has anchor outputs on the - //commitments, allowing fee bumping after a force close transaction has - //been broadcast. - CommitmentType_ANCHORS CommitmentType = 2 - // - //Returned when the commitment type isn't known or unavailable. 
- CommitmentType_UNKNOWN_COMMITMENT_TYPE CommitmentType = 999 -) - -var CommitmentType_name = map[int32]string{ - 0: "LEGACY", - 1: "STATIC_REMOTE_KEY", - 2: "ANCHORS", - 999: "UNKNOWN_COMMITMENT_TYPE", -} - -var CommitmentType_value = map[string]int32{ - "LEGACY": 0, - "STATIC_REMOTE_KEY": 1, - "ANCHORS": 2, - "UNKNOWN_COMMITMENT_TYPE": 999, -} - -func (x CommitmentType) String() string { - return proto.EnumName(CommitmentType_name, int32(x)) -} - -func (CommitmentType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{1} -} - -type Initiator int32 - -const ( - Initiator_INITIATOR_UNKNOWN Initiator = 0 - Initiator_INITIATOR_LOCAL Initiator = 1 - Initiator_INITIATOR_REMOTE Initiator = 2 - Initiator_INITIATOR_BOTH Initiator = 3 -) - -var Initiator_name = map[int32]string{ - 0: "INITIATOR_UNKNOWN", - 1: "INITIATOR_LOCAL", - 2: "INITIATOR_REMOTE", - 3: "INITIATOR_BOTH", -} - -var Initiator_value = map[string]int32{ - "INITIATOR_UNKNOWN": 0, - "INITIATOR_LOCAL": 1, - "INITIATOR_REMOTE": 2, - "INITIATOR_BOTH": 3, -} - -func (x Initiator) String() string { - return proto.EnumName(Initiator_name, int32(x)) -} - -func (Initiator) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{2} -} - -type ResolutionType int32 - -const ( - ResolutionType_TYPE_UNKNOWN ResolutionType = 0 - // We resolved an anchor output. - ResolutionType_ANCHOR ResolutionType = 1 - // - //We are resolving an incoming htlc on chain. This if this htlc is - //claimed, we swept the incoming htlc with the preimage. If it is timed - //out, our peer swept the timeout path. - ResolutionType_INCOMING_HTLC ResolutionType = 2 - // - //We are resolving an outgoing htlc on chain. If this htlc is claimed, - //the remote party swept the htlc with the preimage. If it is timed out, - //we swept it with the timeout path. - ResolutionType_OUTGOING_HTLC ResolutionType = 3 - // We force closed and need to sweep our time locked commitment output. 
- ResolutionType_COMMIT ResolutionType = 4 -) - -var ResolutionType_name = map[int32]string{ - 0: "TYPE_UNKNOWN", - 1: "ANCHOR", - 2: "INCOMING_HTLC", - 3: "OUTGOING_HTLC", - 4: "COMMIT", -} - -var ResolutionType_value = map[string]int32{ - "TYPE_UNKNOWN": 0, - "ANCHOR": 1, - "INCOMING_HTLC": 2, - "OUTGOING_HTLC": 3, - "COMMIT": 4, -} - -func (x ResolutionType) String() string { - return proto.EnumName(ResolutionType_name, int32(x)) -} - -func (ResolutionType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{3} -} - -type ResolutionOutcome int32 - -const ( - // Outcome unknown. - ResolutionOutcome_OUTCOME_UNKNOWN ResolutionOutcome = 0 - // An output was claimed on chain. - ResolutionOutcome_CLAIMED ResolutionOutcome = 1 - // An output was left unclaimed on chain. - ResolutionOutcome_UNCLAIMED ResolutionOutcome = 2 - // - //ResolverOutcomeAbandoned indicates that an output that we did not - //claim on chain, for example an anchor that we did not sweep and a - //third party claimed on chain, or a htlc that we could not decode - //so left unclaimed. - ResolutionOutcome_ABANDONED ResolutionOutcome = 3 - // - //If we force closed our channel, our htlcs need to be claimed in two - //stages. This outcome represents the broadcast of a timeout or success - //transaction for this two stage htlc claim. - ResolutionOutcome_FIRST_STAGE ResolutionOutcome = 4 - // A htlc was timed out on chain. 
- ResolutionOutcome_TIMEOUT ResolutionOutcome = 5 -) - -var ResolutionOutcome_name = map[int32]string{ - 0: "OUTCOME_UNKNOWN", - 1: "CLAIMED", - 2: "UNCLAIMED", - 3: "ABANDONED", - 4: "FIRST_STAGE", - 5: "TIMEOUT", -} - -var ResolutionOutcome_value = map[string]int32{ - "OUTCOME_UNKNOWN": 0, - "CLAIMED": 1, - "UNCLAIMED": 2, - "ABANDONED": 3, - "FIRST_STAGE": 4, - "TIMEOUT": 5, -} - -func (x ResolutionOutcome) String() string { - return proto.EnumName(ResolutionOutcome_name, int32(x)) -} - -func (ResolutionOutcome) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{4} -} - -type NodeMetricType int32 - -const ( - NodeMetricType_UNKNOWN NodeMetricType = 0 - NodeMetricType_BETWEENNESS_CENTRALITY NodeMetricType = 1 -) - -var NodeMetricType_name = map[int32]string{ - 0: "UNKNOWN", - 1: "BETWEENNESS_CENTRALITY", -} - -var NodeMetricType_value = map[string]int32{ - "UNKNOWN": 0, - "BETWEENNESS_CENTRALITY": 1, -} - -func (x NodeMetricType) String() string { - return proto.EnumName(NodeMetricType_name, int32(x)) -} - -func (NodeMetricType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{5} -} - -type InvoiceHTLCState int32 - -const ( - InvoiceHTLCState_ACCEPTED InvoiceHTLCState = 0 - InvoiceHTLCState_SETTLED InvoiceHTLCState = 1 - InvoiceHTLCState_CANCELED InvoiceHTLCState = 2 -) - -var InvoiceHTLCState_name = map[int32]string{ - 0: "ACCEPTED", - 1: "SETTLED", - 2: "CANCELED", -} - -var InvoiceHTLCState_value = map[string]int32{ - "ACCEPTED": 0, - "SETTLED": 1, - "CANCELED": 2, -} - -func (x InvoiceHTLCState) String() string { - return proto.EnumName(InvoiceHTLCState_name, int32(x)) -} - -func (InvoiceHTLCState) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{6} -} - -type PaymentFailureReason int32 - -const ( - // - //Payment isn't failed (yet). 
- PaymentFailureReason_FAILURE_REASON_NONE PaymentFailureReason = 0 - // - //There are more routes to try, but the payment timeout was exceeded. - PaymentFailureReason_FAILURE_REASON_TIMEOUT PaymentFailureReason = 1 - // - //All possible routes were tried and failed permanently. Or were no - //routes to the destination at all. - PaymentFailureReason_FAILURE_REASON_NO_ROUTE PaymentFailureReason = 2 - // - //A non-recoverable error has occured. - PaymentFailureReason_FAILURE_REASON_ERROR PaymentFailureReason = 3 - // - //Payment details incorrect (unknown hash, invalid amt or - //invalid final cltv delta) - PaymentFailureReason_FAILURE_REASON_INCORRECT_PAYMENT_DETAILS PaymentFailureReason = 4 - // - //Insufficient local balance. - PaymentFailureReason_FAILURE_REASON_INSUFFICIENT_BALANCE PaymentFailureReason = 5 -) - -var PaymentFailureReason_name = map[int32]string{ - 0: "FAILURE_REASON_NONE", - 1: "FAILURE_REASON_TIMEOUT", - 2: "FAILURE_REASON_NO_ROUTE", - 3: "FAILURE_REASON_ERROR", - 4: "FAILURE_REASON_INCORRECT_PAYMENT_DETAILS", - 5: "FAILURE_REASON_INSUFFICIENT_BALANCE", -} - -var PaymentFailureReason_value = map[string]int32{ - "FAILURE_REASON_NONE": 0, - "FAILURE_REASON_TIMEOUT": 1, - "FAILURE_REASON_NO_ROUTE": 2, - "FAILURE_REASON_ERROR": 3, - "FAILURE_REASON_INCORRECT_PAYMENT_DETAILS": 4, - "FAILURE_REASON_INSUFFICIENT_BALANCE": 5, -} - -func (x PaymentFailureReason) String() string { - return proto.EnumName(PaymentFailureReason_name, int32(x)) -} - -func (PaymentFailureReason) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{7} -} - -type FeatureBit int32 - -const ( - FeatureBit_DATALOSS_PROTECT_REQ FeatureBit = 0 - FeatureBit_DATALOSS_PROTECT_OPT FeatureBit = 1 - FeatureBit_INITIAL_ROUING_SYNC FeatureBit = 3 - FeatureBit_UPFRONT_SHUTDOWN_SCRIPT_REQ FeatureBit = 4 - FeatureBit_UPFRONT_SHUTDOWN_SCRIPT_OPT FeatureBit = 5 - FeatureBit_GOSSIP_QUERIES_REQ FeatureBit = 6 - FeatureBit_GOSSIP_QUERIES_OPT FeatureBit = 7 - 
FeatureBit_TLV_ONION_REQ FeatureBit = 8 - FeatureBit_TLV_ONION_OPT FeatureBit = 9 - FeatureBit_EXT_GOSSIP_QUERIES_REQ FeatureBit = 10 - FeatureBit_EXT_GOSSIP_QUERIES_OPT FeatureBit = 11 - FeatureBit_STATIC_REMOTE_KEY_REQ FeatureBit = 12 - FeatureBit_STATIC_REMOTE_KEY_OPT FeatureBit = 13 - FeatureBit_PAYMENT_ADDR_REQ FeatureBit = 14 - FeatureBit_PAYMENT_ADDR_OPT FeatureBit = 15 - FeatureBit_MPP_REQ FeatureBit = 16 - FeatureBit_MPP_OPT FeatureBit = 17 -) - -var FeatureBit_name = map[int32]string{ - 0: "DATALOSS_PROTECT_REQ", - 1: "DATALOSS_PROTECT_OPT", - 3: "INITIAL_ROUING_SYNC", - 4: "UPFRONT_SHUTDOWN_SCRIPT_REQ", - 5: "UPFRONT_SHUTDOWN_SCRIPT_OPT", - 6: "GOSSIP_QUERIES_REQ", - 7: "GOSSIP_QUERIES_OPT", - 8: "TLV_ONION_REQ", - 9: "TLV_ONION_OPT", - 10: "EXT_GOSSIP_QUERIES_REQ", - 11: "EXT_GOSSIP_QUERIES_OPT", - 12: "STATIC_REMOTE_KEY_REQ", - 13: "STATIC_REMOTE_KEY_OPT", - 14: "PAYMENT_ADDR_REQ", - 15: "PAYMENT_ADDR_OPT", - 16: "MPP_REQ", - 17: "MPP_OPT", -} - -var FeatureBit_value = map[string]int32{ - "DATALOSS_PROTECT_REQ": 0, - "DATALOSS_PROTECT_OPT": 1, - "INITIAL_ROUING_SYNC": 3, - "UPFRONT_SHUTDOWN_SCRIPT_REQ": 4, - "UPFRONT_SHUTDOWN_SCRIPT_OPT": 5, - "GOSSIP_QUERIES_REQ": 6, - "GOSSIP_QUERIES_OPT": 7, - "TLV_ONION_REQ": 8, - "TLV_ONION_OPT": 9, - "EXT_GOSSIP_QUERIES_REQ": 10, - "EXT_GOSSIP_QUERIES_OPT": 11, - "STATIC_REMOTE_KEY_REQ": 12, - "STATIC_REMOTE_KEY_OPT": 13, - "PAYMENT_ADDR_REQ": 14, - "PAYMENT_ADDR_OPT": 15, - "MPP_REQ": 16, - "MPP_OPT": 17, -} - -func (x FeatureBit) String() string { - return proto.EnumName(FeatureBit_name, int32(x)) -} - -func (FeatureBit) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{8} -} - -type ChannelCloseSummary_ClosureType int32 - -const ( - ChannelCloseSummary_COOPERATIVE_CLOSE ChannelCloseSummary_ClosureType = 0 - ChannelCloseSummary_LOCAL_FORCE_CLOSE ChannelCloseSummary_ClosureType = 1 - ChannelCloseSummary_REMOTE_FORCE_CLOSE ChannelCloseSummary_ClosureType = 2 - 
ChannelCloseSummary_BREACH_CLOSE ChannelCloseSummary_ClosureType = 3 - ChannelCloseSummary_FUNDING_CANCELED ChannelCloseSummary_ClosureType = 4 - ChannelCloseSummary_ABANDONED ChannelCloseSummary_ClosureType = 5 -) - -var ChannelCloseSummary_ClosureType_name = map[int32]string{ - 0: "COOPERATIVE_CLOSE", - 1: "LOCAL_FORCE_CLOSE", - 2: "REMOTE_FORCE_CLOSE", - 3: "BREACH_CLOSE", - 4: "FUNDING_CANCELED", - 5: "ABANDONED", -} - -var ChannelCloseSummary_ClosureType_value = map[string]int32{ - "COOPERATIVE_CLOSE": 0, - "LOCAL_FORCE_CLOSE": 1, - "REMOTE_FORCE_CLOSE": 2, - "BREACH_CLOSE": 3, - "FUNDING_CANCELED": 4, - "ABANDONED": 5, -} - -func (x ChannelCloseSummary_ClosureType) String() string { - return proto.EnumName(ChannelCloseSummary_ClosureType_name, int32(x)) -} - -func (ChannelCloseSummary_ClosureType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{36, 0} -} - -type Peer_SyncType int32 - -const ( - // - //Denotes that we cannot determine the peer's current sync type. - Peer_UNKNOWN_SYNC Peer_SyncType = 0 - // - //Denotes that we are actively receiving new graph updates from the peer. - Peer_ACTIVE_SYNC Peer_SyncType = 1 - // - //Denotes that we are not receiving new graph updates from the peer. 
- Peer_PASSIVE_SYNC Peer_SyncType = 2 -) - -var Peer_SyncType_name = map[int32]string{ - 0: "UNKNOWN_SYNC", - 1: "ACTIVE_SYNC", - 2: "PASSIVE_SYNC", -} - -var Peer_SyncType_value = map[string]int32{ - "UNKNOWN_SYNC": 0, - "ACTIVE_SYNC": 1, - "PASSIVE_SYNC": 2, -} - -func (x Peer_SyncType) String() string { - return proto.EnumName(Peer_SyncType_name, int32(x)) -} - -func (Peer_SyncType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{40, 0} -} - -type PeerEvent_EventType int32 - -const ( - PeerEvent_PEER_ONLINE PeerEvent_EventType = 0 - PeerEvent_PEER_OFFLINE PeerEvent_EventType = 1 -) - -var PeerEvent_EventType_name = map[int32]string{ - 0: "PEER_ONLINE", - 1: "PEER_OFFLINE", -} - -var PeerEvent_EventType_value = map[string]int32{ - "PEER_ONLINE": 0, - "PEER_OFFLINE": 1, -} - -func (x PeerEvent_EventType) String() string { - return proto.EnumName(PeerEvent_EventType_name, int32(x)) -} - -func (PeerEvent_EventType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{45, 0} -} - -type PendingChannelsResponse_ForceClosedChannel_AnchorState int32 - -const ( - PendingChannelsResponse_ForceClosedChannel_LIMBO PendingChannelsResponse_ForceClosedChannel_AnchorState = 0 - PendingChannelsResponse_ForceClosedChannel_RECOVERED PendingChannelsResponse_ForceClosedChannel_AnchorState = 1 - PendingChannelsResponse_ForceClosedChannel_LOST PendingChannelsResponse_ForceClosedChannel_AnchorState = 2 -) - -var PendingChannelsResponse_ForceClosedChannel_AnchorState_name = map[int32]string{ - 0: "LIMBO", - 1: "RECOVERED", - 2: "LOST", -} - -var PendingChannelsResponse_ForceClosedChannel_AnchorState_value = map[string]int32{ - "LIMBO": 0, - "RECOVERED": 1, - "LOST": 2, -} - -func (x PendingChannelsResponse_ForceClosedChannel_AnchorState) String() string { - return proto.EnumName(PendingChannelsResponse_ForceClosedChannel_AnchorState_name, int32(x)) -} - -func (PendingChannelsResponse_ForceClosedChannel_AnchorState) 
EnumDescriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{72, 5, 0} -} - -type ChannelEventUpdate_UpdateType int32 - -const ( - ChannelEventUpdate_OPEN_CHANNEL ChannelEventUpdate_UpdateType = 0 - ChannelEventUpdate_CLOSED_CHANNEL ChannelEventUpdate_UpdateType = 1 - ChannelEventUpdate_ACTIVE_CHANNEL ChannelEventUpdate_UpdateType = 2 - ChannelEventUpdate_INACTIVE_CHANNEL ChannelEventUpdate_UpdateType = 3 - ChannelEventUpdate_PENDING_OPEN_CHANNEL ChannelEventUpdate_UpdateType = 4 -) - -var ChannelEventUpdate_UpdateType_name = map[int32]string{ - 0: "OPEN_CHANNEL", - 1: "CLOSED_CHANNEL", - 2: "ACTIVE_CHANNEL", - 3: "INACTIVE_CHANNEL", - 4: "PENDING_OPEN_CHANNEL", -} - -var ChannelEventUpdate_UpdateType_value = map[string]int32{ - "OPEN_CHANNEL": 0, - "CLOSED_CHANNEL": 1, - "ACTIVE_CHANNEL": 2, - "INACTIVE_CHANNEL": 3, - "PENDING_OPEN_CHANNEL": 4, -} - -func (x ChannelEventUpdate_UpdateType) String() string { - return proto.EnumName(ChannelEventUpdate_UpdateType_name, int32(x)) -} - -func (ChannelEventUpdate_UpdateType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{74, 0} -} - -type Invoice_InvoiceState int32 - -const ( - Invoice_OPEN Invoice_InvoiceState = 0 - Invoice_SETTLED Invoice_InvoiceState = 1 - Invoice_CANCELED Invoice_InvoiceState = 2 - Invoice_ACCEPTED Invoice_InvoiceState = 3 -) - -var Invoice_InvoiceState_name = map[int32]string{ - 0: "OPEN", - 1: "SETTLED", - 2: "CANCELED", - 3: "ACCEPTED", -} - -var Invoice_InvoiceState_value = map[string]int32{ - "OPEN": 0, - "SETTLED": 1, - "CANCELED": 2, - "ACCEPTED": 3, -} - -func (x Invoice_InvoiceState) String() string { - return proto.EnumName(Invoice_InvoiceState_name, int32(x)) -} - -func (Invoice_InvoiceState) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{113, 0} -} - -type Payment_PaymentStatus int32 - -const ( - Payment_UNKNOWN Payment_PaymentStatus = 0 - Payment_IN_FLIGHT Payment_PaymentStatus = 1 - 
Payment_SUCCEEDED Payment_PaymentStatus = 2 - Payment_FAILED Payment_PaymentStatus = 3 -) - -var Payment_PaymentStatus_name = map[int32]string{ - 0: "UNKNOWN", - 1: "IN_FLIGHT", - 2: "SUCCEEDED", - 3: "FAILED", -} - -var Payment_PaymentStatus_value = map[string]int32{ - "UNKNOWN": 0, - "IN_FLIGHT": 1, - "SUCCEEDED": 2, - "FAILED": 3, -} - -func (x Payment_PaymentStatus) String() string { - return proto.EnumName(Payment_PaymentStatus_name, int32(x)) -} - -func (Payment_PaymentStatus) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{120, 0} -} - -type HTLCAttempt_HTLCStatus int32 - -const ( - HTLCAttempt_IN_FLIGHT HTLCAttempt_HTLCStatus = 0 - HTLCAttempt_SUCCEEDED HTLCAttempt_HTLCStatus = 1 - HTLCAttempt_FAILED HTLCAttempt_HTLCStatus = 2 -) - -var HTLCAttempt_HTLCStatus_name = map[int32]string{ - 0: "IN_FLIGHT", - 1: "SUCCEEDED", - 2: "FAILED", -} - -var HTLCAttempt_HTLCStatus_value = map[string]int32{ - "IN_FLIGHT": 0, - "SUCCEEDED": 1, - "FAILED": 2, -} - -func (x HTLCAttempt_HTLCStatus) String() string { - return proto.EnumName(HTLCAttempt_HTLCStatus_name, int32(x)) -} - -func (HTLCAttempt_HTLCStatus) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{121, 0} -} - -type Failure_FailureCode int32 - -const ( - // - //The numbers assigned in this enumeration match the failure codes as - //defined in BOLT #4. Because protobuf 3 requires enums to start with 0, - //a RESERVED value is added. 
- Failure_RESERVED Failure_FailureCode = 0 - Failure_INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS Failure_FailureCode = 1 - Failure_INCORRECT_PAYMENT_AMOUNT Failure_FailureCode = 2 - Failure_FINAL_INCORRECT_CLTV_EXPIRY Failure_FailureCode = 3 - Failure_FINAL_INCORRECT_HTLC_AMOUNT Failure_FailureCode = 4 - Failure_FINAL_EXPIRY_TOO_SOON Failure_FailureCode = 5 - Failure_INVALID_REALM Failure_FailureCode = 6 - Failure_EXPIRY_TOO_SOON Failure_FailureCode = 7 - Failure_INVALID_ONION_VERSION Failure_FailureCode = 8 - Failure_INVALID_ONION_HMAC Failure_FailureCode = 9 - Failure_INVALID_ONION_KEY Failure_FailureCode = 10 - Failure_AMOUNT_BELOW_MINIMUM Failure_FailureCode = 11 - Failure_FEE_INSUFFICIENT Failure_FailureCode = 12 - Failure_INCORRECT_CLTV_EXPIRY Failure_FailureCode = 13 - Failure_CHANNEL_DISABLED Failure_FailureCode = 14 - Failure_TEMPORARY_CHANNEL_FAILURE Failure_FailureCode = 15 - Failure_REQUIRED_NODE_FEATURE_MISSING Failure_FailureCode = 16 - Failure_REQUIRED_CHANNEL_FEATURE_MISSING Failure_FailureCode = 17 - Failure_UNKNOWN_NEXT_PEER Failure_FailureCode = 18 - Failure_TEMPORARY_NODE_FAILURE Failure_FailureCode = 19 - Failure_PERMANENT_NODE_FAILURE Failure_FailureCode = 20 - Failure_PERMANENT_CHANNEL_FAILURE Failure_FailureCode = 21 - Failure_EXPIRY_TOO_FAR Failure_FailureCode = 22 - Failure_MPP_TIMEOUT Failure_FailureCode = 23 - // - //An internal error occurred. - Failure_INTERNAL_FAILURE Failure_FailureCode = 997 - // - //The error source is known, but the failure itself couldn't be decoded. - Failure_UNKNOWN_FAILURE Failure_FailureCode = 998 - // - //An unreadable failure result is returned if the received failure message - //cannot be decrypted. In that case the error source is unknown. 
- Failure_UNREADABLE_FAILURE Failure_FailureCode = 999 -) - -var Failure_FailureCode_name = map[int32]string{ - 0: "RESERVED", - 1: "INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS", - 2: "INCORRECT_PAYMENT_AMOUNT", - 3: "FINAL_INCORRECT_CLTV_EXPIRY", - 4: "FINAL_INCORRECT_HTLC_AMOUNT", - 5: "FINAL_EXPIRY_TOO_SOON", - 6: "INVALID_REALM", - 7: "EXPIRY_TOO_SOON", - 8: "INVALID_ONION_VERSION", - 9: "INVALID_ONION_HMAC", - 10: "INVALID_ONION_KEY", - 11: "AMOUNT_BELOW_MINIMUM", - 12: "FEE_INSUFFICIENT", - 13: "INCORRECT_CLTV_EXPIRY", - 14: "CHANNEL_DISABLED", - 15: "TEMPORARY_CHANNEL_FAILURE", - 16: "REQUIRED_NODE_FEATURE_MISSING", - 17: "REQUIRED_CHANNEL_FEATURE_MISSING", - 18: "UNKNOWN_NEXT_PEER", - 19: "TEMPORARY_NODE_FAILURE", - 20: "PERMANENT_NODE_FAILURE", - 21: "PERMANENT_CHANNEL_FAILURE", - 22: "EXPIRY_TOO_FAR", - 23: "MPP_TIMEOUT", - 997: "INTERNAL_FAILURE", - 998: "UNKNOWN_FAILURE", - 999: "UNREADABLE_FAILURE", -} - -var Failure_FailureCode_value = map[string]int32{ - "RESERVED": 0, - "INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS": 1, - "INCORRECT_PAYMENT_AMOUNT": 2, - "FINAL_INCORRECT_CLTV_EXPIRY": 3, - "FINAL_INCORRECT_HTLC_AMOUNT": 4, - "FINAL_EXPIRY_TOO_SOON": 5, - "INVALID_REALM": 6, - "EXPIRY_TOO_SOON": 7, - "INVALID_ONION_VERSION": 8, - "INVALID_ONION_HMAC": 9, - "INVALID_ONION_KEY": 10, - "AMOUNT_BELOW_MINIMUM": 11, - "FEE_INSUFFICIENT": 12, - "INCORRECT_CLTV_EXPIRY": 13, - "CHANNEL_DISABLED": 14, - "TEMPORARY_CHANNEL_FAILURE": 15, - "REQUIRED_NODE_FEATURE_MISSING": 16, - "REQUIRED_CHANNEL_FEATURE_MISSING": 17, - "UNKNOWN_NEXT_PEER": 18, - "TEMPORARY_NODE_FAILURE": 19, - "PERMANENT_NODE_FAILURE": 20, - "PERMANENT_CHANNEL_FAILURE": 21, - "EXPIRY_TOO_FAR": 22, - "MPP_TIMEOUT": 23, - "INTERNAL_FAILURE": 997, - "UNKNOWN_FAILURE": 998, - "UNREADABLE_FAILURE": 999, -} - -func (x Failure_FailureCode) String() string { - return proto.EnumName(Failure_FailureCode_name, int32(x)) -} - -func (Failure_FailureCode) EnumDescriptor() ([]byte, []int) { - return 
fileDescriptor_77a6da22d6a3feb1, []int{161, 0} -} - -type Utxo struct { - // The type of address - AddressType AddressType `protobuf:"varint,1,opt,name=address_type,json=addressType,proto3,enum=lnrpc.AddressType" json:"address_type,omitempty"` - // The address - Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` - // The value of the unspent coin in satoshis - AmountSat int64 `protobuf:"varint,3,opt,name=amount_sat,json=amountSat,proto3" json:"amount_sat,omitempty"` - // The pkscript in hex - PkScript string `protobuf:"bytes,4,opt,name=pk_script,json=pkScript,proto3" json:"pk_script,omitempty"` - // The outpoint in format txid:n - Outpoint *OutPoint `protobuf:"bytes,5,opt,name=outpoint,proto3" json:"outpoint,omitempty"` - // The number of confirmations for the Utxo - Confirmations int64 `protobuf:"varint,6,opt,name=confirmations,proto3" json:"confirmations,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Utxo) Reset() { *m = Utxo{} } -func (m *Utxo) String() string { return proto.CompactTextString(m) } -func (*Utxo) ProtoMessage() {} -func (*Utxo) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{0} -} - -func (m *Utxo) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Utxo.Unmarshal(m, b) -} -func (m *Utxo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Utxo.Marshal(b, m, deterministic) -} -func (m *Utxo) XXX_Merge(src proto.Message) { - xxx_messageInfo_Utxo.Merge(m, src) -} -func (m *Utxo) XXX_Size() int { - return xxx_messageInfo_Utxo.Size(m) -} -func (m *Utxo) XXX_DiscardUnknown() { - xxx_messageInfo_Utxo.DiscardUnknown(m) -} - -var xxx_messageInfo_Utxo proto.InternalMessageInfo - -func (m *Utxo) GetAddressType() AddressType { - if m != nil { - return m.AddressType - } - return AddressType_WITNESS_PUBKEY_HASH -} - -func (m *Utxo) GetAddress() string { - if m != nil 
{ - return m.Address - } - return "" -} - -func (m *Utxo) GetAmountSat() int64 { - if m != nil { - return m.AmountSat - } - return 0 -} - -func (m *Utxo) GetPkScript() string { - if m != nil { - return m.PkScript - } - return "" -} - -func (m *Utxo) GetOutpoint() *OutPoint { - if m != nil { - return m.Outpoint - } - return nil -} - -func (m *Utxo) GetConfirmations() int64 { - if m != nil { - return m.Confirmations - } - return 0 -} - -type Transaction struct { - // The transaction hash - TxHash string `protobuf:"bytes,1,opt,name=tx_hash,json=txHash,proto3" json:"tx_hash,omitempty"` - // The transaction amount, denominated in satoshis - Amount int64 `protobuf:"varint,2,opt,name=amount,proto3" json:"amount,omitempty"` - // The number of confirmations - NumConfirmations int32 `protobuf:"varint,3,opt,name=num_confirmations,json=numConfirmations,proto3" json:"num_confirmations,omitempty"` - // The hash of the block this transaction was included in - BlockHash string `protobuf:"bytes,4,opt,name=block_hash,json=blockHash,proto3" json:"block_hash,omitempty"` - // The height of the block this transaction was included in - BlockHeight int32 `protobuf:"varint,5,opt,name=block_height,json=blockHeight,proto3" json:"block_height,omitempty"` - // Timestamp of this transaction - TimeStamp int64 `protobuf:"varint,6,opt,name=time_stamp,json=timeStamp,proto3" json:"time_stamp,omitempty"` - // Fees paid for this transaction - TotalFees int64 `protobuf:"varint,7,opt,name=total_fees,json=totalFees,proto3" json:"total_fees,omitempty"` - // Addresses that received funds for this transaction - DestAddresses []string `protobuf:"bytes,8,rep,name=dest_addresses,json=destAddresses,proto3" json:"dest_addresses,omitempty"` - // The raw transaction hex. - RawTxHex string `protobuf:"bytes,9,opt,name=raw_tx_hex,json=rawTxHex,proto3" json:"raw_tx_hex,omitempty"` - // A label that was optionally set on transaction broadcast. 
- Label string `protobuf:"bytes,10,opt,name=label,proto3" json:"label,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Transaction) Reset() { *m = Transaction{} } -func (m *Transaction) String() string { return proto.CompactTextString(m) } -func (*Transaction) ProtoMessage() {} -func (*Transaction) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{1} -} - -func (m *Transaction) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Transaction.Unmarshal(m, b) -} -func (m *Transaction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Transaction.Marshal(b, m, deterministic) -} -func (m *Transaction) XXX_Merge(src proto.Message) { - xxx_messageInfo_Transaction.Merge(m, src) -} -func (m *Transaction) XXX_Size() int { - return xxx_messageInfo_Transaction.Size(m) -} -func (m *Transaction) XXX_DiscardUnknown() { - xxx_messageInfo_Transaction.DiscardUnknown(m) -} - -var xxx_messageInfo_Transaction proto.InternalMessageInfo - -func (m *Transaction) GetTxHash() string { - if m != nil { - return m.TxHash - } - return "" -} - -func (m *Transaction) GetAmount() int64 { - if m != nil { - return m.Amount - } - return 0 -} - -func (m *Transaction) GetNumConfirmations() int32 { - if m != nil { - return m.NumConfirmations - } - return 0 -} - -func (m *Transaction) GetBlockHash() string { - if m != nil { - return m.BlockHash - } - return "" -} - -func (m *Transaction) GetBlockHeight() int32 { - if m != nil { - return m.BlockHeight - } - return 0 -} - -func (m *Transaction) GetTimeStamp() int64 { - if m != nil { - return m.TimeStamp - } - return 0 -} - -func (m *Transaction) GetTotalFees() int64 { - if m != nil { - return m.TotalFees - } - return 0 -} - -func (m *Transaction) GetDestAddresses() []string { - if m != nil { - return m.DestAddresses - } - return nil -} - -func (m *Transaction) GetRawTxHex() string { - if m != nil { 
- return m.RawTxHex - } - return "" -} - -func (m *Transaction) GetLabel() string { - if m != nil { - return m.Label - } - return "" -} - -type GetTransactionsRequest struct { - // - //The height from which to list transactions, inclusive. If this value is - //greater than end_height, transactions will be read in reverse. - StartHeight int32 `protobuf:"varint,1,opt,name=start_height,json=startHeight,proto3" json:"start_height,omitempty"` - // - //The height until which to list transactions, inclusive. To include - //unconfirmed transactions, this value should be set to -1, which will - //return transactions from start_height until the current chain tip and - //unconfirmed transactions. If no end_height is provided, the call will - //default to this option. - EndHeight int32 `protobuf:"varint,2,opt,name=end_height,json=endHeight,proto3" json:"end_height,omitempty"` - TxnsLimit int32 `protobuf:"varint,3,opt,name=txns_limit,json=txnsLimit,proto3" json:"txns_limit,omitempty"` - TxnsSkip int32 `protobuf:"varint,4,opt,name=txns_skip,json=txnsSkip,proto3" json:"txns_skip,omitempty"` - Coinbase int32 `protobuf:"varint,5,opt,name=coinbase,proto3" json:"coinbase,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetTransactionsRequest) Reset() { *m = GetTransactionsRequest{} } -func (m *GetTransactionsRequest) String() string { return proto.CompactTextString(m) } -func (*GetTransactionsRequest) ProtoMessage() {} -func (*GetTransactionsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{2} -} - -func (m *GetTransactionsRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetTransactionsRequest.Unmarshal(m, b) -} -func (m *GetTransactionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetTransactionsRequest.Marshal(b, m, deterministic) -} -func (m *GetTransactionsRequest) XXX_Merge(src 
proto.Message) { - xxx_messageInfo_GetTransactionsRequest.Merge(m, src) -} -func (m *GetTransactionsRequest) XXX_Size() int { - return xxx_messageInfo_GetTransactionsRequest.Size(m) -} -func (m *GetTransactionsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetTransactionsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetTransactionsRequest proto.InternalMessageInfo - -func (m *GetTransactionsRequest) GetStartHeight() int32 { - if m != nil { - return m.StartHeight - } - return 0 -} - -func (m *GetTransactionsRequest) GetEndHeight() int32 { - if m != nil { - return m.EndHeight - } - return 0 -} - -func (m *GetTransactionsRequest) GetTxnsLimit() int32 { - if m != nil { - return m.TxnsLimit - } - return 0 -} - -func (m *GetTransactionsRequest) GetTxnsSkip() int32 { - if m != nil { - return m.TxnsSkip - } - return 0 -} - -func (m *GetTransactionsRequest) GetCoinbase() int32 { - if m != nil { - return m.Coinbase - } - return 0 -} - -type TransactionDetails struct { - // The list of transactions relevant to the wallet. 
- Transactions []*Transaction `protobuf:"bytes,1,rep,name=transactions,proto3" json:"transactions,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *TransactionDetails) Reset() { *m = TransactionDetails{} } -func (m *TransactionDetails) String() string { return proto.CompactTextString(m) } -func (*TransactionDetails) ProtoMessage() {} -func (*TransactionDetails) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{3} -} - -func (m *TransactionDetails) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_TransactionDetails.Unmarshal(m, b) -} -func (m *TransactionDetails) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_TransactionDetails.Marshal(b, m, deterministic) -} -func (m *TransactionDetails) XXX_Merge(src proto.Message) { - xxx_messageInfo_TransactionDetails.Merge(m, src) -} -func (m *TransactionDetails) XXX_Size() int { - return xxx_messageInfo_TransactionDetails.Size(m) -} -func (m *TransactionDetails) XXX_DiscardUnknown() { - xxx_messageInfo_TransactionDetails.DiscardUnknown(m) -} - -var xxx_messageInfo_TransactionDetails proto.InternalMessageInfo - -func (m *TransactionDetails) GetTransactions() []*Transaction { - if m != nil { - return m.Transactions - } - return nil -} - -type FeeLimit struct { - // Types that are valid to be assigned to Limit: - // *FeeLimit_Fixed - // *FeeLimit_FixedMsat - // *FeeLimit_Percent - Limit isFeeLimit_Limit `protobuf_oneof:"limit"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FeeLimit) Reset() { *m = FeeLimit{} } -func (m *FeeLimit) String() string { return proto.CompactTextString(m) } -func (*FeeLimit) ProtoMessage() {} -func (*FeeLimit) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{4} -} - -func (m *FeeLimit) XXX_Unmarshal(b []byte) error { - return 
xxx_messageInfo_FeeLimit.Unmarshal(m, b) -} -func (m *FeeLimit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FeeLimit.Marshal(b, m, deterministic) -} -func (m *FeeLimit) XXX_Merge(src proto.Message) { - xxx_messageInfo_FeeLimit.Merge(m, src) -} -func (m *FeeLimit) XXX_Size() int { - return xxx_messageInfo_FeeLimit.Size(m) -} -func (m *FeeLimit) XXX_DiscardUnknown() { - xxx_messageInfo_FeeLimit.DiscardUnknown(m) -} - -var xxx_messageInfo_FeeLimit proto.InternalMessageInfo - -type isFeeLimit_Limit interface { - isFeeLimit_Limit() -} - -type FeeLimit_Fixed struct { - Fixed int64 `protobuf:"varint,1,opt,name=fixed,proto3,oneof"` -} - -type FeeLimit_FixedMsat struct { - FixedMsat int64 `protobuf:"varint,3,opt,name=fixed_msat,json=fixedMsat,proto3,oneof"` -} - -type FeeLimit_Percent struct { - Percent int64 `protobuf:"varint,2,opt,name=percent,proto3,oneof"` -} - -func (*FeeLimit_Fixed) isFeeLimit_Limit() {} - -func (*FeeLimit_FixedMsat) isFeeLimit_Limit() {} - -func (*FeeLimit_Percent) isFeeLimit_Limit() {} - -func (m *FeeLimit) GetLimit() isFeeLimit_Limit { - if m != nil { - return m.Limit - } - return nil -} - -func (m *FeeLimit) GetFixed() int64 { - if x, ok := m.GetLimit().(*FeeLimit_Fixed); ok { - return x.Fixed - } - return 0 -} - -func (m *FeeLimit) GetFixedMsat() int64 { - if x, ok := m.GetLimit().(*FeeLimit_FixedMsat); ok { - return x.FixedMsat - } - return 0 -} - -func (m *FeeLimit) GetPercent() int64 { - if x, ok := m.GetLimit().(*FeeLimit_Percent); ok { - return x.Percent - } - return 0 -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*FeeLimit) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*FeeLimit_Fixed)(nil), - (*FeeLimit_FixedMsat)(nil), - (*FeeLimit_Percent)(nil), - } -} - -type SendRequest struct { - // - //The identity pubkey of the payment recipient. When using REST, this field - //must be encoded as base64. 
- Dest []byte `protobuf:"bytes,1,opt,name=dest,proto3" json:"dest,omitempty"` - // - //The hex-encoded identity pubkey of the payment recipient. Deprecated now - //that the REST gateway supports base64 encoding of bytes fields. - DestString string `protobuf:"bytes,2,opt,name=dest_string,json=destString,proto3" json:"dest_string,omitempty"` // Deprecated: Do not use. - // - //The amount to send expressed in satoshis. - // - //The fields amt and amt_msat are mutually exclusive. - Amt int64 `protobuf:"varint,3,opt,name=amt,proto3" json:"amt,omitempty"` - // - //The amount to send expressed in millisatoshis. - // - //The fields amt and amt_msat are mutually exclusive. - AmtMsat int64 `protobuf:"varint,12,opt,name=amt_msat,json=amtMsat,proto3" json:"amt_msat,omitempty"` - // - //The hash to use within the payment's HTLC. When using REST, this field - //must be encoded as base64. - PaymentHash []byte `protobuf:"bytes,4,opt,name=payment_hash,json=paymentHash,proto3" json:"payment_hash,omitempty"` - // - //The hex-encoded hash to use within the payment's HTLC. Deprecated now - //that the REST gateway supports base64 encoding of bytes fields. - PaymentHashString string `protobuf:"bytes,5,opt,name=payment_hash_string,json=paymentHashString,proto3" json:"payment_hash_string,omitempty"` // Deprecated: Do not use. - // - //A bare-bones invoice for a payment within the Lightning Network. With the - //details of the invoice, the sender has all the data necessary to send a - //payment to the recipient. - PaymentRequest string `protobuf:"bytes,6,opt,name=payment_request,json=paymentRequest,proto3" json:"payment_request,omitempty"` - // - //The CLTV delta from the current height that should be used to set the - //timelock for the final hop. - FinalCltvDelta int32 `protobuf:"varint,7,opt,name=final_cltv_delta,json=finalCltvDelta,proto3" json:"final_cltv_delta,omitempty"` - // - //The maximum number of satoshis that will be paid as a fee of the payment. 
- //This value can be represented either as a percentage of the amount being - //sent, or as a fixed amount of the maximum fee the user is willing the pay to - //send the payment. - FeeLimit *FeeLimit `protobuf:"bytes,8,opt,name=fee_limit,json=feeLimit,proto3" json:"fee_limit,omitempty"` - // - //The channel id of the channel that must be taken to the first hop. If zero, - //any channel may be used. - OutgoingChanId uint64 `protobuf:"varint,9,opt,name=outgoing_chan_id,json=outgoingChanId,proto3" json:"outgoing_chan_id,omitempty"` - // - //The pubkey of the last hop of the route. If empty, any hop may be used. - LastHopPubkey []byte `protobuf:"bytes,13,opt,name=last_hop_pubkey,json=lastHopPubkey,proto3" json:"last_hop_pubkey,omitempty"` - // - //An optional maximum total time lock for the route. This should not exceed - //lnd's `--max-cltv-expiry` setting. If zero, then the value of - //`--max-cltv-expiry` is enforced. - CltvLimit uint32 `protobuf:"varint,10,opt,name=cltv_limit,json=cltvLimit,proto3" json:"cltv_limit,omitempty"` - // - //An optional field that can be used to pass an arbitrary set of TLV records - //to a peer which understands the new records. This can be used to pass - //application specific data during the payment attempt. Record types are - //required to be in the custom range >= 65536. When using REST, the values - //must be encoded as base64. - DestCustomRecords map[uint64][]byte `protobuf:"bytes,11,rep,name=dest_custom_records,json=destCustomRecords,proto3" json:"dest_custom_records,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // If set, circular payments to self are permitted. - AllowSelfPayment bool `protobuf:"varint,14,opt,name=allow_self_payment,json=allowSelfPayment,proto3" json:"allow_self_payment,omitempty"` - // - //Features assumed to be supported by the final node. All transitive feature - //dependencies must also be set properly. 
For a given feature bit pair, either - //optional or remote may be set, but not both. If this field is nil or empty, - //the router will try to load destination features from the graph as a - //fallback. - DestFeatures []FeatureBit `protobuf:"varint,15,rep,packed,name=dest_features,json=destFeatures,proto3,enum=lnrpc.FeatureBit" json:"dest_features,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SendRequest) Reset() { *m = SendRequest{} } -func (m *SendRequest) String() string { return proto.CompactTextString(m) } -func (*SendRequest) ProtoMessage() {} -func (*SendRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{5} -} - -func (m *SendRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SendRequest.Unmarshal(m, b) -} -func (m *SendRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SendRequest.Marshal(b, m, deterministic) -} -func (m *SendRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SendRequest.Merge(m, src) -} -func (m *SendRequest) XXX_Size() int { - return xxx_messageInfo_SendRequest.Size(m) -} -func (m *SendRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SendRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_SendRequest proto.InternalMessageInfo - -func (m *SendRequest) GetDest() []byte { - if m != nil { - return m.Dest - } - return nil -} - -// Deprecated: Do not use. -func (m *SendRequest) GetDestString() string { - if m != nil { - return m.DestString - } - return "" -} - -func (m *SendRequest) GetAmt() int64 { - if m != nil { - return m.Amt - } - return 0 -} - -func (m *SendRequest) GetAmtMsat() int64 { - if m != nil { - return m.AmtMsat - } - return 0 -} - -func (m *SendRequest) GetPaymentHash() []byte { - if m != nil { - return m.PaymentHash - } - return nil -} - -// Deprecated: Do not use. 
-func (m *SendRequest) GetPaymentHashString() string { - if m != nil { - return m.PaymentHashString - } - return "" -} - -func (m *SendRequest) GetPaymentRequest() string { - if m != nil { - return m.PaymentRequest - } - return "" -} - -func (m *SendRequest) GetFinalCltvDelta() int32 { - if m != nil { - return m.FinalCltvDelta - } - return 0 -} - -func (m *SendRequest) GetFeeLimit() *FeeLimit { - if m != nil { - return m.FeeLimit - } - return nil -} - -func (m *SendRequest) GetOutgoingChanId() uint64 { - if m != nil { - return m.OutgoingChanId - } - return 0 -} - -func (m *SendRequest) GetLastHopPubkey() []byte { - if m != nil { - return m.LastHopPubkey - } - return nil -} - -func (m *SendRequest) GetCltvLimit() uint32 { - if m != nil { - return m.CltvLimit - } - return 0 -} - -func (m *SendRequest) GetDestCustomRecords() map[uint64][]byte { - if m != nil { - return m.DestCustomRecords - } - return nil -} - -func (m *SendRequest) GetAllowSelfPayment() bool { - if m != nil { - return m.AllowSelfPayment - } - return false -} - -func (m *SendRequest) GetDestFeatures() []FeatureBit { - if m != nil { - return m.DestFeatures - } - return nil -} - -type SendResponse struct { - PaymentError string `protobuf:"bytes,1,opt,name=payment_error,json=paymentError,proto3" json:"payment_error,omitempty"` - PaymentPreimage []byte `protobuf:"bytes,2,opt,name=payment_preimage,json=paymentPreimage,proto3" json:"payment_preimage,omitempty"` - PaymentRoute *Route `protobuf:"bytes,3,opt,name=payment_route,json=paymentRoute,proto3" json:"payment_route,omitempty"` - PaymentHash []byte `protobuf:"bytes,4,opt,name=payment_hash,json=paymentHash,proto3" json:"payment_hash,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SendResponse) Reset() { *m = SendResponse{} } -func (m *SendResponse) String() string { return proto.CompactTextString(m) } -func (*SendResponse) ProtoMessage() {} -func (*SendResponse) 
Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{6} -} - -func (m *SendResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SendResponse.Unmarshal(m, b) -} -func (m *SendResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SendResponse.Marshal(b, m, deterministic) -} -func (m *SendResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_SendResponse.Merge(m, src) -} -func (m *SendResponse) XXX_Size() int { - return xxx_messageInfo_SendResponse.Size(m) -} -func (m *SendResponse) XXX_DiscardUnknown() { - xxx_messageInfo_SendResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_SendResponse proto.InternalMessageInfo - -func (m *SendResponse) GetPaymentError() string { - if m != nil { - return m.PaymentError - } - return "" -} - -func (m *SendResponse) GetPaymentPreimage() []byte { - if m != nil { - return m.PaymentPreimage - } - return nil -} - -func (m *SendResponse) GetPaymentRoute() *Route { - if m != nil { - return m.PaymentRoute - } - return nil -} - -func (m *SendResponse) GetPaymentHash() []byte { - if m != nil { - return m.PaymentHash - } - return nil -} - -type SendToRouteRequest struct { - // - //The payment hash to use for the HTLC. When using REST, this field must be - //encoded as base64. - PaymentHash []byte `protobuf:"bytes,1,opt,name=payment_hash,json=paymentHash,proto3" json:"payment_hash,omitempty"` - // - //An optional hex-encoded payment hash to be used for the HTLC. Deprecated now - //that the REST gateway supports base64 encoding of bytes fields. - PaymentHashString string `protobuf:"bytes,2,opt,name=payment_hash_string,json=paymentHashString,proto3" json:"payment_hash_string,omitempty"` // Deprecated: Do not use. - // Route that should be used to attempt to complete the payment. 
- Route *Route `protobuf:"bytes,4,opt,name=route,proto3" json:"route,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SendToRouteRequest) Reset() { *m = SendToRouteRequest{} } -func (m *SendToRouteRequest) String() string { return proto.CompactTextString(m) } -func (*SendToRouteRequest) ProtoMessage() {} -func (*SendToRouteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{7} -} - -func (m *SendToRouteRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SendToRouteRequest.Unmarshal(m, b) -} -func (m *SendToRouteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SendToRouteRequest.Marshal(b, m, deterministic) -} -func (m *SendToRouteRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SendToRouteRequest.Merge(m, src) -} -func (m *SendToRouteRequest) XXX_Size() int { - return xxx_messageInfo_SendToRouteRequest.Size(m) -} -func (m *SendToRouteRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SendToRouteRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_SendToRouteRequest proto.InternalMessageInfo - -func (m *SendToRouteRequest) GetPaymentHash() []byte { - if m != nil { - return m.PaymentHash - } - return nil -} - -// Deprecated: Do not use. -func (m *SendToRouteRequest) GetPaymentHashString() string { - if m != nil { - return m.PaymentHashString - } - return "" -} - -func (m *SendToRouteRequest) GetRoute() *Route { - if m != nil { - return m.Route - } - return nil -} - -type ChannelAcceptRequest struct { - // The pubkey of the node that wishes to open an inbound channel. - NodePubkey []byte `protobuf:"bytes,1,opt,name=node_pubkey,json=nodePubkey,proto3" json:"node_pubkey,omitempty"` - // The hash of the genesis block that the proposed channel resides in. 
- ChainHash []byte `protobuf:"bytes,2,opt,name=chain_hash,json=chainHash,proto3" json:"chain_hash,omitempty"` - // The pending channel id. - PendingChanId []byte `protobuf:"bytes,3,opt,name=pending_chan_id,json=pendingChanId,proto3" json:"pending_chan_id,omitempty"` - // The funding amount in satoshis that initiator wishes to use in the - // channel. - FundingAmt uint64 `protobuf:"varint,4,opt,name=funding_amt,json=fundingAmt,proto3" json:"funding_amt,omitempty"` - // The push amount of the proposed channel in millisatoshis. - PushAmt uint64 `protobuf:"varint,5,opt,name=push_amt,json=pushAmt,proto3" json:"push_amt,omitempty"` - // The dust limit of the initiator's commitment tx. - DustLimit uint64 `protobuf:"varint,6,opt,name=dust_limit,json=dustLimit,proto3" json:"dust_limit,omitempty"` - // The maximum amount of coins in millisatoshis that can be pending in this - // channel. - MaxValueInFlight uint64 `protobuf:"varint,7,opt,name=max_value_in_flight,json=maxValueInFlight,proto3" json:"max_value_in_flight,omitempty"` - // The minimum amount of satoshis the initiator requires us to have at all - // times. - ChannelReserve uint64 `protobuf:"varint,8,opt,name=channel_reserve,json=channelReserve,proto3" json:"channel_reserve,omitempty"` - // The smallest HTLC in millisatoshis that the initiator will accept. - MinHtlc uint64 `protobuf:"varint,9,opt,name=min_htlc,json=minHtlc,proto3" json:"min_htlc,omitempty"` - // The initial fee rate that the initiator suggests for both commitment - // transactions. - FeePerKw uint64 `protobuf:"varint,10,opt,name=fee_per_kw,json=feePerKw,proto3" json:"fee_per_kw,omitempty"` - // - //The number of blocks to use for the relative time lock in the pay-to-self - //output of both commitment transactions. - CsvDelay uint32 `protobuf:"varint,11,opt,name=csv_delay,json=csvDelay,proto3" json:"csv_delay,omitempty"` - // The total number of incoming HTLC's that the initiator will accept. 
- MaxAcceptedHtlcs uint32 `protobuf:"varint,12,opt,name=max_accepted_htlcs,json=maxAcceptedHtlcs,proto3" json:"max_accepted_htlcs,omitempty"` - // A bit-field which the initiator uses to specify proposed channel - // behavior. - ChannelFlags uint32 `protobuf:"varint,13,opt,name=channel_flags,json=channelFlags,proto3" json:"channel_flags,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ChannelAcceptRequest) Reset() { *m = ChannelAcceptRequest{} } -func (m *ChannelAcceptRequest) String() string { return proto.CompactTextString(m) } -func (*ChannelAcceptRequest) ProtoMessage() {} -func (*ChannelAcceptRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{8} -} - -func (m *ChannelAcceptRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ChannelAcceptRequest.Unmarshal(m, b) -} -func (m *ChannelAcceptRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ChannelAcceptRequest.Marshal(b, m, deterministic) -} -func (m *ChannelAcceptRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ChannelAcceptRequest.Merge(m, src) -} -func (m *ChannelAcceptRequest) XXX_Size() int { - return xxx_messageInfo_ChannelAcceptRequest.Size(m) -} -func (m *ChannelAcceptRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ChannelAcceptRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ChannelAcceptRequest proto.InternalMessageInfo - -func (m *ChannelAcceptRequest) GetNodePubkey() []byte { - if m != nil { - return m.NodePubkey - } - return nil -} - -func (m *ChannelAcceptRequest) GetChainHash() []byte { - if m != nil { - return m.ChainHash - } - return nil -} - -func (m *ChannelAcceptRequest) GetPendingChanId() []byte { - if m != nil { - return m.PendingChanId - } - return nil -} - -func (m *ChannelAcceptRequest) GetFundingAmt() uint64 { - if m != nil { - return m.FundingAmt - } - return 0 -} - -func (m 
*ChannelAcceptRequest) GetPushAmt() uint64 { - if m != nil { - return m.PushAmt - } - return 0 -} - -func (m *ChannelAcceptRequest) GetDustLimit() uint64 { - if m != nil { - return m.DustLimit - } - return 0 -} - -func (m *ChannelAcceptRequest) GetMaxValueInFlight() uint64 { - if m != nil { - return m.MaxValueInFlight - } - return 0 -} - -func (m *ChannelAcceptRequest) GetChannelReserve() uint64 { - if m != nil { - return m.ChannelReserve - } - return 0 -} - -func (m *ChannelAcceptRequest) GetMinHtlc() uint64 { - if m != nil { - return m.MinHtlc - } - return 0 -} - -func (m *ChannelAcceptRequest) GetFeePerKw() uint64 { - if m != nil { - return m.FeePerKw - } - return 0 -} - -func (m *ChannelAcceptRequest) GetCsvDelay() uint32 { - if m != nil { - return m.CsvDelay - } - return 0 -} - -func (m *ChannelAcceptRequest) GetMaxAcceptedHtlcs() uint32 { - if m != nil { - return m.MaxAcceptedHtlcs - } - return 0 -} - -func (m *ChannelAcceptRequest) GetChannelFlags() uint32 { - if m != nil { - return m.ChannelFlags - } - return 0 -} - -type ChannelAcceptResponse struct { - // Whether or not the client accepts the channel. - Accept bool `protobuf:"varint,1,opt,name=accept,proto3" json:"accept,omitempty"` - // The pending channel id to which this response applies. - PendingChanId []byte `protobuf:"bytes,2,opt,name=pending_chan_id,json=pendingChanId,proto3" json:"pending_chan_id,omitempty"` - // - //An optional error to send the initiating party to indicate why the channel - //was rejected. This field *should not* contain sensitive information, it will - //be sent to the initiating party. This field should only be set if accept is - //false, the channel will be rejected if an error is set with accept=true - //because the meaning of this response is ambiguous. Limited to 500 - //characters. 
- Error string `protobuf:"bytes,3,opt,name=error,proto3" json:"error,omitempty"` - // - //The upfront shutdown address to use if the initiating peer supports option - //upfront shutdown script (see ListPeers for the features supported). Note - //that the channel open will fail if this value is set for a peer that does - //not support this feature bit. - UpfrontShutdown string `protobuf:"bytes,4,opt,name=upfront_shutdown,json=upfrontShutdown,proto3" json:"upfront_shutdown,omitempty"` - // - //The csv delay (in blocks) that we require for the remote party. - CsvDelay uint32 `protobuf:"varint,5,opt,name=csv_delay,json=csvDelay,proto3" json:"csv_delay,omitempty"` - // - //The reserve amount in satoshis that we require the remote peer to adhere to. - //We require that the remote peer always have some reserve amount allocated to - //them so that there is always a disincentive to broadcast old state (if they - //hold 0 sats on their side of the channel, there is nothing to lose). - ReserveSat uint64 `protobuf:"varint,6,opt,name=reserve_sat,json=reserveSat,proto3" json:"reserve_sat,omitempty"` - // - //The maximum amount of funds in millisatoshis that we allow the remote peer - //to have in outstanding htlcs. - InFlightMaxMsat uint64 `protobuf:"varint,7,opt,name=in_flight_max_msat,json=inFlightMaxMsat,proto3" json:"in_flight_max_msat,omitempty"` - // - //The maximum number of htlcs that the remote peer can offer us. - MaxHtlcCount uint32 `protobuf:"varint,8,opt,name=max_htlc_count,json=maxHtlcCount,proto3" json:"max_htlc_count,omitempty"` - // - //The minimum value in millisatoshis for incoming htlcs on the channel. - MinHtlcIn uint64 `protobuf:"varint,9,opt,name=min_htlc_in,json=minHtlcIn,proto3" json:"min_htlc_in,omitempty"` - // - //The number of confirmations we require before we consider the channel open. 
- MinAcceptDepth uint32 `protobuf:"varint,10,opt,name=min_accept_depth,json=minAcceptDepth,proto3" json:"min_accept_depth,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ChannelAcceptResponse) Reset() { *m = ChannelAcceptResponse{} } -func (m *ChannelAcceptResponse) String() string { return proto.CompactTextString(m) } -func (*ChannelAcceptResponse) ProtoMessage() {} -func (*ChannelAcceptResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{9} -} - -func (m *ChannelAcceptResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ChannelAcceptResponse.Unmarshal(m, b) -} -func (m *ChannelAcceptResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ChannelAcceptResponse.Marshal(b, m, deterministic) -} -func (m *ChannelAcceptResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ChannelAcceptResponse.Merge(m, src) -} -func (m *ChannelAcceptResponse) XXX_Size() int { - return xxx_messageInfo_ChannelAcceptResponse.Size(m) -} -func (m *ChannelAcceptResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ChannelAcceptResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ChannelAcceptResponse proto.InternalMessageInfo - -func (m *ChannelAcceptResponse) GetAccept() bool { - if m != nil { - return m.Accept - } - return false -} - -func (m *ChannelAcceptResponse) GetPendingChanId() []byte { - if m != nil { - return m.PendingChanId - } - return nil -} - -func (m *ChannelAcceptResponse) GetError() string { - if m != nil { - return m.Error - } - return "" -} - -func (m *ChannelAcceptResponse) GetUpfrontShutdown() string { - if m != nil { - return m.UpfrontShutdown - } - return "" -} - -func (m *ChannelAcceptResponse) GetCsvDelay() uint32 { - if m != nil { - return m.CsvDelay - } - return 0 -} - -func (m *ChannelAcceptResponse) GetReserveSat() uint64 { - if m != nil { - return m.ReserveSat - } - return 0 -} - 
-func (m *ChannelAcceptResponse) GetInFlightMaxMsat() uint64 { - if m != nil { - return m.InFlightMaxMsat - } - return 0 -} - -func (m *ChannelAcceptResponse) GetMaxHtlcCount() uint32 { - if m != nil { - return m.MaxHtlcCount - } - return 0 -} - -func (m *ChannelAcceptResponse) GetMinHtlcIn() uint64 { - if m != nil { - return m.MinHtlcIn - } - return 0 -} - -func (m *ChannelAcceptResponse) GetMinAcceptDepth() uint32 { - if m != nil { - return m.MinAcceptDepth - } - return 0 -} - -type ChannelPoint struct { - // Types that are valid to be assigned to FundingTxid: - // *ChannelPoint_FundingTxidBytes - // *ChannelPoint_FundingTxidStr - FundingTxid isChannelPoint_FundingTxid `protobuf_oneof:"funding_txid"` - // The index of the output of the funding transaction - OutputIndex uint32 `protobuf:"varint,3,opt,name=output_index,json=outputIndex,proto3" json:"output_index,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ChannelPoint) Reset() { *m = ChannelPoint{} } -func (m *ChannelPoint) String() string { return proto.CompactTextString(m) } -func (*ChannelPoint) ProtoMessage() {} -func (*ChannelPoint) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{10} -} - -func (m *ChannelPoint) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ChannelPoint.Unmarshal(m, b) -} -func (m *ChannelPoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ChannelPoint.Marshal(b, m, deterministic) -} -func (m *ChannelPoint) XXX_Merge(src proto.Message) { - xxx_messageInfo_ChannelPoint.Merge(m, src) -} -func (m *ChannelPoint) XXX_Size() int { - return xxx_messageInfo_ChannelPoint.Size(m) -} -func (m *ChannelPoint) XXX_DiscardUnknown() { - xxx_messageInfo_ChannelPoint.DiscardUnknown(m) -} - -var xxx_messageInfo_ChannelPoint proto.InternalMessageInfo - -type isChannelPoint_FundingTxid interface { - isChannelPoint_FundingTxid() -} - -type 
ChannelPoint_FundingTxidBytes struct { - FundingTxidBytes []byte `protobuf:"bytes,1,opt,name=funding_txid_bytes,json=fundingTxidBytes,proto3,oneof"` -} - -type ChannelPoint_FundingTxidStr struct { - FundingTxidStr string `protobuf:"bytes,2,opt,name=funding_txid_str,json=fundingTxidStr,proto3,oneof"` -} - -func (*ChannelPoint_FundingTxidBytes) isChannelPoint_FundingTxid() {} - -func (*ChannelPoint_FundingTxidStr) isChannelPoint_FundingTxid() {} - -func (m *ChannelPoint) GetFundingTxid() isChannelPoint_FundingTxid { - if m != nil { - return m.FundingTxid - } - return nil -} - -func (m *ChannelPoint) GetFundingTxidBytes() []byte { - if x, ok := m.GetFundingTxid().(*ChannelPoint_FundingTxidBytes); ok { - return x.FundingTxidBytes - } - return nil -} - -func (m *ChannelPoint) GetFundingTxidStr() string { - if x, ok := m.GetFundingTxid().(*ChannelPoint_FundingTxidStr); ok { - return x.FundingTxidStr - } - return "" -} - -func (m *ChannelPoint) GetOutputIndex() uint32 { - if m != nil { - return m.OutputIndex - } - return 0 -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*ChannelPoint) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*ChannelPoint_FundingTxidBytes)(nil), - (*ChannelPoint_FundingTxidStr)(nil), - } -} - -type OutPoint struct { - // Raw bytes representing the transaction id. - TxidBytes []byte `protobuf:"bytes,1,opt,name=txid_bytes,json=txidBytes,proto3" json:"txid_bytes,omitempty"` - // Reversed, hex-encoded string representing the transaction id. - TxidStr string `protobuf:"bytes,2,opt,name=txid_str,json=txidStr,proto3" json:"txid_str,omitempty"` - // The index of the output on the transaction. 
- OutputIndex uint32 `protobuf:"varint,3,opt,name=output_index,json=outputIndex,proto3" json:"output_index,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *OutPoint) Reset() { *m = OutPoint{} } -func (m *OutPoint) String() string { return proto.CompactTextString(m) } -func (*OutPoint) ProtoMessage() {} -func (*OutPoint) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{11} -} - -func (m *OutPoint) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_OutPoint.Unmarshal(m, b) -} -func (m *OutPoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_OutPoint.Marshal(b, m, deterministic) -} -func (m *OutPoint) XXX_Merge(src proto.Message) { - xxx_messageInfo_OutPoint.Merge(m, src) -} -func (m *OutPoint) XXX_Size() int { - return xxx_messageInfo_OutPoint.Size(m) -} -func (m *OutPoint) XXX_DiscardUnknown() { - xxx_messageInfo_OutPoint.DiscardUnknown(m) -} - -var xxx_messageInfo_OutPoint proto.InternalMessageInfo - -func (m *OutPoint) GetTxidBytes() []byte { - if m != nil { - return m.TxidBytes - } - return nil -} - -func (m *OutPoint) GetTxidStr() string { - if m != nil { - return m.TxidStr - } - return "" -} - -func (m *OutPoint) GetOutputIndex() uint32 { - if m != nil { - return m.OutputIndex - } - return 0 -} - -type LightningAddress struct { - // The identity pubkey of the Lightning node - Pubkey string `protobuf:"bytes,1,opt,name=pubkey,proto3" json:"pubkey,omitempty"` - // The network location of the lightning node, e.g. 
`69.69.69.69:1337` or - // `localhost:10011` - Host string `protobuf:"bytes,2,opt,name=host,proto3" json:"host,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LightningAddress) Reset() { *m = LightningAddress{} } -func (m *LightningAddress) String() string { return proto.CompactTextString(m) } -func (*LightningAddress) ProtoMessage() {} -func (*LightningAddress) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{12} -} - -func (m *LightningAddress) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LightningAddress.Unmarshal(m, b) -} -func (m *LightningAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LightningAddress.Marshal(b, m, deterministic) -} -func (m *LightningAddress) XXX_Merge(src proto.Message) { - xxx_messageInfo_LightningAddress.Merge(m, src) -} -func (m *LightningAddress) XXX_Size() int { - return xxx_messageInfo_LightningAddress.Size(m) -} -func (m *LightningAddress) XXX_DiscardUnknown() { - xxx_messageInfo_LightningAddress.DiscardUnknown(m) -} - -var xxx_messageInfo_LightningAddress proto.InternalMessageInfo - -func (m *LightningAddress) GetPubkey() string { - if m != nil { - return m.Pubkey - } - return "" -} - -func (m *LightningAddress) GetHost() string { - if m != nil { - return m.Host - } - return "" -} - -type EstimateFeeRequest struct { - // The map from addresses to amounts for the transaction. - AddrToAmount map[string]int64 `protobuf:"bytes,1,rep,name=AddrToAmount,proto3" json:"AddrToAmount,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` - // The target number of blocks that this transaction should be confirmed - // by. 
- TargetConf int32 `protobuf:"varint,2,opt,name=target_conf,json=targetConf,proto3" json:"target_conf,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *EstimateFeeRequest) Reset() { *m = EstimateFeeRequest{} } -func (m *EstimateFeeRequest) String() string { return proto.CompactTextString(m) } -func (*EstimateFeeRequest) ProtoMessage() {} -func (*EstimateFeeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{13} -} - -func (m *EstimateFeeRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EstimateFeeRequest.Unmarshal(m, b) -} -func (m *EstimateFeeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EstimateFeeRequest.Marshal(b, m, deterministic) -} -func (m *EstimateFeeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_EstimateFeeRequest.Merge(m, src) -} -func (m *EstimateFeeRequest) XXX_Size() int { - return xxx_messageInfo_EstimateFeeRequest.Size(m) -} -func (m *EstimateFeeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_EstimateFeeRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_EstimateFeeRequest proto.InternalMessageInfo - -func (m *EstimateFeeRequest) GetAddrToAmount() map[string]int64 { - if m != nil { - return m.AddrToAmount - } - return nil -} - -func (m *EstimateFeeRequest) GetTargetConf() int32 { - if m != nil { - return m.TargetConf - } - return 0 -} - -type EstimateFeeResponse struct { - // The total fee in satoshis. - FeeSat int64 `protobuf:"varint,1,opt,name=fee_sat,json=feeSat,proto3" json:"fee_sat,omitempty"` - // The fee rate in satoshi/byte. 
- FeerateSatPerByte int64 `protobuf:"varint,2,opt,name=feerate_sat_per_byte,json=feerateSatPerByte,proto3" json:"feerate_sat_per_byte,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *EstimateFeeResponse) Reset() { *m = EstimateFeeResponse{} } -func (m *EstimateFeeResponse) String() string { return proto.CompactTextString(m) } -func (*EstimateFeeResponse) ProtoMessage() {} -func (*EstimateFeeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{14} -} - -func (m *EstimateFeeResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EstimateFeeResponse.Unmarshal(m, b) -} -func (m *EstimateFeeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EstimateFeeResponse.Marshal(b, m, deterministic) -} -func (m *EstimateFeeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_EstimateFeeResponse.Merge(m, src) -} -func (m *EstimateFeeResponse) XXX_Size() int { - return xxx_messageInfo_EstimateFeeResponse.Size(m) -} -func (m *EstimateFeeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_EstimateFeeResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_EstimateFeeResponse proto.InternalMessageInfo - -func (m *EstimateFeeResponse) GetFeeSat() int64 { - if m != nil { - return m.FeeSat - } - return 0 -} - -func (m *EstimateFeeResponse) GetFeerateSatPerByte() int64 { - if m != nil { - return m.FeerateSatPerByte - } - return 0 -} - -type SendManyRequest struct { - // The map from addresses to amounts - AddrToAmount map[string]int64 `protobuf:"bytes,1,rep,name=AddrToAmount,proto3" json:"AddrToAmount,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` - // The target number of blocks that this transaction should be confirmed - // by. 
- TargetConf int32 `protobuf:"varint,3,opt,name=target_conf,json=targetConf,proto3" json:"target_conf,omitempty"` - // A manual fee rate set in sat/byte that should be used when crafting the - // transaction. - SatPerByte int64 `protobuf:"varint,5,opt,name=sat_per_byte,json=satPerByte,proto3" json:"sat_per_byte,omitempty"` - // An optional label for the transaction, limited to 500 characters. - Label string `protobuf:"bytes,6,opt,name=label,proto3" json:"label,omitempty"` - // The minimum number of confirmations each one of your outputs used for - // the transaction must satisfy. - MinConfs int32 `protobuf:"varint,7,opt,name=min_confs,json=minConfs,proto3" json:"min_confs,omitempty"` - // Whether unconfirmed outputs should be used as inputs for the transaction. - SpendUnconfirmed bool `protobuf:"varint,8,opt,name=spend_unconfirmed,json=spendUnconfirmed,proto3" json:"spend_unconfirmed,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SendManyRequest) Reset() { *m = SendManyRequest{} } -func (m *SendManyRequest) String() string { return proto.CompactTextString(m) } -func (*SendManyRequest) ProtoMessage() {} -func (*SendManyRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{15} -} - -func (m *SendManyRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SendManyRequest.Unmarshal(m, b) -} -func (m *SendManyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SendManyRequest.Marshal(b, m, deterministic) -} -func (m *SendManyRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SendManyRequest.Merge(m, src) -} -func (m *SendManyRequest) XXX_Size() int { - return xxx_messageInfo_SendManyRequest.Size(m) -} -func (m *SendManyRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SendManyRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_SendManyRequest proto.InternalMessageInfo - -func (m 
*SendManyRequest) GetAddrToAmount() map[string]int64 { - if m != nil { - return m.AddrToAmount - } - return nil -} - -func (m *SendManyRequest) GetTargetConf() int32 { - if m != nil { - return m.TargetConf - } - return 0 -} - -func (m *SendManyRequest) GetSatPerByte() int64 { - if m != nil { - return m.SatPerByte - } - return 0 -} - -func (m *SendManyRequest) GetLabel() string { - if m != nil { - return m.Label - } - return "" -} - -func (m *SendManyRequest) GetMinConfs() int32 { - if m != nil { - return m.MinConfs - } - return 0 -} - -func (m *SendManyRequest) GetSpendUnconfirmed() bool { - if m != nil { - return m.SpendUnconfirmed - } - return false -} - -type SendManyResponse struct { - // The id of the transaction - Txid string `protobuf:"bytes,1,opt,name=txid,proto3" json:"txid,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SendManyResponse) Reset() { *m = SendManyResponse{} } -func (m *SendManyResponse) String() string { return proto.CompactTextString(m) } -func (*SendManyResponse) ProtoMessage() {} -func (*SendManyResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{16} -} - -func (m *SendManyResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SendManyResponse.Unmarshal(m, b) -} -func (m *SendManyResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SendManyResponse.Marshal(b, m, deterministic) -} -func (m *SendManyResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_SendManyResponse.Merge(m, src) -} -func (m *SendManyResponse) XXX_Size() int { - return xxx_messageInfo_SendManyResponse.Size(m) -} -func (m *SendManyResponse) XXX_DiscardUnknown() { - xxx_messageInfo_SendManyResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_SendManyResponse proto.InternalMessageInfo - -func (m *SendManyResponse) GetTxid() string { - if m != nil { - return m.Txid - } - return "" -} - 
-type SendCoinsRequest struct { - // The address to send coins to - Addr string `protobuf:"bytes,1,opt,name=addr,proto3" json:"addr,omitempty"` - // The amount in satoshis to send - Amount int64 `protobuf:"varint,2,opt,name=amount,proto3" json:"amount,omitempty"` - // The target number of blocks that this transaction should be confirmed - // by. - TargetConf int32 `protobuf:"varint,3,opt,name=target_conf,json=targetConf,proto3" json:"target_conf,omitempty"` - // A manual fee rate set in sat/byte that should be used when crafting the - // transaction. - SatPerByte int64 `protobuf:"varint,5,opt,name=sat_per_byte,json=satPerByte,proto3" json:"sat_per_byte,omitempty"` - // - //If set, then the amount field will be ignored, and lnd will attempt to - //send all the coins under control of the internal wallet to the specified - //address. - SendAll bool `protobuf:"varint,6,opt,name=send_all,json=sendAll,proto3" json:"send_all,omitempty"` - // An optional label for the transaction, limited to 500 characters. - Label string `protobuf:"bytes,7,opt,name=label,proto3" json:"label,omitempty"` - // The minimum number of confirmations each one of your outputs used for - // the transaction must satisfy. - MinConfs int32 `protobuf:"varint,8,opt,name=min_confs,json=minConfs,proto3" json:"min_confs,omitempty"` - // Whether unconfirmed outputs should be used as inputs for the transaction. 
- SpendUnconfirmed bool `protobuf:"varint,9,opt,name=spend_unconfirmed,json=spendUnconfirmed,proto3" json:"spend_unconfirmed,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SendCoinsRequest) Reset() { *m = SendCoinsRequest{} } -func (m *SendCoinsRequest) String() string { return proto.CompactTextString(m) } -func (*SendCoinsRequest) ProtoMessage() {} -func (*SendCoinsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{17} -} - -func (m *SendCoinsRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SendCoinsRequest.Unmarshal(m, b) -} -func (m *SendCoinsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SendCoinsRequest.Marshal(b, m, deterministic) -} -func (m *SendCoinsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SendCoinsRequest.Merge(m, src) -} -func (m *SendCoinsRequest) XXX_Size() int { - return xxx_messageInfo_SendCoinsRequest.Size(m) -} -func (m *SendCoinsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SendCoinsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_SendCoinsRequest proto.InternalMessageInfo - -func (m *SendCoinsRequest) GetAddr() string { - if m != nil { - return m.Addr - } - return "" -} - -func (m *SendCoinsRequest) GetAmount() int64 { - if m != nil { - return m.Amount - } - return 0 -} - -func (m *SendCoinsRequest) GetTargetConf() int32 { - if m != nil { - return m.TargetConf - } - return 0 -} - -func (m *SendCoinsRequest) GetSatPerByte() int64 { - if m != nil { - return m.SatPerByte - } - return 0 -} - -func (m *SendCoinsRequest) GetSendAll() bool { - if m != nil { - return m.SendAll - } - return false -} - -func (m *SendCoinsRequest) GetLabel() string { - if m != nil { - return m.Label - } - return "" -} - -func (m *SendCoinsRequest) GetMinConfs() int32 { - if m != nil { - return m.MinConfs - } - return 0 -} - -func (m *SendCoinsRequest) 
GetSpendUnconfirmed() bool { - if m != nil { - return m.SpendUnconfirmed - } - return false -} - -type SendCoinsResponse struct { - // The transaction ID of the transaction - Txid string `protobuf:"bytes,1,opt,name=txid,proto3" json:"txid,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SendCoinsResponse) Reset() { *m = SendCoinsResponse{} } -func (m *SendCoinsResponse) String() string { return proto.CompactTextString(m) } -func (*SendCoinsResponse) ProtoMessage() {} -func (*SendCoinsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{18} -} - -func (m *SendCoinsResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SendCoinsResponse.Unmarshal(m, b) -} -func (m *SendCoinsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SendCoinsResponse.Marshal(b, m, deterministic) -} -func (m *SendCoinsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_SendCoinsResponse.Merge(m, src) -} -func (m *SendCoinsResponse) XXX_Size() int { - return xxx_messageInfo_SendCoinsResponse.Size(m) -} -func (m *SendCoinsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_SendCoinsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_SendCoinsResponse proto.InternalMessageInfo - -func (m *SendCoinsResponse) GetTxid() string { - if m != nil { - return m.Txid - } - return "" -} - -type ListUnspentRequest struct { - // The minimum number of confirmations to be included. - MinConfs int32 `protobuf:"varint,1,opt,name=min_confs,json=minConfs,proto3" json:"min_confs,omitempty"` - // The maximum number of confirmations to be included. 
- MaxConfs int32 `protobuf:"varint,2,opt,name=max_confs,json=maxConfs,proto3" json:"max_confs,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListUnspentRequest) Reset() { *m = ListUnspentRequest{} } -func (m *ListUnspentRequest) String() string { return proto.CompactTextString(m) } -func (*ListUnspentRequest) ProtoMessage() {} -func (*ListUnspentRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{19} -} - -func (m *ListUnspentRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListUnspentRequest.Unmarshal(m, b) -} -func (m *ListUnspentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListUnspentRequest.Marshal(b, m, deterministic) -} -func (m *ListUnspentRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListUnspentRequest.Merge(m, src) -} -func (m *ListUnspentRequest) XXX_Size() int { - return xxx_messageInfo_ListUnspentRequest.Size(m) -} -func (m *ListUnspentRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ListUnspentRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ListUnspentRequest proto.InternalMessageInfo - -func (m *ListUnspentRequest) GetMinConfs() int32 { - if m != nil { - return m.MinConfs - } - return 0 -} - -func (m *ListUnspentRequest) GetMaxConfs() int32 { - if m != nil { - return m.MaxConfs - } - return 0 -} - -type ListUnspentResponse struct { - // A list of utxos - Utxos []*Utxo `protobuf:"bytes,1,rep,name=utxos,proto3" json:"utxos,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListUnspentResponse) Reset() { *m = ListUnspentResponse{} } -func (m *ListUnspentResponse) String() string { return proto.CompactTextString(m) } -func (*ListUnspentResponse) ProtoMessage() {} -func (*ListUnspentResponse) Descriptor() ([]byte, []int) { - return 
fileDescriptor_77a6da22d6a3feb1, []int{20} -} - -func (m *ListUnspentResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListUnspentResponse.Unmarshal(m, b) -} -func (m *ListUnspentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListUnspentResponse.Marshal(b, m, deterministic) -} -func (m *ListUnspentResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListUnspentResponse.Merge(m, src) -} -func (m *ListUnspentResponse) XXX_Size() int { - return xxx_messageInfo_ListUnspentResponse.Size(m) -} -func (m *ListUnspentResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ListUnspentResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ListUnspentResponse proto.InternalMessageInfo - -func (m *ListUnspentResponse) GetUtxos() []*Utxo { - if m != nil { - return m.Utxos - } - return nil -} - -type NewAddressRequest struct { - // The address type - Type AddressType `protobuf:"varint,1,opt,name=type,proto3,enum=lnrpc.AddressType" json:"type,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NewAddressRequest) Reset() { *m = NewAddressRequest{} } -func (m *NewAddressRequest) String() string { return proto.CompactTextString(m) } -func (*NewAddressRequest) ProtoMessage() {} -func (*NewAddressRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{21} -} - -func (m *NewAddressRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NewAddressRequest.Unmarshal(m, b) -} -func (m *NewAddressRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NewAddressRequest.Marshal(b, m, deterministic) -} -func (m *NewAddressRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_NewAddressRequest.Merge(m, src) -} -func (m *NewAddressRequest) XXX_Size() int { - return xxx_messageInfo_NewAddressRequest.Size(m) -} -func (m *NewAddressRequest) XXX_DiscardUnknown() { - 
xxx_messageInfo_NewAddressRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_NewAddressRequest proto.InternalMessageInfo - -func (m *NewAddressRequest) GetType() AddressType { - if m != nil { - return m.Type - } - return AddressType_WITNESS_PUBKEY_HASH -} - -type NewAddressResponse struct { - // The newly generated wallet address - Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NewAddressResponse) Reset() { *m = NewAddressResponse{} } -func (m *NewAddressResponse) String() string { return proto.CompactTextString(m) } -func (*NewAddressResponse) ProtoMessage() {} -func (*NewAddressResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{22} -} - -func (m *NewAddressResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NewAddressResponse.Unmarshal(m, b) -} -func (m *NewAddressResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NewAddressResponse.Marshal(b, m, deterministic) -} -func (m *NewAddressResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_NewAddressResponse.Merge(m, src) -} -func (m *NewAddressResponse) XXX_Size() int { - return xxx_messageInfo_NewAddressResponse.Size(m) -} -func (m *NewAddressResponse) XXX_DiscardUnknown() { - xxx_messageInfo_NewAddressResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_NewAddressResponse proto.InternalMessageInfo - -func (m *NewAddressResponse) GetAddress() string { - if m != nil { - return m.Address - } - return "" -} - -type SignMessageRequest struct { - // - //The message to be signed. When using REST, this field must be encoded as - //base64. 
- Msg []byte `protobuf:"bytes,1,opt,name=msg,proto3" json:"msg,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SignMessageRequest) Reset() { *m = SignMessageRequest{} } -func (m *SignMessageRequest) String() string { return proto.CompactTextString(m) } -func (*SignMessageRequest) ProtoMessage() {} -func (*SignMessageRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{23} -} - -func (m *SignMessageRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SignMessageRequest.Unmarshal(m, b) -} -func (m *SignMessageRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SignMessageRequest.Marshal(b, m, deterministic) -} -func (m *SignMessageRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SignMessageRequest.Merge(m, src) -} -func (m *SignMessageRequest) XXX_Size() int { - return xxx_messageInfo_SignMessageRequest.Size(m) -} -func (m *SignMessageRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SignMessageRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_SignMessageRequest proto.InternalMessageInfo - -func (m *SignMessageRequest) GetMsg() []byte { - if m != nil { - return m.Msg - } - return nil -} - -type SignMessageResponse struct { - // The signature for the given message - Signature string `protobuf:"bytes,1,opt,name=signature,proto3" json:"signature,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SignMessageResponse) Reset() { *m = SignMessageResponse{} } -func (m *SignMessageResponse) String() string { return proto.CompactTextString(m) } -func (*SignMessageResponse) ProtoMessage() {} -func (*SignMessageResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{24} -} - -func (m *SignMessageResponse) XXX_Unmarshal(b []byte) error { - return 
xxx_messageInfo_SignMessageResponse.Unmarshal(m, b) -} -func (m *SignMessageResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SignMessageResponse.Marshal(b, m, deterministic) -} -func (m *SignMessageResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_SignMessageResponse.Merge(m, src) -} -func (m *SignMessageResponse) XXX_Size() int { - return xxx_messageInfo_SignMessageResponse.Size(m) -} -func (m *SignMessageResponse) XXX_DiscardUnknown() { - xxx_messageInfo_SignMessageResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_SignMessageResponse proto.InternalMessageInfo - -func (m *SignMessageResponse) GetSignature() string { - if m != nil { - return m.Signature - } - return "" -} - -type VerifyMessageRequest struct { - // - //The message over which the signature is to be verified. When using REST, - //this field must be encoded as base64. - Msg []byte `protobuf:"bytes,1,opt,name=msg,proto3" json:"msg,omitempty"` - // The signature to be verified over the given message - Signature string `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *VerifyMessageRequest) Reset() { *m = VerifyMessageRequest{} } -func (m *VerifyMessageRequest) String() string { return proto.CompactTextString(m) } -func (*VerifyMessageRequest) ProtoMessage() {} -func (*VerifyMessageRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{25} -} - -func (m *VerifyMessageRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VerifyMessageRequest.Unmarshal(m, b) -} -func (m *VerifyMessageRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VerifyMessageRequest.Marshal(b, m, deterministic) -} -func (m *VerifyMessageRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_VerifyMessageRequest.Merge(m, src) -} -func (m 
*VerifyMessageRequest) XXX_Size() int { - return xxx_messageInfo_VerifyMessageRequest.Size(m) -} -func (m *VerifyMessageRequest) XXX_DiscardUnknown() { - xxx_messageInfo_VerifyMessageRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_VerifyMessageRequest proto.InternalMessageInfo - -func (m *VerifyMessageRequest) GetMsg() []byte { - if m != nil { - return m.Msg - } - return nil -} - -func (m *VerifyMessageRequest) GetSignature() string { - if m != nil { - return m.Signature - } - return "" -} - -type VerifyMessageResponse struct { - // Whether the signature was valid over the given message - Valid bool `protobuf:"varint,1,opt,name=valid,proto3" json:"valid,omitempty"` - // The pubkey recovered from the signature - Pubkey string `protobuf:"bytes,2,opt,name=pubkey,proto3" json:"pubkey,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *VerifyMessageResponse) Reset() { *m = VerifyMessageResponse{} } -func (m *VerifyMessageResponse) String() string { return proto.CompactTextString(m) } -func (*VerifyMessageResponse) ProtoMessage() {} -func (*VerifyMessageResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{26} -} - -func (m *VerifyMessageResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VerifyMessageResponse.Unmarshal(m, b) -} -func (m *VerifyMessageResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VerifyMessageResponse.Marshal(b, m, deterministic) -} -func (m *VerifyMessageResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_VerifyMessageResponse.Merge(m, src) -} -func (m *VerifyMessageResponse) XXX_Size() int { - return xxx_messageInfo_VerifyMessageResponse.Size(m) -} -func (m *VerifyMessageResponse) XXX_DiscardUnknown() { - xxx_messageInfo_VerifyMessageResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_VerifyMessageResponse proto.InternalMessageInfo - -func (m 
*VerifyMessageResponse) GetValid() bool { - if m != nil { - return m.Valid - } - return false -} - -func (m *VerifyMessageResponse) GetPubkey() string { - if m != nil { - return m.Pubkey - } - return "" -} - -type ConnectPeerRequest struct { - // Lightning address of the peer, in the format `@host` - Addr *LightningAddress `protobuf:"bytes,1,opt,name=addr,proto3" json:"addr,omitempty"` - // If set, the daemon will attempt to persistently connect to the target - // peer. Otherwise, the call will be synchronous. - Perm bool `protobuf:"varint,2,opt,name=perm,proto3" json:"perm,omitempty"` - // - //The connection timeout value (in seconds) for this request. It won't affect - //other requests. - Timeout uint64 `protobuf:"varint,3,opt,name=timeout,proto3" json:"timeout,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ConnectPeerRequest) Reset() { *m = ConnectPeerRequest{} } -func (m *ConnectPeerRequest) String() string { return proto.CompactTextString(m) } -func (*ConnectPeerRequest) ProtoMessage() {} -func (*ConnectPeerRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{27} -} - -func (m *ConnectPeerRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ConnectPeerRequest.Unmarshal(m, b) -} -func (m *ConnectPeerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ConnectPeerRequest.Marshal(b, m, deterministic) -} -func (m *ConnectPeerRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConnectPeerRequest.Merge(m, src) -} -func (m *ConnectPeerRequest) XXX_Size() int { - return xxx_messageInfo_ConnectPeerRequest.Size(m) -} -func (m *ConnectPeerRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ConnectPeerRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ConnectPeerRequest proto.InternalMessageInfo - -func (m *ConnectPeerRequest) GetAddr() *LightningAddress { - if m != nil { - return m.Addr 
- } - return nil -} - -func (m *ConnectPeerRequest) GetPerm() bool { - if m != nil { - return m.Perm - } - return false -} - -func (m *ConnectPeerRequest) GetTimeout() uint64 { - if m != nil { - return m.Timeout - } - return 0 -} - -type ConnectPeerResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ConnectPeerResponse) Reset() { *m = ConnectPeerResponse{} } -func (m *ConnectPeerResponse) String() string { return proto.CompactTextString(m) } -func (*ConnectPeerResponse) ProtoMessage() {} -func (*ConnectPeerResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{28} -} - -func (m *ConnectPeerResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ConnectPeerResponse.Unmarshal(m, b) -} -func (m *ConnectPeerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ConnectPeerResponse.Marshal(b, m, deterministic) -} -func (m *ConnectPeerResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConnectPeerResponse.Merge(m, src) -} -func (m *ConnectPeerResponse) XXX_Size() int { - return xxx_messageInfo_ConnectPeerResponse.Size(m) -} -func (m *ConnectPeerResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ConnectPeerResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ConnectPeerResponse proto.InternalMessageInfo - -type DisconnectPeerRequest struct { - // The pubkey of the node to disconnect from - PubKey string `protobuf:"bytes,1,opt,name=pub_key,json=pubKey,proto3" json:"pub_key,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DisconnectPeerRequest) Reset() { *m = DisconnectPeerRequest{} } -func (m *DisconnectPeerRequest) String() string { return proto.CompactTextString(m) } -func (*DisconnectPeerRequest) ProtoMessage() {} -func (*DisconnectPeerRequest) Descriptor() ([]byte, []int) { - return 
fileDescriptor_77a6da22d6a3feb1, []int{29} -} - -func (m *DisconnectPeerRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DisconnectPeerRequest.Unmarshal(m, b) -} -func (m *DisconnectPeerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DisconnectPeerRequest.Marshal(b, m, deterministic) -} -func (m *DisconnectPeerRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DisconnectPeerRequest.Merge(m, src) -} -func (m *DisconnectPeerRequest) XXX_Size() int { - return xxx_messageInfo_DisconnectPeerRequest.Size(m) -} -func (m *DisconnectPeerRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DisconnectPeerRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_DisconnectPeerRequest proto.InternalMessageInfo - -func (m *DisconnectPeerRequest) GetPubKey() string { - if m != nil { - return m.PubKey - } - return "" -} - -type DisconnectPeerResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DisconnectPeerResponse) Reset() { *m = DisconnectPeerResponse{} } -func (m *DisconnectPeerResponse) String() string { return proto.CompactTextString(m) } -func (*DisconnectPeerResponse) ProtoMessage() {} -func (*DisconnectPeerResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{30} -} - -func (m *DisconnectPeerResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DisconnectPeerResponse.Unmarshal(m, b) -} -func (m *DisconnectPeerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DisconnectPeerResponse.Marshal(b, m, deterministic) -} -func (m *DisconnectPeerResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_DisconnectPeerResponse.Merge(m, src) -} -func (m *DisconnectPeerResponse) XXX_Size() int { - return xxx_messageInfo_DisconnectPeerResponse.Size(m) -} -func (m *DisconnectPeerResponse) XXX_DiscardUnknown() { - 
xxx_messageInfo_DisconnectPeerResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_DisconnectPeerResponse proto.InternalMessageInfo - -type HTLC struct { - Incoming bool `protobuf:"varint,1,opt,name=incoming,proto3" json:"incoming,omitempty"` - Amount int64 `protobuf:"varint,2,opt,name=amount,proto3" json:"amount,omitempty"` - HashLock []byte `protobuf:"bytes,3,opt,name=hash_lock,json=hashLock,proto3" json:"hash_lock,omitempty"` - ExpirationHeight uint32 `protobuf:"varint,4,opt,name=expiration_height,json=expirationHeight,proto3" json:"expiration_height,omitempty"` - // Index identifying the htlc on the channel. - HtlcIndex uint64 `protobuf:"varint,5,opt,name=htlc_index,json=htlcIndex,proto3" json:"htlc_index,omitempty"` - // If this HTLC is involved in a forwarding operation, this field indicates - // the forwarding channel. For an outgoing htlc, it is the incoming channel. - // For an incoming htlc, it is the outgoing channel. When the htlc - // originates from this node or this node is the final destination, - // forwarding_channel will be zero. The forwarding channel will also be zero - // for htlcs that need to be forwarded but don't have a forwarding decision - // persisted yet. - ForwardingChannel uint64 `protobuf:"varint,6,opt,name=forwarding_channel,json=forwardingChannel,proto3" json:"forwarding_channel,omitempty"` - // Index identifying the htlc on the forwarding channel. 
- ForwardingHtlcIndex uint64 `protobuf:"varint,7,opt,name=forwarding_htlc_index,json=forwardingHtlcIndex,proto3" json:"forwarding_htlc_index,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *HTLC) Reset() { *m = HTLC{} } -func (m *HTLC) String() string { return proto.CompactTextString(m) } -func (*HTLC) ProtoMessage() {} -func (*HTLC) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{31} -} - -func (m *HTLC) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_HTLC.Unmarshal(m, b) -} -func (m *HTLC) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_HTLC.Marshal(b, m, deterministic) -} -func (m *HTLC) XXX_Merge(src proto.Message) { - xxx_messageInfo_HTLC.Merge(m, src) -} -func (m *HTLC) XXX_Size() int { - return xxx_messageInfo_HTLC.Size(m) -} -func (m *HTLC) XXX_DiscardUnknown() { - xxx_messageInfo_HTLC.DiscardUnknown(m) -} - -var xxx_messageInfo_HTLC proto.InternalMessageInfo - -func (m *HTLC) GetIncoming() bool { - if m != nil { - return m.Incoming - } - return false -} - -func (m *HTLC) GetAmount() int64 { - if m != nil { - return m.Amount - } - return 0 -} - -func (m *HTLC) GetHashLock() []byte { - if m != nil { - return m.HashLock - } - return nil -} - -func (m *HTLC) GetExpirationHeight() uint32 { - if m != nil { - return m.ExpirationHeight - } - return 0 -} - -func (m *HTLC) GetHtlcIndex() uint64 { - if m != nil { - return m.HtlcIndex - } - return 0 -} - -func (m *HTLC) GetForwardingChannel() uint64 { - if m != nil { - return m.ForwardingChannel - } - return 0 -} - -func (m *HTLC) GetForwardingHtlcIndex() uint64 { - if m != nil { - return m.ForwardingHtlcIndex - } - return 0 -} - -type ChannelConstraints struct { - // - //The CSV delay expressed in relative blocks. If the channel is force closed, - //we will need to wait for this many blocks before we can regain our funds. 
- CsvDelay uint32 `protobuf:"varint,1,opt,name=csv_delay,json=csvDelay,proto3" json:"csv_delay,omitempty"` - // The minimum satoshis this node is required to reserve in its balance. - ChanReserveSat uint64 `protobuf:"varint,2,opt,name=chan_reserve_sat,json=chanReserveSat,proto3" json:"chan_reserve_sat,omitempty"` - // The dust limit (in satoshis) of the initiator's commitment tx. - DustLimitSat uint64 `protobuf:"varint,3,opt,name=dust_limit_sat,json=dustLimitSat,proto3" json:"dust_limit_sat,omitempty"` - // The maximum amount of coins in millisatoshis that can be pending in this - // channel. - MaxPendingAmtMsat uint64 `protobuf:"varint,4,opt,name=max_pending_amt_msat,json=maxPendingAmtMsat,proto3" json:"max_pending_amt_msat,omitempty"` - // The smallest HTLC in millisatoshis that the initiator will accept. - MinHtlcMsat uint64 `protobuf:"varint,5,opt,name=min_htlc_msat,json=minHtlcMsat,proto3" json:"min_htlc_msat,omitempty"` - // The total number of incoming HTLC's that the initiator will accept. 
- MaxAcceptedHtlcs uint32 `protobuf:"varint,6,opt,name=max_accepted_htlcs,json=maxAcceptedHtlcs,proto3" json:"max_accepted_htlcs,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ChannelConstraints) Reset() { *m = ChannelConstraints{} } -func (m *ChannelConstraints) String() string { return proto.CompactTextString(m) } -func (*ChannelConstraints) ProtoMessage() {} -func (*ChannelConstraints) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{32} -} - -func (m *ChannelConstraints) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ChannelConstraints.Unmarshal(m, b) -} -func (m *ChannelConstraints) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ChannelConstraints.Marshal(b, m, deterministic) -} -func (m *ChannelConstraints) XXX_Merge(src proto.Message) { - xxx_messageInfo_ChannelConstraints.Merge(m, src) -} -func (m *ChannelConstraints) XXX_Size() int { - return xxx_messageInfo_ChannelConstraints.Size(m) -} -func (m *ChannelConstraints) XXX_DiscardUnknown() { - xxx_messageInfo_ChannelConstraints.DiscardUnknown(m) -} - -var xxx_messageInfo_ChannelConstraints proto.InternalMessageInfo - -func (m *ChannelConstraints) GetCsvDelay() uint32 { - if m != nil { - return m.CsvDelay - } - return 0 -} - -func (m *ChannelConstraints) GetChanReserveSat() uint64 { - if m != nil { - return m.ChanReserveSat - } - return 0 -} - -func (m *ChannelConstraints) GetDustLimitSat() uint64 { - if m != nil { - return m.DustLimitSat - } - return 0 -} - -func (m *ChannelConstraints) GetMaxPendingAmtMsat() uint64 { - if m != nil { - return m.MaxPendingAmtMsat - } - return 0 -} - -func (m *ChannelConstraints) GetMinHtlcMsat() uint64 { - if m != nil { - return m.MinHtlcMsat - } - return 0 -} - -func (m *ChannelConstraints) GetMaxAcceptedHtlcs() uint32 { - if m != nil { - return m.MaxAcceptedHtlcs - } - return 0 -} - -type Channel struct { 
- // Whether this channel is active or not - Active bool `protobuf:"varint,1,opt,name=active,proto3" json:"active,omitempty"` - // The identity pubkey of the remote node - RemotePubkey string `protobuf:"bytes,2,opt,name=remote_pubkey,json=remotePubkey,proto3" json:"remote_pubkey,omitempty"` - // - //The outpoint (txid:index) of the funding transaction. With this value, Bob - //will be able to generate a signature for Alice's version of the commitment - //transaction. - ChannelPoint string `protobuf:"bytes,3,opt,name=channel_point,json=channelPoint,proto3" json:"channel_point,omitempty"` - // - //The unique channel ID for the channel. The first 3 bytes are the block - //height, the next 3 the index within the block, and the last 2 bytes are the - //output index for the channel. - ChanId uint64 `protobuf:"varint,4,opt,name=chan_id,json=chanId,proto3" json:"chan_id,omitempty"` - // The total amount of funds held in this channel - Capacity int64 `protobuf:"varint,5,opt,name=capacity,proto3" json:"capacity,omitempty"` - // This node's current balance in this channel - LocalBalance int64 `protobuf:"varint,6,opt,name=local_balance,json=localBalance,proto3" json:"local_balance,omitempty"` - // The counterparty's current balance in this channel - RemoteBalance int64 `protobuf:"varint,7,opt,name=remote_balance,json=remoteBalance,proto3" json:"remote_balance,omitempty"` - // - //The amount calculated to be paid in fees for the current set of commitment - //transactions. The fee amount is persisted with the channel in order to - //allow the fee amount to be removed and recalculated with each channel state - //update, including updates that happen after a system restart. 
- CommitFee int64 `protobuf:"varint,8,opt,name=commit_fee,json=commitFee,proto3" json:"commit_fee,omitempty"` - // The weight of the commitment transaction - CommitWeight int64 `protobuf:"varint,9,opt,name=commit_weight,json=commitWeight,proto3" json:"commit_weight,omitempty"` - // - //The required number of satoshis per kilo-weight that the requester will pay - //at all times, for both the funding transaction and commitment transaction. - //This value can later be updated once the channel is open. - FeePerKw int64 `protobuf:"varint,10,opt,name=fee_per_kw,json=feePerKw,proto3" json:"fee_per_kw,omitempty"` - // The unsettled balance in this channel - UnsettledBalance int64 `protobuf:"varint,11,opt,name=unsettled_balance,json=unsettledBalance,proto3" json:"unsettled_balance,omitempty"` - // - //The total number of satoshis we've sent within this channel. - TotalSatoshisSent int64 `protobuf:"varint,12,opt,name=total_satoshis_sent,json=totalSatoshisSent,proto3" json:"total_satoshis_sent,omitempty"` - // - //The total number of satoshis we've received within this channel. - TotalSatoshisReceived int64 `protobuf:"varint,13,opt,name=total_satoshis_received,json=totalSatoshisReceived,proto3" json:"total_satoshis_received,omitempty"` - // - //The total number of updates conducted within this channel. - NumUpdates uint64 `protobuf:"varint,14,opt,name=num_updates,json=numUpdates,proto3" json:"num_updates,omitempty"` - // - //The list of active, uncleared HTLCs currently pending within the channel. - PendingHtlcs []*HTLC `protobuf:"bytes,15,rep,name=pending_htlcs,json=pendingHtlcs,proto3" json:"pending_htlcs,omitempty"` - // - //Deprecated. The CSV delay expressed in relative blocks. If the channel is - //force closed, we will need to wait for this many blocks before we can regain - //our funds. - CsvDelay uint32 `protobuf:"varint,16,opt,name=csv_delay,json=csvDelay,proto3" json:"csv_delay,omitempty"` // Deprecated: Do not use. 
- // Whether this channel is advertised to the network or not. - Private bool `protobuf:"varint,17,opt,name=private,proto3" json:"private,omitempty"` - // True if we were the ones that created the channel. - Initiator bool `protobuf:"varint,18,opt,name=initiator,proto3" json:"initiator,omitempty"` - // A set of flags showing the current state of the channel. - ChanStatusFlags string `protobuf:"bytes,19,opt,name=chan_status_flags,json=chanStatusFlags,proto3" json:"chan_status_flags,omitempty"` - // Deprecated. The minimum satoshis this node is required to reserve in its - // balance. - LocalChanReserveSat int64 `protobuf:"varint,20,opt,name=local_chan_reserve_sat,json=localChanReserveSat,proto3" json:"local_chan_reserve_sat,omitempty"` // Deprecated: Do not use. - // - //Deprecated. The minimum satoshis the other node is required to reserve in - //its balance. - RemoteChanReserveSat int64 `protobuf:"varint,21,opt,name=remote_chan_reserve_sat,json=remoteChanReserveSat,proto3" json:"remote_chan_reserve_sat,omitempty"` // Deprecated: Do not use. - // Deprecated. Use commitment_type. - StaticRemoteKey bool `protobuf:"varint,22,opt,name=static_remote_key,json=staticRemoteKey,proto3" json:"static_remote_key,omitempty"` // Deprecated: Do not use. - // The commitment type used by this channel. - CommitmentType CommitmentType `protobuf:"varint,26,opt,name=commitment_type,json=commitmentType,proto3,enum=lnrpc.CommitmentType" json:"commitment_type,omitempty"` - // - //The number of seconds that the channel has been monitored by the channel - //scoring system. Scores are currently not persisted, so this value may be - //less than the lifetime of the channel [EXPERIMENTAL]. - Lifetime int64 `protobuf:"varint,23,opt,name=lifetime,proto3" json:"lifetime,omitempty"` - // - //The number of seconds that the remote peer has been observed as being online - //by the channel scoring system over the lifetime of the channel - //[EXPERIMENTAL]. 
- Uptime int64 `protobuf:"varint,24,opt,name=uptime,proto3" json:"uptime,omitempty"` - // - //Close address is the address that we will enforce payout to on cooperative - //close if the channel was opened utilizing option upfront shutdown. This - //value can be set on channel open by setting close_address in an open channel - //request. If this value is not set, you can still choose a payout address by - //cooperatively closing with the delivery_address field set. - CloseAddress string `protobuf:"bytes,25,opt,name=close_address,json=closeAddress,proto3" json:"close_address,omitempty"` - // - //The amount that the initiator of the channel optionally pushed to the remote - //party on channel open. This amount will be zero if the channel initiator did - //not push any funds to the remote peer. If the initiator field is true, we - //pushed this amount to our peer, if it is false, the remote peer pushed this - //amount to us. - PushAmountSat uint64 `protobuf:"varint,27,opt,name=push_amount_sat,json=pushAmountSat,proto3" json:"push_amount_sat,omitempty"` - // - //This uint32 indicates if this channel is to be considered 'frozen'. A - //frozen channel doest not allow a cooperative channel close by the - //initiator. The thaw_height is the height that this restriction stops - //applying to the channel. This field is optional, not setting it or using a - //value of zero will mean the channel has no additional restrictions. The - //height can be interpreted in two ways: as a relative height if the value is - //less than 500,000, or as an absolute height otherwise. - ThawHeight uint32 `protobuf:"varint,28,opt,name=thaw_height,json=thawHeight,proto3" json:"thaw_height,omitempty"` - // List constraints for the local node. - LocalConstraints *ChannelConstraints `protobuf:"bytes,29,opt,name=local_constraints,json=localConstraints,proto3" json:"local_constraints,omitempty"` - // List constraints for the remote node. 
- RemoteConstraints *ChannelConstraints `protobuf:"bytes,30,opt,name=remote_constraints,json=remoteConstraints,proto3" json:"remote_constraints,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Channel) Reset() { *m = Channel{} } -func (m *Channel) String() string { return proto.CompactTextString(m) } -func (*Channel) ProtoMessage() {} -func (*Channel) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{33} -} - -func (m *Channel) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Channel.Unmarshal(m, b) -} -func (m *Channel) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Channel.Marshal(b, m, deterministic) -} -func (m *Channel) XXX_Merge(src proto.Message) { - xxx_messageInfo_Channel.Merge(m, src) -} -func (m *Channel) XXX_Size() int { - return xxx_messageInfo_Channel.Size(m) -} -func (m *Channel) XXX_DiscardUnknown() { - xxx_messageInfo_Channel.DiscardUnknown(m) -} - -var xxx_messageInfo_Channel proto.InternalMessageInfo - -func (m *Channel) GetActive() bool { - if m != nil { - return m.Active - } - return false -} - -func (m *Channel) GetRemotePubkey() string { - if m != nil { - return m.RemotePubkey - } - return "" -} - -func (m *Channel) GetChannelPoint() string { - if m != nil { - return m.ChannelPoint - } - return "" -} - -func (m *Channel) GetChanId() uint64 { - if m != nil { - return m.ChanId - } - return 0 -} - -func (m *Channel) GetCapacity() int64 { - if m != nil { - return m.Capacity - } - return 0 -} - -func (m *Channel) GetLocalBalance() int64 { - if m != nil { - return m.LocalBalance - } - return 0 -} - -func (m *Channel) GetRemoteBalance() int64 { - if m != nil { - return m.RemoteBalance - } - return 0 -} - -func (m *Channel) GetCommitFee() int64 { - if m != nil { - return m.CommitFee - } - return 0 -} - -func (m *Channel) GetCommitWeight() int64 { - if m != nil { - return m.CommitWeight - 
} - return 0 -} - -func (m *Channel) GetFeePerKw() int64 { - if m != nil { - return m.FeePerKw - } - return 0 -} - -func (m *Channel) GetUnsettledBalance() int64 { - if m != nil { - return m.UnsettledBalance - } - return 0 -} - -func (m *Channel) GetTotalSatoshisSent() int64 { - if m != nil { - return m.TotalSatoshisSent - } - return 0 -} - -func (m *Channel) GetTotalSatoshisReceived() int64 { - if m != nil { - return m.TotalSatoshisReceived - } - return 0 -} - -func (m *Channel) GetNumUpdates() uint64 { - if m != nil { - return m.NumUpdates - } - return 0 -} - -func (m *Channel) GetPendingHtlcs() []*HTLC { - if m != nil { - return m.PendingHtlcs - } - return nil -} - -// Deprecated: Do not use. -func (m *Channel) GetCsvDelay() uint32 { - if m != nil { - return m.CsvDelay - } - return 0 -} - -func (m *Channel) GetPrivate() bool { - if m != nil { - return m.Private - } - return false -} - -func (m *Channel) GetInitiator() bool { - if m != nil { - return m.Initiator - } - return false -} - -func (m *Channel) GetChanStatusFlags() string { - if m != nil { - return m.ChanStatusFlags - } - return "" -} - -// Deprecated: Do not use. -func (m *Channel) GetLocalChanReserveSat() int64 { - if m != nil { - return m.LocalChanReserveSat - } - return 0 -} - -// Deprecated: Do not use. -func (m *Channel) GetRemoteChanReserveSat() int64 { - if m != nil { - return m.RemoteChanReserveSat - } - return 0 -} - -// Deprecated: Do not use. 
-func (m *Channel) GetStaticRemoteKey() bool { - if m != nil { - return m.StaticRemoteKey - } - return false -} - -func (m *Channel) GetCommitmentType() CommitmentType { - if m != nil { - return m.CommitmentType - } - return CommitmentType_LEGACY -} - -func (m *Channel) GetLifetime() int64 { - if m != nil { - return m.Lifetime - } - return 0 -} - -func (m *Channel) GetUptime() int64 { - if m != nil { - return m.Uptime - } - return 0 -} - -func (m *Channel) GetCloseAddress() string { - if m != nil { - return m.CloseAddress - } - return "" -} - -func (m *Channel) GetPushAmountSat() uint64 { - if m != nil { - return m.PushAmountSat - } - return 0 -} - -func (m *Channel) GetThawHeight() uint32 { - if m != nil { - return m.ThawHeight - } - return 0 -} - -func (m *Channel) GetLocalConstraints() *ChannelConstraints { - if m != nil { - return m.LocalConstraints - } - return nil -} - -func (m *Channel) GetRemoteConstraints() *ChannelConstraints { - if m != nil { - return m.RemoteConstraints - } - return nil -} - -type ListChannelsRequest struct { - ActiveOnly bool `protobuf:"varint,1,opt,name=active_only,json=activeOnly,proto3" json:"active_only,omitempty"` - InactiveOnly bool `protobuf:"varint,2,opt,name=inactive_only,json=inactiveOnly,proto3" json:"inactive_only,omitempty"` - PublicOnly bool `protobuf:"varint,3,opt,name=public_only,json=publicOnly,proto3" json:"public_only,omitempty"` - PrivateOnly bool `protobuf:"varint,4,opt,name=private_only,json=privateOnly,proto3" json:"private_only,omitempty"` - // - //Filters the response for channels with a target peer's pubkey. If peer is - //empty, all channels will be returned. 
- Peer []byte `protobuf:"bytes,5,opt,name=peer,proto3" json:"peer,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListChannelsRequest) Reset() { *m = ListChannelsRequest{} } -func (m *ListChannelsRequest) String() string { return proto.CompactTextString(m) } -func (*ListChannelsRequest) ProtoMessage() {} -func (*ListChannelsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{34} -} - -func (m *ListChannelsRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListChannelsRequest.Unmarshal(m, b) -} -func (m *ListChannelsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListChannelsRequest.Marshal(b, m, deterministic) -} -func (m *ListChannelsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListChannelsRequest.Merge(m, src) -} -func (m *ListChannelsRequest) XXX_Size() int { - return xxx_messageInfo_ListChannelsRequest.Size(m) -} -func (m *ListChannelsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ListChannelsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ListChannelsRequest proto.InternalMessageInfo - -func (m *ListChannelsRequest) GetActiveOnly() bool { - if m != nil { - return m.ActiveOnly - } - return false -} - -func (m *ListChannelsRequest) GetInactiveOnly() bool { - if m != nil { - return m.InactiveOnly - } - return false -} - -func (m *ListChannelsRequest) GetPublicOnly() bool { - if m != nil { - return m.PublicOnly - } - return false -} - -func (m *ListChannelsRequest) GetPrivateOnly() bool { - if m != nil { - return m.PrivateOnly - } - return false -} - -func (m *ListChannelsRequest) GetPeer() []byte { - if m != nil { - return m.Peer - } - return nil -} - -type ListChannelsResponse struct { - // The list of active channels - Channels []*Channel `protobuf:"bytes,11,rep,name=channels,proto3" json:"channels,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - 
XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListChannelsResponse) Reset() { *m = ListChannelsResponse{} } -func (m *ListChannelsResponse) String() string { return proto.CompactTextString(m) } -func (*ListChannelsResponse) ProtoMessage() {} -func (*ListChannelsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{35} -} - -func (m *ListChannelsResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListChannelsResponse.Unmarshal(m, b) -} -func (m *ListChannelsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListChannelsResponse.Marshal(b, m, deterministic) -} -func (m *ListChannelsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListChannelsResponse.Merge(m, src) -} -func (m *ListChannelsResponse) XXX_Size() int { - return xxx_messageInfo_ListChannelsResponse.Size(m) -} -func (m *ListChannelsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ListChannelsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ListChannelsResponse proto.InternalMessageInfo - -func (m *ListChannelsResponse) GetChannels() []*Channel { - if m != nil { - return m.Channels - } - return nil -} - -type ChannelCloseSummary struct { - // The outpoint (txid:index) of the funding transaction. - ChannelPoint string `protobuf:"bytes,1,opt,name=channel_point,json=channelPoint,proto3" json:"channel_point,omitempty"` - // The unique channel ID for the channel. - ChanId uint64 `protobuf:"varint,2,opt,name=chan_id,json=chanId,proto3" json:"chan_id,omitempty"` - // The hash of the genesis block that this channel resides within. - ChainHash string `protobuf:"bytes,3,opt,name=chain_hash,json=chainHash,proto3" json:"chain_hash,omitempty"` - // The txid of the transaction which ultimately closed this channel. 
- ClosingTxHash string `protobuf:"bytes,4,opt,name=closing_tx_hash,json=closingTxHash,proto3" json:"closing_tx_hash,omitempty"` - // Public key of the remote peer that we formerly had a channel with. - RemotePubkey string `protobuf:"bytes,5,opt,name=remote_pubkey,json=remotePubkey,proto3" json:"remote_pubkey,omitempty"` - // Total capacity of the channel. - Capacity int64 `protobuf:"varint,6,opt,name=capacity,proto3" json:"capacity,omitempty"` - // Height at which the funding transaction was spent. - CloseHeight uint32 `protobuf:"varint,7,opt,name=close_height,json=closeHeight,proto3" json:"close_height,omitempty"` - // Settled balance at the time of channel closure - SettledBalance int64 `protobuf:"varint,8,opt,name=settled_balance,json=settledBalance,proto3" json:"settled_balance,omitempty"` - // The sum of all the time-locked outputs at the time of channel closure - TimeLockedBalance int64 `protobuf:"varint,9,opt,name=time_locked_balance,json=timeLockedBalance,proto3" json:"time_locked_balance,omitempty"` - // Details on how the channel was closed. - CloseType ChannelCloseSummary_ClosureType `protobuf:"varint,10,opt,name=close_type,json=closeType,proto3,enum=lnrpc.ChannelCloseSummary_ClosureType" json:"close_type,omitempty"` - // - //Open initiator is the party that initiated opening the channel. Note that - //this value may be unknown if the channel was closed before we migrated to - //store open channel information after close. - OpenInitiator Initiator `protobuf:"varint,11,opt,name=open_initiator,json=openInitiator,proto3,enum=lnrpc.Initiator" json:"open_initiator,omitempty"` - // - //Close initiator indicates which party initiated the close. This value will - //be unknown for channels that were cooperatively closed before we started - //tracking cooperative close initiators. 
Note that this indicates which party - //initiated a close, and it is possible for both to initiate cooperative or - //force closes, although only one party's close will be confirmed on chain. - CloseInitiator Initiator `protobuf:"varint,12,opt,name=close_initiator,json=closeInitiator,proto3,enum=lnrpc.Initiator" json:"close_initiator,omitempty"` - Resolutions []*Resolution `protobuf:"bytes,13,rep,name=resolutions,proto3" json:"resolutions,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ChannelCloseSummary) Reset() { *m = ChannelCloseSummary{} } -func (m *ChannelCloseSummary) String() string { return proto.CompactTextString(m) } -func (*ChannelCloseSummary) ProtoMessage() {} -func (*ChannelCloseSummary) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{36} -} - -func (m *ChannelCloseSummary) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ChannelCloseSummary.Unmarshal(m, b) -} -func (m *ChannelCloseSummary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ChannelCloseSummary.Marshal(b, m, deterministic) -} -func (m *ChannelCloseSummary) XXX_Merge(src proto.Message) { - xxx_messageInfo_ChannelCloseSummary.Merge(m, src) -} -func (m *ChannelCloseSummary) XXX_Size() int { - return xxx_messageInfo_ChannelCloseSummary.Size(m) -} -func (m *ChannelCloseSummary) XXX_DiscardUnknown() { - xxx_messageInfo_ChannelCloseSummary.DiscardUnknown(m) -} - -var xxx_messageInfo_ChannelCloseSummary proto.InternalMessageInfo - -func (m *ChannelCloseSummary) GetChannelPoint() string { - if m != nil { - return m.ChannelPoint - } - return "" -} - -func (m *ChannelCloseSummary) GetChanId() uint64 { - if m != nil { - return m.ChanId - } - return 0 -} - -func (m *ChannelCloseSummary) GetChainHash() string { - if m != nil { - return m.ChainHash - } - return "" -} - -func (m *ChannelCloseSummary) GetClosingTxHash() string { - if m != 
nil { - return m.ClosingTxHash - } - return "" -} - -func (m *ChannelCloseSummary) GetRemotePubkey() string { - if m != nil { - return m.RemotePubkey - } - return "" -} - -func (m *ChannelCloseSummary) GetCapacity() int64 { - if m != nil { - return m.Capacity - } - return 0 -} - -func (m *ChannelCloseSummary) GetCloseHeight() uint32 { - if m != nil { - return m.CloseHeight - } - return 0 -} - -func (m *ChannelCloseSummary) GetSettledBalance() int64 { - if m != nil { - return m.SettledBalance - } - return 0 -} - -func (m *ChannelCloseSummary) GetTimeLockedBalance() int64 { - if m != nil { - return m.TimeLockedBalance - } - return 0 -} - -func (m *ChannelCloseSummary) GetCloseType() ChannelCloseSummary_ClosureType { - if m != nil { - return m.CloseType - } - return ChannelCloseSummary_COOPERATIVE_CLOSE -} - -func (m *ChannelCloseSummary) GetOpenInitiator() Initiator { - if m != nil { - return m.OpenInitiator - } - return Initiator_INITIATOR_UNKNOWN -} - -func (m *ChannelCloseSummary) GetCloseInitiator() Initiator { - if m != nil { - return m.CloseInitiator - } - return Initiator_INITIATOR_UNKNOWN -} - -func (m *ChannelCloseSummary) GetResolutions() []*Resolution { - if m != nil { - return m.Resolutions - } - return nil -} - -type Resolution struct { - // The type of output we are resolving. - ResolutionType ResolutionType `protobuf:"varint,1,opt,name=resolution_type,json=resolutionType,proto3,enum=lnrpc.ResolutionType" json:"resolution_type,omitempty"` - // The outcome of our on chain action that resolved the outpoint. - Outcome ResolutionOutcome `protobuf:"varint,2,opt,name=outcome,proto3,enum=lnrpc.ResolutionOutcome" json:"outcome,omitempty"` - // The outpoint that was spent by the resolution. - Outpoint *OutPoint `protobuf:"bytes,3,opt,name=outpoint,proto3" json:"outpoint,omitempty"` - // The amount that was claimed by the resolution. 
- AmountSat uint64 `protobuf:"varint,4,opt,name=amount_sat,json=amountSat,proto3" json:"amount_sat,omitempty"` - // The hex-encoded transaction ID of the sweep transaction that spent the - // output. - SweepTxid string `protobuf:"bytes,5,opt,name=sweep_txid,json=sweepTxid,proto3" json:"sweep_txid,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Resolution) Reset() { *m = Resolution{} } -func (m *Resolution) String() string { return proto.CompactTextString(m) } -func (*Resolution) ProtoMessage() {} -func (*Resolution) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{37} -} - -func (m *Resolution) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Resolution.Unmarshal(m, b) -} -func (m *Resolution) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Resolution.Marshal(b, m, deterministic) -} -func (m *Resolution) XXX_Merge(src proto.Message) { - xxx_messageInfo_Resolution.Merge(m, src) -} -func (m *Resolution) XXX_Size() int { - return xxx_messageInfo_Resolution.Size(m) -} -func (m *Resolution) XXX_DiscardUnknown() { - xxx_messageInfo_Resolution.DiscardUnknown(m) -} - -var xxx_messageInfo_Resolution proto.InternalMessageInfo - -func (m *Resolution) GetResolutionType() ResolutionType { - if m != nil { - return m.ResolutionType - } - return ResolutionType_TYPE_UNKNOWN -} - -func (m *Resolution) GetOutcome() ResolutionOutcome { - if m != nil { - return m.Outcome - } - return ResolutionOutcome_OUTCOME_UNKNOWN -} - -func (m *Resolution) GetOutpoint() *OutPoint { - if m != nil { - return m.Outpoint - } - return nil -} - -func (m *Resolution) GetAmountSat() uint64 { - if m != nil { - return m.AmountSat - } - return 0 -} - -func (m *Resolution) GetSweepTxid() string { - if m != nil { - return m.SweepTxid - } - return "" -} - -type ClosedChannelsRequest struct { - Cooperative bool 
`protobuf:"varint,1,opt,name=cooperative,proto3" json:"cooperative,omitempty"` - LocalForce bool `protobuf:"varint,2,opt,name=local_force,json=localForce,proto3" json:"local_force,omitempty"` - RemoteForce bool `protobuf:"varint,3,opt,name=remote_force,json=remoteForce,proto3" json:"remote_force,omitempty"` - Breach bool `protobuf:"varint,4,opt,name=breach,proto3" json:"breach,omitempty"` - FundingCanceled bool `protobuf:"varint,5,opt,name=funding_canceled,json=fundingCanceled,proto3" json:"funding_canceled,omitempty"` - Abandoned bool `protobuf:"varint,6,opt,name=abandoned,proto3" json:"abandoned,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ClosedChannelsRequest) Reset() { *m = ClosedChannelsRequest{} } -func (m *ClosedChannelsRequest) String() string { return proto.CompactTextString(m) } -func (*ClosedChannelsRequest) ProtoMessage() {} -func (*ClosedChannelsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{38} -} - -func (m *ClosedChannelsRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ClosedChannelsRequest.Unmarshal(m, b) -} -func (m *ClosedChannelsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ClosedChannelsRequest.Marshal(b, m, deterministic) -} -func (m *ClosedChannelsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClosedChannelsRequest.Merge(m, src) -} -func (m *ClosedChannelsRequest) XXX_Size() int { - return xxx_messageInfo_ClosedChannelsRequest.Size(m) -} -func (m *ClosedChannelsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ClosedChannelsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ClosedChannelsRequest proto.InternalMessageInfo - -func (m *ClosedChannelsRequest) GetCooperative() bool { - if m != nil { - return m.Cooperative - } - return false -} - -func (m *ClosedChannelsRequest) GetLocalForce() bool { - if m != nil { - return m.LocalForce 
- } - return false -} - -func (m *ClosedChannelsRequest) GetRemoteForce() bool { - if m != nil { - return m.RemoteForce - } - return false -} - -func (m *ClosedChannelsRequest) GetBreach() bool { - if m != nil { - return m.Breach - } - return false -} - -func (m *ClosedChannelsRequest) GetFundingCanceled() bool { - if m != nil { - return m.FundingCanceled - } - return false -} - -func (m *ClosedChannelsRequest) GetAbandoned() bool { - if m != nil { - return m.Abandoned - } - return false -} - -type ClosedChannelsResponse struct { - Channels []*ChannelCloseSummary `protobuf:"bytes,1,rep,name=channels,proto3" json:"channels,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ClosedChannelsResponse) Reset() { *m = ClosedChannelsResponse{} } -func (m *ClosedChannelsResponse) String() string { return proto.CompactTextString(m) } -func (*ClosedChannelsResponse) ProtoMessage() {} -func (*ClosedChannelsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{39} -} - -func (m *ClosedChannelsResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ClosedChannelsResponse.Unmarshal(m, b) -} -func (m *ClosedChannelsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ClosedChannelsResponse.Marshal(b, m, deterministic) -} -func (m *ClosedChannelsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClosedChannelsResponse.Merge(m, src) -} -func (m *ClosedChannelsResponse) XXX_Size() int { - return xxx_messageInfo_ClosedChannelsResponse.Size(m) -} -func (m *ClosedChannelsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ClosedChannelsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ClosedChannelsResponse proto.InternalMessageInfo - -func (m *ClosedChannelsResponse) GetChannels() []*ChannelCloseSummary { - if m != nil { - return m.Channels - } - return nil -} - -type Peer struct { - // The identity 
pubkey of the peer - PubKey string `protobuf:"bytes,1,opt,name=pub_key,json=pubKey,proto3" json:"pub_key,omitempty"` - // Network address of the peer; eg `127.0.0.1:10011` - Address string `protobuf:"bytes,3,opt,name=address,proto3" json:"address,omitempty"` - // Bytes of data transmitted to this peer - BytesSent uint64 `protobuf:"varint,4,opt,name=bytes_sent,json=bytesSent,proto3" json:"bytes_sent,omitempty"` - // Bytes of data transmitted from this peer - BytesRecv uint64 `protobuf:"varint,5,opt,name=bytes_recv,json=bytesRecv,proto3" json:"bytes_recv,omitempty"` - // Satoshis sent to this peer - SatSent int64 `protobuf:"varint,6,opt,name=sat_sent,json=satSent,proto3" json:"sat_sent,omitempty"` - // Satoshis received from this peer - SatRecv int64 `protobuf:"varint,7,opt,name=sat_recv,json=satRecv,proto3" json:"sat_recv,omitempty"` - // A channel is inbound if the counterparty initiated the channel - Inbound bool `protobuf:"varint,8,opt,name=inbound,proto3" json:"inbound,omitempty"` - // Ping time to this peer - PingTime int64 `protobuf:"varint,9,opt,name=ping_time,json=pingTime,proto3" json:"ping_time,omitempty"` - // The type of sync we are currently performing with this peer. - SyncType Peer_SyncType `protobuf:"varint,10,opt,name=sync_type,json=syncType,proto3,enum=lnrpc.Peer_SyncType" json:"sync_type,omitempty"` - // Features advertised by the remote peer in their init message. - Features map[uint32]*Feature `protobuf:"bytes,11,rep,name=features,proto3" json:"features,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // - //The latest errors received from our peer with timestamps, limited to the 10 - //most recent errors. These errors are tracked across peer connections, but - //are not persisted across lnd restarts. Note that these errors are only - //stored for peers that we have channels open with, to prevent peers from - //spamming us with errors at no cost. 
- Errors []*TimestampedError `protobuf:"bytes,12,rep,name=errors,proto3" json:"errors,omitempty"` - // - //The number of times we have recorded this peer going offline or coming - //online, recorded across restarts. Note that this value is decreased over - //time if the peer has not recently flapped, so that we can forgive peers - //with historically high flap counts. - FlapCount int32 `protobuf:"varint,13,opt,name=flap_count,json=flapCount,proto3" json:"flap_count,omitempty"` - // - //The timestamp of the last flap we observed for this peer. If this value is - //zero, we have not observed any flaps for this peer. - LastFlapNs int64 `protobuf:"varint,14,opt,name=last_flap_ns,json=lastFlapNs,proto3" json:"last_flap_ns,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Peer) Reset() { *m = Peer{} } -func (m *Peer) String() string { return proto.CompactTextString(m) } -func (*Peer) ProtoMessage() {} -func (*Peer) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{40} -} - -func (m *Peer) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Peer.Unmarshal(m, b) -} -func (m *Peer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Peer.Marshal(b, m, deterministic) -} -func (m *Peer) XXX_Merge(src proto.Message) { - xxx_messageInfo_Peer.Merge(m, src) -} -func (m *Peer) XXX_Size() int { - return xxx_messageInfo_Peer.Size(m) -} -func (m *Peer) XXX_DiscardUnknown() { - xxx_messageInfo_Peer.DiscardUnknown(m) -} - -var xxx_messageInfo_Peer proto.InternalMessageInfo - -func (m *Peer) GetPubKey() string { - if m != nil { - return m.PubKey - } - return "" -} - -func (m *Peer) GetAddress() string { - if m != nil { - return m.Address - } - return "" -} - -func (m *Peer) GetBytesSent() uint64 { - if m != nil { - return m.BytesSent - } - return 0 -} - -func (m *Peer) GetBytesRecv() uint64 { - if m != nil { - return m.BytesRecv - } 
- return 0 -} - -func (m *Peer) GetSatSent() int64 { - if m != nil { - return m.SatSent - } - return 0 -} - -func (m *Peer) GetSatRecv() int64 { - if m != nil { - return m.SatRecv - } - return 0 -} - -func (m *Peer) GetInbound() bool { - if m != nil { - return m.Inbound - } - return false -} - -func (m *Peer) GetPingTime() int64 { - if m != nil { - return m.PingTime - } - return 0 -} - -func (m *Peer) GetSyncType() Peer_SyncType { - if m != nil { - return m.SyncType - } - return Peer_UNKNOWN_SYNC -} - -func (m *Peer) GetFeatures() map[uint32]*Feature { - if m != nil { - return m.Features - } - return nil -} - -func (m *Peer) GetErrors() []*TimestampedError { - if m != nil { - return m.Errors - } - return nil -} - -func (m *Peer) GetFlapCount() int32 { - if m != nil { - return m.FlapCount - } - return 0 -} - -func (m *Peer) GetLastFlapNs() int64 { - if m != nil { - return m.LastFlapNs - } - return 0 -} - -type TimestampedError struct { - // The unix timestamp in seconds when the error occurred. - Timestamp uint64 `protobuf:"varint,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - // The string representation of the error sent by our peer. 
- Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *TimestampedError) Reset() { *m = TimestampedError{} } -func (m *TimestampedError) String() string { return proto.CompactTextString(m) } -func (*TimestampedError) ProtoMessage() {} -func (*TimestampedError) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{41} -} - -func (m *TimestampedError) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_TimestampedError.Unmarshal(m, b) -} -func (m *TimestampedError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_TimestampedError.Marshal(b, m, deterministic) -} -func (m *TimestampedError) XXX_Merge(src proto.Message) { - xxx_messageInfo_TimestampedError.Merge(m, src) -} -func (m *TimestampedError) XXX_Size() int { - return xxx_messageInfo_TimestampedError.Size(m) -} -func (m *TimestampedError) XXX_DiscardUnknown() { - xxx_messageInfo_TimestampedError.DiscardUnknown(m) -} - -var xxx_messageInfo_TimestampedError proto.InternalMessageInfo - -func (m *TimestampedError) GetTimestamp() uint64 { - if m != nil { - return m.Timestamp - } - return 0 -} - -func (m *TimestampedError) GetError() string { - if m != nil { - return m.Error - } - return "" -} - -type ListPeersRequest struct { - // - //If true, only the last error that our peer sent us will be returned with - //the peer's information, rather than the full set of historic errors we have - //stored. 
- LatestError bool `protobuf:"varint,1,opt,name=latest_error,json=latestError,proto3" json:"latest_error,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListPeersRequest) Reset() { *m = ListPeersRequest{} } -func (m *ListPeersRequest) String() string { return proto.CompactTextString(m) } -func (*ListPeersRequest) ProtoMessage() {} -func (*ListPeersRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{42} -} - -func (m *ListPeersRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListPeersRequest.Unmarshal(m, b) -} -func (m *ListPeersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListPeersRequest.Marshal(b, m, deterministic) -} -func (m *ListPeersRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListPeersRequest.Merge(m, src) -} -func (m *ListPeersRequest) XXX_Size() int { - return xxx_messageInfo_ListPeersRequest.Size(m) -} -func (m *ListPeersRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ListPeersRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ListPeersRequest proto.InternalMessageInfo - -func (m *ListPeersRequest) GetLatestError() bool { - if m != nil { - return m.LatestError - } - return false -} - -type ListPeersResponse struct { - // The list of currently connected peers - Peers []*Peer `protobuf:"bytes,1,rep,name=peers,proto3" json:"peers,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListPeersResponse) Reset() { *m = ListPeersResponse{} } -func (m *ListPeersResponse) String() string { return proto.CompactTextString(m) } -func (*ListPeersResponse) ProtoMessage() {} -func (*ListPeersResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{43} -} - -func (m *ListPeersResponse) XXX_Unmarshal(b []byte) error { - return 
xxx_messageInfo_ListPeersResponse.Unmarshal(m, b) -} -func (m *ListPeersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListPeersResponse.Marshal(b, m, deterministic) -} -func (m *ListPeersResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListPeersResponse.Merge(m, src) -} -func (m *ListPeersResponse) XXX_Size() int { - return xxx_messageInfo_ListPeersResponse.Size(m) -} -func (m *ListPeersResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ListPeersResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ListPeersResponse proto.InternalMessageInfo - -func (m *ListPeersResponse) GetPeers() []*Peer { - if m != nil { - return m.Peers - } - return nil -} - -type PeerEventSubscription struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PeerEventSubscription) Reset() { *m = PeerEventSubscription{} } -func (m *PeerEventSubscription) String() string { return proto.CompactTextString(m) } -func (*PeerEventSubscription) ProtoMessage() {} -func (*PeerEventSubscription) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{44} -} - -func (m *PeerEventSubscription) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PeerEventSubscription.Unmarshal(m, b) -} -func (m *PeerEventSubscription) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PeerEventSubscription.Marshal(b, m, deterministic) -} -func (m *PeerEventSubscription) XXX_Merge(src proto.Message) { - xxx_messageInfo_PeerEventSubscription.Merge(m, src) -} -func (m *PeerEventSubscription) XXX_Size() int { - return xxx_messageInfo_PeerEventSubscription.Size(m) -} -func (m *PeerEventSubscription) XXX_DiscardUnknown() { - xxx_messageInfo_PeerEventSubscription.DiscardUnknown(m) -} - -var xxx_messageInfo_PeerEventSubscription proto.InternalMessageInfo - -type PeerEvent struct { - // The identity pubkey of the peer. 
- PubKey string `protobuf:"bytes,1,opt,name=pub_key,json=pubKey,proto3" json:"pub_key,omitempty"` - Type PeerEvent_EventType `protobuf:"varint,2,opt,name=type,proto3,enum=lnrpc.PeerEvent_EventType" json:"type,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PeerEvent) Reset() { *m = PeerEvent{} } -func (m *PeerEvent) String() string { return proto.CompactTextString(m) } -func (*PeerEvent) ProtoMessage() {} -func (*PeerEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{45} -} - -func (m *PeerEvent) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PeerEvent.Unmarshal(m, b) -} -func (m *PeerEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PeerEvent.Marshal(b, m, deterministic) -} -func (m *PeerEvent) XXX_Merge(src proto.Message) { - xxx_messageInfo_PeerEvent.Merge(m, src) -} -func (m *PeerEvent) XXX_Size() int { - return xxx_messageInfo_PeerEvent.Size(m) -} -func (m *PeerEvent) XXX_DiscardUnknown() { - xxx_messageInfo_PeerEvent.DiscardUnknown(m) -} - -var xxx_messageInfo_PeerEvent proto.InternalMessageInfo - -func (m *PeerEvent) GetPubKey() string { - if m != nil { - return m.PubKey - } - return "" -} - -func (m *PeerEvent) GetType() PeerEvent_EventType { - if m != nil { - return m.Type - } - return PeerEvent_PEER_ONLINE -} - -type GetInfoRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetInfoRequest) Reset() { *m = GetInfoRequest{} } -func (m *GetInfoRequest) String() string { return proto.CompactTextString(m) } -func (*GetInfoRequest) ProtoMessage() {} -func (*GetInfoRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{46} -} - -func (m *GetInfoRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetInfoRequest.Unmarshal(m, b) -} -func (m 
*GetInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetInfoRequest.Marshal(b, m, deterministic) -} -func (m *GetInfoRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetInfoRequest.Merge(m, src) -} -func (m *GetInfoRequest) XXX_Size() int { - return xxx_messageInfo_GetInfoRequest.Size(m) -} -func (m *GetInfoRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetInfoRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetInfoRequest proto.InternalMessageInfo - -type GetInfoResponse struct { - // The version of the LND software that the node is running. - Version string `protobuf:"bytes,14,opt,name=version,proto3" json:"version,omitempty"` - // The SHA1 commit hash that the daemon is compiled with. - CommitHash string `protobuf:"bytes,20,opt,name=commit_hash,json=commitHash,proto3" json:"commit_hash,omitempty"` - // The identity pubkey of the current node. - IdentityPubkey string `protobuf:"bytes,1,opt,name=identity_pubkey,json=identityPubkey,proto3" json:"identity_pubkey,omitempty"` - // If applicable, the alias of the current node, e.g. 
"bob" - Alias string `protobuf:"bytes,2,opt,name=alias,proto3" json:"alias,omitempty"` - // The color of the current node in hex code format - Color string `protobuf:"bytes,17,opt,name=color,proto3" json:"color,omitempty"` - // Number of pending channels - NumPendingChannels uint32 `protobuf:"varint,3,opt,name=num_pending_channels,json=numPendingChannels,proto3" json:"num_pending_channels,omitempty"` - // Number of active channels - NumActiveChannels uint32 `protobuf:"varint,4,opt,name=num_active_channels,json=numActiveChannels,proto3" json:"num_active_channels,omitempty"` - // Number of inactive channels - NumInactiveChannels uint32 `protobuf:"varint,15,opt,name=num_inactive_channels,json=numInactiveChannels,proto3" json:"num_inactive_channels,omitempty"` - // Number of peers - NumPeers uint32 `protobuf:"varint,5,opt,name=num_peers,json=numPeers,proto3" json:"num_peers,omitempty"` - // The node's current view of the height of the best block - BlockHeight uint32 `protobuf:"varint,6,opt,name=block_height,json=blockHeight,proto3" json:"block_height,omitempty"` - // The node's current view of the hash of the best block - BlockHash string `protobuf:"bytes,8,opt,name=block_hash,json=blockHash,proto3" json:"block_hash,omitempty"` - // Timestamp of the block best known to the wallet - BestHeaderTimestamp int64 `protobuf:"varint,13,opt,name=best_header_timestamp,json=bestHeaderTimestamp,proto3" json:"best_header_timestamp,omitempty"` - // Whether the wallet's view is synced to the main chain - SyncedToChain bool `protobuf:"varint,9,opt,name=synced_to_chain,json=syncedToChain,proto3" json:"synced_to_chain,omitempty"` - // Whether we consider ourselves synced with the public channel graph. - SyncedToGraph bool `protobuf:"varint,18,opt,name=synced_to_graph,json=syncedToGraph,proto3" json:"synced_to_graph,omitempty"` - // - //Whether the current node is connected to testnet. 
This field is - //deprecated and the network field should be used instead - Testnet bool `protobuf:"varint,10,opt,name=testnet,proto3" json:"testnet,omitempty"` // Deprecated: Do not use. - // A list of active chains the node is connected to - Chains []*Chain `protobuf:"bytes,16,rep,name=chains,proto3" json:"chains,omitempty"` - // The URIs of the current node. - Uris []string `protobuf:"bytes,12,rep,name=uris,proto3" json:"uris,omitempty"` - // - //Features that our node has advertised in our init message, node - //announcements and invoices. - Features map[uint32]*Feature `protobuf:"bytes,19,rep,name=features,proto3" json:"features,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetInfoResponse) Reset() { *m = GetInfoResponse{} } -func (m *GetInfoResponse) String() string { return proto.CompactTextString(m) } -func (*GetInfoResponse) ProtoMessage() {} -func (*GetInfoResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{47} -} - -func (m *GetInfoResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetInfoResponse.Unmarshal(m, b) -} -func (m *GetInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetInfoResponse.Marshal(b, m, deterministic) -} -func (m *GetInfoResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetInfoResponse.Merge(m, src) -} -func (m *GetInfoResponse) XXX_Size() int { - return xxx_messageInfo_GetInfoResponse.Size(m) -} -func (m *GetInfoResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetInfoResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetInfoResponse proto.InternalMessageInfo - -func (m *GetInfoResponse) GetVersion() string { - if m != nil { - return m.Version - } - return "" -} - -func (m *GetInfoResponse) GetCommitHash() string { - if m != nil { - return 
m.CommitHash - } - return "" -} - -func (m *GetInfoResponse) GetIdentityPubkey() string { - if m != nil { - return m.IdentityPubkey - } - return "" -} - -func (m *GetInfoResponse) GetAlias() string { - if m != nil { - return m.Alias - } - return "" -} - -func (m *GetInfoResponse) GetColor() string { - if m != nil { - return m.Color - } - return "" -} - -func (m *GetInfoResponse) GetNumPendingChannels() uint32 { - if m != nil { - return m.NumPendingChannels - } - return 0 -} - -func (m *GetInfoResponse) GetNumActiveChannels() uint32 { - if m != nil { - return m.NumActiveChannels - } - return 0 -} - -func (m *GetInfoResponse) GetNumInactiveChannels() uint32 { - if m != nil { - return m.NumInactiveChannels - } - return 0 -} - -func (m *GetInfoResponse) GetNumPeers() uint32 { - if m != nil { - return m.NumPeers - } - return 0 -} - -func (m *GetInfoResponse) GetBlockHeight() uint32 { - if m != nil { - return m.BlockHeight - } - return 0 -} - -func (m *GetInfoResponse) GetBlockHash() string { - if m != nil { - return m.BlockHash - } - return "" -} - -func (m *GetInfoResponse) GetBestHeaderTimestamp() int64 { - if m != nil { - return m.BestHeaderTimestamp - } - return 0 -} - -func (m *GetInfoResponse) GetSyncedToChain() bool { - if m != nil { - return m.SyncedToChain - } - return false -} - -func (m *GetInfoResponse) GetSyncedToGraph() bool { - if m != nil { - return m.SyncedToGraph - } - return false -} - -// Deprecated: Do not use. 
-func (m *GetInfoResponse) GetTestnet() bool { - if m != nil { - return m.Testnet - } - return false -} - -func (m *GetInfoResponse) GetChains() []*Chain { - if m != nil { - return m.Chains - } - return nil -} - -func (m *GetInfoResponse) GetUris() []string { - if m != nil { - return m.Uris - } - return nil -} - -func (m *GetInfoResponse) GetFeatures() map[uint32]*Feature { - if m != nil { - return m.Features - } - return nil -} - -type GetRecoveryInfoRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetRecoveryInfoRequest) Reset() { *m = GetRecoveryInfoRequest{} } -func (m *GetRecoveryInfoRequest) String() string { return proto.CompactTextString(m) } -func (*GetRecoveryInfoRequest) ProtoMessage() {} -func (*GetRecoveryInfoRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{48} -} - -func (m *GetRecoveryInfoRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetRecoveryInfoRequest.Unmarshal(m, b) -} -func (m *GetRecoveryInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetRecoveryInfoRequest.Marshal(b, m, deterministic) -} -func (m *GetRecoveryInfoRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetRecoveryInfoRequest.Merge(m, src) -} -func (m *GetRecoveryInfoRequest) XXX_Size() int { - return xxx_messageInfo_GetRecoveryInfoRequest.Size(m) -} -func (m *GetRecoveryInfoRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetRecoveryInfoRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetRecoveryInfoRequest proto.InternalMessageInfo - -type GetRecoveryInfoResponse struct { - // Whether the wallet is in recovery mode - RecoveryMode bool `protobuf:"varint,1,opt,name=recovery_mode,json=recoveryMode,proto3" json:"recovery_mode,omitempty"` - // Whether the wallet recovery progress is finished - RecoveryFinished bool 
`protobuf:"varint,2,opt,name=recovery_finished,json=recoveryFinished,proto3" json:"recovery_finished,omitempty"` - // The recovery progress, ranging from 0 to 1. - Progress float64 `protobuf:"fixed64,3,opt,name=progress,proto3" json:"progress,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetRecoveryInfoResponse) Reset() { *m = GetRecoveryInfoResponse{} } -func (m *GetRecoveryInfoResponse) String() string { return proto.CompactTextString(m) } -func (*GetRecoveryInfoResponse) ProtoMessage() {} -func (*GetRecoveryInfoResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{49} -} - -func (m *GetRecoveryInfoResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetRecoveryInfoResponse.Unmarshal(m, b) -} -func (m *GetRecoveryInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetRecoveryInfoResponse.Marshal(b, m, deterministic) -} -func (m *GetRecoveryInfoResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetRecoveryInfoResponse.Merge(m, src) -} -func (m *GetRecoveryInfoResponse) XXX_Size() int { - return xxx_messageInfo_GetRecoveryInfoResponse.Size(m) -} -func (m *GetRecoveryInfoResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetRecoveryInfoResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetRecoveryInfoResponse proto.InternalMessageInfo - -func (m *GetRecoveryInfoResponse) GetRecoveryMode() bool { - if m != nil { - return m.RecoveryMode - } - return false -} - -func (m *GetRecoveryInfoResponse) GetRecoveryFinished() bool { - if m != nil { - return m.RecoveryFinished - } - return false -} - -func (m *GetRecoveryInfoResponse) GetProgress() float64 { - if m != nil { - return m.Progress - } - return 0 -} - -type Chain struct { - // The blockchain the node is on (eg bitcoin, litecoin) - Chain string `protobuf:"bytes,1,opt,name=chain,proto3" json:"chain,omitempty"` - // The 
network the node is on (eg regtest, testnet, mainnet) - Network string `protobuf:"bytes,2,opt,name=network,proto3" json:"network,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Chain) Reset() { *m = Chain{} } -func (m *Chain) String() string { return proto.CompactTextString(m) } -func (*Chain) ProtoMessage() {} -func (*Chain) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{50} -} - -func (m *Chain) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Chain.Unmarshal(m, b) -} -func (m *Chain) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Chain.Marshal(b, m, deterministic) -} -func (m *Chain) XXX_Merge(src proto.Message) { - xxx_messageInfo_Chain.Merge(m, src) -} -func (m *Chain) XXX_Size() int { - return xxx_messageInfo_Chain.Size(m) -} -func (m *Chain) XXX_DiscardUnknown() { - xxx_messageInfo_Chain.DiscardUnknown(m) -} - -var xxx_messageInfo_Chain proto.InternalMessageInfo - -func (m *Chain) GetChain() string { - if m != nil { - return m.Chain - } - return "" -} - -func (m *Chain) GetNetwork() string { - if m != nil { - return m.Network - } - return "" -} - -type ConfirmationUpdate struct { - BlockSha []byte `protobuf:"bytes,1,opt,name=block_sha,json=blockSha,proto3" json:"block_sha,omitempty"` - BlockHeight int32 `protobuf:"varint,2,opt,name=block_height,json=blockHeight,proto3" json:"block_height,omitempty"` - NumConfsLeft uint32 `protobuf:"varint,3,opt,name=num_confs_left,json=numConfsLeft,proto3" json:"num_confs_left,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ConfirmationUpdate) Reset() { *m = ConfirmationUpdate{} } -func (m *ConfirmationUpdate) String() string { return proto.CompactTextString(m) } -func (*ConfirmationUpdate) ProtoMessage() {} -func (*ConfirmationUpdate) Descriptor() ([]byte, []int) { - 
return fileDescriptor_77a6da22d6a3feb1, []int{51} -} - -func (m *ConfirmationUpdate) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ConfirmationUpdate.Unmarshal(m, b) -} -func (m *ConfirmationUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ConfirmationUpdate.Marshal(b, m, deterministic) -} -func (m *ConfirmationUpdate) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConfirmationUpdate.Merge(m, src) -} -func (m *ConfirmationUpdate) XXX_Size() int { - return xxx_messageInfo_ConfirmationUpdate.Size(m) -} -func (m *ConfirmationUpdate) XXX_DiscardUnknown() { - xxx_messageInfo_ConfirmationUpdate.DiscardUnknown(m) -} - -var xxx_messageInfo_ConfirmationUpdate proto.InternalMessageInfo - -func (m *ConfirmationUpdate) GetBlockSha() []byte { - if m != nil { - return m.BlockSha - } - return nil -} - -func (m *ConfirmationUpdate) GetBlockHeight() int32 { - if m != nil { - return m.BlockHeight - } - return 0 -} - -func (m *ConfirmationUpdate) GetNumConfsLeft() uint32 { - if m != nil { - return m.NumConfsLeft - } - return 0 -} - -type ChannelOpenUpdate struct { - ChannelPoint *ChannelPoint `protobuf:"bytes,1,opt,name=channel_point,json=channelPoint,proto3" json:"channel_point,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ChannelOpenUpdate) Reset() { *m = ChannelOpenUpdate{} } -func (m *ChannelOpenUpdate) String() string { return proto.CompactTextString(m) } -func (*ChannelOpenUpdate) ProtoMessage() {} -func (*ChannelOpenUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{52} -} - -func (m *ChannelOpenUpdate) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ChannelOpenUpdate.Unmarshal(m, b) -} -func (m *ChannelOpenUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ChannelOpenUpdate.Marshal(b, m, deterministic) -} -func (m *ChannelOpenUpdate) 
XXX_Merge(src proto.Message) { - xxx_messageInfo_ChannelOpenUpdate.Merge(m, src) -} -func (m *ChannelOpenUpdate) XXX_Size() int { - return xxx_messageInfo_ChannelOpenUpdate.Size(m) -} -func (m *ChannelOpenUpdate) XXX_DiscardUnknown() { - xxx_messageInfo_ChannelOpenUpdate.DiscardUnknown(m) -} - -var xxx_messageInfo_ChannelOpenUpdate proto.InternalMessageInfo - -func (m *ChannelOpenUpdate) GetChannelPoint() *ChannelPoint { - if m != nil { - return m.ChannelPoint - } - return nil -} - -type ChannelCloseUpdate struct { - ClosingTxid []byte `protobuf:"bytes,1,opt,name=closing_txid,json=closingTxid,proto3" json:"closing_txid,omitempty"` - Success bool `protobuf:"varint,2,opt,name=success,proto3" json:"success,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ChannelCloseUpdate) Reset() { *m = ChannelCloseUpdate{} } -func (m *ChannelCloseUpdate) String() string { return proto.CompactTextString(m) } -func (*ChannelCloseUpdate) ProtoMessage() {} -func (*ChannelCloseUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{53} -} - -func (m *ChannelCloseUpdate) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ChannelCloseUpdate.Unmarshal(m, b) -} -func (m *ChannelCloseUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ChannelCloseUpdate.Marshal(b, m, deterministic) -} -func (m *ChannelCloseUpdate) XXX_Merge(src proto.Message) { - xxx_messageInfo_ChannelCloseUpdate.Merge(m, src) -} -func (m *ChannelCloseUpdate) XXX_Size() int { - return xxx_messageInfo_ChannelCloseUpdate.Size(m) -} -func (m *ChannelCloseUpdate) XXX_DiscardUnknown() { - xxx_messageInfo_ChannelCloseUpdate.DiscardUnknown(m) -} - -var xxx_messageInfo_ChannelCloseUpdate proto.InternalMessageInfo - -func (m *ChannelCloseUpdate) GetClosingTxid() []byte { - if m != nil { - return m.ClosingTxid - } - return nil -} - -func (m *ChannelCloseUpdate) 
GetSuccess() bool { - if m != nil { - return m.Success - } - return false -} - -type CloseChannelRequest struct { - // - //The outpoint (txid:index) of the funding transaction. With this value, Bob - //will be able to generate a signature for Alice's version of the commitment - //transaction. - ChannelPoint *ChannelPoint `protobuf:"bytes,1,opt,name=channel_point,json=channelPoint,proto3" json:"channel_point,omitempty"` - // If true, then the channel will be closed forcibly. This means the - // current commitment transaction will be signed and broadcast. - Force bool `protobuf:"varint,2,opt,name=force,proto3" json:"force,omitempty"` - // The target number of blocks that the closure transaction should be - // confirmed by. - TargetConf int32 `protobuf:"varint,3,opt,name=target_conf,json=targetConf,proto3" json:"target_conf,omitempty"` - // A manual fee rate set in sat/byte that should be used when crafting the - // closure transaction. - SatPerByte int64 `protobuf:"varint,4,opt,name=sat_per_byte,json=satPerByte,proto3" json:"sat_per_byte,omitempty"` - // - //An optional address to send funds to in the case of a cooperative close. - //If the channel was opened with an upfront shutdown script and this field - //is set, the request to close will fail because the channel must pay out - //to the upfront shutdown addresss. 
- DeliveryAddress string `protobuf:"bytes,5,opt,name=delivery_address,json=deliveryAddress,proto3" json:"delivery_address,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CloseChannelRequest) Reset() { *m = CloseChannelRequest{} } -func (m *CloseChannelRequest) String() string { return proto.CompactTextString(m) } -func (*CloseChannelRequest) ProtoMessage() {} -func (*CloseChannelRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{54} -} - -func (m *CloseChannelRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CloseChannelRequest.Unmarshal(m, b) -} -func (m *CloseChannelRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CloseChannelRequest.Marshal(b, m, deterministic) -} -func (m *CloseChannelRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CloseChannelRequest.Merge(m, src) -} -func (m *CloseChannelRequest) XXX_Size() int { - return xxx_messageInfo_CloseChannelRequest.Size(m) -} -func (m *CloseChannelRequest) XXX_DiscardUnknown() { - xxx_messageInfo_CloseChannelRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_CloseChannelRequest proto.InternalMessageInfo - -func (m *CloseChannelRequest) GetChannelPoint() *ChannelPoint { - if m != nil { - return m.ChannelPoint - } - return nil -} - -func (m *CloseChannelRequest) GetForce() bool { - if m != nil { - return m.Force - } - return false -} - -func (m *CloseChannelRequest) GetTargetConf() int32 { - if m != nil { - return m.TargetConf - } - return 0 -} - -func (m *CloseChannelRequest) GetSatPerByte() int64 { - if m != nil { - return m.SatPerByte - } - return 0 -} - -func (m *CloseChannelRequest) GetDeliveryAddress() string { - if m != nil { - return m.DeliveryAddress - } - return "" -} - -type CloseStatusUpdate struct { - // Types that are valid to be assigned to Update: - // *CloseStatusUpdate_ClosePending - // 
*CloseStatusUpdate_ChanClose - Update isCloseStatusUpdate_Update `protobuf_oneof:"update"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CloseStatusUpdate) Reset() { *m = CloseStatusUpdate{} } -func (m *CloseStatusUpdate) String() string { return proto.CompactTextString(m) } -func (*CloseStatusUpdate) ProtoMessage() {} -func (*CloseStatusUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{55} -} - -func (m *CloseStatusUpdate) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CloseStatusUpdate.Unmarshal(m, b) -} -func (m *CloseStatusUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CloseStatusUpdate.Marshal(b, m, deterministic) -} -func (m *CloseStatusUpdate) XXX_Merge(src proto.Message) { - xxx_messageInfo_CloseStatusUpdate.Merge(m, src) -} -func (m *CloseStatusUpdate) XXX_Size() int { - return xxx_messageInfo_CloseStatusUpdate.Size(m) -} -func (m *CloseStatusUpdate) XXX_DiscardUnknown() { - xxx_messageInfo_CloseStatusUpdate.DiscardUnknown(m) -} - -var xxx_messageInfo_CloseStatusUpdate proto.InternalMessageInfo - -type isCloseStatusUpdate_Update interface { - isCloseStatusUpdate_Update() -} - -type CloseStatusUpdate_ClosePending struct { - ClosePending *PendingUpdate `protobuf:"bytes,1,opt,name=close_pending,json=closePending,proto3,oneof"` -} - -type CloseStatusUpdate_ChanClose struct { - ChanClose *ChannelCloseUpdate `protobuf:"bytes,3,opt,name=chan_close,json=chanClose,proto3,oneof"` -} - -func (*CloseStatusUpdate_ClosePending) isCloseStatusUpdate_Update() {} - -func (*CloseStatusUpdate_ChanClose) isCloseStatusUpdate_Update() {} - -func (m *CloseStatusUpdate) GetUpdate() isCloseStatusUpdate_Update { - if m != nil { - return m.Update - } - return nil -} - -func (m *CloseStatusUpdate) GetClosePending() *PendingUpdate { - if x, ok := m.GetUpdate().(*CloseStatusUpdate_ClosePending); ok { - return 
x.ClosePending - } - return nil -} - -func (m *CloseStatusUpdate) GetChanClose() *ChannelCloseUpdate { - if x, ok := m.GetUpdate().(*CloseStatusUpdate_ChanClose); ok { - return x.ChanClose - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*CloseStatusUpdate) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*CloseStatusUpdate_ClosePending)(nil), - (*CloseStatusUpdate_ChanClose)(nil), - } -} - -type PendingUpdate struct { - Txid []byte `protobuf:"bytes,1,opt,name=txid,proto3" json:"txid,omitempty"` - OutputIndex uint32 `protobuf:"varint,2,opt,name=output_index,json=outputIndex,proto3" json:"output_index,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PendingUpdate) Reset() { *m = PendingUpdate{} } -func (m *PendingUpdate) String() string { return proto.CompactTextString(m) } -func (*PendingUpdate) ProtoMessage() {} -func (*PendingUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{56} -} - -func (m *PendingUpdate) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PendingUpdate.Unmarshal(m, b) -} -func (m *PendingUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PendingUpdate.Marshal(b, m, deterministic) -} -func (m *PendingUpdate) XXX_Merge(src proto.Message) { - xxx_messageInfo_PendingUpdate.Merge(m, src) -} -func (m *PendingUpdate) XXX_Size() int { - return xxx_messageInfo_PendingUpdate.Size(m) -} -func (m *PendingUpdate) XXX_DiscardUnknown() { - xxx_messageInfo_PendingUpdate.DiscardUnknown(m) -} - -var xxx_messageInfo_PendingUpdate proto.InternalMessageInfo - -func (m *PendingUpdate) GetTxid() []byte { - if m != nil { - return m.Txid - } - return nil -} - -func (m *PendingUpdate) GetOutputIndex() uint32 { - if m != nil { - return m.OutputIndex - } - return 0 -} - -type ReadyForPsbtFunding struct { - // - //The P2WSH address of the 
channel funding multisig address that the below - //specified amount in satoshis needs to be sent to. - FundingAddress string `protobuf:"bytes,1,opt,name=funding_address,json=fundingAddress,proto3" json:"funding_address,omitempty"` - // - //The exact amount in satoshis that needs to be sent to the above address to - //fund the pending channel. - FundingAmount int64 `protobuf:"varint,2,opt,name=funding_amount,json=fundingAmount,proto3" json:"funding_amount,omitempty"` - // - //A raw PSBT that contains the pending channel output. If a base PSBT was - //provided in the PsbtShim, this is the base PSBT with one additional output. - //If no base PSBT was specified, this is an otherwise empty PSBT with exactly - //one output. - Psbt []byte `protobuf:"bytes,3,opt,name=psbt,proto3" json:"psbt,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ReadyForPsbtFunding) Reset() { *m = ReadyForPsbtFunding{} } -func (m *ReadyForPsbtFunding) String() string { return proto.CompactTextString(m) } -func (*ReadyForPsbtFunding) ProtoMessage() {} -func (*ReadyForPsbtFunding) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{57} -} - -func (m *ReadyForPsbtFunding) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ReadyForPsbtFunding.Unmarshal(m, b) -} -func (m *ReadyForPsbtFunding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ReadyForPsbtFunding.Marshal(b, m, deterministic) -} -func (m *ReadyForPsbtFunding) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReadyForPsbtFunding.Merge(m, src) -} -func (m *ReadyForPsbtFunding) XXX_Size() int { - return xxx_messageInfo_ReadyForPsbtFunding.Size(m) -} -func (m *ReadyForPsbtFunding) XXX_DiscardUnknown() { - xxx_messageInfo_ReadyForPsbtFunding.DiscardUnknown(m) -} - -var xxx_messageInfo_ReadyForPsbtFunding proto.InternalMessageInfo - -func (m *ReadyForPsbtFunding) GetFundingAddress() 
string { - if m != nil { - return m.FundingAddress - } - return "" -} - -func (m *ReadyForPsbtFunding) GetFundingAmount() int64 { - if m != nil { - return m.FundingAmount - } - return 0 -} - -func (m *ReadyForPsbtFunding) GetPsbt() []byte { - if m != nil { - return m.Psbt - } - return nil -} - -type OpenChannelRequest struct { - // - //The pubkey of the node to open a channel with. When using REST, this field - //must be encoded as base64. - NodePubkey []byte `protobuf:"bytes,2,opt,name=node_pubkey,json=nodePubkey,proto3" json:"node_pubkey,omitempty"` - // - //The hex encoded pubkey of the node to open a channel with. Deprecated now - //that the REST gateway supports base64 encoding of bytes fields. - NodePubkeyString string `protobuf:"bytes,3,opt,name=node_pubkey_string,json=nodePubkeyString,proto3" json:"node_pubkey_string,omitempty"` // Deprecated: Do not use. - // The number of satoshis the wallet should commit to the channel - LocalFundingAmount int64 `protobuf:"varint,4,opt,name=local_funding_amount,json=localFundingAmount,proto3" json:"local_funding_amount,omitempty"` - // The number of satoshis to push to the remote side as part of the initial - // commitment state - PushSat int64 `protobuf:"varint,5,opt,name=push_sat,json=pushSat,proto3" json:"push_sat,omitempty"` - // The target number of blocks that the funding transaction should be - // confirmed by. - TargetConf int32 `protobuf:"varint,6,opt,name=target_conf,json=targetConf,proto3" json:"target_conf,omitempty"` - // A manual fee rate set in sat/byte that should be used when crafting the - // funding transaction. - SatPerByte int64 `protobuf:"varint,7,opt,name=sat_per_byte,json=satPerByte,proto3" json:"sat_per_byte,omitempty"` - // Whether this channel should be private, not announced to the greater - // network. - Private bool `protobuf:"varint,8,opt,name=private,proto3" json:"private,omitempty"` - // The minimum value in millisatoshi we will require for incoming HTLCs on - // the channel. 
- MinHtlcMsat int64 `protobuf:"varint,9,opt,name=min_htlc_msat,json=minHtlcMsat,proto3" json:"min_htlc_msat,omitempty"` - // The delay we require on the remote's commitment transaction. If this is - // not set, it will be scaled automatically with the channel size. - RemoteCsvDelay uint32 `protobuf:"varint,10,opt,name=remote_csv_delay,json=remoteCsvDelay,proto3" json:"remote_csv_delay,omitempty"` - // The minimum number of confirmations each one of your outputs used for - // the funding transaction must satisfy. - MinConfs int32 `protobuf:"varint,11,opt,name=min_confs,json=minConfs,proto3" json:"min_confs,omitempty"` - // Whether unconfirmed outputs should be used as inputs for the funding - // transaction. - SpendUnconfirmed bool `protobuf:"varint,12,opt,name=spend_unconfirmed,json=spendUnconfirmed,proto3" json:"spend_unconfirmed,omitempty"` - // - //Close address is an optional address which specifies the address to which - //funds should be paid out to upon cooperative close. This field may only be - //set if the peer supports the option upfront feature bit (call listpeers - //to check). The remote peer will only accept cooperative closes to this - //address if it is set. - // - //Note: If this value is set on channel creation, you will *not* be able to - //cooperatively close out to a different address. - CloseAddress string `protobuf:"bytes,13,opt,name=close_address,json=closeAddress,proto3" json:"close_address,omitempty"` - // - //Funding shims are an optional argument that allow the caller to intercept - //certain funding functionality. For example, a shim can be provided to use a - //particular key for the commitment key (ideally cold) rather than use one - //that is generated by the wallet as normal, or signal that signing will be - //carried out in an interactive manner (PSBT based). 
- FundingShim *FundingShim `protobuf:"bytes,14,opt,name=funding_shim,json=fundingShim,proto3" json:"funding_shim,omitempty"` - // - //The maximum amount of coins in millisatoshi that can be pending within - //the channel. It only applies to the remote party. - RemoteMaxValueInFlightMsat uint64 `protobuf:"varint,15,opt,name=remote_max_value_in_flight_msat,json=remoteMaxValueInFlightMsat,proto3" json:"remote_max_value_in_flight_msat,omitempty"` - // - //The maximum number of concurrent HTLCs we will allow the remote party to add - //to the commitment transaction. - RemoteMaxHtlcs uint32 `protobuf:"varint,16,opt,name=remote_max_htlcs,json=remoteMaxHtlcs,proto3" json:"remote_max_htlcs,omitempty"` - // - //Max local csv is the maximum csv delay we will allow for our own commitment - //transaction. - MaxLocalCsv uint32 `protobuf:"varint,17,opt,name=max_local_csv,json=maxLocalCsv,proto3" json:"max_local_csv,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *OpenChannelRequest) Reset() { *m = OpenChannelRequest{} } -func (m *OpenChannelRequest) String() string { return proto.CompactTextString(m) } -func (*OpenChannelRequest) ProtoMessage() {} -func (*OpenChannelRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{58} -} - -func (m *OpenChannelRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_OpenChannelRequest.Unmarshal(m, b) -} -func (m *OpenChannelRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_OpenChannelRequest.Marshal(b, m, deterministic) -} -func (m *OpenChannelRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_OpenChannelRequest.Merge(m, src) -} -func (m *OpenChannelRequest) XXX_Size() int { - return xxx_messageInfo_OpenChannelRequest.Size(m) -} -func (m *OpenChannelRequest) XXX_DiscardUnknown() { - xxx_messageInfo_OpenChannelRequest.DiscardUnknown(m) -} - -var 
xxx_messageInfo_OpenChannelRequest proto.InternalMessageInfo - -func (m *OpenChannelRequest) GetNodePubkey() []byte { - if m != nil { - return m.NodePubkey - } - return nil -} - -// Deprecated: Do not use. -func (m *OpenChannelRequest) GetNodePubkeyString() string { - if m != nil { - return m.NodePubkeyString - } - return "" -} - -func (m *OpenChannelRequest) GetLocalFundingAmount() int64 { - if m != nil { - return m.LocalFundingAmount - } - return 0 -} - -func (m *OpenChannelRequest) GetPushSat() int64 { - if m != nil { - return m.PushSat - } - return 0 -} - -func (m *OpenChannelRequest) GetTargetConf() int32 { - if m != nil { - return m.TargetConf - } - return 0 -} - -func (m *OpenChannelRequest) GetSatPerByte() int64 { - if m != nil { - return m.SatPerByte - } - return 0 -} - -func (m *OpenChannelRequest) GetPrivate() bool { - if m != nil { - return m.Private - } - return false -} - -func (m *OpenChannelRequest) GetMinHtlcMsat() int64 { - if m != nil { - return m.MinHtlcMsat - } - return 0 -} - -func (m *OpenChannelRequest) GetRemoteCsvDelay() uint32 { - if m != nil { - return m.RemoteCsvDelay - } - return 0 -} - -func (m *OpenChannelRequest) GetMinConfs() int32 { - if m != nil { - return m.MinConfs - } - return 0 -} - -func (m *OpenChannelRequest) GetSpendUnconfirmed() bool { - if m != nil { - return m.SpendUnconfirmed - } - return false -} - -func (m *OpenChannelRequest) GetCloseAddress() string { - if m != nil { - return m.CloseAddress - } - return "" -} - -func (m *OpenChannelRequest) GetFundingShim() *FundingShim { - if m != nil { - return m.FundingShim - } - return nil -} - -func (m *OpenChannelRequest) GetRemoteMaxValueInFlightMsat() uint64 { - if m != nil { - return m.RemoteMaxValueInFlightMsat - } - return 0 -} - -func (m *OpenChannelRequest) GetRemoteMaxHtlcs() uint32 { - if m != nil { - return m.RemoteMaxHtlcs - } - return 0 -} - -func (m *OpenChannelRequest) GetMaxLocalCsv() uint32 { - if m != nil { - return m.MaxLocalCsv - } - return 0 -} - -type 
OpenStatusUpdate struct { - // Types that are valid to be assigned to Update: - // *OpenStatusUpdate_ChanPending - // *OpenStatusUpdate_ChanOpen - // *OpenStatusUpdate_PsbtFund - Update isOpenStatusUpdate_Update `protobuf_oneof:"update"` - // - //The pending channel ID of the created channel. This value may be used to - //further the funding flow manually via the FundingStateStep method. - PendingChanId []byte `protobuf:"bytes,4,opt,name=pending_chan_id,json=pendingChanId,proto3" json:"pending_chan_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *OpenStatusUpdate) Reset() { *m = OpenStatusUpdate{} } -func (m *OpenStatusUpdate) String() string { return proto.CompactTextString(m) } -func (*OpenStatusUpdate) ProtoMessage() {} -func (*OpenStatusUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{59} -} - -func (m *OpenStatusUpdate) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_OpenStatusUpdate.Unmarshal(m, b) -} -func (m *OpenStatusUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_OpenStatusUpdate.Marshal(b, m, deterministic) -} -func (m *OpenStatusUpdate) XXX_Merge(src proto.Message) { - xxx_messageInfo_OpenStatusUpdate.Merge(m, src) -} -func (m *OpenStatusUpdate) XXX_Size() int { - return xxx_messageInfo_OpenStatusUpdate.Size(m) -} -func (m *OpenStatusUpdate) XXX_DiscardUnknown() { - xxx_messageInfo_OpenStatusUpdate.DiscardUnknown(m) -} - -var xxx_messageInfo_OpenStatusUpdate proto.InternalMessageInfo - -type isOpenStatusUpdate_Update interface { - isOpenStatusUpdate_Update() -} - -type OpenStatusUpdate_ChanPending struct { - ChanPending *PendingUpdate `protobuf:"bytes,1,opt,name=chan_pending,json=chanPending,proto3,oneof"` -} - -type OpenStatusUpdate_ChanOpen struct { - ChanOpen *ChannelOpenUpdate `protobuf:"bytes,3,opt,name=chan_open,json=chanOpen,proto3,oneof"` -} - -type 
OpenStatusUpdate_PsbtFund struct { - PsbtFund *ReadyForPsbtFunding `protobuf:"bytes,5,opt,name=psbt_fund,json=psbtFund,proto3,oneof"` -} - -func (*OpenStatusUpdate_ChanPending) isOpenStatusUpdate_Update() {} - -func (*OpenStatusUpdate_ChanOpen) isOpenStatusUpdate_Update() {} - -func (*OpenStatusUpdate_PsbtFund) isOpenStatusUpdate_Update() {} - -func (m *OpenStatusUpdate) GetUpdate() isOpenStatusUpdate_Update { - if m != nil { - return m.Update - } - return nil -} - -func (m *OpenStatusUpdate) GetChanPending() *PendingUpdate { - if x, ok := m.GetUpdate().(*OpenStatusUpdate_ChanPending); ok { - return x.ChanPending - } - return nil -} - -func (m *OpenStatusUpdate) GetChanOpen() *ChannelOpenUpdate { - if x, ok := m.GetUpdate().(*OpenStatusUpdate_ChanOpen); ok { - return x.ChanOpen - } - return nil -} - -func (m *OpenStatusUpdate) GetPsbtFund() *ReadyForPsbtFunding { - if x, ok := m.GetUpdate().(*OpenStatusUpdate_PsbtFund); ok { - return x.PsbtFund - } - return nil -} - -func (m *OpenStatusUpdate) GetPendingChanId() []byte { - if m != nil { - return m.PendingChanId - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*OpenStatusUpdate) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*OpenStatusUpdate_ChanPending)(nil), - (*OpenStatusUpdate_ChanOpen)(nil), - (*OpenStatusUpdate_PsbtFund)(nil), - } -} - -type KeyLocator struct { - // The family of key being identified. - KeyFamily int32 `protobuf:"varint,1,opt,name=key_family,json=keyFamily,proto3" json:"key_family,omitempty"` - // The precise index of the key being identified. 
- KeyIndex int32 `protobuf:"varint,2,opt,name=key_index,json=keyIndex,proto3" json:"key_index,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *KeyLocator) Reset() { *m = KeyLocator{} } -func (m *KeyLocator) String() string { return proto.CompactTextString(m) } -func (*KeyLocator) ProtoMessage() {} -func (*KeyLocator) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{60} -} - -func (m *KeyLocator) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_KeyLocator.Unmarshal(m, b) -} -func (m *KeyLocator) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_KeyLocator.Marshal(b, m, deterministic) -} -func (m *KeyLocator) XXX_Merge(src proto.Message) { - xxx_messageInfo_KeyLocator.Merge(m, src) -} -func (m *KeyLocator) XXX_Size() int { - return xxx_messageInfo_KeyLocator.Size(m) -} -func (m *KeyLocator) XXX_DiscardUnknown() { - xxx_messageInfo_KeyLocator.DiscardUnknown(m) -} - -var xxx_messageInfo_KeyLocator proto.InternalMessageInfo - -func (m *KeyLocator) GetKeyFamily() int32 { - if m != nil { - return m.KeyFamily - } - return 0 -} - -func (m *KeyLocator) GetKeyIndex() int32 { - if m != nil { - return m.KeyIndex - } - return 0 -} - -type KeyDescriptor struct { - // - //The raw bytes of the key being identified. - RawKeyBytes []byte `protobuf:"bytes,1,opt,name=raw_key_bytes,json=rawKeyBytes,proto3" json:"raw_key_bytes,omitempty"` - // - //The key locator that identifies which key to use for signing. 
- KeyLoc *KeyLocator `protobuf:"bytes,2,opt,name=key_loc,json=keyLoc,proto3" json:"key_loc,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *KeyDescriptor) Reset() { *m = KeyDescriptor{} } -func (m *KeyDescriptor) String() string { return proto.CompactTextString(m) } -func (*KeyDescriptor) ProtoMessage() {} -func (*KeyDescriptor) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{61} -} - -func (m *KeyDescriptor) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_KeyDescriptor.Unmarshal(m, b) -} -func (m *KeyDescriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_KeyDescriptor.Marshal(b, m, deterministic) -} -func (m *KeyDescriptor) XXX_Merge(src proto.Message) { - xxx_messageInfo_KeyDescriptor.Merge(m, src) -} -func (m *KeyDescriptor) XXX_Size() int { - return xxx_messageInfo_KeyDescriptor.Size(m) -} -func (m *KeyDescriptor) XXX_DiscardUnknown() { - xxx_messageInfo_KeyDescriptor.DiscardUnknown(m) -} - -var xxx_messageInfo_KeyDescriptor proto.InternalMessageInfo - -func (m *KeyDescriptor) GetRawKeyBytes() []byte { - if m != nil { - return m.RawKeyBytes - } - return nil -} - -func (m *KeyDescriptor) GetKeyLoc() *KeyLocator { - if m != nil { - return m.KeyLoc - } - return nil -} - -type ChanPointShim struct { - // - //The size of the pre-crafted output to be used as the channel point for this - //channel funding. - Amt int64 `protobuf:"varint,1,opt,name=amt,proto3" json:"amt,omitempty"` - // The target channel point to refrence in created commitment transactions. - ChanPoint *ChannelPoint `protobuf:"bytes,2,opt,name=chan_point,json=chanPoint,proto3" json:"chan_point,omitempty"` - // Our local key to use when creating the multi-sig output. 
- LocalKey *KeyDescriptor `protobuf:"bytes,3,opt,name=local_key,json=localKey,proto3" json:"local_key,omitempty"` - // The key of the remote party to use when creating the multi-sig output. - RemoteKey []byte `protobuf:"bytes,4,opt,name=remote_key,json=remoteKey,proto3" json:"remote_key,omitempty"` - // - //If non-zero, then this will be used as the pending channel ID on the wire - //protocol to initate the funding request. This is an optional field, and - //should only be set if the responder is already expecting a specific pending - //channel ID. - PendingChanId []byte `protobuf:"bytes,5,opt,name=pending_chan_id,json=pendingChanId,proto3" json:"pending_chan_id,omitempty"` - // - //This uint32 indicates if this channel is to be considered 'frozen'. A frozen - //channel does not allow a cooperative channel close by the initiator. The - //thaw_height is the height that this restriction stops applying to the - //channel. The height can be interpreted in two ways: as a relative height if - //the value is less than 500,000, or as an absolute height otherwise. 
- ThawHeight uint32 `protobuf:"varint,6,opt,name=thaw_height,json=thawHeight,proto3" json:"thaw_height,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ChanPointShim) Reset() { *m = ChanPointShim{} } -func (m *ChanPointShim) String() string { return proto.CompactTextString(m) } -func (*ChanPointShim) ProtoMessage() {} -func (*ChanPointShim) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{62} -} - -func (m *ChanPointShim) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ChanPointShim.Unmarshal(m, b) -} -func (m *ChanPointShim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ChanPointShim.Marshal(b, m, deterministic) -} -func (m *ChanPointShim) XXX_Merge(src proto.Message) { - xxx_messageInfo_ChanPointShim.Merge(m, src) -} -func (m *ChanPointShim) XXX_Size() int { - return xxx_messageInfo_ChanPointShim.Size(m) -} -func (m *ChanPointShim) XXX_DiscardUnknown() { - xxx_messageInfo_ChanPointShim.DiscardUnknown(m) -} - -var xxx_messageInfo_ChanPointShim proto.InternalMessageInfo - -func (m *ChanPointShim) GetAmt() int64 { - if m != nil { - return m.Amt - } - return 0 -} - -func (m *ChanPointShim) GetChanPoint() *ChannelPoint { - if m != nil { - return m.ChanPoint - } - return nil -} - -func (m *ChanPointShim) GetLocalKey() *KeyDescriptor { - if m != nil { - return m.LocalKey - } - return nil -} - -func (m *ChanPointShim) GetRemoteKey() []byte { - if m != nil { - return m.RemoteKey - } - return nil -} - -func (m *ChanPointShim) GetPendingChanId() []byte { - if m != nil { - return m.PendingChanId - } - return nil -} - -func (m *ChanPointShim) GetThawHeight() uint32 { - if m != nil { - return m.ThawHeight - } - return 0 -} - -type PsbtShim struct { - // - //A unique identifier of 32 random bytes that will be used as the pending - //channel ID to identify the PSBT state machine when interacting with it and - 
//on the wire protocol to initiate the funding request. - PendingChanId []byte `protobuf:"bytes,1,opt,name=pending_chan_id,json=pendingChanId,proto3" json:"pending_chan_id,omitempty"` - // - //An optional base PSBT the new channel output will be added to. If this is - //non-empty, it must be a binary serialized PSBT. - BasePsbt []byte `protobuf:"bytes,2,opt,name=base_psbt,json=basePsbt,proto3" json:"base_psbt,omitempty"` - // - //If a channel should be part of a batch (multiple channel openings in one - //transaction), it can be dangerous if the whole batch transaction is - //published too early before all channel opening negotiations are completed. - //This flag prevents this particular channel from broadcasting the transaction - //after the negotiation with the remote peer. In a batch of channel openings - //this flag should be set to true for every channel but the very last. - NoPublish bool `protobuf:"varint,3,opt,name=no_publish,json=noPublish,proto3" json:"no_publish,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PsbtShim) Reset() { *m = PsbtShim{} } -func (m *PsbtShim) String() string { return proto.CompactTextString(m) } -func (*PsbtShim) ProtoMessage() {} -func (*PsbtShim) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{63} -} - -func (m *PsbtShim) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PsbtShim.Unmarshal(m, b) -} -func (m *PsbtShim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PsbtShim.Marshal(b, m, deterministic) -} -func (m *PsbtShim) XXX_Merge(src proto.Message) { - xxx_messageInfo_PsbtShim.Merge(m, src) -} -func (m *PsbtShim) XXX_Size() int { - return xxx_messageInfo_PsbtShim.Size(m) -} -func (m *PsbtShim) XXX_DiscardUnknown() { - xxx_messageInfo_PsbtShim.DiscardUnknown(m) -} - -var xxx_messageInfo_PsbtShim proto.InternalMessageInfo - -func (m *PsbtShim) GetPendingChanId() 
[]byte { - if m != nil { - return m.PendingChanId - } - return nil -} - -func (m *PsbtShim) GetBasePsbt() []byte { - if m != nil { - return m.BasePsbt - } - return nil -} - -func (m *PsbtShim) GetNoPublish() bool { - if m != nil { - return m.NoPublish - } - return false -} - -type FundingShim struct { - // Types that are valid to be assigned to Shim: - // *FundingShim_ChanPointShim - // *FundingShim_PsbtShim - Shim isFundingShim_Shim `protobuf_oneof:"shim"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FundingShim) Reset() { *m = FundingShim{} } -func (m *FundingShim) String() string { return proto.CompactTextString(m) } -func (*FundingShim) ProtoMessage() {} -func (*FundingShim) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{64} -} - -func (m *FundingShim) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FundingShim.Unmarshal(m, b) -} -func (m *FundingShim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FundingShim.Marshal(b, m, deterministic) -} -func (m *FundingShim) XXX_Merge(src proto.Message) { - xxx_messageInfo_FundingShim.Merge(m, src) -} -func (m *FundingShim) XXX_Size() int { - return xxx_messageInfo_FundingShim.Size(m) -} -func (m *FundingShim) XXX_DiscardUnknown() { - xxx_messageInfo_FundingShim.DiscardUnknown(m) -} - -var xxx_messageInfo_FundingShim proto.InternalMessageInfo - -type isFundingShim_Shim interface { - isFundingShim_Shim() -} - -type FundingShim_ChanPointShim struct { - ChanPointShim *ChanPointShim `protobuf:"bytes,1,opt,name=chan_point_shim,json=chanPointShim,proto3,oneof"` -} - -type FundingShim_PsbtShim struct { - PsbtShim *PsbtShim `protobuf:"bytes,2,opt,name=psbt_shim,json=psbtShim,proto3,oneof"` -} - -func (*FundingShim_ChanPointShim) isFundingShim_Shim() {} - -func (*FundingShim_PsbtShim) isFundingShim_Shim() {} - -func (m *FundingShim) GetShim() isFundingShim_Shim { - if m 
!= nil { - return m.Shim - } - return nil -} - -func (m *FundingShim) GetChanPointShim() *ChanPointShim { - if x, ok := m.GetShim().(*FundingShim_ChanPointShim); ok { - return x.ChanPointShim - } - return nil -} - -func (m *FundingShim) GetPsbtShim() *PsbtShim { - if x, ok := m.GetShim().(*FundingShim_PsbtShim); ok { - return x.PsbtShim - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*FundingShim) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*FundingShim_ChanPointShim)(nil), - (*FundingShim_PsbtShim)(nil), - } -} - -type FundingShimCancel struct { - // The pending channel ID of the channel to cancel the funding shim for. - PendingChanId []byte `protobuf:"bytes,1,opt,name=pending_chan_id,json=pendingChanId,proto3" json:"pending_chan_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FundingShimCancel) Reset() { *m = FundingShimCancel{} } -func (m *FundingShimCancel) String() string { return proto.CompactTextString(m) } -func (*FundingShimCancel) ProtoMessage() {} -func (*FundingShimCancel) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{65} -} - -func (m *FundingShimCancel) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FundingShimCancel.Unmarshal(m, b) -} -func (m *FundingShimCancel) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FundingShimCancel.Marshal(b, m, deterministic) -} -func (m *FundingShimCancel) XXX_Merge(src proto.Message) { - xxx_messageInfo_FundingShimCancel.Merge(m, src) -} -func (m *FundingShimCancel) XXX_Size() int { - return xxx_messageInfo_FundingShimCancel.Size(m) -} -func (m *FundingShimCancel) XXX_DiscardUnknown() { - xxx_messageInfo_FundingShimCancel.DiscardUnknown(m) -} - -var xxx_messageInfo_FundingShimCancel proto.InternalMessageInfo - -func (m *FundingShimCancel) GetPendingChanId() []byte { - if m != nil 
{ - return m.PendingChanId - } - return nil -} - -type FundingPsbtVerify struct { - // - //The funded but not yet signed PSBT that sends the exact channel capacity - //amount to the PK script returned in the open channel message in a previous - //step. - FundedPsbt []byte `protobuf:"bytes,1,opt,name=funded_psbt,json=fundedPsbt,proto3" json:"funded_psbt,omitempty"` - // The pending channel ID of the channel to get the PSBT for. - PendingChanId []byte `protobuf:"bytes,2,opt,name=pending_chan_id,json=pendingChanId,proto3" json:"pending_chan_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FundingPsbtVerify) Reset() { *m = FundingPsbtVerify{} } -func (m *FundingPsbtVerify) String() string { return proto.CompactTextString(m) } -func (*FundingPsbtVerify) ProtoMessage() {} -func (*FundingPsbtVerify) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{66} -} - -func (m *FundingPsbtVerify) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FundingPsbtVerify.Unmarshal(m, b) -} -func (m *FundingPsbtVerify) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FundingPsbtVerify.Marshal(b, m, deterministic) -} -func (m *FundingPsbtVerify) XXX_Merge(src proto.Message) { - xxx_messageInfo_FundingPsbtVerify.Merge(m, src) -} -func (m *FundingPsbtVerify) XXX_Size() int { - return xxx_messageInfo_FundingPsbtVerify.Size(m) -} -func (m *FundingPsbtVerify) XXX_DiscardUnknown() { - xxx_messageInfo_FundingPsbtVerify.DiscardUnknown(m) -} - -var xxx_messageInfo_FundingPsbtVerify proto.InternalMessageInfo - -func (m *FundingPsbtVerify) GetFundedPsbt() []byte { - if m != nil { - return m.FundedPsbt - } - return nil -} - -func (m *FundingPsbtVerify) GetPendingChanId() []byte { - if m != nil { - return m.PendingChanId - } - return nil -} - -type FundingPsbtFinalize struct { - // - //The funded PSBT that contains all witness data to send 
the exact channel - //capacity amount to the PK script returned in the open channel message in a - //previous step. Cannot be set at the same time as final_raw_tx. - SignedPsbt []byte `protobuf:"bytes,1,opt,name=signed_psbt,json=signedPsbt,proto3" json:"signed_psbt,omitempty"` - // The pending channel ID of the channel to get the PSBT for. - PendingChanId []byte `protobuf:"bytes,2,opt,name=pending_chan_id,json=pendingChanId,proto3" json:"pending_chan_id,omitempty"` - // - //As an alternative to the signed PSBT with all witness data, the final raw - //wire format transaction can also be specified directly. Cannot be set at the - //same time as signed_psbt. - FinalRawTx []byte `protobuf:"bytes,3,opt,name=final_raw_tx,json=finalRawTx,proto3" json:"final_raw_tx,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FundingPsbtFinalize) Reset() { *m = FundingPsbtFinalize{} } -func (m *FundingPsbtFinalize) String() string { return proto.CompactTextString(m) } -func (*FundingPsbtFinalize) ProtoMessage() {} -func (*FundingPsbtFinalize) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{67} -} - -func (m *FundingPsbtFinalize) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FundingPsbtFinalize.Unmarshal(m, b) -} -func (m *FundingPsbtFinalize) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FundingPsbtFinalize.Marshal(b, m, deterministic) -} -func (m *FundingPsbtFinalize) XXX_Merge(src proto.Message) { - xxx_messageInfo_FundingPsbtFinalize.Merge(m, src) -} -func (m *FundingPsbtFinalize) XXX_Size() int { - return xxx_messageInfo_FundingPsbtFinalize.Size(m) -} -func (m *FundingPsbtFinalize) XXX_DiscardUnknown() { - xxx_messageInfo_FundingPsbtFinalize.DiscardUnknown(m) -} - -var xxx_messageInfo_FundingPsbtFinalize proto.InternalMessageInfo - -func (m *FundingPsbtFinalize) GetSignedPsbt() []byte { - if m != nil { - return 
m.SignedPsbt - } - return nil -} - -func (m *FundingPsbtFinalize) GetPendingChanId() []byte { - if m != nil { - return m.PendingChanId - } - return nil -} - -func (m *FundingPsbtFinalize) GetFinalRawTx() []byte { - if m != nil { - return m.FinalRawTx - } - return nil -} - -type FundingTransitionMsg struct { - // Types that are valid to be assigned to Trigger: - // *FundingTransitionMsg_ShimRegister - // *FundingTransitionMsg_ShimCancel - // *FundingTransitionMsg_PsbtVerify - // *FundingTransitionMsg_PsbtFinalize - Trigger isFundingTransitionMsg_Trigger `protobuf_oneof:"trigger"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FundingTransitionMsg) Reset() { *m = FundingTransitionMsg{} } -func (m *FundingTransitionMsg) String() string { return proto.CompactTextString(m) } -func (*FundingTransitionMsg) ProtoMessage() {} -func (*FundingTransitionMsg) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{68} -} - -func (m *FundingTransitionMsg) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FundingTransitionMsg.Unmarshal(m, b) -} -func (m *FundingTransitionMsg) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FundingTransitionMsg.Marshal(b, m, deterministic) -} -func (m *FundingTransitionMsg) XXX_Merge(src proto.Message) { - xxx_messageInfo_FundingTransitionMsg.Merge(m, src) -} -func (m *FundingTransitionMsg) XXX_Size() int { - return xxx_messageInfo_FundingTransitionMsg.Size(m) -} -func (m *FundingTransitionMsg) XXX_DiscardUnknown() { - xxx_messageInfo_FundingTransitionMsg.DiscardUnknown(m) -} - -var xxx_messageInfo_FundingTransitionMsg proto.InternalMessageInfo - -type isFundingTransitionMsg_Trigger interface { - isFundingTransitionMsg_Trigger() -} - -type FundingTransitionMsg_ShimRegister struct { - ShimRegister *FundingShim `protobuf:"bytes,1,opt,name=shim_register,json=shimRegister,proto3,oneof"` -} - -type 
FundingTransitionMsg_ShimCancel struct { - ShimCancel *FundingShimCancel `protobuf:"bytes,2,opt,name=shim_cancel,json=shimCancel,proto3,oneof"` -} - -type FundingTransitionMsg_PsbtVerify struct { - PsbtVerify *FundingPsbtVerify `protobuf:"bytes,3,opt,name=psbt_verify,json=psbtVerify,proto3,oneof"` -} - -type FundingTransitionMsg_PsbtFinalize struct { - PsbtFinalize *FundingPsbtFinalize `protobuf:"bytes,4,opt,name=psbt_finalize,json=psbtFinalize,proto3,oneof"` -} - -func (*FundingTransitionMsg_ShimRegister) isFundingTransitionMsg_Trigger() {} - -func (*FundingTransitionMsg_ShimCancel) isFundingTransitionMsg_Trigger() {} - -func (*FundingTransitionMsg_PsbtVerify) isFundingTransitionMsg_Trigger() {} - -func (*FundingTransitionMsg_PsbtFinalize) isFundingTransitionMsg_Trigger() {} - -func (m *FundingTransitionMsg) GetTrigger() isFundingTransitionMsg_Trigger { - if m != nil { - return m.Trigger - } - return nil -} - -func (m *FundingTransitionMsg) GetShimRegister() *FundingShim { - if x, ok := m.GetTrigger().(*FundingTransitionMsg_ShimRegister); ok { - return x.ShimRegister - } - return nil -} - -func (m *FundingTransitionMsg) GetShimCancel() *FundingShimCancel { - if x, ok := m.GetTrigger().(*FundingTransitionMsg_ShimCancel); ok { - return x.ShimCancel - } - return nil -} - -func (m *FundingTransitionMsg) GetPsbtVerify() *FundingPsbtVerify { - if x, ok := m.GetTrigger().(*FundingTransitionMsg_PsbtVerify); ok { - return x.PsbtVerify - } - return nil -} - -func (m *FundingTransitionMsg) GetPsbtFinalize() *FundingPsbtFinalize { - if x, ok := m.GetTrigger().(*FundingTransitionMsg_PsbtFinalize); ok { - return x.PsbtFinalize - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. 
-func (*FundingTransitionMsg) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*FundingTransitionMsg_ShimRegister)(nil), - (*FundingTransitionMsg_ShimCancel)(nil), - (*FundingTransitionMsg_PsbtVerify)(nil), - (*FundingTransitionMsg_PsbtFinalize)(nil), - } -} - -type FundingStateStepResp struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FundingStateStepResp) Reset() { *m = FundingStateStepResp{} } -func (m *FundingStateStepResp) String() string { return proto.CompactTextString(m) } -func (*FundingStateStepResp) ProtoMessage() {} -func (*FundingStateStepResp) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{69} -} - -func (m *FundingStateStepResp) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FundingStateStepResp.Unmarshal(m, b) -} -func (m *FundingStateStepResp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FundingStateStepResp.Marshal(b, m, deterministic) -} -func (m *FundingStateStepResp) XXX_Merge(src proto.Message) { - xxx_messageInfo_FundingStateStepResp.Merge(m, src) -} -func (m *FundingStateStepResp) XXX_Size() int { - return xxx_messageInfo_FundingStateStepResp.Size(m) -} -func (m *FundingStateStepResp) XXX_DiscardUnknown() { - xxx_messageInfo_FundingStateStepResp.DiscardUnknown(m) -} - -var xxx_messageInfo_FundingStateStepResp proto.InternalMessageInfo - -type PendingHTLC struct { - // The direction within the channel that the htlc was sent - Incoming bool `protobuf:"varint,1,opt,name=incoming,proto3" json:"incoming,omitempty"` - // The total value of the htlc - Amount int64 `protobuf:"varint,2,opt,name=amount,proto3" json:"amount,omitempty"` - // The final output to be swept back to the user's wallet - Outpoint string `protobuf:"bytes,3,opt,name=outpoint,proto3" json:"outpoint,omitempty"` - // The next block height at which we can spend the current stage - MaturityHeight uint32 
`protobuf:"varint,4,opt,name=maturity_height,json=maturityHeight,proto3" json:"maturity_height,omitempty"` - // - //The number of blocks remaining until the current stage can be swept. - //Negative values indicate how many blocks have passed since becoming - //mature. - BlocksTilMaturity int32 `protobuf:"varint,5,opt,name=blocks_til_maturity,json=blocksTilMaturity,proto3" json:"blocks_til_maturity,omitempty"` - // Indicates whether the htlc is in its first or second stage of recovery - Stage uint32 `protobuf:"varint,6,opt,name=stage,proto3" json:"stage,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PendingHTLC) Reset() { *m = PendingHTLC{} } -func (m *PendingHTLC) String() string { return proto.CompactTextString(m) } -func (*PendingHTLC) ProtoMessage() {} -func (*PendingHTLC) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{70} -} - -func (m *PendingHTLC) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PendingHTLC.Unmarshal(m, b) -} -func (m *PendingHTLC) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PendingHTLC.Marshal(b, m, deterministic) -} -func (m *PendingHTLC) XXX_Merge(src proto.Message) { - xxx_messageInfo_PendingHTLC.Merge(m, src) -} -func (m *PendingHTLC) XXX_Size() int { - return xxx_messageInfo_PendingHTLC.Size(m) -} -func (m *PendingHTLC) XXX_DiscardUnknown() { - xxx_messageInfo_PendingHTLC.DiscardUnknown(m) -} - -var xxx_messageInfo_PendingHTLC proto.InternalMessageInfo - -func (m *PendingHTLC) GetIncoming() bool { - if m != nil { - return m.Incoming - } - return false -} - -func (m *PendingHTLC) GetAmount() int64 { - if m != nil { - return m.Amount - } - return 0 -} - -func (m *PendingHTLC) GetOutpoint() string { - if m != nil { - return m.Outpoint - } - return "" -} - -func (m *PendingHTLC) GetMaturityHeight() uint32 { - if m != nil { - return m.MaturityHeight - } - return 0 -} - 
-func (m *PendingHTLC) GetBlocksTilMaturity() int32 { - if m != nil { - return m.BlocksTilMaturity - } - return 0 -} - -func (m *PendingHTLC) GetStage() uint32 { - if m != nil { - return m.Stage - } - return 0 -} - -type PendingChannelsRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PendingChannelsRequest) Reset() { *m = PendingChannelsRequest{} } -func (m *PendingChannelsRequest) String() string { return proto.CompactTextString(m) } -func (*PendingChannelsRequest) ProtoMessage() {} -func (*PendingChannelsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{71} -} - -func (m *PendingChannelsRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PendingChannelsRequest.Unmarshal(m, b) -} -func (m *PendingChannelsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PendingChannelsRequest.Marshal(b, m, deterministic) -} -func (m *PendingChannelsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_PendingChannelsRequest.Merge(m, src) -} -func (m *PendingChannelsRequest) XXX_Size() int { - return xxx_messageInfo_PendingChannelsRequest.Size(m) -} -func (m *PendingChannelsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_PendingChannelsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_PendingChannelsRequest proto.InternalMessageInfo - -type PendingChannelsResponse struct { - // The balance in satoshis encumbered in pending channels - TotalLimboBalance int64 `protobuf:"varint,1,opt,name=total_limbo_balance,json=totalLimboBalance,proto3" json:"total_limbo_balance,omitempty"` - // Channels pending opening - PendingOpenChannels []*PendingChannelsResponse_PendingOpenChannel `protobuf:"bytes,2,rep,name=pending_open_channels,json=pendingOpenChannels,proto3" json:"pending_open_channels,omitempty"` - // - //Deprecated: Channels pending closing previously contained cooperatively - //closed 
channels with a single confirmation. These channels are now - //considered closed from the time we see them on chain. - PendingClosingChannels []*PendingChannelsResponse_ClosedChannel `protobuf:"bytes,3,rep,name=pending_closing_channels,json=pendingClosingChannels,proto3" json:"pending_closing_channels,omitempty"` // Deprecated: Do not use. - // Channels pending force closing - PendingForceClosingChannels []*PendingChannelsResponse_ForceClosedChannel `protobuf:"bytes,4,rep,name=pending_force_closing_channels,json=pendingForceClosingChannels,proto3" json:"pending_force_closing_channels,omitempty"` - // Channels waiting for closing tx to confirm - WaitingCloseChannels []*PendingChannelsResponse_WaitingCloseChannel `protobuf:"bytes,5,rep,name=waiting_close_channels,json=waitingCloseChannels,proto3" json:"waiting_close_channels,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PendingChannelsResponse) Reset() { *m = PendingChannelsResponse{} } -func (m *PendingChannelsResponse) String() string { return proto.CompactTextString(m) } -func (*PendingChannelsResponse) ProtoMessage() {} -func (*PendingChannelsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{72} -} - -func (m *PendingChannelsResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PendingChannelsResponse.Unmarshal(m, b) -} -func (m *PendingChannelsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PendingChannelsResponse.Marshal(b, m, deterministic) -} -func (m *PendingChannelsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_PendingChannelsResponse.Merge(m, src) -} -func (m *PendingChannelsResponse) XXX_Size() int { - return xxx_messageInfo_PendingChannelsResponse.Size(m) -} -func (m *PendingChannelsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_PendingChannelsResponse.DiscardUnknown(m) -} - -var 
xxx_messageInfo_PendingChannelsResponse proto.InternalMessageInfo - -func (m *PendingChannelsResponse) GetTotalLimboBalance() int64 { - if m != nil { - return m.TotalLimboBalance - } - return 0 -} - -func (m *PendingChannelsResponse) GetPendingOpenChannels() []*PendingChannelsResponse_PendingOpenChannel { - if m != nil { - return m.PendingOpenChannels - } - return nil -} - -// Deprecated: Do not use. -func (m *PendingChannelsResponse) GetPendingClosingChannels() []*PendingChannelsResponse_ClosedChannel { - if m != nil { - return m.PendingClosingChannels - } - return nil -} - -func (m *PendingChannelsResponse) GetPendingForceClosingChannels() []*PendingChannelsResponse_ForceClosedChannel { - if m != nil { - return m.PendingForceClosingChannels - } - return nil -} - -func (m *PendingChannelsResponse) GetWaitingCloseChannels() []*PendingChannelsResponse_WaitingCloseChannel { - if m != nil { - return m.WaitingCloseChannels - } - return nil -} - -type PendingChannelsResponse_PendingChannel struct { - RemoteNodePub string `protobuf:"bytes,1,opt,name=remote_node_pub,json=remoteNodePub,proto3" json:"remote_node_pub,omitempty"` - ChannelPoint string `protobuf:"bytes,2,opt,name=channel_point,json=channelPoint,proto3" json:"channel_point,omitempty"` - Capacity int64 `protobuf:"varint,3,opt,name=capacity,proto3" json:"capacity,omitempty"` - LocalBalance int64 `protobuf:"varint,4,opt,name=local_balance,json=localBalance,proto3" json:"local_balance,omitempty"` - RemoteBalance int64 `protobuf:"varint,5,opt,name=remote_balance,json=remoteBalance,proto3" json:"remote_balance,omitempty"` - // The minimum satoshis this node is required to reserve in its - // balance. - LocalChanReserveSat int64 `protobuf:"varint,6,opt,name=local_chan_reserve_sat,json=localChanReserveSat,proto3" json:"local_chan_reserve_sat,omitempty"` - // - //The minimum satoshis the other node is required to reserve in its - //balance. 
- RemoteChanReserveSat int64 `protobuf:"varint,7,opt,name=remote_chan_reserve_sat,json=remoteChanReserveSat,proto3" json:"remote_chan_reserve_sat,omitempty"` - // The party that initiated opening the channel. - Initiator Initiator `protobuf:"varint,8,opt,name=initiator,proto3,enum=lnrpc.Initiator" json:"initiator,omitempty"` - // The commitment type used by this channel. - CommitmentType CommitmentType `protobuf:"varint,9,opt,name=commitment_type,json=commitmentType,proto3,enum=lnrpc.CommitmentType" json:"commitment_type,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PendingChannelsResponse_PendingChannel) Reset() { - *m = PendingChannelsResponse_PendingChannel{} -} -func (m *PendingChannelsResponse_PendingChannel) String() string { return proto.CompactTextString(m) } -func (*PendingChannelsResponse_PendingChannel) ProtoMessage() {} -func (*PendingChannelsResponse_PendingChannel) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{72, 0} -} - -func (m *PendingChannelsResponse_PendingChannel) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PendingChannelsResponse_PendingChannel.Unmarshal(m, b) -} -func (m *PendingChannelsResponse_PendingChannel) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PendingChannelsResponse_PendingChannel.Marshal(b, m, deterministic) -} -func (m *PendingChannelsResponse_PendingChannel) XXX_Merge(src proto.Message) { - xxx_messageInfo_PendingChannelsResponse_PendingChannel.Merge(m, src) -} -func (m *PendingChannelsResponse_PendingChannel) XXX_Size() int { - return xxx_messageInfo_PendingChannelsResponse_PendingChannel.Size(m) -} -func (m *PendingChannelsResponse_PendingChannel) XXX_DiscardUnknown() { - xxx_messageInfo_PendingChannelsResponse_PendingChannel.DiscardUnknown(m) -} - -var xxx_messageInfo_PendingChannelsResponse_PendingChannel proto.InternalMessageInfo - -func (m 
*PendingChannelsResponse_PendingChannel) GetRemoteNodePub() string { - if m != nil { - return m.RemoteNodePub - } - return "" -} - -func (m *PendingChannelsResponse_PendingChannel) GetChannelPoint() string { - if m != nil { - return m.ChannelPoint - } - return "" -} - -func (m *PendingChannelsResponse_PendingChannel) GetCapacity() int64 { - if m != nil { - return m.Capacity - } - return 0 -} - -func (m *PendingChannelsResponse_PendingChannel) GetLocalBalance() int64 { - if m != nil { - return m.LocalBalance - } - return 0 -} - -func (m *PendingChannelsResponse_PendingChannel) GetRemoteBalance() int64 { - if m != nil { - return m.RemoteBalance - } - return 0 -} - -func (m *PendingChannelsResponse_PendingChannel) GetLocalChanReserveSat() int64 { - if m != nil { - return m.LocalChanReserveSat - } - return 0 -} - -func (m *PendingChannelsResponse_PendingChannel) GetRemoteChanReserveSat() int64 { - if m != nil { - return m.RemoteChanReserveSat - } - return 0 -} - -func (m *PendingChannelsResponse_PendingChannel) GetInitiator() Initiator { - if m != nil { - return m.Initiator - } - return Initiator_INITIATOR_UNKNOWN -} - -func (m *PendingChannelsResponse_PendingChannel) GetCommitmentType() CommitmentType { - if m != nil { - return m.CommitmentType - } - return CommitmentType_LEGACY -} - -type PendingChannelsResponse_PendingOpenChannel struct { - // The pending channel - Channel *PendingChannelsResponse_PendingChannel `protobuf:"bytes,1,opt,name=channel,proto3" json:"channel,omitempty"` - // The height at which this channel will be confirmed - ConfirmationHeight uint32 `protobuf:"varint,2,opt,name=confirmation_height,json=confirmationHeight,proto3" json:"confirmation_height,omitempty"` - // - //The amount calculated to be paid in fees for the current set of - //commitment transactions. 
The fee amount is persisted with the channel - //in order to allow the fee amount to be removed and recalculated with - //each channel state update, including updates that happen after a system - //restart. - CommitFee int64 `protobuf:"varint,4,opt,name=commit_fee,json=commitFee,proto3" json:"commit_fee,omitempty"` - // The weight of the commitment transaction - CommitWeight int64 `protobuf:"varint,5,opt,name=commit_weight,json=commitWeight,proto3" json:"commit_weight,omitempty"` - // - //The required number of satoshis per kilo-weight that the requester will - //pay at all times, for both the funding transaction and commitment - //transaction. This value can later be updated once the channel is open. - FeePerKw int64 `protobuf:"varint,6,opt,name=fee_per_kw,json=feePerKw,proto3" json:"fee_per_kw,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PendingChannelsResponse_PendingOpenChannel) Reset() { - *m = PendingChannelsResponse_PendingOpenChannel{} -} -func (m *PendingChannelsResponse_PendingOpenChannel) String() string { - return proto.CompactTextString(m) -} -func (*PendingChannelsResponse_PendingOpenChannel) ProtoMessage() {} -func (*PendingChannelsResponse_PendingOpenChannel) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{72, 1} -} - -func (m *PendingChannelsResponse_PendingOpenChannel) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PendingChannelsResponse_PendingOpenChannel.Unmarshal(m, b) -} -func (m *PendingChannelsResponse_PendingOpenChannel) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PendingChannelsResponse_PendingOpenChannel.Marshal(b, m, deterministic) -} -func (m *PendingChannelsResponse_PendingOpenChannel) XXX_Merge(src proto.Message) { - xxx_messageInfo_PendingChannelsResponse_PendingOpenChannel.Merge(m, src) -} -func (m *PendingChannelsResponse_PendingOpenChannel) XXX_Size() int 
{ - return xxx_messageInfo_PendingChannelsResponse_PendingOpenChannel.Size(m) -} -func (m *PendingChannelsResponse_PendingOpenChannel) XXX_DiscardUnknown() { - xxx_messageInfo_PendingChannelsResponse_PendingOpenChannel.DiscardUnknown(m) -} - -var xxx_messageInfo_PendingChannelsResponse_PendingOpenChannel proto.InternalMessageInfo - -func (m *PendingChannelsResponse_PendingOpenChannel) GetChannel() *PendingChannelsResponse_PendingChannel { - if m != nil { - return m.Channel - } - return nil -} - -func (m *PendingChannelsResponse_PendingOpenChannel) GetConfirmationHeight() uint32 { - if m != nil { - return m.ConfirmationHeight - } - return 0 -} - -func (m *PendingChannelsResponse_PendingOpenChannel) GetCommitFee() int64 { - if m != nil { - return m.CommitFee - } - return 0 -} - -func (m *PendingChannelsResponse_PendingOpenChannel) GetCommitWeight() int64 { - if m != nil { - return m.CommitWeight - } - return 0 -} - -func (m *PendingChannelsResponse_PendingOpenChannel) GetFeePerKw() int64 { - if m != nil { - return m.FeePerKw - } - return 0 -} - -type PendingChannelsResponse_WaitingCloseChannel struct { - // The pending channel waiting for closing tx to confirm - Channel *PendingChannelsResponse_PendingChannel `protobuf:"bytes,1,opt,name=channel,proto3" json:"channel,omitempty"` - // The balance in satoshis encumbered in this channel - LimboBalance int64 `protobuf:"varint,2,opt,name=limbo_balance,json=limboBalance,proto3" json:"limbo_balance,omitempty"` - // - //A list of valid commitment transactions. Any of these can confirm at - //this point. 
- Commitments *PendingChannelsResponse_Commitments `protobuf:"bytes,3,opt,name=commitments,proto3" json:"commitments,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PendingChannelsResponse_WaitingCloseChannel) Reset() { - *m = PendingChannelsResponse_WaitingCloseChannel{} -} -func (m *PendingChannelsResponse_WaitingCloseChannel) String() string { - return proto.CompactTextString(m) -} -func (*PendingChannelsResponse_WaitingCloseChannel) ProtoMessage() {} -func (*PendingChannelsResponse_WaitingCloseChannel) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{72, 2} -} - -func (m *PendingChannelsResponse_WaitingCloseChannel) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PendingChannelsResponse_WaitingCloseChannel.Unmarshal(m, b) -} -func (m *PendingChannelsResponse_WaitingCloseChannel) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PendingChannelsResponse_WaitingCloseChannel.Marshal(b, m, deterministic) -} -func (m *PendingChannelsResponse_WaitingCloseChannel) XXX_Merge(src proto.Message) { - xxx_messageInfo_PendingChannelsResponse_WaitingCloseChannel.Merge(m, src) -} -func (m *PendingChannelsResponse_WaitingCloseChannel) XXX_Size() int { - return xxx_messageInfo_PendingChannelsResponse_WaitingCloseChannel.Size(m) -} -func (m *PendingChannelsResponse_WaitingCloseChannel) XXX_DiscardUnknown() { - xxx_messageInfo_PendingChannelsResponse_WaitingCloseChannel.DiscardUnknown(m) -} - -var xxx_messageInfo_PendingChannelsResponse_WaitingCloseChannel proto.InternalMessageInfo - -func (m *PendingChannelsResponse_WaitingCloseChannel) GetChannel() *PendingChannelsResponse_PendingChannel { - if m != nil { - return m.Channel - } - return nil -} - -func (m *PendingChannelsResponse_WaitingCloseChannel) GetLimboBalance() int64 { - if m != nil { - return m.LimboBalance - } - return 0 -} - -func (m 
*PendingChannelsResponse_WaitingCloseChannel) GetCommitments() *PendingChannelsResponse_Commitments { - if m != nil { - return m.Commitments - } - return nil -} - -type PendingChannelsResponse_Commitments struct { - // Hash of the local version of the commitment tx. - LocalTxid string `protobuf:"bytes,1,opt,name=local_txid,json=localTxid,proto3" json:"local_txid,omitempty"` - // Hash of the remote version of the commitment tx. - RemoteTxid string `protobuf:"bytes,2,opt,name=remote_txid,json=remoteTxid,proto3" json:"remote_txid,omitempty"` - // Hash of the remote pending version of the commitment tx. - RemotePendingTxid string `protobuf:"bytes,3,opt,name=remote_pending_txid,json=remotePendingTxid,proto3" json:"remote_pending_txid,omitempty"` - // - //The amount in satoshis calculated to be paid in fees for the local - //commitment. - LocalCommitFeeSat uint64 `protobuf:"varint,4,opt,name=local_commit_fee_sat,json=localCommitFeeSat,proto3" json:"local_commit_fee_sat,omitempty"` - // - //The amount in satoshis calculated to be paid in fees for the remote - //commitment. - RemoteCommitFeeSat uint64 `protobuf:"varint,5,opt,name=remote_commit_fee_sat,json=remoteCommitFeeSat,proto3" json:"remote_commit_fee_sat,omitempty"` - // - //The amount in satoshis calculated to be paid in fees for the remote - //pending commitment. 
- RemotePendingCommitFeeSat uint64 `protobuf:"varint,6,opt,name=remote_pending_commit_fee_sat,json=remotePendingCommitFeeSat,proto3" json:"remote_pending_commit_fee_sat,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PendingChannelsResponse_Commitments) Reset() { *m = PendingChannelsResponse_Commitments{} } -func (m *PendingChannelsResponse_Commitments) String() string { return proto.CompactTextString(m) } -func (*PendingChannelsResponse_Commitments) ProtoMessage() {} -func (*PendingChannelsResponse_Commitments) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{72, 3} -} - -func (m *PendingChannelsResponse_Commitments) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PendingChannelsResponse_Commitments.Unmarshal(m, b) -} -func (m *PendingChannelsResponse_Commitments) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PendingChannelsResponse_Commitments.Marshal(b, m, deterministic) -} -func (m *PendingChannelsResponse_Commitments) XXX_Merge(src proto.Message) { - xxx_messageInfo_PendingChannelsResponse_Commitments.Merge(m, src) -} -func (m *PendingChannelsResponse_Commitments) XXX_Size() int { - return xxx_messageInfo_PendingChannelsResponse_Commitments.Size(m) -} -func (m *PendingChannelsResponse_Commitments) XXX_DiscardUnknown() { - xxx_messageInfo_PendingChannelsResponse_Commitments.DiscardUnknown(m) -} - -var xxx_messageInfo_PendingChannelsResponse_Commitments proto.InternalMessageInfo - -func (m *PendingChannelsResponse_Commitments) GetLocalTxid() string { - if m != nil { - return m.LocalTxid - } - return "" -} - -func (m *PendingChannelsResponse_Commitments) GetRemoteTxid() string { - if m != nil { - return m.RemoteTxid - } - return "" -} - -func (m *PendingChannelsResponse_Commitments) GetRemotePendingTxid() string { - if m != nil { - return m.RemotePendingTxid - } - return "" -} - -func (m 
*PendingChannelsResponse_Commitments) GetLocalCommitFeeSat() uint64 { - if m != nil { - return m.LocalCommitFeeSat - } - return 0 -} - -func (m *PendingChannelsResponse_Commitments) GetRemoteCommitFeeSat() uint64 { - if m != nil { - return m.RemoteCommitFeeSat - } - return 0 -} - -func (m *PendingChannelsResponse_Commitments) GetRemotePendingCommitFeeSat() uint64 { - if m != nil { - return m.RemotePendingCommitFeeSat - } - return 0 -} - -type PendingChannelsResponse_ClosedChannel struct { - // The pending channel to be closed - Channel *PendingChannelsResponse_PendingChannel `protobuf:"bytes,1,opt,name=channel,proto3" json:"channel,omitempty"` - // The transaction id of the closing transaction - ClosingTxid string `protobuf:"bytes,2,opt,name=closing_txid,json=closingTxid,proto3" json:"closing_txid,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PendingChannelsResponse_ClosedChannel) Reset() { *m = PendingChannelsResponse_ClosedChannel{} } -func (m *PendingChannelsResponse_ClosedChannel) String() string { return proto.CompactTextString(m) } -func (*PendingChannelsResponse_ClosedChannel) ProtoMessage() {} -func (*PendingChannelsResponse_ClosedChannel) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{72, 4} -} - -func (m *PendingChannelsResponse_ClosedChannel) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PendingChannelsResponse_ClosedChannel.Unmarshal(m, b) -} -func (m *PendingChannelsResponse_ClosedChannel) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PendingChannelsResponse_ClosedChannel.Marshal(b, m, deterministic) -} -func (m *PendingChannelsResponse_ClosedChannel) XXX_Merge(src proto.Message) { - xxx_messageInfo_PendingChannelsResponse_ClosedChannel.Merge(m, src) -} -func (m *PendingChannelsResponse_ClosedChannel) XXX_Size() int { - return 
xxx_messageInfo_PendingChannelsResponse_ClosedChannel.Size(m) -} -func (m *PendingChannelsResponse_ClosedChannel) XXX_DiscardUnknown() { - xxx_messageInfo_PendingChannelsResponse_ClosedChannel.DiscardUnknown(m) -} - -var xxx_messageInfo_PendingChannelsResponse_ClosedChannel proto.InternalMessageInfo - -func (m *PendingChannelsResponse_ClosedChannel) GetChannel() *PendingChannelsResponse_PendingChannel { - if m != nil { - return m.Channel - } - return nil -} - -func (m *PendingChannelsResponse_ClosedChannel) GetClosingTxid() string { - if m != nil { - return m.ClosingTxid - } - return "" -} - -type PendingChannelsResponse_ForceClosedChannel struct { - // The pending channel to be force closed - Channel *PendingChannelsResponse_PendingChannel `protobuf:"bytes,1,opt,name=channel,proto3" json:"channel,omitempty"` - // The transaction id of the closing transaction - ClosingTxid string `protobuf:"bytes,2,opt,name=closing_txid,json=closingTxid,proto3" json:"closing_txid,omitempty"` - // The balance in satoshis encumbered in this pending channel - LimboBalance int64 `protobuf:"varint,3,opt,name=limbo_balance,json=limboBalance,proto3" json:"limbo_balance,omitempty"` - // The height at which funds can be swept into the wallet - MaturityHeight uint32 `protobuf:"varint,4,opt,name=maturity_height,json=maturityHeight,proto3" json:"maturity_height,omitempty"` - // - //Remaining # of blocks until the commitment output can be swept. - //Negative values indicate how many blocks have passed since becoming - //mature. 
- BlocksTilMaturity int32 `protobuf:"varint,5,opt,name=blocks_til_maturity,json=blocksTilMaturity,proto3" json:"blocks_til_maturity,omitempty"` - // The total value of funds successfully recovered from this channel - RecoveredBalance int64 `protobuf:"varint,6,opt,name=recovered_balance,json=recoveredBalance,proto3" json:"recovered_balance,omitempty"` - PendingHtlcs []*PendingHTLC `protobuf:"bytes,8,rep,name=pending_htlcs,json=pendingHtlcs,proto3" json:"pending_htlcs,omitempty"` - Anchor PendingChannelsResponse_ForceClosedChannel_AnchorState `protobuf:"varint,9,opt,name=anchor,proto3,enum=lnrpc.PendingChannelsResponse_ForceClosedChannel_AnchorState" json:"anchor,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PendingChannelsResponse_ForceClosedChannel) Reset() { - *m = PendingChannelsResponse_ForceClosedChannel{} -} -func (m *PendingChannelsResponse_ForceClosedChannel) String() string { - return proto.CompactTextString(m) -} -func (*PendingChannelsResponse_ForceClosedChannel) ProtoMessage() {} -func (*PendingChannelsResponse_ForceClosedChannel) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{72, 5} -} - -func (m *PendingChannelsResponse_ForceClosedChannel) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PendingChannelsResponse_ForceClosedChannel.Unmarshal(m, b) -} -func (m *PendingChannelsResponse_ForceClosedChannel) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PendingChannelsResponse_ForceClosedChannel.Marshal(b, m, deterministic) -} -func (m *PendingChannelsResponse_ForceClosedChannel) XXX_Merge(src proto.Message) { - xxx_messageInfo_PendingChannelsResponse_ForceClosedChannel.Merge(m, src) -} -func (m *PendingChannelsResponse_ForceClosedChannel) XXX_Size() int { - return xxx_messageInfo_PendingChannelsResponse_ForceClosedChannel.Size(m) -} -func (m *PendingChannelsResponse_ForceClosedChannel) 
XXX_DiscardUnknown() { - xxx_messageInfo_PendingChannelsResponse_ForceClosedChannel.DiscardUnknown(m) -} - -var xxx_messageInfo_PendingChannelsResponse_ForceClosedChannel proto.InternalMessageInfo - -func (m *PendingChannelsResponse_ForceClosedChannel) GetChannel() *PendingChannelsResponse_PendingChannel { - if m != nil { - return m.Channel - } - return nil -} - -func (m *PendingChannelsResponse_ForceClosedChannel) GetClosingTxid() string { - if m != nil { - return m.ClosingTxid - } - return "" -} - -func (m *PendingChannelsResponse_ForceClosedChannel) GetLimboBalance() int64 { - if m != nil { - return m.LimboBalance - } - return 0 -} - -func (m *PendingChannelsResponse_ForceClosedChannel) GetMaturityHeight() uint32 { - if m != nil { - return m.MaturityHeight - } - return 0 -} - -func (m *PendingChannelsResponse_ForceClosedChannel) GetBlocksTilMaturity() int32 { - if m != nil { - return m.BlocksTilMaturity - } - return 0 -} - -func (m *PendingChannelsResponse_ForceClosedChannel) GetRecoveredBalance() int64 { - if m != nil { - return m.RecoveredBalance - } - return 0 -} - -func (m *PendingChannelsResponse_ForceClosedChannel) GetPendingHtlcs() []*PendingHTLC { - if m != nil { - return m.PendingHtlcs - } - return nil -} - -func (m *PendingChannelsResponse_ForceClosedChannel) GetAnchor() PendingChannelsResponse_ForceClosedChannel_AnchorState { - if m != nil { - return m.Anchor - } - return PendingChannelsResponse_ForceClosedChannel_LIMBO -} - -type ChannelEventSubscription struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ChannelEventSubscription) Reset() { *m = ChannelEventSubscription{} } -func (m *ChannelEventSubscription) String() string { return proto.CompactTextString(m) } -func (*ChannelEventSubscription) ProtoMessage() {} -func (*ChannelEventSubscription) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{73} -} - -func (m 
*ChannelEventSubscription) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ChannelEventSubscription.Unmarshal(m, b) -} -func (m *ChannelEventSubscription) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ChannelEventSubscription.Marshal(b, m, deterministic) -} -func (m *ChannelEventSubscription) XXX_Merge(src proto.Message) { - xxx_messageInfo_ChannelEventSubscription.Merge(m, src) -} -func (m *ChannelEventSubscription) XXX_Size() int { - return xxx_messageInfo_ChannelEventSubscription.Size(m) -} -func (m *ChannelEventSubscription) XXX_DiscardUnknown() { - xxx_messageInfo_ChannelEventSubscription.DiscardUnknown(m) -} - -var xxx_messageInfo_ChannelEventSubscription proto.InternalMessageInfo - -type ChannelEventUpdate struct { - // Types that are valid to be assigned to Channel: - // *ChannelEventUpdate_OpenChannel - // *ChannelEventUpdate_ClosedChannel - // *ChannelEventUpdate_ActiveChannel - // *ChannelEventUpdate_InactiveChannel - // *ChannelEventUpdate_PendingOpenChannel - Channel isChannelEventUpdate_Channel `protobuf_oneof:"channel"` - Type ChannelEventUpdate_UpdateType `protobuf:"varint,5,opt,name=type,proto3,enum=lnrpc.ChannelEventUpdate_UpdateType" json:"type,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ChannelEventUpdate) Reset() { *m = ChannelEventUpdate{} } -func (m *ChannelEventUpdate) String() string { return proto.CompactTextString(m) } -func (*ChannelEventUpdate) ProtoMessage() {} -func (*ChannelEventUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{74} -} - -func (m *ChannelEventUpdate) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ChannelEventUpdate.Unmarshal(m, b) -} -func (m *ChannelEventUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ChannelEventUpdate.Marshal(b, m, deterministic) -} -func (m *ChannelEventUpdate) 
XXX_Merge(src proto.Message) { - xxx_messageInfo_ChannelEventUpdate.Merge(m, src) -} -func (m *ChannelEventUpdate) XXX_Size() int { - return xxx_messageInfo_ChannelEventUpdate.Size(m) -} -func (m *ChannelEventUpdate) XXX_DiscardUnknown() { - xxx_messageInfo_ChannelEventUpdate.DiscardUnknown(m) -} - -var xxx_messageInfo_ChannelEventUpdate proto.InternalMessageInfo - -type isChannelEventUpdate_Channel interface { - isChannelEventUpdate_Channel() -} - -type ChannelEventUpdate_OpenChannel struct { - OpenChannel *Channel `protobuf:"bytes,1,opt,name=open_channel,json=openChannel,proto3,oneof"` -} - -type ChannelEventUpdate_ClosedChannel struct { - ClosedChannel *ChannelCloseSummary `protobuf:"bytes,2,opt,name=closed_channel,json=closedChannel,proto3,oneof"` -} - -type ChannelEventUpdate_ActiveChannel struct { - ActiveChannel *ChannelPoint `protobuf:"bytes,3,opt,name=active_channel,json=activeChannel,proto3,oneof"` -} - -type ChannelEventUpdate_InactiveChannel struct { - InactiveChannel *ChannelPoint `protobuf:"bytes,4,opt,name=inactive_channel,json=inactiveChannel,proto3,oneof"` -} - -type ChannelEventUpdate_PendingOpenChannel struct { - PendingOpenChannel *PendingUpdate `protobuf:"bytes,6,opt,name=pending_open_channel,json=pendingOpenChannel,proto3,oneof"` -} - -func (*ChannelEventUpdate_OpenChannel) isChannelEventUpdate_Channel() {} - -func (*ChannelEventUpdate_ClosedChannel) isChannelEventUpdate_Channel() {} - -func (*ChannelEventUpdate_ActiveChannel) isChannelEventUpdate_Channel() {} - -func (*ChannelEventUpdate_InactiveChannel) isChannelEventUpdate_Channel() {} - -func (*ChannelEventUpdate_PendingOpenChannel) isChannelEventUpdate_Channel() {} - -func (m *ChannelEventUpdate) GetChannel() isChannelEventUpdate_Channel { - if m != nil { - return m.Channel - } - return nil -} - -func (m *ChannelEventUpdate) GetOpenChannel() *Channel { - if x, ok := m.GetChannel().(*ChannelEventUpdate_OpenChannel); ok { - return x.OpenChannel - } - return nil -} - -func (m 
*ChannelEventUpdate) GetClosedChannel() *ChannelCloseSummary { - if x, ok := m.GetChannel().(*ChannelEventUpdate_ClosedChannel); ok { - return x.ClosedChannel - } - return nil -} - -func (m *ChannelEventUpdate) GetActiveChannel() *ChannelPoint { - if x, ok := m.GetChannel().(*ChannelEventUpdate_ActiveChannel); ok { - return x.ActiveChannel - } - return nil -} - -func (m *ChannelEventUpdate) GetInactiveChannel() *ChannelPoint { - if x, ok := m.GetChannel().(*ChannelEventUpdate_InactiveChannel); ok { - return x.InactiveChannel - } - return nil -} - -func (m *ChannelEventUpdate) GetPendingOpenChannel() *PendingUpdate { - if x, ok := m.GetChannel().(*ChannelEventUpdate_PendingOpenChannel); ok { - return x.PendingOpenChannel - } - return nil -} - -func (m *ChannelEventUpdate) GetType() ChannelEventUpdate_UpdateType { - if m != nil { - return m.Type - } - return ChannelEventUpdate_OPEN_CHANNEL -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*ChannelEventUpdate) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*ChannelEventUpdate_OpenChannel)(nil), - (*ChannelEventUpdate_ClosedChannel)(nil), - (*ChannelEventUpdate_ActiveChannel)(nil), - (*ChannelEventUpdate_InactiveChannel)(nil), - (*ChannelEventUpdate_PendingOpenChannel)(nil), - } -} - -type WalletBalanceRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *WalletBalanceRequest) Reset() { *m = WalletBalanceRequest{} } -func (m *WalletBalanceRequest) String() string { return proto.CompactTextString(m) } -func (*WalletBalanceRequest) ProtoMessage() {} -func (*WalletBalanceRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{75} -} - -func (m *WalletBalanceRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_WalletBalanceRequest.Unmarshal(m, b) -} -func (m *WalletBalanceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - 
return xxx_messageInfo_WalletBalanceRequest.Marshal(b, m, deterministic) -} -func (m *WalletBalanceRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_WalletBalanceRequest.Merge(m, src) -} -func (m *WalletBalanceRequest) XXX_Size() int { - return xxx_messageInfo_WalletBalanceRequest.Size(m) -} -func (m *WalletBalanceRequest) XXX_DiscardUnknown() { - xxx_messageInfo_WalletBalanceRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_WalletBalanceRequest proto.InternalMessageInfo - -type WalletBalanceResponse struct { - // The balance of the wallet - TotalBalance int64 `protobuf:"varint,1,opt,name=total_balance,json=totalBalance,proto3" json:"total_balance,omitempty"` - // The confirmed balance of a wallet(with >= 1 confirmations) - ConfirmedBalance int64 `protobuf:"varint,2,opt,name=confirmed_balance,json=confirmedBalance,proto3" json:"confirmed_balance,omitempty"` - // The unconfirmed balance of a wallet(with 0 confirmations) - UnconfirmedBalance int64 `protobuf:"varint,3,opt,name=unconfirmed_balance,json=unconfirmedBalance,proto3" json:"unconfirmed_balance,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *WalletBalanceResponse) Reset() { *m = WalletBalanceResponse{} } -func (m *WalletBalanceResponse) String() string { return proto.CompactTextString(m) } -func (*WalletBalanceResponse) ProtoMessage() {} -func (*WalletBalanceResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{76} -} - -func (m *WalletBalanceResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_WalletBalanceResponse.Unmarshal(m, b) -} -func (m *WalletBalanceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_WalletBalanceResponse.Marshal(b, m, deterministic) -} -func (m *WalletBalanceResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_WalletBalanceResponse.Merge(m, src) -} -func (m *WalletBalanceResponse) 
XXX_Size() int { - return xxx_messageInfo_WalletBalanceResponse.Size(m) -} -func (m *WalletBalanceResponse) XXX_DiscardUnknown() { - xxx_messageInfo_WalletBalanceResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_WalletBalanceResponse proto.InternalMessageInfo - -func (m *WalletBalanceResponse) GetTotalBalance() int64 { - if m != nil { - return m.TotalBalance - } - return 0 -} - -func (m *WalletBalanceResponse) GetConfirmedBalance() int64 { - if m != nil { - return m.ConfirmedBalance - } - return 0 -} - -func (m *WalletBalanceResponse) GetUnconfirmedBalance() int64 { - if m != nil { - return m.UnconfirmedBalance - } - return 0 -} - -type GetAddressBalancesRequest struct { - // Minimum number of confirmations for coins to be considered received - Minconf int32 `protobuf:"varint,1,opt,name=minconf,proto3" json:"minconf,omitempty"` - // If true then addresses which have been created but carry zero balance will be included - Showzerobalance bool `protobuf:"varint,2,opt,name=showzerobalance,proto3" json:"showzerobalance,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetAddressBalancesRequest) Reset() { *m = GetAddressBalancesRequest{} } -func (m *GetAddressBalancesRequest) String() string { return proto.CompactTextString(m) } -func (*GetAddressBalancesRequest) ProtoMessage() {} -func (*GetAddressBalancesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{77} -} - -func (m *GetAddressBalancesRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetAddressBalancesRequest.Unmarshal(m, b) -} -func (m *GetAddressBalancesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetAddressBalancesRequest.Marshal(b, m, deterministic) -} -func (m *GetAddressBalancesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetAddressBalancesRequest.Merge(m, src) -} -func (m *GetAddressBalancesRequest) 
XXX_Size() int { - return xxx_messageInfo_GetAddressBalancesRequest.Size(m) -} -func (m *GetAddressBalancesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetAddressBalancesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetAddressBalancesRequest proto.InternalMessageInfo - -func (m *GetAddressBalancesRequest) GetMinconf() int32 { - if m != nil { - return m.Minconf - } - return 0 -} - -func (m *GetAddressBalancesRequest) GetShowzerobalance() bool { - if m != nil { - return m.Showzerobalance - } - return false -} - -type GetAddressBalancesResponseAddr struct { - // The address which has this balance - Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` - // Total balance in coins - Total float64 `protobuf:"fixed64,2,opt,name=total,proto3" json:"total,omitempty"` - // Total balance (atomic units) - Stotal int64 `protobuf:"varint,3,opt,name=stotal,proto3" json:"stotal,omitempty"` - // Balance which is currently spendable (coins) - Spendable float64 `protobuf:"fixed64,4,opt,name=spendable,proto3" json:"spendable,omitempty"` - // Balance which is currently spendable (atomic units) - Sspendable int64 `protobuf:"varint,5,opt,name=sspendable,proto3" json:"sspendable,omitempty"` - // Mined coins which have not yet matured (coins) - Immaturereward float64 `protobuf:"fixed64,6,opt,name=immaturereward,proto3" json:"immaturereward,omitempty"` - // Mined coins which have not yet matured (atomic units) - Simmaturereward int64 `protobuf:"varint,7,opt,name=simmaturereward,proto3" json:"simmaturereward,omitempty"` - // Unconfirmed balance in coins - Unconfirmed float64 `protobuf:"fixed64,8,opt,name=unconfirmed,proto3" json:"unconfirmed,omitempty"` - // Unconfirmed balance in atomic units - Sunconfirmed int64 `protobuf:"varint,9,opt,name=sunconfirmed,proto3" json:"sunconfirmed,omitempty"` - // The number of transaction outputs which make up the balance - Outputcount int32 `protobuf:"varint,10,opt,name=outputcount,proto3" 
json:"outputcount,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetAddressBalancesResponseAddr) Reset() { *m = GetAddressBalancesResponseAddr{} } -func (m *GetAddressBalancesResponseAddr) String() string { return proto.CompactTextString(m) } -func (*GetAddressBalancesResponseAddr) ProtoMessage() {} -func (*GetAddressBalancesResponseAddr) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{78} -} - -func (m *GetAddressBalancesResponseAddr) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetAddressBalancesResponseAddr.Unmarshal(m, b) -} -func (m *GetAddressBalancesResponseAddr) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetAddressBalancesResponseAddr.Marshal(b, m, deterministic) -} -func (m *GetAddressBalancesResponseAddr) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetAddressBalancesResponseAddr.Merge(m, src) -} -func (m *GetAddressBalancesResponseAddr) XXX_Size() int { - return xxx_messageInfo_GetAddressBalancesResponseAddr.Size(m) -} -func (m *GetAddressBalancesResponseAddr) XXX_DiscardUnknown() { - xxx_messageInfo_GetAddressBalancesResponseAddr.DiscardUnknown(m) -} - -var xxx_messageInfo_GetAddressBalancesResponseAddr proto.InternalMessageInfo - -func (m *GetAddressBalancesResponseAddr) GetAddress() string { - if m != nil { - return m.Address - } - return "" -} - -func (m *GetAddressBalancesResponseAddr) GetTotal() float64 { - if m != nil { - return m.Total - } - return 0 -} - -func (m *GetAddressBalancesResponseAddr) GetStotal() int64 { - if m != nil { - return m.Stotal - } - return 0 -} - -func (m *GetAddressBalancesResponseAddr) GetSpendable() float64 { - if m != nil { - return m.Spendable - } - return 0 -} - -func (m *GetAddressBalancesResponseAddr) GetSspendable() int64 { - if m != nil { - return m.Sspendable - } - return 0 -} - -func (m *GetAddressBalancesResponseAddr) 
GetImmaturereward() float64 { - if m != nil { - return m.Immaturereward - } - return 0 -} - -func (m *GetAddressBalancesResponseAddr) GetSimmaturereward() int64 { - if m != nil { - return m.Simmaturereward - } - return 0 -} - -func (m *GetAddressBalancesResponseAddr) GetUnconfirmed() float64 { - if m != nil { - return m.Unconfirmed - } - return 0 -} - -func (m *GetAddressBalancesResponseAddr) GetSunconfirmed() int64 { - if m != nil { - return m.Sunconfirmed - } - return 0 -} - -func (m *GetAddressBalancesResponseAddr) GetOutputcount() int32 { - if m != nil { - return m.Outputcount - } - return 0 -} - -type GetAddressBalancesResponse struct { - Addrs []*GetAddressBalancesResponseAddr `protobuf:"bytes,1,rep,name=addrs,proto3" json:"addrs,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetAddressBalancesResponse) Reset() { *m = GetAddressBalancesResponse{} } -func (m *GetAddressBalancesResponse) String() string { return proto.CompactTextString(m) } -func (*GetAddressBalancesResponse) ProtoMessage() {} -func (*GetAddressBalancesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{79} -} - -func (m *GetAddressBalancesResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetAddressBalancesResponse.Unmarshal(m, b) -} -func (m *GetAddressBalancesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetAddressBalancesResponse.Marshal(b, m, deterministic) -} -func (m *GetAddressBalancesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetAddressBalancesResponse.Merge(m, src) -} -func (m *GetAddressBalancesResponse) XXX_Size() int { - return xxx_messageInfo_GetAddressBalancesResponse.Size(m) -} -func (m *GetAddressBalancesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetAddressBalancesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetAddressBalancesResponse 
proto.InternalMessageInfo - -func (m *GetAddressBalancesResponse) GetAddrs() []*GetAddressBalancesResponseAddr { - if m != nil { - return m.Addrs - } - return nil -} - -type Amount struct { - // Value denominated in satoshis. - Sat uint64 `protobuf:"varint,1,opt,name=sat,proto3" json:"sat,omitempty"` - // Value denominated in milli-satoshis. - Msat uint64 `protobuf:"varint,2,opt,name=msat,proto3" json:"msat,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Amount) Reset() { *m = Amount{} } -func (m *Amount) String() string { return proto.CompactTextString(m) } -func (*Amount) ProtoMessage() {} -func (*Amount) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{80} -} - -func (m *Amount) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Amount.Unmarshal(m, b) -} -func (m *Amount) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Amount.Marshal(b, m, deterministic) -} -func (m *Amount) XXX_Merge(src proto.Message) { - xxx_messageInfo_Amount.Merge(m, src) -} -func (m *Amount) XXX_Size() int { - return xxx_messageInfo_Amount.Size(m) -} -func (m *Amount) XXX_DiscardUnknown() { - xxx_messageInfo_Amount.DiscardUnknown(m) -} - -var xxx_messageInfo_Amount proto.InternalMessageInfo - -func (m *Amount) GetSat() uint64 { - if m != nil { - return m.Sat - } - return 0 -} - -func (m *Amount) GetMsat() uint64 { - if m != nil { - return m.Msat - } - return 0 -} - -type ChannelBalanceRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ChannelBalanceRequest) Reset() { *m = ChannelBalanceRequest{} } -func (m *ChannelBalanceRequest) String() string { return proto.CompactTextString(m) } -func (*ChannelBalanceRequest) ProtoMessage() {} -func (*ChannelBalanceRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, 
[]int{81} -} - -func (m *ChannelBalanceRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ChannelBalanceRequest.Unmarshal(m, b) -} -func (m *ChannelBalanceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ChannelBalanceRequest.Marshal(b, m, deterministic) -} -func (m *ChannelBalanceRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ChannelBalanceRequest.Merge(m, src) -} -func (m *ChannelBalanceRequest) XXX_Size() int { - return xxx_messageInfo_ChannelBalanceRequest.Size(m) -} -func (m *ChannelBalanceRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ChannelBalanceRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ChannelBalanceRequest proto.InternalMessageInfo - -type ChannelBalanceResponse struct { - // Deprecated. Sum of channels balances denominated in satoshis - Balance int64 `protobuf:"varint,1,opt,name=balance,proto3" json:"balance,omitempty"` // Deprecated: Do not use. - // Deprecated. Sum of channels pending balances denominated in satoshis - PendingOpenBalance int64 `protobuf:"varint,2,opt,name=pending_open_balance,json=pendingOpenBalance,proto3" json:"pending_open_balance,omitempty"` // Deprecated: Do not use. - // Sum of channels local balances. - LocalBalance *Amount `protobuf:"bytes,3,opt,name=local_balance,json=localBalance,proto3" json:"local_balance,omitempty"` - // Sum of channels remote balances. - RemoteBalance *Amount `protobuf:"bytes,4,opt,name=remote_balance,json=remoteBalance,proto3" json:"remote_balance,omitempty"` - // Sum of channels local unsettled balances. - UnsettledLocalBalance *Amount `protobuf:"bytes,5,opt,name=unsettled_local_balance,json=unsettledLocalBalance,proto3" json:"unsettled_local_balance,omitempty"` - // Sum of channels remote unsettled balances. - UnsettledRemoteBalance *Amount `protobuf:"bytes,6,opt,name=unsettled_remote_balance,json=unsettledRemoteBalance,proto3" json:"unsettled_remote_balance,omitempty"` - // Sum of channels pending local balances. 
- PendingOpenLocalBalance *Amount `protobuf:"bytes,7,opt,name=pending_open_local_balance,json=pendingOpenLocalBalance,proto3" json:"pending_open_local_balance,omitempty"` - // Sum of channels pending remote balances. - PendingOpenRemoteBalance *Amount `protobuf:"bytes,8,opt,name=pending_open_remote_balance,json=pendingOpenRemoteBalance,proto3" json:"pending_open_remote_balance,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ChannelBalanceResponse) Reset() { *m = ChannelBalanceResponse{} } -func (m *ChannelBalanceResponse) String() string { return proto.CompactTextString(m) } -func (*ChannelBalanceResponse) ProtoMessage() {} -func (*ChannelBalanceResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{82} -} - -func (m *ChannelBalanceResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ChannelBalanceResponse.Unmarshal(m, b) -} -func (m *ChannelBalanceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ChannelBalanceResponse.Marshal(b, m, deterministic) -} -func (m *ChannelBalanceResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ChannelBalanceResponse.Merge(m, src) -} -func (m *ChannelBalanceResponse) XXX_Size() int { - return xxx_messageInfo_ChannelBalanceResponse.Size(m) -} -func (m *ChannelBalanceResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ChannelBalanceResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ChannelBalanceResponse proto.InternalMessageInfo - -// Deprecated: Do not use. -func (m *ChannelBalanceResponse) GetBalance() int64 { - if m != nil { - return m.Balance - } - return 0 -} - -// Deprecated: Do not use. 
-func (m *ChannelBalanceResponse) GetPendingOpenBalance() int64 { - if m != nil { - return m.PendingOpenBalance - } - return 0 -} - -func (m *ChannelBalanceResponse) GetLocalBalance() *Amount { - if m != nil { - return m.LocalBalance - } - return nil -} - -func (m *ChannelBalanceResponse) GetRemoteBalance() *Amount { - if m != nil { - return m.RemoteBalance - } - return nil -} - -func (m *ChannelBalanceResponse) GetUnsettledLocalBalance() *Amount { - if m != nil { - return m.UnsettledLocalBalance - } - return nil -} - -func (m *ChannelBalanceResponse) GetUnsettledRemoteBalance() *Amount { - if m != nil { - return m.UnsettledRemoteBalance - } - return nil -} - -func (m *ChannelBalanceResponse) GetPendingOpenLocalBalance() *Amount { - if m != nil { - return m.PendingOpenLocalBalance - } - return nil -} - -func (m *ChannelBalanceResponse) GetPendingOpenRemoteBalance() *Amount { - if m != nil { - return m.PendingOpenRemoteBalance - } - return nil -} - -type QueryRoutesRequest struct { - // The 33-byte hex-encoded public key for the payment destination - PubKey string `protobuf:"bytes,1,opt,name=pub_key,json=pubKey,proto3" json:"pub_key,omitempty"` - // - //The amount to send expressed in satoshis. - // - //The fields amt and amt_msat are mutually exclusive. - Amt int64 `protobuf:"varint,2,opt,name=amt,proto3" json:"amt,omitempty"` - // - //The amount to send expressed in millisatoshis. - // - //The fields amt and amt_msat are mutually exclusive. - AmtMsat int64 `protobuf:"varint,12,opt,name=amt_msat,json=amtMsat,proto3" json:"amt_msat,omitempty"` - // - //An optional CLTV delta from the current height that should be used for the - //timelock of the final hop. Note that unlike SendPayment, QueryRoutes does - //not add any additional block padding on top of final_ctlv_delta. This - //padding of a few blocks needs to be added manually or otherwise failures may - //happen when a block comes in while the payment is in flight. 
- FinalCltvDelta int32 `protobuf:"varint,4,opt,name=final_cltv_delta,json=finalCltvDelta,proto3" json:"final_cltv_delta,omitempty"` - // - //The maximum number of satoshis that will be paid as a fee of the payment. - //This value can be represented either as a percentage of the amount being - //sent, or as a fixed amount of the maximum fee the user is willing the pay to - //send the payment. - FeeLimit *FeeLimit `protobuf:"bytes,5,opt,name=fee_limit,json=feeLimit,proto3" json:"fee_limit,omitempty"` - // - //A list of nodes to ignore during path finding. When using REST, these fields - //must be encoded as base64. - IgnoredNodes [][]byte `protobuf:"bytes,6,rep,name=ignored_nodes,json=ignoredNodes,proto3" json:"ignored_nodes,omitempty"` - // - //Deprecated. A list of edges to ignore during path finding. - IgnoredEdges []*EdgeLocator `protobuf:"bytes,7,rep,name=ignored_edges,json=ignoredEdges,proto3" json:"ignored_edges,omitempty"` // Deprecated: Do not use. - // - //The source node where the request route should originated from. If empty, - //self is assumed. - SourcePubKey string `protobuf:"bytes,8,opt,name=source_pub_key,json=sourcePubKey,proto3" json:"source_pub_key,omitempty"` - // - //If set to true, edge probabilities from mission control will be used to get - //the optimal route. - UseMissionControl bool `protobuf:"varint,9,opt,name=use_mission_control,json=useMissionControl,proto3" json:"use_mission_control,omitempty"` - // - //A list of directed node pairs that will be ignored during path finding. - IgnoredPairs []*NodePair `protobuf:"bytes,10,rep,name=ignored_pairs,json=ignoredPairs,proto3" json:"ignored_pairs,omitempty"` - // - //An optional maximum total time lock for the route. If the source is empty or - //ourselves, this should not exceed lnd's `--max-cltv-expiry` setting. If - //zero, then the value of `--max-cltv-expiry` is used as the limit. 
- CltvLimit uint32 `protobuf:"varint,11,opt,name=cltv_limit,json=cltvLimit,proto3" json:"cltv_limit,omitempty"` - // - //An optional field that can be used to pass an arbitrary set of TLV records - //to a peer which understands the new records. This can be used to pass - //application specific data during the payment attempt. If the destination - //does not support the specified recrods, and error will be returned. - //Record types are required to be in the custom range >= 65536. When using - //REST, the values must be encoded as base64. - DestCustomRecords map[uint64][]byte `protobuf:"bytes,13,rep,name=dest_custom_records,json=destCustomRecords,proto3" json:"dest_custom_records,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // - //The channel id of the channel that must be taken to the first hop. If zero, - //any channel may be used. - OutgoingChanId uint64 `protobuf:"varint,14,opt,name=outgoing_chan_id,json=outgoingChanId,proto3" json:"outgoing_chan_id,omitempty"` - // - //The pubkey of the last hop of the route. If empty, any hop may be used. - LastHopPubkey []byte `protobuf:"bytes,15,opt,name=last_hop_pubkey,json=lastHopPubkey,proto3" json:"last_hop_pubkey,omitempty"` - // - //Optional route hints to reach the destination through private channels. - RouteHints []*RouteHint `protobuf:"bytes,16,rep,name=route_hints,json=routeHints,proto3" json:"route_hints,omitempty"` - // - //Features assumed to be supported by the final node. All transitive feature - //dependencies must also be set properly. For a given feature bit pair, either - //optional or remote may be set, but not both. If this field is nil or empty, - //the router will try to load destination features from the graph as a - //fallback. 
- DestFeatures []FeatureBit `protobuf:"varint,17,rep,packed,name=dest_features,json=destFeatures,proto3,enum=lnrpc.FeatureBit" json:"dest_features,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *QueryRoutesRequest) Reset() { *m = QueryRoutesRequest{} } -func (m *QueryRoutesRequest) String() string { return proto.CompactTextString(m) } -func (*QueryRoutesRequest) ProtoMessage() {} -func (*QueryRoutesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{83} -} - -func (m *QueryRoutesRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_QueryRoutesRequest.Unmarshal(m, b) -} -func (m *QueryRoutesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_QueryRoutesRequest.Marshal(b, m, deterministic) -} -func (m *QueryRoutesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryRoutesRequest.Merge(m, src) -} -func (m *QueryRoutesRequest) XXX_Size() int { - return xxx_messageInfo_QueryRoutesRequest.Size(m) -} -func (m *QueryRoutesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryRoutesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryRoutesRequest proto.InternalMessageInfo - -func (m *QueryRoutesRequest) GetPubKey() string { - if m != nil { - return m.PubKey - } - return "" -} - -func (m *QueryRoutesRequest) GetAmt() int64 { - if m != nil { - return m.Amt - } - return 0 -} - -func (m *QueryRoutesRequest) GetAmtMsat() int64 { - if m != nil { - return m.AmtMsat - } - return 0 -} - -func (m *QueryRoutesRequest) GetFinalCltvDelta() int32 { - if m != nil { - return m.FinalCltvDelta - } - return 0 -} - -func (m *QueryRoutesRequest) GetFeeLimit() *FeeLimit { - if m != nil { - return m.FeeLimit - } - return nil -} - -func (m *QueryRoutesRequest) GetIgnoredNodes() [][]byte { - if m != nil { - return m.IgnoredNodes - } - return nil -} - -// Deprecated: Do not use. 
-func (m *QueryRoutesRequest) GetIgnoredEdges() []*EdgeLocator { - if m != nil { - return m.IgnoredEdges - } - return nil -} - -func (m *QueryRoutesRequest) GetSourcePubKey() string { - if m != nil { - return m.SourcePubKey - } - return "" -} - -func (m *QueryRoutesRequest) GetUseMissionControl() bool { - if m != nil { - return m.UseMissionControl - } - return false -} - -func (m *QueryRoutesRequest) GetIgnoredPairs() []*NodePair { - if m != nil { - return m.IgnoredPairs - } - return nil -} - -func (m *QueryRoutesRequest) GetCltvLimit() uint32 { - if m != nil { - return m.CltvLimit - } - return 0 -} - -func (m *QueryRoutesRequest) GetDestCustomRecords() map[uint64][]byte { - if m != nil { - return m.DestCustomRecords - } - return nil -} - -func (m *QueryRoutesRequest) GetOutgoingChanId() uint64 { - if m != nil { - return m.OutgoingChanId - } - return 0 -} - -func (m *QueryRoutesRequest) GetLastHopPubkey() []byte { - if m != nil { - return m.LastHopPubkey - } - return nil -} - -func (m *QueryRoutesRequest) GetRouteHints() []*RouteHint { - if m != nil { - return m.RouteHints - } - return nil -} - -func (m *QueryRoutesRequest) GetDestFeatures() []FeatureBit { - if m != nil { - return m.DestFeatures - } - return nil -} - -type NodePair struct { - // - //The sending node of the pair. When using REST, this field must be encoded as - //base64. - From []byte `protobuf:"bytes,1,opt,name=from,proto3" json:"from,omitempty"` - // - //The receiving node of the pair. When using REST, this field must be encoded - //as base64. 
- To []byte `protobuf:"bytes,2,opt,name=to,proto3" json:"to,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodePair) Reset() { *m = NodePair{} } -func (m *NodePair) String() string { return proto.CompactTextString(m) } -func (*NodePair) ProtoMessage() {} -func (*NodePair) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{84} -} - -func (m *NodePair) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodePair.Unmarshal(m, b) -} -func (m *NodePair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodePair.Marshal(b, m, deterministic) -} -func (m *NodePair) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodePair.Merge(m, src) -} -func (m *NodePair) XXX_Size() int { - return xxx_messageInfo_NodePair.Size(m) -} -func (m *NodePair) XXX_DiscardUnknown() { - xxx_messageInfo_NodePair.DiscardUnknown(m) -} - -var xxx_messageInfo_NodePair proto.InternalMessageInfo - -func (m *NodePair) GetFrom() []byte { - if m != nil { - return m.From - } - return nil -} - -func (m *NodePair) GetTo() []byte { - if m != nil { - return m.To - } - return nil -} - -type EdgeLocator struct { - // The short channel id of this edge. - ChannelId uint64 `protobuf:"varint,1,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"` - // - //The direction of this edge. If direction_reverse is false, the direction - //of this edge is from the channel endpoint with the lexicographically smaller - //pub key to the endpoint with the larger pub key. If direction_reverse is - //is true, the edge goes the other way. 
- DirectionReverse bool `protobuf:"varint,2,opt,name=direction_reverse,json=directionReverse,proto3" json:"direction_reverse,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *EdgeLocator) Reset() { *m = EdgeLocator{} } -func (m *EdgeLocator) String() string { return proto.CompactTextString(m) } -func (*EdgeLocator) ProtoMessage() {} -func (*EdgeLocator) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{85} -} - -func (m *EdgeLocator) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EdgeLocator.Unmarshal(m, b) -} -func (m *EdgeLocator) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EdgeLocator.Marshal(b, m, deterministic) -} -func (m *EdgeLocator) XXX_Merge(src proto.Message) { - xxx_messageInfo_EdgeLocator.Merge(m, src) -} -func (m *EdgeLocator) XXX_Size() int { - return xxx_messageInfo_EdgeLocator.Size(m) -} -func (m *EdgeLocator) XXX_DiscardUnknown() { - xxx_messageInfo_EdgeLocator.DiscardUnknown(m) -} - -var xxx_messageInfo_EdgeLocator proto.InternalMessageInfo - -func (m *EdgeLocator) GetChannelId() uint64 { - if m != nil { - return m.ChannelId - } - return 0 -} - -func (m *EdgeLocator) GetDirectionReverse() bool { - if m != nil { - return m.DirectionReverse - } - return false -} - -type QueryRoutesResponse struct { - // - //The route that results from the path finding operation. This is still a - //repeated field to retain backwards compatibility. - Routes []*Route `protobuf:"bytes,1,rep,name=routes,proto3" json:"routes,omitempty"` - // - //The success probability of the returned route based on the current mission - //control state. 
[EXPERIMENTAL] - SuccessProb float64 `protobuf:"fixed64,2,opt,name=success_prob,json=successProb,proto3" json:"success_prob,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *QueryRoutesResponse) Reset() { *m = QueryRoutesResponse{} } -func (m *QueryRoutesResponse) String() string { return proto.CompactTextString(m) } -func (*QueryRoutesResponse) ProtoMessage() {} -func (*QueryRoutesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{86} -} - -func (m *QueryRoutesResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_QueryRoutesResponse.Unmarshal(m, b) -} -func (m *QueryRoutesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_QueryRoutesResponse.Marshal(b, m, deterministic) -} -func (m *QueryRoutesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryRoutesResponse.Merge(m, src) -} -func (m *QueryRoutesResponse) XXX_Size() int { - return xxx_messageInfo_QueryRoutesResponse.Size(m) -} -func (m *QueryRoutesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryRoutesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryRoutesResponse proto.InternalMessageInfo - -func (m *QueryRoutesResponse) GetRoutes() []*Route { - if m != nil { - return m.Routes - } - return nil -} - -func (m *QueryRoutesResponse) GetSuccessProb() float64 { - if m != nil { - return m.SuccessProb - } - return 0 -} - -type Hop struct { - // - //The unique channel ID for the channel. The first 3 bytes are the block - //height, the next 3 the index within the block, and the last 2 bytes are the - //output index for the channel. 
- ChanId uint64 `protobuf:"varint,1,opt,name=chan_id,json=chanId,proto3" json:"chan_id,omitempty"` - ChanCapacity int64 `protobuf:"varint,2,opt,name=chan_capacity,json=chanCapacity,proto3" json:"chan_capacity,omitempty"` - AmtToForward int64 `protobuf:"varint,3,opt,name=amt_to_forward,json=amtToForward,proto3" json:"amt_to_forward,omitempty"` // Deprecated: Do not use. - Fee int64 `protobuf:"varint,4,opt,name=fee,proto3" json:"fee,omitempty"` // Deprecated: Do not use. - Expiry uint32 `protobuf:"varint,5,opt,name=expiry,proto3" json:"expiry,omitempty"` - AmtToForwardMsat int64 `protobuf:"varint,6,opt,name=amt_to_forward_msat,json=amtToForwardMsat,proto3" json:"amt_to_forward_msat,omitempty"` - FeeMsat int64 `protobuf:"varint,7,opt,name=fee_msat,json=feeMsat,proto3" json:"fee_msat,omitempty"` - // - //An optional public key of the hop. If the public key is given, the payment - //can be executed without relying on a copy of the channel graph. - PubKey string `protobuf:"bytes,8,opt,name=pub_key,json=pubKey,proto3" json:"pub_key,omitempty"` - // - //If set to true, then this hop will be encoded using the new variable length - //TLV format. Note that if any custom tlv_records below are specified, then - //this field MUST be set to true for them to be encoded properly. - TlvPayload bool `protobuf:"varint,9,opt,name=tlv_payload,json=tlvPayload,proto3" json:"tlv_payload,omitempty"` - // - //An optional TLV record that signals the use of an MPP payment. If present, - //the receiver will enforce that that the same mpp_record is included in the - //final hop payload of all non-zero payments in the HTLC set. If empty, a - //regular single-shot payment is or was attempted. - MppRecord *MPPRecord `protobuf:"bytes,10,opt,name=mpp_record,json=mppRecord,proto3" json:"mpp_record,omitempty"` - // - //An optional set of key-value TLV records. 
This is useful within the context - //of the SendToRoute call as it allows callers to specify arbitrary K-V pairs - //to drop off at each hop within the onion. - CustomRecords map[uint64][]byte `protobuf:"bytes,11,rep,name=custom_records,json=customRecords,proto3" json:"custom_records,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Hop) Reset() { *m = Hop{} } -func (m *Hop) String() string { return proto.CompactTextString(m) } -func (*Hop) ProtoMessage() {} -func (*Hop) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{87} -} - -func (m *Hop) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Hop.Unmarshal(m, b) -} -func (m *Hop) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Hop.Marshal(b, m, deterministic) -} -func (m *Hop) XXX_Merge(src proto.Message) { - xxx_messageInfo_Hop.Merge(m, src) -} -func (m *Hop) XXX_Size() int { - return xxx_messageInfo_Hop.Size(m) -} -func (m *Hop) XXX_DiscardUnknown() { - xxx_messageInfo_Hop.DiscardUnknown(m) -} - -var xxx_messageInfo_Hop proto.InternalMessageInfo - -func (m *Hop) GetChanId() uint64 { - if m != nil { - return m.ChanId - } - return 0 -} - -func (m *Hop) GetChanCapacity() int64 { - if m != nil { - return m.ChanCapacity - } - return 0 -} - -// Deprecated: Do not use. -func (m *Hop) GetAmtToForward() int64 { - if m != nil { - return m.AmtToForward - } - return 0 -} - -// Deprecated: Do not use. 
-func (m *Hop) GetFee() int64 { - if m != nil { - return m.Fee - } - return 0 -} - -func (m *Hop) GetExpiry() uint32 { - if m != nil { - return m.Expiry - } - return 0 -} - -func (m *Hop) GetAmtToForwardMsat() int64 { - if m != nil { - return m.AmtToForwardMsat - } - return 0 -} - -func (m *Hop) GetFeeMsat() int64 { - if m != nil { - return m.FeeMsat - } - return 0 -} - -func (m *Hop) GetPubKey() string { - if m != nil { - return m.PubKey - } - return "" -} - -func (m *Hop) GetTlvPayload() bool { - if m != nil { - return m.TlvPayload - } - return false -} - -func (m *Hop) GetMppRecord() *MPPRecord { - if m != nil { - return m.MppRecord - } - return nil -} - -func (m *Hop) GetCustomRecords() map[uint64][]byte { - if m != nil { - return m.CustomRecords - } - return nil -} - -type MPPRecord struct { - // - //A unique, random identifier used to authenticate the sender as the intended - //payer of a multi-path payment. The payment_addr must be the same for all - //subpayments, and match the payment_addr provided in the receiver's invoice. - //The same payment_addr must be used on all subpayments. - PaymentAddr []byte `protobuf:"bytes,11,opt,name=payment_addr,json=paymentAddr,proto3" json:"payment_addr,omitempty"` - // - //The total amount in milli-satoshis being sent as part of a larger multi-path - //payment. The caller is responsible for ensuring subpayments to the same node - //and payment_hash sum exactly to total_amt_msat. The same - //total_amt_msat must be used on all subpayments. 
- TotalAmtMsat int64 `protobuf:"varint,10,opt,name=total_amt_msat,json=totalAmtMsat,proto3" json:"total_amt_msat,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MPPRecord) Reset() { *m = MPPRecord{} } -func (m *MPPRecord) String() string { return proto.CompactTextString(m) } -func (*MPPRecord) ProtoMessage() {} -func (*MPPRecord) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{88} -} - -func (m *MPPRecord) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MPPRecord.Unmarshal(m, b) -} -func (m *MPPRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MPPRecord.Marshal(b, m, deterministic) -} -func (m *MPPRecord) XXX_Merge(src proto.Message) { - xxx_messageInfo_MPPRecord.Merge(m, src) -} -func (m *MPPRecord) XXX_Size() int { - return xxx_messageInfo_MPPRecord.Size(m) -} -func (m *MPPRecord) XXX_DiscardUnknown() { - xxx_messageInfo_MPPRecord.DiscardUnknown(m) -} - -var xxx_messageInfo_MPPRecord proto.InternalMessageInfo - -func (m *MPPRecord) GetPaymentAddr() []byte { - if m != nil { - return m.PaymentAddr - } - return nil -} - -func (m *MPPRecord) GetTotalAmtMsat() int64 { - if m != nil { - return m.TotalAmtMsat - } - return 0 -} - -// -//A path through the channel graph which runs over one or more channels in -//succession. This struct carries all the information required to craft the -//Sphinx onion packet, and send the payment along the first hop in the path. A -//route is only selected as valid if all the channels have sufficient capacity to -//carry the initial payment amount after fees are accounted for. -type Route struct { - // - //The cumulative (final) time lock across the entire route. This is the CLTV - //value that should be extended to the first hop in the route. 
All other hops - //will decrement the time-lock as advertised, leaving enough time for all - //hops to wait for or present the payment preimage to complete the payment. - TotalTimeLock uint32 `protobuf:"varint,1,opt,name=total_time_lock,json=totalTimeLock,proto3" json:"total_time_lock,omitempty"` - // - //The sum of the fees paid at each hop within the final route. In the case - //of a one-hop payment, this value will be zero as we don't need to pay a fee - //to ourselves. - TotalFees int64 `protobuf:"varint,2,opt,name=total_fees,json=totalFees,proto3" json:"total_fees,omitempty"` // Deprecated: Do not use. - // - //The total amount of funds required to complete a payment over this route. - //This value includes the cumulative fees at each hop. As a result, the HTLC - //extended to the first-hop in the route will need to have at least this many - //satoshis, otherwise the route will fail at an intermediate node due to an - //insufficient amount of fees. - TotalAmt int64 `protobuf:"varint,3,opt,name=total_amt,json=totalAmt,proto3" json:"total_amt,omitempty"` // Deprecated: Do not use. - // - //Contains details concerning the specific forwarding details at each hop. - Hops []*Hop `protobuf:"bytes,4,rep,name=hops,proto3" json:"hops,omitempty"` - // - //The total fees in millisatoshis. - TotalFeesMsat int64 `protobuf:"varint,5,opt,name=total_fees_msat,json=totalFeesMsat,proto3" json:"total_fees_msat,omitempty"` - // - //The total amount in millisatoshis. 
- TotalAmtMsat int64 `protobuf:"varint,6,opt,name=total_amt_msat,json=totalAmtMsat,proto3" json:"total_amt_msat,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Route) Reset() { *m = Route{} } -func (m *Route) String() string { return proto.CompactTextString(m) } -func (*Route) ProtoMessage() {} -func (*Route) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{89} -} - -func (m *Route) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Route.Unmarshal(m, b) -} -func (m *Route) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Route.Marshal(b, m, deterministic) -} -func (m *Route) XXX_Merge(src proto.Message) { - xxx_messageInfo_Route.Merge(m, src) -} -func (m *Route) XXX_Size() int { - return xxx_messageInfo_Route.Size(m) -} -func (m *Route) XXX_DiscardUnknown() { - xxx_messageInfo_Route.DiscardUnknown(m) -} - -var xxx_messageInfo_Route proto.InternalMessageInfo - -func (m *Route) GetTotalTimeLock() uint32 { - if m != nil { - return m.TotalTimeLock - } - return 0 -} - -// Deprecated: Do not use. -func (m *Route) GetTotalFees() int64 { - if m != nil { - return m.TotalFees - } - return 0 -} - -// Deprecated: Do not use. -func (m *Route) GetTotalAmt() int64 { - if m != nil { - return m.TotalAmt - } - return 0 -} - -func (m *Route) GetHops() []*Hop { - if m != nil { - return m.Hops - } - return nil -} - -func (m *Route) GetTotalFeesMsat() int64 { - if m != nil { - return m.TotalFeesMsat - } - return 0 -} - -func (m *Route) GetTotalAmtMsat() int64 { - if m != nil { - return m.TotalAmtMsat - } - return 0 -} - -type NodeInfoRequest struct { - // The 33-byte hex-encoded compressed public of the target node - PubKey string `protobuf:"bytes,1,opt,name=pub_key,json=pubKey,proto3" json:"pub_key,omitempty"` - // If true, will include all known channels associated with the node. 
- IncludeChannels bool `protobuf:"varint,2,opt,name=include_channels,json=includeChannels,proto3" json:"include_channels,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodeInfoRequest) Reset() { *m = NodeInfoRequest{} } -func (m *NodeInfoRequest) String() string { return proto.CompactTextString(m) } -func (*NodeInfoRequest) ProtoMessage() {} -func (*NodeInfoRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{90} -} - -func (m *NodeInfoRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeInfoRequest.Unmarshal(m, b) -} -func (m *NodeInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeInfoRequest.Marshal(b, m, deterministic) -} -func (m *NodeInfoRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeInfoRequest.Merge(m, src) -} -func (m *NodeInfoRequest) XXX_Size() int { - return xxx_messageInfo_NodeInfoRequest.Size(m) -} -func (m *NodeInfoRequest) XXX_DiscardUnknown() { - xxx_messageInfo_NodeInfoRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeInfoRequest proto.InternalMessageInfo - -func (m *NodeInfoRequest) GetPubKey() string { - if m != nil { - return m.PubKey - } - return "" -} - -func (m *NodeInfoRequest) GetIncludeChannels() bool { - if m != nil { - return m.IncludeChannels - } - return false -} - -type NodeInfo struct { - // - //An individual vertex/node within the channel graph. A node is - //connected to other nodes by one or more channel edges emanating from it. As - //the graph is directed, a node will also have an incoming edge attached to - //it for each outgoing edge. - Node *LightningNode `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` - // The total number of channels for the node. 
- NumChannels uint32 `protobuf:"varint,2,opt,name=num_channels,json=numChannels,proto3" json:"num_channels,omitempty"` - // The sum of all channels capacity for the node, denominated in satoshis. - TotalCapacity int64 `protobuf:"varint,3,opt,name=total_capacity,json=totalCapacity,proto3" json:"total_capacity,omitempty"` - // A list of all public channels for the node. - Channels []*ChannelEdge `protobuf:"bytes,4,rep,name=channels,proto3" json:"channels,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodeInfo) Reset() { *m = NodeInfo{} } -func (m *NodeInfo) String() string { return proto.CompactTextString(m) } -func (*NodeInfo) ProtoMessage() {} -func (*NodeInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{91} -} - -func (m *NodeInfo) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeInfo.Unmarshal(m, b) -} -func (m *NodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeInfo.Marshal(b, m, deterministic) -} -func (m *NodeInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeInfo.Merge(m, src) -} -func (m *NodeInfo) XXX_Size() int { - return xxx_messageInfo_NodeInfo.Size(m) -} -func (m *NodeInfo) XXX_DiscardUnknown() { - xxx_messageInfo_NodeInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeInfo proto.InternalMessageInfo - -func (m *NodeInfo) GetNode() *LightningNode { - if m != nil { - return m.Node - } - return nil -} - -func (m *NodeInfo) GetNumChannels() uint32 { - if m != nil { - return m.NumChannels - } - return 0 -} - -func (m *NodeInfo) GetTotalCapacity() int64 { - if m != nil { - return m.TotalCapacity - } - return 0 -} - -func (m *NodeInfo) GetChannels() []*ChannelEdge { - if m != nil { - return m.Channels - } - return nil -} - -// -//An individual vertex/node within the channel graph. A node is -//connected to other nodes by one or more channel edges emanating from it. 
As the -//graph is directed, a node will also have an incoming edge attached to it for -//each outgoing edge. -type LightningNode struct { - LastUpdate uint32 `protobuf:"varint,1,opt,name=last_update,json=lastUpdate,proto3" json:"last_update,omitempty"` - PubKey string `protobuf:"bytes,2,opt,name=pub_key,json=pubKey,proto3" json:"pub_key,omitempty"` - Alias string `protobuf:"bytes,3,opt,name=alias,proto3" json:"alias,omitempty"` - Addresses []*NodeAddress `protobuf:"bytes,4,rep,name=addresses,proto3" json:"addresses,omitempty"` - Color string `protobuf:"bytes,5,opt,name=color,proto3" json:"color,omitempty"` - Features map[uint32]*Feature `protobuf:"bytes,6,rep,name=features,proto3" json:"features,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LightningNode) Reset() { *m = LightningNode{} } -func (m *LightningNode) String() string { return proto.CompactTextString(m) } -func (*LightningNode) ProtoMessage() {} -func (*LightningNode) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{92} -} - -func (m *LightningNode) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LightningNode.Unmarshal(m, b) -} -func (m *LightningNode) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LightningNode.Marshal(b, m, deterministic) -} -func (m *LightningNode) XXX_Merge(src proto.Message) { - xxx_messageInfo_LightningNode.Merge(m, src) -} -func (m *LightningNode) XXX_Size() int { - return xxx_messageInfo_LightningNode.Size(m) -} -func (m *LightningNode) XXX_DiscardUnknown() { - xxx_messageInfo_LightningNode.DiscardUnknown(m) -} - -var xxx_messageInfo_LightningNode proto.InternalMessageInfo - -func (m *LightningNode) GetLastUpdate() uint32 { - if m != nil { - return m.LastUpdate - } - return 0 -} - -func (m *LightningNode) GetPubKey() string 
{ - if m != nil { - return m.PubKey - } - return "" -} - -func (m *LightningNode) GetAlias() string { - if m != nil { - return m.Alias - } - return "" -} - -func (m *LightningNode) GetAddresses() []*NodeAddress { - if m != nil { - return m.Addresses - } - return nil -} - -func (m *LightningNode) GetColor() string { - if m != nil { - return m.Color - } - return "" -} - -func (m *LightningNode) GetFeatures() map[uint32]*Feature { - if m != nil { - return m.Features - } - return nil -} - -type NodeAddress struct { - Network string `protobuf:"bytes,1,opt,name=network,proto3" json:"network,omitempty"` - Addr string `protobuf:"bytes,2,opt,name=addr,proto3" json:"addr,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodeAddress) Reset() { *m = NodeAddress{} } -func (m *NodeAddress) String() string { return proto.CompactTextString(m) } -func (*NodeAddress) ProtoMessage() {} -func (*NodeAddress) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{93} -} - -func (m *NodeAddress) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeAddress.Unmarshal(m, b) -} -func (m *NodeAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeAddress.Marshal(b, m, deterministic) -} -func (m *NodeAddress) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeAddress.Merge(m, src) -} -func (m *NodeAddress) XXX_Size() int { - return xxx_messageInfo_NodeAddress.Size(m) -} -func (m *NodeAddress) XXX_DiscardUnknown() { - xxx_messageInfo_NodeAddress.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeAddress proto.InternalMessageInfo - -func (m *NodeAddress) GetNetwork() string { - if m != nil { - return m.Network - } - return "" -} - -func (m *NodeAddress) GetAddr() string { - if m != nil { - return m.Addr - } - return "" -} - -type RoutingPolicy struct { - TimeLockDelta uint32 
`protobuf:"varint,1,opt,name=time_lock_delta,json=timeLockDelta,proto3" json:"time_lock_delta,omitempty"` - MinHtlc int64 `protobuf:"varint,2,opt,name=min_htlc,json=minHtlc,proto3" json:"min_htlc,omitempty"` - FeeBaseMsat int64 `protobuf:"varint,3,opt,name=fee_base_msat,json=feeBaseMsat,proto3" json:"fee_base_msat,omitempty"` - FeeRateMilliMsat int64 `protobuf:"varint,4,opt,name=fee_rate_milli_msat,json=feeRateMilliMsat,proto3" json:"fee_rate_milli_msat,omitempty"` - Disabled bool `protobuf:"varint,5,opt,name=disabled,proto3" json:"disabled,omitempty"` - MaxHtlcMsat uint64 `protobuf:"varint,6,opt,name=max_htlc_msat,json=maxHtlcMsat,proto3" json:"max_htlc_msat,omitempty"` - LastUpdate uint32 `protobuf:"varint,7,opt,name=last_update,json=lastUpdate,proto3" json:"last_update,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RoutingPolicy) Reset() { *m = RoutingPolicy{} } -func (m *RoutingPolicy) String() string { return proto.CompactTextString(m) } -func (*RoutingPolicy) ProtoMessage() {} -func (*RoutingPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{94} -} - -func (m *RoutingPolicy) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RoutingPolicy.Unmarshal(m, b) -} -func (m *RoutingPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RoutingPolicy.Marshal(b, m, deterministic) -} -func (m *RoutingPolicy) XXX_Merge(src proto.Message) { - xxx_messageInfo_RoutingPolicy.Merge(m, src) -} -func (m *RoutingPolicy) XXX_Size() int { - return xxx_messageInfo_RoutingPolicy.Size(m) -} -func (m *RoutingPolicy) XXX_DiscardUnknown() { - xxx_messageInfo_RoutingPolicy.DiscardUnknown(m) -} - -var xxx_messageInfo_RoutingPolicy proto.InternalMessageInfo - -func (m *RoutingPolicy) GetTimeLockDelta() uint32 { - if m != nil { - return m.TimeLockDelta - } - return 0 -} - -func (m *RoutingPolicy) GetMinHtlc() int64 { - 
if m != nil { - return m.MinHtlc - } - return 0 -} - -func (m *RoutingPolicy) GetFeeBaseMsat() int64 { - if m != nil { - return m.FeeBaseMsat - } - return 0 -} - -func (m *RoutingPolicy) GetFeeRateMilliMsat() int64 { - if m != nil { - return m.FeeRateMilliMsat - } - return 0 -} - -func (m *RoutingPolicy) GetDisabled() bool { - if m != nil { - return m.Disabled - } - return false -} - -func (m *RoutingPolicy) GetMaxHtlcMsat() uint64 { - if m != nil { - return m.MaxHtlcMsat - } - return 0 -} - -func (m *RoutingPolicy) GetLastUpdate() uint32 { - if m != nil { - return m.LastUpdate - } - return 0 -} - -// -//A fully authenticated channel along with all its unique attributes. -//Once an authenticated channel announcement has been processed on the network, -//then an instance of ChannelEdgeInfo encapsulating the channels attributes is -//stored. The other portions relevant to routing policy of a channel are stored -//within a ChannelEdgePolicy for each direction of the channel. -type ChannelEdge struct { - // - //The unique channel ID for the channel. The first 3 bytes are the block - //height, the next 3 the index within the block, and the last 2 bytes are the - //output index for the channel. - ChannelId uint64 `protobuf:"varint,1,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"` - ChanPoint string `protobuf:"bytes,2,opt,name=chan_point,json=chanPoint,proto3" json:"chan_point,omitempty"` - LastUpdate uint32 `protobuf:"varint,3,opt,name=last_update,json=lastUpdate,proto3" json:"last_update,omitempty"` // Deprecated: Do not use. 
- Node1Pub string `protobuf:"bytes,4,opt,name=node1_pub,json=node1Pub,proto3" json:"node1_pub,omitempty"` - Node2Pub string `protobuf:"bytes,5,opt,name=node2_pub,json=node2Pub,proto3" json:"node2_pub,omitempty"` - Capacity int64 `protobuf:"varint,6,opt,name=capacity,proto3" json:"capacity,omitempty"` - Node1Policy *RoutingPolicy `protobuf:"bytes,7,opt,name=node1_policy,json=node1Policy,proto3" json:"node1_policy,omitempty"` - Node2Policy *RoutingPolicy `protobuf:"bytes,8,opt,name=node2_policy,json=node2Policy,proto3" json:"node2_policy,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ChannelEdge) Reset() { *m = ChannelEdge{} } -func (m *ChannelEdge) String() string { return proto.CompactTextString(m) } -func (*ChannelEdge) ProtoMessage() {} -func (*ChannelEdge) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{95} -} - -func (m *ChannelEdge) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ChannelEdge.Unmarshal(m, b) -} -func (m *ChannelEdge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ChannelEdge.Marshal(b, m, deterministic) -} -func (m *ChannelEdge) XXX_Merge(src proto.Message) { - xxx_messageInfo_ChannelEdge.Merge(m, src) -} -func (m *ChannelEdge) XXX_Size() int { - return xxx_messageInfo_ChannelEdge.Size(m) -} -func (m *ChannelEdge) XXX_DiscardUnknown() { - xxx_messageInfo_ChannelEdge.DiscardUnknown(m) -} - -var xxx_messageInfo_ChannelEdge proto.InternalMessageInfo - -func (m *ChannelEdge) GetChannelId() uint64 { - if m != nil { - return m.ChannelId - } - return 0 -} - -func (m *ChannelEdge) GetChanPoint() string { - if m != nil { - return m.ChanPoint - } - return "" -} - -// Deprecated: Do not use. 
-func (m *ChannelEdge) GetLastUpdate() uint32 { - if m != nil { - return m.LastUpdate - } - return 0 -} - -func (m *ChannelEdge) GetNode1Pub() string { - if m != nil { - return m.Node1Pub - } - return "" -} - -func (m *ChannelEdge) GetNode2Pub() string { - if m != nil { - return m.Node2Pub - } - return "" -} - -func (m *ChannelEdge) GetCapacity() int64 { - if m != nil { - return m.Capacity - } - return 0 -} - -func (m *ChannelEdge) GetNode1Policy() *RoutingPolicy { - if m != nil { - return m.Node1Policy - } - return nil -} - -func (m *ChannelEdge) GetNode2Policy() *RoutingPolicy { - if m != nil { - return m.Node2Policy - } - return nil -} - -type ChannelGraphRequest struct { - // - //Whether unannounced channels are included in the response or not. If set, - //unannounced channels are included. Unannounced channels are both private - //channels, and public channels that are not yet announced to the network. - IncludeUnannounced bool `protobuf:"varint,1,opt,name=include_unannounced,json=includeUnannounced,proto3" json:"include_unannounced,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ChannelGraphRequest) Reset() { *m = ChannelGraphRequest{} } -func (m *ChannelGraphRequest) String() string { return proto.CompactTextString(m) } -func (*ChannelGraphRequest) ProtoMessage() {} -func (*ChannelGraphRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{96} -} - -func (m *ChannelGraphRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ChannelGraphRequest.Unmarshal(m, b) -} -func (m *ChannelGraphRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ChannelGraphRequest.Marshal(b, m, deterministic) -} -func (m *ChannelGraphRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ChannelGraphRequest.Merge(m, src) -} -func (m *ChannelGraphRequest) XXX_Size() int { - return 
xxx_messageInfo_ChannelGraphRequest.Size(m) -} -func (m *ChannelGraphRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ChannelGraphRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ChannelGraphRequest proto.InternalMessageInfo - -func (m *ChannelGraphRequest) GetIncludeUnannounced() bool { - if m != nil { - return m.IncludeUnannounced - } - return false -} - -// Returns a new instance of the directed channel graph. -type ChannelGraph struct { - // The list of `LightningNode`s in this channel graph - Nodes []*LightningNode `protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty"` - // The list of `ChannelEdge`s in this channel graph - Edges []*ChannelEdge `protobuf:"bytes,2,rep,name=edges,proto3" json:"edges,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ChannelGraph) Reset() { *m = ChannelGraph{} } -func (m *ChannelGraph) String() string { return proto.CompactTextString(m) } -func (*ChannelGraph) ProtoMessage() {} -func (*ChannelGraph) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{97} -} - -func (m *ChannelGraph) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ChannelGraph.Unmarshal(m, b) -} -func (m *ChannelGraph) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ChannelGraph.Marshal(b, m, deterministic) -} -func (m *ChannelGraph) XXX_Merge(src proto.Message) { - xxx_messageInfo_ChannelGraph.Merge(m, src) -} -func (m *ChannelGraph) XXX_Size() int { - return xxx_messageInfo_ChannelGraph.Size(m) -} -func (m *ChannelGraph) XXX_DiscardUnknown() { - xxx_messageInfo_ChannelGraph.DiscardUnknown(m) -} - -var xxx_messageInfo_ChannelGraph proto.InternalMessageInfo - -func (m *ChannelGraph) GetNodes() []*LightningNode { - if m != nil { - return m.Nodes - } - return nil -} - -func (m *ChannelGraph) GetEdges() []*ChannelEdge { - if m != nil { - return m.Edges - } - return nil -} - -type 
NodeMetricsRequest struct { - // The requested node metrics. - Types []NodeMetricType `protobuf:"varint,1,rep,packed,name=types,proto3,enum=lnrpc.NodeMetricType" json:"types,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodeMetricsRequest) Reset() { *m = NodeMetricsRequest{} } -func (m *NodeMetricsRequest) String() string { return proto.CompactTextString(m) } -func (*NodeMetricsRequest) ProtoMessage() {} -func (*NodeMetricsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{98} -} - -func (m *NodeMetricsRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeMetricsRequest.Unmarshal(m, b) -} -func (m *NodeMetricsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeMetricsRequest.Marshal(b, m, deterministic) -} -func (m *NodeMetricsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeMetricsRequest.Merge(m, src) -} -func (m *NodeMetricsRequest) XXX_Size() int { - return xxx_messageInfo_NodeMetricsRequest.Size(m) -} -func (m *NodeMetricsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_NodeMetricsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeMetricsRequest proto.InternalMessageInfo - -func (m *NodeMetricsRequest) GetTypes() []NodeMetricType { - if m != nil { - return m.Types - } - return nil -} - -type NodeMetricsResponse struct { - // - //Betweenness centrality is the sum of the ratio of shortest paths that pass - //through the node for each pair of nodes in the graph (not counting paths - //starting or ending at this node). - //Map of node pubkey to betweenness centrality of the node. Normalized - //values are in the [0,1] closed interval. 
- BetweennessCentrality map[string]*FloatMetric `protobuf:"bytes,1,rep,name=betweenness_centrality,json=betweennessCentrality,proto3" json:"betweenness_centrality,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodeMetricsResponse) Reset() { *m = NodeMetricsResponse{} } -func (m *NodeMetricsResponse) String() string { return proto.CompactTextString(m) } -func (*NodeMetricsResponse) ProtoMessage() {} -func (*NodeMetricsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{99} -} - -func (m *NodeMetricsResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeMetricsResponse.Unmarshal(m, b) -} -func (m *NodeMetricsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeMetricsResponse.Marshal(b, m, deterministic) -} -func (m *NodeMetricsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeMetricsResponse.Merge(m, src) -} -func (m *NodeMetricsResponse) XXX_Size() int { - return xxx_messageInfo_NodeMetricsResponse.Size(m) -} -func (m *NodeMetricsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_NodeMetricsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeMetricsResponse proto.InternalMessageInfo - -func (m *NodeMetricsResponse) GetBetweennessCentrality() map[string]*FloatMetric { - if m != nil { - return m.BetweennessCentrality - } - return nil -} - -type FloatMetric struct { - // Arbitrary float value. - Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` - // The value normalized to [0,1] or [-1,1]. 
- NormalizedValue float64 `protobuf:"fixed64,2,opt,name=normalized_value,json=normalizedValue,proto3" json:"normalized_value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FloatMetric) Reset() { *m = FloatMetric{} } -func (m *FloatMetric) String() string { return proto.CompactTextString(m) } -func (*FloatMetric) ProtoMessage() {} -func (*FloatMetric) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{100} -} - -func (m *FloatMetric) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FloatMetric.Unmarshal(m, b) -} -func (m *FloatMetric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FloatMetric.Marshal(b, m, deterministic) -} -func (m *FloatMetric) XXX_Merge(src proto.Message) { - xxx_messageInfo_FloatMetric.Merge(m, src) -} -func (m *FloatMetric) XXX_Size() int { - return xxx_messageInfo_FloatMetric.Size(m) -} -func (m *FloatMetric) XXX_DiscardUnknown() { - xxx_messageInfo_FloatMetric.DiscardUnknown(m) -} - -var xxx_messageInfo_FloatMetric proto.InternalMessageInfo - -func (m *FloatMetric) GetValue() float64 { - if m != nil { - return m.Value - } - return 0 -} - -func (m *FloatMetric) GetNormalizedValue() float64 { - if m != nil { - return m.NormalizedValue - } - return 0 -} - -type ChanInfoRequest struct { - // - //The unique channel ID for the channel. The first 3 bytes are the block - //height, the next 3 the index within the block, and the last 2 bytes are the - //output index for the channel. 
- ChanId uint64 `protobuf:"varint,1,opt,name=chan_id,json=chanId,proto3" json:"chan_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ChanInfoRequest) Reset() { *m = ChanInfoRequest{} } -func (m *ChanInfoRequest) String() string { return proto.CompactTextString(m) } -func (*ChanInfoRequest) ProtoMessage() {} -func (*ChanInfoRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{101} -} - -func (m *ChanInfoRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ChanInfoRequest.Unmarshal(m, b) -} -func (m *ChanInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ChanInfoRequest.Marshal(b, m, deterministic) -} -func (m *ChanInfoRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ChanInfoRequest.Merge(m, src) -} -func (m *ChanInfoRequest) XXX_Size() int { - return xxx_messageInfo_ChanInfoRequest.Size(m) -} -func (m *ChanInfoRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ChanInfoRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ChanInfoRequest proto.InternalMessageInfo - -func (m *ChanInfoRequest) GetChanId() uint64 { - if m != nil { - return m.ChanId - } - return 0 -} - -type NetworkInfoRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NetworkInfoRequest) Reset() { *m = NetworkInfoRequest{} } -func (m *NetworkInfoRequest) String() string { return proto.CompactTextString(m) } -func (*NetworkInfoRequest) ProtoMessage() {} -func (*NetworkInfoRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{102} -} - -func (m *NetworkInfoRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NetworkInfoRequest.Unmarshal(m, b) -} -func (m *NetworkInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return 
xxx_messageInfo_NetworkInfoRequest.Marshal(b, m, deterministic) -} -func (m *NetworkInfoRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_NetworkInfoRequest.Merge(m, src) -} -func (m *NetworkInfoRequest) XXX_Size() int { - return xxx_messageInfo_NetworkInfoRequest.Size(m) -} -func (m *NetworkInfoRequest) XXX_DiscardUnknown() { - xxx_messageInfo_NetworkInfoRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_NetworkInfoRequest proto.InternalMessageInfo - -type NetworkInfo struct { - GraphDiameter uint32 `protobuf:"varint,1,opt,name=graph_diameter,json=graphDiameter,proto3" json:"graph_diameter,omitempty"` - AvgOutDegree float64 `protobuf:"fixed64,2,opt,name=avg_out_degree,json=avgOutDegree,proto3" json:"avg_out_degree,omitempty"` - MaxOutDegree uint32 `protobuf:"varint,3,opt,name=max_out_degree,json=maxOutDegree,proto3" json:"max_out_degree,omitempty"` - NumNodes uint32 `protobuf:"varint,4,opt,name=num_nodes,json=numNodes,proto3" json:"num_nodes,omitempty"` - NumChannels uint32 `protobuf:"varint,5,opt,name=num_channels,json=numChannels,proto3" json:"num_channels,omitempty"` - TotalNetworkCapacity int64 `protobuf:"varint,6,opt,name=total_network_capacity,json=totalNetworkCapacity,proto3" json:"total_network_capacity,omitempty"` - AvgChannelSize float64 `protobuf:"fixed64,7,opt,name=avg_channel_size,json=avgChannelSize,proto3" json:"avg_channel_size,omitempty"` - MinChannelSize int64 `protobuf:"varint,8,opt,name=min_channel_size,json=minChannelSize,proto3" json:"min_channel_size,omitempty"` - MaxChannelSize int64 `protobuf:"varint,9,opt,name=max_channel_size,json=maxChannelSize,proto3" json:"max_channel_size,omitempty"` - MedianChannelSizeSat int64 `protobuf:"varint,10,opt,name=median_channel_size_sat,json=medianChannelSizeSat,proto3" json:"median_channel_size_sat,omitempty"` - // The number of edges marked as zombies. 
- NumZombieChans uint64 `protobuf:"varint,11,opt,name=num_zombie_chans,json=numZombieChans,proto3" json:"num_zombie_chans,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NetworkInfo) Reset() { *m = NetworkInfo{} } -func (m *NetworkInfo) String() string { return proto.CompactTextString(m) } -func (*NetworkInfo) ProtoMessage() {} -func (*NetworkInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{103} -} - -func (m *NetworkInfo) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NetworkInfo.Unmarshal(m, b) -} -func (m *NetworkInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NetworkInfo.Marshal(b, m, deterministic) -} -func (m *NetworkInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_NetworkInfo.Merge(m, src) -} -func (m *NetworkInfo) XXX_Size() int { - return xxx_messageInfo_NetworkInfo.Size(m) -} -func (m *NetworkInfo) XXX_DiscardUnknown() { - xxx_messageInfo_NetworkInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_NetworkInfo proto.InternalMessageInfo - -func (m *NetworkInfo) GetGraphDiameter() uint32 { - if m != nil { - return m.GraphDiameter - } - return 0 -} - -func (m *NetworkInfo) GetAvgOutDegree() float64 { - if m != nil { - return m.AvgOutDegree - } - return 0 -} - -func (m *NetworkInfo) GetMaxOutDegree() uint32 { - if m != nil { - return m.MaxOutDegree - } - return 0 -} - -func (m *NetworkInfo) GetNumNodes() uint32 { - if m != nil { - return m.NumNodes - } - return 0 -} - -func (m *NetworkInfo) GetNumChannels() uint32 { - if m != nil { - return m.NumChannels - } - return 0 -} - -func (m *NetworkInfo) GetTotalNetworkCapacity() int64 { - if m != nil { - return m.TotalNetworkCapacity - } - return 0 -} - -func (m *NetworkInfo) GetAvgChannelSize() float64 { - if m != nil { - return m.AvgChannelSize - } - return 0 -} - -func (m *NetworkInfo) GetMinChannelSize() int64 { - if m != nil { - 
return m.MinChannelSize - } - return 0 -} - -func (m *NetworkInfo) GetMaxChannelSize() int64 { - if m != nil { - return m.MaxChannelSize - } - return 0 -} - -func (m *NetworkInfo) GetMedianChannelSizeSat() int64 { - if m != nil { - return m.MedianChannelSizeSat - } - return 0 -} - -func (m *NetworkInfo) GetNumZombieChans() uint64 { - if m != nil { - return m.NumZombieChans - } - return 0 -} - -type StopRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StopRequest) Reset() { *m = StopRequest{} } -func (m *StopRequest) String() string { return proto.CompactTextString(m) } -func (*StopRequest) ProtoMessage() {} -func (*StopRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{104} -} - -func (m *StopRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StopRequest.Unmarshal(m, b) -} -func (m *StopRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StopRequest.Marshal(b, m, deterministic) -} -func (m *StopRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_StopRequest.Merge(m, src) -} -func (m *StopRequest) XXX_Size() int { - return xxx_messageInfo_StopRequest.Size(m) -} -func (m *StopRequest) XXX_DiscardUnknown() { - xxx_messageInfo_StopRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_StopRequest proto.InternalMessageInfo - -type StopResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StopResponse) Reset() { *m = StopResponse{} } -func (m *StopResponse) String() string { return proto.CompactTextString(m) } -func (*StopResponse) ProtoMessage() {} -func (*StopResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{105} -} - -func (m *StopResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StopResponse.Unmarshal(m, b) -} -func (m 
*StopResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StopResponse.Marshal(b, m, deterministic) -} -func (m *StopResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_StopResponse.Merge(m, src) -} -func (m *StopResponse) XXX_Size() int { - return xxx_messageInfo_StopResponse.Size(m) -} -func (m *StopResponse) XXX_DiscardUnknown() { - xxx_messageInfo_StopResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_StopResponse proto.InternalMessageInfo - -type GraphTopologySubscription struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GraphTopologySubscription) Reset() { *m = GraphTopologySubscription{} } -func (m *GraphTopologySubscription) String() string { return proto.CompactTextString(m) } -func (*GraphTopologySubscription) ProtoMessage() {} -func (*GraphTopologySubscription) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{106} -} - -func (m *GraphTopologySubscription) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GraphTopologySubscription.Unmarshal(m, b) -} -func (m *GraphTopologySubscription) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GraphTopologySubscription.Marshal(b, m, deterministic) -} -func (m *GraphTopologySubscription) XXX_Merge(src proto.Message) { - xxx_messageInfo_GraphTopologySubscription.Merge(m, src) -} -func (m *GraphTopologySubscription) XXX_Size() int { - return xxx_messageInfo_GraphTopologySubscription.Size(m) -} -func (m *GraphTopologySubscription) XXX_DiscardUnknown() { - xxx_messageInfo_GraphTopologySubscription.DiscardUnknown(m) -} - -var xxx_messageInfo_GraphTopologySubscription proto.InternalMessageInfo - -type GraphTopologyUpdate struct { - NodeUpdates []*NodeUpdate `protobuf:"bytes,1,rep,name=node_updates,json=nodeUpdates,proto3" json:"node_updates,omitempty"` - ChannelUpdates []*ChannelEdgeUpdate 
`protobuf:"bytes,2,rep,name=channel_updates,json=channelUpdates,proto3" json:"channel_updates,omitempty"` - ClosedChans []*ClosedChannelUpdate `protobuf:"bytes,3,rep,name=closed_chans,json=closedChans,proto3" json:"closed_chans,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GraphTopologyUpdate) Reset() { *m = GraphTopologyUpdate{} } -func (m *GraphTopologyUpdate) String() string { return proto.CompactTextString(m) } -func (*GraphTopologyUpdate) ProtoMessage() {} -func (*GraphTopologyUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{107} -} - -func (m *GraphTopologyUpdate) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GraphTopologyUpdate.Unmarshal(m, b) -} -func (m *GraphTopologyUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GraphTopologyUpdate.Marshal(b, m, deterministic) -} -func (m *GraphTopologyUpdate) XXX_Merge(src proto.Message) { - xxx_messageInfo_GraphTopologyUpdate.Merge(m, src) -} -func (m *GraphTopologyUpdate) XXX_Size() int { - return xxx_messageInfo_GraphTopologyUpdate.Size(m) -} -func (m *GraphTopologyUpdate) XXX_DiscardUnknown() { - xxx_messageInfo_GraphTopologyUpdate.DiscardUnknown(m) -} - -var xxx_messageInfo_GraphTopologyUpdate proto.InternalMessageInfo - -func (m *GraphTopologyUpdate) GetNodeUpdates() []*NodeUpdate { - if m != nil { - return m.NodeUpdates - } - return nil -} - -func (m *GraphTopologyUpdate) GetChannelUpdates() []*ChannelEdgeUpdate { - if m != nil { - return m.ChannelUpdates - } - return nil -} - -func (m *GraphTopologyUpdate) GetClosedChans() []*ClosedChannelUpdate { - if m != nil { - return m.ClosedChans - } - return nil -} - -type NodeUpdate struct { - Addresses []string `protobuf:"bytes,1,rep,name=addresses,proto3" json:"addresses,omitempty"` - IdentityKey string `protobuf:"bytes,2,opt,name=identity_key,json=identityKey,proto3" 
json:"identity_key,omitempty"` - GlobalFeatures []byte `protobuf:"bytes,3,opt,name=global_features,json=globalFeatures,proto3" json:"global_features,omitempty"` - Alias string `protobuf:"bytes,4,opt,name=alias,proto3" json:"alias,omitempty"` - Color string `protobuf:"bytes,5,opt,name=color,proto3" json:"color,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodeUpdate) Reset() { *m = NodeUpdate{} } -func (m *NodeUpdate) String() string { return proto.CompactTextString(m) } -func (*NodeUpdate) ProtoMessage() {} -func (*NodeUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{108} -} - -func (m *NodeUpdate) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeUpdate.Unmarshal(m, b) -} -func (m *NodeUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeUpdate.Marshal(b, m, deterministic) -} -func (m *NodeUpdate) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeUpdate.Merge(m, src) -} -func (m *NodeUpdate) XXX_Size() int { - return xxx_messageInfo_NodeUpdate.Size(m) -} -func (m *NodeUpdate) XXX_DiscardUnknown() { - xxx_messageInfo_NodeUpdate.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeUpdate proto.InternalMessageInfo - -func (m *NodeUpdate) GetAddresses() []string { - if m != nil { - return m.Addresses - } - return nil -} - -func (m *NodeUpdate) GetIdentityKey() string { - if m != nil { - return m.IdentityKey - } - return "" -} - -func (m *NodeUpdate) GetGlobalFeatures() []byte { - if m != nil { - return m.GlobalFeatures - } - return nil -} - -func (m *NodeUpdate) GetAlias() string { - if m != nil { - return m.Alias - } - return "" -} - -func (m *NodeUpdate) GetColor() string { - if m != nil { - return m.Color - } - return "" -} - -type ChannelEdgeUpdate struct { - // - //The unique channel ID for the channel. 
The first 3 bytes are the block - //height, the next 3 the index within the block, and the last 2 bytes are the - //output index for the channel. - ChanId uint64 `protobuf:"varint,1,opt,name=chan_id,json=chanId,proto3" json:"chan_id,omitempty"` - ChanPoint *ChannelPoint `protobuf:"bytes,2,opt,name=chan_point,json=chanPoint,proto3" json:"chan_point,omitempty"` - Capacity int64 `protobuf:"varint,3,opt,name=capacity,proto3" json:"capacity,omitempty"` - RoutingPolicy *RoutingPolicy `protobuf:"bytes,4,opt,name=routing_policy,json=routingPolicy,proto3" json:"routing_policy,omitempty"` - AdvertisingNode string `protobuf:"bytes,5,opt,name=advertising_node,json=advertisingNode,proto3" json:"advertising_node,omitempty"` - ConnectingNode string `protobuf:"bytes,6,opt,name=connecting_node,json=connectingNode,proto3" json:"connecting_node,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ChannelEdgeUpdate) Reset() { *m = ChannelEdgeUpdate{} } -func (m *ChannelEdgeUpdate) String() string { return proto.CompactTextString(m) } -func (*ChannelEdgeUpdate) ProtoMessage() {} -func (*ChannelEdgeUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{109} -} - -func (m *ChannelEdgeUpdate) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ChannelEdgeUpdate.Unmarshal(m, b) -} -func (m *ChannelEdgeUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ChannelEdgeUpdate.Marshal(b, m, deterministic) -} -func (m *ChannelEdgeUpdate) XXX_Merge(src proto.Message) { - xxx_messageInfo_ChannelEdgeUpdate.Merge(m, src) -} -func (m *ChannelEdgeUpdate) XXX_Size() int { - return xxx_messageInfo_ChannelEdgeUpdate.Size(m) -} -func (m *ChannelEdgeUpdate) XXX_DiscardUnknown() { - xxx_messageInfo_ChannelEdgeUpdate.DiscardUnknown(m) -} - -var xxx_messageInfo_ChannelEdgeUpdate proto.InternalMessageInfo - -func (m *ChannelEdgeUpdate) GetChanId() 
uint64 { - if m != nil { - return m.ChanId - } - return 0 -} - -func (m *ChannelEdgeUpdate) GetChanPoint() *ChannelPoint { - if m != nil { - return m.ChanPoint - } - return nil -} - -func (m *ChannelEdgeUpdate) GetCapacity() int64 { - if m != nil { - return m.Capacity - } - return 0 -} - -func (m *ChannelEdgeUpdate) GetRoutingPolicy() *RoutingPolicy { - if m != nil { - return m.RoutingPolicy - } - return nil -} - -func (m *ChannelEdgeUpdate) GetAdvertisingNode() string { - if m != nil { - return m.AdvertisingNode - } - return "" -} - -func (m *ChannelEdgeUpdate) GetConnectingNode() string { - if m != nil { - return m.ConnectingNode - } - return "" -} - -type ClosedChannelUpdate struct { - // - //The unique channel ID for the channel. The first 3 bytes are the block - //height, the next 3 the index within the block, and the last 2 bytes are the - //output index for the channel. - ChanId uint64 `protobuf:"varint,1,opt,name=chan_id,json=chanId,proto3" json:"chan_id,omitempty"` - Capacity int64 `protobuf:"varint,2,opt,name=capacity,proto3" json:"capacity,omitempty"` - ClosedHeight uint32 `protobuf:"varint,3,opt,name=closed_height,json=closedHeight,proto3" json:"closed_height,omitempty"` - ChanPoint *ChannelPoint `protobuf:"bytes,4,opt,name=chan_point,json=chanPoint,proto3" json:"chan_point,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ClosedChannelUpdate) Reset() { *m = ClosedChannelUpdate{} } -func (m *ClosedChannelUpdate) String() string { return proto.CompactTextString(m) } -func (*ClosedChannelUpdate) ProtoMessage() {} -func (*ClosedChannelUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{110} -} - -func (m *ClosedChannelUpdate) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ClosedChannelUpdate.Unmarshal(m, b) -} -func (m *ClosedChannelUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return 
xxx_messageInfo_ClosedChannelUpdate.Marshal(b, m, deterministic) -} -func (m *ClosedChannelUpdate) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClosedChannelUpdate.Merge(m, src) -} -func (m *ClosedChannelUpdate) XXX_Size() int { - return xxx_messageInfo_ClosedChannelUpdate.Size(m) -} -func (m *ClosedChannelUpdate) XXX_DiscardUnknown() { - xxx_messageInfo_ClosedChannelUpdate.DiscardUnknown(m) -} - -var xxx_messageInfo_ClosedChannelUpdate proto.InternalMessageInfo - -func (m *ClosedChannelUpdate) GetChanId() uint64 { - if m != nil { - return m.ChanId - } - return 0 -} - -func (m *ClosedChannelUpdate) GetCapacity() int64 { - if m != nil { - return m.Capacity - } - return 0 -} - -func (m *ClosedChannelUpdate) GetClosedHeight() uint32 { - if m != nil { - return m.ClosedHeight - } - return 0 -} - -func (m *ClosedChannelUpdate) GetChanPoint() *ChannelPoint { - if m != nil { - return m.ChanPoint - } - return nil -} - -type HopHint struct { - // The public key of the node at the start of the channel. - NodeId string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` - // The unique identifier of the channel. - ChanId uint64 `protobuf:"varint,2,opt,name=chan_id,json=chanId,proto3" json:"chan_id,omitempty"` - // The base fee of the channel denominated in millisatoshis. - FeeBaseMsat uint32 `protobuf:"varint,3,opt,name=fee_base_msat,json=feeBaseMsat,proto3" json:"fee_base_msat,omitempty"` - // - //The fee rate of the channel for sending one satoshi across it denominated in - //millionths of a satoshi. - FeeProportionalMillionths uint32 `protobuf:"varint,4,opt,name=fee_proportional_millionths,json=feeProportionalMillionths,proto3" json:"fee_proportional_millionths,omitempty"` - // The time-lock delta of the channel. 
- CltvExpiryDelta uint32 `protobuf:"varint,5,opt,name=cltv_expiry_delta,json=cltvExpiryDelta,proto3" json:"cltv_expiry_delta,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *HopHint) Reset() { *m = HopHint{} } -func (m *HopHint) String() string { return proto.CompactTextString(m) } -func (*HopHint) ProtoMessage() {} -func (*HopHint) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{111} -} - -func (m *HopHint) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_HopHint.Unmarshal(m, b) -} -func (m *HopHint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_HopHint.Marshal(b, m, deterministic) -} -func (m *HopHint) XXX_Merge(src proto.Message) { - xxx_messageInfo_HopHint.Merge(m, src) -} -func (m *HopHint) XXX_Size() int { - return xxx_messageInfo_HopHint.Size(m) -} -func (m *HopHint) XXX_DiscardUnknown() { - xxx_messageInfo_HopHint.DiscardUnknown(m) -} - -var xxx_messageInfo_HopHint proto.InternalMessageInfo - -func (m *HopHint) GetNodeId() string { - if m != nil { - return m.NodeId - } - return "" -} - -func (m *HopHint) GetChanId() uint64 { - if m != nil { - return m.ChanId - } - return 0 -} - -func (m *HopHint) GetFeeBaseMsat() uint32 { - if m != nil { - return m.FeeBaseMsat - } - return 0 -} - -func (m *HopHint) GetFeeProportionalMillionths() uint32 { - if m != nil { - return m.FeeProportionalMillionths - } - return 0 -} - -func (m *HopHint) GetCltvExpiryDelta() uint32 { - if m != nil { - return m.CltvExpiryDelta - } - return 0 -} - -type RouteHint struct { - // - //A list of hop hints that when chained together can assist in reaching a - //specific destination. 
- HopHints []*HopHint `protobuf:"bytes,1,rep,name=hop_hints,json=hopHints,proto3" json:"hop_hints,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RouteHint) Reset() { *m = RouteHint{} } -func (m *RouteHint) String() string { return proto.CompactTextString(m) } -func (*RouteHint) ProtoMessage() {} -func (*RouteHint) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{112} -} - -func (m *RouteHint) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RouteHint.Unmarshal(m, b) -} -func (m *RouteHint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RouteHint.Marshal(b, m, deterministic) -} -func (m *RouteHint) XXX_Merge(src proto.Message) { - xxx_messageInfo_RouteHint.Merge(m, src) -} -func (m *RouteHint) XXX_Size() int { - return xxx_messageInfo_RouteHint.Size(m) -} -func (m *RouteHint) XXX_DiscardUnknown() { - xxx_messageInfo_RouteHint.DiscardUnknown(m) -} - -var xxx_messageInfo_RouteHint proto.InternalMessageInfo - -func (m *RouteHint) GetHopHints() []*HopHint { - if m != nil { - return m.HopHints - } - return nil -} - -type Invoice struct { - // - //An optional memo to attach along with the invoice. Used for record keeping - //purposes for the invoice's creator, and will also be set in the description - //field of the encoded payment request if the description_hash field is not - //being used. - Memo string `protobuf:"bytes,1,opt,name=memo,proto3" json:"memo,omitempty"` - // - //The hex-encoded preimage (32 byte) which will allow settling an incoming - //HTLC payable to this preimage. When using REST, this field must be encoded - //as base64. - RPreimage []byte `protobuf:"bytes,3,opt,name=r_preimage,json=rPreimage,proto3" json:"r_preimage,omitempty"` - // - //The hash of the preimage. When using REST, this field must be encoded as - //base64. 
- RHash []byte `protobuf:"bytes,4,opt,name=r_hash,json=rHash,proto3" json:"r_hash,omitempty"` - // - //The value of this invoice in satoshis - // - //The fields value and value_msat are mutually exclusive. - Value int64 `protobuf:"varint,5,opt,name=value,proto3" json:"value,omitempty"` - // - //The value of this invoice in millisatoshis - // - //The fields value and value_msat are mutually exclusive. - ValueMsat int64 `protobuf:"varint,23,opt,name=value_msat,json=valueMsat,proto3" json:"value_msat,omitempty"` - // Whether this invoice has been fulfilled - Settled bool `protobuf:"varint,6,opt,name=settled,proto3" json:"settled,omitempty"` // Deprecated: Do not use. - // When this invoice was created - CreationDate int64 `protobuf:"varint,7,opt,name=creation_date,json=creationDate,proto3" json:"creation_date,omitempty"` - // When this invoice was settled - SettleDate int64 `protobuf:"varint,8,opt,name=settle_date,json=settleDate,proto3" json:"settle_date,omitempty"` - // - //A bare-bones invoice for a payment within the Lightning Network. With the - //details of the invoice, the sender has all the data necessary to send a - //payment to the recipient. - PaymentRequest string `protobuf:"bytes,9,opt,name=payment_request,json=paymentRequest,proto3" json:"payment_request,omitempty"` - // - //Hash (SHA-256) of a description of the payment. Used if the description of - //payment (memo) is too long to naturally fit within the description field - //of an encoded payment request. When using REST, this field must be encoded - //as base64. - DescriptionHash []byte `protobuf:"bytes,10,opt,name=description_hash,json=descriptionHash,proto3" json:"description_hash,omitempty"` - // Payment request expiry time in seconds. Default is 3600 (1 hour). - Expiry int64 `protobuf:"varint,11,opt,name=expiry,proto3" json:"expiry,omitempty"` - // Fallback on-chain address. 
- FallbackAddr string `protobuf:"bytes,12,opt,name=fallback_addr,json=fallbackAddr,proto3" json:"fallback_addr,omitempty"` - // Delta to use for the time-lock of the CLTV extended to the final hop. - CltvExpiry uint64 `protobuf:"varint,13,opt,name=cltv_expiry,json=cltvExpiry,proto3" json:"cltv_expiry,omitempty"` - // - //Route hints that can each be individually used to assist in reaching the - //invoice's destination. - RouteHints []*RouteHint `protobuf:"bytes,14,rep,name=route_hints,json=routeHints,proto3" json:"route_hints,omitempty"` - // Whether this invoice should include routing hints for private channels. - Private bool `protobuf:"varint,15,opt,name=private,proto3" json:"private,omitempty"` - // - //The "add" index of this invoice. Each newly created invoice will increment - //this index making it monotonically increasing. Callers to the - //SubscribeInvoices call can use this to instantly get notified of all added - //invoices with an add_index greater than this one. - AddIndex uint64 `protobuf:"varint,16,opt,name=add_index,json=addIndex,proto3" json:"add_index,omitempty"` - // - //The "settle" index of this invoice. Each newly settled invoice will - //increment this index making it monotonically increasing. Callers to the - //SubscribeInvoices call can use this to instantly get notified of all - //settled invoices with an settle_index greater than this one. - SettleIndex uint64 `protobuf:"varint,17,opt,name=settle_index,json=settleIndex,proto3" json:"settle_index,omitempty"` - // Deprecated, use amt_paid_sat or amt_paid_msat. - AmtPaid int64 `protobuf:"varint,18,opt,name=amt_paid,json=amtPaid,proto3" json:"amt_paid,omitempty"` // Deprecated: Do not use. - // - //The amount that was accepted for this invoice, in satoshis. This will ONLY - //be set if this invoice has been settled. We provide this field as if the - //invoice was created with a zero value, then we need to record what amount - //was ultimately accepted. 
Additionally, it's possible that the sender paid - //MORE that was specified in the original invoice. So we'll record that here - //as well. - AmtPaidSat int64 `protobuf:"varint,19,opt,name=amt_paid_sat,json=amtPaidSat,proto3" json:"amt_paid_sat,omitempty"` - // - //The amount that was accepted for this invoice, in millisatoshis. This will - //ONLY be set if this invoice has been settled. We provide this field as if - //the invoice was created with a zero value, then we need to record what - //amount was ultimately accepted. Additionally, it's possible that the sender - //paid MORE that was specified in the original invoice. So we'll record that - //here as well. - AmtPaidMsat int64 `protobuf:"varint,20,opt,name=amt_paid_msat,json=amtPaidMsat,proto3" json:"amt_paid_msat,omitempty"` - // - //The state the invoice is in. - State Invoice_InvoiceState `protobuf:"varint,21,opt,name=state,proto3,enum=lnrpc.Invoice_InvoiceState" json:"state,omitempty"` - // List of HTLCs paying to this invoice [EXPERIMENTAL]. - Htlcs []*InvoiceHTLC `protobuf:"bytes,22,rep,name=htlcs,proto3" json:"htlcs,omitempty"` - // List of features advertised on the invoice. - Features map[uint32]*Feature `protobuf:"bytes,24,rep,name=features,proto3" json:"features,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // - //Indicates if this invoice was a spontaneous payment that arrived via keysend - //[EXPERIMENTAL]. 
- IsKeysend bool `protobuf:"varint,25,opt,name=is_keysend,json=isKeysend,proto3" json:"is_keysend,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Invoice) Reset() { *m = Invoice{} } -func (m *Invoice) String() string { return proto.CompactTextString(m) } -func (*Invoice) ProtoMessage() {} -func (*Invoice) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{113} -} - -func (m *Invoice) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Invoice.Unmarshal(m, b) -} -func (m *Invoice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Invoice.Marshal(b, m, deterministic) -} -func (m *Invoice) XXX_Merge(src proto.Message) { - xxx_messageInfo_Invoice.Merge(m, src) -} -func (m *Invoice) XXX_Size() int { - return xxx_messageInfo_Invoice.Size(m) -} -func (m *Invoice) XXX_DiscardUnknown() { - xxx_messageInfo_Invoice.DiscardUnknown(m) -} - -var xxx_messageInfo_Invoice proto.InternalMessageInfo - -func (m *Invoice) GetMemo() string { - if m != nil { - return m.Memo - } - return "" -} - -func (m *Invoice) GetRPreimage() []byte { - if m != nil { - return m.RPreimage - } - return nil -} - -func (m *Invoice) GetRHash() []byte { - if m != nil { - return m.RHash - } - return nil -} - -func (m *Invoice) GetValue() int64 { - if m != nil { - return m.Value - } - return 0 -} - -func (m *Invoice) GetValueMsat() int64 { - if m != nil { - return m.ValueMsat - } - return 0 -} - -// Deprecated: Do not use. 
-func (m *Invoice) GetSettled() bool { - if m != nil { - return m.Settled - } - return false -} - -func (m *Invoice) GetCreationDate() int64 { - if m != nil { - return m.CreationDate - } - return 0 -} - -func (m *Invoice) GetSettleDate() int64 { - if m != nil { - return m.SettleDate - } - return 0 -} - -func (m *Invoice) GetPaymentRequest() string { - if m != nil { - return m.PaymentRequest - } - return "" -} - -func (m *Invoice) GetDescriptionHash() []byte { - if m != nil { - return m.DescriptionHash - } - return nil -} - -func (m *Invoice) GetExpiry() int64 { - if m != nil { - return m.Expiry - } - return 0 -} - -func (m *Invoice) GetFallbackAddr() string { - if m != nil { - return m.FallbackAddr - } - return "" -} - -func (m *Invoice) GetCltvExpiry() uint64 { - if m != nil { - return m.CltvExpiry - } - return 0 -} - -func (m *Invoice) GetRouteHints() []*RouteHint { - if m != nil { - return m.RouteHints - } - return nil -} - -func (m *Invoice) GetPrivate() bool { - if m != nil { - return m.Private - } - return false -} - -func (m *Invoice) GetAddIndex() uint64 { - if m != nil { - return m.AddIndex - } - return 0 -} - -func (m *Invoice) GetSettleIndex() uint64 { - if m != nil { - return m.SettleIndex - } - return 0 -} - -// Deprecated: Do not use. 
-func (m *Invoice) GetAmtPaid() int64 { - if m != nil { - return m.AmtPaid - } - return 0 -} - -func (m *Invoice) GetAmtPaidSat() int64 { - if m != nil { - return m.AmtPaidSat - } - return 0 -} - -func (m *Invoice) GetAmtPaidMsat() int64 { - if m != nil { - return m.AmtPaidMsat - } - return 0 -} - -func (m *Invoice) GetState() Invoice_InvoiceState { - if m != nil { - return m.State - } - return Invoice_OPEN -} - -func (m *Invoice) GetHtlcs() []*InvoiceHTLC { - if m != nil { - return m.Htlcs - } - return nil -} - -func (m *Invoice) GetFeatures() map[uint32]*Feature { - if m != nil { - return m.Features - } - return nil -} - -func (m *Invoice) GetIsKeysend() bool { - if m != nil { - return m.IsKeysend - } - return false -} - -// Details of an HTLC that paid to an invoice -type InvoiceHTLC struct { - // Short channel id over which the htlc was received. - ChanId uint64 `protobuf:"varint,1,opt,name=chan_id,json=chanId,proto3" json:"chan_id,omitempty"` - // Index identifying the htlc on the channel. - HtlcIndex uint64 `protobuf:"varint,2,opt,name=htlc_index,json=htlcIndex,proto3" json:"htlc_index,omitempty"` - // The amount of the htlc in msat. - AmtMsat uint64 `protobuf:"varint,3,opt,name=amt_msat,json=amtMsat,proto3" json:"amt_msat,omitempty"` - // Block height at which this htlc was accepted. - AcceptHeight int32 `protobuf:"varint,4,opt,name=accept_height,json=acceptHeight,proto3" json:"accept_height,omitempty"` - // Time at which this htlc was accepted. - AcceptTime int64 `protobuf:"varint,5,opt,name=accept_time,json=acceptTime,proto3" json:"accept_time,omitempty"` - // Time at which this htlc was settled or canceled. - ResolveTime int64 `protobuf:"varint,6,opt,name=resolve_time,json=resolveTime,proto3" json:"resolve_time,omitempty"` - // Block height at which this htlc expires. - ExpiryHeight int32 `protobuf:"varint,7,opt,name=expiry_height,json=expiryHeight,proto3" json:"expiry_height,omitempty"` - // Current state the htlc is in. 
- State InvoiceHTLCState `protobuf:"varint,8,opt,name=state,proto3,enum=lnrpc.InvoiceHTLCState" json:"state,omitempty"` - // Custom tlv records. - CustomRecords map[uint64][]byte `protobuf:"bytes,9,rep,name=custom_records,json=customRecords,proto3" json:"custom_records,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // The total amount of the mpp payment in msat. - MppTotalAmtMsat uint64 `protobuf:"varint,10,opt,name=mpp_total_amt_msat,json=mppTotalAmtMsat,proto3" json:"mpp_total_amt_msat,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *InvoiceHTLC) Reset() { *m = InvoiceHTLC{} } -func (m *InvoiceHTLC) String() string { return proto.CompactTextString(m) } -func (*InvoiceHTLC) ProtoMessage() {} -func (*InvoiceHTLC) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{114} -} - -func (m *InvoiceHTLC) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_InvoiceHTLC.Unmarshal(m, b) -} -func (m *InvoiceHTLC) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_InvoiceHTLC.Marshal(b, m, deterministic) -} -func (m *InvoiceHTLC) XXX_Merge(src proto.Message) { - xxx_messageInfo_InvoiceHTLC.Merge(m, src) -} -func (m *InvoiceHTLC) XXX_Size() int { - return xxx_messageInfo_InvoiceHTLC.Size(m) -} -func (m *InvoiceHTLC) XXX_DiscardUnknown() { - xxx_messageInfo_InvoiceHTLC.DiscardUnknown(m) -} - -var xxx_messageInfo_InvoiceHTLC proto.InternalMessageInfo - -func (m *InvoiceHTLC) GetChanId() uint64 { - if m != nil { - return m.ChanId - } - return 0 -} - -func (m *InvoiceHTLC) GetHtlcIndex() uint64 { - if m != nil { - return m.HtlcIndex - } - return 0 -} - -func (m *InvoiceHTLC) GetAmtMsat() uint64 { - if m != nil { - return m.AmtMsat - } - return 0 -} - -func (m *InvoiceHTLC) GetAcceptHeight() int32 { - if m != nil { - return m.AcceptHeight - } - return 0 -} - -func (m 
*InvoiceHTLC) GetAcceptTime() int64 { - if m != nil { - return m.AcceptTime - } - return 0 -} - -func (m *InvoiceHTLC) GetResolveTime() int64 { - if m != nil { - return m.ResolveTime - } - return 0 -} - -func (m *InvoiceHTLC) GetExpiryHeight() int32 { - if m != nil { - return m.ExpiryHeight - } - return 0 -} - -func (m *InvoiceHTLC) GetState() InvoiceHTLCState { - if m != nil { - return m.State - } - return InvoiceHTLCState_ACCEPTED -} - -func (m *InvoiceHTLC) GetCustomRecords() map[uint64][]byte { - if m != nil { - return m.CustomRecords - } - return nil -} - -func (m *InvoiceHTLC) GetMppTotalAmtMsat() uint64 { - if m != nil { - return m.MppTotalAmtMsat - } - return 0 -} - -type AddInvoiceResponse struct { - RHash []byte `protobuf:"bytes,1,opt,name=r_hash,json=rHash,proto3" json:"r_hash,omitempty"` - // - //A bare-bones invoice for a payment within the Lightning Network. With the - //details of the invoice, the sender has all the data necessary to send a - //payment to the recipient. - PaymentRequest string `protobuf:"bytes,2,opt,name=payment_request,json=paymentRequest,proto3" json:"payment_request,omitempty"` - // - //The "add" index of this invoice. Each newly created invoice will increment - //this index making it monotonically increasing. Callers to the - //SubscribeInvoices call can use this to instantly get notified of all added - //invoices with an add_index greater than this one. 
- AddIndex uint64 `protobuf:"varint,16,opt,name=add_index,json=addIndex,proto3" json:"add_index,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AddInvoiceResponse) Reset() { *m = AddInvoiceResponse{} } -func (m *AddInvoiceResponse) String() string { return proto.CompactTextString(m) } -func (*AddInvoiceResponse) ProtoMessage() {} -func (*AddInvoiceResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{115} -} - -func (m *AddInvoiceResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_AddInvoiceResponse.Unmarshal(m, b) -} -func (m *AddInvoiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AddInvoiceResponse.Marshal(b, m, deterministic) -} -func (m *AddInvoiceResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AddInvoiceResponse.Merge(m, src) -} -func (m *AddInvoiceResponse) XXX_Size() int { - return xxx_messageInfo_AddInvoiceResponse.Size(m) -} -func (m *AddInvoiceResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AddInvoiceResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_AddInvoiceResponse proto.InternalMessageInfo - -func (m *AddInvoiceResponse) GetRHash() []byte { - if m != nil { - return m.RHash - } - return nil -} - -func (m *AddInvoiceResponse) GetPaymentRequest() string { - if m != nil { - return m.PaymentRequest - } - return "" -} - -func (m *AddInvoiceResponse) GetAddIndex() uint64 { - if m != nil { - return m.AddIndex - } - return 0 -} - -type PaymentHash struct { - // - //The hex-encoded payment hash of the invoice to be looked up. The passed - //payment hash must be exactly 32 bytes, otherwise an error is returned. - //Deprecated now that the REST gateway supports base64 encoding of bytes - //fields. - RHashStr string `protobuf:"bytes,1,opt,name=r_hash_str,json=rHashStr,proto3" json:"r_hash_str,omitempty"` // Deprecated: Do not use. 
- // - //The payment hash of the invoice to be looked up. When using REST, this field - //must be encoded as base64. - RHash []byte `protobuf:"bytes,2,opt,name=r_hash,json=rHash,proto3" json:"r_hash,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PaymentHash) Reset() { *m = PaymentHash{} } -func (m *PaymentHash) String() string { return proto.CompactTextString(m) } -func (*PaymentHash) ProtoMessage() {} -func (*PaymentHash) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{116} -} - -func (m *PaymentHash) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PaymentHash.Unmarshal(m, b) -} -func (m *PaymentHash) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PaymentHash.Marshal(b, m, deterministic) -} -func (m *PaymentHash) XXX_Merge(src proto.Message) { - xxx_messageInfo_PaymentHash.Merge(m, src) -} -func (m *PaymentHash) XXX_Size() int { - return xxx_messageInfo_PaymentHash.Size(m) -} -func (m *PaymentHash) XXX_DiscardUnknown() { - xxx_messageInfo_PaymentHash.DiscardUnknown(m) -} - -var xxx_messageInfo_PaymentHash proto.InternalMessageInfo - -// Deprecated: Do not use. -func (m *PaymentHash) GetRHashStr() string { - if m != nil { - return m.RHashStr - } - return "" -} - -func (m *PaymentHash) GetRHash() []byte { - if m != nil { - return m.RHash - } - return nil -} - -type ListInvoiceRequest struct { - // - //If set, only invoices that are not settled and not canceled will be returned - //in the response. - PendingOnly bool `protobuf:"varint,1,opt,name=pending_only,json=pendingOnly,proto3" json:"pending_only,omitempty"` - // - //The index of an invoice that will be used as either the start or end of a - //query to determine which invoices should be returned in the response. 
- IndexOffset uint64 `protobuf:"varint,4,opt,name=index_offset,json=indexOffset,proto3" json:"index_offset,omitempty"` - // The max number of invoices to return in the response to this query. - NumMaxInvoices uint64 `protobuf:"varint,5,opt,name=num_max_invoices,json=numMaxInvoices,proto3" json:"num_max_invoices,omitempty"` - // - //If set, the invoices returned will result from seeking backwards from the - //specified index offset. This can be used to paginate backwards. - Reversed bool `protobuf:"varint,6,opt,name=reversed,proto3" json:"reversed,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListInvoiceRequest) Reset() { *m = ListInvoiceRequest{} } -func (m *ListInvoiceRequest) String() string { return proto.CompactTextString(m) } -func (*ListInvoiceRequest) ProtoMessage() {} -func (*ListInvoiceRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{117} -} - -func (m *ListInvoiceRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListInvoiceRequest.Unmarshal(m, b) -} -func (m *ListInvoiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListInvoiceRequest.Marshal(b, m, deterministic) -} -func (m *ListInvoiceRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListInvoiceRequest.Merge(m, src) -} -func (m *ListInvoiceRequest) XXX_Size() int { - return xxx_messageInfo_ListInvoiceRequest.Size(m) -} -func (m *ListInvoiceRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ListInvoiceRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ListInvoiceRequest proto.InternalMessageInfo - -func (m *ListInvoiceRequest) GetPendingOnly() bool { - if m != nil { - return m.PendingOnly - } - return false -} - -func (m *ListInvoiceRequest) GetIndexOffset() uint64 { - if m != nil { - return m.IndexOffset - } - return 0 -} - -func (m *ListInvoiceRequest) GetNumMaxInvoices() uint64 { - if m != nil { - 
return m.NumMaxInvoices - } - return 0 -} - -func (m *ListInvoiceRequest) GetReversed() bool { - if m != nil { - return m.Reversed - } - return false -} - -type ListInvoiceResponse struct { - // - //A list of invoices from the time slice of the time series specified in the - //request. - Invoices []*Invoice `protobuf:"bytes,1,rep,name=invoices,proto3" json:"invoices,omitempty"` - // - //The index of the last item in the set of returned invoices. This can be used - //to seek further, pagination style. - LastIndexOffset uint64 `protobuf:"varint,2,opt,name=last_index_offset,json=lastIndexOffset,proto3" json:"last_index_offset,omitempty"` - // - //The index of the last item in the set of returned invoices. This can be used - //to seek backwards, pagination style. - FirstIndexOffset uint64 `protobuf:"varint,3,opt,name=first_index_offset,json=firstIndexOffset,proto3" json:"first_index_offset,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListInvoiceResponse) Reset() { *m = ListInvoiceResponse{} } -func (m *ListInvoiceResponse) String() string { return proto.CompactTextString(m) } -func (*ListInvoiceResponse) ProtoMessage() {} -func (*ListInvoiceResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{118} -} - -func (m *ListInvoiceResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListInvoiceResponse.Unmarshal(m, b) -} -func (m *ListInvoiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListInvoiceResponse.Marshal(b, m, deterministic) -} -func (m *ListInvoiceResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListInvoiceResponse.Merge(m, src) -} -func (m *ListInvoiceResponse) XXX_Size() int { - return xxx_messageInfo_ListInvoiceResponse.Size(m) -} -func (m *ListInvoiceResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ListInvoiceResponse.DiscardUnknown(m) -} - -var 
xxx_messageInfo_ListInvoiceResponse proto.InternalMessageInfo - -func (m *ListInvoiceResponse) GetInvoices() []*Invoice { - if m != nil { - return m.Invoices - } - return nil -} - -func (m *ListInvoiceResponse) GetLastIndexOffset() uint64 { - if m != nil { - return m.LastIndexOffset - } - return 0 -} - -func (m *ListInvoiceResponse) GetFirstIndexOffset() uint64 { - if m != nil { - return m.FirstIndexOffset - } - return 0 -} - -type InvoiceSubscription struct { - // - //If specified (non-zero), then we'll first start by sending out - //notifications for all added indexes with an add_index greater than this - //value. This allows callers to catch up on any events they missed while they - //weren't connected to the streaming RPC. - AddIndex uint64 `protobuf:"varint,1,opt,name=add_index,json=addIndex,proto3" json:"add_index,omitempty"` - // - //If specified (non-zero), then we'll first start by sending out - //notifications for all settled indexes with an settle_index greater than - //this value. This allows callers to catch up on any events they missed while - //they weren't connected to the streaming RPC. 
- SettleIndex uint64 `protobuf:"varint,2,opt,name=settle_index,json=settleIndex,proto3" json:"settle_index,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *InvoiceSubscription) Reset() { *m = InvoiceSubscription{} } -func (m *InvoiceSubscription) String() string { return proto.CompactTextString(m) } -func (*InvoiceSubscription) ProtoMessage() {} -func (*InvoiceSubscription) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{119} -} - -func (m *InvoiceSubscription) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_InvoiceSubscription.Unmarshal(m, b) -} -func (m *InvoiceSubscription) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_InvoiceSubscription.Marshal(b, m, deterministic) -} -func (m *InvoiceSubscription) XXX_Merge(src proto.Message) { - xxx_messageInfo_InvoiceSubscription.Merge(m, src) -} -func (m *InvoiceSubscription) XXX_Size() int { - return xxx_messageInfo_InvoiceSubscription.Size(m) -} -func (m *InvoiceSubscription) XXX_DiscardUnknown() { - xxx_messageInfo_InvoiceSubscription.DiscardUnknown(m) -} - -var xxx_messageInfo_InvoiceSubscription proto.InternalMessageInfo - -func (m *InvoiceSubscription) GetAddIndex() uint64 { - if m != nil { - return m.AddIndex - } - return 0 -} - -func (m *InvoiceSubscription) GetSettleIndex() uint64 { - if m != nil { - return m.SettleIndex - } - return 0 -} - -type Payment struct { - // The payment hash - PaymentHash string `protobuf:"bytes,1,opt,name=payment_hash,json=paymentHash,proto3" json:"payment_hash,omitempty"` - // Deprecated, use value_sat or value_msat. - Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` // Deprecated: Do not use. - // Deprecated, use creation_time_ns - CreationDate int64 `protobuf:"varint,3,opt,name=creation_date,json=creationDate,proto3" json:"creation_date,omitempty"` // Deprecated: Do not use. 
- // Deprecated, use fee_sat or fee_msat. - Fee int64 `protobuf:"varint,5,opt,name=fee,proto3" json:"fee,omitempty"` // Deprecated: Do not use. - // The payment preimage - PaymentPreimage string `protobuf:"bytes,6,opt,name=payment_preimage,json=paymentPreimage,proto3" json:"payment_preimage,omitempty"` - // The value of the payment in satoshis - ValueSat int64 `protobuf:"varint,7,opt,name=value_sat,json=valueSat,proto3" json:"value_sat,omitempty"` - // The value of the payment in milli-satoshis - ValueMsat int64 `protobuf:"varint,8,opt,name=value_msat,json=valueMsat,proto3" json:"value_msat,omitempty"` - // The optional payment request being fulfilled. - PaymentRequest string `protobuf:"bytes,9,opt,name=payment_request,json=paymentRequest,proto3" json:"payment_request,omitempty"` - // The status of the payment. - Status Payment_PaymentStatus `protobuf:"varint,10,opt,name=status,proto3,enum=lnrpc.Payment_PaymentStatus" json:"status,omitempty"` - // The fee paid for this payment in satoshis - FeeSat int64 `protobuf:"varint,11,opt,name=fee_sat,json=feeSat,proto3" json:"fee_sat,omitempty"` - // The fee paid for this payment in milli-satoshis - FeeMsat int64 `protobuf:"varint,12,opt,name=fee_msat,json=feeMsat,proto3" json:"fee_msat,omitempty"` - // The time in UNIX nanoseconds at which the payment was created. - CreationTimeNs int64 `protobuf:"varint,13,opt,name=creation_time_ns,json=creationTimeNs,proto3" json:"creation_time_ns,omitempty"` - // The HTLCs made in attempt to settle the payment. - Htlcs []*HTLCAttempt `protobuf:"bytes,14,rep,name=htlcs,proto3" json:"htlcs,omitempty"` - // - //The creation index of this payment. Each payment can be uniquely identified - //by this index, which may not strictly increment by 1 for payments made in - //older versions of lnd. 
- PaymentIndex uint64 `protobuf:"varint,15,opt,name=payment_index,json=paymentIndex,proto3" json:"payment_index,omitempty"` - FailureReason PaymentFailureReason `protobuf:"varint,16,opt,name=failure_reason,json=failureReason,proto3,enum=lnrpc.PaymentFailureReason" json:"failure_reason,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Payment) Reset() { *m = Payment{} } -func (m *Payment) String() string { return proto.CompactTextString(m) } -func (*Payment) ProtoMessage() {} -func (*Payment) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{120} -} - -func (m *Payment) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Payment.Unmarshal(m, b) -} -func (m *Payment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Payment.Marshal(b, m, deterministic) -} -func (m *Payment) XXX_Merge(src proto.Message) { - xxx_messageInfo_Payment.Merge(m, src) -} -func (m *Payment) XXX_Size() int { - return xxx_messageInfo_Payment.Size(m) -} -func (m *Payment) XXX_DiscardUnknown() { - xxx_messageInfo_Payment.DiscardUnknown(m) -} - -var xxx_messageInfo_Payment proto.InternalMessageInfo - -func (m *Payment) GetPaymentHash() string { - if m != nil { - return m.PaymentHash - } - return "" -} - -// Deprecated: Do not use. -func (m *Payment) GetValue() int64 { - if m != nil { - return m.Value - } - return 0 -} - -// Deprecated: Do not use. -func (m *Payment) GetCreationDate() int64 { - if m != nil { - return m.CreationDate - } - return 0 -} - -// Deprecated: Do not use. 
-func (m *Payment) GetFee() int64 { - if m != nil { - return m.Fee - } - return 0 -} - -func (m *Payment) GetPaymentPreimage() string { - if m != nil { - return m.PaymentPreimage - } - return "" -} - -func (m *Payment) GetValueSat() int64 { - if m != nil { - return m.ValueSat - } - return 0 -} - -func (m *Payment) GetValueMsat() int64 { - if m != nil { - return m.ValueMsat - } - return 0 -} - -func (m *Payment) GetPaymentRequest() string { - if m != nil { - return m.PaymentRequest - } - return "" -} - -func (m *Payment) GetStatus() Payment_PaymentStatus { - if m != nil { - return m.Status - } - return Payment_UNKNOWN -} - -func (m *Payment) GetFeeSat() int64 { - if m != nil { - return m.FeeSat - } - return 0 -} - -func (m *Payment) GetFeeMsat() int64 { - if m != nil { - return m.FeeMsat - } - return 0 -} - -func (m *Payment) GetCreationTimeNs() int64 { - if m != nil { - return m.CreationTimeNs - } - return 0 -} - -func (m *Payment) GetHtlcs() []*HTLCAttempt { - if m != nil { - return m.Htlcs - } - return nil -} - -func (m *Payment) GetPaymentIndex() uint64 { - if m != nil { - return m.PaymentIndex - } - return 0 -} - -func (m *Payment) GetFailureReason() PaymentFailureReason { - if m != nil { - return m.FailureReason - } - return PaymentFailureReason_FAILURE_REASON_NONE -} - -type HTLCAttempt struct { - // The status of the HTLC. - Status HTLCAttempt_HTLCStatus `protobuf:"varint,1,opt,name=status,proto3,enum=lnrpc.HTLCAttempt_HTLCStatus" json:"status,omitempty"` - // The route taken by this HTLC. - Route *Route `protobuf:"bytes,2,opt,name=route,proto3" json:"route,omitempty"` - // The time in UNIX nanoseconds at which this HTLC was sent. - AttemptTimeNs int64 `protobuf:"varint,3,opt,name=attempt_time_ns,json=attemptTimeNs,proto3" json:"attempt_time_ns,omitempty"` - // - //The time in UNIX nanoseconds at which this HTLC was settled or failed. - //This value will not be set if the HTLC is still IN_FLIGHT. 
- ResolveTimeNs int64 `protobuf:"varint,4,opt,name=resolve_time_ns,json=resolveTimeNs,proto3" json:"resolve_time_ns,omitempty"` - // Detailed htlc failure info. - Failure *Failure `protobuf:"bytes,5,opt,name=failure,proto3" json:"failure,omitempty"` - // The preimage that was used to settle the HTLC. - Preimage []byte `protobuf:"bytes,6,opt,name=preimage,proto3" json:"preimage,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *HTLCAttempt) Reset() { *m = HTLCAttempt{} } -func (m *HTLCAttempt) String() string { return proto.CompactTextString(m) } -func (*HTLCAttempt) ProtoMessage() {} -func (*HTLCAttempt) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{121} -} - -func (m *HTLCAttempt) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_HTLCAttempt.Unmarshal(m, b) -} -func (m *HTLCAttempt) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_HTLCAttempt.Marshal(b, m, deterministic) -} -func (m *HTLCAttempt) XXX_Merge(src proto.Message) { - xxx_messageInfo_HTLCAttempt.Merge(m, src) -} -func (m *HTLCAttempt) XXX_Size() int { - return xxx_messageInfo_HTLCAttempt.Size(m) -} -func (m *HTLCAttempt) XXX_DiscardUnknown() { - xxx_messageInfo_HTLCAttempt.DiscardUnknown(m) -} - -var xxx_messageInfo_HTLCAttempt proto.InternalMessageInfo - -func (m *HTLCAttempt) GetStatus() HTLCAttempt_HTLCStatus { - if m != nil { - return m.Status - } - return HTLCAttempt_IN_FLIGHT -} - -func (m *HTLCAttempt) GetRoute() *Route { - if m != nil { - return m.Route - } - return nil -} - -func (m *HTLCAttempt) GetAttemptTimeNs() int64 { - if m != nil { - return m.AttemptTimeNs - } - return 0 -} - -func (m *HTLCAttempt) GetResolveTimeNs() int64 { - if m != nil { - return m.ResolveTimeNs - } - return 0 -} - -func (m *HTLCAttempt) GetFailure() *Failure { - if m != nil { - return m.Failure - } - return nil -} - -func (m *HTLCAttempt) GetPreimage() 
[]byte { - if m != nil { - return m.Preimage - } - return nil -} - -type ListPaymentsRequest struct { - // - //If true, then return payments that have not yet fully completed. This means - //that pending payments, as well as failed payments will show up if this - //field is set to true. This flag doesn't change the meaning of the indices, - //which are tied to individual payments. - IncludeIncomplete bool `protobuf:"varint,1,opt,name=include_incomplete,json=includeIncomplete,proto3" json:"include_incomplete,omitempty"` - // - //The index of a payment that will be used as either the start or end of a - //query to determine which payments should be returned in the response. The - //index_offset is exclusive. In the case of a zero index_offset, the query - //will start with the oldest payment when paginating forwards, or will end - //with the most recent payment when paginating backwards. - IndexOffset uint64 `protobuf:"varint,2,opt,name=index_offset,json=indexOffset,proto3" json:"index_offset,omitempty"` - // The maximal number of payments returned in the response to this query. - MaxPayments uint64 `protobuf:"varint,3,opt,name=max_payments,json=maxPayments,proto3" json:"max_payments,omitempty"` - // - //If set, the payments returned will result from seeking backwards from the - //specified index offset. This can be used to paginate backwards. The order - //of the returned payments is always oldest first (ascending index order). 
- Reversed bool `protobuf:"varint,4,opt,name=reversed,proto3" json:"reversed,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListPaymentsRequest) Reset() { *m = ListPaymentsRequest{} } -func (m *ListPaymentsRequest) String() string { return proto.CompactTextString(m) } -func (*ListPaymentsRequest) ProtoMessage() {} -func (*ListPaymentsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{122} -} - -func (m *ListPaymentsRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListPaymentsRequest.Unmarshal(m, b) -} -func (m *ListPaymentsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListPaymentsRequest.Marshal(b, m, deterministic) -} -func (m *ListPaymentsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListPaymentsRequest.Merge(m, src) -} -func (m *ListPaymentsRequest) XXX_Size() int { - return xxx_messageInfo_ListPaymentsRequest.Size(m) -} -func (m *ListPaymentsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ListPaymentsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ListPaymentsRequest proto.InternalMessageInfo - -func (m *ListPaymentsRequest) GetIncludeIncomplete() bool { - if m != nil { - return m.IncludeIncomplete - } - return false -} - -func (m *ListPaymentsRequest) GetIndexOffset() uint64 { - if m != nil { - return m.IndexOffset - } - return 0 -} - -func (m *ListPaymentsRequest) GetMaxPayments() uint64 { - if m != nil { - return m.MaxPayments - } - return 0 -} - -func (m *ListPaymentsRequest) GetReversed() bool { - if m != nil { - return m.Reversed - } - return false -} - -type ListPaymentsResponse struct { - // The list of payments - Payments []*Payment `protobuf:"bytes,1,rep,name=payments,proto3" json:"payments,omitempty"` - // - //The index of the first item in the set of returned payments. 
This can be - //used as the index_offset to continue seeking backwards in the next request. - FirstIndexOffset uint64 `protobuf:"varint,2,opt,name=first_index_offset,json=firstIndexOffset,proto3" json:"first_index_offset,omitempty"` - // - //The index of the last item in the set of returned payments. This can be used - //as the index_offset to continue seeking forwards in the next request. - LastIndexOffset uint64 `protobuf:"varint,3,opt,name=last_index_offset,json=lastIndexOffset,proto3" json:"last_index_offset,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListPaymentsResponse) Reset() { *m = ListPaymentsResponse{} } -func (m *ListPaymentsResponse) String() string { return proto.CompactTextString(m) } -func (*ListPaymentsResponse) ProtoMessage() {} -func (*ListPaymentsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{123} -} - -func (m *ListPaymentsResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListPaymentsResponse.Unmarshal(m, b) -} -func (m *ListPaymentsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListPaymentsResponse.Marshal(b, m, deterministic) -} -func (m *ListPaymentsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListPaymentsResponse.Merge(m, src) -} -func (m *ListPaymentsResponse) XXX_Size() int { - return xxx_messageInfo_ListPaymentsResponse.Size(m) -} -func (m *ListPaymentsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ListPaymentsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ListPaymentsResponse proto.InternalMessageInfo - -func (m *ListPaymentsResponse) GetPayments() []*Payment { - if m != nil { - return m.Payments - } - return nil -} - -func (m *ListPaymentsResponse) GetFirstIndexOffset() uint64 { - if m != nil { - return m.FirstIndexOffset - } - return 0 -} - -func (m *ListPaymentsResponse) GetLastIndexOffset() uint64 { - if m != 
nil { - return m.LastIndexOffset - } - return 0 -} - -type DeleteAllPaymentsRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DeleteAllPaymentsRequest) Reset() { *m = DeleteAllPaymentsRequest{} } -func (m *DeleteAllPaymentsRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteAllPaymentsRequest) ProtoMessage() {} -func (*DeleteAllPaymentsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{124} -} - -func (m *DeleteAllPaymentsRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DeleteAllPaymentsRequest.Unmarshal(m, b) -} -func (m *DeleteAllPaymentsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DeleteAllPaymentsRequest.Marshal(b, m, deterministic) -} -func (m *DeleteAllPaymentsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteAllPaymentsRequest.Merge(m, src) -} -func (m *DeleteAllPaymentsRequest) XXX_Size() int { - return xxx_messageInfo_DeleteAllPaymentsRequest.Size(m) -} -func (m *DeleteAllPaymentsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteAllPaymentsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_DeleteAllPaymentsRequest proto.InternalMessageInfo - -type DeleteAllPaymentsResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DeleteAllPaymentsResponse) Reset() { *m = DeleteAllPaymentsResponse{} } -func (m *DeleteAllPaymentsResponse) String() string { return proto.CompactTextString(m) } -func (*DeleteAllPaymentsResponse) ProtoMessage() {} -func (*DeleteAllPaymentsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{125} -} - -func (m *DeleteAllPaymentsResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DeleteAllPaymentsResponse.Unmarshal(m, b) -} -func (m *DeleteAllPaymentsResponse) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DeleteAllPaymentsResponse.Marshal(b, m, deterministic) -} -func (m *DeleteAllPaymentsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteAllPaymentsResponse.Merge(m, src) -} -func (m *DeleteAllPaymentsResponse) XXX_Size() int { - return xxx_messageInfo_DeleteAllPaymentsResponse.Size(m) -} -func (m *DeleteAllPaymentsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteAllPaymentsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_DeleteAllPaymentsResponse proto.InternalMessageInfo - -type AbandonChannelRequest struct { - ChannelPoint *ChannelPoint `protobuf:"bytes,1,opt,name=channel_point,json=channelPoint,proto3" json:"channel_point,omitempty"` - PendingFundingShimOnly bool `protobuf:"varint,2,opt,name=pending_funding_shim_only,json=pendingFundingShimOnly,proto3" json:"pending_funding_shim_only,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AbandonChannelRequest) Reset() { *m = AbandonChannelRequest{} } -func (m *AbandonChannelRequest) String() string { return proto.CompactTextString(m) } -func (*AbandonChannelRequest) ProtoMessage() {} -func (*AbandonChannelRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{126} -} - -func (m *AbandonChannelRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_AbandonChannelRequest.Unmarshal(m, b) -} -func (m *AbandonChannelRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AbandonChannelRequest.Marshal(b, m, deterministic) -} -func (m *AbandonChannelRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AbandonChannelRequest.Merge(m, src) -} -func (m *AbandonChannelRequest) XXX_Size() int { - return xxx_messageInfo_AbandonChannelRequest.Size(m) -} -func (m *AbandonChannelRequest) XXX_DiscardUnknown() { - 
xxx_messageInfo_AbandonChannelRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_AbandonChannelRequest proto.InternalMessageInfo - -func (m *AbandonChannelRequest) GetChannelPoint() *ChannelPoint { - if m != nil { - return m.ChannelPoint - } - return nil -} - -func (m *AbandonChannelRequest) GetPendingFundingShimOnly() bool { - if m != nil { - return m.PendingFundingShimOnly - } - return false -} - -type AbandonChannelResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AbandonChannelResponse) Reset() { *m = AbandonChannelResponse{} } -func (m *AbandonChannelResponse) String() string { return proto.CompactTextString(m) } -func (*AbandonChannelResponse) ProtoMessage() {} -func (*AbandonChannelResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{127} -} - -func (m *AbandonChannelResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_AbandonChannelResponse.Unmarshal(m, b) -} -func (m *AbandonChannelResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AbandonChannelResponse.Marshal(b, m, deterministic) -} -func (m *AbandonChannelResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AbandonChannelResponse.Merge(m, src) -} -func (m *AbandonChannelResponse) XXX_Size() int { - return xxx_messageInfo_AbandonChannelResponse.Size(m) -} -func (m *AbandonChannelResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AbandonChannelResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_AbandonChannelResponse proto.InternalMessageInfo - -type DebugLevelRequest struct { - Show bool `protobuf:"varint,1,opt,name=show,proto3" json:"show,omitempty"` - LevelSpec string `protobuf:"bytes,2,opt,name=level_spec,json=levelSpec,proto3" json:"level_spec,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DebugLevelRequest) 
Reset() { *m = DebugLevelRequest{} } -func (m *DebugLevelRequest) String() string { return proto.CompactTextString(m) } -func (*DebugLevelRequest) ProtoMessage() {} -func (*DebugLevelRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{128} -} - -func (m *DebugLevelRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DebugLevelRequest.Unmarshal(m, b) -} -func (m *DebugLevelRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DebugLevelRequest.Marshal(b, m, deterministic) -} -func (m *DebugLevelRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DebugLevelRequest.Merge(m, src) -} -func (m *DebugLevelRequest) XXX_Size() int { - return xxx_messageInfo_DebugLevelRequest.Size(m) -} -func (m *DebugLevelRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DebugLevelRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_DebugLevelRequest proto.InternalMessageInfo - -func (m *DebugLevelRequest) GetShow() bool { - if m != nil { - return m.Show - } - return false -} - -func (m *DebugLevelRequest) GetLevelSpec() string { - if m != nil { - return m.LevelSpec - } - return "" -} - -type DebugLevelResponse struct { - SubSystems string `protobuf:"bytes,1,opt,name=sub_systems,json=subSystems,proto3" json:"sub_systems,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DebugLevelResponse) Reset() { *m = DebugLevelResponse{} } -func (m *DebugLevelResponse) String() string { return proto.CompactTextString(m) } -func (*DebugLevelResponse) ProtoMessage() {} -func (*DebugLevelResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{129} -} - -func (m *DebugLevelResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DebugLevelResponse.Unmarshal(m, b) -} -func (m *DebugLevelResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return 
xxx_messageInfo_DebugLevelResponse.Marshal(b, m, deterministic) -} -func (m *DebugLevelResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_DebugLevelResponse.Merge(m, src) -} -func (m *DebugLevelResponse) XXX_Size() int { - return xxx_messageInfo_DebugLevelResponse.Size(m) -} -func (m *DebugLevelResponse) XXX_DiscardUnknown() { - xxx_messageInfo_DebugLevelResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_DebugLevelResponse proto.InternalMessageInfo - -func (m *DebugLevelResponse) GetSubSystems() string { - if m != nil { - return m.SubSystems - } - return "" -} - -type PayReqString struct { - // The payment request string to be decoded - PayReq string `protobuf:"bytes,1,opt,name=pay_req,json=payReq,proto3" json:"pay_req,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PayReqString) Reset() { *m = PayReqString{} } -func (m *PayReqString) String() string { return proto.CompactTextString(m) } -func (*PayReqString) ProtoMessage() {} -func (*PayReqString) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{130} -} - -func (m *PayReqString) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PayReqString.Unmarshal(m, b) -} -func (m *PayReqString) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PayReqString.Marshal(b, m, deterministic) -} -func (m *PayReqString) XXX_Merge(src proto.Message) { - xxx_messageInfo_PayReqString.Merge(m, src) -} -func (m *PayReqString) XXX_Size() int { - return xxx_messageInfo_PayReqString.Size(m) -} -func (m *PayReqString) XXX_DiscardUnknown() { - xxx_messageInfo_PayReqString.DiscardUnknown(m) -} - -var xxx_messageInfo_PayReqString proto.InternalMessageInfo - -func (m *PayReqString) GetPayReq() string { - if m != nil { - return m.PayReq - } - return "" -} - -type PayReq struct { - Destination string `protobuf:"bytes,1,opt,name=destination,proto3" json:"destination,omitempty"` - 
PaymentHash string `protobuf:"bytes,2,opt,name=payment_hash,json=paymentHash,proto3" json:"payment_hash,omitempty"` - NumSatoshis int64 `protobuf:"varint,3,opt,name=num_satoshis,json=numSatoshis,proto3" json:"num_satoshis,omitempty"` - Timestamp int64 `protobuf:"varint,4,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - Expiry int64 `protobuf:"varint,5,opt,name=expiry,proto3" json:"expiry,omitempty"` - Description string `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"` - DescriptionHash string `protobuf:"bytes,7,opt,name=description_hash,json=descriptionHash,proto3" json:"description_hash,omitempty"` - FallbackAddr string `protobuf:"bytes,8,opt,name=fallback_addr,json=fallbackAddr,proto3" json:"fallback_addr,omitempty"` - CltvExpiry int64 `protobuf:"varint,9,opt,name=cltv_expiry,json=cltvExpiry,proto3" json:"cltv_expiry,omitempty"` - RouteHints []*RouteHint `protobuf:"bytes,10,rep,name=route_hints,json=routeHints,proto3" json:"route_hints,omitempty"` - PaymentAddr []byte `protobuf:"bytes,11,opt,name=payment_addr,json=paymentAddr,proto3" json:"payment_addr,omitempty"` - NumMsat int64 `protobuf:"varint,12,opt,name=num_msat,json=numMsat,proto3" json:"num_msat,omitempty"` - Features map[uint32]*Feature `protobuf:"bytes,13,rep,name=features,proto3" json:"features,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PayReq) Reset() { *m = PayReq{} } -func (m *PayReq) String() string { return proto.CompactTextString(m) } -func (*PayReq) ProtoMessage() {} -func (*PayReq) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{131} -} - -func (m *PayReq) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PayReq.Unmarshal(m, b) -} -func (m *PayReq) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return 
xxx_messageInfo_PayReq.Marshal(b, m, deterministic) -} -func (m *PayReq) XXX_Merge(src proto.Message) { - xxx_messageInfo_PayReq.Merge(m, src) -} -func (m *PayReq) XXX_Size() int { - return xxx_messageInfo_PayReq.Size(m) -} -func (m *PayReq) XXX_DiscardUnknown() { - xxx_messageInfo_PayReq.DiscardUnknown(m) -} - -var xxx_messageInfo_PayReq proto.InternalMessageInfo - -func (m *PayReq) GetDestination() string { - if m != nil { - return m.Destination - } - return "" -} - -func (m *PayReq) GetPaymentHash() string { - if m != nil { - return m.PaymentHash - } - return "" -} - -func (m *PayReq) GetNumSatoshis() int64 { - if m != nil { - return m.NumSatoshis - } - return 0 -} - -func (m *PayReq) GetTimestamp() int64 { - if m != nil { - return m.Timestamp - } - return 0 -} - -func (m *PayReq) GetExpiry() int64 { - if m != nil { - return m.Expiry - } - return 0 -} - -func (m *PayReq) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *PayReq) GetDescriptionHash() string { - if m != nil { - return m.DescriptionHash - } - return "" -} - -func (m *PayReq) GetFallbackAddr() string { - if m != nil { - return m.FallbackAddr - } - return "" -} - -func (m *PayReq) GetCltvExpiry() int64 { - if m != nil { - return m.CltvExpiry - } - return 0 -} - -func (m *PayReq) GetRouteHints() []*RouteHint { - if m != nil { - return m.RouteHints - } - return nil -} - -func (m *PayReq) GetPaymentAddr() []byte { - if m != nil { - return m.PaymentAddr - } - return nil -} - -func (m *PayReq) GetNumMsat() int64 { - if m != nil { - return m.NumMsat - } - return 0 -} - -func (m *PayReq) GetFeatures() map[uint32]*Feature { - if m != nil { - return m.Features - } - return nil -} - -type Feature struct { - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - IsRequired bool `protobuf:"varint,3,opt,name=is_required,json=isRequired,proto3" json:"is_required,omitempty"` - IsKnown bool `protobuf:"varint,4,opt,name=is_known,json=isKnown,proto3" 
json:"is_known,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Feature) Reset() { *m = Feature{} } -func (m *Feature) String() string { return proto.CompactTextString(m) } -func (*Feature) ProtoMessage() {} -func (*Feature) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{132} -} - -func (m *Feature) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Feature.Unmarshal(m, b) -} -func (m *Feature) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Feature.Marshal(b, m, deterministic) -} -func (m *Feature) XXX_Merge(src proto.Message) { - xxx_messageInfo_Feature.Merge(m, src) -} -func (m *Feature) XXX_Size() int { - return xxx_messageInfo_Feature.Size(m) -} -func (m *Feature) XXX_DiscardUnknown() { - xxx_messageInfo_Feature.DiscardUnknown(m) -} - -var xxx_messageInfo_Feature proto.InternalMessageInfo - -func (m *Feature) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Feature) GetIsRequired() bool { - if m != nil { - return m.IsRequired - } - return false -} - -func (m *Feature) GetIsKnown() bool { - if m != nil { - return m.IsKnown - } - return false -} - -type FeeReportRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FeeReportRequest) Reset() { *m = FeeReportRequest{} } -func (m *FeeReportRequest) String() string { return proto.CompactTextString(m) } -func (*FeeReportRequest) ProtoMessage() {} -func (*FeeReportRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{133} -} - -func (m *FeeReportRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FeeReportRequest.Unmarshal(m, b) -} -func (m *FeeReportRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FeeReportRequest.Marshal(b, m, 
deterministic) -} -func (m *FeeReportRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_FeeReportRequest.Merge(m, src) -} -func (m *FeeReportRequest) XXX_Size() int { - return xxx_messageInfo_FeeReportRequest.Size(m) -} -func (m *FeeReportRequest) XXX_DiscardUnknown() { - xxx_messageInfo_FeeReportRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_FeeReportRequest proto.InternalMessageInfo - -type ChannelFeeReport struct { - // The short channel id that this fee report belongs to. - ChanId uint64 `protobuf:"varint,5,opt,name=chan_id,json=chanId,proto3" json:"chan_id,omitempty"` - // The channel that this fee report belongs to. - ChannelPoint string `protobuf:"bytes,1,opt,name=channel_point,json=channelPoint,proto3" json:"channel_point,omitempty"` - // The base fee charged regardless of the number of milli-satoshis sent. - BaseFeeMsat int64 `protobuf:"varint,2,opt,name=base_fee_msat,json=baseFeeMsat,proto3" json:"base_fee_msat,omitempty"` - // The amount charged per milli-satoshis transferred expressed in - // millionths of a satoshi. - FeePerMil int64 `protobuf:"varint,3,opt,name=fee_per_mil,json=feePerMil,proto3" json:"fee_per_mil,omitempty"` - // The effective fee rate in milli-satoshis. Computed by dividing the - // fee_per_mil value by 1 million. 
- FeeRate float64 `protobuf:"fixed64,4,opt,name=fee_rate,json=feeRate,proto3" json:"fee_rate,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ChannelFeeReport) Reset() { *m = ChannelFeeReport{} } -func (m *ChannelFeeReport) String() string { return proto.CompactTextString(m) } -func (*ChannelFeeReport) ProtoMessage() {} -func (*ChannelFeeReport) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{134} -} - -func (m *ChannelFeeReport) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ChannelFeeReport.Unmarshal(m, b) -} -func (m *ChannelFeeReport) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ChannelFeeReport.Marshal(b, m, deterministic) -} -func (m *ChannelFeeReport) XXX_Merge(src proto.Message) { - xxx_messageInfo_ChannelFeeReport.Merge(m, src) -} -func (m *ChannelFeeReport) XXX_Size() int { - return xxx_messageInfo_ChannelFeeReport.Size(m) -} -func (m *ChannelFeeReport) XXX_DiscardUnknown() { - xxx_messageInfo_ChannelFeeReport.DiscardUnknown(m) -} - -var xxx_messageInfo_ChannelFeeReport proto.InternalMessageInfo - -func (m *ChannelFeeReport) GetChanId() uint64 { - if m != nil { - return m.ChanId - } - return 0 -} - -func (m *ChannelFeeReport) GetChannelPoint() string { - if m != nil { - return m.ChannelPoint - } - return "" -} - -func (m *ChannelFeeReport) GetBaseFeeMsat() int64 { - if m != nil { - return m.BaseFeeMsat - } - return 0 -} - -func (m *ChannelFeeReport) GetFeePerMil() int64 { - if m != nil { - return m.FeePerMil - } - return 0 -} - -func (m *ChannelFeeReport) GetFeeRate() float64 { - if m != nil { - return m.FeeRate - } - return 0 -} - -type FeeReportResponse struct { - // An array of channel fee reports which describes the current fee schedule - // for each channel. 
- ChannelFees []*ChannelFeeReport `protobuf:"bytes,1,rep,name=channel_fees,json=channelFees,proto3" json:"channel_fees,omitempty"` - // The total amount of fee revenue (in satoshis) the switch has collected - // over the past 24 hrs. - DayFeeSum uint64 `protobuf:"varint,2,opt,name=day_fee_sum,json=dayFeeSum,proto3" json:"day_fee_sum,omitempty"` - // The total amount of fee revenue (in satoshis) the switch has collected - // over the past 1 week. - WeekFeeSum uint64 `protobuf:"varint,3,opt,name=week_fee_sum,json=weekFeeSum,proto3" json:"week_fee_sum,omitempty"` - // The total amount of fee revenue (in satoshis) the switch has collected - // over the past 1 month. - MonthFeeSum uint64 `protobuf:"varint,4,opt,name=month_fee_sum,json=monthFeeSum,proto3" json:"month_fee_sum,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FeeReportResponse) Reset() { *m = FeeReportResponse{} } -func (m *FeeReportResponse) String() string { return proto.CompactTextString(m) } -func (*FeeReportResponse) ProtoMessage() {} -func (*FeeReportResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{135} -} - -func (m *FeeReportResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FeeReportResponse.Unmarshal(m, b) -} -func (m *FeeReportResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FeeReportResponse.Marshal(b, m, deterministic) -} -func (m *FeeReportResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_FeeReportResponse.Merge(m, src) -} -func (m *FeeReportResponse) XXX_Size() int { - return xxx_messageInfo_FeeReportResponse.Size(m) -} -func (m *FeeReportResponse) XXX_DiscardUnknown() { - xxx_messageInfo_FeeReportResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_FeeReportResponse proto.InternalMessageInfo - -func (m *FeeReportResponse) GetChannelFees() []*ChannelFeeReport { - if m != nil { - return 
m.ChannelFees - } - return nil -} - -func (m *FeeReportResponse) GetDayFeeSum() uint64 { - if m != nil { - return m.DayFeeSum - } - return 0 -} - -func (m *FeeReportResponse) GetWeekFeeSum() uint64 { - if m != nil { - return m.WeekFeeSum - } - return 0 -} - -func (m *FeeReportResponse) GetMonthFeeSum() uint64 { - if m != nil { - return m.MonthFeeSum - } - return 0 -} - -type PolicyUpdateRequest struct { - // Types that are valid to be assigned to Scope: - // *PolicyUpdateRequest_Global - // *PolicyUpdateRequest_ChanPoint - Scope isPolicyUpdateRequest_Scope `protobuf_oneof:"scope"` - // The base fee charged regardless of the number of milli-satoshis sent. - BaseFeeMsat int64 `protobuf:"varint,3,opt,name=base_fee_msat,json=baseFeeMsat,proto3" json:"base_fee_msat,omitempty"` - // The effective fee rate in milli-satoshis. The precision of this value - // goes up to 6 decimal places, so 1e-6. - FeeRate float64 `protobuf:"fixed64,4,opt,name=fee_rate,json=feeRate,proto3" json:"fee_rate,omitempty"` - // The required timelock delta for HTLCs forwarded over the channel. - TimeLockDelta uint32 `protobuf:"varint,5,opt,name=time_lock_delta,json=timeLockDelta,proto3" json:"time_lock_delta,omitempty"` - // If set, the maximum HTLC size in milli-satoshis. If unset, the maximum - // HTLC will be unchanged. - MaxHtlcMsat uint64 `protobuf:"varint,6,opt,name=max_htlc_msat,json=maxHtlcMsat,proto3" json:"max_htlc_msat,omitempty"` - // The minimum HTLC size in milli-satoshis. Only applied if - // min_htlc_msat_specified is true. - MinHtlcMsat uint64 `protobuf:"varint,7,opt,name=min_htlc_msat,json=minHtlcMsat,proto3" json:"min_htlc_msat,omitempty"` - // If true, min_htlc_msat is applied. 
- MinHtlcMsatSpecified bool `protobuf:"varint,8,opt,name=min_htlc_msat_specified,json=minHtlcMsatSpecified,proto3" json:"min_htlc_msat_specified,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PolicyUpdateRequest) Reset() { *m = PolicyUpdateRequest{} } -func (m *PolicyUpdateRequest) String() string { return proto.CompactTextString(m) } -func (*PolicyUpdateRequest) ProtoMessage() {} -func (*PolicyUpdateRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{136} -} - -func (m *PolicyUpdateRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PolicyUpdateRequest.Unmarshal(m, b) -} -func (m *PolicyUpdateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PolicyUpdateRequest.Marshal(b, m, deterministic) -} -func (m *PolicyUpdateRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_PolicyUpdateRequest.Merge(m, src) -} -func (m *PolicyUpdateRequest) XXX_Size() int { - return xxx_messageInfo_PolicyUpdateRequest.Size(m) -} -func (m *PolicyUpdateRequest) XXX_DiscardUnknown() { - xxx_messageInfo_PolicyUpdateRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_PolicyUpdateRequest proto.InternalMessageInfo - -type isPolicyUpdateRequest_Scope interface { - isPolicyUpdateRequest_Scope() -} - -type PolicyUpdateRequest_Global struct { - Global bool `protobuf:"varint,1,opt,name=global,proto3,oneof"` -} - -type PolicyUpdateRequest_ChanPoint struct { - ChanPoint *ChannelPoint `protobuf:"bytes,2,opt,name=chan_point,json=chanPoint,proto3,oneof"` -} - -func (*PolicyUpdateRequest_Global) isPolicyUpdateRequest_Scope() {} - -func (*PolicyUpdateRequest_ChanPoint) isPolicyUpdateRequest_Scope() {} - -func (m *PolicyUpdateRequest) GetScope() isPolicyUpdateRequest_Scope { - if m != nil { - return m.Scope - } - return nil -} - -func (m *PolicyUpdateRequest) GetGlobal() bool { - if x, ok := 
m.GetScope().(*PolicyUpdateRequest_Global); ok { - return x.Global - } - return false -} - -func (m *PolicyUpdateRequest) GetChanPoint() *ChannelPoint { - if x, ok := m.GetScope().(*PolicyUpdateRequest_ChanPoint); ok { - return x.ChanPoint - } - return nil -} - -func (m *PolicyUpdateRequest) GetBaseFeeMsat() int64 { - if m != nil { - return m.BaseFeeMsat - } - return 0 -} - -func (m *PolicyUpdateRequest) GetFeeRate() float64 { - if m != nil { - return m.FeeRate - } - return 0 -} - -func (m *PolicyUpdateRequest) GetTimeLockDelta() uint32 { - if m != nil { - return m.TimeLockDelta - } - return 0 -} - -func (m *PolicyUpdateRequest) GetMaxHtlcMsat() uint64 { - if m != nil { - return m.MaxHtlcMsat - } - return 0 -} - -func (m *PolicyUpdateRequest) GetMinHtlcMsat() uint64 { - if m != nil { - return m.MinHtlcMsat - } - return 0 -} - -func (m *PolicyUpdateRequest) GetMinHtlcMsatSpecified() bool { - if m != nil { - return m.MinHtlcMsatSpecified - } - return false -} - -// XXX_OneofWrappers is for the internal use of the proto package. 
-func (*PolicyUpdateRequest) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*PolicyUpdateRequest_Global)(nil), - (*PolicyUpdateRequest_ChanPoint)(nil), - } -} - -type PolicyUpdateResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PolicyUpdateResponse) Reset() { *m = PolicyUpdateResponse{} } -func (m *PolicyUpdateResponse) String() string { return proto.CompactTextString(m) } -func (*PolicyUpdateResponse) ProtoMessage() {} -func (*PolicyUpdateResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{137} -} - -func (m *PolicyUpdateResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PolicyUpdateResponse.Unmarshal(m, b) -} -func (m *PolicyUpdateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PolicyUpdateResponse.Marshal(b, m, deterministic) -} -func (m *PolicyUpdateResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_PolicyUpdateResponse.Merge(m, src) -} -func (m *PolicyUpdateResponse) XXX_Size() int { - return xxx_messageInfo_PolicyUpdateResponse.Size(m) -} -func (m *PolicyUpdateResponse) XXX_DiscardUnknown() { - xxx_messageInfo_PolicyUpdateResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_PolicyUpdateResponse proto.InternalMessageInfo - -type ForwardingHistoryRequest struct { - // Start time is the starting point of the forwarding history request. All - // records beyond this point will be included, respecting the end time, and - // the index offset. - StartTime uint64 `protobuf:"varint,1,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` - // End time is the end point of the forwarding history request. The - // response will carry at most 50k records between the start time and the - // end time. The index offset can be used to implement pagination. 
- EndTime uint64 `protobuf:"varint,2,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` - // Index offset is the offset in the time series to start at. As each - // response can only contain 50k records, callers can use this to skip - // around within a packed time series. - IndexOffset uint32 `protobuf:"varint,3,opt,name=index_offset,json=indexOffset,proto3" json:"index_offset,omitempty"` - // The max number of events to return in the response to this query. - NumMaxEvents uint32 `protobuf:"varint,4,opt,name=num_max_events,json=numMaxEvents,proto3" json:"num_max_events,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ForwardingHistoryRequest) Reset() { *m = ForwardingHistoryRequest{} } -func (m *ForwardingHistoryRequest) String() string { return proto.CompactTextString(m) } -func (*ForwardingHistoryRequest) ProtoMessage() {} -func (*ForwardingHistoryRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{138} -} - -func (m *ForwardingHistoryRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ForwardingHistoryRequest.Unmarshal(m, b) -} -func (m *ForwardingHistoryRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ForwardingHistoryRequest.Marshal(b, m, deterministic) -} -func (m *ForwardingHistoryRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ForwardingHistoryRequest.Merge(m, src) -} -func (m *ForwardingHistoryRequest) XXX_Size() int { - return xxx_messageInfo_ForwardingHistoryRequest.Size(m) -} -func (m *ForwardingHistoryRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ForwardingHistoryRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ForwardingHistoryRequest proto.InternalMessageInfo - -func (m *ForwardingHistoryRequest) GetStartTime() uint64 { - if m != nil { - return m.StartTime - } - return 0 -} - -func (m *ForwardingHistoryRequest) GetEndTime() uint64 { - 
if m != nil { - return m.EndTime - } - return 0 -} - -func (m *ForwardingHistoryRequest) GetIndexOffset() uint32 { - if m != nil { - return m.IndexOffset - } - return 0 -} - -func (m *ForwardingHistoryRequest) GetNumMaxEvents() uint32 { - if m != nil { - return m.NumMaxEvents - } - return 0 -} - -type ForwardingEvent struct { - // Timestamp is the time (unix epoch offset) that this circuit was - // completed. - Timestamp uint64 `protobuf:"varint,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - // The incoming channel ID that carried the HTLC that created the circuit. - ChanIdIn uint64 `protobuf:"varint,2,opt,name=chan_id_in,json=chanIdIn,proto3" json:"chan_id_in,omitempty"` - // The outgoing channel ID that carried the preimage that completed the - // circuit. - ChanIdOut uint64 `protobuf:"varint,4,opt,name=chan_id_out,json=chanIdOut,proto3" json:"chan_id_out,omitempty"` - // The total amount (in satoshis) of the incoming HTLC that created half - // the circuit. - AmtIn uint64 `protobuf:"varint,5,opt,name=amt_in,json=amtIn,proto3" json:"amt_in,omitempty"` - // The total amount (in satoshis) of the outgoing HTLC that created the - // second half of the circuit. - AmtOut uint64 `protobuf:"varint,6,opt,name=amt_out,json=amtOut,proto3" json:"amt_out,omitempty"` - // The total fee (in satoshis) that this payment circuit carried. - Fee uint64 `protobuf:"varint,7,opt,name=fee,proto3" json:"fee,omitempty"` - // The total fee (in milli-satoshis) that this payment circuit carried. - FeeMsat uint64 `protobuf:"varint,8,opt,name=fee_msat,json=feeMsat,proto3" json:"fee_msat,omitempty"` - // The total amount (in milli-satoshis) of the incoming HTLC that created - // half the circuit. - AmtInMsat uint64 `protobuf:"varint,9,opt,name=amt_in_msat,json=amtInMsat,proto3" json:"amt_in_msat,omitempty"` - // The total amount (in milli-satoshis) of the outgoing HTLC that created - // the second half of the circuit. 
- AmtOutMsat uint64 `protobuf:"varint,10,opt,name=amt_out_msat,json=amtOutMsat,proto3" json:"amt_out_msat,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ForwardingEvent) Reset() { *m = ForwardingEvent{} } -func (m *ForwardingEvent) String() string { return proto.CompactTextString(m) } -func (*ForwardingEvent) ProtoMessage() {} -func (*ForwardingEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{139} -} - -func (m *ForwardingEvent) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ForwardingEvent.Unmarshal(m, b) -} -func (m *ForwardingEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ForwardingEvent.Marshal(b, m, deterministic) -} -func (m *ForwardingEvent) XXX_Merge(src proto.Message) { - xxx_messageInfo_ForwardingEvent.Merge(m, src) -} -func (m *ForwardingEvent) XXX_Size() int { - return xxx_messageInfo_ForwardingEvent.Size(m) -} -func (m *ForwardingEvent) XXX_DiscardUnknown() { - xxx_messageInfo_ForwardingEvent.DiscardUnknown(m) -} - -var xxx_messageInfo_ForwardingEvent proto.InternalMessageInfo - -func (m *ForwardingEvent) GetTimestamp() uint64 { - if m != nil { - return m.Timestamp - } - return 0 -} - -func (m *ForwardingEvent) GetChanIdIn() uint64 { - if m != nil { - return m.ChanIdIn - } - return 0 -} - -func (m *ForwardingEvent) GetChanIdOut() uint64 { - if m != nil { - return m.ChanIdOut - } - return 0 -} - -func (m *ForwardingEvent) GetAmtIn() uint64 { - if m != nil { - return m.AmtIn - } - return 0 -} - -func (m *ForwardingEvent) GetAmtOut() uint64 { - if m != nil { - return m.AmtOut - } - return 0 -} - -func (m *ForwardingEvent) GetFee() uint64 { - if m != nil { - return m.Fee - } - return 0 -} - -func (m *ForwardingEvent) GetFeeMsat() uint64 { - if m != nil { - return m.FeeMsat - } - return 0 -} - -func (m *ForwardingEvent) GetAmtInMsat() uint64 { - if m != nil { - return 
m.AmtInMsat - } - return 0 -} - -func (m *ForwardingEvent) GetAmtOutMsat() uint64 { - if m != nil { - return m.AmtOutMsat - } - return 0 -} - -type ForwardingHistoryResponse struct { - // A list of forwarding events from the time slice of the time series - // specified in the request. - ForwardingEvents []*ForwardingEvent `protobuf:"bytes,1,rep,name=forwarding_events,json=forwardingEvents,proto3" json:"forwarding_events,omitempty"` - // The index of the last time in the set of returned forwarding events. Can - // be used to seek further, pagination style. - LastOffsetIndex uint32 `protobuf:"varint,2,opt,name=last_offset_index,json=lastOffsetIndex,proto3" json:"last_offset_index,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ForwardingHistoryResponse) Reset() { *m = ForwardingHistoryResponse{} } -func (m *ForwardingHistoryResponse) String() string { return proto.CompactTextString(m) } -func (*ForwardingHistoryResponse) ProtoMessage() {} -func (*ForwardingHistoryResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{140} -} - -func (m *ForwardingHistoryResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ForwardingHistoryResponse.Unmarshal(m, b) -} -func (m *ForwardingHistoryResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ForwardingHistoryResponse.Marshal(b, m, deterministic) -} -func (m *ForwardingHistoryResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ForwardingHistoryResponse.Merge(m, src) -} -func (m *ForwardingHistoryResponse) XXX_Size() int { - return xxx_messageInfo_ForwardingHistoryResponse.Size(m) -} -func (m *ForwardingHistoryResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ForwardingHistoryResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ForwardingHistoryResponse proto.InternalMessageInfo - -func (m *ForwardingHistoryResponse) GetForwardingEvents() 
[]*ForwardingEvent { - if m != nil { - return m.ForwardingEvents - } - return nil -} - -func (m *ForwardingHistoryResponse) GetLastOffsetIndex() uint32 { - if m != nil { - return m.LastOffsetIndex - } - return 0 -} - -type ExportChannelBackupRequest struct { - // The target channel point to obtain a back up for. - ChanPoint *ChannelPoint `protobuf:"bytes,1,opt,name=chan_point,json=chanPoint,proto3" json:"chan_point,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ExportChannelBackupRequest) Reset() { *m = ExportChannelBackupRequest{} } -func (m *ExportChannelBackupRequest) String() string { return proto.CompactTextString(m) } -func (*ExportChannelBackupRequest) ProtoMessage() {} -func (*ExportChannelBackupRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{141} -} - -func (m *ExportChannelBackupRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExportChannelBackupRequest.Unmarshal(m, b) -} -func (m *ExportChannelBackupRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExportChannelBackupRequest.Marshal(b, m, deterministic) -} -func (m *ExportChannelBackupRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExportChannelBackupRequest.Merge(m, src) -} -func (m *ExportChannelBackupRequest) XXX_Size() int { - return xxx_messageInfo_ExportChannelBackupRequest.Size(m) -} -func (m *ExportChannelBackupRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ExportChannelBackupRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ExportChannelBackupRequest proto.InternalMessageInfo - -func (m *ExportChannelBackupRequest) GetChanPoint() *ChannelPoint { - if m != nil { - return m.ChanPoint - } - return nil -} - -type ChannelBackup struct { - // - //Identifies the channel that this backup belongs to. 
- ChanPoint *ChannelPoint `protobuf:"bytes,1,opt,name=chan_point,json=chanPoint,proto3" json:"chan_point,omitempty"` - // - //Is an encrypted single-chan backup. this can be passed to - //RestoreChannelBackups, or the WalletUnlocker Init and Unlock methods in - //order to trigger the recovery protocol. When using REST, this field must be - //encoded as base64. - ChanBackup []byte `protobuf:"bytes,2,opt,name=chan_backup,json=chanBackup,proto3" json:"chan_backup,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ChannelBackup) Reset() { *m = ChannelBackup{} } -func (m *ChannelBackup) String() string { return proto.CompactTextString(m) } -func (*ChannelBackup) ProtoMessage() {} -func (*ChannelBackup) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{142} -} - -func (m *ChannelBackup) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ChannelBackup.Unmarshal(m, b) -} -func (m *ChannelBackup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ChannelBackup.Marshal(b, m, deterministic) -} -func (m *ChannelBackup) XXX_Merge(src proto.Message) { - xxx_messageInfo_ChannelBackup.Merge(m, src) -} -func (m *ChannelBackup) XXX_Size() int { - return xxx_messageInfo_ChannelBackup.Size(m) -} -func (m *ChannelBackup) XXX_DiscardUnknown() { - xxx_messageInfo_ChannelBackup.DiscardUnknown(m) -} - -var xxx_messageInfo_ChannelBackup proto.InternalMessageInfo - -func (m *ChannelBackup) GetChanPoint() *ChannelPoint { - if m != nil { - return m.ChanPoint - } - return nil -} - -func (m *ChannelBackup) GetChanBackup() []byte { - if m != nil { - return m.ChanBackup - } - return nil -} - -type MultiChanBackup struct { - // - //Is the set of all channels that are included in this multi-channel backup. 
- ChanPoints []*ChannelPoint `protobuf:"bytes,1,rep,name=chan_points,json=chanPoints,proto3" json:"chan_points,omitempty"` - // - //A single encrypted blob containing all the static channel backups of the - //channel listed above. This can be stored as a single file or blob, and - //safely be replaced with any prior/future versions. When using REST, this - //field must be encoded as base64. - MultiChanBackup []byte `protobuf:"bytes,2,opt,name=multi_chan_backup,json=multiChanBackup,proto3" json:"multi_chan_backup,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MultiChanBackup) Reset() { *m = MultiChanBackup{} } -func (m *MultiChanBackup) String() string { return proto.CompactTextString(m) } -func (*MultiChanBackup) ProtoMessage() {} -func (*MultiChanBackup) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{143} -} - -func (m *MultiChanBackup) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MultiChanBackup.Unmarshal(m, b) -} -func (m *MultiChanBackup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MultiChanBackup.Marshal(b, m, deterministic) -} -func (m *MultiChanBackup) XXX_Merge(src proto.Message) { - xxx_messageInfo_MultiChanBackup.Merge(m, src) -} -func (m *MultiChanBackup) XXX_Size() int { - return xxx_messageInfo_MultiChanBackup.Size(m) -} -func (m *MultiChanBackup) XXX_DiscardUnknown() { - xxx_messageInfo_MultiChanBackup.DiscardUnknown(m) -} - -var xxx_messageInfo_MultiChanBackup proto.InternalMessageInfo - -func (m *MultiChanBackup) GetChanPoints() []*ChannelPoint { - if m != nil { - return m.ChanPoints - } - return nil -} - -func (m *MultiChanBackup) GetMultiChanBackup() []byte { - if m != nil { - return m.MultiChanBackup - } - return nil -} - -type ChanBackupExportRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` 
-} - -func (m *ChanBackupExportRequest) Reset() { *m = ChanBackupExportRequest{} } -func (m *ChanBackupExportRequest) String() string { return proto.CompactTextString(m) } -func (*ChanBackupExportRequest) ProtoMessage() {} -func (*ChanBackupExportRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{144} -} - -func (m *ChanBackupExportRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ChanBackupExportRequest.Unmarshal(m, b) -} -func (m *ChanBackupExportRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ChanBackupExportRequest.Marshal(b, m, deterministic) -} -func (m *ChanBackupExportRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ChanBackupExportRequest.Merge(m, src) -} -func (m *ChanBackupExportRequest) XXX_Size() int { - return xxx_messageInfo_ChanBackupExportRequest.Size(m) -} -func (m *ChanBackupExportRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ChanBackupExportRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ChanBackupExportRequest proto.InternalMessageInfo - -type ChanBackupSnapshot struct { - // - //The set of new channels that have been added since the last channel backup - //snapshot was requested. - SingleChanBackups *ChannelBackups `protobuf:"bytes,1,opt,name=single_chan_backups,json=singleChanBackups,proto3" json:"single_chan_backups,omitempty"` - // - //A multi-channel backup that covers all open channels currently known to - //lnd. 
- MultiChanBackup *MultiChanBackup `protobuf:"bytes,2,opt,name=multi_chan_backup,json=multiChanBackup,proto3" json:"multi_chan_backup,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ChanBackupSnapshot) Reset() { *m = ChanBackupSnapshot{} } -func (m *ChanBackupSnapshot) String() string { return proto.CompactTextString(m) } -func (*ChanBackupSnapshot) ProtoMessage() {} -func (*ChanBackupSnapshot) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{145} -} - -func (m *ChanBackupSnapshot) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ChanBackupSnapshot.Unmarshal(m, b) -} -func (m *ChanBackupSnapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ChanBackupSnapshot.Marshal(b, m, deterministic) -} -func (m *ChanBackupSnapshot) XXX_Merge(src proto.Message) { - xxx_messageInfo_ChanBackupSnapshot.Merge(m, src) -} -func (m *ChanBackupSnapshot) XXX_Size() int { - return xxx_messageInfo_ChanBackupSnapshot.Size(m) -} -func (m *ChanBackupSnapshot) XXX_DiscardUnknown() { - xxx_messageInfo_ChanBackupSnapshot.DiscardUnknown(m) -} - -var xxx_messageInfo_ChanBackupSnapshot proto.InternalMessageInfo - -func (m *ChanBackupSnapshot) GetSingleChanBackups() *ChannelBackups { - if m != nil { - return m.SingleChanBackups - } - return nil -} - -func (m *ChanBackupSnapshot) GetMultiChanBackup() *MultiChanBackup { - if m != nil { - return m.MultiChanBackup - } - return nil -} - -type ChannelBackups struct { - // - //A set of single-chan static channel backups. 
- ChanBackups []*ChannelBackup `protobuf:"bytes,1,rep,name=chan_backups,json=chanBackups,proto3" json:"chan_backups,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ChannelBackups) Reset() { *m = ChannelBackups{} } -func (m *ChannelBackups) String() string { return proto.CompactTextString(m) } -func (*ChannelBackups) ProtoMessage() {} -func (*ChannelBackups) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{146} -} - -func (m *ChannelBackups) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ChannelBackups.Unmarshal(m, b) -} -func (m *ChannelBackups) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ChannelBackups.Marshal(b, m, deterministic) -} -func (m *ChannelBackups) XXX_Merge(src proto.Message) { - xxx_messageInfo_ChannelBackups.Merge(m, src) -} -func (m *ChannelBackups) XXX_Size() int { - return xxx_messageInfo_ChannelBackups.Size(m) -} -func (m *ChannelBackups) XXX_DiscardUnknown() { - xxx_messageInfo_ChannelBackups.DiscardUnknown(m) -} - -var xxx_messageInfo_ChannelBackups proto.InternalMessageInfo - -func (m *ChannelBackups) GetChanBackups() []*ChannelBackup { - if m != nil { - return m.ChanBackups - } - return nil -} - -type RestoreChanBackupRequest struct { - // Types that are valid to be assigned to Backup: - // *RestoreChanBackupRequest_ChanBackups - // *RestoreChanBackupRequest_MultiChanBackup - Backup isRestoreChanBackupRequest_Backup `protobuf_oneof:"backup"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RestoreChanBackupRequest) Reset() { *m = RestoreChanBackupRequest{} } -func (m *RestoreChanBackupRequest) String() string { return proto.CompactTextString(m) } -func (*RestoreChanBackupRequest) ProtoMessage() {} -func (*RestoreChanBackupRequest) Descriptor() ([]byte, []int) { - return 
fileDescriptor_77a6da22d6a3feb1, []int{147} -} - -func (m *RestoreChanBackupRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RestoreChanBackupRequest.Unmarshal(m, b) -} -func (m *RestoreChanBackupRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RestoreChanBackupRequest.Marshal(b, m, deterministic) -} -func (m *RestoreChanBackupRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_RestoreChanBackupRequest.Merge(m, src) -} -func (m *RestoreChanBackupRequest) XXX_Size() int { - return xxx_messageInfo_RestoreChanBackupRequest.Size(m) -} -func (m *RestoreChanBackupRequest) XXX_DiscardUnknown() { - xxx_messageInfo_RestoreChanBackupRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_RestoreChanBackupRequest proto.InternalMessageInfo - -type isRestoreChanBackupRequest_Backup interface { - isRestoreChanBackupRequest_Backup() -} - -type RestoreChanBackupRequest_ChanBackups struct { - ChanBackups *ChannelBackups `protobuf:"bytes,1,opt,name=chan_backups,json=chanBackups,proto3,oneof"` -} - -type RestoreChanBackupRequest_MultiChanBackup struct { - MultiChanBackup []byte `protobuf:"bytes,2,opt,name=multi_chan_backup,json=multiChanBackup,proto3,oneof"` -} - -func (*RestoreChanBackupRequest_ChanBackups) isRestoreChanBackupRequest_Backup() {} - -func (*RestoreChanBackupRequest_MultiChanBackup) isRestoreChanBackupRequest_Backup() {} - -func (m *RestoreChanBackupRequest) GetBackup() isRestoreChanBackupRequest_Backup { - if m != nil { - return m.Backup - } - return nil -} - -func (m *RestoreChanBackupRequest) GetChanBackups() *ChannelBackups { - if x, ok := m.GetBackup().(*RestoreChanBackupRequest_ChanBackups); ok { - return x.ChanBackups - } - return nil -} - -func (m *RestoreChanBackupRequest) GetMultiChanBackup() []byte { - if x, ok := m.GetBackup().(*RestoreChanBackupRequest_MultiChanBackup); ok { - return x.MultiChanBackup - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. 
-func (*RestoreChanBackupRequest) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*RestoreChanBackupRequest_ChanBackups)(nil), - (*RestoreChanBackupRequest_MultiChanBackup)(nil), - } -} - -type RestoreBackupResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RestoreBackupResponse) Reset() { *m = RestoreBackupResponse{} } -func (m *RestoreBackupResponse) String() string { return proto.CompactTextString(m) } -func (*RestoreBackupResponse) ProtoMessage() {} -func (*RestoreBackupResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{148} -} - -func (m *RestoreBackupResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RestoreBackupResponse.Unmarshal(m, b) -} -func (m *RestoreBackupResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RestoreBackupResponse.Marshal(b, m, deterministic) -} -func (m *RestoreBackupResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_RestoreBackupResponse.Merge(m, src) -} -func (m *RestoreBackupResponse) XXX_Size() int { - return xxx_messageInfo_RestoreBackupResponse.Size(m) -} -func (m *RestoreBackupResponse) XXX_DiscardUnknown() { - xxx_messageInfo_RestoreBackupResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_RestoreBackupResponse proto.InternalMessageInfo - -type ChannelBackupSubscription struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ChannelBackupSubscription) Reset() { *m = ChannelBackupSubscription{} } -func (m *ChannelBackupSubscription) String() string { return proto.CompactTextString(m) } -func (*ChannelBackupSubscription) ProtoMessage() {} -func (*ChannelBackupSubscription) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{149} -} - -func (m *ChannelBackupSubscription) XXX_Unmarshal(b []byte) error { - return 
xxx_messageInfo_ChannelBackupSubscription.Unmarshal(m, b) -} -func (m *ChannelBackupSubscription) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ChannelBackupSubscription.Marshal(b, m, deterministic) -} -func (m *ChannelBackupSubscription) XXX_Merge(src proto.Message) { - xxx_messageInfo_ChannelBackupSubscription.Merge(m, src) -} -func (m *ChannelBackupSubscription) XXX_Size() int { - return xxx_messageInfo_ChannelBackupSubscription.Size(m) -} -func (m *ChannelBackupSubscription) XXX_DiscardUnknown() { - xxx_messageInfo_ChannelBackupSubscription.DiscardUnknown(m) -} - -var xxx_messageInfo_ChannelBackupSubscription proto.InternalMessageInfo - -type VerifyChanBackupResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *VerifyChanBackupResponse) Reset() { *m = VerifyChanBackupResponse{} } -func (m *VerifyChanBackupResponse) String() string { return proto.CompactTextString(m) } -func (*VerifyChanBackupResponse) ProtoMessage() {} -func (*VerifyChanBackupResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{150} -} - -func (m *VerifyChanBackupResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VerifyChanBackupResponse.Unmarshal(m, b) -} -func (m *VerifyChanBackupResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VerifyChanBackupResponse.Marshal(b, m, deterministic) -} -func (m *VerifyChanBackupResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_VerifyChanBackupResponse.Merge(m, src) -} -func (m *VerifyChanBackupResponse) XXX_Size() int { - return xxx_messageInfo_VerifyChanBackupResponse.Size(m) -} -func (m *VerifyChanBackupResponse) XXX_DiscardUnknown() { - xxx_messageInfo_VerifyChanBackupResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_VerifyChanBackupResponse proto.InternalMessageInfo - -type MacaroonPermission struct { - // The entity a 
permission grants access to. - Entity string `protobuf:"bytes,1,opt,name=entity,proto3" json:"entity,omitempty"` - // The action that is granted. - Action string `protobuf:"bytes,2,opt,name=action,proto3" json:"action,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MacaroonPermission) Reset() { *m = MacaroonPermission{} } -func (m *MacaroonPermission) String() string { return proto.CompactTextString(m) } -func (*MacaroonPermission) ProtoMessage() {} -func (*MacaroonPermission) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{151} -} - -func (m *MacaroonPermission) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MacaroonPermission.Unmarshal(m, b) -} -func (m *MacaroonPermission) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MacaroonPermission.Marshal(b, m, deterministic) -} -func (m *MacaroonPermission) XXX_Merge(src proto.Message) { - xxx_messageInfo_MacaroonPermission.Merge(m, src) -} -func (m *MacaroonPermission) XXX_Size() int { - return xxx_messageInfo_MacaroonPermission.Size(m) -} -func (m *MacaroonPermission) XXX_DiscardUnknown() { - xxx_messageInfo_MacaroonPermission.DiscardUnknown(m) -} - -var xxx_messageInfo_MacaroonPermission proto.InternalMessageInfo - -func (m *MacaroonPermission) GetEntity() string { - if m != nil { - return m.Entity - } - return "" -} - -func (m *MacaroonPermission) GetAction() string { - if m != nil { - return m.Action - } - return "" -} - -type BakeMacaroonRequest struct { - // The list of permissions the new macaroon should grant. - Permissions []*MacaroonPermission `protobuf:"bytes,1,rep,name=permissions,proto3" json:"permissions,omitempty"` - // The root key ID used to create the macaroon, must be a positive integer. 
- RootKeyId uint64 `protobuf:"varint,2,opt,name=root_key_id,json=rootKeyId,proto3" json:"root_key_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BakeMacaroonRequest) Reset() { *m = BakeMacaroonRequest{} } -func (m *BakeMacaroonRequest) String() string { return proto.CompactTextString(m) } -func (*BakeMacaroonRequest) ProtoMessage() {} -func (*BakeMacaroonRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{152} -} - -func (m *BakeMacaroonRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BakeMacaroonRequest.Unmarshal(m, b) -} -func (m *BakeMacaroonRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BakeMacaroonRequest.Marshal(b, m, deterministic) -} -func (m *BakeMacaroonRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_BakeMacaroonRequest.Merge(m, src) -} -func (m *BakeMacaroonRequest) XXX_Size() int { - return xxx_messageInfo_BakeMacaroonRequest.Size(m) -} -func (m *BakeMacaroonRequest) XXX_DiscardUnknown() { - xxx_messageInfo_BakeMacaroonRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_BakeMacaroonRequest proto.InternalMessageInfo - -func (m *BakeMacaroonRequest) GetPermissions() []*MacaroonPermission { - if m != nil { - return m.Permissions - } - return nil -} - -func (m *BakeMacaroonRequest) GetRootKeyId() uint64 { - if m != nil { - return m.RootKeyId - } - return 0 -} - -type BakeMacaroonResponse struct { - // The hex encoded macaroon, serialized in binary format. 
- Macaroon string `protobuf:"bytes,1,opt,name=macaroon,proto3" json:"macaroon,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BakeMacaroonResponse) Reset() { *m = BakeMacaroonResponse{} } -func (m *BakeMacaroonResponse) String() string { return proto.CompactTextString(m) } -func (*BakeMacaroonResponse) ProtoMessage() {} -func (*BakeMacaroonResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{153} -} - -func (m *BakeMacaroonResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BakeMacaroonResponse.Unmarshal(m, b) -} -func (m *BakeMacaroonResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BakeMacaroonResponse.Marshal(b, m, deterministic) -} -func (m *BakeMacaroonResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_BakeMacaroonResponse.Merge(m, src) -} -func (m *BakeMacaroonResponse) XXX_Size() int { - return xxx_messageInfo_BakeMacaroonResponse.Size(m) -} -func (m *BakeMacaroonResponse) XXX_DiscardUnknown() { - xxx_messageInfo_BakeMacaroonResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_BakeMacaroonResponse proto.InternalMessageInfo - -func (m *BakeMacaroonResponse) GetMacaroon() string { - if m != nil { - return m.Macaroon - } - return "" -} - -type ListMacaroonIDsRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListMacaroonIDsRequest) Reset() { *m = ListMacaroonIDsRequest{} } -func (m *ListMacaroonIDsRequest) String() string { return proto.CompactTextString(m) } -func (*ListMacaroonIDsRequest) ProtoMessage() {} -func (*ListMacaroonIDsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{154} -} - -func (m *ListMacaroonIDsRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListMacaroonIDsRequest.Unmarshal(m, b) -} -func (m 
*ListMacaroonIDsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListMacaroonIDsRequest.Marshal(b, m, deterministic) -} -func (m *ListMacaroonIDsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListMacaroonIDsRequest.Merge(m, src) -} -func (m *ListMacaroonIDsRequest) XXX_Size() int { - return xxx_messageInfo_ListMacaroonIDsRequest.Size(m) -} -func (m *ListMacaroonIDsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ListMacaroonIDsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ListMacaroonIDsRequest proto.InternalMessageInfo - -type ListMacaroonIDsResponse struct { - // The list of root key IDs that are in use. - RootKeyIds []uint64 `protobuf:"varint,1,rep,packed,name=root_key_ids,json=rootKeyIds,proto3" json:"root_key_ids,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListMacaroonIDsResponse) Reset() { *m = ListMacaroonIDsResponse{} } -func (m *ListMacaroonIDsResponse) String() string { return proto.CompactTextString(m) } -func (*ListMacaroonIDsResponse) ProtoMessage() {} -func (*ListMacaroonIDsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{155} -} - -func (m *ListMacaroonIDsResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListMacaroonIDsResponse.Unmarshal(m, b) -} -func (m *ListMacaroonIDsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListMacaroonIDsResponse.Marshal(b, m, deterministic) -} -func (m *ListMacaroonIDsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListMacaroonIDsResponse.Merge(m, src) -} -func (m *ListMacaroonIDsResponse) XXX_Size() int { - return xxx_messageInfo_ListMacaroonIDsResponse.Size(m) -} -func (m *ListMacaroonIDsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ListMacaroonIDsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ListMacaroonIDsResponse 
proto.InternalMessageInfo - -func (m *ListMacaroonIDsResponse) GetRootKeyIds() []uint64 { - if m != nil { - return m.RootKeyIds - } - return nil -} - -type DeleteMacaroonIDRequest struct { - // The root key ID to be removed. - RootKeyId uint64 `protobuf:"varint,1,opt,name=root_key_id,json=rootKeyId,proto3" json:"root_key_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DeleteMacaroonIDRequest) Reset() { *m = DeleteMacaroonIDRequest{} } -func (m *DeleteMacaroonIDRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteMacaroonIDRequest) ProtoMessage() {} -func (*DeleteMacaroonIDRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{156} -} - -func (m *DeleteMacaroonIDRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DeleteMacaroonIDRequest.Unmarshal(m, b) -} -func (m *DeleteMacaroonIDRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DeleteMacaroonIDRequest.Marshal(b, m, deterministic) -} -func (m *DeleteMacaroonIDRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteMacaroonIDRequest.Merge(m, src) -} -func (m *DeleteMacaroonIDRequest) XXX_Size() int { - return xxx_messageInfo_DeleteMacaroonIDRequest.Size(m) -} -func (m *DeleteMacaroonIDRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteMacaroonIDRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_DeleteMacaroonIDRequest proto.InternalMessageInfo - -func (m *DeleteMacaroonIDRequest) GetRootKeyId() uint64 { - if m != nil { - return m.RootKeyId - } - return 0 -} - -type DeleteMacaroonIDResponse struct { - // A boolean indicates that the deletion is successful. 
- Deleted bool `protobuf:"varint,1,opt,name=deleted,proto3" json:"deleted,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DeleteMacaroonIDResponse) Reset() { *m = DeleteMacaroonIDResponse{} } -func (m *DeleteMacaroonIDResponse) String() string { return proto.CompactTextString(m) } -func (*DeleteMacaroonIDResponse) ProtoMessage() {} -func (*DeleteMacaroonIDResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{157} -} - -func (m *DeleteMacaroonIDResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DeleteMacaroonIDResponse.Unmarshal(m, b) -} -func (m *DeleteMacaroonIDResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DeleteMacaroonIDResponse.Marshal(b, m, deterministic) -} -func (m *DeleteMacaroonIDResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteMacaroonIDResponse.Merge(m, src) -} -func (m *DeleteMacaroonIDResponse) XXX_Size() int { - return xxx_messageInfo_DeleteMacaroonIDResponse.Size(m) -} -func (m *DeleteMacaroonIDResponse) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteMacaroonIDResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_DeleteMacaroonIDResponse proto.InternalMessageInfo - -func (m *DeleteMacaroonIDResponse) GetDeleted() bool { - if m != nil { - return m.Deleted - } - return false -} - -type MacaroonPermissionList struct { - // A list of macaroon permissions. 
- Permissions []*MacaroonPermission `protobuf:"bytes,1,rep,name=permissions,proto3" json:"permissions,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MacaroonPermissionList) Reset() { *m = MacaroonPermissionList{} } -func (m *MacaroonPermissionList) String() string { return proto.CompactTextString(m) } -func (*MacaroonPermissionList) ProtoMessage() {} -func (*MacaroonPermissionList) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{158} -} - -func (m *MacaroonPermissionList) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MacaroonPermissionList.Unmarshal(m, b) -} -func (m *MacaroonPermissionList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MacaroonPermissionList.Marshal(b, m, deterministic) -} -func (m *MacaroonPermissionList) XXX_Merge(src proto.Message) { - xxx_messageInfo_MacaroonPermissionList.Merge(m, src) -} -func (m *MacaroonPermissionList) XXX_Size() int { - return xxx_messageInfo_MacaroonPermissionList.Size(m) -} -func (m *MacaroonPermissionList) XXX_DiscardUnknown() { - xxx_messageInfo_MacaroonPermissionList.DiscardUnknown(m) -} - -var xxx_messageInfo_MacaroonPermissionList proto.InternalMessageInfo - -func (m *MacaroonPermissionList) GetPermissions() []*MacaroonPermission { - if m != nil { - return m.Permissions - } - return nil -} - -type ListPermissionsRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListPermissionsRequest) Reset() { *m = ListPermissionsRequest{} } -func (m *ListPermissionsRequest) String() string { return proto.CompactTextString(m) } -func (*ListPermissionsRequest) ProtoMessage() {} -func (*ListPermissionsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{159} -} - -func (m *ListPermissionsRequest) XXX_Unmarshal(b []byte) error { - 
return xxx_messageInfo_ListPermissionsRequest.Unmarshal(m, b) -} -func (m *ListPermissionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListPermissionsRequest.Marshal(b, m, deterministic) -} -func (m *ListPermissionsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListPermissionsRequest.Merge(m, src) -} -func (m *ListPermissionsRequest) XXX_Size() int { - return xxx_messageInfo_ListPermissionsRequest.Size(m) -} -func (m *ListPermissionsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ListPermissionsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ListPermissionsRequest proto.InternalMessageInfo - -type ListPermissionsResponse struct { - // - //A map between all RPC method URIs and their required macaroon permissions to - //access them. - MethodPermissions map[string]*MacaroonPermissionList `protobuf:"bytes,1,rep,name=method_permissions,json=methodPermissions,proto3" json:"method_permissions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListPermissionsResponse) Reset() { *m = ListPermissionsResponse{} } -func (m *ListPermissionsResponse) String() string { return proto.CompactTextString(m) } -func (*ListPermissionsResponse) ProtoMessage() {} -func (*ListPermissionsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{160} -} - -func (m *ListPermissionsResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListPermissionsResponse.Unmarshal(m, b) -} -func (m *ListPermissionsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListPermissionsResponse.Marshal(b, m, deterministic) -} -func (m *ListPermissionsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListPermissionsResponse.Merge(m, src) -} -func (m *ListPermissionsResponse) 
XXX_Size() int { - return xxx_messageInfo_ListPermissionsResponse.Size(m) -} -func (m *ListPermissionsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ListPermissionsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ListPermissionsResponse proto.InternalMessageInfo - -func (m *ListPermissionsResponse) GetMethodPermissions() map[string]*MacaroonPermissionList { - if m != nil { - return m.MethodPermissions - } - return nil -} - -type Failure struct { - // Failure code as defined in the Lightning spec - Code Failure_FailureCode `protobuf:"varint,1,opt,name=code,proto3,enum=lnrpc.Failure_FailureCode" json:"code,omitempty"` - // An optional channel update message. - ChannelUpdate *ChannelUpdate `protobuf:"bytes,3,opt,name=channel_update,json=channelUpdate,proto3" json:"channel_update,omitempty"` - // A failure type-dependent htlc value. - HtlcMsat uint64 `protobuf:"varint,4,opt,name=htlc_msat,json=htlcMsat,proto3" json:"htlc_msat,omitempty"` - // The sha256 sum of the onion payload. - OnionSha_256 []byte `protobuf:"bytes,5,opt,name=onion_sha_256,json=onionSha256,proto3" json:"onion_sha_256,omitempty"` - // A failure type-dependent cltv expiry value. - CltvExpiry uint32 `protobuf:"varint,6,opt,name=cltv_expiry,json=cltvExpiry,proto3" json:"cltv_expiry,omitempty"` - // A failure type-dependent flags value. - Flags uint32 `protobuf:"varint,7,opt,name=flags,proto3" json:"flags,omitempty"` - // - //The position in the path of the intermediate or final node that generated - //the failure message. Position zero is the sender node. - FailureSourceIndex uint32 `protobuf:"varint,8,opt,name=failure_source_index,json=failureSourceIndex,proto3" json:"failure_source_index,omitempty"` - // A failure type-dependent block height. 
- Height uint32 `protobuf:"varint,9,opt,name=height,proto3" json:"height,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Failure) Reset() { *m = Failure{} } -func (m *Failure) String() string { return proto.CompactTextString(m) } -func (*Failure) ProtoMessage() {} -func (*Failure) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{161} -} - -func (m *Failure) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Failure.Unmarshal(m, b) -} -func (m *Failure) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Failure.Marshal(b, m, deterministic) -} -func (m *Failure) XXX_Merge(src proto.Message) { - xxx_messageInfo_Failure.Merge(m, src) -} -func (m *Failure) XXX_Size() int { - return xxx_messageInfo_Failure.Size(m) -} -func (m *Failure) XXX_DiscardUnknown() { - xxx_messageInfo_Failure.DiscardUnknown(m) -} - -var xxx_messageInfo_Failure proto.InternalMessageInfo - -func (m *Failure) GetCode() Failure_FailureCode { - if m != nil { - return m.Code - } - return Failure_RESERVED -} - -func (m *Failure) GetChannelUpdate() *ChannelUpdate { - if m != nil { - return m.ChannelUpdate - } - return nil -} - -func (m *Failure) GetHtlcMsat() uint64 { - if m != nil { - return m.HtlcMsat - } - return 0 -} - -func (m *Failure) GetOnionSha_256() []byte { - if m != nil { - return m.OnionSha_256 - } - return nil -} - -func (m *Failure) GetCltvExpiry() uint32 { - if m != nil { - return m.CltvExpiry - } - return 0 -} - -func (m *Failure) GetFlags() uint32 { - if m != nil { - return m.Flags - } - return 0 -} - -func (m *Failure) GetFailureSourceIndex() uint32 { - if m != nil { - return m.FailureSourceIndex - } - return 0 -} - -func (m *Failure) GetHeight() uint32 { - if m != nil { - return m.Height - } - return 0 -} - -type ChannelUpdate struct { - // - //The signature that validates the announced data and proves the ownership - //of 
node id. - Signature []byte `protobuf:"bytes,1,opt,name=signature,proto3" json:"signature,omitempty"` - // - //The target chain that this channel was opened within. This value - //should be the genesis hash of the target chain. Along with the short - //channel ID, this uniquely identifies the channel globally in a - //blockchain. - ChainHash []byte `protobuf:"bytes,2,opt,name=chain_hash,json=chainHash,proto3" json:"chain_hash,omitempty"` - // - //The unique description of the funding transaction. - ChanId uint64 `protobuf:"varint,3,opt,name=chan_id,json=chanId,proto3" json:"chan_id,omitempty"` - // - //A timestamp that allows ordering in the case of multiple announcements. - //We should ignore the message if timestamp is not greater than the - //last-received. - Timestamp uint32 `protobuf:"varint,4,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - // - //The bitfield that describes whether optional fields are present in this - //update. Currently, the least-significant bit must be set to 1 if the - //optional field MaxHtlc is present. - MessageFlags uint32 `protobuf:"varint,10,opt,name=message_flags,json=messageFlags,proto3" json:"message_flags,omitempty"` - // - //The bitfield that describes additional meta-data concerning how the - //update is to be interpreted. Currently, the least-significant bit must be - //set to 0 if the creating node corresponds to the first node in the - //previously sent channel announcement and 1 otherwise. If the second bit - //is set, then the channel is set to be disabled. - ChannelFlags uint32 `protobuf:"varint,5,opt,name=channel_flags,json=channelFlags,proto3" json:"channel_flags,omitempty"` - // - //The minimum number of blocks this node requires to be added to the expiry - //of HTLCs. This is a security parameter determined by the node operator. - //This value represents the required gap between the time locks of the - //incoming and outgoing HTLC's set to this node. 
- TimeLockDelta uint32 `protobuf:"varint,6,opt,name=time_lock_delta,json=timeLockDelta,proto3" json:"time_lock_delta,omitempty"` - // - //The minimum HTLC value which will be accepted. - HtlcMinimumMsat uint64 `protobuf:"varint,7,opt,name=htlc_minimum_msat,json=htlcMinimumMsat,proto3" json:"htlc_minimum_msat,omitempty"` - // - //The base fee that must be used for incoming HTLC's to this particular - //channel. This value will be tacked onto the required for a payment - //independent of the size of the payment. - BaseFee uint32 `protobuf:"varint,8,opt,name=base_fee,json=baseFee,proto3" json:"base_fee,omitempty"` - // - //The fee rate that will be charged per millionth of a satoshi. - FeeRate uint32 `protobuf:"varint,9,opt,name=fee_rate,json=feeRate,proto3" json:"fee_rate,omitempty"` - // - //The maximum HTLC value which will be accepted. - HtlcMaximumMsat uint64 `protobuf:"varint,11,opt,name=htlc_maximum_msat,json=htlcMaximumMsat,proto3" json:"htlc_maximum_msat,omitempty"` - // - //The set of data that was appended to this message, some of which we may - //not actually know how to iterate or parse. By holding onto this data, we - //ensure that we're able to properly validate the set of signatures that - //cover these new fields, and ensure we're able to make upgrades to the - //network in a forwards compatible manner. 
- ExtraOpaqueData []byte `protobuf:"bytes,12,opt,name=extra_opaque_data,json=extraOpaqueData,proto3" json:"extra_opaque_data,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ChannelUpdate) Reset() { *m = ChannelUpdate{} } -func (m *ChannelUpdate) String() string { return proto.CompactTextString(m) } -func (*ChannelUpdate) ProtoMessage() {} -func (*ChannelUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{162} -} - -func (m *ChannelUpdate) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ChannelUpdate.Unmarshal(m, b) -} -func (m *ChannelUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ChannelUpdate.Marshal(b, m, deterministic) -} -func (m *ChannelUpdate) XXX_Merge(src proto.Message) { - xxx_messageInfo_ChannelUpdate.Merge(m, src) -} -func (m *ChannelUpdate) XXX_Size() int { - return xxx_messageInfo_ChannelUpdate.Size(m) -} -func (m *ChannelUpdate) XXX_DiscardUnknown() { - xxx_messageInfo_ChannelUpdate.DiscardUnknown(m) -} - -var xxx_messageInfo_ChannelUpdate proto.InternalMessageInfo - -func (m *ChannelUpdate) GetSignature() []byte { - if m != nil { - return m.Signature - } - return nil -} - -func (m *ChannelUpdate) GetChainHash() []byte { - if m != nil { - return m.ChainHash - } - return nil -} - -func (m *ChannelUpdate) GetChanId() uint64 { - if m != nil { - return m.ChanId - } - return 0 -} - -func (m *ChannelUpdate) GetTimestamp() uint32 { - if m != nil { - return m.Timestamp - } - return 0 -} - -func (m *ChannelUpdate) GetMessageFlags() uint32 { - if m != nil { - return m.MessageFlags - } - return 0 -} - -func (m *ChannelUpdate) GetChannelFlags() uint32 { - if m != nil { - return m.ChannelFlags - } - return 0 -} - -func (m *ChannelUpdate) GetTimeLockDelta() uint32 { - if m != nil { - return m.TimeLockDelta - } - return 0 -} - -func (m *ChannelUpdate) GetHtlcMinimumMsat() uint64 { - if 
m != nil { - return m.HtlcMinimumMsat - } - return 0 -} - -func (m *ChannelUpdate) GetBaseFee() uint32 { - if m != nil { - return m.BaseFee - } - return 0 -} - -func (m *ChannelUpdate) GetFeeRate() uint32 { - if m != nil { - return m.FeeRate - } - return 0 -} - -func (m *ChannelUpdate) GetHtlcMaximumMsat() uint64 { - if m != nil { - return m.HtlcMaximumMsat - } - return 0 -} - -func (m *ChannelUpdate) GetExtraOpaqueData() []byte { - if m != nil { - return m.ExtraOpaqueData - } - return nil -} - -type MacaroonId struct { - Nonce []byte `protobuf:"bytes,1,opt,name=nonce,proto3" json:"nonce,omitempty"` - StorageId []byte `protobuf:"bytes,2,opt,name=storageId,proto3" json:"storageId,omitempty"` - Ops []*Op `protobuf:"bytes,3,rep,name=ops,proto3" json:"ops,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MacaroonId) Reset() { *m = MacaroonId{} } -func (m *MacaroonId) String() string { return proto.CompactTextString(m) } -func (*MacaroonId) ProtoMessage() {} -func (*MacaroonId) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{163} -} - -func (m *MacaroonId) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MacaroonId.Unmarshal(m, b) -} -func (m *MacaroonId) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MacaroonId.Marshal(b, m, deterministic) -} -func (m *MacaroonId) XXX_Merge(src proto.Message) { - xxx_messageInfo_MacaroonId.Merge(m, src) -} -func (m *MacaroonId) XXX_Size() int { - return xxx_messageInfo_MacaroonId.Size(m) -} -func (m *MacaroonId) XXX_DiscardUnknown() { - xxx_messageInfo_MacaroonId.DiscardUnknown(m) -} - -var xxx_messageInfo_MacaroonId proto.InternalMessageInfo - -func (m *MacaroonId) GetNonce() []byte { - if m != nil { - return m.Nonce - } - return nil -} - -func (m *MacaroonId) GetStorageId() []byte { - if m != nil { - return m.StorageId - } - return nil -} - -func (m *MacaroonId) 
GetOps() []*Op { - if m != nil { - return m.Ops - } - return nil -} - -type Op struct { - Entity string `protobuf:"bytes,1,opt,name=entity,proto3" json:"entity,omitempty"` - Actions []string `protobuf:"bytes,2,rep,name=actions,proto3" json:"actions,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Op) Reset() { *m = Op{} } -func (m *Op) String() string { return proto.CompactTextString(m) } -func (*Op) ProtoMessage() {} -func (*Op) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{164} -} - -func (m *Op) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Op.Unmarshal(m, b) -} -func (m *Op) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Op.Marshal(b, m, deterministic) -} -func (m *Op) XXX_Merge(src proto.Message) { - xxx_messageInfo_Op.Merge(m, src) -} -func (m *Op) XXX_Size() int { - return xxx_messageInfo_Op.Size(m) -} -func (m *Op) XXX_DiscardUnknown() { - xxx_messageInfo_Op.DiscardUnknown(m) -} - -var xxx_messageInfo_Op proto.InternalMessageInfo - -func (m *Op) GetEntity() string { - if m != nil { - return m.Entity - } - return "" -} - -func (m *Op) GetActions() []string { - if m != nil { - return m.Actions - } - return nil -} - -type ReSyncChainRequest struct { - FromHeight int32 `protobuf:"varint,1,opt,name=from_height,json=fromHeight,proto3" json:"from_height,omitempty"` - ToHeight int32 `protobuf:"varint,2,opt,name=to_height,json=toHeight,proto3" json:"to_height,omitempty"` - Addresses []string `protobuf:"bytes,3,rep,name=addresses,proto3" json:"addresses,omitempty"` - DropDb bool `protobuf:"varint,4,opt,name=drop_db,json=dropDb,proto3" json:"drop_db,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ReSyncChainRequest) Reset() { *m = ReSyncChainRequest{} } -func (m *ReSyncChainRequest) String() string { return 
proto.CompactTextString(m) } -func (*ReSyncChainRequest) ProtoMessage() {} -func (*ReSyncChainRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{165} -} - -func (m *ReSyncChainRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ReSyncChainRequest.Unmarshal(m, b) -} -func (m *ReSyncChainRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ReSyncChainRequest.Marshal(b, m, deterministic) -} -func (m *ReSyncChainRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReSyncChainRequest.Merge(m, src) -} -func (m *ReSyncChainRequest) XXX_Size() int { - return xxx_messageInfo_ReSyncChainRequest.Size(m) -} -func (m *ReSyncChainRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ReSyncChainRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ReSyncChainRequest proto.InternalMessageInfo - -func (m *ReSyncChainRequest) GetFromHeight() int32 { - if m != nil { - return m.FromHeight - } - return 0 -} - -func (m *ReSyncChainRequest) GetToHeight() int32 { - if m != nil { - return m.ToHeight - } - return 0 -} - -func (m *ReSyncChainRequest) GetAddresses() []string { - if m != nil { - return m.Addresses - } - return nil -} - -func (m *ReSyncChainRequest) GetDropDb() bool { - if m != nil { - return m.DropDb - } - return false -} - -type ReSyncChainResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ReSyncChainResponse) Reset() { *m = ReSyncChainResponse{} } -func (m *ReSyncChainResponse) String() string { return proto.CompactTextString(m) } -func (*ReSyncChainResponse) ProtoMessage() {} -func (*ReSyncChainResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{166} -} - -func (m *ReSyncChainResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ReSyncChainResponse.Unmarshal(m, b) -} -func (m *ReSyncChainResponse) XXX_Marshal(b []byte, deterministic bool) 
([]byte, error) { - return xxx_messageInfo_ReSyncChainResponse.Marshal(b, m, deterministic) -} -func (m *ReSyncChainResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReSyncChainResponse.Merge(m, src) -} -func (m *ReSyncChainResponse) XXX_Size() int { - return xxx_messageInfo_ReSyncChainResponse.Size(m) -} -func (m *ReSyncChainResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ReSyncChainResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ReSyncChainResponse proto.InternalMessageInfo - -type StopReSyncRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StopReSyncRequest) Reset() { *m = StopReSyncRequest{} } -func (m *StopReSyncRequest) String() string { return proto.CompactTextString(m) } -func (*StopReSyncRequest) ProtoMessage() {} -func (*StopReSyncRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{167} -} - -func (m *StopReSyncRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StopReSyncRequest.Unmarshal(m, b) -} -func (m *StopReSyncRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StopReSyncRequest.Marshal(b, m, deterministic) -} -func (m *StopReSyncRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_StopReSyncRequest.Merge(m, src) -} -func (m *StopReSyncRequest) XXX_Size() int { - return xxx_messageInfo_StopReSyncRequest.Size(m) -} -func (m *StopReSyncRequest) XXX_DiscardUnknown() { - xxx_messageInfo_StopReSyncRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_StopReSyncRequest proto.InternalMessageInfo - -type StopReSyncResponse struct { - Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StopReSyncResponse) Reset() { *m = StopReSyncResponse{} } -func (m *StopReSyncResponse) String() string { return 
proto.CompactTextString(m) } -func (*StopReSyncResponse) ProtoMessage() {} -func (*StopReSyncResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{168} -} - -func (m *StopReSyncResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StopReSyncResponse.Unmarshal(m, b) -} -func (m *StopReSyncResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StopReSyncResponse.Marshal(b, m, deterministic) -} -func (m *StopReSyncResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_StopReSyncResponse.Merge(m, src) -} -func (m *StopReSyncResponse) XXX_Size() int { - return xxx_messageInfo_StopReSyncResponse.Size(m) -} -func (m *StopReSyncResponse) XXX_DiscardUnknown() { - xxx_messageInfo_StopReSyncResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_StopReSyncResponse proto.InternalMessageInfo - -func (m *StopReSyncResponse) GetValue() string { - if m != nil { - return m.Value - } - return "" -} - -func init() { - proto.RegisterEnum("lnrpc.AddressType", AddressType_name, AddressType_value) - proto.RegisterEnum("lnrpc.CommitmentType", CommitmentType_name, CommitmentType_value) - proto.RegisterEnum("lnrpc.Initiator", Initiator_name, Initiator_value) - proto.RegisterEnum("lnrpc.ResolutionType", ResolutionType_name, ResolutionType_value) - proto.RegisterEnum("lnrpc.ResolutionOutcome", ResolutionOutcome_name, ResolutionOutcome_value) - proto.RegisterEnum("lnrpc.NodeMetricType", NodeMetricType_name, NodeMetricType_value) - proto.RegisterEnum("lnrpc.InvoiceHTLCState", InvoiceHTLCState_name, InvoiceHTLCState_value) - proto.RegisterEnum("lnrpc.PaymentFailureReason", PaymentFailureReason_name, PaymentFailureReason_value) - proto.RegisterEnum("lnrpc.FeatureBit", FeatureBit_name, FeatureBit_value) - proto.RegisterEnum("lnrpc.ChannelCloseSummary_ClosureType", ChannelCloseSummary_ClosureType_name, ChannelCloseSummary_ClosureType_value) - proto.RegisterEnum("lnrpc.Peer_SyncType", Peer_SyncType_name, 
Peer_SyncType_value) - proto.RegisterEnum("lnrpc.PeerEvent_EventType", PeerEvent_EventType_name, PeerEvent_EventType_value) - proto.RegisterEnum("lnrpc.PendingChannelsResponse_ForceClosedChannel_AnchorState", PendingChannelsResponse_ForceClosedChannel_AnchorState_name, PendingChannelsResponse_ForceClosedChannel_AnchorState_value) - proto.RegisterEnum("lnrpc.ChannelEventUpdate_UpdateType", ChannelEventUpdate_UpdateType_name, ChannelEventUpdate_UpdateType_value) - proto.RegisterEnum("lnrpc.Invoice_InvoiceState", Invoice_InvoiceState_name, Invoice_InvoiceState_value) - proto.RegisterEnum("lnrpc.Payment_PaymentStatus", Payment_PaymentStatus_name, Payment_PaymentStatus_value) - proto.RegisterEnum("lnrpc.HTLCAttempt_HTLCStatus", HTLCAttempt_HTLCStatus_name, HTLCAttempt_HTLCStatus_value) - proto.RegisterEnum("lnrpc.Failure_FailureCode", Failure_FailureCode_name, Failure_FailureCode_value) - proto.RegisterType((*Utxo)(nil), "lnrpc.Utxo") - proto.RegisterType((*Transaction)(nil), "lnrpc.Transaction") - proto.RegisterType((*GetTransactionsRequest)(nil), "lnrpc.GetTransactionsRequest") - proto.RegisterType((*TransactionDetails)(nil), "lnrpc.TransactionDetails") - proto.RegisterType((*FeeLimit)(nil), "lnrpc.FeeLimit") - proto.RegisterType((*SendRequest)(nil), "lnrpc.SendRequest") - proto.RegisterMapType((map[uint64][]byte)(nil), "lnrpc.SendRequest.DestCustomRecordsEntry") - proto.RegisterType((*SendResponse)(nil), "lnrpc.SendResponse") - proto.RegisterType((*SendToRouteRequest)(nil), "lnrpc.SendToRouteRequest") - proto.RegisterType((*ChannelAcceptRequest)(nil), "lnrpc.ChannelAcceptRequest") - proto.RegisterType((*ChannelAcceptResponse)(nil), "lnrpc.ChannelAcceptResponse") - proto.RegisterType((*ChannelPoint)(nil), "lnrpc.ChannelPoint") - proto.RegisterType((*OutPoint)(nil), "lnrpc.OutPoint") - proto.RegisterType((*LightningAddress)(nil), "lnrpc.LightningAddress") - proto.RegisterType((*EstimateFeeRequest)(nil), "lnrpc.EstimateFeeRequest") - 
proto.RegisterMapType((map[string]int64)(nil), "lnrpc.EstimateFeeRequest.AddrToAmountEntry") - proto.RegisterType((*EstimateFeeResponse)(nil), "lnrpc.EstimateFeeResponse") - proto.RegisterType((*SendManyRequest)(nil), "lnrpc.SendManyRequest") - proto.RegisterMapType((map[string]int64)(nil), "lnrpc.SendManyRequest.AddrToAmountEntry") - proto.RegisterType((*SendManyResponse)(nil), "lnrpc.SendManyResponse") - proto.RegisterType((*SendCoinsRequest)(nil), "lnrpc.SendCoinsRequest") - proto.RegisterType((*SendCoinsResponse)(nil), "lnrpc.SendCoinsResponse") - proto.RegisterType((*ListUnspentRequest)(nil), "lnrpc.ListUnspentRequest") - proto.RegisterType((*ListUnspentResponse)(nil), "lnrpc.ListUnspentResponse") - proto.RegisterType((*NewAddressRequest)(nil), "lnrpc.NewAddressRequest") - proto.RegisterType((*NewAddressResponse)(nil), "lnrpc.NewAddressResponse") - proto.RegisterType((*SignMessageRequest)(nil), "lnrpc.SignMessageRequest") - proto.RegisterType((*SignMessageResponse)(nil), "lnrpc.SignMessageResponse") - proto.RegisterType((*VerifyMessageRequest)(nil), "lnrpc.VerifyMessageRequest") - proto.RegisterType((*VerifyMessageResponse)(nil), "lnrpc.VerifyMessageResponse") - proto.RegisterType((*ConnectPeerRequest)(nil), "lnrpc.ConnectPeerRequest") - proto.RegisterType((*ConnectPeerResponse)(nil), "lnrpc.ConnectPeerResponse") - proto.RegisterType((*DisconnectPeerRequest)(nil), "lnrpc.DisconnectPeerRequest") - proto.RegisterType((*DisconnectPeerResponse)(nil), "lnrpc.DisconnectPeerResponse") - proto.RegisterType((*HTLC)(nil), "lnrpc.HTLC") - proto.RegisterType((*ChannelConstraints)(nil), "lnrpc.ChannelConstraints") - proto.RegisterType((*Channel)(nil), "lnrpc.Channel") - proto.RegisterType((*ListChannelsRequest)(nil), "lnrpc.ListChannelsRequest") - proto.RegisterType((*ListChannelsResponse)(nil), "lnrpc.ListChannelsResponse") - proto.RegisterType((*ChannelCloseSummary)(nil), "lnrpc.ChannelCloseSummary") - proto.RegisterType((*Resolution)(nil), "lnrpc.Resolution") - 
proto.RegisterType((*ClosedChannelsRequest)(nil), "lnrpc.ClosedChannelsRequest") - proto.RegisterType((*ClosedChannelsResponse)(nil), "lnrpc.ClosedChannelsResponse") - proto.RegisterType((*Peer)(nil), "lnrpc.Peer") - proto.RegisterMapType((map[uint32]*Feature)(nil), "lnrpc.Peer.FeaturesEntry") - proto.RegisterType((*TimestampedError)(nil), "lnrpc.TimestampedError") - proto.RegisterType((*ListPeersRequest)(nil), "lnrpc.ListPeersRequest") - proto.RegisterType((*ListPeersResponse)(nil), "lnrpc.ListPeersResponse") - proto.RegisterType((*PeerEventSubscription)(nil), "lnrpc.PeerEventSubscription") - proto.RegisterType((*PeerEvent)(nil), "lnrpc.PeerEvent") - proto.RegisterType((*GetInfoRequest)(nil), "lnrpc.GetInfoRequest") - proto.RegisterType((*GetInfoResponse)(nil), "lnrpc.GetInfoResponse") - proto.RegisterMapType((map[uint32]*Feature)(nil), "lnrpc.GetInfoResponse.FeaturesEntry") - proto.RegisterType((*GetRecoveryInfoRequest)(nil), "lnrpc.GetRecoveryInfoRequest") - proto.RegisterType((*GetRecoveryInfoResponse)(nil), "lnrpc.GetRecoveryInfoResponse") - proto.RegisterType((*Chain)(nil), "lnrpc.Chain") - proto.RegisterType((*ConfirmationUpdate)(nil), "lnrpc.ConfirmationUpdate") - proto.RegisterType((*ChannelOpenUpdate)(nil), "lnrpc.ChannelOpenUpdate") - proto.RegisterType((*ChannelCloseUpdate)(nil), "lnrpc.ChannelCloseUpdate") - proto.RegisterType((*CloseChannelRequest)(nil), "lnrpc.CloseChannelRequest") - proto.RegisterType((*CloseStatusUpdate)(nil), "lnrpc.CloseStatusUpdate") - proto.RegisterType((*PendingUpdate)(nil), "lnrpc.PendingUpdate") - proto.RegisterType((*ReadyForPsbtFunding)(nil), "lnrpc.ReadyForPsbtFunding") - proto.RegisterType((*OpenChannelRequest)(nil), "lnrpc.OpenChannelRequest") - proto.RegisterType((*OpenStatusUpdate)(nil), "lnrpc.OpenStatusUpdate") - proto.RegisterType((*KeyLocator)(nil), "lnrpc.KeyLocator") - proto.RegisterType((*KeyDescriptor)(nil), "lnrpc.KeyDescriptor") - proto.RegisterType((*ChanPointShim)(nil), "lnrpc.ChanPointShim") - 
proto.RegisterType((*PsbtShim)(nil), "lnrpc.PsbtShim") - proto.RegisterType((*FundingShim)(nil), "lnrpc.FundingShim") - proto.RegisterType((*FundingShimCancel)(nil), "lnrpc.FundingShimCancel") - proto.RegisterType((*FundingPsbtVerify)(nil), "lnrpc.FundingPsbtVerify") - proto.RegisterType((*FundingPsbtFinalize)(nil), "lnrpc.FundingPsbtFinalize") - proto.RegisterType((*FundingTransitionMsg)(nil), "lnrpc.FundingTransitionMsg") - proto.RegisterType((*FundingStateStepResp)(nil), "lnrpc.FundingStateStepResp") - proto.RegisterType((*PendingHTLC)(nil), "lnrpc.PendingHTLC") - proto.RegisterType((*PendingChannelsRequest)(nil), "lnrpc.PendingChannelsRequest") - proto.RegisterType((*PendingChannelsResponse)(nil), "lnrpc.PendingChannelsResponse") - proto.RegisterType((*PendingChannelsResponse_PendingChannel)(nil), "lnrpc.PendingChannelsResponse.PendingChannel") - proto.RegisterType((*PendingChannelsResponse_PendingOpenChannel)(nil), "lnrpc.PendingChannelsResponse.PendingOpenChannel") - proto.RegisterType((*PendingChannelsResponse_WaitingCloseChannel)(nil), "lnrpc.PendingChannelsResponse.WaitingCloseChannel") - proto.RegisterType((*PendingChannelsResponse_Commitments)(nil), "lnrpc.PendingChannelsResponse.Commitments") - proto.RegisterType((*PendingChannelsResponse_ClosedChannel)(nil), "lnrpc.PendingChannelsResponse.ClosedChannel") - proto.RegisterType((*PendingChannelsResponse_ForceClosedChannel)(nil), "lnrpc.PendingChannelsResponse.ForceClosedChannel") - proto.RegisterType((*ChannelEventSubscription)(nil), "lnrpc.ChannelEventSubscription") - proto.RegisterType((*ChannelEventUpdate)(nil), "lnrpc.ChannelEventUpdate") - proto.RegisterType((*WalletBalanceRequest)(nil), "lnrpc.WalletBalanceRequest") - proto.RegisterType((*WalletBalanceResponse)(nil), "lnrpc.WalletBalanceResponse") - proto.RegisterType((*GetAddressBalancesRequest)(nil), "lnrpc.GetAddressBalancesRequest") - proto.RegisterType((*GetAddressBalancesResponseAddr)(nil), "lnrpc.GetAddressBalancesResponseAddr") - 
proto.RegisterType((*GetAddressBalancesResponse)(nil), "lnrpc.GetAddressBalancesResponse") - proto.RegisterType((*Amount)(nil), "lnrpc.Amount") - proto.RegisterType((*ChannelBalanceRequest)(nil), "lnrpc.ChannelBalanceRequest") - proto.RegisterType((*ChannelBalanceResponse)(nil), "lnrpc.ChannelBalanceResponse") - proto.RegisterType((*QueryRoutesRequest)(nil), "lnrpc.QueryRoutesRequest") - proto.RegisterMapType((map[uint64][]byte)(nil), "lnrpc.QueryRoutesRequest.DestCustomRecordsEntry") - proto.RegisterType((*NodePair)(nil), "lnrpc.NodePair") - proto.RegisterType((*EdgeLocator)(nil), "lnrpc.EdgeLocator") - proto.RegisterType((*QueryRoutesResponse)(nil), "lnrpc.QueryRoutesResponse") - proto.RegisterType((*Hop)(nil), "lnrpc.Hop") - proto.RegisterMapType((map[uint64][]byte)(nil), "lnrpc.Hop.CustomRecordsEntry") - proto.RegisterType((*MPPRecord)(nil), "lnrpc.MPPRecord") - proto.RegisterType((*Route)(nil), "lnrpc.Route") - proto.RegisterType((*NodeInfoRequest)(nil), "lnrpc.NodeInfoRequest") - proto.RegisterType((*NodeInfo)(nil), "lnrpc.NodeInfo") - proto.RegisterType((*LightningNode)(nil), "lnrpc.LightningNode") - proto.RegisterMapType((map[uint32]*Feature)(nil), "lnrpc.LightningNode.FeaturesEntry") - proto.RegisterType((*NodeAddress)(nil), "lnrpc.NodeAddress") - proto.RegisterType((*RoutingPolicy)(nil), "lnrpc.RoutingPolicy") - proto.RegisterType((*ChannelEdge)(nil), "lnrpc.ChannelEdge") - proto.RegisterType((*ChannelGraphRequest)(nil), "lnrpc.ChannelGraphRequest") - proto.RegisterType((*ChannelGraph)(nil), "lnrpc.ChannelGraph") - proto.RegisterType((*NodeMetricsRequest)(nil), "lnrpc.NodeMetricsRequest") - proto.RegisterType((*NodeMetricsResponse)(nil), "lnrpc.NodeMetricsResponse") - proto.RegisterMapType((map[string]*FloatMetric)(nil), "lnrpc.NodeMetricsResponse.BetweennessCentralityEntry") - proto.RegisterType((*FloatMetric)(nil), "lnrpc.FloatMetric") - proto.RegisterType((*ChanInfoRequest)(nil), "lnrpc.ChanInfoRequest") - proto.RegisterType((*NetworkInfoRequest)(nil), 
"lnrpc.NetworkInfoRequest") - proto.RegisterType((*NetworkInfo)(nil), "lnrpc.NetworkInfo") - proto.RegisterType((*StopRequest)(nil), "lnrpc.StopRequest") - proto.RegisterType((*StopResponse)(nil), "lnrpc.StopResponse") - proto.RegisterType((*GraphTopologySubscription)(nil), "lnrpc.GraphTopologySubscription") - proto.RegisterType((*GraphTopologyUpdate)(nil), "lnrpc.GraphTopologyUpdate") - proto.RegisterType((*NodeUpdate)(nil), "lnrpc.NodeUpdate") - proto.RegisterType((*ChannelEdgeUpdate)(nil), "lnrpc.ChannelEdgeUpdate") - proto.RegisterType((*ClosedChannelUpdate)(nil), "lnrpc.ClosedChannelUpdate") - proto.RegisterType((*HopHint)(nil), "lnrpc.HopHint") - proto.RegisterType((*RouteHint)(nil), "lnrpc.RouteHint") - proto.RegisterType((*Invoice)(nil), "lnrpc.Invoice") - proto.RegisterMapType((map[uint32]*Feature)(nil), "lnrpc.Invoice.FeaturesEntry") - proto.RegisterType((*InvoiceHTLC)(nil), "lnrpc.InvoiceHTLC") - proto.RegisterMapType((map[uint64][]byte)(nil), "lnrpc.InvoiceHTLC.CustomRecordsEntry") - proto.RegisterType((*AddInvoiceResponse)(nil), "lnrpc.AddInvoiceResponse") - proto.RegisterType((*PaymentHash)(nil), "lnrpc.PaymentHash") - proto.RegisterType((*ListInvoiceRequest)(nil), "lnrpc.ListInvoiceRequest") - proto.RegisterType((*ListInvoiceResponse)(nil), "lnrpc.ListInvoiceResponse") - proto.RegisterType((*InvoiceSubscription)(nil), "lnrpc.InvoiceSubscription") - proto.RegisterType((*Payment)(nil), "lnrpc.Payment") - proto.RegisterType((*HTLCAttempt)(nil), "lnrpc.HTLCAttempt") - proto.RegisterType((*ListPaymentsRequest)(nil), "lnrpc.ListPaymentsRequest") - proto.RegisterType((*ListPaymentsResponse)(nil), "lnrpc.ListPaymentsResponse") - proto.RegisterType((*DeleteAllPaymentsRequest)(nil), "lnrpc.DeleteAllPaymentsRequest") - proto.RegisterType((*DeleteAllPaymentsResponse)(nil), "lnrpc.DeleteAllPaymentsResponse") - proto.RegisterType((*AbandonChannelRequest)(nil), "lnrpc.AbandonChannelRequest") - proto.RegisterType((*AbandonChannelResponse)(nil), 
"lnrpc.AbandonChannelResponse") - proto.RegisterType((*DebugLevelRequest)(nil), "lnrpc.DebugLevelRequest") - proto.RegisterType((*DebugLevelResponse)(nil), "lnrpc.DebugLevelResponse") - proto.RegisterType((*PayReqString)(nil), "lnrpc.PayReqString") - proto.RegisterType((*PayReq)(nil), "lnrpc.PayReq") - proto.RegisterMapType((map[uint32]*Feature)(nil), "lnrpc.PayReq.FeaturesEntry") - proto.RegisterType((*Feature)(nil), "lnrpc.Feature") - proto.RegisterType((*FeeReportRequest)(nil), "lnrpc.FeeReportRequest") - proto.RegisterType((*ChannelFeeReport)(nil), "lnrpc.ChannelFeeReport") - proto.RegisterType((*FeeReportResponse)(nil), "lnrpc.FeeReportResponse") - proto.RegisterType((*PolicyUpdateRequest)(nil), "lnrpc.PolicyUpdateRequest") - proto.RegisterType((*PolicyUpdateResponse)(nil), "lnrpc.PolicyUpdateResponse") - proto.RegisterType((*ForwardingHistoryRequest)(nil), "lnrpc.ForwardingHistoryRequest") - proto.RegisterType((*ForwardingEvent)(nil), "lnrpc.ForwardingEvent") - proto.RegisterType((*ForwardingHistoryResponse)(nil), "lnrpc.ForwardingHistoryResponse") - proto.RegisterType((*ExportChannelBackupRequest)(nil), "lnrpc.ExportChannelBackupRequest") - proto.RegisterType((*ChannelBackup)(nil), "lnrpc.ChannelBackup") - proto.RegisterType((*MultiChanBackup)(nil), "lnrpc.MultiChanBackup") - proto.RegisterType((*ChanBackupExportRequest)(nil), "lnrpc.ChanBackupExportRequest") - proto.RegisterType((*ChanBackupSnapshot)(nil), "lnrpc.ChanBackupSnapshot") - proto.RegisterType((*ChannelBackups)(nil), "lnrpc.ChannelBackups") - proto.RegisterType((*RestoreChanBackupRequest)(nil), "lnrpc.RestoreChanBackupRequest") - proto.RegisterType((*RestoreBackupResponse)(nil), "lnrpc.RestoreBackupResponse") - proto.RegisterType((*ChannelBackupSubscription)(nil), "lnrpc.ChannelBackupSubscription") - proto.RegisterType((*VerifyChanBackupResponse)(nil), "lnrpc.VerifyChanBackupResponse") - proto.RegisterType((*MacaroonPermission)(nil), "lnrpc.MacaroonPermission") - 
proto.RegisterType((*BakeMacaroonRequest)(nil), "lnrpc.BakeMacaroonRequest") - proto.RegisterType((*BakeMacaroonResponse)(nil), "lnrpc.BakeMacaroonResponse") - proto.RegisterType((*ListMacaroonIDsRequest)(nil), "lnrpc.ListMacaroonIDsRequest") - proto.RegisterType((*ListMacaroonIDsResponse)(nil), "lnrpc.ListMacaroonIDsResponse") - proto.RegisterType((*DeleteMacaroonIDRequest)(nil), "lnrpc.DeleteMacaroonIDRequest") - proto.RegisterType((*DeleteMacaroonIDResponse)(nil), "lnrpc.DeleteMacaroonIDResponse") - proto.RegisterType((*MacaroonPermissionList)(nil), "lnrpc.MacaroonPermissionList") - proto.RegisterType((*ListPermissionsRequest)(nil), "lnrpc.ListPermissionsRequest") - proto.RegisterType((*ListPermissionsResponse)(nil), "lnrpc.ListPermissionsResponse") - proto.RegisterMapType((map[string]*MacaroonPermissionList)(nil), "lnrpc.ListPermissionsResponse.MethodPermissionsEntry") - proto.RegisterType((*Failure)(nil), "lnrpc.Failure") - proto.RegisterType((*ChannelUpdate)(nil), "lnrpc.ChannelUpdate") - proto.RegisterType((*MacaroonId)(nil), "lnrpc.MacaroonId") - proto.RegisterType((*Op)(nil), "lnrpc.Op") - proto.RegisterType((*ReSyncChainRequest)(nil), "lnrpc.ReSyncChainRequest") - proto.RegisterType((*ReSyncChainResponse)(nil), "lnrpc.ReSyncChainResponse") - proto.RegisterType((*StopReSyncRequest)(nil), "lnrpc.StopReSyncRequest") - proto.RegisterType((*StopReSyncResponse)(nil), "lnrpc.StopReSyncResponse") -} - -func init() { proto.RegisterFile("rpc.proto", fileDescriptor_77a6da22d6a3feb1) } - -var fileDescriptor_77a6da22d6a3feb1 = []byte{ - // 12670 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0xbd, 0x59, 0x6c, 0x24, 0x49, - 0x7a, 0x18, 0xdc, 0x75, 0xb1, 0xaa, 0xbe, 0x3a, 0x58, 0x0c, 0x5e, 0xd5, 0xd5, 0xd3, 0xd3, 0x3d, - 0x39, 0xbd, 0x33, 0xbd, 0x3d, 0x3b, 0x9c, 0x9e, 0xde, 0xe9, 0x99, 0x9d, 0xed, 0x5f, 0xbb, 0x5b, - 0x24, 0x8b, 0xcd, 0xda, 0x26, 0xab, 0xb8, 0x59, 0xc5, 0x19, 0x8d, 0x20, 0x29, 0x95, 0xac, 0x0a, 
- 0x92, 0xf9, 0x77, 0x55, 0x66, 0x4d, 0x66, 0x16, 0x9b, 0x5c, 0xc3, 0x80, 0x1e, 0x64, 0xd9, 0x16, - 0x04, 0x03, 0x06, 0x2c, 0xc3, 0x97, 0xe0, 0x0b, 0x3e, 0x9e, 0x04, 0xc3, 0x92, 0xfd, 0xe4, 0x37, - 0x03, 0xd6, 0x8b, 0x0f, 0x18, 0x96, 0xe1, 0x03, 0x82, 0x60, 0x03, 0xb6, 0xfc, 0x60, 0xc0, 0x10, - 0xa0, 0x57, 0x1b, 0x30, 0x22, 0xbe, 0x88, 0xc8, 0xc8, 0xac, 0x64, 0x77, 0xcf, 0x6a, 0xbc, 0x0f, - 0xdd, 0xac, 0xfc, 0xe2, 0x8b, 0x3b, 0xe2, 0x8b, 0xef, 0x8a, 0x2f, 0xa0, 0xec, 0xcf, 0x46, 0x5b, - 0x33, 0xdf, 0x0b, 0x3d, 0x52, 0x98, 0xb8, 0xfe, 0x6c, 0x64, 0xfc, 0x61, 0x06, 0xf2, 0xc7, 0xe1, - 0xa5, 0x47, 0x1e, 0x43, 0xd5, 0x1e, 0x8f, 0x7d, 0x1a, 0x04, 0x56, 0x78, 0x35, 0xa3, 0xcd, 0xcc, - 0xdd, 0xcc, 0xfd, 0xfa, 0x23, 0xb2, 0xc5, 0xd1, 0xb6, 0xda, 0x98, 0x34, 0xbc, 0x9a, 0x51, 0xb3, - 0x62, 0x47, 0x1f, 0xa4, 0x09, 0x45, 0xf1, 0xd9, 0xcc, 0xde, 0xcd, 0xdc, 0x2f, 0x9b, 0xf2, 0x93, - 0xdc, 0x06, 0xb0, 0xa7, 0xde, 0xdc, 0x0d, 0xad, 0xc0, 0x0e, 0x9b, 0xb9, 0xbb, 0x99, 0xfb, 0x39, - 0xb3, 0x8c, 0x90, 0x81, 0x1d, 0x92, 0x5b, 0x50, 0x9e, 0x3d, 0xb7, 0x82, 0x91, 0xef, 0xcc, 0xc2, - 0x66, 0x9e, 0x67, 0x2d, 0xcd, 0x9e, 0x0f, 0xf8, 0x37, 0x79, 0x0f, 0x4a, 0xde, 0x3c, 0x9c, 0x79, - 0x8e, 0x1b, 0x36, 0x0b, 0x77, 0x33, 0xf7, 0x2b, 0x8f, 0x96, 0x45, 0x43, 0xfa, 0xf3, 0xf0, 0x88, - 0x81, 0x4d, 0x85, 0x40, 0xee, 0x41, 0x6d, 0xe4, 0xb9, 0xa7, 0x8e, 0x3f, 0xb5, 0x43, 0xc7, 0x73, - 0x83, 0xe6, 0x12, 0xaf, 0x2b, 0x0e, 0x34, 0xfe, 0x45, 0x16, 0x2a, 0x43, 0xdf, 0x76, 0x03, 0x7b, - 0xc4, 0x00, 0x64, 0x13, 0x8a, 0xe1, 0xa5, 0x75, 0x6e, 0x07, 0xe7, 0xbc, 0xab, 0x65, 0x73, 0x29, - 0xbc, 0xdc, 0xb7, 0x83, 0x73, 0xb2, 0x01, 0x4b, 0xd8, 0x4a, 0xde, 0xa1, 0x9c, 0x29, 0xbe, 0xc8, - 0x7b, 0xb0, 0xe2, 0xce, 0xa7, 0x56, 0xbc, 0x2a, 0xd6, 0xad, 0x82, 0xd9, 0x70, 0xe7, 0xd3, 0x1d, - 0x1d, 0xce, 0x3a, 0x7f, 0x32, 0xf1, 0x46, 0xcf, 0xb1, 0x02, 0xec, 0x5e, 0x99, 0x43, 0x78, 0x1d, - 0x6f, 0x41, 0x55, 0x24, 0x53, 0xe7, 0xec, 0x1c, 0xfb, 0x58, 0x30, 0x2b, 0x88, 0xc0, 0x41, 0xac, - 0x84, 0xd0, 0x99, 0x52, 0x2b, 0x08, 
0xed, 0xe9, 0x4c, 0x74, 0xa9, 0xcc, 0x20, 0x03, 0x06, 0xe0, - 0xc9, 0x5e, 0x68, 0x4f, 0xac, 0x53, 0x4a, 0x83, 0x66, 0x51, 0x24, 0x33, 0xc8, 0x1e, 0xa5, 0x01, - 0xf9, 0x06, 0xd4, 0xc7, 0x34, 0x08, 0x2d, 0x31, 0x19, 0x34, 0x68, 0x96, 0xee, 0xe6, 0xee, 0x97, - 0xcd, 0x1a, 0x83, 0xb6, 0x25, 0x90, 0xbc, 0x01, 0xe0, 0xdb, 0x2f, 0x2c, 0x36, 0x10, 0xf4, 0xb2, - 0x59, 0xc6, 0x59, 0xf0, 0xed, 0x17, 0xc3, 0xcb, 0x7d, 0x7a, 0x49, 0xd6, 0xa0, 0x30, 0xb1, 0x4f, - 0xe8, 0xa4, 0x09, 0x3c, 0x01, 0x3f, 0x8c, 0x7f, 0x9c, 0x81, 0x8d, 0xa7, 0x34, 0xd4, 0xc6, 0x32, - 0x30, 0xe9, 0x97, 0x73, 0x1a, 0x84, 0xac, 0x5b, 0x41, 0x68, 0xfb, 0xa1, 0xec, 0x56, 0x06, 0xbb, - 0xc5, 0x61, 0x51, 0xb7, 0xa8, 0x3b, 0x96, 0x08, 0x59, 0x8e, 0x50, 0xa6, 0xee, 0x58, 0xeb, 0xf5, - 0xa5, 0x1b, 0x58, 0x13, 0x67, 0xea, 0x84, 0x62, 0x74, 0xcb, 0x0c, 0x72, 0xc0, 0x00, 0x6c, 0xd1, - 0xf0, 0xe4, 0xe0, 0xb9, 0x33, 0xe3, 0xa3, 0x5a, 0x30, 0x4b, 0x0c, 0x30, 0x78, 0xee, 0xcc, 0x48, - 0x0b, 0x4a, 0x23, 0xcf, 0x71, 0x4f, 0xec, 0x80, 0x8a, 0x01, 0x55, 0xdf, 0xc6, 0x01, 0x10, 0xad, - 0xc1, 0xbb, 0x34, 0xb4, 0x9d, 0x49, 0x40, 0x3e, 0x86, 0x6a, 0xa8, 0x75, 0xa3, 0x99, 0xb9, 0x9b, - 0xbb, 0x5f, 0x51, 0x6b, 0x5e, 0xcb, 0x60, 0xc6, 0xf0, 0x8c, 0x73, 0x28, 0xed, 0x51, 0x8a, 0x4d, - 0xda, 0x80, 0xc2, 0xa9, 0x73, 0x49, 0xc7, 0xbc, 0xb3, 0xb9, 0xfd, 0x1b, 0x26, 0x7e, 0x92, 0x3b, - 0x00, 0xfc, 0x87, 0x35, 0x55, 0xcb, 0x7f, 0xff, 0x86, 0x59, 0xe6, 0xb0, 0xc3, 0xc0, 0x0e, 0x49, - 0x0b, 0x8a, 0x33, 0xea, 0x8f, 0xa8, 0x5c, 0x68, 0xfb, 0x37, 0x4c, 0x09, 0xd8, 0x2e, 0x42, 0x81, - 0x8f, 0x80, 0xf1, 0xbb, 0x05, 0xa8, 0x0c, 0xa8, 0x3b, 0x96, 0x23, 0x4c, 0x20, 0xcf, 0x66, 0x90, - 0x57, 0x56, 0x35, 0xf9, 0x6f, 0xf2, 0x36, 0x54, 0xf8, 0x5c, 0x07, 0xa1, 0xef, 0xb8, 0x67, 0xb8, - 0x0d, 0xb7, 0xb3, 0xcd, 0x8c, 0x09, 0x0c, 0x3c, 0xe0, 0x50, 0xd2, 0x80, 0x9c, 0x3d, 0x95, 0xdb, - 0x90, 0xfd, 0x24, 0x37, 0xa1, 0x64, 0x4f, 0x43, 0x6c, 0x5e, 0x95, 0x83, 0x8b, 0xf6, 0x34, 0xe4, - 0x4d, 0x7b, 0x0b, 0xaa, 0x33, 0xfb, 0x6a, 0x4a, 0xdd, 0x30, 0x5a, 0xbf, 
0x55, 0xb3, 0x22, 0x60, - 0x7c, 0x05, 0x3f, 0x82, 0x55, 0x1d, 0x45, 0x56, 0x5e, 0x50, 0x95, 0xaf, 0x68, 0xd8, 0xa2, 0x0d, - 0xef, 0xc2, 0xb2, 0xcc, 0xe3, 0x63, 0x7f, 0xf8, 0xba, 0x2e, 0x9b, 0x75, 0x01, 0x96, 0xbd, 0xbc, - 0x0f, 0x8d, 0x53, 0xc7, 0xb5, 0x27, 0xd6, 0x68, 0x12, 0x5e, 0x58, 0x63, 0x3a, 0x09, 0x6d, 0xbe, - 0xc4, 0x0b, 0x66, 0x9d, 0xc3, 0x77, 0x26, 0xe1, 0xc5, 0x2e, 0x83, 0x92, 0x6f, 0x41, 0xf9, 0x94, - 0x52, 0xb1, 0x5c, 0x4a, 0x31, 0x4a, 0x21, 0x67, 0xc8, 0x2c, 0x9d, 0xca, 0xb9, 0xfa, 0x16, 0x34, - 0xbc, 0x79, 0x78, 0xe6, 0x39, 0xee, 0x99, 0x35, 0x3a, 0xb7, 0x5d, 0xcb, 0x19, 0xf3, 0x45, 0x9f, - 0xdf, 0xce, 0x3e, 0xcc, 0x98, 0x75, 0x99, 0xb6, 0x73, 0x6e, 0xbb, 0xdd, 0x31, 0x79, 0x07, 0x96, - 0x27, 0x76, 0x10, 0x5a, 0xe7, 0xde, 0xcc, 0x9a, 0xcd, 0x4f, 0x9e, 0xd3, 0xab, 0x66, 0x8d, 0x0f, - 0x44, 0x8d, 0x81, 0xf7, 0xbd, 0xd9, 0x11, 0x07, 0xb2, 0x35, 0xcb, 0xdb, 0x89, 0x8d, 0x60, 0x7b, - 0xa5, 0x66, 0x96, 0x19, 0x04, 0x2b, 0xfd, 0x02, 0x56, 0xf9, 0xf4, 0x8c, 0xe6, 0x41, 0xe8, 0x4d, - 0x2d, 0x9f, 0x8e, 0x3c, 0x7f, 0x1c, 0x34, 0x2b, 0x7c, 0xad, 0x7d, 0x53, 0x34, 0x56, 0x9b, 0xe3, - 0xad, 0x5d, 0x1a, 0x84, 0x3b, 0x1c, 0xd9, 0x44, 0xdc, 0x8e, 0x1b, 0xfa, 0x57, 0xe6, 0xca, 0x38, - 0x09, 0x27, 0xdf, 0x02, 0x62, 0x4f, 0x26, 0xde, 0x0b, 0x2b, 0xa0, 0x93, 0x53, 0x4b, 0x0c, 0x62, - 0xb3, 0x7e, 0x37, 0x73, 0xbf, 0x64, 0x36, 0x78, 0xca, 0x80, 0x4e, 0x4e, 0x8f, 0x10, 0x4e, 0x3e, - 0x06, 0xbe, 0xfb, 0xad, 0x53, 0x6a, 0x87, 0x73, 0x9f, 0x06, 0xcd, 0xe5, 0xbb, 0xb9, 0xfb, 0xf5, - 0x47, 0x2b, 0x6a, 0xbc, 0x38, 0x78, 0xdb, 0x09, 0xcd, 0x2a, 0xc3, 0x13, 0xdf, 0x41, 0x6b, 0x17, - 0x36, 0xd2, 0x9b, 0xc4, 0x16, 0x15, 0x1b, 0x15, 0xb6, 0x18, 0xf3, 0x26, 0xfb, 0xc9, 0x48, 0xc6, - 0x85, 0x3d, 0x99, 0x53, 0xbe, 0x0a, 0xab, 0x26, 0x7e, 0x7c, 0x37, 0xfb, 0x9d, 0x8c, 0xf1, 0x3b, - 0x19, 0xa8, 0x62, 0x2f, 0x83, 0x99, 0xe7, 0x06, 0x94, 0xbc, 0x0d, 0x35, 0xb9, 0x1a, 0xa8, 0xef, - 0x7b, 0xbe, 0x20, 0xc3, 0x72, 0xe5, 0x75, 0x18, 0x8c, 0x7c, 0x13, 0x1a, 0x12, 0x69, 0xe6, 0x53, - 0x67, 0x6a, 
0x9f, 0xc9, 0xa2, 0xe5, 0x52, 0x3a, 0x12, 0x60, 0xf2, 0x61, 0x54, 0x9e, 0xef, 0xcd, - 0x43, 0xca, 0xd7, 0x7a, 0xe5, 0x51, 0x55, 0x74, 0xcf, 0x64, 0x30, 0x55, 0x3a, 0xff, 0x7a, 0x8d, - 0x75, 0x6e, 0xfc, 0x46, 0x06, 0x08, 0x6b, 0xf6, 0xd0, 0xc3, 0x02, 0x22, 0x4a, 0x17, 0xcb, 0x99, - 0x79, 0xed, 0x1d, 0x92, 0x7d, 0xd9, 0x0e, 0x31, 0xa0, 0x80, 0x6d, 0xcf, 0xa7, 0xb4, 0x1d, 0x93, - 0x7e, 0x98, 0x2f, 0xe5, 0x1a, 0x79, 0xe3, 0x3f, 0xe7, 0x60, 0x8d, 0xad, 0x53, 0x97, 0x4e, 0xda, - 0xa3, 0x11, 0x9d, 0xa9, 0xbd, 0x73, 0x07, 0x2a, 0xae, 0x37, 0xa6, 0x72, 0xc5, 0x62, 0xc3, 0x80, - 0x81, 0xb4, 0xe5, 0x7a, 0x6e, 0x3b, 0x2e, 0x36, 0x1c, 0x07, 0xb3, 0xcc, 0x21, 0xbc, 0xd9, 0xef, - 0xc0, 0xf2, 0x8c, 0xba, 0x63, 0x7d, 0x8b, 0xe4, 0x70, 0xd5, 0x0b, 0xb0, 0xd8, 0x1d, 0x77, 0xa0, - 0x72, 0x3a, 0x47, 0x3c, 0x46, 0x58, 0xf2, 0x7c, 0x0d, 0x80, 0x00, 0xb5, 0x91, 0xbe, 0xcc, 0xe6, - 0xc1, 0x39, 0x4f, 0x2d, 0xf0, 0xd4, 0x22, 0xfb, 0x66, 0x49, 0xb7, 0x01, 0xc6, 0xf3, 0x20, 0x14, - 0x3b, 0x66, 0x89, 0x27, 0x96, 0x19, 0x04, 0x77, 0xcc, 0xfb, 0xb0, 0x3a, 0xb5, 0x2f, 0x2d, 0xbe, - 0x76, 0x2c, 0xc7, 0xb5, 0x4e, 0x27, 0xfc, 0xb0, 0x28, 0x72, 0xbc, 0xc6, 0xd4, 0xbe, 0xfc, 0x8c, - 0xa5, 0x74, 0xdd, 0x3d, 0x0e, 0x67, 0x64, 0x65, 0x84, 0x23, 0x61, 0xf9, 0x34, 0xa0, 0xfe, 0x05, - 0xe5, 0x94, 0x20, 0x6f, 0xd6, 0x05, 0xd8, 0x44, 0x28, 0x6b, 0xd1, 0x94, 0xf5, 0x3b, 0x9c, 0x8c, - 0x70, 0xdb, 0x9b, 0xc5, 0xa9, 0xe3, 0xee, 0x87, 0x93, 0x11, 0x3b, 0x08, 0x19, 0x1d, 0x99, 0x51, - 0xdf, 0x7a, 0xfe, 0x82, 0xef, 0xe1, 0x3c, 0xa7, 0x1b, 0x47, 0xd4, 0x7f, 0xf6, 0x82, 0x1d, 0x3b, - 0xa3, 0x80, 0x13, 0x22, 0xfb, 0xaa, 0x59, 0xe1, 0x1b, 0xbc, 0x34, 0x0a, 0x18, 0x09, 0xb2, 0xaf, - 0xd8, 0x26, 0x64, 0xad, 0xb5, 0xf9, 0x2c, 0xd0, 0x31, 0x2f, 0x3e, 0xe0, 0x14, 0xb5, 0xc6, 0x1b, - 0xdb, 0x16, 0x09, 0xac, 0x9e, 0x80, 0xad, 0x7a, 0xd9, 0xd8, 0xd3, 0x89, 0x7d, 0x16, 0x70, 0x92, - 0x52, 0x33, 0xab, 0x02, 0xb8, 0xc7, 0x60, 0xc6, 0x1f, 0x67, 0x61, 0x3d, 0x31, 0xb9, 0x62, 0xd3, - 0x30, 0xe6, 0x84, 0x43, 0xf8, 0xc4, 0x96, 0x4c, 
0xf1, 0x95, 0x36, 0x6b, 0xd9, 0xb4, 0x59, 0x5b, - 0x83, 0x02, 0x6e, 0xb6, 0x1c, 0x1e, 0xe9, 0x54, 0xee, 0xb2, 0xf9, 0xec, 0xd4, 0xf7, 0x18, 0xaf, - 0x76, 0x3e, 0x0f, 0xc7, 0xde, 0x0b, 0x57, 0xf0, 0x2c, 0xcb, 0x02, 0x3e, 0x10, 0xe0, 0xf8, 0x50, - 0x14, 0x12, 0x43, 0x71, 0x07, 0x2a, 0x62, 0x06, 0x38, 0xcf, 0x87, 0x13, 0x0b, 0x02, 0xc4, 0x98, - 0xbe, 0xf7, 0x80, 0xa8, 0xf9, 0xb4, 0xd8, 0xa8, 0xf1, 0xd3, 0x07, 0x27, 0x76, 0xd9, 0x11, 0x13, - 0x7a, 0x68, 0x5f, 0xf2, 0x53, 0xe8, 0x1e, 0xd4, 0x19, 0x0a, 0x1b, 0x4f, 0x6b, 0xc4, 0x19, 0xb2, - 0x12, 0x8e, 0xd5, 0xd4, 0xbe, 0x64, 0x83, 0xb9, 0xc3, 0xd9, 0xb2, 0x37, 0xa1, 0x22, 0x27, 0xd5, - 0x72, 0x5c, 0x31, 0xaf, 0x65, 0x31, 0xaf, 0x5d, 0x97, 0x9d, 0x25, 0x2c, 0x1d, 0xc7, 0xc9, 0x1a, - 0xd3, 0x59, 0x78, 0x2e, 0x68, 0x74, 0x7d, 0xea, 0xb8, 0x38, 0xbc, 0xbb, 0x0c, 0x6a, 0xfc, 0x66, - 0x06, 0xaa, 0x62, 0xd4, 0x39, 0x8b, 0x49, 0xb6, 0x80, 0xc8, 0x25, 0x1e, 0x5e, 0x3a, 0x63, 0xeb, - 0xe4, 0x2a, 0xa4, 0x01, 0xee, 0xa8, 0xfd, 0x1b, 0x66, 0x43, 0xa4, 0x0d, 0x2f, 0x9d, 0xf1, 0x36, - 0x4b, 0x21, 0x0f, 0xa0, 0x11, 0xc3, 0x0f, 0x42, 0x1f, 0xb7, 0xfb, 0xfe, 0x0d, 0xb3, 0xae, 0x61, - 0x0f, 0x42, 0x9f, 0x11, 0x10, 0xc6, 0xc0, 0xce, 0x43, 0xcb, 0x71, 0xc7, 0xf4, 0x92, 0xcf, 0x47, - 0xcd, 0xac, 0x20, 0xac, 0xcb, 0x40, 0xdb, 0x75, 0xa8, 0xea, 0xc5, 0x19, 0x67, 0x50, 0x92, 0xdc, - 0x2f, 0xf2, 0x49, 0xf1, 0x26, 0x31, 0x3e, 0x49, 0xb6, 0xe4, 0x26, 0x94, 0xe2, 0x2d, 0x30, 0x8b, - 0xe1, 0x6b, 0x57, 0x6c, 0x7c, 0x0f, 0x1a, 0x07, 0x6c, 0x22, 0x5c, 0xb6, 0x93, 0x05, 0x37, 0xbf, - 0x01, 0x4b, 0x1a, 0x45, 0x29, 0x9b, 0xe2, 0x8b, 0x31, 0x24, 0xe7, 0x5e, 0x10, 0x8a, 0x5a, 0xf8, - 0x6f, 0xe3, 0x77, 0x33, 0x40, 0x3a, 0x41, 0xe8, 0x4c, 0xed, 0x90, 0xee, 0x51, 0x45, 0x33, 0xfb, - 0x50, 0x65, 0xa5, 0x0d, 0xbd, 0x36, 0xb2, 0xd7, 0xc8, 0x6d, 0xbd, 0x27, 0x68, 0xdc, 0x62, 0x86, - 0x2d, 0x1d, 0x1b, 0xcf, 0xc0, 0x58, 0x01, 0x6c, 0xb9, 0x85, 0xb6, 0x7f, 0x46, 0x43, 0xce, 0x94, - 0x0b, 0x66, 0x12, 0x10, 0xc4, 0xd8, 0xf1, 0xd6, 0xf7, 0x61, 0x65, 0xa1, 0x0c, 0xfd, 
0xd0, 0x2a, - 0xa7, 0x1c, 0x5a, 0x39, 0xfd, 0xd0, 0xb2, 0x60, 0x35, 0xd6, 0x2e, 0xb1, 0x0b, 0x37, 0xa1, 0xc8, - 0xa8, 0x05, 0x5b, 0xbb, 0x19, 0x94, 0x11, 0x4e, 0x29, 0x5f, 0xdf, 0x1f, 0xc0, 0xda, 0x29, 0xa5, - 0xbe, 0x1d, 0xf2, 0x44, 0x4e, 0x4e, 0xd8, 0x0c, 0x89, 0x82, 0x57, 0x44, 0xda, 0xc0, 0x0e, 0x8f, - 0xa8, 0xcf, 0x66, 0xca, 0xf8, 0xe7, 0x59, 0x58, 0x66, 0xc7, 0xcb, 0xa1, 0xed, 0x5e, 0xc9, 0x71, - 0x3a, 0x48, 0x1d, 0xa7, 0xfb, 0x1a, 0xa7, 0xa0, 0x61, 0x7f, 0xd5, 0x41, 0xca, 0x25, 0x07, 0x89, - 0xdc, 0x85, 0x6a, 0xac, 0xad, 0x05, 0xde, 0x56, 0x08, 0x54, 0x23, 0x23, 0x39, 0x60, 0x49, 0x93, - 0x03, 0x18, 0x25, 0x60, 0x1b, 0x8b, 0x95, 0x1a, 0x08, 0xee, 0x8c, 0x91, 0x57, 0x56, 0x66, 0xc0, - 0x84, 0xa5, 0x80, 0x51, 0x1e, 0x6b, 0xee, 0x0a, 0x81, 0x89, 0x8e, 0xf9, 0xf6, 0x2d, 0x99, 0x0d, - 0x9e, 0x70, 0x1c, 0xc1, 0xff, 0xe4, 0xd3, 0xf4, 0x0e, 0x34, 0xa2, 0x61, 0x11, 0x73, 0x44, 0x20, - 0xcf, 0x96, 0xbc, 0x28, 0x80, 0xff, 0x36, 0xfe, 0x77, 0x06, 0x11, 0x77, 0x3c, 0x27, 0x12, 0x5a, - 0x08, 0xe4, 0x99, 0x94, 0x24, 0x11, 0xd9, 0xef, 0x6b, 0x65, 0xc0, 0xaf, 0x61, 0x30, 0x6f, 0x42, - 0x29, 0x60, 0x03, 0x63, 0x4f, 0x70, 0x3c, 0x4b, 0x66, 0x91, 0x7d, 0xb7, 0x27, 0x93, 0x68, 0x9c, - 0x8b, 0xd7, 0x8e, 0x73, 0xe9, 0x75, 0xc6, 0xb9, 0x9c, 0x3e, 0xce, 0xc6, 0xbb, 0xb0, 0xa2, 0xf5, - 0xfe, 0x25, 0xe3, 0xd4, 0x03, 0x72, 0xe0, 0x04, 0xe1, 0xb1, 0xcb, 0x8a, 0x50, 0x9c, 0x45, 0xac, - 0x21, 0x99, 0x44, 0x43, 0x58, 0xa2, 0x7d, 0x29, 0x12, 0xb3, 0x22, 0xd1, 0xbe, 0xe4, 0x89, 0xc6, - 0x77, 0x60, 0x35, 0x56, 0x9e, 0xa8, 0xfa, 0x2d, 0x28, 0xcc, 0xc3, 0x4b, 0x4f, 0xca, 0x5d, 0x15, - 0xb1, 0xc2, 0x8f, 0xc3, 0x4b, 0xcf, 0xc4, 0x14, 0xe3, 0x09, 0xac, 0xf4, 0xe8, 0x0b, 0x41, 0x84, - 0x64, 0x43, 0xde, 0x81, 0xfc, 0x2b, 0x54, 0x14, 0x3c, 0xdd, 0xd8, 0x02, 0xa2, 0x67, 0x16, 0xb5, - 0x6a, 0x1a, 0x8b, 0x4c, 0x4c, 0x63, 0x61, 0xbc, 0x03, 0x64, 0xe0, 0x9c, 0xb9, 0x87, 0x34, 0x08, - 0xec, 0x33, 0x45, 0xb6, 0x1a, 0x90, 0x9b, 0x06, 0x67, 0x82, 0xc6, 0xb2, 0x9f, 0xc6, 0xb7, 0x61, - 0x35, 0x86, 0x27, 0x0a, 
0x7e, 0x03, 0xca, 0x81, 0x73, 0xe6, 0x72, 0xae, 0x59, 0x14, 0x1d, 0x01, - 0x8c, 0x3d, 0x58, 0xfb, 0x8c, 0xfa, 0xce, 0xe9, 0xd5, 0xab, 0x8a, 0x8f, 0x97, 0x93, 0x4d, 0x96, - 0xd3, 0x81, 0xf5, 0x44, 0x39, 0xa2, 0x7a, 0xdc, 0x1e, 0x62, 0x26, 0x4b, 0x26, 0x7e, 0x68, 0x74, - 0x3b, 0xab, 0xd3, 0x6d, 0xc3, 0x03, 0xb2, 0xe3, 0xb9, 0x2e, 0x1d, 0x85, 0x47, 0x94, 0xfa, 0xb2, - 0x31, 0xef, 0x69, 0x7b, 0xa1, 0xf2, 0x68, 0x53, 0x8c, 0x6c, 0xf2, 0x30, 0x10, 0x9b, 0x84, 0x40, - 0x7e, 0x46, 0xfd, 0x29, 0x2f, 0xb8, 0x64, 0xf2, 0xdf, 0x6c, 0x70, 0x43, 0x67, 0x4a, 0xbd, 0x39, - 0x8a, 0x9a, 0x79, 0x53, 0x7e, 0x1a, 0xeb, 0xb0, 0x1a, 0xab, 0x10, 0x5b, 0x6d, 0x3c, 0x84, 0xf5, - 0x5d, 0x27, 0x18, 0x2d, 0x36, 0x65, 0x13, 0x8a, 0xb3, 0xf9, 0x89, 0x15, 0x3f, 0x71, 0x9e, 0xd1, - 0x2b, 0xa3, 0x09, 0x1b, 0xc9, 0x1c, 0xa2, 0xac, 0x5f, 0xcd, 0x42, 0x7e, 0x7f, 0x78, 0xb0, 0x43, - 0x5a, 0x50, 0x72, 0xdc, 0x91, 0x37, 0x65, 0xfc, 0x36, 0x8e, 0x86, 0xfa, 0xbe, 0x76, 0x6b, 0xdf, - 0x82, 0x32, 0x67, 0xd3, 0x27, 0xde, 0xe8, 0xb9, 0xe0, 0x78, 0x4b, 0x0c, 0x70, 0xe0, 0x8d, 0x9e, - 0xb3, 0x6d, 0x46, 0x2f, 0x67, 0x8e, 0xcf, 0xb5, 0x3b, 0x52, 0x79, 0x91, 0x47, 0x16, 0x2f, 0x4a, - 0x88, 0x74, 0x18, 0x82, 0x1b, 0x61, 0xe7, 0x2b, 0xb2, 0xbe, 0xe5, 0x73, 0xce, 0x8d, 0x8c, 0xe9, - 0x25, 0x79, 0x1f, 0xc8, 0xa9, 0xe7, 0xbf, 0xb0, 0x7d, 0xc5, 0xad, 0xb9, 0x82, 0xb4, 0xe6, 0xcd, - 0x95, 0x28, 0x45, 0x70, 0x22, 0xe4, 0x11, 0xac, 0x6b, 0xe8, 0x5a, 0xc1, 0xc8, 0x35, 0xad, 0x46, - 0x89, 0xfb, 0xb2, 0x0a, 0xe3, 0x57, 0xb2, 0x40, 0x44, 0xfe, 0x1d, 0xcf, 0x0d, 0x42, 0xdf, 0x76, - 0xdc, 0x30, 0x88, 0xf3, 0x6e, 0x99, 0x04, 0xef, 0x76, 0x1f, 0x1a, 0x9c, 0x73, 0xd4, 0x19, 0xb8, - 0x6c, 0xc4, 0x46, 0x9b, 0x11, 0x13, 0x77, 0x0f, 0xea, 0x11, 0xf7, 0xae, 0x94, 0x7b, 0x79, 0xb3, - 0xaa, 0x38, 0x78, 0x71, 0x14, 0x32, 0x82, 0x20, 0xb9, 0x52, 0xa5, 0x6a, 0x40, 0x41, 0x61, 0x65, - 0x6a, 0x5f, 0x1e, 0x51, 0x29, 0x2b, 0x70, 0x76, 0xcf, 0x80, 0x9a, 0x62, 0xe4, 0x38, 0x26, 0x8e, - 0x5c, 0x45, 0xb0, 0x72, 0x1c, 0x27, 0x9d, 0xd7, 0x5e, 0x4a, 
0xe7, 0xb5, 0x8d, 0xff, 0x50, 0x86, - 0xa2, 0x1c, 0x46, 0xce, 0x38, 0x87, 0xce, 0x05, 0x8d, 0x18, 0x67, 0xf6, 0xc5, 0xf8, 0x71, 0x9f, - 0x4e, 0xbd, 0x50, 0x09, 0x4c, 0xb8, 0x4d, 0xaa, 0x08, 0x14, 0x22, 0x93, 0xc6, 0xb4, 0xa3, 0x4e, - 0x12, 0xb9, 0x67, 0xc9, 0xb4, 0x23, 0x4b, 0x76, 0x0b, 0x8a, 0x92, 0xf5, 0xce, 0x2b, 0x9d, 0xc2, - 0xd2, 0x08, 0xf9, 0xee, 0x16, 0x94, 0x46, 0xf6, 0xcc, 0x1e, 0x39, 0xe1, 0x95, 0x38, 0x13, 0xd4, - 0x37, 0x2b, 0x7d, 0xe2, 0x8d, 0xec, 0x89, 0x75, 0x62, 0x4f, 0x6c, 0x77, 0x44, 0x85, 0xb2, 0xaf, - 0xca, 0x81, 0xdb, 0x08, 0x23, 0xdf, 0x80, 0xba, 0x68, 0xa7, 0xc4, 0x42, 0x9d, 0x9f, 0x68, 0xbd, - 0x44, 0x63, 0xc2, 0x9d, 0x37, 0x65, 0xf3, 0x72, 0x4a, 0x51, 0x0c, 0xca, 0x99, 0x65, 0x84, 0xec, - 0x51, 0xde, 0x5b, 0x91, 0xfc, 0x02, 0xd7, 0x70, 0x19, 0xab, 0x42, 0xe0, 0xe7, 0xb8, 0x7e, 0x17, - 0x65, 0xa1, 0x9c, 0x26, 0x0b, 0xbd, 0x07, 0x2b, 0x73, 0x37, 0xa0, 0x61, 0x38, 0xa1, 0x63, 0xd5, - 0x96, 0x0a, 0x47, 0x6a, 0xa8, 0x04, 0xd9, 0x9c, 0x2d, 0x58, 0x45, 0x2d, 0x65, 0x60, 0x87, 0x5e, - 0x70, 0xee, 0x04, 0x56, 0x40, 0x5d, 0xa9, 0x6e, 0x5a, 0xe1, 0x49, 0x03, 0x91, 0x32, 0x40, 0x15, - 0xc5, 0x66, 0x02, 0xdf, 0xa7, 0x23, 0xea, 0x5c, 0xd0, 0x31, 0x97, 0x93, 0x72, 0xe6, 0x7a, 0x2c, - 0x8f, 0x29, 0x12, 0xb9, 0xd0, 0x3b, 0x9f, 0x5a, 0xf3, 0xd9, 0xd8, 0x66, 0xfc, 0x70, 0x1d, 0x05, - 0x0f, 0x77, 0x3e, 0x3d, 0x46, 0x08, 0x79, 0x08, 0x52, 0x10, 0x12, 0x6b, 0x66, 0x39, 0x76, 0xe4, - 0x30, 0xaa, 0x61, 0x56, 0x05, 0x06, 0x0a, 0x6a, 0x77, 0xf4, 0xcd, 0xd2, 0x60, 0x2b, 0x8c, 0x0b, - 0xed, 0xd1, 0x86, 0x69, 0x42, 0x71, 0xe6, 0x3b, 0x17, 0x76, 0x48, 0x9b, 0x2b, 0x78, 0x8e, 0x8b, - 0x4f, 0x46, 0xc0, 0x1d, 0xd7, 0x09, 0x1d, 0x3b, 0xf4, 0xfc, 0x26, 0xe1, 0x69, 0x11, 0x80, 0x3c, - 0x80, 0x15, 0xbe, 0x4e, 0x82, 0xd0, 0x0e, 0xe7, 0x81, 0x90, 0x02, 0x57, 0x51, 0xda, 0x62, 0x09, - 0x03, 0x0e, 0xe7, 0x82, 0x20, 0xf9, 0x04, 0x36, 0x70, 0x69, 0x2c, 0x6c, 0xcd, 0x35, 0x36, 0x1c, - 0xbc, 0x45, 0xab, 0x1c, 0x63, 0x27, 0xbe, 0x47, 0x3f, 0x85, 0x4d, 0xb1, 0x5c, 0x16, 0x72, 0xae, - 
0xab, 0x9c, 0x6b, 0x88, 0x92, 0xc8, 0xba, 0x05, 0x2b, 0xac, 0x69, 0xce, 0xc8, 0x12, 0x25, 0xb0, - 0x5d, 0xb1, 0xc1, 0x7a, 0xc1, 0x33, 0x2d, 0x63, 0xa2, 0xc9, 0xd3, 0x9e, 0xd1, 0x2b, 0xf2, 0x3d, - 0x58, 0xc6, 0xe5, 0xc3, 0x55, 0x1d, 0xfc, 0x60, 0x6e, 0xf1, 0x83, 0x79, 0x5d, 0x0c, 0xee, 0x8e, - 0x4a, 0xe5, 0x67, 0x73, 0x7d, 0x14, 0xfb, 0x66, 0x5b, 0x63, 0xe2, 0x9c, 0x52, 0x76, 0x4e, 0x34, - 0x37, 0x71, 0xb1, 0xc9, 0x6f, 0xb6, 0x6b, 0xe7, 0x33, 0x9e, 0xd2, 0x44, 0x62, 0x8d, 0x5f, 0x7c, - 0x1d, 0x4f, 0xbc, 0x80, 0x4a, 0xfd, 0x76, 0xf3, 0xa6, 0xd8, 0x90, 0x0c, 0x28, 0x45, 0x16, 0x26, - 0x13, 0xa3, 0x02, 0x42, 0x59, 0x21, 0x6e, 0xf1, 0x85, 0x51, 0x43, 0x3d, 0x84, 0xb4, 0x44, 0x30, - 0xa6, 0xee, 0xdc, 0x7e, 0x21, 0xc9, 0xfa, 0x1b, 0x9c, 0x9a, 0x00, 0x03, 0x09, 0x82, 0xbe, 0x07, - 0x2b, 0x62, 0x16, 0x22, 0x62, 0xda, 0xbc, 0xcd, 0x8f, 0xc8, 0x9b, 0xb2, 0x8f, 0x0b, 0xd4, 0xd6, - 0x6c, 0xe0, 0xbc, 0x68, 0xf4, 0x77, 0x1f, 0x88, 0x9c, 0x14, 0xad, 0xa0, 0x37, 0x5f, 0x55, 0xd0, - 0x8a, 0x98, 0xa6, 0x08, 0x64, 0xfc, 0x76, 0x06, 0x39, 0x2a, 0x81, 0x1d, 0x68, 0xca, 0x1f, 0xa4, - 0x6b, 0x96, 0xe7, 0x4e, 0xae, 0x04, 0xa9, 0x03, 0x04, 0xf5, 0xdd, 0x09, 0xa7, 0x35, 0x8e, 0xab, - 0xa3, 0xe0, 0xe1, 0x5d, 0x95, 0x40, 0x8e, 0x74, 0x07, 0x2a, 0xb3, 0xf9, 0xc9, 0xc4, 0x19, 0x21, - 0x4a, 0x0e, 0x4b, 0x41, 0x10, 0x47, 0x78, 0x0b, 0xaa, 0x62, 0xad, 0x23, 0x46, 0x9e, 0x63, 0x54, - 0x04, 0x8c, 0xa3, 0x70, 0xe6, 0x80, 0xfa, 0x9c, 0xd8, 0x55, 0x4d, 0xfe, 0xdb, 0xd8, 0x86, 0xb5, - 0x78, 0xa3, 0x05, 0xe7, 0xf2, 0x00, 0x4a, 0x82, 0x92, 0x4a, 0xb5, 0x68, 0x3d, 0x3e, 0x1a, 0xa6, - 0x4a, 0x37, 0xfe, 0x63, 0x01, 0x56, 0xe5, 0x18, 0xb1, 0xc9, 0x1e, 0xcc, 0xa7, 0x53, 0xdb, 0x4f, - 0x21, 0xd1, 0x99, 0x97, 0x93, 0xe8, 0xec, 0x02, 0x89, 0x8e, 0xeb, 0xc5, 0x90, 0xc2, 0xc7, 0xf5, - 0x62, 0x6c, 0x75, 0xa1, 0x34, 0xae, 0x9b, 0x75, 0x6a, 0x02, 0x3c, 0x44, 0xf3, 0xd1, 0xc2, 0x81, - 0x52, 0x48, 0x39, 0x50, 0xf4, 0xe3, 0x60, 0x29, 0x71, 0x1c, 0xbc, 0x05, 0xb8, 0x8c, 0xe5, 0x7a, - 0x2c, 0xa2, 0x80, 0xce, 0x61, 0x62, 
0x41, 0xbe, 0x0b, 0xcb, 0x49, 0x0a, 0x8c, 0xa4, 0xbe, 0x9e, - 0x42, 0x7f, 0x9d, 0x29, 0xe5, 0x4c, 0x8d, 0x86, 0x5c, 0x16, 0xf4, 0xd7, 0x99, 0xd2, 0x03, 0x9e, - 0x22, 0xf1, 0x3b, 0x00, 0x58, 0x37, 0xdf, 0xc6, 0xc0, 0xb7, 0xf1, 0x3b, 0x89, 0x95, 0xa9, 0x8d, - 0xfa, 0x16, 0xfb, 0x98, 0xfb, 0x94, 0xef, 0xeb, 0x32, 0xcf, 0xc9, 0xb7, 0xf4, 0x27, 0x50, 0xf7, - 0x66, 0xd4, 0xb5, 0x22, 0x2a, 0x58, 0xe1, 0x45, 0x35, 0x44, 0x51, 0x5d, 0x09, 0x37, 0x6b, 0x0c, - 0x4f, 0x7d, 0x92, 0x4f, 0x71, 0x90, 0xa9, 0x96, 0xb3, 0x7a, 0x4d, 0xce, 0x3a, 0x47, 0x8c, 0xb2, - 0x7e, 0x9b, 0xeb, 0x9e, 0xbc, 0xc9, 0x1c, 0x4d, 0x39, 0x35, 0xbe, 0x8e, 0xa4, 0x6e, 0xdb, 0x54, - 0x29, 0xa6, 0x8e, 0x65, 0xfc, 0x5a, 0x06, 0x2a, 0x5a, 0x1f, 0xc8, 0x3a, 0xac, 0xec, 0xf4, 0xfb, - 0x47, 0x1d, 0xb3, 0x3d, 0xec, 0x7e, 0xd6, 0xb1, 0x76, 0x0e, 0xfa, 0x83, 0x4e, 0xe3, 0x06, 0x03, - 0x1f, 0xf4, 0x77, 0xda, 0x07, 0xd6, 0x5e, 0xdf, 0xdc, 0x91, 0xe0, 0x0c, 0xd9, 0x00, 0x62, 0x76, - 0x0e, 0xfb, 0xc3, 0x4e, 0x0c, 0x9e, 0x25, 0x0d, 0xa8, 0x6e, 0x9b, 0x9d, 0xf6, 0xce, 0xbe, 0x80, - 0xe4, 0xc8, 0x1a, 0x34, 0xf6, 0x8e, 0x7b, 0xbb, 0xdd, 0xde, 0x53, 0x6b, 0xa7, 0xdd, 0xdb, 0xe9, - 0x1c, 0x74, 0x76, 0x1b, 0x79, 0x52, 0x83, 0x72, 0x7b, 0xbb, 0xdd, 0xdb, 0xed, 0xf7, 0x3a, 0xbb, - 0x8d, 0x82, 0xf1, 0x3f, 0x33, 0x00, 0x51, 0x43, 0x19, 0x5d, 0x8d, 0x9a, 0xaa, 0xdb, 0x64, 0xd7, - 0x17, 0x3a, 0x85, 0x74, 0xd5, 0x8f, 0x7d, 0x93, 0x47, 0x50, 0xf4, 0xe6, 0xe1, 0xc8, 0x9b, 0xa2, - 0x10, 0x51, 0x7f, 0xd4, 0x5c, 0xc8, 0xd7, 0xc7, 0x74, 0x53, 0x22, 0xc6, 0xec, 0xae, 0xb9, 0x57, - 0xd9, 0x5d, 0xe3, 0x06, 0x5e, 0xe4, 0xeb, 0x34, 0x03, 0xef, 0x6d, 0x80, 0xe0, 0x05, 0xa5, 0x33, - 0xae, 0xbc, 0x12, 0xbb, 0xa0, 0xcc, 0x21, 0x43, 0x26, 0x63, 0xfe, 0x41, 0x06, 0xd6, 0xf9, 0x5a, - 0x1a, 0x27, 0x89, 0xd8, 0x5d, 0xa8, 0x8c, 0x3c, 0x6f, 0x46, 0x19, 0x53, 0xad, 0xf8, 0x35, 0x1d, - 0xc4, 0x08, 0x14, 0x12, 0xe4, 0x53, 0xcf, 0x1f, 0x51, 0x41, 0xc3, 0x80, 0x83, 0xf6, 0x18, 0x84, - 0xed, 0x21, 0xb1, 0x09, 0x11, 0x03, 0x49, 0x58, 0x05, 0x61, 0x88, 0xb2, 
0x01, 0x4b, 0x27, 0x3e, - 0xb5, 0x47, 0xe7, 0x82, 0x7a, 0x89, 0x2f, 0xf2, 0xcd, 0x48, 0x89, 0x37, 0x62, 0x7b, 0x62, 0x42, - 0xb1, 0xf1, 0x25, 0x73, 0x59, 0xc0, 0x77, 0x04, 0x98, 0x9d, 0xf3, 0xf6, 0x89, 0xed, 0x8e, 0x3d, - 0x97, 0x8e, 0x85, 0x2c, 0x1f, 0x01, 0x8c, 0x23, 0xd8, 0x48, 0xf6, 0x4f, 0xd0, 0xbb, 0x8f, 0x35, - 0x7a, 0x87, 0xa2, 0x6f, 0xeb, 0xfa, 0x3d, 0xa6, 0xd1, 0xbe, 0x7f, 0x9d, 0x87, 0x3c, 0x13, 0x78, - 0xae, 0x95, 0x8d, 0x74, 0xd9, 0x36, 0xb7, 0x60, 0x8d, 0xe7, 0xba, 0x42, 0x64, 0xc0, 0xc4, 0x64, - 0x71, 0x08, 0x67, 0xbc, 0x54, 0xb2, 0x4f, 0x47, 0x17, 0x52, 0x66, 0xe1, 0x10, 0x93, 0x8e, 0x2e, - 0xb8, 0xd2, 0xc2, 0x0e, 0x31, 0x2f, 0xd2, 0xab, 0x62, 0x60, 0x87, 0x3c, 0xa7, 0x48, 0xe2, 0xf9, - 0x8a, 0x2a, 0x89, 0xe7, 0x6a, 0x42, 0xd1, 0x71, 0x4f, 0xbc, 0xb9, 0x2b, 0x55, 0x3f, 0xf2, 0x93, - 0x1b, 0xff, 0x39, 0x25, 0x65, 0x47, 0x3b, 0x52, 0xa3, 0x12, 0x03, 0x0c, 0xd9, 0xe1, 0xfe, 0x21, - 0x94, 0x83, 0x2b, 0x77, 0xa4, 0xd3, 0xa0, 0x35, 0x31, 0x3e, 0xac, 0xf7, 0x5b, 0x83, 0x2b, 0x77, - 0xc4, 0x57, 0x7c, 0x29, 0x10, 0xbf, 0xc8, 0x63, 0x28, 0x29, 0xab, 0x16, 0x9e, 0x20, 0x37, 0xf5, - 0x1c, 0xd2, 0x94, 0x85, 0xfa, 0x31, 0x85, 0x4a, 0x3e, 0x80, 0x25, 0xae, 0x00, 0x0f, 0x9a, 0x55, - 0x9e, 0x49, 0x0a, 0xbc, 0xac, 0x19, 0xdc, 0xee, 0x4e, 0xc7, 0xdc, 0x0c, 0x65, 0x0a, 0x34, 0x36, - 0x4c, 0xa7, 0x13, 0x7b, 0x26, 0xd4, 0xd1, 0x35, 0x34, 0x4f, 0x33, 0x08, 0xea, 0xa2, 0xef, 0x42, - 0x95, 0x5b, 0x0c, 0x39, 0x8e, 0x8b, 0x7c, 0x68, 0xce, 0x04, 0x06, 0xdb, 0x9b, 0xd8, 0xb3, 0x5e, - 0xd0, 0x7a, 0x06, 0xb5, 0x58, 0x63, 0x74, 0x35, 0x57, 0x0d, 0xd5, 0x5c, 0xf7, 0x74, 0x35, 0x57, - 0x74, 0x14, 0x8a, 0x6c, 0xba, 0xda, 0xeb, 0xfb, 0x50, 0x92, 0x63, 0xc1, 0x68, 0xce, 0x71, 0xef, - 0x59, 0xaf, 0xff, 0x79, 0xcf, 0x1a, 0x7c, 0xd1, 0xdb, 0x69, 0xdc, 0x20, 0xcb, 0x50, 0x69, 0xef, - 0x70, 0x32, 0xc6, 0x01, 0x19, 0x86, 0x72, 0xd4, 0x1e, 0x0c, 0x14, 0x24, 0x6b, 0xec, 0x41, 0x23, - 0xd9, 0x55, 0xb6, 0xa8, 0x43, 0x09, 0x13, 0x96, 0xbd, 0x08, 0x10, 0xd9, 0x0f, 0xb2, 0x9a, 0xfd, - 0xc0, 0x78, 
0x0c, 0x0d, 0x76, 0xb0, 0xb3, 0xb1, 0xd6, 0x7d, 0x01, 0x26, 0x8c, 0xf5, 0xd6, 0xad, - 0x7b, 0x25, 0xb3, 0x82, 0x30, 0x5e, 0x95, 0xf1, 0x31, 0xac, 0x68, 0xd9, 0x22, 0xa5, 0x10, 0x63, - 0x16, 0x92, 0x4a, 0x21, 0x2e, 0xe8, 0x63, 0x8a, 0xb1, 0x09, 0xeb, 0xec, 0xb3, 0x73, 0x41, 0xdd, - 0x70, 0x30, 0x3f, 0x41, 0x1f, 0x12, 0xc7, 0x73, 0x8d, 0x5f, 0xc9, 0x40, 0x59, 0xa5, 0x5c, 0xbf, - 0x4b, 0xb6, 0x84, 0xfe, 0x08, 0xc9, 0x62, 0x4b, 0xab, 0x81, 0x67, 0xdc, 0xe2, 0xff, 0xc7, 0xf4, - 0x48, 0x65, 0x05, 0x62, 0xc3, 0x7a, 0xd4, 0xe9, 0x98, 0x56, 0xbf, 0x77, 0xd0, 0xed, 0xb1, 0xc3, - 0x81, 0x0d, 0x2b, 0x07, 0xec, 0xed, 0x71, 0x48, 0xc6, 0x68, 0x40, 0xfd, 0x29, 0x0d, 0xbb, 0xee, - 0xa9, 0x27, 0x06, 0xc3, 0xf8, 0xb3, 0x4b, 0xb0, 0xac, 0x40, 0x91, 0x1e, 0xea, 0x82, 0xfa, 0x81, - 0xe3, 0xb9, 0x7c, 0x9d, 0x94, 0x4d, 0xf9, 0xc9, 0xc8, 0x9b, 0x90, 0xd2, 0x38, 0x9b, 0xb1, 0xc6, - 0x53, 0x85, 0x5c, 0xc7, 0x79, 0x8c, 0x77, 0x61, 0xd9, 0x19, 0x53, 0x37, 0x74, 0xc2, 0x2b, 0x2b, - 0xa6, 0x95, 0xaf, 0x4b, 0xb0, 0xe0, 0x33, 0xd6, 0xa0, 0x60, 0x4f, 0x1c, 0x5b, 0xfa, 0xe6, 0xe0, - 0x07, 0x83, 0x8e, 0xbc, 0x89, 0xe7, 0x73, 0xb9, 0xa5, 0x6c, 0xe2, 0x07, 0x79, 0x08, 0x6b, 0x4c, - 0x86, 0xd2, 0xcd, 0x48, 0x9c, 0x42, 0xa1, 0x81, 0x80, 0xb8, 0xf3, 0xe9, 0x51, 0x64, 0x4a, 0x62, - 0x29, 0x8c, 0xbb, 0x60, 0x39, 0x04, 0x3b, 0xa9, 0x32, 0xa0, 0x5e, 0x64, 0xc5, 0x9d, 0x4f, 0xdb, - 0x3c, 0x45, 0xe1, 0x3f, 0x82, 0x75, 0x86, 0xaf, 0x18, 0x50, 0x95, 0x63, 0x99, 0xe7, 0x60, 0x85, - 0x75, 0x45, 0x9a, 0xca, 0x73, 0x0b, 0xca, 0xd8, 0x2a, 0xb6, 0x24, 0x84, 0xbd, 0x89, 0x37, 0x85, - 0xfa, 0xc1, 0x82, 0x1b, 0x0d, 0x2a, 0x02, 0x92, 0x6e, 0x34, 0x9a, 0x23, 0x4e, 0x29, 0xe9, 0x88, - 0xf3, 0x08, 0xd6, 0x4f, 0xd8, 0x1a, 0x3d, 0xa7, 0xf6, 0x98, 0xfa, 0x56, 0xb4, 0xf2, 0x51, 0xdc, - 0x5c, 0x65, 0x89, 0xfb, 0x3c, 0x4d, 0x6d, 0x14, 0xc6, 0x09, 0x32, 0xc2, 0x43, 0xc7, 0x56, 0xe8, - 0x59, 0x9c, 0x41, 0x14, 0x1a, 0xd7, 0x1a, 0x82, 0x87, 0xde, 0x0e, 0x03, 0xc6, 0xf1, 0xce, 0x7c, - 0x7b, 0x76, 0x2e, 0x84, 0x41, 0x85, 0xf7, 0x94, 
0x01, 0xc9, 0x1b, 0x50, 0x64, 0x7b, 0xc2, 0xa5, - 0xe8, 0x3c, 0x80, 0x62, 0x96, 0x04, 0x91, 0x7b, 0xb0, 0xc4, 0xeb, 0x08, 0x9a, 0x0d, 0xbe, 0x21, - 0xaa, 0xd1, 0x51, 0xe1, 0xb8, 0xa6, 0x48, 0x63, 0xec, 0xf6, 0xdc, 0x77, 0x90, 0x8e, 0x95, 0x4d, - 0xfe, 0x9b, 0xfc, 0x40, 0x23, 0x8a, 0xab, 0x3c, 0xef, 0x3d, 0x91, 0x37, 0xb1, 0x14, 0xaf, 0xa3, - 0x8f, 0x5f, 0x2b, 0xb5, 0xfa, 0x61, 0xbe, 0x54, 0x69, 0x54, 0x8d, 0x26, 0x77, 0x1e, 0x32, 0xe9, - 0xc8, 0xbb, 0xa0, 0xfe, 0x55, 0x6c, 0x8f, 0x64, 0x60, 0x73, 0x21, 0x29, 0xf2, 0x15, 0xf0, 0x05, - 0xdc, 0x9a, 0x7a, 0x63, 0xc9, 0x14, 0x54, 0x25, 0xf0, 0xd0, 0x1b, 0x33, 0xe6, 0x65, 0x45, 0x21, - 0x9d, 0x3a, 0xae, 0x13, 0x9c, 0xd3, 0xb1, 0xe0, 0x0d, 0x1a, 0x32, 0x61, 0x4f, 0xc0, 0x19, 0x07, - 0x3e, 0xf3, 0xbd, 0x33, 0x75, 0x54, 0x66, 0x4c, 0xf5, 0x6d, 0x7c, 0x02, 0x05, 0x9c, 0x41, 0xb6, - 0x51, 0xf8, 0xfc, 0x66, 0xc4, 0x46, 0xe1, 0xd0, 0x26, 0x14, 0x5d, 0x1a, 0xbe, 0xf0, 0xfc, 0xe7, - 0xd2, 0xb6, 0x26, 0x3e, 0x8d, 0x1f, 0x73, 0xa5, 0xaa, 0x72, 0x03, 0x43, 0xe5, 0x03, 0x5b, 0xc2, - 0xb8, 0x04, 0x83, 0x73, 0x5b, 0xe8, 0x79, 0x4b, 0x1c, 0x30, 0x38, 0xb7, 0x17, 0x96, 0x70, 0x76, - 0xd1, 0x13, 0xec, 0x1e, 0xd4, 0xa5, 0xe3, 0x59, 0x60, 0x4d, 0xe8, 0x69, 0x28, 0xb6, 0x64, 0x55, - 0x78, 0x9d, 0x05, 0x07, 0xf4, 0x34, 0x34, 0x0e, 0x61, 0x45, 0x6c, 0x9a, 0xfe, 0x8c, 0xca, 0xaa, - 0xbf, 0x93, 0x26, 0x15, 0x55, 0x1e, 0xad, 0xc6, 0xd9, 0x0d, 0x64, 0xec, 0x62, 0xa2, 0x92, 0xf1, - 0xa3, 0x48, 0x83, 0xc8, 0x98, 0x11, 0x51, 0x9e, 0x90, 0x4d, 0xa4, 0x49, 0x52, 0xba, 0x3d, 0x28, - 0x09, 0xc8, 0x19, 0xb3, 0xd1, 0x09, 0xe6, 0xa3, 0x91, 0x74, 0x08, 0x2c, 0x99, 0xf2, 0xd3, 0xf8, - 0x77, 0x19, 0x58, 0xe5, 0x85, 0x49, 0xa9, 0x4e, 0x9c, 0x14, 0x3f, 0x71, 0x23, 0xd9, 0xfc, 0xe8, - 0x1c, 0x20, 0x7e, 0x7c, 0x75, 0x23, 0x4d, 0x7e, 0xc1, 0x48, 0xf3, 0x4d, 0x68, 0x8c, 0xe9, 0xc4, - 0xe1, 0x4b, 0x49, 0x32, 0x54, 0xc8, 0xc1, 0x2e, 0x4b, 0xb8, 0xd0, 0x32, 0x18, 0x7f, 0x39, 0x03, - 0x2b, 0xc8, 0xaf, 0x71, 0xbd, 0x8d, 0x18, 0xa8, 0x27, 0x52, 0x41, 0x21, 0xc8, 0xa9, 
0xe8, 0x53, - 0xc4, 0xc7, 0x70, 0x28, 0x22, 0xef, 0xdf, 0x10, 0x8a, 0x0b, 0x01, 0x25, 0xdf, 0xe5, 0x92, 0xa8, - 0x6b, 0x71, 0xa0, 0xe0, 0xc3, 0x6f, 0xa6, 0x70, 0x88, 0x2a, 0x3b, 0x13, 0x53, 0x5d, 0x0e, 0xda, - 0x2e, 0xc1, 0x12, 0x6a, 0xc1, 0x8c, 0x3d, 0xa8, 0xc5, 0xaa, 0x89, 0x59, 0x7a, 0xaa, 0x68, 0xe9, - 0x59, 0xb0, 0x06, 0x67, 0x17, 0xad, 0xc1, 0x57, 0xb0, 0x6a, 0x52, 0x7b, 0x7c, 0xb5, 0xe7, 0xf9, - 0x47, 0xc1, 0x49, 0xb8, 0x87, 0x4c, 0x30, 0x3b, 0x83, 0x94, 0xff, 0x47, 0xcc, 0x9c, 0x22, 0x2d, - 0xdd, 0x52, 0x0d, 0xf3, 0x0d, 0xa8, 0x47, 0x8e, 0x22, 0x9a, 0xe2, 0xbd, 0xa6, 0x7c, 0x45, 0x38, - 0xef, 0x44, 0x20, 0x3f, 0x0b, 0x4e, 0x42, 0xa1, 0x7a, 0xe7, 0xbf, 0x8d, 0xbf, 0x52, 0x00, 0xc2, - 0x56, 0x73, 0x62, 0xc1, 0x24, 0x5c, 0x5c, 0xb2, 0x0b, 0x2e, 0x2e, 0x0f, 0x81, 0x68, 0x08, 0xd2, - 0xf3, 0x26, 0xa7, 0x3c, 0x6f, 0x1a, 0x11, 0xae, 0x70, 0xbc, 0x79, 0x08, 0x6b, 0x42, 0xa2, 0x88, - 0x37, 0x15, 0x97, 0x06, 0x41, 0xd1, 0x22, 0xd6, 0x5e, 0xe9, 0xde, 0x22, 0x35, 0xd5, 0x39, 0x74, - 0x6f, 0x91, 0x0a, 0x25, 0x6d, 0x01, 0x2e, 0xbd, 0x72, 0x01, 0x16, 0x17, 0x16, 0xa0, 0xa6, 0x5c, - 0x2c, 0xc5, 0x95, 0x8b, 0x0b, 0x6a, 0x72, 0x64, 0x9f, 0x63, 0x6a, 0xf2, 0xfb, 0xd0, 0x90, 0x8a, - 0x26, 0xa5, 0xc2, 0x14, 0x3e, 0x0f, 0x42, 0x97, 0x24, 0x95, 0x98, 0x31, 0x9b, 0x5e, 0xe5, 0x75, - 0x8c, 0x8b, 0xd5, 0x74, 0xe3, 0xe2, 0xa2, 0x4a, 0xae, 0x96, 0xa2, 0x92, 0x7b, 0x1c, 0xb9, 0x34, - 0x04, 0xe7, 0xce, 0x94, 0x33, 0x3e, 0x91, 0xc3, 0xa5, 0x18, 0xe0, 0xc1, 0xb9, 0x33, 0x35, 0xa5, - 0x73, 0x11, 0xfb, 0x20, 0x3b, 0x70, 0x47, 0xf4, 0x27, 0xc5, 0x2f, 0x08, 0x47, 0x61, 0x99, 0x73, - 0xaa, 0x2d, 0x44, 0x3b, 0x4c, 0xb8, 0x08, 0x25, 0x06, 0x45, 0x7a, 0x95, 0x04, 0xa8, 0xd7, 0x95, - 0x83, 0x72, 0x88, 0x6e, 0x25, 0x01, 0x1f, 0x62, 0xfb, 0xd2, 0x12, 0x3a, 0xbf, 0xe0, 0x82, 0xf3, - 0x49, 0x35, 0xb3, 0x32, 0xb5, 0x2f, 0x0f, 0xb8, 0x4e, 0x2f, 0xb8, 0x30, 0xfe, 0x38, 0x03, 0x0d, - 0xb6, 0x34, 0x63, 0xbb, 0xfe, 0x53, 0xe0, 0xf4, 0xe9, 0x35, 0x37, 0x7d, 0x85, 0xe1, 0xca, 0x3d, - 0xff, 0x09, 0xf0, 0x4d, 
0x6c, 0x79, 0x33, 0xea, 0x8a, 0x2d, 0xdf, 0x8c, 0x6f, 0xf9, 0x88, 0xac, - 0xef, 0xdf, 0x40, 0xa1, 0x90, 0x41, 0xc8, 0xa7, 0x50, 0x66, 0x7b, 0x85, 0x2f, 0x5c, 0xe1, 0x2b, - 0xdd, 0x52, 0x82, 0xfe, 0xc2, 0xb6, 0x65, 0x59, 0x67, 0xe2, 0x33, 0xcd, 0x69, 0x28, 0x9f, 0xe2, - 0x34, 0xa4, 0xd1, 0x94, 0x7d, 0x80, 0x67, 0xf4, 0x8a, 0x0d, 0x42, 0xe8, 0xf9, 0x8c, 0xb7, 0x62, - 0xdb, 0xeb, 0xd4, 0x9e, 0x3a, 0x42, 0xd9, 0x58, 0x30, 0xcb, 0xcf, 0xe9, 0xd5, 0x1e, 0x07, 0xb0, - 0xb5, 0xc5, 0x92, 0x23, 0xc2, 0x52, 0x30, 0x4b, 0xcf, 0xe9, 0x15, 0x52, 0x15, 0x0b, 0x6a, 0xcf, - 0xe8, 0xd5, 0x2e, 0x45, 0xe6, 0xdd, 0xf3, 0xd9, 0xa0, 0xfb, 0xf6, 0x0b, 0xc6, 0xad, 0xc7, 0x9c, - 0x5a, 0x2a, 0xbe, 0xfd, 0xe2, 0x19, 0xbd, 0x92, 0x0e, 0x36, 0x45, 0x96, 0x3e, 0xf1, 0x46, 0x82, - 0xdd, 0x90, 0xfa, 0x9d, 0xa8, 0x51, 0xe6, 0xd2, 0x73, 0xfe, 0xdb, 0xf8, 0xa3, 0x0c, 0xd4, 0x58, - 0xfb, 0xf9, 0x49, 0xc1, 0x57, 0x91, 0x70, 0x81, 0xcd, 0x44, 0x2e, 0xb0, 0x8f, 0x04, 0xa1, 0xc5, - 0x63, 0x27, 0x7b, 0xfd, 0xb1, 0xc3, 0xe7, 0x06, 0xcf, 0x9c, 0x0f, 0xa1, 0x8c, 0x0b, 0x83, 0x91, - 0x9e, 0x5c, 0x6c, 0x82, 0x63, 0x1d, 0x32, 0x4b, 0x1c, 0xed, 0x19, 0x7a, 0xdc, 0x69, 0xaa, 0x74, - 0x1c, 0xe2, 0xb2, 0xaf, 0x14, 0xe8, 0x29, 0xd3, 0x50, 0xb8, 0xc6, 0xe3, 0x4e, 0xd7, 0x53, 0x2f, - 0x25, 0xf5, 0xd4, 0x86, 0x0b, 0x25, 0x36, 0xd5, 0xbc, 0xb3, 0x29, 0x85, 0x66, 0xd2, 0x0a, 0x65, - 0xcc, 0x89, 0xcd, 0xce, 0x29, 0x46, 0x7b, 0xb3, 0x82, 0x39, 0xb1, 0x03, 0xca, 0x0a, 0x62, 0x0d, - 0x77, 0x3d, 0x8b, 0x2b, 0x7e, 0x85, 0x4a, 0xb4, 0x64, 0x96, 0x5d, 0xef, 0x08, 0x01, 0xc6, 0x9f, - 0xc9, 0x40, 0x45, 0xdb, 0xb3, 0xdc, 0x12, 0xa0, 0x86, 0x13, 0x37, 0x78, 0x7c, 0x07, 0xc4, 0xe6, - 0x63, 0xff, 0x86, 0x59, 0x1b, 0xc5, 0x26, 0x68, 0x4b, 0x2c, 0x65, 0x9e, 0x33, 0x1b, 0x53, 0x3f, - 0xc9, 0x7e, 0xc9, 0xf5, 0xcb, 0x7e, 0x6f, 0x2f, 0x41, 0x9e, 0xa1, 0x1a, 0x4f, 0x60, 0x45, 0x6b, - 0x06, 0xaa, 0x67, 0x5e, 0x77, 0x00, 0x8c, 0x9f, 0x57, 0x99, 0x59, 0x1d, 0x68, 0x5a, 0x97, 0xce, - 0x8d, 0x74, 0x8c, 0xe3, 0x22, 0x9c, 0x28, 0x11, 0xc4, 0x47, 
0xe6, 0x35, 0xfd, 0xed, 0x8c, 0x5f, - 0xce, 0xc0, 0xaa, 0x56, 0xfc, 0x9e, 0xe3, 0xda, 0x13, 0xe7, 0xc7, 0x9c, 0x47, 0x09, 0x9c, 0x33, - 0x37, 0x51, 0x01, 0x82, 0xbe, 0x4a, 0x05, 0xec, 0x28, 0x41, 0x57, 0x69, 0xf4, 0xe3, 0x17, 0xc7, - 0x27, 0x70, 0x98, 0x69, 0xbf, 0x18, 0x5e, 0x1a, 0x7f, 0x35, 0x0b, 0x6b, 0xa2, 0x09, 0xdc, 0xa3, - 0xdd, 0x61, 0xac, 0xe9, 0x61, 0x70, 0x46, 0x3e, 0x85, 0x1a, 0x1b, 0x3e, 0xcb, 0xa7, 0x67, 0x4e, - 0x10, 0x52, 0x69, 0xf5, 0x4f, 0xa1, 0xc6, 0x8c, 0x43, 0x61, 0xa8, 0xa6, 0xc0, 0x24, 0x4f, 0xa0, - 0xc2, 0xb3, 0xa2, 0x86, 0x4c, 0xcc, 0x55, 0x73, 0x31, 0x23, 0xce, 0xc5, 0xfe, 0x0d, 0x13, 0x82, - 0x68, 0x66, 0x9e, 0x40, 0x85, 0x4f, 0xf3, 0x05, 0x1f, 0xeb, 0x04, 0xb1, 0x5b, 0x98, 0x0b, 0x96, - 0x79, 0x16, 0xcd, 0x4c, 0x1b, 0x6a, 0x48, 0xee, 0xc4, 0x48, 0x0a, 0x4f, 0xd9, 0xd6, 0x62, 0x76, - 0x39, 0xd6, 0xac, 0xf1, 0x33, 0xed, 0x7b, 0xbb, 0x0c, 0xc5, 0xd0, 0x77, 0xce, 0xce, 0xa8, 0x6f, - 0x6c, 0xa8, 0xa1, 0x61, 0x74, 0x9c, 0x0e, 0x42, 0x3a, 0x63, 0x32, 0x87, 0xf1, 0x2f, 0x33, 0x50, - 0x11, 0x94, 0xf9, 0x27, 0x76, 0x28, 0x68, 0x25, 0x74, 0xa9, 0x65, 0x4d, 0x75, 0xfa, 0x2e, 0x2c, - 0x4f, 0x99, 0x80, 0xc4, 0x04, 0xf8, 0x98, 0x37, 0x41, 0x5d, 0x82, 0x05, 0xef, 0xbf, 0x05, 0xab, - 0x5c, 0x14, 0x08, 0xac, 0xd0, 0x99, 0x58, 0x32, 0x51, 0x5c, 0x6f, 0x58, 0xc1, 0xa4, 0xa1, 0x33, - 0x39, 0x14, 0x09, 0x8c, 0x23, 0x0e, 0x42, 0xfb, 0x8c, 0x0a, 0xea, 0x80, 0x1f, 0x4c, 0xe8, 0x4a, - 0xc8, 0xee, 0x52, 0xe8, 0xfa, 0x3f, 0x2b, 0xb0, 0xb9, 0x90, 0x24, 0x84, 0x2e, 0x65, 0xbc, 0x9d, - 0x38, 0xd3, 0x13, 0x4f, 0x19, 0x0f, 0x32, 0x9a, 0xf1, 0xf6, 0x80, 0xa5, 0x48, 0xe3, 0x01, 0x85, - 0x75, 0xb9, 0x64, 0xb9, 0xf6, 0x5f, 0x89, 0xf7, 0x59, 0x2e, 0x7c, 0x7e, 0x18, 0x3f, 0x06, 0x93, - 0xd5, 0x49, 0xb8, 0xce, 0xef, 0xad, 0xce, 0x16, 0x60, 0x01, 0xf9, 0xff, 0xa1, 0xa9, 0x76, 0x86, - 0x90, 0x45, 0x34, 0x5d, 0x05, 0xab, 0xe9, 0x5b, 0xaf, 0xa8, 0x29, 0xa6, 0x96, 0xe5, 0x0c, 0xe1, - 0x86, 0xdc, 0x54, 0x58, 0xa0, 0xaa, 0xeb, 0x02, 0xde, 0x94, 0x75, 0x71, 0xd9, 0x62, 0xb1, 0xc6, - 
0xfc, 0x6b, 0xf5, 0x8d, 0xab, 0x9c, 0x63, 0xd5, 0x9a, 0xb7, 0x44, 0xc1, 0x2a, 0x49, 0xaf, 0xf7, - 0x1c, 0x36, 0x5e, 0xd8, 0x4e, 0x28, 0xfb, 0xa8, 0xa9, 0x4a, 0x0a, 0xbc, 0xbe, 0x47, 0xaf, 0xa8, - 0xef, 0x73, 0xcc, 0x1c, 0x93, 0xb6, 0xd6, 0x5e, 0x2c, 0x02, 0x83, 0xd6, 0xdf, 0xce, 0x41, 0x3d, - 0x5e, 0x0a, 0x23, 0x3d, 0xe2, 0xb8, 0x92, 0x4c, 0xb4, 0xe0, 0xec, 0x85, 0x61, 0xab, 0x87, 0xcc, - 0xf3, 0xa2, 0xc9, 0x2d, 0x9b, 0x62, 0x72, 0xd3, 0x2d, 0x5d, 0xb9, 0x57, 0x39, 0x3e, 0xe4, 0x5f, - 0xcb, 0xf1, 0xa1, 0x90, 0xe6, 0xf8, 0xf0, 0xed, 0x6b, 0x2d, 0xe5, 0xa8, 0xaf, 0x4e, 0xb5, 0x92, - 0x3f, 0xbe, 0xde, 0x4a, 0x8e, 0x2c, 0xf9, 0x75, 0x16, 0x72, 0xcd, 0xbe, 0x5f, 0xba, 0xc6, 0x3e, - 0xa5, 0x59, 0xfc, 0x53, 0x2c, 0xe4, 0xe5, 0xaf, 0x60, 0x21, 0x6f, 0xfd, 0x51, 0x06, 0xc8, 0xe2, - 0xee, 0x20, 0x4f, 0xd1, 0x9a, 0xe9, 0xd2, 0x89, 0xa0, 0xdc, 0xef, 0xbf, 0xde, 0x0e, 0x93, 0x0b, - 0x42, 0xe6, 0x26, 0x1f, 0xc0, 0xaa, 0x7e, 0xab, 0x4d, 0x57, 0x45, 0xd4, 0x4c, 0xa2, 0x27, 0x45, - 0x4a, 0x35, 0xcd, 0xcb, 0x24, 0xff, 0x4a, 0x2f, 0x93, 0xc2, 0x2b, 0xbd, 0x4c, 0x96, 0xe2, 0x5e, - 0x26, 0xad, 0x7f, 0x9b, 0x81, 0xd5, 0x94, 0x45, 0xfc, 0xf5, 0xf5, 0x99, 0xad, 0xbd, 0x18, 0x59, - 0xcb, 0x8a, 0xb5, 0xa7, 0x53, 0xb4, 0x03, 0xa9, 0x88, 0x65, 0x53, 0x11, 0x88, 0x93, 0xea, 0xc1, - 0xab, 0xa8, 0x4b, 0x94, 0xc3, 0xd4, 0xb3, 0xb7, 0xfe, 0x6e, 0x16, 0x2a, 0x5a, 0x22, 0x1b, 0x45, - 0x5c, 0xb2, 0x9a, 0xff, 0x25, 0xf2, 0x96, 0x5c, 0x91, 0xc2, 0x9d, 0xe9, 0xf9, 0xe2, 0xe4, 0xe9, - 0xb8, 0xb9, 0x04, 0x23, 0xc9, 0x11, 0xb6, 0x60, 0x55, 0x5a, 0x9a, 0x69, 0xe4, 0x26, 0x2e, 0xce, - 0x1a, 0xe1, 0x34, 0x20, 0x1a, 0xc9, 0xf1, 0x3f, 0x90, 0x32, 0x6e, 0x34, 0x77, 0x9a, 0xe5, 0x6e, - 0x45, 0xb8, 0x2b, 0x88, 0x49, 0x64, 0xeb, 0xfc, 0x43, 0x58, 0x57, 0xfe, 0x0a, 0xb1, 0x1c, 0x68, - 0x1f, 0x22, 0xd2, 0x2f, 0x41, 0xcb, 0xf2, 0x03, 0xb8, 0x9d, 0x68, 0x53, 0x22, 0x2b, 0xfa, 0xb9, - 0xdd, 0x8c, 0xb5, 0x4e, 0x2f, 0xa1, 0xf5, 0xa7, 0xa0, 0x16, 0x23, 0x94, 0x5f, 0xdf, 0x94, 0x27, - 0x95, 0x57, 0x38, 0xa2, 0xba, 0xf2, 
0xaa, 0xf5, 0xbf, 0x72, 0x40, 0x16, 0x69, 0xf5, 0x4f, 0xb3, - 0x09, 0x8b, 0x0b, 0x33, 0x97, 0xb2, 0x30, 0xff, 0x9f, 0xf1, 0x0f, 0x91, 0x0e, 0x55, 0x73, 0x17, - 0xc0, 0xcd, 0xd9, 0x50, 0x09, 0xb2, 0x15, 0x9f, 0x24, 0x9d, 0xaa, 0x4a, 0xb1, 0xfb, 0x93, 0x1a, - 0x03, 0x95, 0xf0, 0xad, 0x3a, 0x86, 0x25, 0xdb, 0x1d, 0x9d, 0x7b, 0xbe, 0xa0, 0x83, 0x3f, 0xf3, - 0x95, 0x8f, 0xcf, 0xad, 0x36, 0xcf, 0xcf, 0xb9, 0x36, 0x53, 0x14, 0x66, 0x7c, 0x08, 0x15, 0x0d, - 0x4c, 0xca, 0x50, 0x38, 0xe8, 0x1e, 0x6e, 0xf7, 0x1b, 0x37, 0x48, 0x0d, 0xca, 0x66, 0x67, 0xa7, - 0xff, 0x59, 0xc7, 0xec, 0xec, 0x36, 0x32, 0xa4, 0x04, 0xf9, 0x83, 0xfe, 0x60, 0xd8, 0xc8, 0x1a, - 0x2d, 0x68, 0x8a, 0x12, 0x17, 0xad, 0x49, 0xbf, 0x91, 0x57, 0x3a, 0x50, 0x9e, 0x28, 0x84, 0xfc, - 0x6f, 0x43, 0x55, 0x67, 0x6f, 0xc4, 0x8a, 0x48, 0x78, 0xac, 0x30, 0xf1, 0xde, 0xd3, 0x68, 0xf5, - 0x0e, 0xa0, 0xbf, 0xc2, 0x58, 0x65, 0xcb, 0xc6, 0xf8, 0xd6, 0x14, 0xc3, 0x2f, 0x97, 0x8f, 0x62, - 0xcb, 0xf0, 0xff, 0x83, 0x7a, 0xdc, 0x72, 0x22, 0x28, 0x52, 0x9a, 0xc8, 0xca, 0x72, 0xc7, 0x4c, - 0x29, 0xe4, 0x07, 0xd0, 0x48, 0x5a, 0x5e, 0x04, 0xf3, 0x7c, 0x4d, 0xfe, 0x65, 0x27, 0x6e, 0x8c, - 0x21, 0xfb, 0xb0, 0x96, 0xc6, 0xe0, 0xf1, 0xf5, 0x71, 0xbd, 0x9a, 0x83, 0x2c, 0x32, 0x71, 0xe4, - 0x3b, 0xc2, 0x02, 0x57, 0xe0, 0xd3, 0x7f, 0x2f, 0x5e, 0xbf, 0x36, 0xd8, 0x5b, 0xf8, 0x47, 0xb3, - 0xc5, 0x5d, 0x00, 0x44, 0x30, 0xd2, 0x80, 0x6a, 0xff, 0xa8, 0xd3, 0xb3, 0x76, 0xf6, 0xdb, 0xbd, - 0x5e, 0xe7, 0xa0, 0x71, 0x83, 0x10, 0xa8, 0x73, 0xa7, 0x8b, 0x5d, 0x05, 0xcb, 0x30, 0x98, 0xb0, - 0x84, 0x4a, 0x58, 0x96, 0xac, 0x41, 0xa3, 0xdb, 0x4b, 0x40, 0x73, 0xa4, 0x09, 0x6b, 0x47, 0x1d, - 0xf4, 0xd3, 0x88, 0x95, 0x9b, 0x67, 0x42, 0x83, 0xe8, 0x2e, 0x13, 0x1a, 0x3e, 0xb7, 0x27, 0x13, - 0x1a, 0x8a, 0x7d, 0x20, 0x79, 0xe9, 0xbf, 0x96, 0x81, 0xf5, 0x44, 0x42, 0x64, 0xbe, 0x40, 0x4e, - 0x3a, 0xce, 0x43, 0x57, 0x39, 0x50, 0xee, 0xa6, 0xf7, 0x60, 0x45, 0x69, 0xd3, 0x12, 0xa7, 0x52, - 0x43, 0x25, 0x48, 0xe4, 0x0f, 0x60, 0x55, 0x53, 0xca, 0x25, 0x68, 0x05, 
0xd1, 0x92, 0x44, 0x06, - 0xc3, 0x82, 0x9b, 0x4f, 0xa9, 0xbc, 0xf9, 0x2d, 0x80, 0xca, 0x56, 0xdb, 0x84, 0xe2, 0xd4, 0xe1, - 0x79, 0x84, 0x16, 0x47, 0x7e, 0x92, 0xfb, 0xb0, 0x1c, 0x9c, 0x7b, 0x2f, 0x7e, 0x4c, 0x7d, 0x4f, - 0x6f, 0x52, 0xc9, 0x4c, 0x82, 0x8d, 0xff, 0x92, 0x85, 0x37, 0xd3, 0x6a, 0xc0, 0x21, 0x60, 0xe0, - 0xeb, 0x3d, 0xef, 0x99, 0xd8, 0xc2, 0xc7, 0x82, 0x17, 0x9e, 0x31, 0xf1, 0x83, 0x49, 0x56, 0x01, - 0x82, 0xb1, 0x5f, 0xe2, 0x8b, 0x3b, 0xc8, 0xb3, 0x45, 0x65, 0x9f, 0x4c, 0x90, 0xfb, 0xc8, 0x98, - 0x11, 0x80, 0xbc, 0x09, 0x10, 0x44, 0xc9, 0xf2, 0x02, 0x46, 0x94, 0xfe, 0x0e, 0xd4, 0x9d, 0x29, - 0xa7, 0x84, 0xd4, 0xa7, 0x2f, 0x6c, 0x1f, 0x5d, 0x37, 0x32, 0x66, 0x02, 0xca, 0xbb, 0x9e, 0x40, - 0x44, 0xa6, 0x30, 0x09, 0x26, 0x77, 0xa1, 0x92, 0xbc, 0xe6, 0x92, 0x31, 0x75, 0x10, 0x31, 0xa0, - 0x1a, 0x24, 0x6f, 0x68, 0xe4, 0xcc, 0x18, 0x8c, 0x95, 0x82, 0x6a, 0x77, 0x74, 0x2e, 0x00, 0x34, - 0x04, 0x69, 0x20, 0xe3, 0x0b, 0x68, 0x5d, 0x3f, 0xc2, 0xe4, 0x09, 0x14, 0xd8, 0x70, 0x4a, 0xc3, - 0xf9, 0x37, 0x22, 0x5b, 0xdf, 0x4b, 0xe6, 0xc4, 0xc4, 0x3c, 0xc6, 0x16, 0x2c, 0x09, 0xbd, 0x76, - 0x03, 0x72, 0xf2, 0x5e, 0x53, 0xde, 0x64, 0x3f, 0x09, 0x81, 0xfc, 0x34, 0xf2, 0x06, 0xe7, 0xbf, - 0x8d, 0x4d, 0x75, 0x41, 0x31, 0xb1, 0x09, 0x7e, 0x39, 0x0f, 0x1b, 0xc9, 0x14, 0x75, 0x3f, 0xa2, - 0x18, 0x5b, 0xff, 0x68, 0xe7, 0x14, 0x20, 0xf2, 0x51, 0x82, 0xb8, 0xc4, 0x76, 0x00, 0x47, 0xd5, - 0x09, 0x89, 0xdc, 0x07, 0x8f, 0x92, 0x22, 0x04, 0x52, 0xc4, 0x9a, 0xbc, 0x13, 0xc2, 0xfb, 0x94, - 0x90, 0x28, 0x3e, 0x5a, 0x90, 0x28, 0xf2, 0x69, 0x99, 0x12, 0x02, 0x46, 0x07, 0x36, 0x23, 0xbf, - 0xe7, 0x78, 0x9d, 0x85, 0xb4, 0xec, 0xeb, 0x0a, 0xfb, 0x40, 0xaf, 0xfc, 0x29, 0x34, 0xa3, 0x62, - 0x12, 0xcd, 0x58, 0x4a, 0x2b, 0x67, 0x43, 0xa1, 0x9b, 0xb1, 0xf6, 0xfc, 0x10, 0x5a, 0xb1, 0xf1, - 0x8a, 0x37, 0xa9, 0x98, 0x56, 0xd4, 0xa6, 0x36, 0x80, 0xb1, 0x46, 0x1d, 0xc0, 0xad, 0x58, 0x59, - 0x89, 0x76, 0x95, 0xd2, 0x0a, 0x6b, 0x6a, 0x85, 0xc5, 0x5a, 0x66, 0xfc, 0xd6, 0x12, 0x90, 0x1f, - 0xcd, 0xa9, 
0x7f, 0xc5, 0xaf, 0x2d, 0x07, 0xaf, 0xba, 0xd0, 0x21, 0xf5, 0xb2, 0xd9, 0xd7, 0x0a, - 0x4d, 0x90, 0x16, 0x1a, 0x20, 0xff, 0xea, 0xd0, 0x00, 0x85, 0x57, 0x85, 0x06, 0x78, 0x1b, 0x6a, - 0xce, 0x99, 0xeb, 0x31, 0xb6, 0x87, 0x49, 0xbd, 0x41, 0x73, 0xe9, 0x6e, 0xee, 0x7e, 0xd5, 0xac, - 0x0a, 0x20, 0x93, 0x79, 0x03, 0xf2, 0x24, 0x42, 0xa2, 0xe3, 0x33, 0x1e, 0x77, 0x43, 0x67, 0x78, - 0x3a, 0xe3, 0x33, 0x2a, 0xd4, 0xd0, 0x7c, 0xc1, 0xca, 0xcc, 0x0c, 0x1e, 0x90, 0x7b, 0x50, 0x0f, - 0xbc, 0xb9, 0x3f, 0xe2, 0x12, 0x35, 0x1f, 0x06, 0xf4, 0x46, 0xa8, 0x22, 0xf4, 0x48, 0xfa, 0xa6, - 0xac, 0xce, 0x03, 0x6a, 0x4d, 0x9d, 0x20, 0x60, 0xa2, 0xd8, 0xc8, 0x73, 0x43, 0xdf, 0x9b, 0x08, - 0x07, 0x83, 0x95, 0x79, 0x40, 0x0f, 0x31, 0x65, 0x07, 0x13, 0xc8, 0x47, 0x51, 0x93, 0x66, 0xb6, - 0xe3, 0x07, 0x4d, 0xe0, 0x4d, 0x92, 0x3d, 0xe5, 0xb2, 0xba, 0xed, 0xf8, 0xaa, 0x2d, 0xec, 0x23, - 0x48, 0x84, 0x2c, 0xa8, 0x24, 0x43, 0x16, 0xfc, 0x52, 0x7a, 0xc8, 0x02, 0xf4, 0xa9, 0x7c, 0x28, - 0x8a, 0x5e, 0x9c, 0xe2, 0xaf, 0x14, 0xb9, 0x60, 0x31, 0x12, 0x43, 0xfd, 0xab, 0x44, 0x62, 0x58, - 0x4e, 0x8b, 0xc4, 0xf0, 0x21, 0x54, 0xf8, 0x1d, 0x79, 0xeb, 0x9c, 0x7b, 0x56, 0xa3, 0xc3, 0x44, - 0x43, 0xbf, 0x44, 0xbf, 0xef, 0xb8, 0xa1, 0x09, 0xbe, 0xfc, 0x19, 0x2c, 0x06, 0x45, 0x58, 0xf9, - 0x29, 0x06, 0x45, 0x10, 0x77, 0xf9, 0xb7, 0xa0, 0x24, 0xe7, 0x89, 0x11, 0xdb, 0x53, 0xdf, 0x9b, - 0x4a, 0x23, 0x2d, 0xfb, 0x4d, 0xea, 0x90, 0x0d, 0x3d, 0x91, 0x39, 0x1b, 0x7a, 0xc6, 0x2f, 0x40, - 0x45, 0x5b, 0x6a, 0xe4, 0x2d, 0xb4, 0x62, 0xb8, 0x74, 0x22, 0x95, 0xdc, 0x38, 0x8a, 0x65, 0x01, - 0xed, 0x8e, 0x19, 0x6f, 0x31, 0x76, 0x7c, 0xca, 0xc3, 0x97, 0x58, 0x3e, 0xbd, 0xa0, 0x7e, 0x20, - 0x0f, 0xf2, 0x86, 0x4a, 0x30, 0x11, 0x6e, 0xfc, 0x22, 0xac, 0xc6, 0xe6, 0x56, 0x90, 0xef, 0x7b, - 0xb0, 0xc4, 0xc7, 0x4d, 0x1e, 0x30, 0xf1, 0xe0, 0x04, 0x22, 0x8d, 0x87, 0x80, 0x41, 0x7b, 0xbf, - 0x35, 0xf3, 0xbd, 0x13, 0x71, 0xa0, 0x57, 0x04, 0xec, 0xc8, 0xf7, 0x4e, 0x8c, 0xdf, 0xcf, 0x41, - 0x6e, 0xdf, 0x9b, 0xe9, 0xde, 0xd8, 0x99, 0x05, 
0x6f, 0x6c, 0xa1, 0x5c, 0xb2, 0x94, 0xf2, 0x48, - 0xc8, 0xe7, 0xdc, 0xd2, 0x2d, 0x15, 0x48, 0xf7, 0xa1, 0xce, 0xe8, 0x44, 0xe8, 0x59, 0xe2, 0x16, - 0x14, 0x32, 0x0a, 0xb8, 0xf9, 0xec, 0x69, 0x38, 0xf4, 0xf6, 0x10, 0x4e, 0xd6, 0x20, 0xa7, 0x54, - 0x15, 0x3c, 0x99, 0x7d, 0x32, 0x06, 0x83, 0xdf, 0xde, 0x92, 0x37, 0xd9, 0xc5, 0x17, 0x79, 0x1f, - 0x56, 0xe3, 0xe5, 0x22, 0x29, 0x12, 0x72, 0x90, 0x5e, 0x30, 0xa7, 0x49, 0x37, 0x81, 0xd1, 0x91, - 0xe8, 0x2e, 0x7b, 0xce, 0x2c, 0x9e, 0x52, 0xca, 0x93, 0x34, 0xa2, 0x57, 0x8a, 0x11, 0xbd, 0x3b, - 0x50, 0x09, 0x27, 0x17, 0xd6, 0xcc, 0xbe, 0x9a, 0x78, 0xb6, 0xbc, 0xb2, 0x09, 0xe1, 0xe4, 0xe2, - 0x08, 0x21, 0xe4, 0x03, 0x80, 0xe9, 0x6c, 0x26, 0xf6, 0x1e, 0xe7, 0x06, 0xa2, 0xa5, 0x7c, 0x78, - 0x74, 0x84, 0x4b, 0xce, 0x2c, 0x4f, 0x67, 0x33, 0xfc, 0x49, 0x76, 0xa1, 0x9e, 0x1a, 0x62, 0xe4, - 0xb6, 0xbc, 0xe3, 0xe2, 0xcd, 0xb6, 0x52, 0x36, 0x67, 0x6d, 0xa4, 0xc3, 0x5a, 0x3f, 0x00, 0xf2, - 0x27, 0x0c, 0xf4, 0x31, 0x84, 0xb2, 0x6a, 0x9f, 0x1e, 0x27, 0x83, 0x5f, 0x2c, 0xac, 0xc4, 0xe2, - 0x64, 0x70, 0xae, 0xf0, 0x1e, 0xd4, 0x91, 0x39, 0x56, 0x24, 0x1f, 0x34, 0xee, 0x58, 0xdc, 0x0e, - 0x33, 0xfe, 0x6b, 0x06, 0x0a, 0x18, 0xb4, 0xe3, 0x1d, 0x58, 0x46, 0x7c, 0xe5, 0xd9, 0x2e, 0xfc, - 0x91, 0x90, 0xc7, 0x1e, 0x0a, 0xa7, 0x76, 0xb6, 0x2d, 0xb4, 0x08, 0x49, 0x11, 0x1b, 0xa1, 0x45, - 0x49, 0xba, 0x03, 0x65, 0x55, 0xb5, 0xb6, 0x74, 0x4a, 0xb2, 0x66, 0xf2, 0x26, 0xe4, 0xcf, 0xbd, - 0x99, 0xd4, 0xf2, 0x42, 0x34, 0x92, 0x26, 0x87, 0x47, 0x6d, 0x61, 0x75, 0x44, 0xb7, 0xd6, 0x72, - 0xa2, 0x2d, 0xac, 0x12, 0x19, 0xca, 0x20, 0xd1, 0xc7, 0xa5, 0x94, 0x3e, 0x1e, 0xc3, 0x32, 0xa3, - 0x03, 0x9a, 0x53, 0xd4, 0xf5, 0x87, 0xe6, 0x37, 0x99, 0x34, 0x37, 0x9a, 0xcc, 0xc7, 0x54, 0xd7, - 0xb3, 0x73, 0xce, 0x5c, 0xc0, 0xa5, 0x14, 0x6d, 0xfc, 0x56, 0x06, 0xe9, 0x0b, 0x2b, 0x97, 0xdc, - 0x87, 0xbc, 0x2b, 0x1d, 0xa8, 0x22, 0x99, 0x4d, 0xdd, 0xf0, 0x64, 0x78, 0x26, 0xc7, 0x60, 0x53, - 0xc7, 0xdd, 0x8e, 0xf4, 0xd2, 0x6b, 0x66, 0xc5, 0x9d, 0x4f, 0x95, 0x9a, 0xfa, 0x1b, 
0xb2, 0x5b, - 0x09, 0x15, 0x2f, 0xf6, 0x5e, 0x6d, 0xd3, 0x2d, 0xcd, 0xdf, 0x39, 0x1f, 0x3b, 0x31, 0xa5, 0xc4, - 0x37, 0x3e, 0xa3, 0x9a, 0x9f, 0xf3, 0xef, 0x64, 0xa1, 0x16, 0x6b, 0x11, 0x77, 0xf8, 0x66, 0x07, - 0x00, 0x9a, 0xa1, 0xc5, 0x7c, 0x73, 0xbf, 0x5a, 0x21, 0x94, 0x6b, 0xe3, 0x94, 0x8d, 0x8d, 0x93, - 0xf2, 0x80, 0xcc, 0xe9, 0x1e, 0x90, 0x0f, 0xa1, 0x1c, 0x45, 0xc6, 0x8a, 0x37, 0x89, 0xd5, 0x27, - 0xef, 0xb9, 0x46, 0x48, 0x91, 0xcf, 0x64, 0x41, 0xf7, 0x99, 0xfc, 0x9e, 0xe6, 0x62, 0xb7, 0xc4, - 0x8b, 0x31, 0xd2, 0x46, 0xf4, 0xa7, 0xe2, 0x60, 0x67, 0x3c, 0x81, 0x8a, 0xd6, 0x78, 0xdd, 0x4d, - 0x2d, 0x13, 0x73, 0x53, 0x53, 0x37, 0xde, 0xb3, 0xd1, 0x8d, 0x77, 0xe3, 0x57, 0xb3, 0x50, 0x63, - 0xfb, 0xcb, 0x71, 0xcf, 0x8e, 0xbc, 0x89, 0x33, 0xe2, 0x66, 0x69, 0xb5, 0xc3, 0x04, 0xa3, 0x25, - 0xf7, 0x99, 0xd8, 0x62, 0xc8, 0x67, 0xe9, 0x51, 0x55, 0x90, 0x48, 0xab, 0xa8, 0x2a, 0x06, 0xd4, - 0x18, 0x61, 0xe4, 0x06, 0xe6, 0x28, 0x0c, 0x96, 0x59, 0x39, 0xa5, 0x74, 0xdb, 0x0e, 0x90, 0x42, - 0xbe, 0x0f, 0xab, 0x0c, 0x87, 0xc7, 0x4c, 0x98, 0x3a, 0x93, 0x89, 0x13, 0x5d, 0x13, 0xcd, 0x99, - 0x8d, 0x53, 0x4a, 0x4d, 0x3b, 0xa4, 0x87, 0x2c, 0x41, 0x44, 0xcd, 0x2a, 0x8d, 0x9d, 0x80, 0x09, - 0x72, 0xd2, 0x2d, 0x5f, 0x7d, 0x4b, 0xbf, 0x8d, 0xc8, 0x35, 0x66, 0x49, 0xdc, 0x20, 0x45, 0xc7, - 0x0e, 0x9e, 0x3f, 0xb1, 0x92, 0x8a, 0xc9, 0x95, 0x64, 0xfc, 0xb3, 0x2c, 0x54, 0xb4, 0x65, 0xf9, - 0x3a, 0xa7, 0xeb, 0xed, 0x05, 0x37, 0x82, 0xb2, 0xee, 0x31, 0xf0, 0x76, 0xbc, 0xca, 0x9c, 0xba, - 0x4b, 0xa8, 0x2f, 0xe0, 0x5b, 0x50, 0x66, 0xbb, 0xee, 0x43, 0x6e, 0x6e, 0x11, 0xe1, 0xf0, 0x38, - 0xe0, 0x68, 0x7e, 0x22, 0x13, 0x1f, 0xf1, 0xc4, 0x42, 0x94, 0xf8, 0x88, 0x25, 0xbe, 0xec, 0x2e, - 0xd1, 0x27, 0x50, 0x15, 0xa5, 0xf2, 0x39, 0x15, 0x62, 0xc1, 0x9a, 0x76, 0x72, 0xab, 0xf9, 0x36, - 0x2b, 0x58, 0x1d, 0x4e, 0xbe, 0xc8, 0xf8, 0x48, 0x66, 0x2c, 0xbd, 0x2a, 0xe3, 0x23, 0xfc, 0x30, - 0xf6, 0xd4, 0xf5, 0x2c, 0xee, 0xdc, 0x2a, 0xe9, 0xd8, 0x07, 0xb0, 0x2a, 0xc9, 0xd5, 0xdc, 0xb5, - 0x5d, 0xd7, 0x9b, 0xbb, 
0x23, 0x2a, 0xaf, 0xaa, 0x13, 0x91, 0x74, 0x1c, 0xa5, 0x18, 0x63, 0x15, - 0x8b, 0x05, 0x9d, 0x64, 0x1f, 0x40, 0x01, 0xf9, 0x72, 0x64, 0x3e, 0xd2, 0x09, 0x17, 0xa2, 0x90, - 0xfb, 0x50, 0x40, 0xf6, 0x3c, 0x7b, 0x2d, 0xb1, 0x41, 0x04, 0xa3, 0x0d, 0x84, 0x65, 0x3c, 0xa4, - 0xa1, 0xef, 0x8c, 0x82, 0xe8, 0x16, 0x7c, 0x21, 0xbc, 0x9a, 0x89, 0xba, 0x22, 0x2b, 0x4d, 0x84, - 0xc9, 0xf5, 0x51, 0x88, 0xc3, 0x0e, 0xa6, 0xd5, 0x58, 0x19, 0x82, 0x5d, 0x9a, 0xc0, 0xc6, 0x09, - 0x0d, 0x5f, 0x50, 0xea, 0xba, 0x8c, 0x19, 0x1a, 0x51, 0x37, 0xf4, 0xed, 0x09, 0x9b, 0x24, 0xec, - 0xc1, 0xe3, 0x85, 0x52, 0x23, 0x7d, 0xe7, 0x76, 0x94, 0x71, 0x47, 0xe5, 0x43, 0xda, 0xb1, 0x7e, - 0x92, 0x96, 0xd6, 0xfa, 0x79, 0x68, 0x5d, 0x9f, 0x29, 0x25, 0x96, 0xc6, 0xfd, 0x38, 0x55, 0x51, - 0x36, 0xff, 0x89, 0x67, 0x87, 0xd8, 0x1a, 0x9d, 0xb2, 0xf4, 0xa0, 0xa2, 0xa5, 0x44, 0x67, 0x7f, - 0x06, 0xb5, 0x35, 0xfc, 0x83, 0x9d, 0x48, 0xae, 0xe7, 0x4f, 0xb9, 0x8d, 0x7d, 0x6c, 0x45, 0xa5, - 0x67, 0xcc, 0xe5, 0x08, 0xce, 0xdd, 0xb2, 0x8c, 0x2d, 0x58, 0xe6, 0x9c, 0xbd, 0x76, 0xd0, 0xbd, - 0x8c, 0x19, 0x34, 0xd6, 0x80, 0xf4, 0x90, 0x76, 0xe9, 0x0e, 0xc3, 0xff, 0x3e, 0x07, 0x15, 0x0d, - 0xcc, 0x4e, 0x23, 0xee, 0x65, 0x6d, 0x8d, 0x1d, 0x7b, 0x4a, 0xa5, 0x43, 0x43, 0xcd, 0xac, 0x71, - 0xe8, 0xae, 0x00, 0xb2, 0xb3, 0xd8, 0xbe, 0x38, 0xb3, 0xbc, 0x79, 0x68, 0x8d, 0xe9, 0x99, 0x4f, - 0x65, 0x2b, 0xab, 0xf6, 0xc5, 0x59, 0x7f, 0x1e, 0xee, 0x72, 0x98, 0x0c, 0x3e, 0xa4, 0x61, 0xe5, - 0x54, 0xf0, 0xa1, 0x08, 0x4b, 0x78, 0xa7, 0xe3, 0xca, 0xcc, 0x2b, 0xef, 0x74, 0x94, 0x16, 0x93, - 0x07, 0x68, 0x61, 0xf1, 0x00, 0xfd, 0x08, 0x36, 0xf0, 0x00, 0x15, 0xa4, 0xd9, 0x4a, 0xec, 0xe4, - 0x35, 0x9e, 0x2a, 0x3a, 0xa9, 0xb1, 0xbd, 0x0d, 0xd6, 0x03, 0x49, 0x96, 0x02, 0xe7, 0xc7, 0x48, - 0xc8, 0x32, 0x26, 0xeb, 0x99, 0x28, 0x7c, 0xe0, 0xfc, 0x98, 0xca, 0xe0, 0x47, 0x31, 0x4c, 0x71, - 0x53, 0x70, 0xea, 0xb8, 0x49, 0x4c, 0xfb, 0x32, 0x8e, 0x59, 0x16, 0x98, 0xf6, 0xa5, 0x8e, 0xf9, - 0x18, 0x36, 0xa7, 0x74, 0xec, 0xd8, 0xf1, 0x62, 0xad, 0x88, 
0x71, 0x5b, 0xc3, 0x64, 0x2d, 0xcf, - 0x00, 0x05, 0x77, 0x36, 0x1a, 0x3f, 0xf6, 0xa6, 0x27, 0x0e, 0xf2, 0x2c, 0xe8, 0x70, 0x98, 0x37, - 0xeb, 0xee, 0x7c, 0xfa, 0x73, 0x1c, 0xcc, 0xb2, 0x04, 0x46, 0x0d, 0x2a, 0x83, 0xd0, 0x9b, 0xc9, - 0x69, 0xae, 0x43, 0x15, 0x3f, 0x45, 0x94, 0x87, 0x5b, 0x70, 0x93, 0x93, 0x84, 0xa1, 0x37, 0xf3, - 0x26, 0xde, 0xd9, 0x55, 0x4c, 0x67, 0xff, 0xaf, 0x32, 0xb0, 0x1a, 0x4b, 0x15, 0xe4, 0xf5, 0x23, - 0xa4, 0x67, 0xea, 0x86, 0x78, 0x26, 0x76, 0x3d, 0x90, 0xcd, 0x17, 0x22, 0x22, 0x31, 0x93, 0xb7, - 0xc6, 0xdb, 0x51, 0x64, 0x31, 0x99, 0x11, 0x49, 0x4a, 0x73, 0x91, 0xa4, 0x88, 0xfc, 0x32, 0xe6, - 0x98, 0x2c, 0xe2, 0x67, 0xc4, 0x6d, 0xce, 0xb1, 0xe8, 0x72, 0x2e, 0x7e, 0xdf, 0x4b, 0xd7, 0xef, - 0xcb, 0x16, 0x44, 0x4a, 0xff, 0xc0, 0xf8, 0x3b, 0x19, 0x80, 0xa8, 0x75, 0xfc, 0xc6, 0x99, 0xe2, - 0x5b, 0x32, 0xdc, 0xd7, 0x5f, 0xe3, 0x51, 0xde, 0x82, 0xaa, 0xba, 0x16, 0x12, 0x71, 0x42, 0x15, - 0x09, 0x63, 0xec, 0xd0, 0xbb, 0xb0, 0x7c, 0x36, 0xf1, 0x4e, 0x38, 0xc7, 0x2a, 0xf8, 0x16, 0xf4, - 0x18, 0xaa, 0x23, 0x58, 0x72, 0x23, 0x11, 0xdf, 0x94, 0x4f, 0xbd, 0x39, 0xa2, 0x73, 0x41, 0xc6, - 0x5f, 0xcc, 0x2a, 0xdf, 0xf3, 0x68, 0x24, 0x5e, 0x2e, 0xde, 0xfd, 0x24, 0x9e, 0x77, 0x2f, 0x73, - 0x25, 0x78, 0x02, 0x75, 0x1f, 0x0f, 0x25, 0x79, 0x62, 0xe5, 0x5f, 0x72, 0x62, 0xd5, 0xfc, 0x18, - 0xa7, 0xf3, 0x4d, 0x68, 0xd8, 0xe3, 0x0b, 0xea, 0x87, 0x0e, 0xb7, 0xcc, 0x71, 0xfe, 0x58, 0x78, - 0x7b, 0x6b, 0x70, 0xce, 0x88, 0xbe, 0x0b, 0xcb, 0x22, 0xf2, 0x88, 0xc2, 0x14, 0x21, 0x2c, 0x23, - 0x30, 0x43, 0x34, 0xfe, 0x81, 0x74, 0x76, 0x8f, 0xcf, 0xee, 0xcb, 0x47, 0x45, 0xef, 0x61, 0x76, - 0xd1, 0x59, 0x42, 0x2c, 0x24, 0x61, 0xf0, 0x13, 0xf4, 0x08, 0x81, 0xc2, 0xdc, 0x17, 0x1f, 0xd6, - 0xfc, 0xeb, 0x0c, 0xab, 0xf1, 0x6f, 0x32, 0x50, 0xdc, 0xf7, 0x66, 0xfb, 0x0e, 0x5e, 0x99, 0xe2, - 0xdb, 0x44, 0xd9, 0xa3, 0x97, 0xd8, 0x27, 0x77, 0x13, 0x7c, 0xc9, 0xcd, 0xe9, 0x54, 0x36, 0xaf, - 0x16, 0x67, 0xf3, 0xbe, 0x07, 0xb7, 0xb8, 0xb9, 0xdf, 0xf7, 0x66, 0x9e, 0xcf, 0xb6, 0xaa, 0x3d, - 
0x41, 0x76, 0xcf, 0x73, 0xc3, 0x73, 0x49, 0x3b, 0x6f, 0x9e, 0x52, 0x7a, 0xa4, 0x61, 0x1c, 0x2a, - 0x04, 0x1e, 0x35, 0x61, 0x12, 0x5e, 0x58, 0x28, 0xa1, 0x0b, 0x7e, 0x14, 0x29, 0xea, 0x32, 0x4b, - 0xe8, 0x70, 0x38, 0xe7, 0x48, 0x8d, 0xef, 0x40, 0x59, 0x29, 0x7b, 0xc8, 0x7b, 0x50, 0x3e, 0xf7, - 0x66, 0x42, 0x23, 0x94, 0x89, 0xdd, 0x2e, 0x17, 0xbd, 0x36, 0x4b, 0xe7, 0xf8, 0x23, 0x30, 0x7e, - 0xbf, 0x08, 0xc5, 0xae, 0x7b, 0xe1, 0x39, 0x23, 0xee, 0x2e, 0x3f, 0xa5, 0x53, 0x4f, 0x06, 0x46, - 0x62, 0xbf, 0xb9, 0x27, 0x67, 0x14, 0x88, 0x32, 0x27, 0x3c, 0x39, 0x55, 0x08, 0xca, 0x75, 0x58, - 0xf2, 0xf5, 0x48, 0x92, 0x05, 0x9f, 0x5f, 0x32, 0x52, 0xe7, 0x65, 0x41, 0x0b, 0x5c, 0xc5, 0xca, - 0x42, 0x4f, 0x66, 0x3e, 0x64, 0x18, 0xf9, 0xa0, 0xcc, 0x21, 0x7c, 0xc0, 0xde, 0x80, 0xa2, 0xd0, - 0xfb, 0xe2, 0xd5, 0x52, 0xd4, 0x96, 0x0b, 0x10, 0x5f, 0x0d, 0x3e, 0x45, 0x77, 0x0d, 0xc5, 0xc8, - 0xe6, 0xcc, 0xaa, 0x04, 0xee, 0xb2, 0xb5, 0x76, 0x07, 0x2a, 0x88, 0x8f, 0x28, 0x25, 0x61, 0x0a, - 0xe1, 0x20, 0x8e, 0x90, 0x12, 0x90, 0xb5, 0x9c, 0x1a, 0x90, 0x95, 0xdf, 0x87, 0x50, 0x54, 0x16, - 0xbb, 0x08, 0x18, 0x86, 0x53, 0x83, 0xcb, 0xf0, 0xc9, 0x42, 0xa7, 0x82, 0x41, 0x41, 0xa4, 0x4e, - 0xe5, 0x6d, 0xa8, 0x9d, 0xda, 0x93, 0xc9, 0x89, 0x3d, 0x7a, 0x8e, 0xaa, 0x80, 0x2a, 0x6a, 0x3f, - 0x25, 0x90, 0xeb, 0x02, 0xee, 0x40, 0x45, 0x9b, 0x65, 0xee, 0x42, 0x9e, 0x37, 0x21, 0x9a, 0xdf, - 0xa4, 0x86, 0xaf, 0xfe, 0x1a, 0x1a, 0x3e, 0xcd, 0x95, 0x7e, 0x39, 0xee, 0x4a, 0x7f, 0x8b, 0x53, - 0x53, 0xe1, 0xa0, 0xdc, 0xc0, 0x98, 0x8f, 0xf6, 0x78, 0x8c, 0x61, 0x7a, 0xde, 0x82, 0xaa, 0x18, - 0x3c, 0x4c, 0x5f, 0x41, 0x59, 0x02, 0x61, 0x88, 0x72, 0x1b, 0xd5, 0xd4, 0x33, 0xdb, 0x19, 0xf3, - 0x9b, 0x5d, 0xc2, 0xa2, 0x61, 0x4f, 0xc3, 0x23, 0xdb, 0xe1, 0xae, 0x99, 0x32, 0x99, 0x9f, 0x8e, - 0xab, 0x38, 0xfe, 0x22, 0x79, 0x80, 0x21, 0x6f, 0x14, 0xc6, 0x54, 0x45, 0xf5, 0x30, 0x2b, 0x02, - 0x85, 0xaf, 0x83, 0x0f, 0xb9, 0x47, 0x5f, 0x48, 0x79, 0xdc, 0x8e, 0xfa, 0xa3, 0x5b, 0xca, 0xd1, - 0x88, 0xaf, 0x52, 0xf9, 0x17, 0x0d, 
0xe1, 0x88, 0xc9, 0x98, 0x3b, 0xb4, 0xc7, 0x6f, 0xc4, 0xf8, - 0x5f, 0x81, 0xca, 0xed, 0xf1, 0x88, 0x40, 0xbe, 0xa3, 0xc9, 0xaf, 0x4d, 0x8e, 0xfc, 0x46, 0xa2, - 0xfc, 0xeb, 0xae, 0xce, 0xde, 0x06, 0x70, 0x02, 0x76, 0xca, 0x04, 0xd4, 0x1d, 0xf3, 0xf0, 0x1b, - 0x25, 0xb3, 0xec, 0x04, 0xcf, 0x10, 0xf0, 0xf5, 0x0a, 0xb6, 0x6d, 0xa8, 0xea, 0xdd, 0x24, 0x25, - 0xc8, 0xf7, 0x8f, 0x3a, 0xbd, 0xc6, 0x0d, 0x52, 0x81, 0xe2, 0xa0, 0x33, 0x1c, 0x1e, 0x70, 0xab, - 0x7e, 0x15, 0x4a, 0xea, 0x72, 0x7d, 0x96, 0x7d, 0xb5, 0x77, 0x76, 0x3a, 0x47, 0xc3, 0xce, 0x6e, - 0x23, 0xf7, 0xc3, 0x7c, 0x29, 0xdb, 0xc8, 0x19, 0x7f, 0x90, 0x83, 0x8a, 0x36, 0x0a, 0x2f, 0x27, - 0xc6, 0xf1, 0x30, 0x4e, 0xd9, 0x64, 0x18, 0x27, 0xdd, 0x46, 0x21, 0x42, 0x5d, 0x49, 0x1b, 0xc5, - 0xdb, 0x50, 0x13, 0xe1, 0x26, 0x35, 0xdf, 0x8c, 0x82, 0x59, 0x45, 0xa0, 0x20, 0xd5, 0x3c, 0x54, - 0x07, 0x47, 0xe2, 0x97, 0xa0, 0x85, 0x9d, 0x12, 0x41, 0xfc, 0x1a, 0x34, 0xbf, 0xc3, 0x1e, 0x78, - 0x93, 0x0b, 0x8a, 0x18, 0xc8, 0x11, 0x56, 0x04, 0x6c, 0x28, 0xc2, 0xa0, 0x08, 0x7a, 0xa8, 0xc5, - 0x8a, 0x28, 0x98, 0x55, 0x04, 0x8a, 0x8a, 0xde, 0x97, 0x0b, 0x08, 0x3d, 0xd5, 0x36, 0x17, 0x57, - 0x43, 0x6c, 0xf1, 0x1c, 0x2c, 0xa8, 0x11, 0xcb, 0x31, 0x7b, 0xa2, 0x96, 0xef, 0xd5, 0xea, 0x44, - 0xf2, 0x1e, 0x90, 0xe9, 0x6c, 0x66, 0xa5, 0x28, 0xf8, 0xf2, 0xe6, 0xf2, 0x74, 0x36, 0x1b, 0x6a, - 0xfa, 0xaf, 0xaf, 0x41, 0xf7, 0xf8, 0x25, 0x90, 0x36, 0xdb, 0xc0, 0xbc, 0x89, 0x4a, 0x14, 0x8b, - 0xc8, 0x72, 0x46, 0x27, 0xcb, 0x29, 0xd4, 0x2f, 0x9b, 0x4a, 0xfd, 0x5e, 0x46, 0x27, 0x8c, 0x3d, - 0xa8, 0x1c, 0x69, 0x51, 0x7f, 0xef, 0xb2, 0x13, 0x42, 0xc6, 0xfb, 0xc5, 0xb3, 0x03, 0x75, 0x8a, - 0xbe, 0x08, 0xf3, 0xab, 0xb5, 0x26, 0xab, 0xb5, 0xc6, 0xf8, 0x5b, 0x19, 0x0c, 0xba, 0xa7, 0x1a, - 0x1f, 0x05, 0x1a, 0x96, 0xa6, 0xb9, 0x28, 0xa4, 0x4b, 0x45, 0x1a, 0xdf, 0x44, 0x34, 0x16, 0xde, - 0x34, 0xcb, 0x3b, 0x3d, 0x0d, 0xa8, 0xf4, 0xe7, 0xaa, 0x70, 0x58, 0x9f, 0x83, 0x24, 0xf3, 0xcd, - 0x38, 0x7c, 0x07, 0xcb, 0x0f, 0x84, 0x13, 0x17, 0x63, 0xbe, 0x0f, 0xed, 
0x4b, 0x51, 0x6b, 0xc0, - 0x58, 0x10, 0x61, 0x1f, 0x90, 0x21, 0x0d, 0xd4, 0xb7, 0xf1, 0xd7, 0x45, 0xd4, 0x99, 0xe4, 0xf8, - 0x3e, 0x80, 0x92, 0x2a, 0x35, 0x7e, 0xc2, 0x4a, 0x4c, 0x95, 0xce, 0xce, 0x71, 0xae, 0x0c, 0x89, - 0xb5, 0x18, 0x37, 0x17, 0xb7, 0xf1, 0x74, 0xb5, 0x56, 0x7f, 0x0b, 0xc8, 0xa9, 0xe3, 0x27, 0x91, - 0x71, 0xb3, 0x35, 0x78, 0x8a, 0x86, 0x6d, 0x1c, 0xc3, 0xaa, 0xa4, 0x12, 0x9a, 0x44, 0x10, 0x9f, - 0xbc, 0xcc, 0x2b, 0x88, 0x7c, 0x76, 0x81, 0xc8, 0x1b, 0xbf, 0x56, 0x80, 0xa2, 0x8c, 0xa0, 0x9d, - 0x16, 0xf5, 0xb9, 0x1c, 0x8f, 0xfa, 0xdc, 0x8c, 0x05, 0xa9, 0xe4, 0x53, 0x2f, 0xce, 0xfb, 0x77, - 0x93, 0x47, 0xb6, 0x66, 0xab, 0x88, 0x1d, 0xdb, 0xc2, 0x56, 0x51, 0x88, 0xdb, 0x2a, 0xd2, 0x22, - 0x61, 0x23, 0xeb, 0xb9, 0x10, 0x09, 0xfb, 0x16, 0x20, 0x1f, 0xa1, 0x39, 0xb2, 0x96, 0x38, 0x40, - 0x84, 0xe5, 0xd0, 0xd8, 0x8e, 0x52, 0x92, 0xed, 0x78, 0x6d, 0x96, 0xe0, 0x23, 0x58, 0xc2, 0x08, - 0x56, 0x22, 0x44, 0x83, 0x3c, 0x38, 0xc4, 0x58, 0xc9, 0xbf, 0x78, 0x3f, 0xca, 0x14, 0xb8, 0x7a, - 0xe4, 0xd4, 0x4a, 0x2c, 0x72, 0xaa, 0x6e, 0x43, 0xa9, 0xc6, 0x6d, 0x28, 0xf7, 0xa1, 0xa1, 0x06, - 0x8e, 0x6b, 0x24, 0xdd, 0x40, 0x5c, 0xcf, 0xae, 0x4b, 0x38, 0xa3, 0x86, 0xbd, 0x20, 0x3a, 0xf8, - 0xea, 0xb1, 0x83, 0x8f, 0xd1, 0xaa, 0x76, 0x18, 0xd2, 0xe9, 0x2c, 0x94, 0x07, 0x9f, 0x16, 0x7c, - 0x1c, 0x67, 0x1e, 0xef, 0x8f, 0xc9, 0xe9, 0xc5, 0xd5, 0xb1, 0x0d, 0xf5, 0x53, 0xdb, 0x99, 0xcc, - 0x7d, 0x6a, 0xf9, 0xd4, 0x0e, 0x3c, 0x97, 0x6f, 0xfe, 0xe8, 0x0c, 0x16, 0x5d, 0xdc, 0x43, 0x1c, - 0x93, 0xa3, 0x98, 0xb5, 0x53, 0xfd, 0x93, 0xdf, 0xc2, 0xd4, 0x47, 0x82, 0x1d, 0x59, 0x22, 0x50, - 0x03, 0xfa, 0xa5, 0x75, 0x7b, 0xd6, 0xde, 0x41, 0xf7, 0xe9, 0xfe, 0xb0, 0x91, 0x61, 0x9f, 0x83, - 0xe3, 0x9d, 0x9d, 0x4e, 0x67, 0x97, 0x1f, 0x61, 0x00, 0x4b, 0x7b, 0xed, 0xee, 0x81, 0x38, 0xc0, - 0xf2, 0x8d, 0x82, 0xf1, 0x4f, 0xb3, 0x50, 0xd1, 0x7a, 0x43, 0x1e, 0xab, 0x49, 0xc0, 0xd0, 0x30, - 0xb7, 0x17, 0x7b, 0xbc, 0x25, 0x29, 0xbc, 0x36, 0x0b, 0x2a, 0xcc, 0x78, 0xf6, 0xda, 0x30, 0xe3, - 0xe4, 0x1d, 
0x58, 0xb6, 0xb1, 0x04, 0x35, 0xe8, 0x42, 0xb9, 0x2f, 0xc0, 0x62, 0xcc, 0xdf, 0x11, - 0x61, 0x6a, 0xc4, 0x31, 0xc5, 0xf0, 0xf2, 0xd2, 0x41, 0x5b, 0x9d, 0x54, 0x7c, 0x6e, 0x8a, 0x62, - 0x64, 0x84, 0x31, 0x5e, 0x1d, 0xf8, 0x62, 0xbc, 0x64, 0x32, 0x5e, 0xcd, 0xd6, 0x56, 0x78, 0xd5, - 0x54, 0xdf, 0xc6, 0xc7, 0x00, 0x51, 0x7f, 0xe2, 0xc3, 0x77, 0x23, 0x3e, 0x7c, 0x19, 0x6d, 0xf8, - 0xb2, 0xc6, 0xdf, 0x17, 0xa4, 0x4b, 0xcc, 0x85, 0x52, 0xf5, 0xbd, 0x0f, 0x52, 0xf9, 0x68, 0xf1, - 0x0b, 0x1d, 0xb3, 0x09, 0x0d, 0xe5, 0xed, 0xf2, 0x15, 0x91, 0xd2, 0x55, 0x09, 0x0b, 0xa4, 0x36, - 0xbb, 0x48, 0x6a, 0xdf, 0x82, 0x2a, 0x8f, 0x7b, 0x28, 0x2a, 0x12, 0xe4, 0xaa, 0x32, 0xb5, 0x2f, - 0x65, 0xdd, 0x31, 0x1a, 0x9b, 0x4f, 0xd0, 0xd8, 0xbf, 0x91, 0xc1, 0x20, 0x59, 0x51, 0x43, 0x23, - 0x22, 0xab, 0xca, 0x8c, 0x13, 0x59, 0x81, 0x6a, 0xaa, 0xf4, 0x6b, 0x08, 0x67, 0x36, 0x9d, 0x70, - 0xa6, 0x93, 0xe4, 0x5c, 0x2a, 0x49, 0x36, 0x5a, 0xd0, 0xdc, 0xa5, 0x6c, 0x28, 0xda, 0x93, 0x49, - 0x62, 0x2c, 0x8d, 0x5b, 0x70, 0x33, 0x25, 0x4d, 0x68, 0x6d, 0x7e, 0x3d, 0x03, 0xeb, 0x6d, 0x8c, - 0x8d, 0xf3, 0xb5, 0x5d, 0xff, 0xfe, 0x14, 0x6e, 0xaa, 0xdb, 0x19, 0xda, 0xad, 0x52, 0x3d, 0xb0, - 0x99, 0xbc, 0xd8, 0xa1, 0xdd, 0x49, 0x62, 0x67, 0xa6, 0xd1, 0x84, 0x8d, 0x64, 0x6b, 0x44, 0x43, - 0xf7, 0x60, 0x65, 0x97, 0x9e, 0xcc, 0xcf, 0x0e, 0xe8, 0x45, 0xd4, 0x46, 0x02, 0xf9, 0xe0, 0xdc, - 0x7b, 0x21, 0x16, 0x06, 0xff, 0xcd, 0xdd, 0xb7, 0x19, 0x8e, 0x15, 0xcc, 0xe8, 0x48, 0x6a, 0xfd, - 0x39, 0x64, 0x30, 0xa3, 0x23, 0xe3, 0x31, 0x10, 0xbd, 0x1c, 0x31, 0x8b, 0x4c, 0x24, 0x9b, 0x9f, - 0x58, 0xc1, 0x55, 0x10, 0xd2, 0xa9, 0x74, 0x83, 0x83, 0x60, 0x7e, 0x32, 0x40, 0x88, 0xf1, 0x2e, - 0x54, 0x8f, 0xec, 0x2b, 0x93, 0x7e, 0x29, 0x2e, 0x26, 0x6f, 0x42, 0x71, 0x66, 0x5f, 0x31, 0x5a, - 0xac, 0x0c, 0x80, 0x3c, 0xd9, 0xf8, 0x47, 0x79, 0x58, 0x42, 0x4c, 0x72, 0x17, 0x1f, 0x00, 0x71, - 0x5c, 0x4e, 0x0b, 0xe5, 0xa9, 0xa4, 0x81, 0x16, 0x0e, 0xae, 0xec, 0xe2, 0xc1, 0x25, 0xb4, 0x95, - 0x32, 0xf0, 0xa2, 0x34, 0xd5, 0xb8, 0xf3, 0xa9, 
0x8c, 0xb6, 0x18, 0x0f, 0x0d, 0x93, 0x8f, 0x5e, - 0xa4, 0xc1, 0xb0, 0x18, 0x71, 0x63, 0x7a, 0x24, 0xf8, 0x61, 0xeb, 0xe4, 0x79, 0x2c, 0xce, 0x2c, - 0x1d, 0x94, 0x2a, 0x5d, 0x16, 0xe5, 0x6d, 0xfb, 0xb8, 0x74, 0xb9, 0x20, 0x45, 0x96, 0x5e, 0x2d, - 0x45, 0xa2, 0x1a, 0xf3, 0x25, 0x52, 0x24, 0xbc, 0x86, 0x14, 0xf9, 0x1a, 0x86, 0xec, 0x9b, 0x50, - 0xe2, 0x4c, 0x96, 0x76, 0x84, 0x31, 0xe6, 0x8a, 0x1d, 0x61, 0x9f, 0x68, 0x72, 0x16, 0x7a, 0xd1, - 0x68, 0x67, 0x88, 0x49, 0xbf, 0xfc, 0xe9, 0x18, 0x08, 0xbf, 0x80, 0xa2, 0x80, 0xb2, 0x05, 0xed, - 0xda, 0x53, 0x19, 0x5e, 0x98, 0xff, 0x66, 0xc3, 0xc6, 0x03, 0x6e, 0x7e, 0x39, 0x77, 0x7c, 0x3a, - 0x96, 0x61, 0xff, 0x1c, 0xbe, 0xbf, 0x19, 0x84, 0x75, 0x90, 0xc9, 0x7c, 0xae, 0x7c, 0x1e, 0xa0, - 0x64, 0x16, 0x9d, 0xe0, 0x19, 0xfb, 0x34, 0x08, 0x34, 0x78, 0x80, 0xf4, 0x99, 0xe7, 0x4b, 0x0e, - 0xc1, 0xf8, 0xed, 0x0c, 0x34, 0xc4, 0xee, 0x52, 0x69, 0xba, 0xc8, 0x55, 0xb8, 0xce, 0xe9, 0xe3, - 0xe5, 0x41, 0xfc, 0x0c, 0xa8, 0x71, 0x4d, 0x93, 0x62, 0x17, 0x50, 0x53, 0x56, 0x61, 0xc0, 0x3d, - 0xc1, 0x32, 0xbc, 0x09, 0x15, 0x79, 0xb9, 0x64, 0xea, 0x48, 0xf7, 0xd1, 0x32, 0xde, 0x2e, 0x39, - 0x74, 0x26, 0x92, 0xdb, 0xf0, 0xed, 0x50, 0x3a, 0x90, 0x16, 0x85, 0xa5, 0xd1, 0xf8, 0x27, 0x19, - 0x58, 0xd1, 0xba, 0x22, 0xf6, 0xed, 0x77, 0xa1, 0xaa, 0x9e, 0x6d, 0xa0, 0x8a, 0xcd, 0xdd, 0x8c, - 0xd3, 0xa8, 0x28, 0x5b, 0x65, 0xa4, 0x20, 0x01, 0x6b, 0xcc, 0xd8, 0xbe, 0xc2, 0x1b, 0x10, 0xf3, - 0xa9, 0x94, 0x24, 0xc7, 0xf6, 0xd5, 0x1e, 0xa5, 0x83, 0xf9, 0x94, 0xdc, 0x85, 0xea, 0x0b, 0x4a, - 0x9f, 0x2b, 0x04, 0x24, 0xbd, 0xc0, 0x60, 0x02, 0xc3, 0x80, 0xda, 0xd4, 0x73, 0xc3, 0x73, 0x85, - 0x22, 0x58, 0x7c, 0x0e, 0x44, 0x1c, 0xe3, 0xf7, 0xb2, 0xb0, 0x8a, 0xfa, 0x4c, 0xa1, 0x47, 0x56, - 0xbe, 0xbd, 0x4b, 0xa8, 0xda, 0x45, 0xe2, 0xb5, 0x7f, 0xc3, 0x14, 0xdf, 0xe4, 0xa3, 0xd7, 0xd4, - 0xc1, 0xca, 0x00, 0x13, 0xd7, 0x0c, 0x7f, 0x6e, 0x71, 0xf8, 0xaf, 0x1f, 0xde, 0x34, 0xab, 0x72, - 0x21, 0xcd, 0xaa, 0xfc, 0x3a, 0xb6, 0xdc, 0x85, 0x50, 0x08, 0xc5, 0xc5, 0x88, 0xc1, 
0x8f, 0x61, - 0x33, 0x86, 0xc3, 0xa9, 0xb5, 0x73, 0xea, 0xa8, 0x70, 0xf4, 0x6b, 0x1a, 0xf6, 0x40, 0xa6, 0x6d, - 0x17, 0xa1, 0x10, 0x8c, 0xbc, 0x19, 0x35, 0x36, 0x60, 0x2d, 0x3e, 0xaa, 0xe2, 0x98, 0xf8, 0xcd, - 0x0c, 0x34, 0xf7, 0xa2, 0xd0, 0xcb, 0x4e, 0x10, 0x7a, 0xbe, 0x8a, 0xe0, 0x7f, 0x1b, 0x00, 0xdf, - 0xc1, 0xe2, 0x82, 0xbb, 0x08, 0xa2, 0xc5, 0x21, 0x5c, 0x6c, 0xbf, 0x09, 0x25, 0xea, 0x8e, 0x31, - 0x11, 0x57, 0x43, 0x91, 0xba, 0x63, 0x29, 0xf4, 0x2f, 0x1c, 0xc3, 0xb5, 0x38, 0x83, 0x21, 0xc2, - 0xc1, 0xb0, 0xd1, 0xa1, 0x17, 0x9c, 0x1d, 0xc8, 0xab, 0x70, 0x30, 0x87, 0xf6, 0x25, 0xf7, 0x9e, - 0x0f, 0x8c, 0xbf, 0x94, 0x85, 0xe5, 0xa8, 0x7d, 0x18, 0x10, 0xeb, 0xe5, 0xa1, 0xbd, 0xee, 0x8a, - 0xe5, 0xe0, 0x30, 0x61, 0x49, 0xd3, 0xf2, 0x96, 0x70, 0x73, 0x76, 0x5d, 0x62, 0x40, 0x45, 0x62, - 0x78, 0xf3, 0x50, 0x8b, 0x72, 0x5c, 0x46, 0x94, 0xfe, 0x3c, 0x64, 0xd2, 0x2d, 0x13, 0xf3, 0x1d, - 0x57, 0xc8, 0x97, 0x05, 0x7b, 0x1a, 0x76, 0xf9, 0x6b, 0x6b, 0x0c, 0xcc, 0xb2, 0xe1, 0x44, 0x32, - 0x2c, 0x86, 0xdf, 0x40, 0x61, 0x07, 0x67, 0x8e, 0x0b, 0x3a, 0xba, 0x24, 0x80, 0xef, 0xb8, 0x28, - 0x49, 0xe0, 0x4d, 0xa8, 0x60, 0xe1, 0x51, 0xe4, 0x0b, 0x1e, 0x72, 0x30, 0xec, 0xba, 0x3c, 0x5d, - 0x68, 0xdc, 0xbc, 0x79, 0x4c, 0xcf, 0x00, 0x58, 0x15, 0x77, 0xb1, 0xf9, 0xf5, 0x0c, 0xdc, 0x4c, - 0x99, 0x36, 0xb1, 0xcb, 0x77, 0x40, 0x0b, 0xc0, 0x2d, 0x47, 0x17, 0xb7, 0xfa, 0x86, 0x24, 0xab, - 0xf1, 0x31, 0x35, 0x1b, 0xa7, 0x71, 0x40, 0x24, 0xe1, 0xe2, 0x0c, 0xc6, 0xe2, 0xaa, 0x70, 0x76, - 0x0a, 0xa7, 0x11, 0x85, 0xcb, 0x23, 0x68, 0x75, 0x2e, 0x19, 0xc5, 0x50, 0x2e, 0xd3, 0xa3, 0xe7, - 0x73, 0x69, 0xf9, 0x4a, 0x68, 0xf3, 0x33, 0xaf, 0xa5, 0xcd, 0x1f, 0x63, 0xd4, 0x03, 0x55, 0xd6, - 0x4f, 0x52, 0x08, 0x3f, 0x40, 0x59, 0x9e, 0x13, 0x5e, 0x84, 0x0c, 0xb0, 0xc2, 0x40, 0x58, 0xa8, - 0x11, 0xc0, 0xf2, 0xe1, 0x7c, 0x12, 0x3a, 0x3b, 0x0a, 0x44, 0x3e, 0x12, 0x79, 0x78, 0x3d, 0x72, - 0xd4, 0x52, 0x2b, 0x02, 0x55, 0x11, 0x1f, 0xac, 0x29, 0x2b, 0xc8, 0x5a, 0xac, 0x6f, 0x79, 0x1a, - 0xaf, 0xc1, 0xb8, 0x09, 
0x9b, 0xd1, 0x17, 0x0e, 0x9b, 0x3c, 0x6a, 0xfe, 0x66, 0x06, 0xaf, 0xea, - 0x60, 0xda, 0xc0, 0xb5, 0x67, 0xc1, 0xb9, 0x17, 0x92, 0x0e, 0xac, 0x06, 0x8e, 0x7b, 0x36, 0xa1, - 0x7a, 0xf1, 0x81, 0x18, 0x84, 0xf5, 0x78, 0xdb, 0x30, 0x6b, 0x60, 0xae, 0x60, 0x8e, 0xa8, 0xb4, - 0x80, 0x6c, 0x5f, 0xd7, 0xc8, 0x68, 0x59, 0x24, 0x46, 0x63, 0xb1, 0xf1, 0x5d, 0xa8, 0xc7, 0x2b, - 0x22, 0x9f, 0x88, 0x60, 0x21, 0x51, 0xab, 0x72, 0x89, 0x50, 0x09, 0xd1, 0x82, 0xa8, 0x44, 0x63, - 0x1f, 0x18, 0x7f, 0x21, 0x03, 0x4d, 0x93, 0xb2, 0x95, 0xab, 0xb5, 0x52, 0xae, 0x99, 0xef, 0x2e, - 0x94, 0x7a, 0x7d, 0x5f, 0x65, 0x0c, 0x12, 0xd9, 0xa2, 0x6f, 0x5d, 0x3b, 0x19, 0xfb, 0x37, 0x16, - 0x7a, 0xb4, 0x5d, 0x82, 0x25, 0x44, 0x31, 0x36, 0x61, 0x5d, 0xb4, 0x47, 0xb6, 0x25, 0x32, 0xd5, - 0xc6, 0x6a, 0x8c, 0x99, 0x6a, 0x5b, 0xd0, 0xc4, 0x3b, 0xfd, 0x7a, 0x27, 0x44, 0xc6, 0x5d, 0x20, - 0x87, 0xf6, 0xc8, 0xf6, 0x3d, 0xcf, 0x3d, 0xa2, 0xbe, 0x70, 0x86, 0xe6, 0x1c, 0x26, 0xb7, 0x64, - 0x4a, 0x56, 0x18, 0xbf, 0x64, 0x6c, 0x77, 0xcf, 0x95, 0xbe, 0x5f, 0xf8, 0x65, 0xf8, 0xb0, 0xba, - 0x6d, 0x3f, 0xa7, 0xb2, 0x24, 0x39, 0x44, 0x4f, 0xa0, 0x32, 0x53, 0x85, 0xca, 0x71, 0x97, 0xf1, - 0x95, 0x16, 0xab, 0x35, 0x75, 0x6c, 0x46, 0x82, 0x7c, 0xcf, 0x0b, 0x79, 0x9c, 0x12, 0x69, 0x0c, - 0x33, 0xcb, 0x0c, 0xf4, 0x8c, 0x5e, 0x75, 0xc7, 0xc6, 0x23, 0x58, 0x8b, 0xd7, 0x29, 0x48, 0x4b, - 0x0b, 0x4a, 0x53, 0x01, 0x13, 0xad, 0x57, 0xdf, 0x4c, 0x18, 0x61, 0x22, 0x9f, 0xcc, 0xd3, 0xdd, - 0x55, 0x22, 0xd5, 0x13, 0xd8, 0x5c, 0x48, 0x11, 0x05, 0xde, 0x85, 0xaa, 0xd6, 0x10, 0xec, 0x46, - 0x9e, 0xb1, 0xac, 0xa2, 0x25, 0x81, 0xf1, 0x29, 0x6c, 0xa2, 0x3c, 0x16, 0x65, 0x97, 0x43, 0x90, - 0xe8, 0x45, 0x26, 0xd9, 0x8b, 0x8f, 0xa4, 0x98, 0xa7, 0x67, 0x8d, 0xe2, 0x16, 0x8e, 0x79, 0x9a, - 0x74, 0xdf, 0x91, 0x9f, 0xc6, 0x31, 0x6c, 0x2c, 0x0e, 0x1f, 0x6b, 0xff, 0x9f, 0x68, 0xc8, 0xe5, - 0xf0, 0x44, 0xc9, 0x6a, 0x78, 0xfe, 0x5b, 0x06, 0xc7, 0x27, 0x96, 0x24, 0x9a, 0x39, 0x06, 0x32, - 0xa5, 0xe1, 0xb9, 0x37, 0xb6, 0x16, 0x6b, 0x7e, 0xac, 0xbc, 
0x87, 0x52, 0xf3, 0x6e, 0x1d, 0xf2, - 0x8c, 0x5a, 0x8a, 0xf0, 0x63, 0x9f, 0x26, 0xe1, 0xad, 0x11, 0x6c, 0xa4, 0x23, 0xa7, 0xf8, 0xdc, - 0x7c, 0x3b, 0xce, 0xa8, 0xdf, 0xbe, 0xb6, 0xfb, 0xac, 0x59, 0x3a, 0xdf, 0xfe, 0x1b, 0x25, 0x28, - 0x0a, 0x2d, 0x09, 0xd9, 0x82, 0xfc, 0x48, 0xfa, 0x6f, 0x46, 0xb1, 0x2b, 0x45, 0xaa, 0xfc, 0xbb, - 0xc3, 0xbd, 0x38, 0x19, 0x1e, 0x79, 0x02, 0xf5, 0xb8, 0x0b, 0x43, 0x22, 0x66, 0x4d, 0xdc, 0xf7, - 0xa0, 0x36, 0x4a, 0x18, 0xab, 0xcb, 0x11, 0x73, 0x85, 0x3c, 0x67, 0xe9, 0x5c, 0xe3, 0xbe, 0x3c, - 0x97, 0xc9, 0x6b, 0xc1, 0xb9, 0x6d, 0x3d, 0x7a, 0xfc, 0xb1, 0x08, 0x5a, 0x53, 0xe1, 0xc0, 0xc1, - 0xb9, 0xfd, 0xe8, 0xf1, 0xc7, 0x49, 0x49, 0x4c, 0x84, 0xac, 0xd1, 0x24, 0xb1, 0x35, 0x28, 0x60, - 0x00, 0x7c, 0x74, 0xc4, 0xc3, 0x0f, 0xf2, 0x10, 0xd6, 0xa4, 0xe2, 0x4d, 0x5c, 0x99, 0xc0, 0x53, - 0x14, 0xdf, 0xff, 0x22, 0x22, 0x6d, 0xc0, 0x93, 0x50, 0x55, 0xb7, 0x01, 0x4b, 0xe7, 0xd1, 0x8b, - 0x06, 0x35, 0x53, 0x7c, 0x19, 0xbf, 0x57, 0x80, 0x8a, 0x36, 0x28, 0xa4, 0x0a, 0x25, 0xb3, 0x33, - 0xe8, 0x98, 0x9f, 0x75, 0x76, 0x1b, 0x37, 0xc8, 0x7d, 0xb8, 0xd7, 0xed, 0xed, 0xf4, 0x4d, 0xb3, - 0xb3, 0x33, 0xb4, 0xfa, 0xa6, 0x25, 0x23, 0xa8, 0x1e, 0xb5, 0xbf, 0x38, 0xec, 0xf4, 0x86, 0xd6, - 0x6e, 0x67, 0xd8, 0xee, 0x1e, 0x0c, 0x1a, 0x19, 0xf2, 0x06, 0x34, 0x23, 0x4c, 0x99, 0xdc, 0x3e, - 0xec, 0x1f, 0xf7, 0x86, 0x8d, 0x2c, 0xb9, 0x03, 0xb7, 0xf6, 0xba, 0xbd, 0xf6, 0x81, 0x15, 0xe1, - 0xec, 0x1c, 0x0c, 0x3f, 0xb3, 0x3a, 0x3f, 0x7b, 0xd4, 0x35, 0xbf, 0x68, 0xe4, 0xd2, 0x10, 0xf6, - 0x87, 0x07, 0x3b, 0xb2, 0x84, 0x3c, 0xb9, 0x09, 0xeb, 0x88, 0x80, 0x59, 0xac, 0x61, 0xbf, 0x6f, - 0x0d, 0xfa, 0xfd, 0x5e, 0xa3, 0x40, 0x56, 0xa0, 0xd6, 0xed, 0x7d, 0xd6, 0x3e, 0xe8, 0xee, 0x5a, - 0x66, 0xa7, 0x7d, 0x70, 0xd8, 0x58, 0x22, 0xab, 0xb0, 0x9c, 0xc4, 0x2b, 0xb2, 0x22, 0x24, 0x5e, - 0xbf, 0xd7, 0xed, 0xf7, 0xac, 0xcf, 0x3a, 0xe6, 0xa0, 0xdb, 0xef, 0x35, 0x4a, 0x64, 0x03, 0x48, - 0x3c, 0x69, 0xff, 0xb0, 0xbd, 0xd3, 0x28, 0x93, 0x75, 0x58, 0x89, 0xc3, 0x9f, 0x75, 0xbe, 0x68, - 
0x00, 0x69, 0xc2, 0x1a, 0x36, 0xcc, 0xda, 0xee, 0x1c, 0xf4, 0x3f, 0xb7, 0x0e, 0xbb, 0xbd, 0xee, - 0xe1, 0xf1, 0x61, 0xa3, 0xc2, 0xe3, 0x58, 0x77, 0x3a, 0x56, 0xb7, 0x37, 0x38, 0xde, 0xdb, 0xeb, - 0xee, 0x74, 0x3b, 0xbd, 0x61, 0xa3, 0x8a, 0x35, 0xa7, 0x75, 0xbc, 0xc6, 0x32, 0x88, 0x3b, 0x94, - 0xd6, 0x6e, 0x77, 0xd0, 0xde, 0x3e, 0xe8, 0xec, 0x36, 0xea, 0xe4, 0x36, 0xdc, 0x1c, 0x76, 0x0e, - 0x8f, 0xfa, 0x66, 0xdb, 0xfc, 0x42, 0xde, 0xb1, 0xb4, 0xf6, 0xda, 0xdd, 0x83, 0x63, 0xb3, 0xd3, - 0x58, 0x26, 0x6f, 0xc1, 0x6d, 0xb3, 0xf3, 0xa3, 0xe3, 0xae, 0xd9, 0xd9, 0xb5, 0x7a, 0xfd, 0xdd, - 0x8e, 0xb5, 0xd7, 0x69, 0x0f, 0x8f, 0xcd, 0x8e, 0x75, 0xd8, 0x1d, 0x0c, 0xba, 0xbd, 0xa7, 0x8d, - 0x06, 0xb9, 0x07, 0x77, 0x15, 0x8a, 0x2a, 0x20, 0x81, 0xb5, 0xc2, 0xfa, 0x27, 0xa7, 0xb4, 0xd7, - 0xf9, 0xd9, 0xa1, 0x75, 0xd4, 0xe9, 0x98, 0x0d, 0x42, 0x5a, 0xb0, 0x11, 0x55, 0x8f, 0x15, 0x88, - 0xba, 0x57, 0x59, 0xda, 0x51, 0xc7, 0x3c, 0x6c, 0xf7, 0xd8, 0x04, 0xc7, 0xd2, 0xd6, 0x58, 0xb3, - 0xa3, 0xb4, 0x64, 0xb3, 0xd7, 0x09, 0x81, 0xba, 0x36, 0x2b, 0x7b, 0x6d, 0xb3, 0xb1, 0x41, 0x96, - 0xa1, 0x72, 0x78, 0x74, 0x64, 0x0d, 0xbb, 0x87, 0x9d, 0xfe, 0xf1, 0xb0, 0xb1, 0x49, 0xd6, 0xa1, - 0xd1, 0xed, 0x0d, 0x3b, 0x26, 0x9b, 0x6b, 0x99, 0xf5, 0xbf, 0x17, 0xc9, 0x1a, 0x2c, 0xcb, 0x96, - 0x4a, 0xe8, 0x1f, 0x16, 0xc9, 0x26, 0x90, 0xe3, 0x9e, 0xd9, 0x69, 0xef, 0xb2, 0x81, 0x53, 0x09, - 0xff, 0xa3, 0x28, 0xcc, 0x99, 0xbf, 0x9d, 0x53, 0xcc, 0x5e, 0xe4, 0x1f, 0x14, 0x7f, 0x82, 0xa8, - 0xaa, 0x3d, 0x1d, 0xf4, 0xaa, 0x97, 0x1f, 0x35, 0xd1, 0x3c, 0xb7, 0x20, 0x9a, 0x2f, 0xe8, 0x7e, - 0x6a, 0xba, 0xec, 0xf0, 0x36, 0xd4, 0xa6, 0xf8, 0x1c, 0x91, 0x78, 0xcf, 0x02, 0x84, 0xb3, 0x1c, - 0x02, 0xf1, 0x31, 0x8b, 0x85, 0xa7, 0x0f, 0x0b, 0x8b, 0x4f, 0x1f, 0xa6, 0xc9, 0x87, 0x4b, 0x69, - 0xf2, 0xe1, 0x03, 0x58, 0x41, 0xd2, 0xe4, 0xb8, 0xce, 0x54, 0x6a, 0x5d, 0xc4, 0x43, 0x82, 0x9c, - 0x44, 0x21, 0x5c, 0x8a, 0xa3, 0x52, 0x64, 0x15, 0x24, 0xa4, 0x28, 0xa4, 0xd5, 0x98, 0xa4, 0x8a, - 0x94, 0x43, 0x49, 0xaa, 0xaa, 0x06, 
0xfb, 0x32, 0xaa, 0xa1, 0xa2, 0xd5, 0x80, 0x70, 0x5e, 0xc3, - 0x03, 0x58, 0xa1, 0x97, 0xa1, 0x6f, 0x5b, 0xde, 0xcc, 0xfe, 0x72, 0xce, 0xfd, 0x2d, 0x6c, 0xae, - 0x03, 0xaa, 0x9a, 0xcb, 0x3c, 0xa1, 0xcf, 0xe1, 0xbb, 0x76, 0x68, 0x1b, 0xbf, 0x00, 0xa0, 0x4e, - 0x55, 0xfe, 0x20, 0xa3, 0xeb, 0xc9, 0x2b, 0x91, 0x55, 0x13, 0x3f, 0xf8, 0x3c, 0x86, 0x9e, 0x6f, - 0x9f, 0xd1, 0xae, 0x8c, 0xfb, 0x14, 0x01, 0xc8, 0x2d, 0xc8, 0x79, 0x33, 0xe9, 0x4a, 0x56, 0x96, - 0x01, 0xda, 0x67, 0x26, 0x83, 0x1a, 0x1f, 0x43, 0xb6, 0x3f, 0xbb, 0x96, 0x55, 0x6a, 0x42, 0x51, - 0x3e, 0x76, 0x9c, 0xe5, 0xee, 0x63, 0xf2, 0xd3, 0xf8, 0xf3, 0x19, 0x20, 0x26, 0x1d, 0x5c, 0xb9, - 0x23, 0x8c, 0x2c, 0x1b, 0xc5, 0x5a, 0x3c, 0xf5, 0xbd, 0x69, 0xfc, 0x45, 0x67, 0x60, 0x20, 0x61, - 0x5e, 0xbe, 0x05, 0xe5, 0xd0, 0x8b, 0x47, 0x2f, 0x2d, 0x85, 0xde, 0xbe, 0x0c, 0xf2, 0xa1, 0xf9, - 0xab, 0xe5, 0x92, 0xfe, 0x6a, 0x9b, 0x50, 0x1c, 0xfb, 0xde, 0xcc, 0x1a, 0x9f, 0xc8, 0x18, 0xec, - 0xec, 0x73, 0xf7, 0xc4, 0x58, 0x87, 0xd5, 0x58, 0x53, 0x04, 0x57, 0xb8, 0x0a, 0x2b, 0xe8, 0x09, - 0xc8, 0x92, 0x24, 0x0f, 0xf0, 0x00, 0x88, 0x0e, 0x8c, 0x3d, 0x86, 0x35, 0x97, 0xef, 0x70, 0xe1, - 0xc7, 0x83, 0x3f, 0x0d, 0x15, 0xed, 0x95, 0x30, 0xb2, 0x09, 0xab, 0x9f, 0x77, 0x87, 0xbd, 0xce, - 0x60, 0x60, 0x1d, 0x1d, 0x6f, 0x3f, 0xeb, 0x7c, 0x61, 0xed, 0xb7, 0x07, 0xfb, 0x8d, 0x1b, 0x8c, - 0x5e, 0xf6, 0x3a, 0x83, 0x61, 0x67, 0x37, 0x06, 0xcf, 0x90, 0x37, 0xa1, 0x75, 0xdc, 0x3b, 0x1e, - 0x74, 0x76, 0xad, 0xb4, 0x7c, 0x59, 0x46, 0x20, 0x44, 0x7a, 0x4a, 0xf6, 0xdc, 0x83, 0x5f, 0x84, - 0x7a, 0x3c, 0xd2, 0x0b, 0x01, 0x58, 0x3a, 0xe8, 0x3c, 0x6d, 0xef, 0x7c, 0x81, 0x8f, 0x0c, 0x0c, - 0x86, 0xed, 0x61, 0x77, 0xc7, 0x12, 0x8f, 0x0a, 0x30, 0x62, 0x9c, 0x21, 0x15, 0x28, 0xb6, 0x7b, - 0x3b, 0xfb, 0x7d, 0x73, 0xd0, 0xc8, 0x92, 0x37, 0x60, 0x53, 0x92, 0x89, 0x9d, 0xfe, 0xe1, 0x61, - 0x77, 0xc8, 0xcf, 0xa1, 0xe1, 0x17, 0x47, 0x8c, 0x2a, 0x3c, 0xb0, 0xa1, 0x1c, 0xbd, 0x87, 0xc0, - 0x69, 0x7b, 0x77, 0xd8, 0x6d, 0x0f, 0xa3, 0x83, 0xad, 0x71, 0x83, 0x1d, 
0x1d, 0x11, 0x98, 0x3f, - 0x6a, 0xd0, 0xc8, 0xe0, 0x65, 0x78, 0x09, 0xc4, 0xda, 0x1b, 0x59, 0x46, 0xcf, 0x22, 0xe8, 0x76, - 0x7f, 0xc8, 0xba, 0xf0, 0x4b, 0x50, 0x8f, 0x3f, 0x3b, 0x40, 0x1a, 0x50, 0x65, 0xf5, 0x6b, 0x55, - 0x00, 0x2c, 0x61, 0x8b, 0x1b, 0x19, 0x3c, 0xbc, 0x76, 0xfa, 0x87, 0xdd, 0xde, 0x53, 0x7e, 0xe2, - 0x35, 0xb2, 0x0c, 0xd4, 0x3f, 0x1e, 0x3e, 0xed, 0x2b, 0x50, 0x8e, 0xe5, 0xc0, 0xee, 0x34, 0xf2, - 0x0f, 0xbe, 0x84, 0x95, 0x85, 0x07, 0x0a, 0x58, 0xab, 0xfb, 0xc7, 0xc3, 0x9d, 0xfe, 0xa1, 0x5e, - 0x4f, 0x05, 0x8a, 0x3b, 0x07, 0xed, 0xee, 0x21, 0x37, 0xf6, 0xd4, 0xa0, 0x7c, 0xdc, 0x93, 0x9f, - 0xd9, 0xf8, 0xd3, 0x0a, 0x39, 0x46, 0x86, 0xf7, 0xba, 0xe6, 0x60, 0x68, 0x0d, 0x86, 0xed, 0xa7, - 0x9d, 0x46, 0x9e, 0xe5, 0x95, 0x34, 0xb9, 0xf0, 0xe0, 0x53, 0xa8, 0xc7, 0x7d, 0xbb, 0xe3, 0x46, - 0xba, 0x16, 0x6c, 0x6c, 0x77, 0x86, 0x9f, 0x77, 0x3a, 0x3d, 0x3e, 0xe5, 0x3b, 0x9d, 0xde, 0xd0, - 0x6c, 0x1f, 0x74, 0x87, 0x5f, 0x34, 0x32, 0x0f, 0x9e, 0x40, 0x23, 0xe9, 0x48, 0x11, 0xf3, 0x3c, - 0x79, 0x99, 0x8b, 0xca, 0x83, 0xff, 0x94, 0x81, 0xb5, 0x34, 0x1b, 0x22, 0x5b, 0x98, 0x82, 0xd8, - 0xb3, 0x23, 0x7f, 0xd0, 0xef, 0x59, 0xbd, 0x3e, 0x8f, 0x35, 0xde, 0x82, 0x8d, 0x44, 0x82, 0xec, - 0x45, 0x86, 0xdc, 0x82, 0xcd, 0x85, 0x4c, 0x96, 0xd9, 0x3f, 0xe6, 0x73, 0xd9, 0x84, 0xb5, 0x44, - 0x62, 0xc7, 0x34, 0xfb, 0x66, 0x23, 0x47, 0xbe, 0x05, 0xf7, 0x13, 0x29, 0x8b, 0x8c, 0x8e, 0xe4, - 0x83, 0xf2, 0xe4, 0x5d, 0x78, 0x7b, 0x01, 0x3b, 0xe2, 0x05, 0xac, 0xed, 0xf6, 0x01, 0xeb, 0x5e, - 0xa3, 0xf0, 0xe0, 0xef, 0xe5, 0x00, 0xa2, 0xcb, 0x93, 0xac, 0xfe, 0xdd, 0xf6, 0xb0, 0x7d, 0xd0, - 0x67, 0x7b, 0xc6, 0xec, 0x0f, 0x59, 0xe9, 0x66, 0xe7, 0x47, 0x8d, 0x1b, 0xa9, 0x29, 0xfd, 0x23, - 0xd6, 0xa1, 0x4d, 0x58, 0xc5, 0xf5, 0x77, 0xc0, 0xba, 0xc1, 0x96, 0x0b, 0x0f, 0x5b, 0xcf, 0xb9, - 0xa9, 0xe3, 0xa3, 0x3d, 0xb3, 0xdf, 0x1b, 0x5a, 0x83, 0xfd, 0xe3, 0xe1, 0x2e, 0x0f, 0x7a, 0xbf, - 0x63, 0x76, 0x8f, 0xb0, 0xcc, 0xfc, 0xcb, 0x10, 0x58, 0xd1, 0x05, 0xb6, 0xc1, 0x9f, 0xf6, 0x07, - 0x83, 0xee, 
0x91, 0xf5, 0xa3, 0xe3, 0x8e, 0xd9, 0xed, 0x0c, 0x78, 0xc6, 0xa5, 0x14, 0x38, 0xc3, - 0x2f, 0xb2, 0x35, 0x3b, 0x3c, 0xf8, 0x4c, 0x30, 0x49, 0x0c, 0xb5, 0x14, 0x07, 0x31, 0xac, 0x32, - 0x9b, 0x1d, 0xc6, 0x65, 0xa4, 0x94, 0x0c, 0xd7, 0xa4, 0xb1, 0x7c, 0x15, 0xc6, 0x3f, 0x2d, 0xec, - 0x7c, 0x9e, 0xad, 0x9a, 0x9e, 0xc4, 0x72, 0x71, 0xd6, 0x4a, 0x31, 0xa2, 0xbb, 0xbb, 0x26, 0xcf, - 0x50, 0x5f, 0x80, 0x32, 0xdc, 0x65, 0xb6, 0x08, 0x19, 0x1b, 0xc2, 0x50, 0x1a, 0xf2, 0x83, 0xa5, - 0xac, 0x3c, 0xfa, 0x87, 0x6f, 0x43, 0x59, 0x5d, 0xa2, 0x20, 0x3f, 0x84, 0x5a, 0x2c, 0x82, 0x05, - 0x91, 0x66, 0x8a, 0xb4, 0x80, 0x17, 0xad, 0x37, 0xd2, 0x13, 0x05, 0x09, 0xfe, 0x02, 0xc8, 0x62, - 0xec, 0x01, 0x72, 0xf7, 0x25, 0x61, 0x09, 0xb0, 0xd4, 0xb7, 0x5e, 0x19, 0xb8, 0x80, 0x1c, 0x6a, - 0xca, 0x14, 0x6c, 0xe7, 0x1b, 0x49, 0x05, 0x47, 0xac, 0xa1, 0xb7, 0xaf, 0x49, 0x15, 0xc5, 0x3d, - 0xe3, 0xc1, 0xf9, 0xb5, 0xe7, 0xfe, 0x03, 0x72, 0x3b, 0x6a, 0x84, 0x0e, 0x97, 0x05, 0x4a, 0xd1, - 0x55, 0x4b, 0xdb, 0xa5, 0xa1, 0xed, 0x4c, 0x02, 0xb2, 0x0b, 0x15, 0xed, 0xc9, 0x58, 0x72, 0xf3, - 0xda, 0xe7, 0x6d, 0x5b, 0xad, 0xb4, 0x24, 0xd1, 0xa4, 0xef, 0x41, 0x59, 0x3d, 0xd5, 0x49, 0x36, - 0xb5, 0xa7, 0x5f, 0xf5, 0xa7, 0x4b, 0x5b, 0xcd, 0xc5, 0x04, 0x91, 0x7f, 0x17, 0x2a, 0xda, 0x8b, - 0x9b, 0xaa, 0x15, 0x8b, 0xaf, 0x7a, 0xaa, 0x56, 0xa4, 0x3d, 0xd0, 0x79, 0x00, 0xeb, 0x42, 0x65, - 0x73, 0x42, 0xbf, 0xca, 0xf0, 0x90, 0xc5, 0xe1, 0x79, 0x98, 0x21, 0x4f, 0xa0, 0x24, 0x5f, 0x69, - 0x25, 0x1b, 0xe9, 0xaf, 0xd9, 0xb6, 0x36, 0x17, 0xe0, 0xa2, 0x29, 0x6d, 0x80, 0xe8, 0x2d, 0x4f, - 0x22, 0x3b, 0xbe, 0xf0, 0x36, 0xa8, 0x9a, 0x99, 0x94, 0x87, 0x3f, 0x77, 0xa1, 0xa2, 0x3d, 0xdb, - 0xa9, 0xc6, 0x64, 0xf1, 0xc9, 0x4f, 0x35, 0x26, 0x69, 0xaf, 0x7c, 0xfe, 0x10, 0x6a, 0xb1, 0xf7, - 0x37, 0xd5, 0x16, 0x49, 0x7b, 0xdd, 0x53, 0x6d, 0x91, 0xf4, 0x27, 0x3b, 0x77, 0xa1, 0xa2, 0xbd, - 0x89, 0xa9, 0x5a, 0xb4, 0xf8, 0x30, 0xa7, 0x6a, 0x51, 0xca, 0x13, 0x9a, 0x6c, 0x37, 0xc4, 0x1f, - 0xc4, 0x54, 0xbb, 0x21, 0xf5, 0x65, 0x4d, 0xb5, 
0x1b, 0xd2, 0x5f, 0xd1, 0x64, 0x4b, 0x4f, 0xbd, - 0xca, 0x41, 0x36, 0x63, 0x9a, 0x92, 0xe8, 0x79, 0x0f, 0xb5, 0xf4, 0x16, 0x1f, 0xf0, 0x78, 0x0a, - 0xab, 0x6a, 0xd1, 0xa8, 0x37, 0x35, 0x02, 0xd5, 0xa6, 0xd4, 0x97, 0x3b, 0x5a, 0x8d, 0x64, 0xea, - 0xc3, 0x0c, 0xf9, 0x0e, 0x14, 0xc5, 0x43, 0x05, 0x64, 0x3d, 0xf9, 0x70, 0x01, 0x36, 0x62, 0x23, - 0xfd, 0x3d, 0x03, 0x72, 0xc4, 0x37, 0xb4, 0xfe, 0x92, 0x80, 0xbe, 0x62, 0x53, 0x1e, 0x1f, 0x68, - 0xbd, 0x79, 0x5d, 0x72, 0x54, 0x62, 0xf2, 0xf5, 0x8b, 0xdb, 0xd7, 0x05, 0xad, 0x8a, 0x97, 0x78, - 0x5d, 0x74, 0xcd, 0xa7, 0x50, 0xd5, 0x1f, 0x43, 0x23, 0xfa, 0x3e, 0x4c, 0x96, 0x75, 0x2b, 0x35, - 0x4d, 0x14, 0xf4, 0x19, 0x6c, 0xa8, 0xf1, 0xd6, 0x23, 0x28, 0x05, 0xe4, 0x4e, 0x4a, 0x5c, 0xa5, - 0xd8, 0xa8, 0xdf, 0xbc, 0x36, 0xf0, 0xd2, 0xc3, 0x0c, 0x27, 0xb2, 0xb1, 0xf7, 0x8b, 0x22, 0x22, - 0x9b, 0xf6, 0x6c, 0x53, 0x44, 0x64, 0xd3, 0x1f, 0x3d, 0x6a, 0xc3, 0xb2, 0x16, 0x01, 0x8a, 0x31, - 0xeb, 0x6a, 0xbd, 0x2f, 0x86, 0x78, 0x6f, 0xa5, 0x19, 0x0e, 0xc8, 0x0e, 0x54, 0xf4, 0x20, 0x52, - 0x2f, 0xc9, 0xbe, 0xa9, 0x25, 0xe9, 0x11, 0xba, 0x1f, 0x66, 0xc8, 0x01, 0x34, 0x92, 0x21, 0x5f, - 0xd5, 0x16, 0x4e, 0x0b, 0x93, 0xdb, 0x4a, 0x24, 0xc6, 0x02, 0xc5, 0xb2, 0x75, 0x11, 0x7b, 0xa8, - 0xdf, 0xf3, 0x93, 0x47, 0x51, 0xfc, 0x01, 0x7f, 0x55, 0x5a, 0x22, 0x95, 0x37, 0xfb, 0x7e, 0xe6, - 0x61, 0x86, 0xec, 0x41, 0x35, 0x16, 0xf1, 0x30, 0x76, 0x55, 0x28, 0xd1, 0xcd, 0xa6, 0x9e, 0x96, - 0xe8, 0xe7, 0x21, 0xd4, 0xe3, 0x1e, 0x2e, 0xaa, 0x61, 0xa9, 0x6e, 0x38, 0x6a, 0xfa, 0xd2, 0xdd, - 0x62, 0xc8, 0xf7, 0xa1, 0xc2, 0x68, 0xb2, 0xf4, 0x84, 0x24, 0x1a, 0x9d, 0x4e, 0xce, 0x19, 0xc2, - 0x84, 0xcc, 0x96, 0xfb, 0x73, 0xd9, 0x0c, 0xef, 0xd7, 0x77, 0xf1, 0xa1, 0x73, 0xe9, 0x0c, 0xc7, - 0xe6, 0xff, 0x75, 0x0b, 0x21, 0x7b, 0x58, 0xf9, 0xd0, 0xc3, 0x08, 0x00, 0x37, 0x35, 0x1c, 0x01, - 0x7b, 0xbd, 0x36, 0xb4, 0xb1, 0x0d, 0x22, 0x4f, 0x6c, 0x0d, 0xbe, 0x66, 0x59, 0xe4, 0x13, 0x80, - 0xc8, 0xc3, 0x98, 0x24, 0xfc, 0x5c, 0xd5, 0x86, 0x4a, 0x71, 0x42, 0xee, 0xe0, 0x7e, 
0x57, 0x8e, - 0xb6, 0xfa, 0x91, 0x1c, 0xf7, 0xf9, 0x8d, 0x1d, 0xc9, 0xc9, 0x62, 0xbe, 0x0d, 0xb5, 0x03, 0xcf, - 0x7b, 0x3e, 0x9f, 0xa9, 0x6b, 0x2a, 0x71, 0x2f, 0xb0, 0x7d, 0x3b, 0x38, 0x6f, 0x25, 0x9a, 0x45, - 0xda, 0xb0, 0xa2, 0x48, 0x44, 0xe4, 0xe9, 0x1b, 0x47, 0x8a, 0x11, 0x86, 0x44, 0x01, 0x0f, 0x33, - 0xe4, 0x11, 0x54, 0x77, 0xe9, 0x88, 0x47, 0x29, 0xe1, 0x3e, 0x47, 0xab, 0x31, 0xff, 0x15, 0x74, - 0x56, 0x6a, 0xd5, 0x62, 0x40, 0x49, 0xe2, 0x22, 0xbf, 0x37, 0xfd, 0xcc, 0x88, 0x3b, 0x8f, 0xc5, - 0x48, 0xdc, 0x82, 0xef, 0xdb, 0x67, 0xb0, 0xb2, 0xe0, 0x59, 0xa6, 0xa8, 0xdb, 0x75, 0xfe, 0x68, - 0xad, 0xbb, 0xd7, 0x23, 0x88, 0x72, 0x7f, 0x00, 0x35, 0x0c, 0xd8, 0x7e, 0x42, 0xf1, 0x96, 0x71, - 0x22, 0x1c, 0x9f, 0x7e, 0x85, 0x39, 0x49, 0x92, 0x30, 0xc3, 0x53, 0xfe, 0xd4, 0x93, 0x76, 0x87, - 0x57, 0xcd, 0xeb, 0xe2, 0xbd, 0x62, 0x35, 0xaf, 0x69, 0xd7, 0x85, 0x3f, 0x85, 0xca, 0x53, 0x1a, - 0xca, 0x5b, 0xb1, 0x8a, 0x3f, 0x4a, 0x5c, 0x93, 0x6d, 0xa5, 0xdc, 0x65, 0x26, 0x1f, 0xf3, 0xac, - 0x2a, 0xc2, 0xc3, 0x86, 0x56, 0x8b, 0x9e, 0x75, 0x39, 0x01, 0x67, 0xdc, 0x87, 0x16, 0xe7, 0x45, - 0x35, 0x7c, 0x31, 0xae, 0x8f, 0x6a, 0x78, 0x5a, 0x58, 0x98, 0xef, 0xe3, 0x08, 0x68, 0xf7, 0x70, - 0x23, 0x16, 0x2c, 0x79, 0x65, 0x57, 0x35, 0x5f, 0x47, 0x7f, 0x0c, 0x30, 0x08, 0xbd, 0xd9, 0xae, - 0x4d, 0xa7, 0x9e, 0x1b, 0xd1, 0x84, 0xe8, 0x06, 0x68, 0xb4, 0x11, 0xb5, 0x6b, 0xa0, 0xe4, 0x73, - 0x8d, 0x37, 0x8d, 0x4d, 0x89, 0x92, 0x30, 0xae, 0xbb, 0x24, 0xaa, 0xba, 0x93, 0x72, 0x51, 0x94, - 0x13, 0x09, 0x88, 0x1c, 0xf7, 0x14, 0xa7, 0xb9, 0xe0, 0x13, 0xa8, 0xf6, 0x7a, 0x8a, 0x97, 0xdf, - 0xf7, 0xa0, 0x1c, 0x79, 0x3c, 0x6d, 0x46, 0x41, 0xa7, 0x62, 0xfe, 0x51, 0x8a, 0x7a, 0x2f, 0x7a, - 0x1b, 0xf5, 0x60, 0x15, 0x9b, 0xa3, 0x8e, 0x3f, 0x7e, 0x4f, 0x51, 0xbd, 0x54, 0xb6, 0xe8, 0xe6, - 0xa3, 0xf6, 0x4f, 0x9a, 0xb3, 0x0a, 0xdb, 0x3f, 0x0b, 0x4e, 0x0f, 0x6a, 0xff, 0x5c, 0xe7, 0xc5, - 0xa2, 0xf6, 0xcf, 0xf5, 0xfe, 0x12, 0x3d, 0x58, 0x4d, 0x71, 0x5f, 0x20, 0x52, 0x82, 0xbb, 0xde, - 0xb5, 0xa1, 0x95, 0x6a, 
0xe6, 0x26, 0x43, 0xd8, 0xc4, 0x3c, 0xed, 0xc9, 0x24, 0x61, 0x2d, 0x7f, - 0x53, 0xcb, 0x90, 0xe2, 0x01, 0x10, 0x63, 0x65, 0x12, 0x5e, 0x00, 0x3d, 0x68, 0x24, 0x0d, 0xcd, - 0xe4, 0x7a, 0xf4, 0xd6, 0x9d, 0x18, 0xcb, 0xbe, 0x68, 0x9c, 0x26, 0x9f, 0x29, 0x73, 0x77, 0xa2, - 0x8d, 0x77, 0xa2, 0x07, 0x36, 0x53, 0x8d, 0xf3, 0x4a, 0x1a, 0x48, 0xb5, 0x96, 0x93, 0x9f, 0x85, - 0xcd, 0xe4, 0x8a, 0x96, 0x25, 0xdf, 0x4d, 0x1b, 0xae, 0x6b, 0x59, 0xb9, 0x78, 0x87, 0x1e, 0x66, - 0x18, 0x21, 0xd6, 0x8d, 0xd2, 0x6a, 0x21, 0xa5, 0x58, 0xc7, 0xd5, 0x42, 0x4a, 0xb5, 0x62, 0x1f, - 0xc1, 0x72, 0xc2, 0x1e, 0xad, 0xd8, 0xe0, 0x74, 0x0b, 0xb6, 0x62, 0x83, 0xaf, 0x33, 0x63, 0x0f, - 0xa0, 0x91, 0xb4, 0x34, 0xab, 0xb9, 0xbe, 0xc6, 0x7a, 0xdd, 0xba, 0x73, 0x6d, 0x7a, 0xbc, 0x99, - 0x9a, 0x4d, 0x36, 0xd6, 0xcc, 0x45, 0x4b, 0x72, 0xac, 0x99, 0x69, 0xd6, 0xe4, 0xef, 0xc3, 0x92, - 0x19, 0x67, 0x18, 0x16, 0x75, 0xe5, 0xad, 0x56, 0x5a, 0x52, 0x24, 0xbf, 0x46, 0x6a, 0x6a, 0x45, - 0x55, 0x16, 0xd4, 0xd9, 0x6a, 0x1e, 0x17, 0x75, 0xda, 0xdb, 0xf7, 0x7e, 0xce, 0x38, 0x73, 0xc2, - 0xf3, 0xf9, 0xc9, 0xd6, 0xc8, 0x9b, 0x7e, 0x30, 0x7b, 0x1e, 0xbe, 0x3f, 0xb2, 0x83, 0x73, 0xf6, - 0x63, 0xfc, 0xc1, 0xc4, 0x65, 0xff, 0xfc, 0xd9, 0xe8, 0x64, 0x69, 0xe6, 0x7b, 0xa1, 0xf7, 0xed, - 0xff, 0x1b, 0x00, 0x00, 0xff, 0xff, 0xc6, 0xe5, 0xfa, 0xfd, 0xeb, 0x93, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// LightningClient is the client API for Lightning service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
-type LightningClient interface { - // lncli: `walletbalance` - //WalletBalance returns total unspent outputs(confirmed and unconfirmed), all - //confirmed unspent outputs and all unconfirmed unspent outputs under control - //of the wallet. - WalletBalance(ctx context.Context, in *WalletBalanceRequest, opts ...grpc.CallOption) (*WalletBalanceResponse, error) - // lncli: `getaddressbalances` - //GetAddressBalances returns the balance for each of the addresses in the wallet. - GetAddressBalances(ctx context.Context, in *GetAddressBalancesRequest, opts ...grpc.CallOption) (*GetAddressBalancesResponse, error) - // lncli: `channelbalance` - //ChannelBalance returns a report on the total funds across all open channels, - //categorized in local/remote, pending local/remote and unsettled local/remote - //balances. - ChannelBalance(ctx context.Context, in *ChannelBalanceRequest, opts ...grpc.CallOption) (*ChannelBalanceResponse, error) - // lncli: `listchaintxns` - //GetTransactions returns a list describing all the known transactions - //relevant to the wallet. - GetTransactions(ctx context.Context, in *GetTransactionsRequest, opts ...grpc.CallOption) (*TransactionDetails, error) - // lncli: `estimatefee` - //EstimateFee asks the chain backend to estimate the fee rate and total fees - //for a transaction that pays to multiple specified outputs. - // - //When using REST, the `AddrToAmount` map type can be set by appending - //`&AddrToAmount[
]=` to the URL. Unfortunately this - //map type doesn't appear in the REST API documentation because of a bug in - //the grpc-gateway library. - EstimateFee(ctx context.Context, in *EstimateFeeRequest, opts ...grpc.CallOption) (*EstimateFeeResponse, error) - // lncli: `sendcoins` - //SendCoins executes a request to send coins to a particular address. Unlike - //SendMany, this RPC call only allows creating a single output at a time. If - //neither target_conf, or sat_per_byte are set, then the internal wallet will - //consult its fee model to determine a fee for the default confirmation - //target. - SendCoins(ctx context.Context, in *SendCoinsRequest, opts ...grpc.CallOption) (*SendCoinsResponse, error) - // lncli: `listunspent` - //Deprecated, use walletrpc.ListUnspent instead. - // - //ListUnspent returns a list of all utxos spendable by the wallet with a - //number of confirmations between the specified minimum and maximum. - ListUnspent(ctx context.Context, in *ListUnspentRequest, opts ...grpc.CallOption) (*ListUnspentResponse, error) - // - //SubscribeTransactions creates a uni-directional stream from the server to - //the client in which any newly discovered transactions relevant to the - //wallet are sent over. - SubscribeTransactions(ctx context.Context, in *GetTransactionsRequest, opts ...grpc.CallOption) (Lightning_SubscribeTransactionsClient, error) - // lncli: `sendmany` - //SendMany handles a request for a transaction that creates multiple specified - //outputs in parallel. If neither target_conf, or sat_per_byte are set, then - //the internal wallet will consult its fee model to determine a fee for the - //default confirmation target. - SendMany(ctx context.Context, in *SendManyRequest, opts ...grpc.CallOption) (*SendManyResponse, error) - // lncli: `newaddress` - //NewAddress creates a new address under control of the local wallet. 
- NewAddress(ctx context.Context, in *NewAddressRequest, opts ...grpc.CallOption) (*NewAddressResponse, error) - // lncli: `signmessage` - //SignMessage signs a message with this node's private key. The returned - //signature string is `zbase32` encoded and pubkey recoverable, meaning that - //only the message digest and signature are needed for verification. - SignMessage(ctx context.Context, in *SignMessageRequest, opts ...grpc.CallOption) (*SignMessageResponse, error) - // lncli: `verifymessage` - //VerifyMessage verifies a signature over a msg. The signature must be - //zbase32 encoded and signed by an active node in the resident node's - //channel database. In addition to returning the validity of the signature, - //VerifyMessage also returns the recovered pubkey from the signature. - VerifyMessage(ctx context.Context, in *VerifyMessageRequest, opts ...grpc.CallOption) (*VerifyMessageResponse, error) - // lncli: `connect` - //ConnectPeer attempts to establish a connection to a remote peer. This is at - //the networking level, and is used for communication between nodes. This is - //distinct from establishing a channel with a peer. - ConnectPeer(ctx context.Context, in *ConnectPeerRequest, opts ...grpc.CallOption) (*ConnectPeerResponse, error) - // lncli: `disconnect` - //DisconnectPeer attempts to disconnect one peer from another identified by a - //given pubKey. In the case that we currently have a pending or active channel - //with the target peer, then this action will be not be allowed. - DisconnectPeer(ctx context.Context, in *DisconnectPeerRequest, opts ...grpc.CallOption) (*DisconnectPeerResponse, error) - // lncli: `listpeers` - //ListPeers returns a verbose listing of all currently active peers. 
- ListPeers(ctx context.Context, in *ListPeersRequest, opts ...grpc.CallOption) (*ListPeersResponse, error) - // - //SubscribePeerEvents creates a uni-directional stream from the server to - //the client in which any events relevant to the state of peers are sent - //over. Events include peers going online and offline. - SubscribePeerEvents(ctx context.Context, in *PeerEventSubscription, opts ...grpc.CallOption) (Lightning_SubscribePeerEventsClient, error) - // lncli: `getinfo` - //GetInfo returns general information concerning the lightning node including - //it's identity pubkey, alias, the chains it is connected to, and information - //concerning the number of open+pending channels. - GetInfo(ctx context.Context, in *GetInfoRequest, opts ...grpc.CallOption) (*GetInfoResponse, error) - //* lncli: `getrecoveryinfo` - //GetRecoveryInfo returns information concerning the recovery mode including - //whether it's in a recovery mode, whether the recovery is finished, and the - //progress made so far. - GetRecoveryInfo(ctx context.Context, in *GetRecoveryInfoRequest, opts ...grpc.CallOption) (*GetRecoveryInfoResponse, error) - // lncli: `pendingchannels` - //PendingChannels returns a list of all the channels that are currently - //considered "pending". A channel is pending if it has finished the funding - //workflow and is waiting for confirmations for the funding txn, or is in the - //process of closure, either initiated cooperatively or non-cooperatively. - PendingChannels(ctx context.Context, in *PendingChannelsRequest, opts ...grpc.CallOption) (*PendingChannelsResponse, error) - // lncli: `listchannels` - //ListChannels returns a description of all the open channels that this node - //is a participant in. 
- ListChannels(ctx context.Context, in *ListChannelsRequest, opts ...grpc.CallOption) (*ListChannelsResponse, error) - // - //SubscribeChannelEvents creates a uni-directional stream from the server to - //the client in which any updates relevant to the state of the channels are - //sent over. Events include new active channels, inactive channels, and closed - //channels. - SubscribeChannelEvents(ctx context.Context, in *ChannelEventSubscription, opts ...grpc.CallOption) (Lightning_SubscribeChannelEventsClient, error) - // lncli: `closedchannels` - //ClosedChannels returns a description of all the closed channels that - //this node was a participant in. - ClosedChannels(ctx context.Context, in *ClosedChannelsRequest, opts ...grpc.CallOption) (*ClosedChannelsResponse, error) - // - //OpenChannelSync is a synchronous version of the OpenChannel RPC call. This - //call is meant to be consumed by clients to the REST proxy. As with all - //other sync calls, all byte slices are intended to be populated as hex - //encoded strings. - OpenChannelSync(ctx context.Context, in *OpenChannelRequest, opts ...grpc.CallOption) (*ChannelPoint, error) - // lncli: `openchannel` - //OpenChannel attempts to open a singly funded channel specified in the - //request to a remote peer. Users are able to specify a target number of - //blocks that the funding transaction should be confirmed in, or a manual fee - //rate to us for the funding transaction. If neither are specified, then a - //lax block confirmation target is used. Each OpenStatusUpdate will return - //the pending channel ID of the in-progress channel. Depending on the - //arguments specified in the OpenChannelRequest, this pending channel ID can - //then be used to manually progress the channel funding flow. 
- OpenChannel(ctx context.Context, in *OpenChannelRequest, opts ...grpc.CallOption) (Lightning_OpenChannelClient, error) - // - //FundingStateStep is an advanced funding related call that allows the caller - //to either execute some preparatory steps for a funding workflow, or - //manually progress a funding workflow. The primary way a funding flow is - //identified is via its pending channel ID. As an example, this method can be - //used to specify that we're expecting a funding flow for a particular - //pending channel ID, for which we need to use specific parameters. - //Alternatively, this can be used to interactively drive PSBT signing for - //funding for partially complete funding transactions. - FundingStateStep(ctx context.Context, in *FundingTransitionMsg, opts ...grpc.CallOption) (*FundingStateStepResp, error) - // - //ChannelAcceptor dispatches a bi-directional streaming RPC in which - //OpenChannel requests are sent to the client and the client responds with - //a boolean that tells LND whether or not to accept the channel. This allows - //node operators to specify their own criteria for accepting inbound channels - //through a single persistent connection. - ChannelAcceptor(ctx context.Context, opts ...grpc.CallOption) (Lightning_ChannelAcceptorClient, error) - // lncli: `closechannel` - //CloseChannel attempts to close an active channel identified by its channel - //outpoint (ChannelPoint). The actions of this method can additionally be - //augmented to attempt a force close after a timeout period in the case of an - //inactive peer. If a non-force close (cooperative closure) is requested, - //then the user can specify either a target number of blocks until the - //closure transaction is confirmed, or a manual fee rate. If neither are - //specified, then a default lax, block confirmation target is used. 
- CloseChannel(ctx context.Context, in *CloseChannelRequest, opts ...grpc.CallOption) (Lightning_CloseChannelClient, error) - // lncli: `abandonchannel` - //AbandonChannel removes all channel state from the database except for a - //close summary. This method can be used to get rid of permanently unusable - //channels due to bugs fixed in newer versions of lnd. This method can also be - //used to remove externally funded channels where the funding transaction was - //never broadcast. Only available for non-externally funded channels in dev - //build. - AbandonChannel(ctx context.Context, in *AbandonChannelRequest, opts ...grpc.CallOption) (*AbandonChannelResponse, error) - // lncli: `sendpayment` - //Deprecated, use routerrpc.SendPaymentV2. SendPayment dispatches a - //bi-directional streaming RPC for sending payments through the Lightning - //Network. A single RPC invocation creates a persistent bi-directional - //stream allowing clients to rapidly send payments through the Lightning - //Network with a single persistent connection. - SendPayment(ctx context.Context, opts ...grpc.CallOption) (Lightning_SendPaymentClient, error) - // - //SendPaymentSync is the synchronous non-streaming version of SendPayment. - //This RPC is intended to be consumed by clients of the REST proxy. - //Additionally, this RPC expects the destination's public key and the payment - //hash (if any) to be encoded as hex strings. - SendPaymentSync(ctx context.Context, in *SendRequest, opts ...grpc.CallOption) (*SendResponse, error) - // lncli: `sendtoroute` - //Deprecated, use routerrpc.SendToRouteV2. SendToRoute is a bi-directional - //streaming RPC for sending payment through the Lightning Network. This - //method differs from SendPayment in that it allows users to specify a full - //route manually. This can be used for things like rebalancing, and atomic - //swaps. 
- SendToRoute(ctx context.Context, opts ...grpc.CallOption) (Lightning_SendToRouteClient, error) - // - //SendToRouteSync is a synchronous version of SendToRoute. It Will block - //until the payment either fails or succeeds. - SendToRouteSync(ctx context.Context, in *SendToRouteRequest, opts ...grpc.CallOption) (*SendResponse, error) - // lncli: `addinvoice` - //AddInvoice attempts to add a new invoice to the invoice database. Any - //duplicated invoices are rejected, therefore all invoices *must* have a - //unique payment preimage. - AddInvoice(ctx context.Context, in *Invoice, opts ...grpc.CallOption) (*AddInvoiceResponse, error) - // lncli: `listinvoices` - //ListInvoices returns a list of all the invoices currently stored within the - //database. Any active debug invoices are ignored. It has full support for - //paginated responses, allowing users to query for specific invoices through - //their add_index. This can be done by using either the first_index_offset or - //last_index_offset fields included in the response as the index_offset of the - //next request. By default, the first 100 invoices created will be returned. - //Backwards pagination is also supported through the Reversed flag. - ListInvoices(ctx context.Context, in *ListInvoiceRequest, opts ...grpc.CallOption) (*ListInvoiceResponse, error) - // lncli: `lookupinvoice` - //LookupInvoice attempts to look up an invoice according to its payment hash. - //The passed payment hash *must* be exactly 32 bytes, if not, an error is - //returned. - LookupInvoice(ctx context.Context, in *PaymentHash, opts ...grpc.CallOption) (*Invoice, error) - // - //SubscribeInvoices returns a uni-directional stream (server -> client) for - //notifying the client of newly added/settled invoices. The caller can - //optionally specify the add_index and/or the settle_index. 
If the add_index - //is specified, then we'll first start by sending add invoice events for all - //invoices with an add_index greater than the specified value. If the - //settle_index is specified, the next, we'll send out all settle events for - //invoices with a settle_index greater than the specified value. One or both - //of these fields can be set. If no fields are set, then we'll only send out - //the latest add/settle events. - SubscribeInvoices(ctx context.Context, in *InvoiceSubscription, opts ...grpc.CallOption) (Lightning_SubscribeInvoicesClient, error) - // lncli: `decodepayreq` - //DecodePayReq takes an encoded payment request string and attempts to decode - //it, returning a full description of the conditions encoded within the - //payment request. - DecodePayReq(ctx context.Context, in *PayReqString, opts ...grpc.CallOption) (*PayReq, error) - // lncli: `listpayments` - //ListPayments returns a list of all outgoing payments. - ListPayments(ctx context.Context, in *ListPaymentsRequest, opts ...grpc.CallOption) (*ListPaymentsResponse, error) - // - //DeleteAllPayments deletes all outgoing payments from DB. - DeleteAllPayments(ctx context.Context, in *DeleteAllPaymentsRequest, opts ...grpc.CallOption) (*DeleteAllPaymentsResponse, error) - // lncli: `describegraph` - //DescribeGraph returns a description of the latest graph state from the - //point of view of the node. The graph information is partitioned into two - //components: all the nodes/vertexes, and all the edges that connect the - //vertexes themselves. As this is a directed graph, the edges also contain - //the node directional specific routing policy which includes: the time lock - //delta, fee information, etc. - DescribeGraph(ctx context.Context, in *ChannelGraphRequest, opts ...grpc.CallOption) (*ChannelGraph, error) - // lncli: `getnodemetrics` - //GetNodeMetrics returns node metrics calculated from the graph. 
Currently - //the only supported metric is betweenness centrality of individual nodes. - GetNodeMetrics(ctx context.Context, in *NodeMetricsRequest, opts ...grpc.CallOption) (*NodeMetricsResponse, error) - // lncli: `getchaninfo` - //GetChanInfo returns the latest authenticated network announcement for the - //given channel identified by its channel ID: an 8-byte integer which - //uniquely identifies the location of transaction's funding output within the - //blockchain. - GetChanInfo(ctx context.Context, in *ChanInfoRequest, opts ...grpc.CallOption) (*ChannelEdge, error) - // lncli: `getnodeinfo` - //GetNodeInfo returns the latest advertised, aggregated, and authenticated - //channel information for the specified node identified by its public key. - GetNodeInfo(ctx context.Context, in *NodeInfoRequest, opts ...grpc.CallOption) (*NodeInfo, error) - // lncli: `queryroutes` - //QueryRoutes attempts to query the daemon's Channel Router for a possible - //route to a target destination capable of carrying a specific amount of - //satoshis. The returned route contains the full details required to craft and - //send an HTLC, also including the necessary information that should be - //present within the Sphinx packet encapsulated within the HTLC. - // - //When using REST, the `dest_custom_records` map type can be set by appending - //`&dest_custom_records[]=` - //to the URL. Unfortunately this map type doesn't appear in the REST API - //documentation because of a bug in the grpc-gateway library. - QueryRoutes(ctx context.Context, in *QueryRoutesRequest, opts ...grpc.CallOption) (*QueryRoutesResponse, error) - // lncli: `getnetworkinfo` - //GetNetworkInfo returns some basic stats about the known channel graph from - //the point of view of the node. 
- GetNetworkInfo(ctx context.Context, in *NetworkInfoRequest, opts ...grpc.CallOption) (*NetworkInfo, error) - // lncli: `stop` - //StopDaemon will send a shutdown request to the interrupt handler, triggering - //a graceful shutdown of the daemon. - StopDaemon(ctx context.Context, in *StopRequest, opts ...grpc.CallOption) (*StopResponse, error) - // - //SubscribeChannelGraph launches a streaming RPC that allows the caller to - //receive notifications upon any changes to the channel graph topology from - //the point of view of the responding node. Events notified include: new - //nodes coming online, nodes updating their authenticated attributes, new - //channels being advertised, updates in the routing policy for a directional - //channel edge, and when channels are closed on-chain. - SubscribeChannelGraph(ctx context.Context, in *GraphTopologySubscription, opts ...grpc.CallOption) (Lightning_SubscribeChannelGraphClient, error) - // lncli: `debuglevel` - //DebugLevel allows a caller to programmatically set the logging verbosity of - //lnd. The logging can be targeted according to a coarse daemon-wide logging - //level, or in a granular fashion to specify the logging for a target - //sub-system. - DebugLevel(ctx context.Context, in *DebugLevelRequest, opts ...grpc.CallOption) (*DebugLevelResponse, error) - // lncli: `feereport` - //FeeReport allows the caller to obtain a report detailing the current fee - //schedule enforced by the node globally for each channel. - FeeReport(ctx context.Context, in *FeeReportRequest, opts ...grpc.CallOption) (*FeeReportResponse, error) - // lncli: `updatechanpolicy` - //UpdateChannelPolicy allows the caller to update the fee schedule and - //channel policies for all channels globally, or a particular channel. 
- UpdateChannelPolicy(ctx context.Context, in *PolicyUpdateRequest, opts ...grpc.CallOption) (*PolicyUpdateResponse, error) - // lncli: `fwdinghistory` - //ForwardingHistory allows the caller to query the htlcswitch for a record of - //all HTLCs forwarded within the target time range, and integer offset - //within that time range. If no time-range is specified, then the first chunk - //of the past 24 hrs of forwarding history are returned. - // - //A list of forwarding events are returned. The size of each forwarding event - //is 40 bytes, and the max message size able to be returned in gRPC is 4 MiB. - //As a result each message can only contain 50k entries. Each response has - //the index offset of the last entry. The index offset can be provided to the - //request to allow the caller to skip a series of records. - ForwardingHistory(ctx context.Context, in *ForwardingHistoryRequest, opts ...grpc.CallOption) (*ForwardingHistoryResponse, error) - // lncli: `exportchanbackup` - //ExportChannelBackup attempts to return an encrypted static channel backup - //for the target channel identified by it channel point. The backup is - //encrypted with a key generated from the aezeed seed of the user. The - //returned backup can either be restored using the RestoreChannelBackup - //method once lnd is running, or via the InitWallet and UnlockWallet methods - //from the WalletUnlocker service. - ExportChannelBackup(ctx context.Context, in *ExportChannelBackupRequest, opts ...grpc.CallOption) (*ChannelBackup, error) - // - //ExportAllChannelBackups returns static channel backups for all existing - //channels known to lnd. A set of regular singular static channel backups for - //each channel are returned. Additionally, a multi-channel backup is returned - //as well, which contains a single encrypted blob containing the backups of - //each channel. 
- ExportAllChannelBackups(ctx context.Context, in *ChanBackupExportRequest, opts ...grpc.CallOption) (*ChanBackupSnapshot, error) - // - //VerifyChanBackup allows a caller to verify the integrity of a channel backup - //snapshot. This method will accept either a packed Single or a packed Multi. - //Specifying both will result in an error. - VerifyChanBackup(ctx context.Context, in *ChanBackupSnapshot, opts ...grpc.CallOption) (*VerifyChanBackupResponse, error) - // lncli: `restorechanbackup` - //RestoreChannelBackups accepts a set of singular channel backups, or a - //single encrypted multi-chan backup and attempts to recover any funds - //remaining within the channel. If we are able to unpack the backup, then the - //new channel will be shown under listchannels, as well as pending channels. - RestoreChannelBackups(ctx context.Context, in *RestoreChanBackupRequest, opts ...grpc.CallOption) (*RestoreBackupResponse, error) - // - //SubscribeChannelBackups allows a client to sub-subscribe to the most up to - //date information concerning the state of all channel backups. Each time a - //new channel is added, we return the new set of channels, along with a - //multi-chan backup containing the backup info for all channels. Each time a - //channel is closed, we send a new update, which contains new new chan back - //ups, but the updated set of encrypted multi-chan backups with the closed - //channel(s) removed. - SubscribeChannelBackups(ctx context.Context, in *ChannelBackupSubscription, opts ...grpc.CallOption) (Lightning_SubscribeChannelBackupsClient, error) - // lncli: `bakemacaroon` - //BakeMacaroon allows the creation of a new macaroon with custom read and - //write permissions. No first-party caveats are added since this can be done - //offline. - BakeMacaroon(ctx context.Context, in *BakeMacaroonRequest, opts ...grpc.CallOption) (*BakeMacaroonResponse, error) - // lncli: `listmacaroonids` - //ListMacaroonIDs returns all root key IDs that are in use. 
- ListMacaroonIDs(ctx context.Context, in *ListMacaroonIDsRequest, opts ...grpc.CallOption) (*ListMacaroonIDsResponse, error) - // lncli: `deletemacaroonid` - //DeleteMacaroonID deletes the specified macaroon ID and invalidates all - //macaroons derived from that ID. - DeleteMacaroonID(ctx context.Context, in *DeleteMacaroonIDRequest, opts ...grpc.CallOption) (*DeleteMacaroonIDResponse, error) - // lncli: `listpermissions` - //ListPermissions lists all RPC method URIs and their required macaroon - //permissions to access them. - ListPermissions(ctx context.Context, in *ListPermissionsRequest, opts ...grpc.CallOption) (*ListPermissionsResponse, error) - //Scan over the chain to find any transactions which may not have been recorded in the wallet's database - ReSync(ctx context.Context, in *ReSyncChainRequest, opts ...grpc.CallOption) (*ReSyncChainResponse, error) - //Stop a re-synchronization job before it's completion - StopReSync(ctx context.Context, in *StopReSyncRequest, opts ...grpc.CallOption) (*StopReSyncResponse, error) -} - -type lightningClient struct { - cc *grpc.ClientConn -} - -func NewLightningClient(cc *grpc.ClientConn) LightningClient { - return &lightningClient{cc} -} - -func (c *lightningClient) WalletBalance(ctx context.Context, in *WalletBalanceRequest, opts ...grpc.CallOption) (*WalletBalanceResponse, error) { - out := new(WalletBalanceResponse) - err := c.cc.Invoke(ctx, "/lnrpc.Lightning/WalletBalance", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *lightningClient) GetAddressBalances(ctx context.Context, in *GetAddressBalancesRequest, opts ...grpc.CallOption) (*GetAddressBalancesResponse, error) { - out := new(GetAddressBalancesResponse) - err := c.cc.Invoke(ctx, "/lnrpc.Lightning/GetAddressBalances", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *lightningClient) ChannelBalance(ctx context.Context, in *ChannelBalanceRequest, opts ...grpc.CallOption) (*ChannelBalanceResponse, error) { - out := new(ChannelBalanceResponse) - err := c.cc.Invoke(ctx, "/lnrpc.Lightning/ChannelBalance", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *lightningClient) GetTransactions(ctx context.Context, in *GetTransactionsRequest, opts ...grpc.CallOption) (*TransactionDetails, error) { - out := new(TransactionDetails) - err := c.cc.Invoke(ctx, "/lnrpc.Lightning/GetTransactions", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *lightningClient) EstimateFee(ctx context.Context, in *EstimateFeeRequest, opts ...grpc.CallOption) (*EstimateFeeResponse, error) { - out := new(EstimateFeeResponse) - err := c.cc.Invoke(ctx, "/lnrpc.Lightning/EstimateFee", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *lightningClient) SendCoins(ctx context.Context, in *SendCoinsRequest, opts ...grpc.CallOption) (*SendCoinsResponse, error) { - out := new(SendCoinsResponse) - err := c.cc.Invoke(ctx, "/lnrpc.Lightning/SendCoins", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *lightningClient) ListUnspent(ctx context.Context, in *ListUnspentRequest, opts ...grpc.CallOption) (*ListUnspentResponse, error) { - out := new(ListUnspentResponse) - err := c.cc.Invoke(ctx, "/lnrpc.Lightning/ListUnspent", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *lightningClient) SubscribeTransactions(ctx context.Context, in *GetTransactionsRequest, opts ...grpc.CallOption) (Lightning_SubscribeTransactionsClient, error) { - stream, err := c.cc.NewStream(ctx, &_Lightning_serviceDesc.Streams[0], "/lnrpc.Lightning/SubscribeTransactions", opts...) 
- if err != nil { - return nil, err - } - x := &lightningSubscribeTransactionsClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Lightning_SubscribeTransactionsClient interface { - Recv() (*Transaction, error) - grpc.ClientStream -} - -type lightningSubscribeTransactionsClient struct { - grpc.ClientStream -} - -func (x *lightningSubscribeTransactionsClient) Recv() (*Transaction, error) { - m := new(Transaction) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *lightningClient) SendMany(ctx context.Context, in *SendManyRequest, opts ...grpc.CallOption) (*SendManyResponse, error) { - out := new(SendManyResponse) - err := c.cc.Invoke(ctx, "/lnrpc.Lightning/SendMany", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *lightningClient) NewAddress(ctx context.Context, in *NewAddressRequest, opts ...grpc.CallOption) (*NewAddressResponse, error) { - out := new(NewAddressResponse) - err := c.cc.Invoke(ctx, "/lnrpc.Lightning/NewAddress", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *lightningClient) SignMessage(ctx context.Context, in *SignMessageRequest, opts ...grpc.CallOption) (*SignMessageResponse, error) { - out := new(SignMessageResponse) - err := c.cc.Invoke(ctx, "/lnrpc.Lightning/SignMessage", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *lightningClient) VerifyMessage(ctx context.Context, in *VerifyMessageRequest, opts ...grpc.CallOption) (*VerifyMessageResponse, error) { - out := new(VerifyMessageResponse) - err := c.cc.Invoke(ctx, "/lnrpc.Lightning/VerifyMessage", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *lightningClient) ConnectPeer(ctx context.Context, in *ConnectPeerRequest, opts ...grpc.CallOption) (*ConnectPeerResponse, error) { - out := new(ConnectPeerResponse) - err := c.cc.Invoke(ctx, "/lnrpc.Lightning/ConnectPeer", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *lightningClient) DisconnectPeer(ctx context.Context, in *DisconnectPeerRequest, opts ...grpc.CallOption) (*DisconnectPeerResponse, error) { - out := new(DisconnectPeerResponse) - err := c.cc.Invoke(ctx, "/lnrpc.Lightning/DisconnectPeer", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *lightningClient) ListPeers(ctx context.Context, in *ListPeersRequest, opts ...grpc.CallOption) (*ListPeersResponse, error) { - out := new(ListPeersResponse) - err := c.cc.Invoke(ctx, "/lnrpc.Lightning/ListPeers", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *lightningClient) SubscribePeerEvents(ctx context.Context, in *PeerEventSubscription, opts ...grpc.CallOption) (Lightning_SubscribePeerEventsClient, error) { - stream, err := c.cc.NewStream(ctx, &_Lightning_serviceDesc.Streams[1], "/lnrpc.Lightning/SubscribePeerEvents", opts...) 
- if err != nil { - return nil, err - } - x := &lightningSubscribePeerEventsClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Lightning_SubscribePeerEventsClient interface { - Recv() (*PeerEvent, error) - grpc.ClientStream -} - -type lightningSubscribePeerEventsClient struct { - grpc.ClientStream -} - -func (x *lightningSubscribePeerEventsClient) Recv() (*PeerEvent, error) { - m := new(PeerEvent) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *lightningClient) GetInfo(ctx context.Context, in *GetInfoRequest, opts ...grpc.CallOption) (*GetInfoResponse, error) { - out := new(GetInfoResponse) - err := c.cc.Invoke(ctx, "/lnrpc.Lightning/GetInfo", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *lightningClient) GetRecoveryInfo(ctx context.Context, in *GetRecoveryInfoRequest, opts ...grpc.CallOption) (*GetRecoveryInfoResponse, error) { - out := new(GetRecoveryInfoResponse) - err := c.cc.Invoke(ctx, "/lnrpc.Lightning/GetRecoveryInfo", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *lightningClient) PendingChannels(ctx context.Context, in *PendingChannelsRequest, opts ...grpc.CallOption) (*PendingChannelsResponse, error) { - out := new(PendingChannelsResponse) - err := c.cc.Invoke(ctx, "/lnrpc.Lightning/PendingChannels", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *lightningClient) ListChannels(ctx context.Context, in *ListChannelsRequest, opts ...grpc.CallOption) (*ListChannelsResponse, error) { - out := new(ListChannelsResponse) - err := c.cc.Invoke(ctx, "/lnrpc.Lightning/ListChannels", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *lightningClient) SubscribeChannelEvents(ctx context.Context, in *ChannelEventSubscription, opts ...grpc.CallOption) (Lightning_SubscribeChannelEventsClient, error) { - stream, err := c.cc.NewStream(ctx, &_Lightning_serviceDesc.Streams[2], "/lnrpc.Lightning/SubscribeChannelEvents", opts...) - if err != nil { - return nil, err - } - x := &lightningSubscribeChannelEventsClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Lightning_SubscribeChannelEventsClient interface { - Recv() (*ChannelEventUpdate, error) - grpc.ClientStream -} - -type lightningSubscribeChannelEventsClient struct { - grpc.ClientStream -} - -func (x *lightningSubscribeChannelEventsClient) Recv() (*ChannelEventUpdate, error) { - m := new(ChannelEventUpdate) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *lightningClient) ClosedChannels(ctx context.Context, in *ClosedChannelsRequest, opts ...grpc.CallOption) (*ClosedChannelsResponse, error) { - out := new(ClosedChannelsResponse) - err := c.cc.Invoke(ctx, "/lnrpc.Lightning/ClosedChannels", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *lightningClient) OpenChannelSync(ctx context.Context, in *OpenChannelRequest, opts ...grpc.CallOption) (*ChannelPoint, error) { - out := new(ChannelPoint) - err := c.cc.Invoke(ctx, "/lnrpc.Lightning/OpenChannelSync", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *lightningClient) OpenChannel(ctx context.Context, in *OpenChannelRequest, opts ...grpc.CallOption) (Lightning_OpenChannelClient, error) { - stream, err := c.cc.NewStream(ctx, &_Lightning_serviceDesc.Streams[3], "/lnrpc.Lightning/OpenChannel", opts...) 
- if err != nil { - return nil, err - } - x := &lightningOpenChannelClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Lightning_OpenChannelClient interface { - Recv() (*OpenStatusUpdate, error) - grpc.ClientStream -} - -type lightningOpenChannelClient struct { - grpc.ClientStream -} - -func (x *lightningOpenChannelClient) Recv() (*OpenStatusUpdate, error) { - m := new(OpenStatusUpdate) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *lightningClient) FundingStateStep(ctx context.Context, in *FundingTransitionMsg, opts ...grpc.CallOption) (*FundingStateStepResp, error) { - out := new(FundingStateStepResp) - err := c.cc.Invoke(ctx, "/lnrpc.Lightning/FundingStateStep", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *lightningClient) ChannelAcceptor(ctx context.Context, opts ...grpc.CallOption) (Lightning_ChannelAcceptorClient, error) { - stream, err := c.cc.NewStream(ctx, &_Lightning_serviceDesc.Streams[4], "/lnrpc.Lightning/ChannelAcceptor", opts...) 
- if err != nil { - return nil, err - } - x := &lightningChannelAcceptorClient{stream} - return x, nil -} - -type Lightning_ChannelAcceptorClient interface { - Send(*ChannelAcceptResponse) error - Recv() (*ChannelAcceptRequest, error) - grpc.ClientStream -} - -type lightningChannelAcceptorClient struct { - grpc.ClientStream -} - -func (x *lightningChannelAcceptorClient) Send(m *ChannelAcceptResponse) error { - return x.ClientStream.SendMsg(m) -} - -func (x *lightningChannelAcceptorClient) Recv() (*ChannelAcceptRequest, error) { - m := new(ChannelAcceptRequest) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *lightningClient) CloseChannel(ctx context.Context, in *CloseChannelRequest, opts ...grpc.CallOption) (Lightning_CloseChannelClient, error) { - stream, err := c.cc.NewStream(ctx, &_Lightning_serviceDesc.Streams[5], "/lnrpc.Lightning/CloseChannel", opts...) - if err != nil { - return nil, err - } - x := &lightningCloseChannelClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Lightning_CloseChannelClient interface { - Recv() (*CloseStatusUpdate, error) - grpc.ClientStream -} - -type lightningCloseChannelClient struct { - grpc.ClientStream -} - -func (x *lightningCloseChannelClient) Recv() (*CloseStatusUpdate, error) { - m := new(CloseStatusUpdate) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *lightningClient) AbandonChannel(ctx context.Context, in *AbandonChannelRequest, opts ...grpc.CallOption) (*AbandonChannelResponse, error) { - out := new(AbandonChannelResponse) - err := c.cc.Invoke(ctx, "/lnrpc.Lightning/AbandonChannel", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// Deprecated: Do not use. 
-func (c *lightningClient) SendPayment(ctx context.Context, opts ...grpc.CallOption) (Lightning_SendPaymentClient, error) { - stream, err := c.cc.NewStream(ctx, &_Lightning_serviceDesc.Streams[6], "/lnrpc.Lightning/SendPayment", opts...) - if err != nil { - return nil, err - } - x := &lightningSendPaymentClient{stream} - return x, nil -} - -type Lightning_SendPaymentClient interface { - Send(*SendRequest) error - Recv() (*SendResponse, error) - grpc.ClientStream -} - -type lightningSendPaymentClient struct { - grpc.ClientStream -} - -func (x *lightningSendPaymentClient) Send(m *SendRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *lightningSendPaymentClient) Recv() (*SendResponse, error) { - m := new(SendResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *lightningClient) SendPaymentSync(ctx context.Context, in *SendRequest, opts ...grpc.CallOption) (*SendResponse, error) { - out := new(SendResponse) - err := c.cc.Invoke(ctx, "/lnrpc.Lightning/SendPaymentSync", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// Deprecated: Do not use. -func (c *lightningClient) SendToRoute(ctx context.Context, opts ...grpc.CallOption) (Lightning_SendToRouteClient, error) { - stream, err := c.cc.NewStream(ctx, &_Lightning_serviceDesc.Streams[7], "/lnrpc.Lightning/SendToRoute", opts...) 
- if err != nil { - return nil, err - } - x := &lightningSendToRouteClient{stream} - return x, nil -} - -type Lightning_SendToRouteClient interface { - Send(*SendToRouteRequest) error - Recv() (*SendResponse, error) - grpc.ClientStream -} - -type lightningSendToRouteClient struct { - grpc.ClientStream -} - -func (x *lightningSendToRouteClient) Send(m *SendToRouteRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *lightningSendToRouteClient) Recv() (*SendResponse, error) { - m := new(SendResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *lightningClient) SendToRouteSync(ctx context.Context, in *SendToRouteRequest, opts ...grpc.CallOption) (*SendResponse, error) { - out := new(SendResponse) - err := c.cc.Invoke(ctx, "/lnrpc.Lightning/SendToRouteSync", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *lightningClient) AddInvoice(ctx context.Context, in *Invoice, opts ...grpc.CallOption) (*AddInvoiceResponse, error) { - out := new(AddInvoiceResponse) - err := c.cc.Invoke(ctx, "/lnrpc.Lightning/AddInvoice", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *lightningClient) ListInvoices(ctx context.Context, in *ListInvoiceRequest, opts ...grpc.CallOption) (*ListInvoiceResponse, error) { - out := new(ListInvoiceResponse) - err := c.cc.Invoke(ctx, "/lnrpc.Lightning/ListInvoices", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *lightningClient) LookupInvoice(ctx context.Context, in *PaymentHash, opts ...grpc.CallOption) (*Invoice, error) { - out := new(Invoice) - err := c.cc.Invoke(ctx, "/lnrpc.Lightning/LookupInvoice", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *lightningClient) SubscribeInvoices(ctx context.Context, in *InvoiceSubscription, opts ...grpc.CallOption) (Lightning_SubscribeInvoicesClient, error) { - stream, err := c.cc.NewStream(ctx, &_Lightning_serviceDesc.Streams[8], "/lnrpc.Lightning/SubscribeInvoices", opts...) - if err != nil { - return nil, err - } - x := &lightningSubscribeInvoicesClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Lightning_SubscribeInvoicesClient interface { - Recv() (*Invoice, error) - grpc.ClientStream -} - -type lightningSubscribeInvoicesClient struct { - grpc.ClientStream -} - -func (x *lightningSubscribeInvoicesClient) Recv() (*Invoice, error) { - m := new(Invoice) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *lightningClient) DecodePayReq(ctx context.Context, in *PayReqString, opts ...grpc.CallOption) (*PayReq, error) { - out := new(PayReq) - err := c.cc.Invoke(ctx, "/lnrpc.Lightning/DecodePayReq", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *lightningClient) ListPayments(ctx context.Context, in *ListPaymentsRequest, opts ...grpc.CallOption) (*ListPaymentsResponse, error) { - out := new(ListPaymentsResponse) - err := c.cc.Invoke(ctx, "/lnrpc.Lightning/ListPayments", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *lightningClient) DeleteAllPayments(ctx context.Context, in *DeleteAllPaymentsRequest, opts ...grpc.CallOption) (*DeleteAllPaymentsResponse, error) { - out := new(DeleteAllPaymentsResponse) - err := c.cc.Invoke(ctx, "/lnrpc.Lightning/DeleteAllPayments", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *lightningClient) DescribeGraph(ctx context.Context, in *ChannelGraphRequest, opts ...grpc.CallOption) (*ChannelGraph, error) { - out := new(ChannelGraph) - err := c.cc.Invoke(ctx, "/lnrpc.Lightning/DescribeGraph", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *lightningClient) GetNodeMetrics(ctx context.Context, in *NodeMetricsRequest, opts ...grpc.CallOption) (*NodeMetricsResponse, error) { - out := new(NodeMetricsResponse) - err := c.cc.Invoke(ctx, "/lnrpc.Lightning/GetNodeMetrics", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *lightningClient) GetChanInfo(ctx context.Context, in *ChanInfoRequest, opts ...grpc.CallOption) (*ChannelEdge, error) { - out := new(ChannelEdge) - err := c.cc.Invoke(ctx, "/lnrpc.Lightning/GetChanInfo", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *lightningClient) GetNodeInfo(ctx context.Context, in *NodeInfoRequest, opts ...grpc.CallOption) (*NodeInfo, error) { - out := new(NodeInfo) - err := c.cc.Invoke(ctx, "/lnrpc.Lightning/GetNodeInfo", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *lightningClient) QueryRoutes(ctx context.Context, in *QueryRoutesRequest, opts ...grpc.CallOption) (*QueryRoutesResponse, error) { - out := new(QueryRoutesResponse) - err := c.cc.Invoke(ctx, "/lnrpc.Lightning/QueryRoutes", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *lightningClient) GetNetworkInfo(ctx context.Context, in *NetworkInfoRequest, opts ...grpc.CallOption) (*NetworkInfo, error) { - out := new(NetworkInfo) - err := c.cc.Invoke(ctx, "/lnrpc.Lightning/GetNetworkInfo", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *lightningClient) StopDaemon(ctx context.Context, in *StopRequest, opts ...grpc.CallOption) (*StopResponse, error) { - out := new(StopResponse) - err := c.cc.Invoke(ctx, "/lnrpc.Lightning/StopDaemon", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *lightningClient) SubscribeChannelGraph(ctx context.Context, in *GraphTopologySubscription, opts ...grpc.CallOption) (Lightning_SubscribeChannelGraphClient, error) { - stream, err := c.cc.NewStream(ctx, &_Lightning_serviceDesc.Streams[9], "/lnrpc.Lightning/SubscribeChannelGraph", opts...) - if err != nil { - return nil, err - } - x := &lightningSubscribeChannelGraphClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Lightning_SubscribeChannelGraphClient interface { - Recv() (*GraphTopologyUpdate, error) - grpc.ClientStream -} - -type lightningSubscribeChannelGraphClient struct { - grpc.ClientStream -} - -func (x *lightningSubscribeChannelGraphClient) Recv() (*GraphTopologyUpdate, error) { - m := new(GraphTopologyUpdate) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *lightningClient) DebugLevel(ctx context.Context, in *DebugLevelRequest, opts ...grpc.CallOption) (*DebugLevelResponse, error) { - out := new(DebugLevelResponse) - err := c.cc.Invoke(ctx, "/lnrpc.Lightning/DebugLevel", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *lightningClient) FeeReport(ctx context.Context, in *FeeReportRequest, opts ...grpc.CallOption) (*FeeReportResponse, error) { - out := new(FeeReportResponse) - err := c.cc.Invoke(ctx, "/lnrpc.Lightning/FeeReport", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *lightningClient) UpdateChannelPolicy(ctx context.Context, in *PolicyUpdateRequest, opts ...grpc.CallOption) (*PolicyUpdateResponse, error) { - out := new(PolicyUpdateResponse) - err := c.cc.Invoke(ctx, "/lnrpc.Lightning/UpdateChannelPolicy", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *lightningClient) ForwardingHistory(ctx context.Context, in *ForwardingHistoryRequest, opts ...grpc.CallOption) (*ForwardingHistoryResponse, error) { - out := new(ForwardingHistoryResponse) - err := c.cc.Invoke(ctx, "/lnrpc.Lightning/ForwardingHistory", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *lightningClient) ExportChannelBackup(ctx context.Context, in *ExportChannelBackupRequest, opts ...grpc.CallOption) (*ChannelBackup, error) { - out := new(ChannelBackup) - err := c.cc.Invoke(ctx, "/lnrpc.Lightning/ExportChannelBackup", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *lightningClient) ExportAllChannelBackups(ctx context.Context, in *ChanBackupExportRequest, opts ...grpc.CallOption) (*ChanBackupSnapshot, error) { - out := new(ChanBackupSnapshot) - err := c.cc.Invoke(ctx, "/lnrpc.Lightning/ExportAllChannelBackups", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *lightningClient) VerifyChanBackup(ctx context.Context, in *ChanBackupSnapshot, opts ...grpc.CallOption) (*VerifyChanBackupResponse, error) { - out := new(VerifyChanBackupResponse) - err := c.cc.Invoke(ctx, "/lnrpc.Lightning/VerifyChanBackup", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *lightningClient) RestoreChannelBackups(ctx context.Context, in *RestoreChanBackupRequest, opts ...grpc.CallOption) (*RestoreBackupResponse, error) { - out := new(RestoreBackupResponse) - err := c.cc.Invoke(ctx, "/lnrpc.Lightning/RestoreChannelBackups", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *lightningClient) SubscribeChannelBackups(ctx context.Context, in *ChannelBackupSubscription, opts ...grpc.CallOption) (Lightning_SubscribeChannelBackupsClient, error) { - stream, err := c.cc.NewStream(ctx, &_Lightning_serviceDesc.Streams[10], "/lnrpc.Lightning/SubscribeChannelBackups", opts...) - if err != nil { - return nil, err - } - x := &lightningSubscribeChannelBackupsClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Lightning_SubscribeChannelBackupsClient interface { - Recv() (*ChanBackupSnapshot, error) - grpc.ClientStream -} - -type lightningSubscribeChannelBackupsClient struct { - grpc.ClientStream -} - -func (x *lightningSubscribeChannelBackupsClient) Recv() (*ChanBackupSnapshot, error) { - m := new(ChanBackupSnapshot) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *lightningClient) BakeMacaroon(ctx context.Context, in *BakeMacaroonRequest, opts ...grpc.CallOption) (*BakeMacaroonResponse, error) { - out := new(BakeMacaroonResponse) - err := c.cc.Invoke(ctx, "/lnrpc.Lightning/BakeMacaroon", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *lightningClient) ListMacaroonIDs(ctx context.Context, in *ListMacaroonIDsRequest, opts ...grpc.CallOption) (*ListMacaroonIDsResponse, error) { - out := new(ListMacaroonIDsResponse) - err := c.cc.Invoke(ctx, "/lnrpc.Lightning/ListMacaroonIDs", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *lightningClient) DeleteMacaroonID(ctx context.Context, in *DeleteMacaroonIDRequest, opts ...grpc.CallOption) (*DeleteMacaroonIDResponse, error) { - out := new(DeleteMacaroonIDResponse) - err := c.cc.Invoke(ctx, "/lnrpc.Lightning/DeleteMacaroonID", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *lightningClient) ListPermissions(ctx context.Context, in *ListPermissionsRequest, opts ...grpc.CallOption) (*ListPermissionsResponse, error) { - out := new(ListPermissionsResponse) - err := c.cc.Invoke(ctx, "/lnrpc.Lightning/ListPermissions", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *lightningClient) ReSync(ctx context.Context, in *ReSyncChainRequest, opts ...grpc.CallOption) (*ReSyncChainResponse, error) { - out := new(ReSyncChainResponse) - err := c.cc.Invoke(ctx, "/lnrpc.Lightning/ReSync", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *lightningClient) StopReSync(ctx context.Context, in *StopReSyncRequest, opts ...grpc.CallOption) (*StopReSyncResponse, error) { - out := new(StopReSyncResponse) - err := c.cc.Invoke(ctx, "/lnrpc.Lightning/StopReSync", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// LightningServer is the server API for Lightning service. -type LightningServer interface { - // lncli: `walletbalance` - //WalletBalance returns total unspent outputs(confirmed and unconfirmed), all - //confirmed unspent outputs and all unconfirmed unspent outputs under control - //of the wallet. - WalletBalance(context.Context, *WalletBalanceRequest) (*WalletBalanceResponse, error) - // lncli: `getaddressbalances` - //GetAddressBalances returns the balance for each of the addresses in the wallet. 
- GetAddressBalances(context.Context, *GetAddressBalancesRequest) (*GetAddressBalancesResponse, error) - // lncli: `channelbalance` - //ChannelBalance returns a report on the total funds across all open channels, - //categorized in local/remote, pending local/remote and unsettled local/remote - //balances. - ChannelBalance(context.Context, *ChannelBalanceRequest) (*ChannelBalanceResponse, error) - // lncli: `listchaintxns` - //GetTransactions returns a list describing all the known transactions - //relevant to the wallet. - GetTransactions(context.Context, *GetTransactionsRequest) (*TransactionDetails, error) - // lncli: `estimatefee` - //EstimateFee asks the chain backend to estimate the fee rate and total fees - //for a transaction that pays to multiple specified outputs. - // - //When using REST, the `AddrToAmount` map type can be set by appending - //`&AddrToAmount[
]=` to the URL. Unfortunately this - //map type doesn't appear in the REST API documentation because of a bug in - //the grpc-gateway library. - EstimateFee(context.Context, *EstimateFeeRequest) (*EstimateFeeResponse, error) - // lncli: `sendcoins` - //SendCoins executes a request to send coins to a particular address. Unlike - //SendMany, this RPC call only allows creating a single output at a time. If - //neither target_conf, or sat_per_byte are set, then the internal wallet will - //consult its fee model to determine a fee for the default confirmation - //target. - SendCoins(context.Context, *SendCoinsRequest) (*SendCoinsResponse, error) - // lncli: `listunspent` - //Deprecated, use walletrpc.ListUnspent instead. - // - //ListUnspent returns a list of all utxos spendable by the wallet with a - //number of confirmations between the specified minimum and maximum. - ListUnspent(context.Context, *ListUnspentRequest) (*ListUnspentResponse, error) - // - //SubscribeTransactions creates a uni-directional stream from the server to - //the client in which any newly discovered transactions relevant to the - //wallet are sent over. - SubscribeTransactions(*GetTransactionsRequest, Lightning_SubscribeTransactionsServer) error - // lncli: `sendmany` - //SendMany handles a request for a transaction that creates multiple specified - //outputs in parallel. If neither target_conf, or sat_per_byte are set, then - //the internal wallet will consult its fee model to determine a fee for the - //default confirmation target. - SendMany(context.Context, *SendManyRequest) (*SendManyResponse, error) - // lncli: `newaddress` - //NewAddress creates a new address under control of the local wallet. - NewAddress(context.Context, *NewAddressRequest) (*NewAddressResponse, error) - // lncli: `signmessage` - //SignMessage signs a message with this node's private key. 
The returned - //signature string is `zbase32` encoded and pubkey recoverable, meaning that - //only the message digest and signature are needed for verification. - SignMessage(context.Context, *SignMessageRequest) (*SignMessageResponse, error) - // lncli: `verifymessage` - //VerifyMessage verifies a signature over a msg. The signature must be - //zbase32 encoded and signed by an active node in the resident node's - //channel database. In addition to returning the validity of the signature, - //VerifyMessage also returns the recovered pubkey from the signature. - VerifyMessage(context.Context, *VerifyMessageRequest) (*VerifyMessageResponse, error) - // lncli: `connect` - //ConnectPeer attempts to establish a connection to a remote peer. This is at - //the networking level, and is used for communication between nodes. This is - //distinct from establishing a channel with a peer. - ConnectPeer(context.Context, *ConnectPeerRequest) (*ConnectPeerResponse, error) - // lncli: `disconnect` - //DisconnectPeer attempts to disconnect one peer from another identified by a - //given pubKey. In the case that we currently have a pending or active channel - //with the target peer, then this action will be not be allowed. - DisconnectPeer(context.Context, *DisconnectPeerRequest) (*DisconnectPeerResponse, error) - // lncli: `listpeers` - //ListPeers returns a verbose listing of all currently active peers. - ListPeers(context.Context, *ListPeersRequest) (*ListPeersResponse, error) - // - //SubscribePeerEvents creates a uni-directional stream from the server to - //the client in which any events relevant to the state of peers are sent - //over. Events include peers going online and offline. 
- SubscribePeerEvents(*PeerEventSubscription, Lightning_SubscribePeerEventsServer) error - // lncli: `getinfo` - //GetInfo returns general information concerning the lightning node including - //it's identity pubkey, alias, the chains it is connected to, and information - //concerning the number of open+pending channels. - GetInfo(context.Context, *GetInfoRequest) (*GetInfoResponse, error) - //* lncli: `getrecoveryinfo` - //GetRecoveryInfo returns information concerning the recovery mode including - //whether it's in a recovery mode, whether the recovery is finished, and the - //progress made so far. - GetRecoveryInfo(context.Context, *GetRecoveryInfoRequest) (*GetRecoveryInfoResponse, error) - // lncli: `pendingchannels` - //PendingChannels returns a list of all the channels that are currently - //considered "pending". A channel is pending if it has finished the funding - //workflow and is waiting for confirmations for the funding txn, or is in the - //process of closure, either initiated cooperatively or non-cooperatively. - PendingChannels(context.Context, *PendingChannelsRequest) (*PendingChannelsResponse, error) - // lncli: `listchannels` - //ListChannels returns a description of all the open channels that this node - //is a participant in. - ListChannels(context.Context, *ListChannelsRequest) (*ListChannelsResponse, error) - // - //SubscribeChannelEvents creates a uni-directional stream from the server to - //the client in which any updates relevant to the state of the channels are - //sent over. Events include new active channels, inactive channels, and closed - //channels. - SubscribeChannelEvents(*ChannelEventSubscription, Lightning_SubscribeChannelEventsServer) error - // lncli: `closedchannels` - //ClosedChannels returns a description of all the closed channels that - //this node was a participant in. 
- ClosedChannels(context.Context, *ClosedChannelsRequest) (*ClosedChannelsResponse, error) - // - //OpenChannelSync is a synchronous version of the OpenChannel RPC call. This - //call is meant to be consumed by clients to the REST proxy. As with all - //other sync calls, all byte slices are intended to be populated as hex - //encoded strings. - OpenChannelSync(context.Context, *OpenChannelRequest) (*ChannelPoint, error) - // lncli: `openchannel` - //OpenChannel attempts to open a singly funded channel specified in the - //request to a remote peer. Users are able to specify a target number of - //blocks that the funding transaction should be confirmed in, or a manual fee - //rate to us for the funding transaction. If neither are specified, then a - //lax block confirmation target is used. Each OpenStatusUpdate will return - //the pending channel ID of the in-progress channel. Depending on the - //arguments specified in the OpenChannelRequest, this pending channel ID can - //then be used to manually progress the channel funding flow. - OpenChannel(*OpenChannelRequest, Lightning_OpenChannelServer) error - // - //FundingStateStep is an advanced funding related call that allows the caller - //to either execute some preparatory steps for a funding workflow, or - //manually progress a funding workflow. The primary way a funding flow is - //identified is via its pending channel ID. As an example, this method can be - //used to specify that we're expecting a funding flow for a particular - //pending channel ID, for which we need to use specific parameters. - //Alternatively, this can be used to interactively drive PSBT signing for - //funding for partially complete funding transactions. 
- FundingStateStep(context.Context, *FundingTransitionMsg) (*FundingStateStepResp, error) - // - //ChannelAcceptor dispatches a bi-directional streaming RPC in which - //OpenChannel requests are sent to the client and the client responds with - //a boolean that tells LND whether or not to accept the channel. This allows - //node operators to specify their own criteria for accepting inbound channels - //through a single persistent connection. - ChannelAcceptor(Lightning_ChannelAcceptorServer) error - // lncli: `closechannel` - //CloseChannel attempts to close an active channel identified by its channel - //outpoint (ChannelPoint). The actions of this method can additionally be - //augmented to attempt a force close after a timeout period in the case of an - //inactive peer. If a non-force close (cooperative closure) is requested, - //then the user can specify either a target number of blocks until the - //closure transaction is confirmed, or a manual fee rate. If neither are - //specified, then a default lax, block confirmation target is used. - CloseChannel(*CloseChannelRequest, Lightning_CloseChannelServer) error - // lncli: `abandonchannel` - //AbandonChannel removes all channel state from the database except for a - //close summary. This method can be used to get rid of permanently unusable - //channels due to bugs fixed in newer versions of lnd. This method can also be - //used to remove externally funded channels where the funding transaction was - //never broadcast. Only available for non-externally funded channels in dev - //build. - AbandonChannel(context.Context, *AbandonChannelRequest) (*AbandonChannelResponse, error) - // lncli: `sendpayment` - //Deprecated, use routerrpc.SendPaymentV2. SendPayment dispatches a - //bi-directional streaming RPC for sending payments through the Lightning - //Network. 
A single RPC invocation creates a persistent bi-directional - //stream allowing clients to rapidly send payments through the Lightning - //Network with a single persistent connection. - SendPayment(Lightning_SendPaymentServer) error - // - //SendPaymentSync is the synchronous non-streaming version of SendPayment. - //This RPC is intended to be consumed by clients of the REST proxy. - //Additionally, this RPC expects the destination's public key and the payment - //hash (if any) to be encoded as hex strings. - SendPaymentSync(context.Context, *SendRequest) (*SendResponse, error) - // lncli: `sendtoroute` - //Deprecated, use routerrpc.SendToRouteV2. SendToRoute is a bi-directional - //streaming RPC for sending payment through the Lightning Network. This - //method differs from SendPayment in that it allows users to specify a full - //route manually. This can be used for things like rebalancing, and atomic - //swaps. - SendToRoute(Lightning_SendToRouteServer) error - // - //SendToRouteSync is a synchronous version of SendToRoute. It Will block - //until the payment either fails or succeeds. - SendToRouteSync(context.Context, *SendToRouteRequest) (*SendResponse, error) - // lncli: `addinvoice` - //AddInvoice attempts to add a new invoice to the invoice database. Any - //duplicated invoices are rejected, therefore all invoices *must* have a - //unique payment preimage. - AddInvoice(context.Context, *Invoice) (*AddInvoiceResponse, error) - // lncli: `listinvoices` - //ListInvoices returns a list of all the invoices currently stored within the - //database. Any active debug invoices are ignored. It has full support for - //paginated responses, allowing users to query for specific invoices through - //their add_index. This can be done by using either the first_index_offset or - //last_index_offset fields included in the response as the index_offset of the - //next request. By default, the first 100 invoices created will be returned. 
- //Backwards pagination is also supported through the Reversed flag. - ListInvoices(context.Context, *ListInvoiceRequest) (*ListInvoiceResponse, error) - // lncli: `lookupinvoice` - //LookupInvoice attempts to look up an invoice according to its payment hash. - //The passed payment hash *must* be exactly 32 bytes, if not, an error is - //returned. - LookupInvoice(context.Context, *PaymentHash) (*Invoice, error) - // - //SubscribeInvoices returns a uni-directional stream (server -> client) for - //notifying the client of newly added/settled invoices. The caller can - //optionally specify the add_index and/or the settle_index. If the add_index - //is specified, then we'll first start by sending add invoice events for all - //invoices with an add_index greater than the specified value. If the - //settle_index is specified, the next, we'll send out all settle events for - //invoices with a settle_index greater than the specified value. One or both - //of these fields can be set. If no fields are set, then we'll only send out - //the latest add/settle events. - SubscribeInvoices(*InvoiceSubscription, Lightning_SubscribeInvoicesServer) error - // lncli: `decodepayreq` - //DecodePayReq takes an encoded payment request string and attempts to decode - //it, returning a full description of the conditions encoded within the - //payment request. - DecodePayReq(context.Context, *PayReqString) (*PayReq, error) - // lncli: `listpayments` - //ListPayments returns a list of all outgoing payments. - ListPayments(context.Context, *ListPaymentsRequest) (*ListPaymentsResponse, error) - // - //DeleteAllPayments deletes all outgoing payments from DB. - DeleteAllPayments(context.Context, *DeleteAllPaymentsRequest) (*DeleteAllPaymentsResponse, error) - // lncli: `describegraph` - //DescribeGraph returns a description of the latest graph state from the - //point of view of the node. 
The graph information is partitioned into two - //components: all the nodes/vertexes, and all the edges that connect the - //vertexes themselves. As this is a directed graph, the edges also contain - //the node directional specific routing policy which includes: the time lock - //delta, fee information, etc. - DescribeGraph(context.Context, *ChannelGraphRequest) (*ChannelGraph, error) - // lncli: `getnodemetrics` - //GetNodeMetrics returns node metrics calculated from the graph. Currently - //the only supported metric is betweenness centrality of individual nodes. - GetNodeMetrics(context.Context, *NodeMetricsRequest) (*NodeMetricsResponse, error) - // lncli: `getchaninfo` - //GetChanInfo returns the latest authenticated network announcement for the - //given channel identified by its channel ID: an 8-byte integer which - //uniquely identifies the location of transaction's funding output within the - //blockchain. - GetChanInfo(context.Context, *ChanInfoRequest) (*ChannelEdge, error) - // lncli: `getnodeinfo` - //GetNodeInfo returns the latest advertised, aggregated, and authenticated - //channel information for the specified node identified by its public key. - GetNodeInfo(context.Context, *NodeInfoRequest) (*NodeInfo, error) - // lncli: `queryroutes` - //QueryRoutes attempts to query the daemon's Channel Router for a possible - //route to a target destination capable of carrying a specific amount of - //satoshis. The returned route contains the full details required to craft and - //send an HTLC, also including the necessary information that should be - //present within the Sphinx packet encapsulated within the HTLC. - // - //When using REST, the `dest_custom_records` map type can be set by appending - //`&dest_custom_records[]=` - //to the URL. Unfortunately this map type doesn't appear in the REST API - //documentation because of a bug in the grpc-gateway library. 
- QueryRoutes(context.Context, *QueryRoutesRequest) (*QueryRoutesResponse, error) - // lncli: `getnetworkinfo` - //GetNetworkInfo returns some basic stats about the known channel graph from - //the point of view of the node. - GetNetworkInfo(context.Context, *NetworkInfoRequest) (*NetworkInfo, error) - // lncli: `stop` - //StopDaemon will send a shutdown request to the interrupt handler, triggering - //a graceful shutdown of the daemon. - StopDaemon(context.Context, *StopRequest) (*StopResponse, error) - // - //SubscribeChannelGraph launches a streaming RPC that allows the caller to - //receive notifications upon any changes to the channel graph topology from - //the point of view of the responding node. Events notified include: new - //nodes coming online, nodes updating their authenticated attributes, new - //channels being advertised, updates in the routing policy for a directional - //channel edge, and when channels are closed on-chain. - SubscribeChannelGraph(*GraphTopologySubscription, Lightning_SubscribeChannelGraphServer) error - // lncli: `debuglevel` - //DebugLevel allows a caller to programmatically set the logging verbosity of - //lnd. The logging can be targeted according to a coarse daemon-wide logging - //level, or in a granular fashion to specify the logging for a target - //sub-system. - DebugLevel(context.Context, *DebugLevelRequest) (*DebugLevelResponse, error) - // lncli: `feereport` - //FeeReport allows the caller to obtain a report detailing the current fee - //schedule enforced by the node globally for each channel. - FeeReport(context.Context, *FeeReportRequest) (*FeeReportResponse, error) - // lncli: `updatechanpolicy` - //UpdateChannelPolicy allows the caller to update the fee schedule and - //channel policies for all channels globally, or a particular channel. 
- UpdateChannelPolicy(context.Context, *PolicyUpdateRequest) (*PolicyUpdateResponse, error) - // lncli: `fwdinghistory` - //ForwardingHistory allows the caller to query the htlcswitch for a record of - //all HTLCs forwarded within the target time range, and integer offset - //within that time range. If no time-range is specified, then the first chunk - //of the past 24 hrs of forwarding history are returned. - // - //A list of forwarding events are returned. The size of each forwarding event - //is 40 bytes, and the max message size able to be returned in gRPC is 4 MiB. - //As a result each message can only contain 50k entries. Each response has - //the index offset of the last entry. The index offset can be provided to the - //request to allow the caller to skip a series of records. - ForwardingHistory(context.Context, *ForwardingHistoryRequest) (*ForwardingHistoryResponse, error) - // lncli: `exportchanbackup` - //ExportChannelBackup attempts to return an encrypted static channel backup - //for the target channel identified by it channel point. The backup is - //encrypted with a key generated from the aezeed seed of the user. The - //returned backup can either be restored using the RestoreChannelBackup - //method once lnd is running, or via the InitWallet and UnlockWallet methods - //from the WalletUnlocker service. - ExportChannelBackup(context.Context, *ExportChannelBackupRequest) (*ChannelBackup, error) - // - //ExportAllChannelBackups returns static channel backups for all existing - //channels known to lnd. A set of regular singular static channel backups for - //each channel are returned. Additionally, a multi-channel backup is returned - //as well, which contains a single encrypted blob containing the backups of - //each channel. - ExportAllChannelBackups(context.Context, *ChanBackupExportRequest) (*ChanBackupSnapshot, error) - // - //VerifyChanBackup allows a caller to verify the integrity of a channel backup - //snapshot. 
This method will accept either a packed Single or a packed Multi. - //Specifying both will result in an error. - VerifyChanBackup(context.Context, *ChanBackupSnapshot) (*VerifyChanBackupResponse, error) - // lncli: `restorechanbackup` - //RestoreChannelBackups accepts a set of singular channel backups, or a - //single encrypted multi-chan backup and attempts to recover any funds - //remaining within the channel. If we are able to unpack the backup, then the - //new channel will be shown under listchannels, as well as pending channels. - RestoreChannelBackups(context.Context, *RestoreChanBackupRequest) (*RestoreBackupResponse, error) - // - //SubscribeChannelBackups allows a client to sub-subscribe to the most up to - //date information concerning the state of all channel backups. Each time a - //new channel is added, we return the new set of channels, along with a - //multi-chan backup containing the backup info for all channels. Each time a - //channel is closed, we send a new update, which contains new new chan back - //ups, but the updated set of encrypted multi-chan backups with the closed - //channel(s) removed. - SubscribeChannelBackups(*ChannelBackupSubscription, Lightning_SubscribeChannelBackupsServer) error - // lncli: `bakemacaroon` - //BakeMacaroon allows the creation of a new macaroon with custom read and - //write permissions. No first-party caveats are added since this can be done - //offline. - BakeMacaroon(context.Context, *BakeMacaroonRequest) (*BakeMacaroonResponse, error) - // lncli: `listmacaroonids` - //ListMacaroonIDs returns all root key IDs that are in use. - ListMacaroonIDs(context.Context, *ListMacaroonIDsRequest) (*ListMacaroonIDsResponse, error) - // lncli: `deletemacaroonid` - //DeleteMacaroonID deletes the specified macaroon ID and invalidates all - //macaroons derived from that ID. 
- DeleteMacaroonID(context.Context, *DeleteMacaroonIDRequest) (*DeleteMacaroonIDResponse, error) - // lncli: `listpermissions` - //ListPermissions lists all RPC method URIs and their required macaroon - //permissions to access them. - ListPermissions(context.Context, *ListPermissionsRequest) (*ListPermissionsResponse, error) - //Scan over the chain to find any transactions which may not have been recorded in the wallet's database - ReSync(context.Context, *ReSyncChainRequest) (*ReSyncChainResponse, error) - //Stop a re-synchronization job before it's completion - StopReSync(context.Context, *StopReSyncRequest) (*StopReSyncResponse, error) -} - -// UnimplementedLightningServer can be embedded to have forward compatible implementations. -type UnimplementedLightningServer struct { -} - -func (*UnimplementedLightningServer) WalletBalance(ctx context.Context, req *WalletBalanceRequest) (*WalletBalanceResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method WalletBalance not implemented") -} -func (*UnimplementedLightningServer) GetAddressBalances(ctx context.Context, req *GetAddressBalancesRequest) (*GetAddressBalancesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetAddressBalances not implemented") -} -func (*UnimplementedLightningServer) ChannelBalance(ctx context.Context, req *ChannelBalanceRequest) (*ChannelBalanceResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ChannelBalance not implemented") -} -func (*UnimplementedLightningServer) GetTransactions(ctx context.Context, req *GetTransactionsRequest) (*TransactionDetails, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetTransactions not implemented") -} -func (*UnimplementedLightningServer) EstimateFee(ctx context.Context, req *EstimateFeeRequest) (*EstimateFeeResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method EstimateFee not implemented") -} -func (*UnimplementedLightningServer) SendCoins(ctx 
context.Context, req *SendCoinsRequest) (*SendCoinsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method SendCoins not implemented") -} -func (*UnimplementedLightningServer) ListUnspent(ctx context.Context, req *ListUnspentRequest) (*ListUnspentResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListUnspent not implemented") -} -func (*UnimplementedLightningServer) SubscribeTransactions(req *GetTransactionsRequest, srv Lightning_SubscribeTransactionsServer) error { - return status.Errorf(codes.Unimplemented, "method SubscribeTransactions not implemented") -} -func (*UnimplementedLightningServer) SendMany(ctx context.Context, req *SendManyRequest) (*SendManyResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method SendMany not implemented") -} -func (*UnimplementedLightningServer) NewAddress(ctx context.Context, req *NewAddressRequest) (*NewAddressResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method NewAddress not implemented") -} -func (*UnimplementedLightningServer) SignMessage(ctx context.Context, req *SignMessageRequest) (*SignMessageResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method SignMessage not implemented") -} -func (*UnimplementedLightningServer) VerifyMessage(ctx context.Context, req *VerifyMessageRequest) (*VerifyMessageResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method VerifyMessage not implemented") -} -func (*UnimplementedLightningServer) ConnectPeer(ctx context.Context, req *ConnectPeerRequest) (*ConnectPeerResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ConnectPeer not implemented") -} -func (*UnimplementedLightningServer) DisconnectPeer(ctx context.Context, req *DisconnectPeerRequest) (*DisconnectPeerResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method DisconnectPeer not implemented") -} -func (*UnimplementedLightningServer) ListPeers(ctx context.Context, req 
*ListPeersRequest) (*ListPeersResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListPeers not implemented") -} -func (*UnimplementedLightningServer) SubscribePeerEvents(req *PeerEventSubscription, srv Lightning_SubscribePeerEventsServer) error { - return status.Errorf(codes.Unimplemented, "method SubscribePeerEvents not implemented") -} -func (*UnimplementedLightningServer) GetInfo(ctx context.Context, req *GetInfoRequest) (*GetInfoResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetInfo not implemented") -} -func (*UnimplementedLightningServer) GetRecoveryInfo(ctx context.Context, req *GetRecoveryInfoRequest) (*GetRecoveryInfoResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetRecoveryInfo not implemented") -} -func (*UnimplementedLightningServer) PendingChannels(ctx context.Context, req *PendingChannelsRequest) (*PendingChannelsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method PendingChannels not implemented") -} -func (*UnimplementedLightningServer) ListChannels(ctx context.Context, req *ListChannelsRequest) (*ListChannelsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListChannels not implemented") -} -func (*UnimplementedLightningServer) SubscribeChannelEvents(req *ChannelEventSubscription, srv Lightning_SubscribeChannelEventsServer) error { - return status.Errorf(codes.Unimplemented, "method SubscribeChannelEvents not implemented") -} -func (*UnimplementedLightningServer) ClosedChannels(ctx context.Context, req *ClosedChannelsRequest) (*ClosedChannelsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ClosedChannels not implemented") -} -func (*UnimplementedLightningServer) OpenChannelSync(ctx context.Context, req *OpenChannelRequest) (*ChannelPoint, error) { - return nil, status.Errorf(codes.Unimplemented, "method OpenChannelSync not implemented") -} -func (*UnimplementedLightningServer) OpenChannel(req 
*OpenChannelRequest, srv Lightning_OpenChannelServer) error { - return status.Errorf(codes.Unimplemented, "method OpenChannel not implemented") -} -func (*UnimplementedLightningServer) FundingStateStep(ctx context.Context, req *FundingTransitionMsg) (*FundingStateStepResp, error) { - return nil, status.Errorf(codes.Unimplemented, "method FundingStateStep not implemented") -} -func (*UnimplementedLightningServer) ChannelAcceptor(srv Lightning_ChannelAcceptorServer) error { - return status.Errorf(codes.Unimplemented, "method ChannelAcceptor not implemented") -} -func (*UnimplementedLightningServer) CloseChannel(req *CloseChannelRequest, srv Lightning_CloseChannelServer) error { - return status.Errorf(codes.Unimplemented, "method CloseChannel not implemented") -} -func (*UnimplementedLightningServer) AbandonChannel(ctx context.Context, req *AbandonChannelRequest) (*AbandonChannelResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method AbandonChannel not implemented") -} -func (*UnimplementedLightningServer) SendPayment(srv Lightning_SendPaymentServer) error { - return status.Errorf(codes.Unimplemented, "method SendPayment not implemented") -} -func (*UnimplementedLightningServer) SendPaymentSync(ctx context.Context, req *SendRequest) (*SendResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method SendPaymentSync not implemented") -} -func (*UnimplementedLightningServer) SendToRoute(srv Lightning_SendToRouteServer) error { - return status.Errorf(codes.Unimplemented, "method SendToRoute not implemented") -} -func (*UnimplementedLightningServer) SendToRouteSync(ctx context.Context, req *SendToRouteRequest) (*SendResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method SendToRouteSync not implemented") -} -func (*UnimplementedLightningServer) AddInvoice(ctx context.Context, req *Invoice) (*AddInvoiceResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method AddInvoice not implemented") -} -func 
(*UnimplementedLightningServer) ListInvoices(ctx context.Context, req *ListInvoiceRequest) (*ListInvoiceResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListInvoices not implemented") -} -func (*UnimplementedLightningServer) LookupInvoice(ctx context.Context, req *PaymentHash) (*Invoice, error) { - return nil, status.Errorf(codes.Unimplemented, "method LookupInvoice not implemented") -} -func (*UnimplementedLightningServer) SubscribeInvoices(req *InvoiceSubscription, srv Lightning_SubscribeInvoicesServer) error { - return status.Errorf(codes.Unimplemented, "method SubscribeInvoices not implemented") -} -func (*UnimplementedLightningServer) DecodePayReq(ctx context.Context, req *PayReqString) (*PayReq, error) { - return nil, status.Errorf(codes.Unimplemented, "method DecodePayReq not implemented") -} -func (*UnimplementedLightningServer) ListPayments(ctx context.Context, req *ListPaymentsRequest) (*ListPaymentsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListPayments not implemented") -} -func (*UnimplementedLightningServer) DeleteAllPayments(ctx context.Context, req *DeleteAllPaymentsRequest) (*DeleteAllPaymentsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteAllPayments not implemented") -} -func (*UnimplementedLightningServer) DescribeGraph(ctx context.Context, req *ChannelGraphRequest) (*ChannelGraph, error) { - return nil, status.Errorf(codes.Unimplemented, "method DescribeGraph not implemented") -} -func (*UnimplementedLightningServer) GetNodeMetrics(ctx context.Context, req *NodeMetricsRequest) (*NodeMetricsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetNodeMetrics not implemented") -} -func (*UnimplementedLightningServer) GetChanInfo(ctx context.Context, req *ChanInfoRequest) (*ChannelEdge, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetChanInfo not implemented") -} -func (*UnimplementedLightningServer) 
GetNodeInfo(ctx context.Context, req *NodeInfoRequest) (*NodeInfo, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetNodeInfo not implemented") -} -func (*UnimplementedLightningServer) QueryRoutes(ctx context.Context, req *QueryRoutesRequest) (*QueryRoutesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method QueryRoutes not implemented") -} -func (*UnimplementedLightningServer) GetNetworkInfo(ctx context.Context, req *NetworkInfoRequest) (*NetworkInfo, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetNetworkInfo not implemented") -} -func (*UnimplementedLightningServer) StopDaemon(ctx context.Context, req *StopRequest) (*StopResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method StopDaemon not implemented") -} -func (*UnimplementedLightningServer) SubscribeChannelGraph(req *GraphTopologySubscription, srv Lightning_SubscribeChannelGraphServer) error { - return status.Errorf(codes.Unimplemented, "method SubscribeChannelGraph not implemented") -} -func (*UnimplementedLightningServer) DebugLevel(ctx context.Context, req *DebugLevelRequest) (*DebugLevelResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method DebugLevel not implemented") -} -func (*UnimplementedLightningServer) FeeReport(ctx context.Context, req *FeeReportRequest) (*FeeReportResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method FeeReport not implemented") -} -func (*UnimplementedLightningServer) UpdateChannelPolicy(ctx context.Context, req *PolicyUpdateRequest) (*PolicyUpdateResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method UpdateChannelPolicy not implemented") -} -func (*UnimplementedLightningServer) ForwardingHistory(ctx context.Context, req *ForwardingHistoryRequest) (*ForwardingHistoryResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ForwardingHistory not implemented") -} -func (*UnimplementedLightningServer) 
ExportChannelBackup(ctx context.Context, req *ExportChannelBackupRequest) (*ChannelBackup, error) { - return nil, status.Errorf(codes.Unimplemented, "method ExportChannelBackup not implemented") -} -func (*UnimplementedLightningServer) ExportAllChannelBackups(ctx context.Context, req *ChanBackupExportRequest) (*ChanBackupSnapshot, error) { - return nil, status.Errorf(codes.Unimplemented, "method ExportAllChannelBackups not implemented") -} -func (*UnimplementedLightningServer) VerifyChanBackup(ctx context.Context, req *ChanBackupSnapshot) (*VerifyChanBackupResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method VerifyChanBackup not implemented") -} -func (*UnimplementedLightningServer) RestoreChannelBackups(ctx context.Context, req *RestoreChanBackupRequest) (*RestoreBackupResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method RestoreChannelBackups not implemented") -} -func (*UnimplementedLightningServer) SubscribeChannelBackups(req *ChannelBackupSubscription, srv Lightning_SubscribeChannelBackupsServer) error { - return status.Errorf(codes.Unimplemented, "method SubscribeChannelBackups not implemented") -} -func (*UnimplementedLightningServer) BakeMacaroon(ctx context.Context, req *BakeMacaroonRequest) (*BakeMacaroonResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method BakeMacaroon not implemented") -} -func (*UnimplementedLightningServer) ListMacaroonIDs(ctx context.Context, req *ListMacaroonIDsRequest) (*ListMacaroonIDsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListMacaroonIDs not implemented") -} -func (*UnimplementedLightningServer) DeleteMacaroonID(ctx context.Context, req *DeleteMacaroonIDRequest) (*DeleteMacaroonIDResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteMacaroonID not implemented") -} -func (*UnimplementedLightningServer) ListPermissions(ctx context.Context, req *ListPermissionsRequest) (*ListPermissionsResponse, error) 
{ - return nil, status.Errorf(codes.Unimplemented, "method ListPermissions not implemented") -} -func (*UnimplementedLightningServer) ReSync(ctx context.Context, req *ReSyncChainRequest) (*ReSyncChainResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ReSync not implemented") -} -func (*UnimplementedLightningServer) StopReSync(ctx context.Context, req *StopReSyncRequest) (*StopReSyncResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method StopReSync not implemented") -} - -func RegisterLightningServer(s *grpc.Server, srv LightningServer) { - s.RegisterService(&_Lightning_serviceDesc, srv) -} - -func _Lightning_WalletBalance_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(WalletBalanceRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LightningServer).WalletBalance(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lnrpc.Lightning/WalletBalance", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LightningServer).WalletBalance(ctx, req.(*WalletBalanceRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lightning_GetAddressBalances_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetAddressBalancesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LightningServer).GetAddressBalances(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lnrpc.Lightning/GetAddressBalances", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LightningServer).GetAddressBalances(ctx, req.(*GetAddressBalancesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func 
_Lightning_ChannelBalance_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ChannelBalanceRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LightningServer).ChannelBalance(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lnrpc.Lightning/ChannelBalance", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LightningServer).ChannelBalance(ctx, req.(*ChannelBalanceRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lightning_GetTransactions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetTransactionsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LightningServer).GetTransactions(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lnrpc.Lightning/GetTransactions", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LightningServer).GetTransactions(ctx, req.(*GetTransactionsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lightning_EstimateFee_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(EstimateFeeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LightningServer).EstimateFee(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lnrpc.Lightning/EstimateFee", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LightningServer).EstimateFee(ctx, req.(*EstimateFeeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func 
_Lightning_SendCoins_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SendCoinsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LightningServer).SendCoins(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lnrpc.Lightning/SendCoins", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LightningServer).SendCoins(ctx, req.(*SendCoinsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lightning_ListUnspent_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListUnspentRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LightningServer).ListUnspent(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lnrpc.Lightning/ListUnspent", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LightningServer).ListUnspent(ctx, req.(*ListUnspentRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lightning_SubscribeTransactions_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(GetTransactionsRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(LightningServer).SubscribeTransactions(m, &lightningSubscribeTransactionsServer{stream}) -} - -type Lightning_SubscribeTransactionsServer interface { - Send(*Transaction) error - grpc.ServerStream -} - -type lightningSubscribeTransactionsServer struct { - grpc.ServerStream -} - -func (x *lightningSubscribeTransactionsServer) Send(m *Transaction) error { - return x.ServerStream.SendMsg(m) -} - -func _Lightning_SendMany_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SendManyRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LightningServer).SendMany(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lnrpc.Lightning/SendMany", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LightningServer).SendMany(ctx, req.(*SendManyRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lightning_NewAddress_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(NewAddressRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LightningServer).NewAddress(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lnrpc.Lightning/NewAddress", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LightningServer).NewAddress(ctx, req.(*NewAddressRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lightning_SignMessage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SignMessageRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LightningServer).SignMessage(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lnrpc.Lightning/SignMessage", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LightningServer).SignMessage(ctx, req.(*SignMessageRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lightning_VerifyMessage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(VerifyMessageRequest) 
- if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LightningServer).VerifyMessage(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lnrpc.Lightning/VerifyMessage", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LightningServer).VerifyMessage(ctx, req.(*VerifyMessageRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lightning_ConnectPeer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ConnectPeerRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LightningServer).ConnectPeer(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lnrpc.Lightning/ConnectPeer", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LightningServer).ConnectPeer(ctx, req.(*ConnectPeerRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lightning_DisconnectPeer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DisconnectPeerRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LightningServer).DisconnectPeer(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lnrpc.Lightning/DisconnectPeer", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LightningServer).DisconnectPeer(ctx, req.(*DisconnectPeerRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lightning_ListPeers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListPeersRequest) - if err := dec(in); err != nil { - return 
nil, err - } - if interceptor == nil { - return srv.(LightningServer).ListPeers(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lnrpc.Lightning/ListPeers", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LightningServer).ListPeers(ctx, req.(*ListPeersRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lightning_SubscribePeerEvents_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(PeerEventSubscription) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(LightningServer).SubscribePeerEvents(m, &lightningSubscribePeerEventsServer{stream}) -} - -type Lightning_SubscribePeerEventsServer interface { - Send(*PeerEvent) error - grpc.ServerStream -} - -type lightningSubscribePeerEventsServer struct { - grpc.ServerStream -} - -func (x *lightningSubscribePeerEventsServer) Send(m *PeerEvent) error { - return x.ServerStream.SendMsg(m) -} - -func _Lightning_GetInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetInfoRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LightningServer).GetInfo(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lnrpc.Lightning/GetInfo", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LightningServer).GetInfo(ctx, req.(*GetInfoRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lightning_GetRecoveryInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetRecoveryInfoRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LightningServer).GetRecoveryInfo(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - 
FullMethod: "/lnrpc.Lightning/GetRecoveryInfo", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LightningServer).GetRecoveryInfo(ctx, req.(*GetRecoveryInfoRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lightning_PendingChannels_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(PendingChannelsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LightningServer).PendingChannels(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lnrpc.Lightning/PendingChannels", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LightningServer).PendingChannels(ctx, req.(*PendingChannelsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lightning_ListChannels_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListChannelsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LightningServer).ListChannels(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lnrpc.Lightning/ListChannels", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LightningServer).ListChannels(ctx, req.(*ListChannelsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lightning_SubscribeChannelEvents_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(ChannelEventSubscription) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(LightningServer).SubscribeChannelEvents(m, &lightningSubscribeChannelEventsServer{stream}) -} - -type Lightning_SubscribeChannelEventsServer interface { - Send(*ChannelEventUpdate) error - 
grpc.ServerStream -} - -type lightningSubscribeChannelEventsServer struct { - grpc.ServerStream -} - -func (x *lightningSubscribeChannelEventsServer) Send(m *ChannelEventUpdate) error { - return x.ServerStream.SendMsg(m) -} - -func _Lightning_ClosedChannels_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ClosedChannelsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LightningServer).ClosedChannels(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lnrpc.Lightning/ClosedChannels", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LightningServer).ClosedChannels(ctx, req.(*ClosedChannelsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lightning_OpenChannelSync_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(OpenChannelRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LightningServer).OpenChannelSync(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lnrpc.Lightning/OpenChannelSync", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LightningServer).OpenChannelSync(ctx, req.(*OpenChannelRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lightning_OpenChannel_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(OpenChannelRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(LightningServer).OpenChannel(m, &lightningOpenChannelServer{stream}) -} - -type Lightning_OpenChannelServer interface { - Send(*OpenStatusUpdate) error - grpc.ServerStream -} - -type lightningOpenChannelServer struct { - grpc.ServerStream -} - -func (x 
*lightningOpenChannelServer) Send(m *OpenStatusUpdate) error { - return x.ServerStream.SendMsg(m) -} - -func _Lightning_FundingStateStep_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(FundingTransitionMsg) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LightningServer).FundingStateStep(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lnrpc.Lightning/FundingStateStep", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LightningServer).FundingStateStep(ctx, req.(*FundingTransitionMsg)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lightning_ChannelAcceptor_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(LightningServer).ChannelAcceptor(&lightningChannelAcceptorServer{stream}) -} - -type Lightning_ChannelAcceptorServer interface { - Send(*ChannelAcceptRequest) error - Recv() (*ChannelAcceptResponse, error) - grpc.ServerStream -} - -type lightningChannelAcceptorServer struct { - grpc.ServerStream -} - -func (x *lightningChannelAcceptorServer) Send(m *ChannelAcceptRequest) error { - return x.ServerStream.SendMsg(m) -} - -func (x *lightningChannelAcceptorServer) Recv() (*ChannelAcceptResponse, error) { - m := new(ChannelAcceptResponse) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _Lightning_CloseChannel_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(CloseChannelRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(LightningServer).CloseChannel(m, &lightningCloseChannelServer{stream}) -} - -type Lightning_CloseChannelServer interface { - Send(*CloseStatusUpdate) error - grpc.ServerStream -} - -type lightningCloseChannelServer struct { - grpc.ServerStream -} - -func (x *lightningCloseChannelServer) 
Send(m *CloseStatusUpdate) error { - return x.ServerStream.SendMsg(m) -} - -func _Lightning_AbandonChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AbandonChannelRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LightningServer).AbandonChannel(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lnrpc.Lightning/AbandonChannel", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LightningServer).AbandonChannel(ctx, req.(*AbandonChannelRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lightning_SendPayment_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(LightningServer).SendPayment(&lightningSendPaymentServer{stream}) -} - -type Lightning_SendPaymentServer interface { - Send(*SendResponse) error - Recv() (*SendRequest, error) - grpc.ServerStream -} - -type lightningSendPaymentServer struct { - grpc.ServerStream -} - -func (x *lightningSendPaymentServer) Send(m *SendResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *lightningSendPaymentServer) Recv() (*SendRequest, error) { - m := new(SendRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _Lightning_SendPaymentSync_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SendRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LightningServer).SendPaymentSync(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lnrpc.Lightning/SendPaymentSync", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LightningServer).SendPaymentSync(ctx, req.(*SendRequest)) - } 
- return interceptor(ctx, in, info, handler) -} - -func _Lightning_SendToRoute_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(LightningServer).SendToRoute(&lightningSendToRouteServer{stream}) -} - -type Lightning_SendToRouteServer interface { - Send(*SendResponse) error - Recv() (*SendToRouteRequest, error) - grpc.ServerStream -} - -type lightningSendToRouteServer struct { - grpc.ServerStream -} - -func (x *lightningSendToRouteServer) Send(m *SendResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *lightningSendToRouteServer) Recv() (*SendToRouteRequest, error) { - m := new(SendToRouteRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _Lightning_SendToRouteSync_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SendToRouteRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LightningServer).SendToRouteSync(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lnrpc.Lightning/SendToRouteSync", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LightningServer).SendToRouteSync(ctx, req.(*SendToRouteRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lightning_AddInvoice_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Invoice) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LightningServer).AddInvoice(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lnrpc.Lightning/AddInvoice", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LightningServer).AddInvoice(ctx, req.(*Invoice)) - } - return interceptor(ctx, in, 
info, handler) -} - -func _Lightning_ListInvoices_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListInvoiceRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LightningServer).ListInvoices(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lnrpc.Lightning/ListInvoices", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LightningServer).ListInvoices(ctx, req.(*ListInvoiceRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lightning_LookupInvoice_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(PaymentHash) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LightningServer).LookupInvoice(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lnrpc.Lightning/LookupInvoice", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LightningServer).LookupInvoice(ctx, req.(*PaymentHash)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lightning_SubscribeInvoices_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(InvoiceSubscription) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(LightningServer).SubscribeInvoices(m, &lightningSubscribeInvoicesServer{stream}) -} - -type Lightning_SubscribeInvoicesServer interface { - Send(*Invoice) error - grpc.ServerStream -} - -type lightningSubscribeInvoicesServer struct { - grpc.ServerStream -} - -func (x *lightningSubscribeInvoicesServer) Send(m *Invoice) error { - return x.ServerStream.SendMsg(m) -} - -func _Lightning_DecodePayReq_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(PayReqString) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LightningServer).DecodePayReq(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lnrpc.Lightning/DecodePayReq", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LightningServer).DecodePayReq(ctx, req.(*PayReqString)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lightning_ListPayments_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListPaymentsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LightningServer).ListPayments(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lnrpc.Lightning/ListPayments", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LightningServer).ListPayments(ctx, req.(*ListPaymentsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lightning_DeleteAllPayments_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteAllPaymentsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LightningServer).DeleteAllPayments(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lnrpc.Lightning/DeleteAllPayments", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LightningServer).DeleteAllPayments(ctx, req.(*DeleteAllPaymentsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lightning_DescribeGraph_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) 
(interface{}, error) { - in := new(ChannelGraphRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LightningServer).DescribeGraph(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lnrpc.Lightning/DescribeGraph", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LightningServer).DescribeGraph(ctx, req.(*ChannelGraphRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lightning_GetNodeMetrics_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(NodeMetricsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LightningServer).GetNodeMetrics(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lnrpc.Lightning/GetNodeMetrics", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LightningServer).GetNodeMetrics(ctx, req.(*NodeMetricsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lightning_GetChanInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ChanInfoRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LightningServer).GetChanInfo(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lnrpc.Lightning/GetChanInfo", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LightningServer).GetChanInfo(ctx, req.(*ChanInfoRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lightning_GetNodeInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(NodeInfoRequest) - 
if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LightningServer).GetNodeInfo(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lnrpc.Lightning/GetNodeInfo", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LightningServer).GetNodeInfo(ctx, req.(*NodeInfoRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lightning_QueryRoutes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryRoutesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LightningServer).QueryRoutes(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lnrpc.Lightning/QueryRoutes", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LightningServer).QueryRoutes(ctx, req.(*QueryRoutesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lightning_GetNetworkInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(NetworkInfoRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LightningServer).GetNetworkInfo(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lnrpc.Lightning/GetNetworkInfo", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LightningServer).GetNetworkInfo(ctx, req.(*NetworkInfoRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lightning_StopDaemon_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(StopRequest) - if err := dec(in); err != nil { - return nil, err - } - if 
interceptor == nil { - return srv.(LightningServer).StopDaemon(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lnrpc.Lightning/StopDaemon", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LightningServer).StopDaemon(ctx, req.(*StopRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lightning_SubscribeChannelGraph_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(GraphTopologySubscription) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(LightningServer).SubscribeChannelGraph(m, &lightningSubscribeChannelGraphServer{stream}) -} - -type Lightning_SubscribeChannelGraphServer interface { - Send(*GraphTopologyUpdate) error - grpc.ServerStream -} - -type lightningSubscribeChannelGraphServer struct { - grpc.ServerStream -} - -func (x *lightningSubscribeChannelGraphServer) Send(m *GraphTopologyUpdate) error { - return x.ServerStream.SendMsg(m) -} - -func _Lightning_DebugLevel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DebugLevelRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LightningServer).DebugLevel(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lnrpc.Lightning/DebugLevel", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LightningServer).DebugLevel(ctx, req.(*DebugLevelRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lightning_FeeReport_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(FeeReportRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LightningServer).FeeReport(ctx, in) - } - info := &grpc.UnaryServerInfo{ 
- Server: srv, - FullMethod: "/lnrpc.Lightning/FeeReport", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LightningServer).FeeReport(ctx, req.(*FeeReportRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lightning_UpdateChannelPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(PolicyUpdateRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LightningServer).UpdateChannelPolicy(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lnrpc.Lightning/UpdateChannelPolicy", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LightningServer).UpdateChannelPolicy(ctx, req.(*PolicyUpdateRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lightning_ForwardingHistory_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ForwardingHistoryRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LightningServer).ForwardingHistory(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lnrpc.Lightning/ForwardingHistory", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LightningServer).ForwardingHistory(ctx, req.(*ForwardingHistoryRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lightning_ExportChannelBackup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ExportChannelBackupRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LightningServer).ExportChannelBackup(ctx, in) - } - info := 
&grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lnrpc.Lightning/ExportChannelBackup", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LightningServer).ExportChannelBackup(ctx, req.(*ExportChannelBackupRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lightning_ExportAllChannelBackups_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ChanBackupExportRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LightningServer).ExportAllChannelBackups(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lnrpc.Lightning/ExportAllChannelBackups", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LightningServer).ExportAllChannelBackups(ctx, req.(*ChanBackupExportRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lightning_VerifyChanBackup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ChanBackupSnapshot) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LightningServer).VerifyChanBackup(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lnrpc.Lightning/VerifyChanBackup", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LightningServer).VerifyChanBackup(ctx, req.(*ChanBackupSnapshot)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lightning_RestoreChannelBackups_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RestoreChanBackupRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return 
srv.(LightningServer).RestoreChannelBackups(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lnrpc.Lightning/RestoreChannelBackups", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LightningServer).RestoreChannelBackups(ctx, req.(*RestoreChanBackupRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lightning_SubscribeChannelBackups_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(ChannelBackupSubscription) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(LightningServer).SubscribeChannelBackups(m, &lightningSubscribeChannelBackupsServer{stream}) -} - -type Lightning_SubscribeChannelBackupsServer interface { - Send(*ChanBackupSnapshot) error - grpc.ServerStream -} - -type lightningSubscribeChannelBackupsServer struct { - grpc.ServerStream -} - -func (x *lightningSubscribeChannelBackupsServer) Send(m *ChanBackupSnapshot) error { - return x.ServerStream.SendMsg(m) -} - -func _Lightning_BakeMacaroon_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(BakeMacaroonRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LightningServer).BakeMacaroon(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lnrpc.Lightning/BakeMacaroon", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LightningServer).BakeMacaroon(ctx, req.(*BakeMacaroonRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lightning_ListMacaroonIDs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListMacaroonIDsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return 
srv.(LightningServer).ListMacaroonIDs(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lnrpc.Lightning/ListMacaroonIDs", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LightningServer).ListMacaroonIDs(ctx, req.(*ListMacaroonIDsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lightning_DeleteMacaroonID_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteMacaroonIDRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LightningServer).DeleteMacaroonID(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lnrpc.Lightning/DeleteMacaroonID", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LightningServer).DeleteMacaroonID(ctx, req.(*DeleteMacaroonIDRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lightning_ListPermissions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListPermissionsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LightningServer).ListPermissions(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lnrpc.Lightning/ListPermissions", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LightningServer).ListPermissions(ctx, req.(*ListPermissionsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lightning_ReSync_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ReSyncChainRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return 
srv.(LightningServer).ReSync(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lnrpc.Lightning/ReSync", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LightningServer).ReSync(ctx, req.(*ReSyncChainRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lightning_StopReSync_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(StopReSyncRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LightningServer).StopReSync(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lnrpc.Lightning/StopReSync", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LightningServer).StopReSync(ctx, req.(*StopReSyncRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Lightning_serviceDesc = grpc.ServiceDesc{ - ServiceName: "lnrpc.Lightning", - HandlerType: (*LightningServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "WalletBalance", - Handler: _Lightning_WalletBalance_Handler, - }, - { - MethodName: "GetAddressBalances", - Handler: _Lightning_GetAddressBalances_Handler, - }, - { - MethodName: "ChannelBalance", - Handler: _Lightning_ChannelBalance_Handler, - }, - { - MethodName: "GetTransactions", - Handler: _Lightning_GetTransactions_Handler, - }, - { - MethodName: "EstimateFee", - Handler: _Lightning_EstimateFee_Handler, - }, - { - MethodName: "SendCoins", - Handler: _Lightning_SendCoins_Handler, - }, - { - MethodName: "ListUnspent", - Handler: _Lightning_ListUnspent_Handler, - }, - { - MethodName: "SendMany", - Handler: _Lightning_SendMany_Handler, - }, - { - MethodName: "NewAddress", - Handler: _Lightning_NewAddress_Handler, - }, - { - MethodName: "SignMessage", - Handler: _Lightning_SignMessage_Handler, - }, - { - MethodName: "VerifyMessage", - 
Handler: _Lightning_VerifyMessage_Handler, - }, - { - MethodName: "ConnectPeer", - Handler: _Lightning_ConnectPeer_Handler, - }, - { - MethodName: "DisconnectPeer", - Handler: _Lightning_DisconnectPeer_Handler, - }, - { - MethodName: "ListPeers", - Handler: _Lightning_ListPeers_Handler, - }, - { - MethodName: "GetInfo", - Handler: _Lightning_GetInfo_Handler, - }, - { - MethodName: "GetRecoveryInfo", - Handler: _Lightning_GetRecoveryInfo_Handler, - }, - { - MethodName: "PendingChannels", - Handler: _Lightning_PendingChannels_Handler, - }, - { - MethodName: "ListChannels", - Handler: _Lightning_ListChannels_Handler, - }, - { - MethodName: "ClosedChannels", - Handler: _Lightning_ClosedChannels_Handler, - }, - { - MethodName: "OpenChannelSync", - Handler: _Lightning_OpenChannelSync_Handler, - }, - { - MethodName: "FundingStateStep", - Handler: _Lightning_FundingStateStep_Handler, - }, - { - MethodName: "AbandonChannel", - Handler: _Lightning_AbandonChannel_Handler, - }, - { - MethodName: "SendPaymentSync", - Handler: _Lightning_SendPaymentSync_Handler, - }, - { - MethodName: "SendToRouteSync", - Handler: _Lightning_SendToRouteSync_Handler, - }, - { - MethodName: "AddInvoice", - Handler: _Lightning_AddInvoice_Handler, - }, - { - MethodName: "ListInvoices", - Handler: _Lightning_ListInvoices_Handler, - }, - { - MethodName: "LookupInvoice", - Handler: _Lightning_LookupInvoice_Handler, - }, - { - MethodName: "DecodePayReq", - Handler: _Lightning_DecodePayReq_Handler, - }, - { - MethodName: "ListPayments", - Handler: _Lightning_ListPayments_Handler, - }, - { - MethodName: "DeleteAllPayments", - Handler: _Lightning_DeleteAllPayments_Handler, - }, - { - MethodName: "DescribeGraph", - Handler: _Lightning_DescribeGraph_Handler, - }, - { - MethodName: "GetNodeMetrics", - Handler: _Lightning_GetNodeMetrics_Handler, - }, - { - MethodName: "GetChanInfo", - Handler: _Lightning_GetChanInfo_Handler, - }, - { - MethodName: "GetNodeInfo", - Handler: _Lightning_GetNodeInfo_Handler, - }, 
- { - MethodName: "QueryRoutes", - Handler: _Lightning_QueryRoutes_Handler, - }, - { - MethodName: "GetNetworkInfo", - Handler: _Lightning_GetNetworkInfo_Handler, - }, - { - MethodName: "StopDaemon", - Handler: _Lightning_StopDaemon_Handler, - }, - { - MethodName: "DebugLevel", - Handler: _Lightning_DebugLevel_Handler, - }, - { - MethodName: "FeeReport", - Handler: _Lightning_FeeReport_Handler, - }, - { - MethodName: "UpdateChannelPolicy", - Handler: _Lightning_UpdateChannelPolicy_Handler, - }, - { - MethodName: "ForwardingHistory", - Handler: _Lightning_ForwardingHistory_Handler, - }, - { - MethodName: "ExportChannelBackup", - Handler: _Lightning_ExportChannelBackup_Handler, - }, - { - MethodName: "ExportAllChannelBackups", - Handler: _Lightning_ExportAllChannelBackups_Handler, - }, - { - MethodName: "VerifyChanBackup", - Handler: _Lightning_VerifyChanBackup_Handler, - }, - { - MethodName: "RestoreChannelBackups", - Handler: _Lightning_RestoreChannelBackups_Handler, - }, - { - MethodName: "BakeMacaroon", - Handler: _Lightning_BakeMacaroon_Handler, - }, - { - MethodName: "ListMacaroonIDs", - Handler: _Lightning_ListMacaroonIDs_Handler, - }, - { - MethodName: "DeleteMacaroonID", - Handler: _Lightning_DeleteMacaroonID_Handler, - }, - { - MethodName: "ListPermissions", - Handler: _Lightning_ListPermissions_Handler, - }, - { - MethodName: "ReSync", - Handler: _Lightning_ReSync_Handler, - }, - { - MethodName: "StopReSync", - Handler: _Lightning_StopReSync_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "SubscribeTransactions", - Handler: _Lightning_SubscribeTransactions_Handler, - ServerStreams: true, - }, - { - StreamName: "SubscribePeerEvents", - Handler: _Lightning_SubscribePeerEvents_Handler, - ServerStreams: true, - }, - { - StreamName: "SubscribeChannelEvents", - Handler: _Lightning_SubscribeChannelEvents_Handler, - ServerStreams: true, - }, - { - StreamName: "OpenChannel", - Handler: _Lightning_OpenChannel_Handler, - ServerStreams: true, - }, - 
{ - StreamName: "ChannelAcceptor", - Handler: _Lightning_ChannelAcceptor_Handler, - ServerStreams: true, - ClientStreams: true, - }, - { - StreamName: "CloseChannel", - Handler: _Lightning_CloseChannel_Handler, - ServerStreams: true, - }, - { - StreamName: "SendPayment", - Handler: _Lightning_SendPayment_Handler, - ServerStreams: true, - ClientStreams: true, - }, - { - StreamName: "SendToRoute", - Handler: _Lightning_SendToRoute_Handler, - ServerStreams: true, - ClientStreams: true, - }, - { - StreamName: "SubscribeInvoices", - Handler: _Lightning_SubscribeInvoices_Handler, - ServerStreams: true, - }, - { - StreamName: "SubscribeChannelGraph", - Handler: _Lightning_SubscribeChannelGraph_Handler, - ServerStreams: true, - }, - { - StreamName: "SubscribeChannelBackups", - Handler: _Lightning_SubscribeChannelBackups_Handler, - ServerStreams: true, - }, - }, - Metadata: "rpc.proto", -} diff --git a/lnd/lnrpc/rpc.pb.gw.go b/lnd/lnrpc/rpc.pb.gw.go deleted file mode 100644 index e3f63d79..00000000 --- a/lnd/lnrpc/rpc.pb.gw.go +++ /dev/null @@ -1,4417 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. -// source: rpc.proto - -/* -Package lnrpc is a reverse proxy. - -It translates gRPC into RESTful JSON APIs. 
-*/ -package lnrpc - -import ( - "context" - "io" - "net/http" - - "github.com/golang/protobuf/descriptor" - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/status" -) - -// Suppress "imported and not used" errors -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray -var _ = descriptor.ForMessage - -func request_Lightning_WalletBalance_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq WalletBalanceRequest - var metadata runtime.ServerMetadata - - msg, err := client.WalletBalance(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Lightning_WalletBalance_0(ctx context.Context, marshaler runtime.Marshaler, server LightningServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq WalletBalanceRequest - var metadata runtime.ServerMetadata - - msg, err := server.WalletBalance(ctx, &protoReq) - return msg, metadata, err - -} - -func request_Lightning_ChannelBalance_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ChannelBalanceRequest - var metadata runtime.ServerMetadata - - msg, err := client.ChannelBalance(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Lightning_ChannelBalance_0(ctx context.Context, marshaler runtime.Marshaler, server LightningServer, req *http.Request, pathParams 
map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ChannelBalanceRequest - var metadata runtime.ServerMetadata - - msg, err := server.ChannelBalance(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Lightning_GetTransactions_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Lightning_GetTransactions_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq GetTransactionsRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Lightning_GetTransactions_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.GetTransactions(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Lightning_GetTransactions_0(ctx context.Context, marshaler runtime.Marshaler, server LightningServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq GetTransactionsRequest - var metadata runtime.ServerMetadata - - if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_Lightning_GetTransactions_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.GetTransactions(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Lightning_EstimateFee_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Lightning_EstimateFee_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams 
map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq EstimateFeeRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Lightning_EstimateFee_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.EstimateFee(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Lightning_EstimateFee_0(ctx context.Context, marshaler runtime.Marshaler, server LightningServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq EstimateFeeRequest - var metadata runtime.ServerMetadata - - if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_Lightning_EstimateFee_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.EstimateFee(ctx, &protoReq) - return msg, metadata, err - -} - -func request_Lightning_SendCoins_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq SendCoinsRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.SendCoins(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Lightning_SendCoins_0(ctx context.Context, marshaler 
runtime.Marshaler, server LightningServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq SendCoinsRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.SendCoins(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Lightning_ListUnspent_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Lightning_ListUnspent_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ListUnspentRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Lightning_ListUnspent_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.ListUnspent(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Lightning_ListUnspent_0(ctx context.Context, marshaler runtime.Marshaler, server LightningServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ListUnspentRequest - var metadata runtime.ServerMetadata - - if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_Lightning_ListUnspent_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := 
server.ListUnspent(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Lightning_SubscribeTransactions_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Lightning_SubscribeTransactions_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (Lightning_SubscribeTransactionsClient, runtime.ServerMetadata, error) { - var protoReq GetTransactionsRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Lightning_SubscribeTransactions_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - stream, err := client.SubscribeTransactions(ctx, &protoReq) - if err != nil { - return nil, metadata, err - } - header, err := stream.Header() - if err != nil { - return nil, metadata, err - } - metadata.HeaderMD = header - return stream, metadata, nil - -} - -func request_Lightning_SendMany_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq SendManyRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.SendMany(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Lightning_SendMany_0(ctx context.Context, marshaler runtime.Marshaler, server LightningServer, req 
*http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq SendManyRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.SendMany(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Lightning_NewAddress_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Lightning_NewAddress_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq NewAddressRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Lightning_NewAddress_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.NewAddress(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Lightning_NewAddress_0(ctx context.Context, marshaler runtime.Marshaler, server LightningServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq NewAddressRequest - var metadata runtime.ServerMetadata - - if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_Lightning_NewAddress_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.NewAddress(ctx, &protoReq) - return msg, metadata, 
err - -} - -func request_Lightning_SignMessage_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq SignMessageRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.SignMessage(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Lightning_SignMessage_0(ctx context.Context, marshaler runtime.Marshaler, server LightningServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq SignMessageRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.SignMessage(ctx, &protoReq) - return msg, metadata, err - -} - -func request_Lightning_VerifyMessage_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq VerifyMessageRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != 
io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.VerifyMessage(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Lightning_VerifyMessage_0(ctx context.Context, marshaler runtime.Marshaler, server LightningServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq VerifyMessageRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.VerifyMessage(ctx, &protoReq) - return msg, metadata, err - -} - -func request_Lightning_ConnectPeer_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ConnectPeerRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.ConnectPeer(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Lightning_ConnectPeer_0(ctx context.Context, marshaler runtime.Marshaler, server LightningServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ConnectPeerRequest - var metadata runtime.ServerMetadata - 
- newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.ConnectPeer(ctx, &protoReq) - return msg, metadata, err - -} - -func request_Lightning_DisconnectPeer_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq DisconnectPeerRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["pub_key"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "pub_key") - } - - protoReq.PubKey, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "pub_key", err) - } - - msg, err := client.DisconnectPeer(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Lightning_DisconnectPeer_0(ctx context.Context, marshaler runtime.Marshaler, server LightningServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq DisconnectPeerRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["pub_key"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "pub_key") - } - - protoReq.PubKey, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "pub_key", err) - } - - msg, err := server.DisconnectPeer(ctx, &protoReq) - 
return msg, metadata, err - -} - -var ( - filter_Lightning_ListPeers_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Lightning_ListPeers_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ListPeersRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Lightning_ListPeers_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.ListPeers(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Lightning_ListPeers_0(ctx context.Context, marshaler runtime.Marshaler, server LightningServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ListPeersRequest - var metadata runtime.ServerMetadata - - if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_Lightning_ListPeers_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.ListPeers(ctx, &protoReq) - return msg, metadata, err - -} - -func request_Lightning_SubscribePeerEvents_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (Lightning_SubscribePeerEventsClient, runtime.ServerMetadata, error) { - var protoReq PeerEventSubscription - var metadata runtime.ServerMetadata - - stream, err := client.SubscribePeerEvents(ctx, &protoReq) - if err != nil { - return nil, metadata, err - } - header, err := stream.Header() - if err != nil { - return nil, metadata, err - } - metadata.HeaderMD = 
header - return stream, metadata, nil - -} - -func request_Lightning_GetInfo_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq GetInfoRequest - var metadata runtime.ServerMetadata - - msg, err := client.GetInfo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Lightning_GetInfo_0(ctx context.Context, marshaler runtime.Marshaler, server LightningServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq GetInfoRequest - var metadata runtime.ServerMetadata - - msg, err := server.GetInfo(ctx, &protoReq) - return msg, metadata, err - -} - -func request_Lightning_GetRecoveryInfo_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq GetRecoveryInfoRequest - var metadata runtime.ServerMetadata - - msg, err := client.GetRecoveryInfo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Lightning_GetRecoveryInfo_0(ctx context.Context, marshaler runtime.Marshaler, server LightningServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq GetRecoveryInfoRequest - var metadata runtime.ServerMetadata - - msg, err := server.GetRecoveryInfo(ctx, &protoReq) - return msg, metadata, err - -} - -func request_Lightning_PendingChannels_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq PendingChannelsRequest - var metadata runtime.ServerMetadata - - msg, err := client.PendingChannels(ctx, 
&protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Lightning_PendingChannels_0(ctx context.Context, marshaler runtime.Marshaler, server LightningServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq PendingChannelsRequest - var metadata runtime.ServerMetadata - - msg, err := server.PendingChannels(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Lightning_ListChannels_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Lightning_ListChannels_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ListChannelsRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Lightning_ListChannels_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.ListChannels(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Lightning_ListChannels_0(ctx context.Context, marshaler runtime.Marshaler, server LightningServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ListChannelsRequest - var metadata runtime.ServerMetadata - - if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_Lightning_ListChannels_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.ListChannels(ctx, &protoReq) - return msg, metadata, err - -} - -func 
request_Lightning_SubscribeChannelEvents_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (Lightning_SubscribeChannelEventsClient, runtime.ServerMetadata, error) { - var protoReq ChannelEventSubscription - var metadata runtime.ServerMetadata - - stream, err := client.SubscribeChannelEvents(ctx, &protoReq) - if err != nil { - return nil, metadata, err - } - header, err := stream.Header() - if err != nil { - return nil, metadata, err - } - metadata.HeaderMD = header - return stream, metadata, nil - -} - -var ( - filter_Lightning_ClosedChannels_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Lightning_ClosedChannels_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ClosedChannelsRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Lightning_ClosedChannels_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.ClosedChannels(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Lightning_ClosedChannels_0(ctx context.Context, marshaler runtime.Marshaler, server LightningServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ClosedChannelsRequest - var metadata runtime.ServerMetadata - - if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_Lightning_ClosedChannels_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := 
server.ClosedChannels(ctx, &protoReq) - return msg, metadata, err - -} - -func request_Lightning_OpenChannelSync_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq OpenChannelRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.OpenChannelSync(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Lightning_OpenChannelSync_0(ctx context.Context, marshaler runtime.Marshaler, server LightningServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq OpenChannelRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.OpenChannelSync(ctx, &protoReq) - return msg, metadata, err - -} - -func request_Lightning_OpenChannel_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (Lightning_OpenChannelClient, runtime.ServerMetadata, error) { - var protoReq OpenChannelRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - 
} - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - stream, err := client.OpenChannel(ctx, &protoReq) - if err != nil { - return nil, metadata, err - } - header, err := stream.Header() - if err != nil { - return nil, metadata, err - } - metadata.HeaderMD = header - return stream, metadata, nil - -} - -func request_Lightning_FundingStateStep_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq FundingTransitionMsg - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.FundingStateStep(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Lightning_FundingStateStep_0(ctx context.Context, marshaler runtime.Marshaler, server LightningServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq FundingTransitionMsg - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.FundingStateStep(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Lightning_CloseChannel_0 = &utilities.DoubleArray{Encoding: 
map[string]int{"channel_point": 0, "funding_txid_str": 1, "output_index": 2}, Base: []int{1, 1, 1, 2, 0, 0}, Check: []int{0, 1, 2, 2, 3, 4}} -) - -func request_Lightning_CloseChannel_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (Lightning_CloseChannelClient, runtime.ServerMetadata, error) { - var protoReq CloseChannelRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["channel_point.funding_txid_str"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_point.funding_txid_str") - } - - err = runtime.PopulateFieldFromPath(&protoReq, "channel_point.funding_txid_str", val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_point.funding_txid_str", err) - } - - val, ok = pathParams["channel_point.output_index"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_point.output_index") - } - - err = runtime.PopulateFieldFromPath(&protoReq, "channel_point.output_index", val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_point.output_index", err) - } - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Lightning_CloseChannel_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - stream, err := client.CloseChannel(ctx, &protoReq) - if err != nil { - return nil, metadata, err - } - header, err := stream.Header() - if err != nil { - return nil, metadata, err - } - metadata.HeaderMD = header - return stream, metadata, nil - -} - -var ( - filter_Lightning_AbandonChannel_0 = 
&utilities.DoubleArray{Encoding: map[string]int{"channel_point": 0, "funding_txid_str": 1, "output_index": 2}, Base: []int{1, 1, 1, 2, 0, 0}, Check: []int{0, 1, 2, 2, 3, 4}} -) - -func request_Lightning_AbandonChannel_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AbandonChannelRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["channel_point.funding_txid_str"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_point.funding_txid_str") - } - - err = runtime.PopulateFieldFromPath(&protoReq, "channel_point.funding_txid_str", val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_point.funding_txid_str", err) - } - - val, ok = pathParams["channel_point.output_index"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_point.output_index") - } - - err = runtime.PopulateFieldFromPath(&protoReq, "channel_point.output_index", val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_point.output_index", err) - } - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Lightning_AbandonChannel_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.AbandonChannel(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Lightning_AbandonChannel_0(ctx context.Context, marshaler runtime.Marshaler, server LightningServer, 
req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AbandonChannelRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["channel_point.funding_txid_str"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_point.funding_txid_str") - } - - err = runtime.PopulateFieldFromPath(&protoReq, "channel_point.funding_txid_str", val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_point.funding_txid_str", err) - } - - val, ok = pathParams["channel_point.output_index"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_point.output_index") - } - - err = runtime.PopulateFieldFromPath(&protoReq, "channel_point.output_index", val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_point.output_index", err) - } - - if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_Lightning_AbandonChannel_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.AbandonChannel(ctx, &protoReq) - return msg, metadata, err - -} - -func request_Lightning_SendPaymentSync_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq SendRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", 
err) - } - - msg, err := client.SendPaymentSync(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Lightning_SendPaymentSync_0(ctx context.Context, marshaler runtime.Marshaler, server LightningServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq SendRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.SendPaymentSync(ctx, &protoReq) - return msg, metadata, err - -} - -func request_Lightning_SendToRouteSync_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq SendToRouteRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.SendToRouteSync(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Lightning_SendToRouteSync_0(ctx context.Context, marshaler runtime.Marshaler, server LightningServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq SendToRouteRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr 
!= nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.SendToRouteSync(ctx, &protoReq) - return msg, metadata, err - -} - -func request_Lightning_AddInvoice_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq Invoice - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.AddInvoice(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Lightning_AddInvoice_0(ctx context.Context, marshaler runtime.Marshaler, server LightningServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq Invoice - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.AddInvoice(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Lightning_ListInvoices_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Lightning_ListInvoices_0(ctx context.Context, marshaler 
runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ListInvoiceRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Lightning_ListInvoices_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.ListInvoices(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Lightning_ListInvoices_0(ctx context.Context, marshaler runtime.Marshaler, server LightningServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ListInvoiceRequest - var metadata runtime.ServerMetadata - - if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_Lightning_ListInvoices_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.ListInvoices(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Lightning_LookupInvoice_0 = &utilities.DoubleArray{Encoding: map[string]int{"r_hash_str": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} -) - -func request_Lightning_LookupInvoice_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq PaymentHash - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["r_hash_str"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "r_hash_str") - } - - protoReq.RHashStr, err = runtime.String(val) - - if err != nil { - return nil, metadata, 
status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "r_hash_str", err) - } - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Lightning_LookupInvoice_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.LookupInvoice(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Lightning_LookupInvoice_0(ctx context.Context, marshaler runtime.Marshaler, server LightningServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq PaymentHash - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["r_hash_str"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "r_hash_str") - } - - protoReq.RHashStr, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "r_hash_str", err) - } - - if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_Lightning_LookupInvoice_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.LookupInvoice(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Lightning_SubscribeInvoices_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Lightning_SubscribeInvoices_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (Lightning_SubscribeInvoicesClient, runtime.ServerMetadata, error) { - var protoReq InvoiceSubscription - var metadata runtime.ServerMetadata - - 
if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Lightning_SubscribeInvoices_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - stream, err := client.SubscribeInvoices(ctx, &protoReq) - if err != nil { - return nil, metadata, err - } - header, err := stream.Header() - if err != nil { - return nil, metadata, err - } - metadata.HeaderMD = header - return stream, metadata, nil - -} - -func request_Lightning_DecodePayReq_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq PayReqString - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["pay_req"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "pay_req") - } - - protoReq.PayReq, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "pay_req", err) - } - - msg, err := client.DecodePayReq(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Lightning_DecodePayReq_0(ctx context.Context, marshaler runtime.Marshaler, server LightningServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq PayReqString - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["pay_req"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "pay_req") - } - - protoReq.PayReq, err = runtime.String(val) - - if err != nil { - return nil, metadata, 
status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "pay_req", err) - } - - msg, err := server.DecodePayReq(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Lightning_ListPayments_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Lightning_ListPayments_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ListPaymentsRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Lightning_ListPayments_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.ListPayments(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Lightning_ListPayments_0(ctx context.Context, marshaler runtime.Marshaler, server LightningServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ListPaymentsRequest - var metadata runtime.ServerMetadata - - if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_Lightning_ListPayments_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.ListPayments(ctx, &protoReq) - return msg, metadata, err - -} - -func request_Lightning_DeleteAllPayments_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq DeleteAllPaymentsRequest - var metadata runtime.ServerMetadata - - msg, err := client.DeleteAllPayments(ctx, &protoReq, 
grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Lightning_DeleteAllPayments_0(ctx context.Context, marshaler runtime.Marshaler, server LightningServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq DeleteAllPaymentsRequest - var metadata runtime.ServerMetadata - - msg, err := server.DeleteAllPayments(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Lightning_DescribeGraph_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Lightning_DescribeGraph_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ChannelGraphRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Lightning_DescribeGraph_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.DescribeGraph(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Lightning_DescribeGraph_0(ctx context.Context, marshaler runtime.Marshaler, server LightningServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ChannelGraphRequest - var metadata runtime.ServerMetadata - - if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_Lightning_DescribeGraph_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.DescribeGraph(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - 
filter_Lightning_GetNodeMetrics_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Lightning_GetNodeMetrics_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq NodeMetricsRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Lightning_GetNodeMetrics_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.GetNodeMetrics(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Lightning_GetNodeMetrics_0(ctx context.Context, marshaler runtime.Marshaler, server LightningServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq NodeMetricsRequest - var metadata runtime.ServerMetadata - - if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_Lightning_GetNodeMetrics_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.GetNodeMetrics(ctx, &protoReq) - return msg, metadata, err - -} - -func request_Lightning_GetChanInfo_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ChanInfoRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["chan_id"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "chan_id") - } - - protoReq.ChanId, err = runtime.Uint64(val) - - if 
err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "chan_id", err) - } - - msg, err := client.GetChanInfo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Lightning_GetChanInfo_0(ctx context.Context, marshaler runtime.Marshaler, server LightningServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ChanInfoRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["chan_id"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "chan_id") - } - - protoReq.ChanId, err = runtime.Uint64(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "chan_id", err) - } - - msg, err := server.GetChanInfo(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Lightning_GetNodeInfo_0 = &utilities.DoubleArray{Encoding: map[string]int{"pub_key": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} -) - -func request_Lightning_GetNodeInfo_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq NodeInfoRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["pub_key"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "pub_key") - } - - protoReq.PubKey, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "pub_key", err) - } - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, 
"%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Lightning_GetNodeInfo_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.GetNodeInfo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Lightning_GetNodeInfo_0(ctx context.Context, marshaler runtime.Marshaler, server LightningServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq NodeInfoRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["pub_key"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "pub_key") - } - - protoReq.PubKey, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "pub_key", err) - } - - if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_Lightning_GetNodeInfo_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.GetNodeInfo(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Lightning_QueryRoutes_0 = &utilities.DoubleArray{Encoding: map[string]int{"pub_key": 0, "amt": 1}, Base: []int{1, 1, 2, 0, 0}, Check: []int{0, 1, 1, 2, 3}} -) - -func request_Lightning_QueryRoutes_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryRoutesRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["pub_key"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "pub_key") - } - - 
protoReq.PubKey, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "pub_key", err) - } - - val, ok = pathParams["amt"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "amt") - } - - protoReq.Amt, err = runtime.Int64(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "amt", err) - } - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Lightning_QueryRoutes_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.QueryRoutes(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Lightning_QueryRoutes_0(ctx context.Context, marshaler runtime.Marshaler, server LightningServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryRoutesRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["pub_key"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "pub_key") - } - - protoReq.PubKey, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "pub_key", err) - } - - val, ok = pathParams["amt"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "amt") - } - - protoReq.Amt, err = runtime.Int64(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "amt", err) - } - - if err := 
runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_Lightning_QueryRoutes_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.QueryRoutes(ctx, &protoReq) - return msg, metadata, err - -} - -func request_Lightning_GetNetworkInfo_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq NetworkInfoRequest - var metadata runtime.ServerMetadata - - msg, err := client.GetNetworkInfo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Lightning_GetNetworkInfo_0(ctx context.Context, marshaler runtime.Marshaler, server LightningServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq NetworkInfoRequest - var metadata runtime.ServerMetadata - - msg, err := server.GetNetworkInfo(ctx, &protoReq) - return msg, metadata, err - -} - -func request_Lightning_StopDaemon_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq StopRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.StopDaemon(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Lightning_StopDaemon_0(ctx context.Context, marshaler runtime.Marshaler, server LightningServer, req *http.Request, pathParams 
map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq StopRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.StopDaemon(ctx, &protoReq) - return msg, metadata, err - -} - -func request_Lightning_SubscribeChannelGraph_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (Lightning_SubscribeChannelGraphClient, runtime.ServerMetadata, error) { - var protoReq GraphTopologySubscription - var metadata runtime.ServerMetadata - - stream, err := client.SubscribeChannelGraph(ctx, &protoReq) - if err != nil { - return nil, metadata, err - } - header, err := stream.Header() - if err != nil { - return nil, metadata, err - } - metadata.HeaderMD = header - return stream, metadata, nil - -} - -func request_Lightning_DebugLevel_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq DebugLevelRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.DebugLevel(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Lightning_DebugLevel_0(ctx context.Context, marshaler runtime.Marshaler, server 
LightningServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq DebugLevelRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.DebugLevel(ctx, &protoReq) - return msg, metadata, err - -} - -func request_Lightning_FeeReport_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq FeeReportRequest - var metadata runtime.ServerMetadata - - msg, err := client.FeeReport(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Lightning_FeeReport_0(ctx context.Context, marshaler runtime.Marshaler, server LightningServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq FeeReportRequest - var metadata runtime.ServerMetadata - - msg, err := server.FeeReport(ctx, &protoReq) - return msg, metadata, err - -} - -func request_Lightning_UpdateChannelPolicy_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq PolicyUpdateRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, 
"%v", err) - } - - msg, err := client.UpdateChannelPolicy(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Lightning_UpdateChannelPolicy_0(ctx context.Context, marshaler runtime.Marshaler, server LightningServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq PolicyUpdateRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.UpdateChannelPolicy(ctx, &protoReq) - return msg, metadata, err - -} - -func request_Lightning_ForwardingHistory_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ForwardingHistoryRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.ForwardingHistory(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Lightning_ForwardingHistory_0(ctx context.Context, marshaler runtime.Marshaler, server LightningServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ForwardingHistoryRequest - var metadata runtime.ServerMetadata - - newReader, berr := 
utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.ForwardingHistory(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Lightning_ExportChannelBackup_0 = &utilities.DoubleArray{Encoding: map[string]int{"chan_point": 0, "funding_txid_str": 1, "output_index": 2}, Base: []int{1, 1, 1, 2, 0, 0}, Check: []int{0, 1, 2, 2, 3, 4}} -) - -func request_Lightning_ExportChannelBackup_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ExportChannelBackupRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["chan_point.funding_txid_str"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "chan_point.funding_txid_str") - } - - err = runtime.PopulateFieldFromPath(&protoReq, "chan_point.funding_txid_str", val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "chan_point.funding_txid_str", err) - } - - val, ok = pathParams["chan_point.output_index"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "chan_point.output_index") - } - - err = runtime.PopulateFieldFromPath(&protoReq, "chan_point.output_index", val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "chan_point.output_index", err) - } - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := 
runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Lightning_ExportChannelBackup_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.ExportChannelBackup(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Lightning_ExportChannelBackup_0(ctx context.Context, marshaler runtime.Marshaler, server LightningServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ExportChannelBackupRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["chan_point.funding_txid_str"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "chan_point.funding_txid_str") - } - - err = runtime.PopulateFieldFromPath(&protoReq, "chan_point.funding_txid_str", val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "chan_point.funding_txid_str", err) - } - - val, ok = pathParams["chan_point.output_index"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "chan_point.output_index") - } - - err = runtime.PopulateFieldFromPath(&protoReq, "chan_point.output_index", val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "chan_point.output_index", err) - } - - if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_Lightning_ExportChannelBackup_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.ExportChannelBackup(ctx, &protoReq) - return msg, metadata, err - -} - -func request_Lightning_ExportAllChannelBackups_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req 
*http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ChanBackupExportRequest - var metadata runtime.ServerMetadata - - msg, err := client.ExportAllChannelBackups(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Lightning_ExportAllChannelBackups_0(ctx context.Context, marshaler runtime.Marshaler, server LightningServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ChanBackupExportRequest - var metadata runtime.ServerMetadata - - msg, err := server.ExportAllChannelBackups(ctx, &protoReq) - return msg, metadata, err - -} - -func request_Lightning_VerifyChanBackup_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ChanBackupSnapshot - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.VerifyChanBackup(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Lightning_VerifyChanBackup_0(ctx context.Context, marshaler runtime.Marshaler, server LightningServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ChanBackupSnapshot - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := 
marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.VerifyChanBackup(ctx, &protoReq) - return msg, metadata, err - -} - -func request_Lightning_RestoreChannelBackups_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq RestoreChanBackupRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.RestoreChannelBackups(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Lightning_RestoreChannelBackups_0(ctx context.Context, marshaler runtime.Marshaler, server LightningServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq RestoreChanBackupRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.RestoreChannelBackups(ctx, &protoReq) - return msg, metadata, err - -} - -func request_Lightning_SubscribeChannelBackups_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (Lightning_SubscribeChannelBackupsClient, 
runtime.ServerMetadata, error) { - var protoReq ChannelBackupSubscription - var metadata runtime.ServerMetadata - - stream, err := client.SubscribeChannelBackups(ctx, &protoReq) - if err != nil { - return nil, metadata, err - } - header, err := stream.Header() - if err != nil { - return nil, metadata, err - } - metadata.HeaderMD = header - return stream, metadata, nil - -} - -func request_Lightning_BakeMacaroon_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq BakeMacaroonRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.BakeMacaroon(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Lightning_BakeMacaroon_0(ctx context.Context, marshaler runtime.Marshaler, server LightningServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq BakeMacaroonRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.BakeMacaroon(ctx, &protoReq) - return msg, metadata, err - -} - -func request_Lightning_ListMacaroonIDs_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams 
map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ListMacaroonIDsRequest - var metadata runtime.ServerMetadata - - msg, err := client.ListMacaroonIDs(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Lightning_ListMacaroonIDs_0(ctx context.Context, marshaler runtime.Marshaler, server LightningServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ListMacaroonIDsRequest - var metadata runtime.ServerMetadata - - msg, err := server.ListMacaroonIDs(ctx, &protoReq) - return msg, metadata, err - -} - -func request_Lightning_DeleteMacaroonID_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq DeleteMacaroonIDRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["root_key_id"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "root_key_id") - } - - protoReq.RootKeyId, err = runtime.Uint64(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "root_key_id", err) - } - - msg, err := client.DeleteMacaroonID(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Lightning_DeleteMacaroonID_0(ctx context.Context, marshaler runtime.Marshaler, server LightningServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq DeleteMacaroonIDRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["root_key_id"] - if !ok { - return nil, metadata, 
status.Errorf(codes.InvalidArgument, "missing parameter %s", "root_key_id") - } - - protoReq.RootKeyId, err = runtime.Uint64(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "root_key_id", err) - } - - msg, err := server.DeleteMacaroonID(ctx, &protoReq) - return msg, metadata, err - -} - -func request_Lightning_ListPermissions_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ListPermissionsRequest - var metadata runtime.ServerMetadata - - msg, err := client.ListPermissions(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Lightning_ListPermissions_0(ctx context.Context, marshaler runtime.Marshaler, server LightningServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ListPermissionsRequest - var metadata runtime.ServerMetadata - - msg, err := server.ListPermissions(ctx, &protoReq) - return msg, metadata, err - -} - -// RegisterLightningHandlerServer registers the http handlers for service Lightning to "mux". -// UnaryRPC :call LightningServer directly. -// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. 
-func RegisterLightningHandlerServer(ctx context.Context, mux *runtime.ServeMux, server LightningServer) error { - - mux.Handle("GET", pattern_Lightning_WalletBalance_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Lightning_WalletBalance_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_WalletBalance_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Lightning_ChannelBalance_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Lightning_ChannelBalance_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_ChannelBalance_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Lightning_GetTransactions_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Lightning_GetTransactions_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_GetTransactions_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Lightning_EstimateFee_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Lightning_EstimateFee_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_EstimateFee_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_Lightning_SendCoins_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Lightning_SendCoins_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_SendCoins_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Lightning_ListUnspent_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Lightning_ListUnspent_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_ListUnspent_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Lightning_SubscribeTransactions_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - err := status.Error(codes.Unimplemented, "streaming calls are not yet supported in the in-process transport") - _, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - }) - - mux.Handle("POST", pattern_Lightning_SendMany_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Lightning_SendMany_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_SendMany_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Lightning_NewAddress_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Lightning_NewAddress_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_NewAddress_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_Lightning_SignMessage_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Lightning_SignMessage_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_SignMessage_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Lightning_VerifyMessage_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Lightning_VerifyMessage_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_VerifyMessage_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_Lightning_ConnectPeer_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Lightning_ConnectPeer_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_ConnectPeer_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("DELETE", pattern_Lightning_DisconnectPeer_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Lightning_DisconnectPeer_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_DisconnectPeer_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Lightning_ListPeers_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Lightning_ListPeers_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_ListPeers_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Lightning_SubscribePeerEvents_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - err := status.Error(codes.Unimplemented, "streaming calls are not yet supported in the in-process transport") - _, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - }) - - mux.Handle("GET", pattern_Lightning_GetInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Lightning_GetInfo_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_GetInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Lightning_GetRecoveryInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Lightning_GetRecoveryInfo_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_GetRecoveryInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Lightning_PendingChannels_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Lightning_PendingChannels_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_PendingChannels_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Lightning_ListChannels_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Lightning_ListChannels_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_ListChannels_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Lightning_SubscribeChannelEvents_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - err := status.Error(codes.Unimplemented, "streaming calls are not yet supported in the in-process transport") - _, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - }) - - mux.Handle("GET", pattern_Lightning_ClosedChannels_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Lightning_ClosedChannels_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_ClosedChannels_0(ctx, mux, outboundMarshaler, w, req, resp, 
mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Lightning_OpenChannelSync_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Lightning_OpenChannelSync_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_OpenChannelSync_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Lightning_OpenChannel_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - err := status.Error(codes.Unimplemented, "streaming calls are not yet supported in the in-process transport") - _, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - }) - - mux.Handle("POST", pattern_Lightning_FundingStateStep_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Lightning_FundingStateStep_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_FundingStateStep_0(ctx, mux, 
outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("DELETE", pattern_Lightning_CloseChannel_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - err := status.Error(codes.Unimplemented, "streaming calls are not yet supported in the in-process transport") - _, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - }) - - mux.Handle("DELETE", pattern_Lightning_AbandonChannel_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Lightning_AbandonChannel_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_AbandonChannel_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_Lightning_SendPaymentSync_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Lightning_SendPaymentSync_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_SendPaymentSync_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Lightning_SendToRouteSync_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Lightning_SendToRouteSync_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_SendToRouteSync_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_Lightning_AddInvoice_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Lightning_AddInvoice_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_AddInvoice_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Lightning_ListInvoices_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Lightning_ListInvoices_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_ListInvoices_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Lightning_LookupInvoice_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Lightning_LookupInvoice_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_LookupInvoice_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Lightning_SubscribeInvoices_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - err := status.Error(codes.Unimplemented, "streaming calls are not yet supported in the in-process transport") - _, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - }) - - mux.Handle("GET", pattern_Lightning_DecodePayReq_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Lightning_DecodePayReq_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_DecodePayReq_0(ctx, mux, outboundMarshaler, w, req, resp, 
mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Lightning_ListPayments_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Lightning_ListPayments_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_ListPayments_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("DELETE", pattern_Lightning_DeleteAllPayments_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Lightning_DeleteAllPayments_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_DeleteAllPayments_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Lightning_DescribeGraph_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Lightning_DescribeGraph_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_DescribeGraph_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Lightning_GetNodeMetrics_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Lightning_GetNodeMetrics_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_GetNodeMetrics_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Lightning_GetChanInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Lightning_GetChanInfo_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_GetChanInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Lightning_GetNodeInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Lightning_GetNodeInfo_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_GetNodeInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Lightning_QueryRoutes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Lightning_QueryRoutes_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_QueryRoutes_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Lightning_GetNetworkInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Lightning_GetNetworkInfo_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_GetNetworkInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_Lightning_StopDaemon_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Lightning_StopDaemon_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_StopDaemon_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Lightning_SubscribeChannelGraph_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - err := status.Error(codes.Unimplemented, "streaming calls are not yet supported in the in-process transport") - _, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - }) - - mux.Handle("POST", pattern_Lightning_DebugLevel_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Lightning_DebugLevel_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_DebugLevel_0(ctx, mux, outboundMarshaler, w, req, resp, 
mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Lightning_FeeReport_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Lightning_FeeReport_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_FeeReport_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Lightning_UpdateChannelPolicy_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Lightning_UpdateChannelPolicy_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_UpdateChannelPolicy_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_Lightning_ForwardingHistory_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Lightning_ForwardingHistory_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_ForwardingHistory_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Lightning_ExportChannelBackup_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Lightning_ExportChannelBackup_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_ExportChannelBackup_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Lightning_ExportAllChannelBackups_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Lightning_ExportAllChannelBackups_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_ExportAllChannelBackups_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Lightning_VerifyChanBackup_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Lightning_VerifyChanBackup_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_VerifyChanBackup_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_Lightning_RestoreChannelBackups_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Lightning_RestoreChannelBackups_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_RestoreChannelBackups_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Lightning_SubscribeChannelBackups_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - err := status.Error(codes.Unimplemented, "streaming calls are not yet supported in the in-process transport") - _, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - }) - - mux.Handle("POST", pattern_Lightning_BakeMacaroon_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Lightning_BakeMacaroon_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_BakeMacaroon_0(ctx, mux, outboundMarshaler, w, 
req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Lightning_ListMacaroonIDs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Lightning_ListMacaroonIDs_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_ListMacaroonIDs_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("DELETE", pattern_Lightning_DeleteMacaroonID_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Lightning_DeleteMacaroonID_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_DeleteMacaroonID_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Lightning_ListPermissions_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Lightning_ListPermissions_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_ListPermissions_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -// RegisterLightningHandlerFromEndpoint is same as RegisterLightningHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterLightningHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) - if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterLightningHandler(ctx, mux, conn) -} - -// RegisterLightningHandler registers the http handlers for service Lightning to "mux". -// The handlers forward requests to the grpc endpoint over "conn". 
-func RegisterLightningHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterLightningHandlerClient(ctx, mux, NewLightningClient(conn)) -} - -// RegisterLightningHandlerClient registers the http handlers for service Lightning -// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "LightningClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "LightningClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "LightningClient" to call the correct interceptors. -func RegisterLightningHandlerClient(ctx context.Context, mux *runtime.ServeMux, client LightningClient) error { - - mux.Handle("GET", pattern_Lightning_WalletBalance_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_WalletBalance_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_WalletBalance_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Lightning_ChannelBalance_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_ChannelBalance_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_ChannelBalance_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Lightning_GetTransactions_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_GetTransactions_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_GetTransactions_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Lightning_EstimateFee_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_EstimateFee_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_EstimateFee_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Lightning_SendCoins_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_SendCoins_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_SendCoins_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Lightning_ListUnspent_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_ListUnspent_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_ListUnspent_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Lightning_SubscribeTransactions_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_SubscribeTransactions_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_SubscribeTransactions_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_Lightning_SendMany_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_SendMany_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_SendMany_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Lightning_NewAddress_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_NewAddress_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_NewAddress_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_Lightning_SignMessage_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_SignMessage_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_SignMessage_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Lightning_VerifyMessage_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_VerifyMessage_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_VerifyMessage_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_Lightning_ConnectPeer_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_ConnectPeer_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_ConnectPeer_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("DELETE", pattern_Lightning_DisconnectPeer_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_DisconnectPeer_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_DisconnectPeer_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Lightning_ListPeers_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_ListPeers_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_ListPeers_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Lightning_SubscribePeerEvents_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_SubscribePeerEvents_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_SubscribePeerEvents_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Lightning_GetInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_GetInfo_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_GetInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Lightning_GetRecoveryInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_GetRecoveryInfo_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_GetRecoveryInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Lightning_PendingChannels_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_PendingChannels_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_PendingChannels_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Lightning_ListChannels_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_ListChannels_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_ListChannels_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Lightning_SubscribeChannelEvents_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_SubscribeChannelEvents_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_SubscribeChannelEvents_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Lightning_ClosedChannels_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_ClosedChannels_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_ClosedChannels_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_Lightning_OpenChannelSync_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_OpenChannelSync_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_OpenChannelSync_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Lightning_OpenChannel_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_OpenChannel_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_OpenChannel_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_Lightning_FundingStateStep_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_FundingStateStep_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_FundingStateStep_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("DELETE", pattern_Lightning_CloseChannel_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_CloseChannel_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_CloseChannel_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("DELETE", pattern_Lightning_AbandonChannel_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_AbandonChannel_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_AbandonChannel_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Lightning_SendPaymentSync_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_SendPaymentSync_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_SendPaymentSync_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_Lightning_SendToRouteSync_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_SendToRouteSync_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_SendToRouteSync_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Lightning_AddInvoice_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_AddInvoice_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_AddInvoice_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Lightning_ListInvoices_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_ListInvoices_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_ListInvoices_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Lightning_LookupInvoice_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_LookupInvoice_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_LookupInvoice_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Lightning_SubscribeInvoices_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_SubscribeInvoices_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_SubscribeInvoices_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Lightning_DecodePayReq_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_DecodePayReq_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_DecodePayReq_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Lightning_ListPayments_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_ListPayments_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_ListPayments_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("DELETE", pattern_Lightning_DeleteAllPayments_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_DeleteAllPayments_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_DeleteAllPayments_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Lightning_DescribeGraph_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_DescribeGraph_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_DescribeGraph_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Lightning_GetNodeMetrics_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_GetNodeMetrics_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_GetNodeMetrics_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Lightning_GetChanInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_GetChanInfo_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_GetChanInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Lightning_GetNodeInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_GetNodeInfo_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_GetNodeInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Lightning_QueryRoutes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_QueryRoutes_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_QueryRoutes_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Lightning_GetNetworkInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_GetNetworkInfo_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_GetNetworkInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_Lightning_StopDaemon_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_StopDaemon_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_StopDaemon_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Lightning_SubscribeChannelGraph_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_SubscribeChannelGraph_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_SubscribeChannelGraph_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_Lightning_DebugLevel_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_DebugLevel_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_DebugLevel_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Lightning_FeeReport_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_FeeReport_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_FeeReport_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_Lightning_UpdateChannelPolicy_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_UpdateChannelPolicy_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_UpdateChannelPolicy_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Lightning_ForwardingHistory_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_ForwardingHistory_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_ForwardingHistory_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Lightning_ExportChannelBackup_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_ExportChannelBackup_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_ExportChannelBackup_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Lightning_ExportAllChannelBackups_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_ExportAllChannelBackups_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_ExportAllChannelBackups_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_Lightning_VerifyChanBackup_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_VerifyChanBackup_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_VerifyChanBackup_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Lightning_RestoreChannelBackups_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_RestoreChannelBackups_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_RestoreChannelBackups_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Lightning_SubscribeChannelBackups_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_SubscribeChannelBackups_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_SubscribeChannelBackups_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Lightning_BakeMacaroon_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_BakeMacaroon_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_BakeMacaroon_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Lightning_ListMacaroonIDs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_ListMacaroonIDs_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_ListMacaroonIDs_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("DELETE", pattern_Lightning_DeleteMacaroonID_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_DeleteMacaroonID_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_DeleteMacaroonID_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Lightning_ListPermissions_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Lightning_ListPermissions_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Lightning_ListPermissions_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -var ( - pattern_Lightning_WalletBalance_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "balance", "blockchain"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Lightning_ChannelBalance_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "balance", "channels"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Lightning_GetTransactions_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "transactions"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Lightning_EstimateFee_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "transactions", "fee"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Lightning_SendCoins_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "transactions"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Lightning_ListUnspent_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "utxos"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Lightning_SubscribeTransactions_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 
2, 1, 2, 2}, []string{"v1", "transactions", "subscribe"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Lightning_SendMany_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "transactions", "many"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Lightning_NewAddress_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "newaddress"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Lightning_SignMessage_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "signmessage"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Lightning_VerifyMessage_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "verifymessage"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Lightning_ConnectPeer_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "peers"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Lightning_DisconnectPeer_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2}, []string{"v1", "peers", "pub_key"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Lightning_ListPeers_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "peers"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Lightning_SubscribePeerEvents_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "peers", "subscribe"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Lightning_GetInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "getinfo"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Lightning_GetRecoveryInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "getrecoveryinfo"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Lightning_PendingChannels_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "channels", "pending"}, "", 
runtime.AssumeColonVerbOpt(true))) - - pattern_Lightning_ListChannels_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "channels"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Lightning_SubscribeChannelEvents_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "channels", "subscribe"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Lightning_ClosedChannels_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "channels", "closed"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Lightning_OpenChannelSync_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "channels"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Lightning_OpenChannel_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "channels", "stream"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Lightning_FundingStateStep_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "funding", "step"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Lightning_CloseChannel_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2, 1, 0, 4, 1, 5, 3}, []string{"v1", "channels", "channel_point.funding_txid_str", "channel_point.output_index"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Lightning_AbandonChannel_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4}, []string{"v1", "channels", "abandon", "channel_point.funding_txid_str", "channel_point.output_index"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Lightning_SendPaymentSync_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "channels", "transactions"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Lightning_SendToRouteSync_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v1", 
"channels", "transactions", "route"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Lightning_AddInvoice_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "invoices"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Lightning_ListInvoices_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "invoices"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Lightning_LookupInvoice_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2}, []string{"v1", "invoice", "r_hash_str"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Lightning_SubscribeInvoices_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "invoices", "subscribe"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Lightning_DecodePayReq_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2}, []string{"v1", "payreq", "pay_req"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Lightning_ListPayments_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "payments"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Lightning_DeleteAllPayments_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "payments"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Lightning_DescribeGraph_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "graph"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Lightning_GetNodeMetrics_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "graph", "nodemetrics"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Lightning_GetChanInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"v1", "graph", "edge", "chan_id"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Lightning_GetNodeInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 
4, 1, 5, 3}, []string{"v1", "graph", "node", "pub_key"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Lightning_QueryRoutes_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4}, []string{"v1", "graph", "routes", "pub_key", "amt"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Lightning_GetNetworkInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "graph", "info"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Lightning_StopDaemon_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "stop"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Lightning_SubscribeChannelGraph_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "graph", "subscribe"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Lightning_DebugLevel_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "debuglevel"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Lightning_FeeReport_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "fees"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Lightning_UpdateChannelPolicy_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "chanpolicy"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Lightning_ForwardingHistory_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "switch"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Lightning_ExportChannelBackup_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4}, []string{"v1", "channels", "backup", "chan_point.funding_txid_str", "chan_point.output_index"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Lightning_ExportAllChannelBackups_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "channels", "backup"}, "", 
runtime.AssumeColonVerbOpt(true))) - - pattern_Lightning_VerifyChanBackup_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v1", "channels", "backup", "verify"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Lightning_RestoreChannelBackups_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v1", "channels", "backup", "restore"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Lightning_SubscribeChannelBackups_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v1", "channels", "backup", "subscribe"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Lightning_BakeMacaroon_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "macaroon"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Lightning_ListMacaroonIDs_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "macaroon", "ids"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Lightning_DeleteMacaroonID_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2}, []string{"v1", "macaroon", "root_key_id"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Lightning_ListPermissions_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "macaroon", "permissions"}, "", runtime.AssumeColonVerbOpt(true))) -) - -var ( - forward_Lightning_WalletBalance_0 = runtime.ForwardResponseMessage - - forward_Lightning_ChannelBalance_0 = runtime.ForwardResponseMessage - - forward_Lightning_GetTransactions_0 = runtime.ForwardResponseMessage - - forward_Lightning_EstimateFee_0 = runtime.ForwardResponseMessage - - forward_Lightning_SendCoins_0 = runtime.ForwardResponseMessage - - forward_Lightning_ListUnspent_0 = runtime.ForwardResponseMessage - - forward_Lightning_SubscribeTransactions_0 = runtime.ForwardResponseStream - - forward_Lightning_SendMany_0 = runtime.ForwardResponseMessage - - 
forward_Lightning_NewAddress_0 = runtime.ForwardResponseMessage - - forward_Lightning_SignMessage_0 = runtime.ForwardResponseMessage - - forward_Lightning_VerifyMessage_0 = runtime.ForwardResponseMessage - - forward_Lightning_ConnectPeer_0 = runtime.ForwardResponseMessage - - forward_Lightning_DisconnectPeer_0 = runtime.ForwardResponseMessage - - forward_Lightning_ListPeers_0 = runtime.ForwardResponseMessage - - forward_Lightning_SubscribePeerEvents_0 = runtime.ForwardResponseStream - - forward_Lightning_GetInfo_0 = runtime.ForwardResponseMessage - - forward_Lightning_GetRecoveryInfo_0 = runtime.ForwardResponseMessage - - forward_Lightning_PendingChannels_0 = runtime.ForwardResponseMessage - - forward_Lightning_ListChannels_0 = runtime.ForwardResponseMessage - - forward_Lightning_SubscribeChannelEvents_0 = runtime.ForwardResponseStream - - forward_Lightning_ClosedChannels_0 = runtime.ForwardResponseMessage - - forward_Lightning_OpenChannelSync_0 = runtime.ForwardResponseMessage - - forward_Lightning_OpenChannel_0 = runtime.ForwardResponseStream - - forward_Lightning_FundingStateStep_0 = runtime.ForwardResponseMessage - - forward_Lightning_CloseChannel_0 = runtime.ForwardResponseStream - - forward_Lightning_AbandonChannel_0 = runtime.ForwardResponseMessage - - forward_Lightning_SendPaymentSync_0 = runtime.ForwardResponseMessage - - forward_Lightning_SendToRouteSync_0 = runtime.ForwardResponseMessage - - forward_Lightning_AddInvoice_0 = runtime.ForwardResponseMessage - - forward_Lightning_ListInvoices_0 = runtime.ForwardResponseMessage - - forward_Lightning_LookupInvoice_0 = runtime.ForwardResponseMessage - - forward_Lightning_SubscribeInvoices_0 = runtime.ForwardResponseStream - - forward_Lightning_DecodePayReq_0 = runtime.ForwardResponseMessage - - forward_Lightning_ListPayments_0 = runtime.ForwardResponseMessage - - forward_Lightning_DeleteAllPayments_0 = runtime.ForwardResponseMessage - - forward_Lightning_DescribeGraph_0 = runtime.ForwardResponseMessage - - 
forward_Lightning_GetNodeMetrics_0 = runtime.ForwardResponseMessage - - forward_Lightning_GetChanInfo_0 = runtime.ForwardResponseMessage - - forward_Lightning_GetNodeInfo_0 = runtime.ForwardResponseMessage - - forward_Lightning_QueryRoutes_0 = runtime.ForwardResponseMessage - - forward_Lightning_GetNetworkInfo_0 = runtime.ForwardResponseMessage - - forward_Lightning_StopDaemon_0 = runtime.ForwardResponseMessage - - forward_Lightning_SubscribeChannelGraph_0 = runtime.ForwardResponseStream - - forward_Lightning_DebugLevel_0 = runtime.ForwardResponseMessage - - forward_Lightning_FeeReport_0 = runtime.ForwardResponseMessage - - forward_Lightning_UpdateChannelPolicy_0 = runtime.ForwardResponseMessage - - forward_Lightning_ForwardingHistory_0 = runtime.ForwardResponseMessage - - forward_Lightning_ExportChannelBackup_0 = runtime.ForwardResponseMessage - - forward_Lightning_ExportAllChannelBackups_0 = runtime.ForwardResponseMessage - - forward_Lightning_VerifyChanBackup_0 = runtime.ForwardResponseMessage - - forward_Lightning_RestoreChannelBackups_0 = runtime.ForwardResponseMessage - - forward_Lightning_SubscribeChannelBackups_0 = runtime.ForwardResponseStream - - forward_Lightning_BakeMacaroon_0 = runtime.ForwardResponseMessage - - forward_Lightning_ListMacaroonIDs_0 = runtime.ForwardResponseMessage - - forward_Lightning_DeleteMacaroonID_0 = runtime.ForwardResponseMessage - - forward_Lightning_ListPermissions_0 = runtime.ForwardResponseMessage -) diff --git a/lnd/lnrpc/rpc.proto b/lnd/lnrpc/rpc.proto deleted file mode 100644 index 2b31867f..00000000 --- a/lnd/lnrpc/rpc.proto +++ /dev/null @@ -1,3786 +0,0 @@ -syntax = "proto3"; - -package lnrpc; - -option go_package = "github.com/pkt-cash/pktd/lnd/lnrpc"; - -/* - * Comments in this file will be directly parsed into the API - * Documentation as descriptions of the associated method, message, or field. 
- * These descriptions should go right above the definition of the object, and - * can be in either block or // comment format. - * - * An RPC method can be matched to an lncli command by placing a line in the - * beginning of the description in exactly the following format: - * lncli: `methodname` - * - * Failure to specify the exact name of the command will cause documentation - * generation to fail. - * - * More information on how exactly the gRPC documentation is generated from - * this proto file can be found here: - * https://github.com/lightninglabs/lightning-api - */ - -// Lightning is the main RPC server of the daemon. -service Lightning { - /* lncli: `walletbalance` - WalletBalance returns total unspent outputs(confirmed and unconfirmed), all - confirmed unspent outputs and all unconfirmed unspent outputs under control - of the wallet. - */ - rpc WalletBalance (WalletBalanceRequest) returns (WalletBalanceResponse); - - /* lncli: `getaddressbalances` - GetAddressBalances returns the balance for each of the addresses in the wallet. - */ - rpc GetAddressBalances (GetAddressBalancesRequest) returns (GetAddressBalancesResponse); - - /* lncli: `channelbalance` - ChannelBalance returns a report on the total funds across all open channels, - categorized in local/remote, pending local/remote and unsettled local/remote - balances. - */ - rpc ChannelBalance (ChannelBalanceRequest) returns (ChannelBalanceResponse); - - /* lncli: `listchaintxns` - GetTransactions returns a list describing all the known transactions - relevant to the wallet. - */ - rpc GetTransactions (GetTransactionsRequest) returns (TransactionDetails); - - /* lncli: `estimatefee` - EstimateFee asks the chain backend to estimate the fee rate and total fees - for a transaction that pays to multiple specified outputs. - - When using REST, the `AddrToAmount` map type can be set by appending - `&AddrToAmount[
]=` to the URL. Unfortunately this - map type doesn't appear in the REST API documentation because of a bug in - the grpc-gateway library. - */ - rpc EstimateFee (EstimateFeeRequest) returns (EstimateFeeResponse); - - /* lncli: `sendcoins` - SendCoins executes a request to send coins to a particular address. Unlike - SendMany, this RPC call only allows creating a single output at a time. If - neither target_conf, or sat_per_byte are set, then the internal wallet will - consult its fee model to determine a fee for the default confirmation - target. - */ - rpc SendCoins (SendCoinsRequest) returns (SendCoinsResponse); - - /* lncli: `listunspent` - Deprecated, use walletrpc.ListUnspent instead. - - ListUnspent returns a list of all utxos spendable by the wallet with a - number of confirmations between the specified minimum and maximum. - */ - rpc ListUnspent (ListUnspentRequest) returns (ListUnspentResponse); - - /* - SubscribeTransactions creates a uni-directional stream from the server to - the client in which any newly discovered transactions relevant to the - wallet are sent over. - */ - rpc SubscribeTransactions (GetTransactionsRequest) - returns (stream Transaction); - - /* lncli: `sendmany` - SendMany handles a request for a transaction that creates multiple specified - outputs in parallel. If neither target_conf, or sat_per_byte are set, then - the internal wallet will consult its fee model to determine a fee for the - default confirmation target. - */ - rpc SendMany (SendManyRequest) returns (SendManyResponse); - - /* lncli: `newaddress` - NewAddress creates a new address under control of the local wallet. - */ - rpc NewAddress (NewAddressRequest) returns (NewAddressResponse); - - /* lncli: `signmessage` - SignMessage signs a message with this node's private key. The returned - signature string is `zbase32` encoded and pubkey recoverable, meaning that - only the message digest and signature are needed for verification. 
- */ - rpc SignMessage (SignMessageRequest) returns (SignMessageResponse); - - /* lncli: `verifymessage` - VerifyMessage verifies a signature over a msg. The signature must be - zbase32 encoded and signed by an active node in the resident node's - channel database. In addition to returning the validity of the signature, - VerifyMessage also returns the recovered pubkey from the signature. - */ - rpc VerifyMessage (VerifyMessageRequest) returns (VerifyMessageResponse); - - /* lncli: `connect` - ConnectPeer attempts to establish a connection to a remote peer. This is at - the networking level, and is used for communication between nodes. This is - distinct from establishing a channel with a peer. - */ - rpc ConnectPeer (ConnectPeerRequest) returns (ConnectPeerResponse); - - /* lncli: `disconnect` - DisconnectPeer attempts to disconnect one peer from another identified by a - given pubKey. In the case that we currently have a pending or active channel - with the target peer, then this action will be not be allowed. - */ - rpc DisconnectPeer (DisconnectPeerRequest) returns (DisconnectPeerResponse); - - /* lncli: `listpeers` - ListPeers returns a verbose listing of all currently active peers. - */ - rpc ListPeers (ListPeersRequest) returns (ListPeersResponse); - - /* - SubscribePeerEvents creates a uni-directional stream from the server to - the client in which any events relevant to the state of peers are sent - over. Events include peers going online and offline. - */ - rpc SubscribePeerEvents (PeerEventSubscription) returns (stream PeerEvent); - - /* lncli: `getinfo` - GetInfo returns general information concerning the lightning node including - it's identity pubkey, alias, the chains it is connected to, and information - concerning the number of open+pending channels. 
- */ - rpc GetInfo (GetInfoRequest) returns (GetInfoResponse); - - /** lncli: `getrecoveryinfo` - GetRecoveryInfo returns information concerning the recovery mode including - whether it's in a recovery mode, whether the recovery is finished, and the - progress made so far. - */ - rpc GetRecoveryInfo (GetRecoveryInfoRequest) - returns (GetRecoveryInfoResponse); - - // TODO(roasbeef): merge with below with bool? - /* lncli: `pendingchannels` - PendingChannels returns a list of all the channels that are currently - considered "pending". A channel is pending if it has finished the funding - workflow and is waiting for confirmations for the funding txn, or is in the - process of closure, either initiated cooperatively or non-cooperatively. - */ - rpc PendingChannels (PendingChannelsRequest) - returns (PendingChannelsResponse); - - /* lncli: `listchannels` - ListChannels returns a description of all the open channels that this node - is a participant in. - */ - rpc ListChannels (ListChannelsRequest) returns (ListChannelsResponse); - - /* - SubscribeChannelEvents creates a uni-directional stream from the server to - the client in which any updates relevant to the state of the channels are - sent over. Events include new active channels, inactive channels, and closed - channels. - */ - rpc SubscribeChannelEvents (ChannelEventSubscription) - returns (stream ChannelEventUpdate); - - /* lncli: `closedchannels` - ClosedChannels returns a description of all the closed channels that - this node was a participant in. - */ - rpc ClosedChannels (ClosedChannelsRequest) returns (ClosedChannelsResponse); - - /* - OpenChannelSync is a synchronous version of the OpenChannel RPC call. This - call is meant to be consumed by clients to the REST proxy. As with all - other sync calls, all byte slices are intended to be populated as hex - encoded strings. 
- */ - rpc OpenChannelSync (OpenChannelRequest) returns (ChannelPoint); - - /* lncli: `openchannel` - OpenChannel attempts to open a singly funded channel specified in the - request to a remote peer. Users are able to specify a target number of - blocks that the funding transaction should be confirmed in, or a manual fee - rate to us for the funding transaction. If neither are specified, then a - lax block confirmation target is used. Each OpenStatusUpdate will return - the pending channel ID of the in-progress channel. Depending on the - arguments specified in the OpenChannelRequest, this pending channel ID can - then be used to manually progress the channel funding flow. - */ - rpc OpenChannel (OpenChannelRequest) returns (stream OpenStatusUpdate); - - /* - FundingStateStep is an advanced funding related call that allows the caller - to either execute some preparatory steps for a funding workflow, or - manually progress a funding workflow. The primary way a funding flow is - identified is via its pending channel ID. As an example, this method can be - used to specify that we're expecting a funding flow for a particular - pending channel ID, for which we need to use specific parameters. - Alternatively, this can be used to interactively drive PSBT signing for - funding for partially complete funding transactions. - */ - rpc FundingStateStep (FundingTransitionMsg) returns (FundingStateStepResp); - - /* - ChannelAcceptor dispatches a bi-directional streaming RPC in which - OpenChannel requests are sent to the client and the client responds with - a boolean that tells LND whether or not to accept the channel. This allows - node operators to specify their own criteria for accepting inbound channels - through a single persistent connection. - */ - rpc ChannelAcceptor (stream ChannelAcceptResponse) - returns (stream ChannelAcceptRequest); - - /* lncli: `closechannel` - CloseChannel attempts to close an active channel identified by its channel - outpoint (ChannelPoint). 
The actions of this method can additionally be - augmented to attempt a force close after a timeout period in the case of an - inactive peer. If a non-force close (cooperative closure) is requested, - then the user can specify either a target number of blocks until the - closure transaction is confirmed, or a manual fee rate. If neither are - specified, then a default lax, block confirmation target is used. - */ - rpc CloseChannel (CloseChannelRequest) returns (stream CloseStatusUpdate); - - /* lncli: `abandonchannel` - AbandonChannel removes all channel state from the database except for a - close summary. This method can be used to get rid of permanently unusable - channels due to bugs fixed in newer versions of lnd. This method can also be - used to remove externally funded channels where the funding transaction was - never broadcast. Only available for non-externally funded channels in dev - build. - */ - rpc AbandonChannel (AbandonChannelRequest) returns (AbandonChannelResponse); - - /* lncli: `sendpayment` - Deprecated, use routerrpc.SendPaymentV2. SendPayment dispatches a - bi-directional streaming RPC for sending payments through the Lightning - Network. A single RPC invocation creates a persistent bi-directional - stream allowing clients to rapidly send payments through the Lightning - Network with a single persistent connection. - */ - rpc SendPayment (stream SendRequest) returns (stream SendResponse) { - option deprecated = true; - } - - /* - SendPaymentSync is the synchronous non-streaming version of SendPayment. - This RPC is intended to be consumed by clients of the REST proxy. - Additionally, this RPC expects the destination's public key and the payment - hash (if any) to be encoded as hex strings. - */ - rpc SendPaymentSync (SendRequest) returns (SendResponse); - - /* lncli: `sendtoroute` - Deprecated, use routerrpc.SendToRouteV2. SendToRoute is a bi-directional - streaming RPC for sending payment through the Lightning Network. 
This - method differs from SendPayment in that it allows users to specify a full - route manually. This can be used for things like rebalancing, and atomic - swaps. - */ - rpc SendToRoute (stream SendToRouteRequest) returns (stream SendResponse) { - option deprecated = true; - } - - /* - SendToRouteSync is a synchronous version of SendToRoute. It Will block - until the payment either fails or succeeds. - */ - rpc SendToRouteSync (SendToRouteRequest) returns (SendResponse); - - /* lncli: `addinvoice` - AddInvoice attempts to add a new invoice to the invoice database. Any - duplicated invoices are rejected, therefore all invoices *must* have a - unique payment preimage. - */ - rpc AddInvoice (Invoice) returns (AddInvoiceResponse); - - /* lncli: `listinvoices` - ListInvoices returns a list of all the invoices currently stored within the - database. Any active debug invoices are ignored. It has full support for - paginated responses, allowing users to query for specific invoices through - their add_index. This can be done by using either the first_index_offset or - last_index_offset fields included in the response as the index_offset of the - next request. By default, the first 100 invoices created will be returned. - Backwards pagination is also supported through the Reversed flag. - */ - rpc ListInvoices (ListInvoiceRequest) returns (ListInvoiceResponse); - - /* lncli: `lookupinvoice` - LookupInvoice attempts to look up an invoice according to its payment hash. - The passed payment hash *must* be exactly 32 bytes, if not, an error is - returned. - */ - rpc LookupInvoice (PaymentHash) returns (Invoice); - - /* - SubscribeInvoices returns a uni-directional stream (server -> client) for - notifying the client of newly added/settled invoices. The caller can - optionally specify the add_index and/or the settle_index. If the add_index - is specified, then we'll first start by sending add invoice events for all - invoices with an add_index greater than the specified value. 
If the - settle_index is specified, the next, we'll send out all settle events for - invoices with a settle_index greater than the specified value. One or both - of these fields can be set. If no fields are set, then we'll only send out - the latest add/settle events. - */ - rpc SubscribeInvoices (InvoiceSubscription) returns (stream Invoice); - - /* lncli: `decodepayreq` - DecodePayReq takes an encoded payment request string and attempts to decode - it, returning a full description of the conditions encoded within the - payment request. - */ - rpc DecodePayReq (PayReqString) returns (PayReq); - - /* lncli: `listpayments` - ListPayments returns a list of all outgoing payments. - */ - rpc ListPayments (ListPaymentsRequest) returns (ListPaymentsResponse); - - /* - DeleteAllPayments deletes all outgoing payments from DB. - */ - rpc DeleteAllPayments (DeleteAllPaymentsRequest) - returns (DeleteAllPaymentsResponse); - - /* lncli: `describegraph` - DescribeGraph returns a description of the latest graph state from the - point of view of the node. The graph information is partitioned into two - components: all the nodes/vertexes, and all the edges that connect the - vertexes themselves. As this is a directed graph, the edges also contain - the node directional specific routing policy which includes: the time lock - delta, fee information, etc. - */ - rpc DescribeGraph (ChannelGraphRequest) returns (ChannelGraph); - - /* lncli: `getnodemetrics` - GetNodeMetrics returns node metrics calculated from the graph. Currently - the only supported metric is betweenness centrality of individual nodes. - */ - rpc GetNodeMetrics (NodeMetricsRequest) returns (NodeMetricsResponse); - - /* lncli: `getchaninfo` - GetChanInfo returns the latest authenticated network announcement for the - given channel identified by its channel ID: an 8-byte integer which - uniquely identifies the location of transaction's funding output within the - blockchain. 
- */ - rpc GetChanInfo (ChanInfoRequest) returns (ChannelEdge); - - /* lncli: `getnodeinfo` - GetNodeInfo returns the latest advertised, aggregated, and authenticated - channel information for the specified node identified by its public key. - */ - rpc GetNodeInfo (NodeInfoRequest) returns (NodeInfo); - - /* lncli: `queryroutes` - QueryRoutes attempts to query the daemon's Channel Router for a possible - route to a target destination capable of carrying a specific amount of - satoshis. The returned route contains the full details required to craft and - send an HTLC, also including the necessary information that should be - present within the Sphinx packet encapsulated within the HTLC. - - When using REST, the `dest_custom_records` map type can be set by appending - `&dest_custom_records[]=` - to the URL. Unfortunately this map type doesn't appear in the REST API - documentation because of a bug in the grpc-gateway library. - */ - rpc QueryRoutes (QueryRoutesRequest) returns (QueryRoutesResponse); - - /* lncli: `getnetworkinfo` - GetNetworkInfo returns some basic stats about the known channel graph from - the point of view of the node. - */ - rpc GetNetworkInfo (NetworkInfoRequest) returns (NetworkInfo); - - /* lncli: `stop` - StopDaemon will send a shutdown request to the interrupt handler, triggering - a graceful shutdown of the daemon. - */ - rpc StopDaemon (StopRequest) returns (StopResponse); - - /* - SubscribeChannelGraph launches a streaming RPC that allows the caller to - receive notifications upon any changes to the channel graph topology from - the point of view of the responding node. Events notified include: new - nodes coming online, nodes updating their authenticated attributes, new - channels being advertised, updates in the routing policy for a directional - channel edge, and when channels are closed on-chain. 
- */ - rpc SubscribeChannelGraph (GraphTopologySubscription) - returns (stream GraphTopologyUpdate); - - /* lncli: `debuglevel` - DebugLevel allows a caller to programmatically set the logging verbosity of - lnd. The logging can be targeted according to a coarse daemon-wide logging - level, or in a granular fashion to specify the logging for a target - sub-system. - */ - rpc DebugLevel (DebugLevelRequest) returns (DebugLevelResponse); - - /* lncli: `feereport` - FeeReport allows the caller to obtain a report detailing the current fee - schedule enforced by the node globally for each channel. - */ - rpc FeeReport (FeeReportRequest) returns (FeeReportResponse); - - /* lncli: `updatechanpolicy` - UpdateChannelPolicy allows the caller to update the fee schedule and - channel policies for all channels globally, or a particular channel. - */ - rpc UpdateChannelPolicy (PolicyUpdateRequest) - returns (PolicyUpdateResponse); - - /* lncli: `fwdinghistory` - ForwardingHistory allows the caller to query the htlcswitch for a record of - all HTLCs forwarded within the target time range, and integer offset - within that time range. If no time-range is specified, then the first chunk - of the past 24 hrs of forwarding history are returned. - - A list of forwarding events are returned. The size of each forwarding event - is 40 bytes, and the max message size able to be returned in gRPC is 4 MiB. - As a result each message can only contain 50k entries. Each response has - the index offset of the last entry. The index offset can be provided to the - request to allow the caller to skip a series of records. - */ - rpc ForwardingHistory (ForwardingHistoryRequest) - returns (ForwardingHistoryResponse); - - /* lncli: `exportchanbackup` - ExportChannelBackup attempts to return an encrypted static channel backup - for the target channel identified by it channel point. The backup is - encrypted with a key generated from the aezeed seed of the user. 
The - returned backup can either be restored using the RestoreChannelBackup - method once lnd is running, or via the InitWallet and UnlockWallet methods - from the WalletUnlocker service. - */ - rpc ExportChannelBackup (ExportChannelBackupRequest) - returns (ChannelBackup); - - /* - ExportAllChannelBackups returns static channel backups for all existing - channels known to lnd. A set of regular singular static channel backups for - each channel are returned. Additionally, a multi-channel backup is returned - as well, which contains a single encrypted blob containing the backups of - each channel. - */ - rpc ExportAllChannelBackups (ChanBackupExportRequest) - returns (ChanBackupSnapshot); - - /* - VerifyChanBackup allows a caller to verify the integrity of a channel backup - snapshot. This method will accept either a packed Single or a packed Multi. - Specifying both will result in an error. - */ - rpc VerifyChanBackup (ChanBackupSnapshot) - returns (VerifyChanBackupResponse); - - /* lncli: `restorechanbackup` - RestoreChannelBackups accepts a set of singular channel backups, or a - single encrypted multi-chan backup and attempts to recover any funds - remaining within the channel. If we are able to unpack the backup, then the - new channel will be shown under listchannels, as well as pending channels. - */ - rpc RestoreChannelBackups (RestoreChanBackupRequest) - returns (RestoreBackupResponse); - - /* - SubscribeChannelBackups allows a client to sub-subscribe to the most up to - date information concerning the state of all channel backups. Each time a - new channel is added, we return the new set of channels, along with a - multi-chan backup containing the backup info for all channels. Each time a - channel is closed, we send a new update, which contains new new chan back - ups, but the updated set of encrypted multi-chan backups with the closed - channel(s) removed. 
- */ - rpc SubscribeChannelBackups (ChannelBackupSubscription) - returns (stream ChanBackupSnapshot); - - /* lncli: `bakemacaroon` - BakeMacaroon allows the creation of a new macaroon with custom read and - write permissions. No first-party caveats are added since this can be done - offline. - */ - rpc BakeMacaroon (BakeMacaroonRequest) returns (BakeMacaroonResponse); - - /* lncli: `listmacaroonids` - ListMacaroonIDs returns all root key IDs that are in use. - */ - rpc ListMacaroonIDs (ListMacaroonIDsRequest) - returns (ListMacaroonIDsResponse); - - /* lncli: `deletemacaroonid` - DeleteMacaroonID deletes the specified macaroon ID and invalidates all - macaroons derived from that ID. - */ - rpc DeleteMacaroonID (DeleteMacaroonIDRequest) - returns (DeleteMacaroonIDResponse); - - /* lncli: `listpermissions` - ListPermissions lists all RPC method URIs and their required macaroon - permissions to access them. - */ - rpc ListPermissions (ListPermissionsRequest) - returns (ListPermissionsResponse); - - /*Scan over the chain to find any transactions which may not have been recorded in the wallet's database*/ - rpc ReSync (ReSyncChainRequest) returns (ReSyncChainResponse); - - /*Stop a re-synchronization job before it's completion*/ - rpc StopReSync (StopReSyncRequest) returns (StopReSyncResponse); -} - -message Utxo { - // The type of address - AddressType address_type = 1; - - // The address - string address = 2; - - // The value of the unspent coin in satoshis - int64 amount_sat = 3; - - // The pkscript in hex - string pk_script = 4; - - // The outpoint in format txid:n - OutPoint outpoint = 5; - - // The number of confirmations for the Utxo - int64 confirmations = 6; -} - -message Transaction { - // The transaction hash - string tx_hash = 1; - - // The transaction amount, denominated in satoshis - int64 amount = 2; - - // The number of confirmations - int32 num_confirmations = 3; - - // The hash of the block this transaction was included in - string block_hash = 4; - - 
// The height of the block this transaction was included in - int32 block_height = 5; - - // Timestamp of this transaction - int64 time_stamp = 6; - - // Fees paid for this transaction - int64 total_fees = 7; - - // Addresses that received funds for this transaction - repeated string dest_addresses = 8; - - // The raw transaction hex. - string raw_tx_hex = 9; - - // A label that was optionally set on transaction broadcast. - string label = 10; -} -message GetTransactionsRequest { - /* - The height from which to list transactions, inclusive. If this value is - greater than end_height, transactions will be read in reverse. - */ - int32 start_height = 1; - - /* - The height until which to list transactions, inclusive. To include - unconfirmed transactions, this value should be set to -1, which will - return transactions from start_height until the current chain tip and - unconfirmed transactions. If no end_height is provided, the call will - default to this option. - */ - int32 end_height = 2; - int32 txns_limit = 3; - int32 txns_skip = 4; - int32 coinbase = 5; -} - -message TransactionDetails { - // The list of transactions relevant to the wallet. - repeated Transaction transactions = 1; -} - -message FeeLimit { - oneof limit { - /* - The fee limit expressed as a fixed amount of satoshis. - - The fields fixed and fixed_msat are mutually exclusive. - */ - int64 fixed = 1; - - /* - The fee limit expressed as a fixed amount of millisatoshis. - - The fields fixed and fixed_msat are mutually exclusive. - */ - int64 fixed_msat = 3; - - // The fee limit expressed as a percentage of the payment amount. - int64 percent = 2; - } -} - -message SendRequest { - /* - The identity pubkey of the payment recipient. When using REST, this field - must be encoded as base64. - */ - bytes dest = 1; - - /* - The hex-encoded identity pubkey of the payment recipient. Deprecated now - that the REST gateway supports base64 encoding of bytes fields. 
- */ - string dest_string = 2 [deprecated = true]; - - /* - The amount to send expressed in satoshis. - - The fields amt and amt_msat are mutually exclusive. - */ - int64 amt = 3; - - /* - The amount to send expressed in millisatoshis. - - The fields amt and amt_msat are mutually exclusive. - */ - int64 amt_msat = 12; - - /* - The hash to use within the payment's HTLC. When using REST, this field - must be encoded as base64. - */ - bytes payment_hash = 4; - - /* - The hex-encoded hash to use within the payment's HTLC. Deprecated now - that the REST gateway supports base64 encoding of bytes fields. - */ - string payment_hash_string = 5 [deprecated = true]; - - /* - A bare-bones invoice for a payment within the Lightning Network. With the - details of the invoice, the sender has all the data necessary to send a - payment to the recipient. - */ - string payment_request = 6; - - /* - The CLTV delta from the current height that should be used to set the - timelock for the final hop. - */ - int32 final_cltv_delta = 7; - - /* - The maximum number of satoshis that will be paid as a fee of the payment. - This value can be represented either as a percentage of the amount being - sent, or as a fixed amount of the maximum fee the user is willing the pay to - send the payment. - */ - FeeLimit fee_limit = 8; - - /* - The channel id of the channel that must be taken to the first hop. If zero, - any channel may be used. - */ - uint64 outgoing_chan_id = 9 [jstype = JS_STRING]; - - /* - The pubkey of the last hop of the route. If empty, any hop may be used. - */ - bytes last_hop_pubkey = 13; - - /* - An optional maximum total time lock for the route. This should not exceed - lnd's `--max-cltv-expiry` setting. If zero, then the value of - `--max-cltv-expiry` is enforced. - */ - uint32 cltv_limit = 10; - - /* - An optional field that can be used to pass an arbitrary set of TLV records - to a peer which understands the new records. 
This can be used to pass - application specific data during the payment attempt. Record types are - required to be in the custom range >= 65536. When using REST, the values - must be encoded as base64. - */ - map dest_custom_records = 11; - - // If set, circular payments to self are permitted. - bool allow_self_payment = 14; - - /* - Features assumed to be supported by the final node. All transitive feature - dependencies must also be set properly. For a given feature bit pair, either - optional or remote may be set, but not both. If this field is nil or empty, - the router will try to load destination features from the graph as a - fallback. - */ - repeated FeatureBit dest_features = 15; -} - -message SendResponse { - string payment_error = 1; - bytes payment_preimage = 2; - Route payment_route = 3; - bytes payment_hash = 4; -} - -message SendToRouteRequest { - /* - The payment hash to use for the HTLC. When using REST, this field must be - encoded as base64. - */ - bytes payment_hash = 1; - - /* - An optional hex-encoded payment hash to be used for the HTLC. Deprecated now - that the REST gateway supports base64 encoding of bytes fields. - */ - string payment_hash_string = 2 [deprecated = true]; - - reserved 3; - - // Route that should be used to attempt to complete the payment. - Route route = 4; -} - -message ChannelAcceptRequest { - // The pubkey of the node that wishes to open an inbound channel. - bytes node_pubkey = 1; - - // The hash of the genesis block that the proposed channel resides in. - bytes chain_hash = 2; - - // The pending channel id. - bytes pending_chan_id = 3; - - // The funding amount in satoshis that initiator wishes to use in the - // channel. - uint64 funding_amt = 4; - - // The push amount of the proposed channel in millisatoshis. - uint64 push_amt = 5; - - // The dust limit of the initiator's commitment tx. - uint64 dust_limit = 6; - - // The maximum amount of coins in millisatoshis that can be pending in this - // channel. 
- uint64 max_value_in_flight = 7; - - // The minimum amount of satoshis the initiator requires us to have at all - // times. - uint64 channel_reserve = 8; - - // The smallest HTLC in millisatoshis that the initiator will accept. - uint64 min_htlc = 9; - - // The initial fee rate that the initiator suggests for both commitment - // transactions. - uint64 fee_per_kw = 10; - - /* - The number of blocks to use for the relative time lock in the pay-to-self - output of both commitment transactions. - */ - uint32 csv_delay = 11; - - // The total number of incoming HTLC's that the initiator will accept. - uint32 max_accepted_htlcs = 12; - - // A bit-field which the initiator uses to specify proposed channel - // behavior. - uint32 channel_flags = 13; -} - -message ChannelAcceptResponse { - // Whether or not the client accepts the channel. - bool accept = 1; - - // The pending channel id to which this response applies. - bytes pending_chan_id = 2; - - /* - An optional error to send the initiating party to indicate why the channel - was rejected. This field *should not* contain sensitive information, it will - be sent to the initiating party. This field should only be set if accept is - false, the channel will be rejected if an error is set with accept=true - because the meaning of this response is ambiguous. Limited to 500 - characters. - */ - string error = 3; - - /* - The upfront shutdown address to use if the initiating peer supports option - upfront shutdown script (see ListPeers for the features supported). Note - that the channel open will fail if this value is set for a peer that does - not support this feature bit. - */ - string upfront_shutdown = 4; - - /* - The csv delay (in blocks) that we require for the remote party. - */ - uint32 csv_delay = 5; - - /* - The reserve amount in satoshis that we require the remote peer to adhere to. 
- We require that the remote peer always have some reserve amount allocated to - them so that there is always a disincentive to broadcast old state (if they - hold 0 sats on their side of the channel, there is nothing to lose). - */ - uint64 reserve_sat = 6; - - /* - The maximum amount of funds in millisatoshis that we allow the remote peer - to have in outstanding htlcs. - */ - uint64 in_flight_max_msat = 7; - - /* - The maximum number of htlcs that the remote peer can offer us. - */ - uint32 max_htlc_count = 8; - - /* - The minimum value in millisatoshis for incoming htlcs on the channel. - */ - uint64 min_htlc_in = 9; - - /* - The number of confirmations we require before we consider the channel open. - */ - uint32 min_accept_depth = 10; -} - -message ChannelPoint { - oneof funding_txid { - /* - Txid of the funding transaction. When using REST, this field must be - encoded as base64. - */ - bytes funding_txid_bytes = 1; - - /* - Hex-encoded string representing the byte-reversed hash of the funding - transaction. - */ - string funding_txid_str = 2; - } - - // The index of the output of the funding transaction - uint32 output_index = 3; -} - -message OutPoint { - // Raw bytes representing the transaction id. - bytes txid_bytes = 1; - - // Reversed, hex-encoded string representing the transaction id. - string txid_str = 2; - - // The index of the output on the transaction. - uint32 output_index = 3; -} - -message LightningAddress { - // The identity pubkey of the Lightning node - string pubkey = 1; - - // The network location of the lightning node, e.g. `69.69.69.69:1337` or - // `localhost:10011` - string host = 2; -} - -message EstimateFeeRequest { - // The map from addresses to amounts for the transaction. - map AddrToAmount = 1; - - // The target number of blocks that this transaction should be confirmed - // by. - int32 target_conf = 2; -} - -message EstimateFeeResponse { - // The total fee in satoshis. - int64 fee_sat = 1; - - // The fee rate in satoshi/byte. 
- int64 feerate_sat_per_byte = 2; -} - -message SendManyRequest { - // The map from addresses to amounts - map AddrToAmount = 1; - - // The target number of blocks that this transaction should be confirmed - // by. - int32 target_conf = 3; - - // A manual fee rate set in sat/byte that should be used when crafting the - // transaction. - int64 sat_per_byte = 5; - - // An optional label for the transaction, limited to 500 characters. - string label = 6; - - // The minimum number of confirmations each one of your outputs used for - // the transaction must satisfy. - int32 min_confs = 7; - - // Whether unconfirmed outputs should be used as inputs for the transaction. - bool spend_unconfirmed = 8; -} -message SendManyResponse { - // The id of the transaction - string txid = 1; -} - -message SendCoinsRequest { - // The address to send coins to - string addr = 1; - - // The amount in satoshis to send - int64 amount = 2; - - // The target number of blocks that this transaction should be confirmed - // by. - int32 target_conf = 3; - - // A manual fee rate set in sat/byte that should be used when crafting the - // transaction. - int64 sat_per_byte = 5; - - /* - If set, then the amount field will be ignored, and lnd will attempt to - send all the coins under control of the internal wallet to the specified - address. - */ - bool send_all = 6; - - // An optional label for the transaction, limited to 500 characters. - string label = 7; - - // The minimum number of confirmations each one of your outputs used for - // the transaction must satisfy. - int32 min_confs = 8; - - // Whether unconfirmed outputs should be used as inputs for the transaction. - bool spend_unconfirmed = 9; -} -message SendCoinsResponse { - // The transaction ID of the transaction - string txid = 1; -} - -message ListUnspentRequest { - // The minimum number of confirmations to be included. - int32 min_confs = 1; - - // The maximum number of confirmations to be included. 
- int32 max_confs = 2; -} -message ListUnspentResponse { - // A list of utxos - repeated Utxo utxos = 1; -} - -/* -`AddressType` has to be one of: - -- `p2wkh`: Pay to witness key hash (`WITNESS_PUBKEY_HASH` = 0) -- `np2wkh`: Pay to nested witness key hash (`NESTED_PUBKEY_HASH` = 1) -*/ -enum AddressType { - WITNESS_PUBKEY_HASH = 0; - NESTED_PUBKEY_HASH = 1; - UNUSED_WITNESS_PUBKEY_HASH = 2; - UNUSED_NESTED_PUBKEY_HASH = 3; -} - -message NewAddressRequest { - // The address type - AddressType type = 1; -} -message NewAddressResponse { - // The newly generated wallet address - string address = 1; -} - -message SignMessageRequest { - /* - The message to be signed. When using REST, this field must be encoded as - base64. - */ - bytes msg = 1; -} -message SignMessageResponse { - // The signature for the given message - string signature = 1; -} - -message VerifyMessageRequest { - /* - The message over which the signature is to be verified. When using REST, - this field must be encoded as base64. - */ - bytes msg = 1; - - // The signature to be verified over the given message - string signature = 2; -} -message VerifyMessageResponse { - // Whether the signature was valid over the given message - bool valid = 1; - - // The pubkey recovered from the signature - string pubkey = 2; -} - -message ConnectPeerRequest { - // Lightning address of the peer, in the format `@host` - LightningAddress addr = 1; - - /* If set, the daemon will attempt to persistently connect to the target - * peer. Otherwise, the call will be synchronous. */ - bool perm = 2; - - /* - The connection timeout value (in seconds) for this request. It won't affect - other requests. 
- */ - uint64 timeout = 3; -} -message ConnectPeerResponse { -} - -message DisconnectPeerRequest { - // The pubkey of the node to disconnect from - string pub_key = 1; -} -message DisconnectPeerResponse { -} - -message HTLC { - bool incoming = 1; - int64 amount = 2; - bytes hash_lock = 3; - uint32 expiration_height = 4; - - // Index identifying the htlc on the channel. - uint64 htlc_index = 5; - - // If this HTLC is involved in a forwarding operation, this field indicates - // the forwarding channel. For an outgoing htlc, it is the incoming channel. - // For an incoming htlc, it is the outgoing channel. When the htlc - // originates from this node or this node is the final destination, - // forwarding_channel will be zero. The forwarding channel will also be zero - // for htlcs that need to be forwarded but don't have a forwarding decision - // persisted yet. - uint64 forwarding_channel = 6; - - // Index identifying the htlc on the forwarding channel. - uint64 forwarding_htlc_index = 7; -} - -enum CommitmentType { - /* - A channel using the legacy commitment format having tweaked to_remote - keys. - */ - LEGACY = 0; - - /* - A channel that uses the modern commitment format where the key in the - output of the remote party does not change each state. This makes back - up and recovery easier as when the channel is closed, the funds go - directly to that key. - */ - STATIC_REMOTE_KEY = 1; - - /* - A channel that uses a commitment format that has anchor outputs on the - commitments, allowing fee bumping after a force close transaction has - been broadcast. - */ - ANCHORS = 2; - - /* - Returned when the commitment type isn't known or unavailable. - */ - UNKNOWN_COMMITMENT_TYPE = 999; -} - -message ChannelConstraints { - /* - The CSV delay expressed in relative blocks. If the channel is force closed, - we will need to wait for this many blocks before we can regain our funds. 
- */ - uint32 csv_delay = 1; - - // The minimum satoshis this node is required to reserve in its balance. - uint64 chan_reserve_sat = 2; - - // The dust limit (in satoshis) of the initiator's commitment tx. - uint64 dust_limit_sat = 3; - - // The maximum amount of coins in millisatoshis that can be pending in this - // channel. - uint64 max_pending_amt_msat = 4; - - // The smallest HTLC in millisatoshis that the initiator will accept. - uint64 min_htlc_msat = 5; - - // The total number of incoming HTLC's that the initiator will accept. - uint32 max_accepted_htlcs = 6; -} - -message Channel { - // Whether this channel is active or not - bool active = 1; - - // The identity pubkey of the remote node - string remote_pubkey = 2; - - /* - The outpoint (txid:index) of the funding transaction. With this value, Bob - will be able to generate a signature for Alice's version of the commitment - transaction. - */ - string channel_point = 3; - - /* - The unique channel ID for the channel. The first 3 bytes are the block - height, the next 3 the index within the block, and the last 2 bytes are the - output index for the channel. - */ - uint64 chan_id = 4 [jstype = JS_STRING]; - - // The total amount of funds held in this channel - int64 capacity = 5; - - // This node's current balance in this channel - int64 local_balance = 6; - - // The counterparty's current balance in this channel - int64 remote_balance = 7; - - /* - The amount calculated to be paid in fees for the current set of commitment - transactions. The fee amount is persisted with the channel in order to - allow the fee amount to be removed and recalculated with each channel state - update, including updates that happen after a system restart. - */ - int64 commit_fee = 8; - - // The weight of the commitment transaction - int64 commit_weight = 9; - - /* - The required number of satoshis per kilo-weight that the requester will pay - at all times, for both the funding transaction and commitment transaction. 
- This value can later be updated once the channel is open. - */ - int64 fee_per_kw = 10; - - // The unsettled balance in this channel - int64 unsettled_balance = 11; - - /* - The total number of satoshis we've sent within this channel. - */ - int64 total_satoshis_sent = 12; - - /* - The total number of satoshis we've received within this channel. - */ - int64 total_satoshis_received = 13; - - /* - The total number of updates conducted within this channel. - */ - uint64 num_updates = 14; - - /* - The list of active, uncleared HTLCs currently pending within the channel. - */ - repeated HTLC pending_htlcs = 15; - - /* - Deprecated. The CSV delay expressed in relative blocks. If the channel is - force closed, we will need to wait for this many blocks before we can regain - our funds. - */ - uint32 csv_delay = 16 [deprecated = true]; - - // Whether this channel is advertised to the network or not. - bool private = 17; - - // True if we were the ones that created the channel. - bool initiator = 18; - - // A set of flags showing the current state of the channel. - string chan_status_flags = 19; - - // Deprecated. The minimum satoshis this node is required to reserve in its - // balance. - int64 local_chan_reserve_sat = 20 [deprecated = true]; - - /* - Deprecated. The minimum satoshis the other node is required to reserve in - its balance. - */ - int64 remote_chan_reserve_sat = 21 [deprecated = true]; - - // Deprecated. Use commitment_type. - bool static_remote_key = 22 [deprecated = true]; - - // The commitment type used by this channel. - CommitmentType commitment_type = 26; - - /* - The number of seconds that the channel has been monitored by the channel - scoring system. Scores are currently not persisted, so this value may be - less than the lifetime of the channel [EXPERIMENTAL]. 
- */ - int64 lifetime = 23; - - /* - The number of seconds that the remote peer has been observed as being online - by the channel scoring system over the lifetime of the channel - [EXPERIMENTAL]. - */ - int64 uptime = 24; - - /* - Close address is the address that we will enforce payout to on cooperative - close if the channel was opened utilizing option upfront shutdown. This - value can be set on channel open by setting close_address in an open channel - request. If this value is not set, you can still choose a payout address by - cooperatively closing with the delivery_address field set. - */ - string close_address = 25; - - /* - The amount that the initiator of the channel optionally pushed to the remote - party on channel open. This amount will be zero if the channel initiator did - not push any funds to the remote peer. If the initiator field is true, we - pushed this amount to our peer, if it is false, the remote peer pushed this - amount to us. - */ - uint64 push_amount_sat = 27; - - /* - This uint32 indicates if this channel is to be considered 'frozen'. A - frozen channel doest not allow a cooperative channel close by the - initiator. The thaw_height is the height that this restriction stops - applying to the channel. This field is optional, not setting it or using a - value of zero will mean the channel has no additional restrictions. The - height can be interpreted in two ways: as a relative height if the value is - less than 500,000, or as an absolute height otherwise. - */ - uint32 thaw_height = 28; - - // List constraints for the local node. - ChannelConstraints local_constraints = 29; - - // List constraints for the remote node. - ChannelConstraints remote_constraints = 30; -} - -message ListChannelsRequest { - bool active_only = 1; - bool inactive_only = 2; - bool public_only = 3; - bool private_only = 4; - - /* - Filters the response for channels with a target peer's pubkey. If peer is - empty, all channels will be returned. 
- */ - bytes peer = 5; -} -message ListChannelsResponse { - // The list of active channels - repeated Channel channels = 11; -} - -enum Initiator { - INITIATOR_UNKNOWN = 0; - INITIATOR_LOCAL = 1; - INITIATOR_REMOTE = 2; - INITIATOR_BOTH = 3; -} - -message ChannelCloseSummary { - // The outpoint (txid:index) of the funding transaction. - string channel_point = 1; - - // The unique channel ID for the channel. - uint64 chan_id = 2 [jstype = JS_STRING]; - - // The hash of the genesis block that this channel resides within. - string chain_hash = 3; - - // The txid of the transaction which ultimately closed this channel. - string closing_tx_hash = 4; - - // Public key of the remote peer that we formerly had a channel with. - string remote_pubkey = 5; - - // Total capacity of the channel. - int64 capacity = 6; - - // Height at which the funding transaction was spent. - uint32 close_height = 7; - - // Settled balance at the time of channel closure - int64 settled_balance = 8; - - // The sum of all the time-locked outputs at the time of channel closure - int64 time_locked_balance = 9; - - enum ClosureType { - COOPERATIVE_CLOSE = 0; - LOCAL_FORCE_CLOSE = 1; - REMOTE_FORCE_CLOSE = 2; - BREACH_CLOSE = 3; - FUNDING_CANCELED = 4; - ABANDONED = 5; - } - - // Details on how the channel was closed. - ClosureType close_type = 10; - - /* - Open initiator is the party that initiated opening the channel. Note that - this value may be unknown if the channel was closed before we migrated to - store open channel information after close. - */ - Initiator open_initiator = 11; - - /* - Close initiator indicates which party initiated the close. This value will - be unknown for channels that were cooperatively closed before we started - tracking cooperative close initiators. Note that this indicates which party - initiated a close, and it is possible for both to initiate cooperative or - force closes, although only one party's close will be confirmed on chain. 
- */ - Initiator close_initiator = 12; - - repeated Resolution resolutions = 13; -} - -enum ResolutionType { - TYPE_UNKNOWN = 0; - - // We resolved an anchor output. - ANCHOR = 1; - - /* - We are resolving an incoming htlc on chain. This if this htlc is - claimed, we swept the incoming htlc with the preimage. If it is timed - out, our peer swept the timeout path. - */ - INCOMING_HTLC = 2; - - /* - We are resolving an outgoing htlc on chain. If this htlc is claimed, - the remote party swept the htlc with the preimage. If it is timed out, - we swept it with the timeout path. - */ - OUTGOING_HTLC = 3; - - // We force closed and need to sweep our time locked commitment output. - COMMIT = 4; -} - -enum ResolutionOutcome { - // Outcome unknown. - OUTCOME_UNKNOWN = 0; - - // An output was claimed on chain. - CLAIMED = 1; - - // An output was left unclaimed on chain. - UNCLAIMED = 2; - - /* - ResolverOutcomeAbandoned indicates that an output that we did not - claim on chain, for example an anchor that we did not sweep and a - third party claimed on chain, or a htlc that we could not decode - so left unclaimed. - */ - ABANDONED = 3; - - /* - If we force closed our channel, our htlcs need to be claimed in two - stages. This outcome represents the broadcast of a timeout or success - transaction for this two stage htlc claim. - */ - FIRST_STAGE = 4; - - // A htlc was timed out on chain. - TIMEOUT = 5; -} - -message Resolution { - // The type of output we are resolving. - ResolutionType resolution_type = 1; - - // The outcome of our on chain action that resolved the outpoint. - ResolutionOutcome outcome = 2; - - // The outpoint that was spent by the resolution. - OutPoint outpoint = 3; - - // The amount that was claimed by the resolution. - uint64 amount_sat = 4; - - // The hex-encoded transaction ID of the sweep transaction that spent the - // output. 
- string sweep_txid = 5; -} - -message ClosedChannelsRequest { - bool cooperative = 1; - bool local_force = 2; - bool remote_force = 3; - bool breach = 4; - bool funding_canceled = 5; - bool abandoned = 6; -} - -message ClosedChannelsResponse { - repeated ChannelCloseSummary channels = 1; -} - -message Peer { - // The identity pubkey of the peer - string pub_key = 1; - - // Network address of the peer; eg `127.0.0.1:10011` - string address = 3; - - // Bytes of data transmitted to this peer - uint64 bytes_sent = 4; - - // Bytes of data transmitted from this peer - uint64 bytes_recv = 5; - - // Satoshis sent to this peer - int64 sat_sent = 6; - - // Satoshis received from this peer - int64 sat_recv = 7; - - // A channel is inbound if the counterparty initiated the channel - bool inbound = 8; - - // Ping time to this peer - int64 ping_time = 9; - - enum SyncType { - /* - Denotes that we cannot determine the peer's current sync type. - */ - UNKNOWN_SYNC = 0; - - /* - Denotes that we are actively receiving new graph updates from the peer. - */ - ACTIVE_SYNC = 1; - - /* - Denotes that we are not receiving new graph updates from the peer. - */ - PASSIVE_SYNC = 2; - } - - // The type of sync we are currently performing with this peer. - SyncType sync_type = 10; - - // Features advertised by the remote peer in their init message. - map features = 11; - - /* - The latest errors received from our peer with timestamps, limited to the 10 - most recent errors. These errors are tracked across peer connections, but - are not persisted across lnd restarts. Note that these errors are only - stored for peers that we have channels open with, to prevent peers from - spamming us with errors at no cost. - */ - repeated TimestampedError errors = 12; - - /* - The number of times we have recorded this peer going offline or coming - online, recorded across restarts. 
Note that this value is decreased over - time if the peer has not recently flapped, so that we can forgive peers - with historically high flap counts. - */ - int32 flap_count = 13; - - /* - The timestamp of the last flap we observed for this peer. If this value is - zero, we have not observed any flaps for this peer. - */ - int64 last_flap_ns = 14; -} - -message TimestampedError { - // The unix timestamp in seconds when the error occurred. - uint64 timestamp = 1; - - // The string representation of the error sent by our peer. - string error = 2; -} - -message ListPeersRequest { - /* - If true, only the last error that our peer sent us will be returned with - the peer's information, rather than the full set of historic errors we have - stored. - */ - bool latest_error = 1; -} -message ListPeersResponse { - // The list of currently connected peers - repeated Peer peers = 1; -} - -message PeerEventSubscription { -} - -message PeerEvent { - // The identity pubkey of the peer. - string pub_key = 1; - - enum EventType { - PEER_ONLINE = 0; - PEER_OFFLINE = 1; - } - - EventType type = 2; -} - -message GetInfoRequest { -} -message GetInfoResponse { - // The version of the LND software that the node is running. - string version = 14; - - // The SHA1 commit hash that the daemon is compiled with. - string commit_hash = 20; - - // The identity pubkey of the current node. - string identity_pubkey = 1; - - // If applicable, the alias of the current node, e.g. 
"bob" - string alias = 2; - - // The color of the current node in hex code format - string color = 17; - - // Number of pending channels - uint32 num_pending_channels = 3; - - // Number of active channels - uint32 num_active_channels = 4; - - // Number of inactive channels - uint32 num_inactive_channels = 15; - - // Number of peers - uint32 num_peers = 5; - - // The node's current view of the height of the best block - uint32 block_height = 6; - - // The node's current view of the hash of the best block - string block_hash = 8; - - // Timestamp of the block best known to the wallet - int64 best_header_timestamp = 13; - - // Whether the wallet's view is synced to the main chain - bool synced_to_chain = 9; - - // Whether we consider ourselves synced with the public channel graph. - bool synced_to_graph = 18; - - /* - Whether the current node is connected to testnet. This field is - deprecated and the network field should be used instead - **/ - bool testnet = 10 [deprecated = true]; - - reserved 11; - - // A list of active chains the node is connected to - repeated Chain chains = 16; - - // The URIs of the current node. - repeated string uris = 12; - - /* - Features that our node has advertised in our init message, node - announcements and invoices. - */ - map features = 19; -} - -message GetRecoveryInfoRequest { -} -message GetRecoveryInfoResponse { - // Whether the wallet is in recovery mode - bool recovery_mode = 1; - - // Whether the wallet recovery progress is finished - bool recovery_finished = 2; - - // The recovery progress, ranging from 0 to 1. 
- double progress = 3; -} - -message Chain { - // The blockchain the node is on (eg bitcoin, litecoin) - string chain = 1; - - // The network the node is on (eg regtest, testnet, mainnet) - string network = 2; -} - -message ConfirmationUpdate { - bytes block_sha = 1; - int32 block_height = 2; - - uint32 num_confs_left = 3; -} - -message ChannelOpenUpdate { - ChannelPoint channel_point = 1; -} - -message ChannelCloseUpdate { - bytes closing_txid = 1; - - bool success = 2; -} - -message CloseChannelRequest { - /* - The outpoint (txid:index) of the funding transaction. With this value, Bob - will be able to generate a signature for Alice's version of the commitment - transaction. - */ - ChannelPoint channel_point = 1; - - // If true, then the channel will be closed forcibly. This means the - // current commitment transaction will be signed and broadcast. - bool force = 2; - - // The target number of blocks that the closure transaction should be - // confirmed by. - int32 target_conf = 3; - - // A manual fee rate set in sat/byte that should be used when crafting the - // closure transaction. - int64 sat_per_byte = 4; - - /* - An optional address to send funds to in the case of a cooperative close. - If the channel was opened with an upfront shutdown script and this field - is set, the request to close will fail because the channel must pay out - to the upfront shutdown addresss. - */ - string delivery_address = 5; -} - -message CloseStatusUpdate { - oneof update { - PendingUpdate close_pending = 1; - ChannelCloseUpdate chan_close = 3; - } -} - -message PendingUpdate { - bytes txid = 1; - uint32 output_index = 2; -} - -message ReadyForPsbtFunding { - /* - The P2WSH address of the channel funding multisig address that the below - specified amount in satoshis needs to be sent to. - */ - string funding_address = 1; - - /* - The exact amount in satoshis that needs to be sent to the above address to - fund the pending channel. 
- */ - int64 funding_amount = 2; - - /* - A raw PSBT that contains the pending channel output. If a base PSBT was - provided in the PsbtShim, this is the base PSBT with one additional output. - If no base PSBT was specified, this is an otherwise empty PSBT with exactly - one output. - */ - bytes psbt = 3; -} - -message OpenChannelRequest { - /* - The pubkey of the node to open a channel with. When using REST, this field - must be encoded as base64. - */ - bytes node_pubkey = 2; - - /* - The hex encoded pubkey of the node to open a channel with. Deprecated now - that the REST gateway supports base64 encoding of bytes fields. - */ - string node_pubkey_string = 3 [deprecated = true]; - - // The number of satoshis the wallet should commit to the channel - int64 local_funding_amount = 4; - - // The number of satoshis to push to the remote side as part of the initial - // commitment state - int64 push_sat = 5; - - // The target number of blocks that the funding transaction should be - // confirmed by. - int32 target_conf = 6; - - // A manual fee rate set in sat/byte that should be used when crafting the - // funding transaction. - int64 sat_per_byte = 7; - - // Whether this channel should be private, not announced to the greater - // network. - bool private = 8; - - // The minimum value in millisatoshi we will require for incoming HTLCs on - // the channel. - int64 min_htlc_msat = 9; - - // The delay we require on the remote's commitment transaction. If this is - // not set, it will be scaled automatically with the channel size. - uint32 remote_csv_delay = 10; - - // The minimum number of confirmations each one of your outputs used for - // the funding transaction must satisfy. - int32 min_confs = 11; - - // Whether unconfirmed outputs should be used as inputs for the funding - // transaction. - bool spend_unconfirmed = 12; - - /* - Close address is an optional address which specifies the address to which - funds should be paid out to upon cooperative close. 
This field may only be - set if the peer supports the option upfront feature bit (call listpeers - to check). The remote peer will only accept cooperative closes to this - address if it is set. - - Note: If this value is set on channel creation, you will *not* be able to - cooperatively close out to a different address. - */ - string close_address = 13; - - /* - Funding shims are an optional argument that allow the caller to intercept - certain funding functionality. For example, a shim can be provided to use a - particular key for the commitment key (ideally cold) rather than use one - that is generated by the wallet as normal, or signal that signing will be - carried out in an interactive manner (PSBT based). - */ - FundingShim funding_shim = 14; - - /* - The maximum amount of coins in millisatoshi that can be pending within - the channel. It only applies to the remote party. - */ - uint64 remote_max_value_in_flight_msat = 15; - - /* - The maximum number of concurrent HTLCs we will allow the remote party to add - to the commitment transaction. - */ - uint32 remote_max_htlcs = 16; - - /* - Max local csv is the maximum csv delay we will allow for our own commitment - transaction. - */ - uint32 max_local_csv = 17; -} -message OpenStatusUpdate { - oneof update { - /* - Signals that the channel is now fully negotiated and the funding - transaction published. - */ - PendingUpdate chan_pending = 1; - - /* - Signals that the channel's funding transaction has now reached the - required number of confirmations on chain and can be used. - */ - ChannelOpenUpdate chan_open = 3; - - /* - Signals that the funding process has been suspended and the construction - of a PSBT that funds the channel PK script is now required. - */ - ReadyForPsbtFunding psbt_fund = 5; - } - - /* - The pending channel ID of the created channel. This value may be used to - further the funding flow manually via the FundingStateStep method. 
- */ - bytes pending_chan_id = 4; -} - -message KeyLocator { - // The family of key being identified. - int32 key_family = 1; - - // The precise index of the key being identified. - int32 key_index = 2; -} - -message KeyDescriptor { - /* - The raw bytes of the key being identified. - */ - bytes raw_key_bytes = 1; - - /* - The key locator that identifies which key to use for signing. - */ - KeyLocator key_loc = 2; -} - -message ChanPointShim { - /* - The size of the pre-crafted output to be used as the channel point for this - channel funding. - */ - int64 amt = 1; - - // The target channel point to refrence in created commitment transactions. - ChannelPoint chan_point = 2; - - // Our local key to use when creating the multi-sig output. - KeyDescriptor local_key = 3; - - // The key of the remote party to use when creating the multi-sig output. - bytes remote_key = 4; - - /* - If non-zero, then this will be used as the pending channel ID on the wire - protocol to initate the funding request. This is an optional field, and - should only be set if the responder is already expecting a specific pending - channel ID. - */ - bytes pending_chan_id = 5; - - /* - This uint32 indicates if this channel is to be considered 'frozen'. A frozen - channel does not allow a cooperative channel close by the initiator. The - thaw_height is the height that this restriction stops applying to the - channel. The height can be interpreted in two ways: as a relative height if - the value is less than 500,000, or as an absolute height otherwise. - */ - uint32 thaw_height = 6; -} - -message PsbtShim { - /* - A unique identifier of 32 random bytes that will be used as the pending - channel ID to identify the PSBT state machine when interacting with it and - on the wire protocol to initiate the funding request. - */ - bytes pending_chan_id = 1; - - /* - An optional base PSBT the new channel output will be added to. If this is - non-empty, it must be a binary serialized PSBT. 
- */ - bytes base_psbt = 2; - - /* - If a channel should be part of a batch (multiple channel openings in one - transaction), it can be dangerous if the whole batch transaction is - published too early before all channel opening negotiations are completed. - This flag prevents this particular channel from broadcasting the transaction - after the negotiation with the remote peer. In a batch of channel openings - this flag should be set to true for every channel but the very last. - */ - bool no_publish = 3; -} - -message FundingShim { - oneof shim { - /* - A channel shim where the channel point was fully constructed outside - of lnd's wallet and the transaction might already be published. - */ - ChanPointShim chan_point_shim = 1; - - /* - A channel shim that uses a PSBT to fund and sign the channel funding - transaction. - */ - PsbtShim psbt_shim = 2; - } -} - -message FundingShimCancel { - // The pending channel ID of the channel to cancel the funding shim for. - bytes pending_chan_id = 1; -} - -message FundingPsbtVerify { - /* - The funded but not yet signed PSBT that sends the exact channel capacity - amount to the PK script returned in the open channel message in a previous - step. - */ - bytes funded_psbt = 1; - - // The pending channel ID of the channel to get the PSBT for. - bytes pending_chan_id = 2; -} - -message FundingPsbtFinalize { - /* - The funded PSBT that contains all witness data to send the exact channel - capacity amount to the PK script returned in the open channel message in a - previous step. Cannot be set at the same time as final_raw_tx. - */ - bytes signed_psbt = 1; - - // The pending channel ID of the channel to get the PSBT for. - bytes pending_chan_id = 2; - - /* - As an alternative to the signed PSBT with all witness data, the final raw - wire format transaction can also be specified directly. Cannot be set at the - same time as signed_psbt. 
- */ - bytes final_raw_tx = 3; -} - -message FundingTransitionMsg { - oneof trigger { - /* - The funding shim to register. This should be used before any - channel funding has began by the remote party, as it is intended as a - preparatory step for the full channel funding. - */ - FundingShim shim_register = 1; - - // Used to cancel an existing registered funding shim. - FundingShimCancel shim_cancel = 2; - - /* - Used to continue a funding flow that was initiated to be executed - through a PSBT. This step verifies that the PSBT contains the correct - outputs to fund the channel. - */ - FundingPsbtVerify psbt_verify = 3; - - /* - Used to continue a funding flow that was initiated to be executed - through a PSBT. This step finalizes the funded and signed PSBT, finishes - negotiation with the peer and finally publishes the resulting funding - transaction. - */ - FundingPsbtFinalize psbt_finalize = 4; - } -} - -message FundingStateStepResp { -} - -message PendingHTLC { - // The direction within the channel that the htlc was sent - bool incoming = 1; - - // The total value of the htlc - int64 amount = 2; - - // The final output to be swept back to the user's wallet - string outpoint = 3; - - // The next block height at which we can spend the current stage - uint32 maturity_height = 4; - - /* - The number of blocks remaining until the current stage can be swept. - Negative values indicate how many blocks have passed since becoming - mature. - */ - int32 blocks_til_maturity = 5; - - // Indicates whether the htlc is in its first or second stage of recovery - uint32 stage = 6; -} - -message PendingChannelsRequest { -} -message PendingChannelsResponse { - message PendingChannel { - string remote_node_pub = 1; - string channel_point = 2; - - int64 capacity = 3; - - int64 local_balance = 4; - int64 remote_balance = 5; - - // The minimum satoshis this node is required to reserve in its - // balance. 
- int64 local_chan_reserve_sat = 6; - - /* - The minimum satoshis the other node is required to reserve in its - balance. - */ - int64 remote_chan_reserve_sat = 7; - - // The party that initiated opening the channel. - Initiator initiator = 8; - - // The commitment type used by this channel. - CommitmentType commitment_type = 9; - } - - message PendingOpenChannel { - // The pending channel - PendingChannel channel = 1; - - // The height at which this channel will be confirmed - uint32 confirmation_height = 2; - - /* - The amount calculated to be paid in fees for the current set of - commitment transactions. The fee amount is persisted with the channel - in order to allow the fee amount to be removed and recalculated with - each channel state update, including updates that happen after a system - restart. - */ - int64 commit_fee = 4; - - // The weight of the commitment transaction - int64 commit_weight = 5; - - /* - The required number of satoshis per kilo-weight that the requester will - pay at all times, for both the funding transaction and commitment - transaction. This value can later be updated once the channel is open. - */ - int64 fee_per_kw = 6; - } - - message WaitingCloseChannel { - // The pending channel waiting for closing tx to confirm - PendingChannel channel = 1; - - // The balance in satoshis encumbered in this channel - int64 limbo_balance = 2; - - /* - A list of valid commitment transactions. Any of these can confirm at - this point. - */ - Commitments commitments = 3; - } - - message Commitments { - // Hash of the local version of the commitment tx. - string local_txid = 1; - - // Hash of the remote version of the commitment tx. - string remote_txid = 2; - - // Hash of the remote pending version of the commitment tx. - string remote_pending_txid = 3; - - /* - The amount in satoshis calculated to be paid in fees for the local - commitment. 
- */ - uint64 local_commit_fee_sat = 4; - - /* - The amount in satoshis calculated to be paid in fees for the remote - commitment. - */ - uint64 remote_commit_fee_sat = 5; - - /* - The amount in satoshis calculated to be paid in fees for the remote - pending commitment. - */ - uint64 remote_pending_commit_fee_sat = 6; - } - - message ClosedChannel { - // The pending channel to be closed - PendingChannel channel = 1; - - // The transaction id of the closing transaction - string closing_txid = 2; - } - - message ForceClosedChannel { - // The pending channel to be force closed - PendingChannel channel = 1; - - // The transaction id of the closing transaction - string closing_txid = 2; - - // The balance in satoshis encumbered in this pending channel - int64 limbo_balance = 3; - - // The height at which funds can be swept into the wallet - uint32 maturity_height = 4; - - /* - Remaining # of blocks until the commitment output can be swept. - Negative values indicate how many blocks have passed since becoming - mature. - */ - int32 blocks_til_maturity = 5; - - // The total value of funds successfully recovered from this channel - int64 recovered_balance = 6; - - repeated PendingHTLC pending_htlcs = 8; - - enum AnchorState { - LIMBO = 0; - RECOVERED = 1; - LOST = 2; - } - - AnchorState anchor = 9; - } - - // The balance in satoshis encumbered in pending channels - int64 total_limbo_balance = 1; - - // Channels pending opening - repeated PendingOpenChannel pending_open_channels = 2; - - /* - Deprecated: Channels pending closing previously contained cooperatively - closed channels with a single confirmation. These channels are now - considered closed from the time we see them on chain. 
- */ - repeated ClosedChannel pending_closing_channels = 3 [deprecated = true]; - - // Channels pending force closing - repeated ForceClosedChannel pending_force_closing_channels = 4; - - // Channels waiting for closing tx to confirm - repeated WaitingCloseChannel waiting_close_channels = 5; -} - -message ChannelEventSubscription { -} - -message ChannelEventUpdate { - oneof channel { - Channel open_channel = 1; - ChannelCloseSummary closed_channel = 2; - ChannelPoint active_channel = 3; - ChannelPoint inactive_channel = 4; - PendingUpdate pending_open_channel = 6; - } - - enum UpdateType { - OPEN_CHANNEL = 0; - CLOSED_CHANNEL = 1; - ACTIVE_CHANNEL = 2; - INACTIVE_CHANNEL = 3; - PENDING_OPEN_CHANNEL = 4; - } - - UpdateType type = 5; -} - -message WalletBalanceRequest { -} -message WalletBalanceResponse { - // The balance of the wallet - int64 total_balance = 1; - - // The confirmed balance of a wallet(with >= 1 confirmations) - int64 confirmed_balance = 2; - - // The unconfirmed balance of a wallet(with 0 confirmations) - int64 unconfirmed_balance = 3; -} - -message GetAddressBalancesRequest { - // Minimum number of confirmations for coins to be considered received - int32 minconf = 1; - - // If true then addresses which have been created but carry zero balance will be included - bool showzerobalance = 2; -} - -message GetAddressBalancesResponseAddr{ - // The address which has this balance - string address = 1; - - // Total balance in coins - double total = 2; - - // Total balance (atomic units) - int64 stotal = 3; - - // Balance which is currently spendable (coins) - double spendable = 4; - - // Balance which is currently spendable (atomic units) - int64 sspendable = 5; - - // Mined coins which have not yet matured (coins) - double immaturereward = 6; - - // Mined coins which have not yet matured (atomic units) - int64 simmaturereward = 7; - - // Unconfirmed balance in coins - double unconfirmed = 8; - - // Unconfirmed balance in atomic units - int64 sunconfirmed = 
9; - - // The number of transaction outputs which make up the balance - int32 outputcount = 10; -} -message GetAddressBalancesResponse{ - repeated GetAddressBalancesResponseAddr addrs = 1; -} - -message Amount { - // Value denominated in satoshis. - uint64 sat = 1; - - // Value denominated in milli-satoshis. - uint64 msat = 2; -} - -message ChannelBalanceRequest { -} -message ChannelBalanceResponse { - // Deprecated. Sum of channels balances denominated in satoshis - int64 balance = 1 [deprecated = true]; - - // Deprecated. Sum of channels pending balances denominated in satoshis - int64 pending_open_balance = 2 [deprecated = true]; - - // Sum of channels local balances. - Amount local_balance = 3; - - // Sum of channels remote balances. - Amount remote_balance = 4; - - // Sum of channels local unsettled balances. - Amount unsettled_local_balance = 5; - - // Sum of channels remote unsettled balances. - Amount unsettled_remote_balance = 6; - - // Sum of channels pending local balances. - Amount pending_open_local_balance = 7; - - // Sum of channels pending remote balances. - Amount pending_open_remote_balance = 8; -} - -message QueryRoutesRequest { - // The 33-byte hex-encoded public key for the payment destination - string pub_key = 1; - - /* - The amount to send expressed in satoshis. - - The fields amt and amt_msat are mutually exclusive. - */ - int64 amt = 2; - - /* - The amount to send expressed in millisatoshis. - - The fields amt and amt_msat are mutually exclusive. - */ - int64 amt_msat = 12; - - reserved 3; - - /* - An optional CLTV delta from the current height that should be used for the - timelock of the final hop. Note that unlike SendPayment, QueryRoutes does - not add any additional block padding on top of final_ctlv_delta. This - padding of a few blocks needs to be added manually or otherwise failures may - happen when a block comes in while the payment is in flight. 
- */ - int32 final_cltv_delta = 4; - - /* - The maximum number of satoshis that will be paid as a fee of the payment. - This value can be represented either as a percentage of the amount being - sent, or as a fixed amount of the maximum fee the user is willing the pay to - send the payment. - */ - FeeLimit fee_limit = 5; - - /* - A list of nodes to ignore during path finding. When using REST, these fields - must be encoded as base64. - */ - repeated bytes ignored_nodes = 6; - - /* - Deprecated. A list of edges to ignore during path finding. - */ - repeated EdgeLocator ignored_edges = 7 [deprecated = true]; - - /* - The source node where the request route should originated from. If empty, - self is assumed. - */ - string source_pub_key = 8; - - /* - If set to true, edge probabilities from mission control will be used to get - the optimal route. - */ - bool use_mission_control = 9; - - /* - A list of directed node pairs that will be ignored during path finding. - */ - repeated NodePair ignored_pairs = 10; - - /* - An optional maximum total time lock for the route. If the source is empty or - ourselves, this should not exceed lnd's `--max-cltv-expiry` setting. If - zero, then the value of `--max-cltv-expiry` is used as the limit. - */ - uint32 cltv_limit = 11; - - /* - An optional field that can be used to pass an arbitrary set of TLV records - to a peer which understands the new records. This can be used to pass - application specific data during the payment attempt. If the destination - does not support the specified recrods, and error will be returned. - Record types are required to be in the custom range >= 65536. When using - REST, the values must be encoded as base64. - */ - map dest_custom_records = 13; - - /* - The channel id of the channel that must be taken to the first hop. If zero, - any channel may be used. - */ - uint64 outgoing_chan_id = 14 [jstype = JS_STRING]; - - /* - The pubkey of the last hop of the route. If empty, any hop may be used. 
- */ - bytes last_hop_pubkey = 15; - - /* - Optional route hints to reach the destination through private channels. - */ - repeated lnrpc.RouteHint route_hints = 16; - - /* - Features assumed to be supported by the final node. All transitive feature - dependencies must also be set properly. For a given feature bit pair, either - optional or remote may be set, but not both. If this field is nil or empty, - the router will try to load destination features from the graph as a - fallback. - */ - repeated lnrpc.FeatureBit dest_features = 17; -} - -message NodePair { - /* - The sending node of the pair. When using REST, this field must be encoded as - base64. - */ - bytes from = 1; - - /* - The receiving node of the pair. When using REST, this field must be encoded - as base64. - */ - bytes to = 2; -} - -message EdgeLocator { - // The short channel id of this edge. - uint64 channel_id = 1 [jstype = JS_STRING]; - - /* - The direction of this edge. If direction_reverse is false, the direction - of this edge is from the channel endpoint with the lexicographically smaller - pub key to the endpoint with the larger pub key. If direction_reverse is - is true, the edge goes the other way. - */ - bool direction_reverse = 2; -} - -message QueryRoutesResponse { - /* - The route that results from the path finding operation. This is still a - repeated field to retain backwards compatibility. - */ - repeated Route routes = 1; - - /* - The success probability of the returned route based on the current mission - control state. [EXPERIMENTAL] - */ - double success_prob = 2; -} - -message Hop { - /* - The unique channel ID for the channel. The first 3 bytes are the block - height, the next 3 the index within the block, and the last 2 bytes are the - output index for the channel. 
- */ - uint64 chan_id = 1 [jstype = JS_STRING]; - int64 chan_capacity = 2; - int64 amt_to_forward = 3 [deprecated = true]; - int64 fee = 4 [deprecated = true]; - uint32 expiry = 5; - int64 amt_to_forward_msat = 6; - int64 fee_msat = 7; - - /* - An optional public key of the hop. If the public key is given, the payment - can be executed without relying on a copy of the channel graph. - */ - string pub_key = 8; - - /* - If set to true, then this hop will be encoded using the new variable length - TLV format. Note that if any custom tlv_records below are specified, then - this field MUST be set to true for them to be encoded properly. - */ - bool tlv_payload = 9; - - /* - An optional TLV record that signals the use of an MPP payment. If present, - the receiver will enforce that that the same mpp_record is included in the - final hop payload of all non-zero payments in the HTLC set. If empty, a - regular single-shot payment is or was attempted. - */ - MPPRecord mpp_record = 10; - - /* - An optional set of key-value TLV records. This is useful within the context - of the SendToRoute call as it allows callers to specify arbitrary K-V pairs - to drop off at each hop within the onion. - */ - map custom_records = 11; -} - -message MPPRecord { - /* - A unique, random identifier used to authenticate the sender as the intended - payer of a multi-path payment. The payment_addr must be the same for all - subpayments, and match the payment_addr provided in the receiver's invoice. - The same payment_addr must be used on all subpayments. - */ - bytes payment_addr = 11; - - /* - The total amount in milli-satoshis being sent as part of a larger multi-path - payment. The caller is responsible for ensuring subpayments to the same node - and payment_hash sum exactly to total_amt_msat. The same - total_amt_msat must be used on all subpayments. - */ - int64 total_amt_msat = 10; -} - -/* -A path through the channel graph which runs over one or more channels in -succession. 
This struct carries all the information required to craft the -Sphinx onion packet, and send the payment along the first hop in the path. A -route is only selected as valid if all the channels have sufficient capacity to -carry the initial payment amount after fees are accounted for. -*/ -message Route { - /* - The cumulative (final) time lock across the entire route. This is the CLTV - value that should be extended to the first hop in the route. All other hops - will decrement the time-lock as advertised, leaving enough time for all - hops to wait for or present the payment preimage to complete the payment. - */ - uint32 total_time_lock = 1; - - /* - The sum of the fees paid at each hop within the final route. In the case - of a one-hop payment, this value will be zero as we don't need to pay a fee - to ourselves. - */ - int64 total_fees = 2 [deprecated = true]; - - /* - The total amount of funds required to complete a payment over this route. - This value includes the cumulative fees at each hop. As a result, the HTLC - extended to the first-hop in the route will need to have at least this many - satoshis, otherwise the route will fail at an intermediate node due to an - insufficient amount of fees. - */ - int64 total_amt = 3 [deprecated = true]; - - /* - Contains details concerning the specific forwarding details at each hop. - */ - repeated Hop hops = 4; - - /* - The total fees in millisatoshis. - */ - int64 total_fees_msat = 5; - - /* - The total amount in millisatoshis. - */ - int64 total_amt_msat = 6; -} - -message NodeInfoRequest { - // The 33-byte hex-encoded compressed public of the target node - string pub_key = 1; - - // If true, will include all known channels associated with the node. - bool include_channels = 2; -} - -message NodeInfo { - /* - An individual vertex/node within the channel graph. A node is - connected to other nodes by one or more channel edges emanating from it. 
As - the graph is directed, a node will also have an incoming edge attached to - it for each outgoing edge. - */ - LightningNode node = 1; - - // The total number of channels for the node. - uint32 num_channels = 2; - - // The sum of all channels capacity for the node, denominated in satoshis. - int64 total_capacity = 3; - - // A list of all public channels for the node. - repeated ChannelEdge channels = 4; -} - -/* -An individual vertex/node within the channel graph. A node is -connected to other nodes by one or more channel edges emanating from it. As the -graph is directed, a node will also have an incoming edge attached to it for -each outgoing edge. -*/ -message LightningNode { - uint32 last_update = 1; - string pub_key = 2; - string alias = 3; - repeated NodeAddress addresses = 4; - string color = 5; - map features = 6; -} - -message NodeAddress { - string network = 1; - string addr = 2; -} - -message RoutingPolicy { - uint32 time_lock_delta = 1; - int64 min_htlc = 2; - int64 fee_base_msat = 3; - int64 fee_rate_milli_msat = 4; - bool disabled = 5; - uint64 max_htlc_msat = 6; - uint32 last_update = 7; -} - -/* -A fully authenticated channel along with all its unique attributes. -Once an authenticated channel announcement has been processed on the network, -then an instance of ChannelEdgeInfo encapsulating the channels attributes is -stored. The other portions relevant to routing policy of a channel are stored -within a ChannelEdgePolicy for each direction of the channel. -*/ -message ChannelEdge { - /* - The unique channel ID for the channel. The first 3 bytes are the block - height, the next 3 the index within the block, and the last 2 bytes are the - output index for the channel. 
- */ - uint64 channel_id = 1 [jstype = JS_STRING]; - string chan_point = 2; - - uint32 last_update = 3 [deprecated = true]; - - string node1_pub = 4; - string node2_pub = 5; - - int64 capacity = 6; - - RoutingPolicy node1_policy = 7; - RoutingPolicy node2_policy = 8; -} - -message ChannelGraphRequest { - /* - Whether unannounced channels are included in the response or not. If set, - unannounced channels are included. Unannounced channels are both private - channels, and public channels that are not yet announced to the network. - */ - bool include_unannounced = 1; -} - -// Returns a new instance of the directed channel graph. -message ChannelGraph { - // The list of `LightningNode`s in this channel graph - repeated LightningNode nodes = 1; - - // The list of `ChannelEdge`s in this channel graph - repeated ChannelEdge edges = 2; -} - -enum NodeMetricType { - UNKNOWN = 0; - BETWEENNESS_CENTRALITY = 1; -} - -message NodeMetricsRequest { - // The requested node metrics. - repeated NodeMetricType types = 1; -} - -message NodeMetricsResponse { - /* - Betweenness centrality is the sum of the ratio of shortest paths that pass - through the node for each pair of nodes in the graph (not counting paths - starting or ending at this node). - Map of node pubkey to betweenness centrality of the node. Normalized - values are in the [0,1] closed interval. - */ - map betweenness_centrality = 1; -} - -message FloatMetric { - // Arbitrary float value. - double value = 1; - - // The value normalized to [0,1] or [-1,1]. - double normalized_value = 2; -} - -message ChanInfoRequest { - /* - The unique channel ID for the channel. The first 3 bytes are the block - height, the next 3 the index within the block, and the last 2 bytes are the - output index for the channel. 
- */ - uint64 chan_id = 1 [jstype = JS_STRING]; -} - -message NetworkInfoRequest { -} -message NetworkInfo { - uint32 graph_diameter = 1; - double avg_out_degree = 2; - uint32 max_out_degree = 3; - - uint32 num_nodes = 4; - uint32 num_channels = 5; - - int64 total_network_capacity = 6; - - double avg_channel_size = 7; - int64 min_channel_size = 8; - int64 max_channel_size = 9; - int64 median_channel_size_sat = 10; - - // The number of edges marked as zombies. - uint64 num_zombie_chans = 11; - - // TODO(roasbeef): fee rate info, expiry - // * also additional RPC for tracking fee info once in -} - -message StopRequest { -} -message StopResponse { -} - -message GraphTopologySubscription { -} -message GraphTopologyUpdate { - repeated NodeUpdate node_updates = 1; - repeated ChannelEdgeUpdate channel_updates = 2; - repeated ClosedChannelUpdate closed_chans = 3; -} -message NodeUpdate { - repeated string addresses = 1; - string identity_key = 2; - bytes global_features = 3; - string alias = 4; - string color = 5; -} -message ChannelEdgeUpdate { - /* - The unique channel ID for the channel. The first 3 bytes are the block - height, the next 3 the index within the block, and the last 2 bytes are the - output index for the channel. - */ - uint64 chan_id = 1 [jstype = JS_STRING]; - - ChannelPoint chan_point = 2; - - int64 capacity = 3; - - RoutingPolicy routing_policy = 4; - - string advertising_node = 5; - string connecting_node = 6; -} -message ClosedChannelUpdate { - /* - The unique channel ID for the channel. The first 3 bytes are the block - height, the next 3 the index within the block, and the last 2 bytes are the - output index for the channel. - */ - uint64 chan_id = 1 [jstype = JS_STRING]; - int64 capacity = 2; - uint32 closed_height = 3; - ChannelPoint chan_point = 4; -} - -message HopHint { - // The public key of the node at the start of the channel. - string node_id = 1; - - // The unique identifier of the channel. 
- uint64 chan_id = 2 [jstype = JS_STRING]; - - // The base fee of the channel denominated in millisatoshis. - uint32 fee_base_msat = 3; - - /* - The fee rate of the channel for sending one satoshi across it denominated in - millionths of a satoshi. - */ - uint32 fee_proportional_millionths = 4; - - // The time-lock delta of the channel. - uint32 cltv_expiry_delta = 5; -} - -message RouteHint { - /* - A list of hop hints that when chained together can assist in reaching a - specific destination. - */ - repeated HopHint hop_hints = 1; -} - -message Invoice { - /* - An optional memo to attach along with the invoice. Used for record keeping - purposes for the invoice's creator, and will also be set in the description - field of the encoded payment request if the description_hash field is not - being used. - */ - string memo = 1; - - reserved 2; - - /* - The hex-encoded preimage (32 byte) which will allow settling an incoming - HTLC payable to this preimage. When using REST, this field must be encoded - as base64. - */ - bytes r_preimage = 3; - - /* - The hash of the preimage. When using REST, this field must be encoded as - base64. - */ - bytes r_hash = 4; - - /* - The value of this invoice in satoshis - - The fields value and value_msat are mutually exclusive. - */ - int64 value = 5; - - /* - The value of this invoice in millisatoshis - - The fields value and value_msat are mutually exclusive. - */ - int64 value_msat = 23; - - // Whether this invoice has been fulfilled - bool settled = 6 [deprecated = true]; - - // When this invoice was created - int64 creation_date = 7; - - // When this invoice was settled - int64 settle_date = 8; - - /* - A bare-bones invoice for a payment within the Lightning Network. With the - details of the invoice, the sender has all the data necessary to send a - payment to the recipient. - */ - string payment_request = 9; - - /* - Hash (SHA-256) of a description of the payment. 
Used if the description of - payment (memo) is too long to naturally fit within the description field - of an encoded payment request. When using REST, this field must be encoded - as base64. - */ - bytes description_hash = 10; - - // Payment request expiry time in seconds. Default is 3600 (1 hour). - int64 expiry = 11; - - // Fallback on-chain address. - string fallback_addr = 12; - - // Delta to use for the time-lock of the CLTV extended to the final hop. - uint64 cltv_expiry = 13; - - /* - Route hints that can each be individually used to assist in reaching the - invoice's destination. - */ - repeated RouteHint route_hints = 14; - - // Whether this invoice should include routing hints for private channels. - bool private = 15; - - /* - The "add" index of this invoice. Each newly created invoice will increment - this index making it monotonically increasing. Callers to the - SubscribeInvoices call can use this to instantly get notified of all added - invoices with an add_index greater than this one. - */ - uint64 add_index = 16; - - /* - The "settle" index of this invoice. Each newly settled invoice will - increment this index making it monotonically increasing. Callers to the - SubscribeInvoices call can use this to instantly get notified of all - settled invoices with an settle_index greater than this one. - */ - uint64 settle_index = 17; - - // Deprecated, use amt_paid_sat or amt_paid_msat. - int64 amt_paid = 18 [deprecated = true]; - - /* - The amount that was accepted for this invoice, in satoshis. This will ONLY - be set if this invoice has been settled. We provide this field as if the - invoice was created with a zero value, then we need to record what amount - was ultimately accepted. Additionally, it's possible that the sender paid - MORE that was specified in the original invoice. So we'll record that here - as well. - */ - int64 amt_paid_sat = 19; - - /* - The amount that was accepted for this invoice, in millisatoshis. 
This will - ONLY be set if this invoice has been settled. We provide this field as if - the invoice was created with a zero value, then we need to record what - amount was ultimately accepted. Additionally, it's possible that the sender - paid MORE that was specified in the original invoice. So we'll record that - here as well. - */ - int64 amt_paid_msat = 20; - - enum InvoiceState { - OPEN = 0; - SETTLED = 1; - CANCELED = 2; - ACCEPTED = 3; - } - - /* - The state the invoice is in. - */ - InvoiceState state = 21; - - // List of HTLCs paying to this invoice [EXPERIMENTAL]. - repeated InvoiceHTLC htlcs = 22; - - // List of features advertised on the invoice. - map features = 24; - - /* - Indicates if this invoice was a spontaneous payment that arrived via keysend - [EXPERIMENTAL]. - */ - bool is_keysend = 25; -} - -enum InvoiceHTLCState { - ACCEPTED = 0; - SETTLED = 1; - CANCELED = 2; -} - -// Details of an HTLC that paid to an invoice -message InvoiceHTLC { - // Short channel id over which the htlc was received. - uint64 chan_id = 1 [jstype = JS_STRING]; - - // Index identifying the htlc on the channel. - uint64 htlc_index = 2; - - // The amount of the htlc in msat. - uint64 amt_msat = 3; - - // Block height at which this htlc was accepted. - int32 accept_height = 4; - - // Time at which this htlc was accepted. - int64 accept_time = 5; - - // Time at which this htlc was settled or canceled. - int64 resolve_time = 6; - - // Block height at which this htlc expires. - int32 expiry_height = 7; - - // Current state the htlc is in. - InvoiceHTLCState state = 8; - - // Custom tlv records. - map custom_records = 9; - - // The total amount of the mpp payment in msat. - uint64 mpp_total_amt_msat = 10; -} - -message AddInvoiceResponse { - bytes r_hash = 1; - - /* - A bare-bones invoice for a payment within the Lightning Network. With the - details of the invoice, the sender has all the data necessary to send a - payment to the recipient. 
- */ - string payment_request = 2; - - /* - The "add" index of this invoice. Each newly created invoice will increment - this index making it monotonically increasing. Callers to the - SubscribeInvoices call can use this to instantly get notified of all added - invoices with an add_index greater than this one. - */ - uint64 add_index = 16; -} -message PaymentHash { - /* - The hex-encoded payment hash of the invoice to be looked up. The passed - payment hash must be exactly 32 bytes, otherwise an error is returned. - Deprecated now that the REST gateway supports base64 encoding of bytes - fields. - */ - string r_hash_str = 1 [deprecated = true]; - - /* - The payment hash of the invoice to be looked up. When using REST, this field - must be encoded as base64. - */ - bytes r_hash = 2; -} - -message ListInvoiceRequest { - /* - If set, only invoices that are not settled and not canceled will be returned - in the response. - */ - bool pending_only = 1; - - /* - The index of an invoice that will be used as either the start or end of a - query to determine which invoices should be returned in the response. - */ - uint64 index_offset = 4; - - // The max number of invoices to return in the response to this query. - uint64 num_max_invoices = 5; - - /* - If set, the invoices returned will result from seeking backwards from the - specified index offset. This can be used to paginate backwards. - */ - bool reversed = 6; -} -message ListInvoiceResponse { - /* - A list of invoices from the time slice of the time series specified in the - request. - */ - repeated Invoice invoices = 1; - - /* - The index of the last item in the set of returned invoices. This can be used - to seek further, pagination style. - */ - uint64 last_index_offset = 2; - - /* - The index of the last item in the set of returned invoices. This can be used - to seek backwards, pagination style. 
- */ - uint64 first_index_offset = 3; -} - -message InvoiceSubscription { - /* - If specified (non-zero), then we'll first start by sending out - notifications for all added indexes with an add_index greater than this - value. This allows callers to catch up on any events they missed while they - weren't connected to the streaming RPC. - */ - uint64 add_index = 1; - - /* - If specified (non-zero), then we'll first start by sending out - notifications for all settled indexes with an settle_index greater than - this value. This allows callers to catch up on any events they missed while - they weren't connected to the streaming RPC. - */ - uint64 settle_index = 2; -} - -enum PaymentFailureReason { - /* - Payment isn't failed (yet). - */ - FAILURE_REASON_NONE = 0; - - /* - There are more routes to try, but the payment timeout was exceeded. - */ - FAILURE_REASON_TIMEOUT = 1; - - /* - All possible routes were tried and failed permanently. Or were no - routes to the destination at all. - */ - FAILURE_REASON_NO_ROUTE = 2; - - /* - A non-recoverable error has occured. - */ - FAILURE_REASON_ERROR = 3; - - /* - Payment details incorrect (unknown hash, invalid amt or - invalid final cltv delta) - */ - FAILURE_REASON_INCORRECT_PAYMENT_DETAILS = 4; - - /* - Insufficient local balance. - */ - FAILURE_REASON_INSUFFICIENT_BALANCE = 5; -} - -message Payment { - // The payment hash - string payment_hash = 1; - - // Deprecated, use value_sat or value_msat. - int64 value = 2 [deprecated = true]; - - // Deprecated, use creation_time_ns - int64 creation_date = 3 [deprecated = true]; - - reserved 4; - - // Deprecated, use fee_sat or fee_msat. - int64 fee = 5 [deprecated = true]; - - // The payment preimage - string payment_preimage = 6; - - // The value of the payment in satoshis - int64 value_sat = 7; - - // The value of the payment in milli-satoshis - int64 value_msat = 8; - - // The optional payment request being fulfilled. 
- string payment_request = 9; - - enum PaymentStatus { - UNKNOWN = 0; - IN_FLIGHT = 1; - SUCCEEDED = 2; - FAILED = 3; - } - - // The status of the payment. - PaymentStatus status = 10; - - // The fee paid for this payment in satoshis - int64 fee_sat = 11; - - // The fee paid for this payment in milli-satoshis - int64 fee_msat = 12; - - // The time in UNIX nanoseconds at which the payment was created. - int64 creation_time_ns = 13; - - // The HTLCs made in attempt to settle the payment. - repeated HTLCAttempt htlcs = 14; - - /* - The creation index of this payment. Each payment can be uniquely identified - by this index, which may not strictly increment by 1 for payments made in - older versions of lnd. - */ - uint64 payment_index = 15; - - PaymentFailureReason failure_reason = 16; -} - -message HTLCAttempt { - enum HTLCStatus { - IN_FLIGHT = 0; - SUCCEEDED = 1; - FAILED = 2; - } - - // The status of the HTLC. - HTLCStatus status = 1; - - // The route taken by this HTLC. - Route route = 2; - - // The time in UNIX nanoseconds at which this HTLC was sent. - int64 attempt_time_ns = 3; - - /* - The time in UNIX nanoseconds at which this HTLC was settled or failed. - This value will not be set if the HTLC is still IN_FLIGHT. - */ - int64 resolve_time_ns = 4; - - // Detailed htlc failure info. - Failure failure = 5; - - // The preimage that was used to settle the HTLC. - bytes preimage = 6; -} - -message ListPaymentsRequest { - /* - If true, then return payments that have not yet fully completed. This means - that pending payments, as well as failed payments will show up if this - field is set to true. This flag doesn't change the meaning of the indices, - which are tied to individual payments. - */ - bool include_incomplete = 1; - - /* - The index of a payment that will be used as either the start or end of a - query to determine which payments should be returned in the response. The - index_offset is exclusive. 
In the case of a zero index_offset, the query - will start with the oldest payment when paginating forwards, or will end - with the most recent payment when paginating backwards. - */ - uint64 index_offset = 2; - - // The maximal number of payments returned in the response to this query. - uint64 max_payments = 3; - - /* - If set, the payments returned will result from seeking backwards from the - specified index offset. This can be used to paginate backwards. The order - of the returned payments is always oldest first (ascending index order). - */ - bool reversed = 4; -} - -message ListPaymentsResponse { - // The list of payments - repeated Payment payments = 1; - - /* - The index of the first item in the set of returned payments. This can be - used as the index_offset to continue seeking backwards in the next request. - */ - uint64 first_index_offset = 2; - - /* - The index of the last item in the set of returned payments. This can be used - as the index_offset to continue seeking forwards in the next request. 
- */ - uint64 last_index_offset = 3; -} - -message DeleteAllPaymentsRequest { -} - -message DeleteAllPaymentsResponse { -} - -message AbandonChannelRequest { - ChannelPoint channel_point = 1; - - bool pending_funding_shim_only = 2; -} - -message AbandonChannelResponse { -} - -message DebugLevelRequest { - bool show = 1; - string level_spec = 2; -} -message DebugLevelResponse { - string sub_systems = 1; -} - -message PayReqString { - // The payment request string to be decoded - string pay_req = 1; -} -message PayReq { - string destination = 1; - string payment_hash = 2; - int64 num_satoshis = 3; - int64 timestamp = 4; - int64 expiry = 5; - string description = 6; - string description_hash = 7; - string fallback_addr = 8; - int64 cltv_expiry = 9; - repeated RouteHint route_hints = 10; - bytes payment_addr = 11; - int64 num_msat = 12; - map features = 13; -} - -enum FeatureBit { - DATALOSS_PROTECT_REQ = 0; - DATALOSS_PROTECT_OPT = 1; - INITIAL_ROUING_SYNC = 3; - UPFRONT_SHUTDOWN_SCRIPT_REQ = 4; - UPFRONT_SHUTDOWN_SCRIPT_OPT = 5; - GOSSIP_QUERIES_REQ = 6; - GOSSIP_QUERIES_OPT = 7; - TLV_ONION_REQ = 8; - TLV_ONION_OPT = 9; - EXT_GOSSIP_QUERIES_REQ = 10; - EXT_GOSSIP_QUERIES_OPT = 11; - STATIC_REMOTE_KEY_REQ = 12; - STATIC_REMOTE_KEY_OPT = 13; - PAYMENT_ADDR_REQ = 14; - PAYMENT_ADDR_OPT = 15; - MPP_REQ = 16; - MPP_OPT = 17; -} - -message Feature { - string name = 2; - bool is_required = 3; - bool is_known = 4; -} - -message FeeReportRequest { -} -message ChannelFeeReport { - // The short channel id that this fee report belongs to. - uint64 chan_id = 5 [jstype = JS_STRING]; - - // The channel that this fee report belongs to. - string channel_point = 1; - - // The base fee charged regardless of the number of milli-satoshis sent. - int64 base_fee_msat = 2; - - // The amount charged per milli-satoshis transferred expressed in - // millionths of a satoshi. - int64 fee_per_mil = 3; - - // The effective fee rate in milli-satoshis. 
Computed by dividing the - // fee_per_mil value by 1 million. - double fee_rate = 4; -} -message FeeReportResponse { - // An array of channel fee reports which describes the current fee schedule - // for each channel. - repeated ChannelFeeReport channel_fees = 1; - - // The total amount of fee revenue (in satoshis) the switch has collected - // over the past 24 hrs. - uint64 day_fee_sum = 2; - - // The total amount of fee revenue (in satoshis) the switch has collected - // over the past 1 week. - uint64 week_fee_sum = 3; - - // The total amount of fee revenue (in satoshis) the switch has collected - // over the past 1 month. - uint64 month_fee_sum = 4; -} - -message PolicyUpdateRequest { - oneof scope { - // If set, then this update applies to all currently active channels. - bool global = 1; - - // If set, this update will target a specific channel. - ChannelPoint chan_point = 2; - } - - // The base fee charged regardless of the number of milli-satoshis sent. - int64 base_fee_msat = 3; - - // The effective fee rate in milli-satoshis. The precision of this value - // goes up to 6 decimal places, so 1e-6. - double fee_rate = 4; - - // The required timelock delta for HTLCs forwarded over the channel. - uint32 time_lock_delta = 5; - - // If set, the maximum HTLC size in milli-satoshis. If unset, the maximum - // HTLC will be unchanged. - uint64 max_htlc_msat = 6; - - // The minimum HTLC size in milli-satoshis. Only applied if - // min_htlc_msat_specified is true. - uint64 min_htlc_msat = 7; - - // If true, min_htlc_msat is applied. - bool min_htlc_msat_specified = 8; -} -message PolicyUpdateResponse { -} - -message ForwardingHistoryRequest { - // Start time is the starting point of the forwarding history request. All - // records beyond this point will be included, respecting the end time, and - // the index offset. - uint64 start_time = 1; - - // End time is the end point of the forwarding history request. 
The - // response will carry at most 50k records between the start time and the - // end time. The index offset can be used to implement pagination. - uint64 end_time = 2; - - // Index offset is the offset in the time series to start at. As each - // response can only contain 50k records, callers can use this to skip - // around within a packed time series. - uint32 index_offset = 3; - - // The max number of events to return in the response to this query. - uint32 num_max_events = 4; -} -message ForwardingEvent { - // Timestamp is the time (unix epoch offset) that this circuit was - // completed. - uint64 timestamp = 1; - - // The incoming channel ID that carried the HTLC that created the circuit. - uint64 chan_id_in = 2 [jstype = JS_STRING]; - - // The outgoing channel ID that carried the preimage that completed the - // circuit. - uint64 chan_id_out = 4 [jstype = JS_STRING]; - - // The total amount (in satoshis) of the incoming HTLC that created half - // the circuit. - uint64 amt_in = 5; - - // The total amount (in satoshis) of the outgoing HTLC that created the - // second half of the circuit. - uint64 amt_out = 6; - - // The total fee (in satoshis) that this payment circuit carried. - uint64 fee = 7; - - // The total fee (in milli-satoshis) that this payment circuit carried. - uint64 fee_msat = 8; - - // The total amount (in milli-satoshis) of the incoming HTLC that created - // half the circuit. - uint64 amt_in_msat = 9; - - // The total amount (in milli-satoshis) of the outgoing HTLC that created - // the second half of the circuit. - uint64 amt_out_msat = 10; - - // TODO(roasbeef): add settlement latency? - // * use FPE on the chan id? - // * also list failures? -} -message ForwardingHistoryResponse { - // A list of forwarding events from the time slice of the time series - // specified in the request. - repeated ForwardingEvent forwarding_events = 1; - - // The index of the last time in the set of returned forwarding events. 
Can - // be used to seek further, pagination style. - uint32 last_offset_index = 2; -} - -message ExportChannelBackupRequest { - // The target channel point to obtain a back up for. - ChannelPoint chan_point = 1; -} - -message ChannelBackup { - /* - Identifies the channel that this backup belongs to. - */ - ChannelPoint chan_point = 1; - - /* - Is an encrypted single-chan backup. this can be passed to - RestoreChannelBackups, or the WalletUnlocker Init and Unlock methods in - order to trigger the recovery protocol. When using REST, this field must be - encoded as base64. - */ - bytes chan_backup = 2; -} - -message MultiChanBackup { - /* - Is the set of all channels that are included in this multi-channel backup. - */ - repeated ChannelPoint chan_points = 1; - - /* - A single encrypted blob containing all the static channel backups of the - channel listed above. This can be stored as a single file or blob, and - safely be replaced with any prior/future versions. When using REST, this - field must be encoded as base64. - */ - bytes multi_chan_backup = 2; -} - -message ChanBackupExportRequest { -} -message ChanBackupSnapshot { - /* - The set of new channels that have been added since the last channel backup - snapshot was requested. - */ - ChannelBackups single_chan_backups = 1; - - /* - A multi-channel backup that covers all open channels currently known to - lnd. - */ - MultiChanBackup multi_chan_backup = 2; -} - -message ChannelBackups { - /* - A set of single-chan static channel backups. - */ - repeated ChannelBackup chan_backups = 1; -} - -message RestoreChanBackupRequest { - oneof backup { - /* - The channels to restore as a list of channel/backup pairs. - */ - ChannelBackups chan_backups = 1; - - /* - The channels to restore in the packed multi backup format. When using - REST, this field must be encoded as base64. 
- */ - bytes multi_chan_backup = 2; - } -} -message RestoreBackupResponse { -} - -message ChannelBackupSubscription { -} - -message VerifyChanBackupResponse { -} - -message MacaroonPermission { - // The entity a permission grants access to. - string entity = 1; - - // The action that is granted. - string action = 2; -} -message BakeMacaroonRequest { - // The list of permissions the new macaroon should grant. - repeated MacaroonPermission permissions = 1; - - // The root key ID used to create the macaroon, must be a positive integer. - uint64 root_key_id = 2; -} -message BakeMacaroonResponse { - // The hex encoded macaroon, serialized in binary format. - string macaroon = 1; -} - -message ListMacaroonIDsRequest { -} -message ListMacaroonIDsResponse { - // The list of root key IDs that are in use. - repeated uint64 root_key_ids = 1; -} - -message DeleteMacaroonIDRequest { - // The root key ID to be removed. - uint64 root_key_id = 1; -} -message DeleteMacaroonIDResponse { - // A boolean indicates that the deletion is successful. - bool deleted = 1; -} - -message MacaroonPermissionList { - // A list of macaroon permissions. - repeated MacaroonPermission permissions = 1; -} - -message ListPermissionsRequest { -} -message ListPermissionsResponse { - /* - A map between all RPC method URIs and their required macaroon permissions to - access them. - */ - map method_permissions = 1; -} - -message Failure { - enum FailureCode { - /* - The numbers assigned in this enumeration match the failure codes as - defined in BOLT #4. Because protobuf 3 requires enums to start with 0, - a RESERVED value is added. 
- */ - RESERVED = 0; - - INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS = 1; - INCORRECT_PAYMENT_AMOUNT = 2; - FINAL_INCORRECT_CLTV_EXPIRY = 3; - FINAL_INCORRECT_HTLC_AMOUNT = 4; - FINAL_EXPIRY_TOO_SOON = 5; - INVALID_REALM = 6; - EXPIRY_TOO_SOON = 7; - INVALID_ONION_VERSION = 8; - INVALID_ONION_HMAC = 9; - INVALID_ONION_KEY = 10; - AMOUNT_BELOW_MINIMUM = 11; - FEE_INSUFFICIENT = 12; - INCORRECT_CLTV_EXPIRY = 13; - CHANNEL_DISABLED = 14; - TEMPORARY_CHANNEL_FAILURE = 15; - REQUIRED_NODE_FEATURE_MISSING = 16; - REQUIRED_CHANNEL_FEATURE_MISSING = 17; - UNKNOWN_NEXT_PEER = 18; - TEMPORARY_NODE_FAILURE = 19; - PERMANENT_NODE_FAILURE = 20; - PERMANENT_CHANNEL_FAILURE = 21; - EXPIRY_TOO_FAR = 22; - MPP_TIMEOUT = 23; - - /* - An internal error occurred. - */ - INTERNAL_FAILURE = 997; - - /* - The error source is known, but the failure itself couldn't be decoded. - */ - UNKNOWN_FAILURE = 998; - - /* - An unreadable failure result is returned if the received failure message - cannot be decrypted. In that case the error source is unknown. - */ - UNREADABLE_FAILURE = 999; - } - - // Failure code as defined in the Lightning spec - FailureCode code = 1; - - reserved 2; - - // An optional channel update message. - ChannelUpdate channel_update = 3; - - // A failure type-dependent htlc value. - uint64 htlc_msat = 4; - - // The sha256 sum of the onion payload. - bytes onion_sha_256 = 5; - - // A failure type-dependent cltv expiry value. - uint32 cltv_expiry = 6; - - // A failure type-dependent flags value. - uint32 flags = 7; - - /* - The position in the path of the intermediate or final node that generated - the failure message. Position zero is the sender node. - **/ - uint32 failure_source_index = 8; - - // A failure type-dependent block height. - uint32 height = 9; -} - -message ChannelUpdate { - /* - The signature that validates the announced data and proves the ownership - of node id. - */ - bytes signature = 1; - - /* - The target chain that this channel was opened within. 
This value - should be the genesis hash of the target chain. Along with the short - channel ID, this uniquely identifies the channel globally in a - blockchain. - */ - bytes chain_hash = 2; - - /* - The unique description of the funding transaction. - */ - uint64 chan_id = 3 [jstype = JS_STRING]; - - /* - A timestamp that allows ordering in the case of multiple announcements. - We should ignore the message if timestamp is not greater than the - last-received. - */ - uint32 timestamp = 4; - - /* - The bitfield that describes whether optional fields are present in this - update. Currently, the least-significant bit must be set to 1 if the - optional field MaxHtlc is present. - */ - uint32 message_flags = 10; - - /* - The bitfield that describes additional meta-data concerning how the - update is to be interpreted. Currently, the least-significant bit must be - set to 0 if the creating node corresponds to the first node in the - previously sent channel announcement and 1 otherwise. If the second bit - is set, then the channel is set to be disabled. - */ - uint32 channel_flags = 5; - - /* - The minimum number of blocks this node requires to be added to the expiry - of HTLCs. This is a security parameter determined by the node operator. - This value represents the required gap between the time locks of the - incoming and outgoing HTLC's set to this node. - */ - uint32 time_lock_delta = 6; - - /* - The minimum HTLC value which will be accepted. - */ - uint64 htlc_minimum_msat = 7; - - /* - The base fee that must be used for incoming HTLC's to this particular - channel. This value will be tacked onto the required for a payment - independent of the size of the payment. - */ - uint32 base_fee = 8; - - /* - The fee rate that will be charged per millionth of a satoshi. - */ - uint32 fee_rate = 9; - - /* - The maximum HTLC value which will be accepted. 
- */ - uint64 htlc_maximum_msat = 11; - - /* - The set of data that was appended to this message, some of which we may - not actually know how to iterate or parse. By holding onto this data, we - ensure that we're able to properly validate the set of signatures that - cover these new fields, and ensure we're able to make upgrades to the - network in a forwards compatible manner. - */ - bytes extra_opaque_data = 12; -} - -message MacaroonId { - bytes nonce = 1; - bytes storageId = 2; - repeated Op ops = 3; -} - -message Op { - string entity = 1; - repeated string actions = 2; -} - -message ReSyncChainRequest { - int32 from_height = 1; - int32 to_height = 2; - repeated string addresses = 3; - bool drop_db = 4; -} - -message ReSyncChainResponse { -} - -message StopReSyncRequest{} - -message StopReSyncResponse{ - string value = 1; -} \ No newline at end of file diff --git a/lnd/lnrpc/rpc.swagger.json b/lnd/lnrpc/rpc.swagger.json deleted file mode 100644 index 42d9d792..00000000 --- a/lnd/lnrpc/rpc.swagger.json +++ /dev/null @@ -1,5804 +0,0 @@ -{ - "swagger": "2.0", - "info": { - "title": "rpc.proto", - "version": "version not set" - }, - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "paths": { - "/v1/balance/blockchain": { - "get": { - "summary": "lncli: `walletbalance`\nWalletBalance returns total unspent outputs(confirmed and unconfirmed), all\nconfirmed unspent outputs and all unconfirmed unspent outputs under control\nof the wallet.", - "operationId": "WalletBalance", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/lnrpcWalletBalanceResponse" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "tags": [ - "Lightning" - ] - } - }, - "/v1/balance/channels": { - "get": { - "summary": "lncli: `channelbalance`\nChannelBalance returns a report on the total funds across all open 
channels,\ncategorized in local/remote, pending local/remote and unsettled local/remote\nbalances.", - "operationId": "ChannelBalance", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/lnrpcChannelBalanceResponse" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "tags": [ - "Lightning" - ] - } - }, - "/v1/channels": { - "get": { - "summary": "lncli: `listchannels`\nListChannels returns a description of all the open channels that this node\nis a participant in.", - "operationId": "ListChannels", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/lnrpcListChannelsResponse" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "active_only", - "in": "query", - "required": false, - "type": "boolean", - "format": "boolean" - }, - { - "name": "inactive_only", - "in": "query", - "required": false, - "type": "boolean", - "format": "boolean" - }, - { - "name": "public_only", - "in": "query", - "required": false, - "type": "boolean", - "format": "boolean" - }, - { - "name": "private_only", - "in": "query", - "required": false, - "type": "boolean", - "format": "boolean" - }, - { - "name": "peer", - "description": "Filters the response for channels with a target peer's pubkey. If peer is\nempty, all channels will be returned.", - "in": "query", - "required": false, - "type": "string", - "format": "byte" - } - ], - "tags": [ - "Lightning" - ] - }, - "post": { - "summary": "OpenChannelSync is a synchronous version of the OpenChannel RPC call. This\ncall is meant to be consumed by clients to the REST proxy. 
As with all\nother sync calls, all byte slices are intended to be populated as hex\nencoded strings.", - "operationId": "OpenChannelSync", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/lnrpcChannelPoint" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/lnrpcOpenChannelRequest" - } - } - ], - "tags": [ - "Lightning" - ] - } - }, - "/v1/channels/abandon/{channel_point.funding_txid_str}/{channel_point.output_index}": { - "delete": { - "summary": "lncli: `abandonchannel`\nAbandonChannel removes all channel state from the database except for a\nclose summary. This method can be used to get rid of permanently unusable\nchannels due to bugs fixed in newer versions of lnd. This method can also be\nused to remove externally funded channels where the funding transaction was\nnever broadcast. Only available for non-externally funded channels in dev\nbuild.", - "operationId": "AbandonChannel", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/lnrpcAbandonChannelResponse" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "channel_point.funding_txid_str", - "description": "Hex-encoded string representing the byte-reversed hash of the funding\ntransaction.", - "in": "path", - "required": true, - "type": "string" - }, - { - "name": "channel_point.output_index", - "description": "The index of the output of the funding transaction", - "in": "path", - "required": true, - "type": "integer", - "format": "int64" - }, - { - "name": "channel_point.funding_txid_bytes", - "description": "Txid of the funding transaction. 
When using REST, this field must be\nencoded as base64.", - "in": "query", - "required": false, - "type": "string", - "format": "byte" - }, - { - "name": "pending_funding_shim_only", - "in": "query", - "required": false, - "type": "boolean", - "format": "boolean" - } - ], - "tags": [ - "Lightning" - ] - } - }, - "/v1/channels/backup": { - "get": { - "summary": "ExportAllChannelBackups returns static channel backups for all existing\nchannels known to lnd. A set of regular singular static channel backups for\neach channel are returned. Additionally, a multi-channel backup is returned\nas well, which contains a single encrypted blob containing the backups of\neach channel.", - "operationId": "ExportAllChannelBackups", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/lnrpcChanBackupSnapshot" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "tags": [ - "Lightning" - ] - } - }, - "/v1/channels/backup/restore": { - "post": { - "summary": "lncli: `restorechanbackup`\nRestoreChannelBackups accepts a set of singular channel backups, or a\nsingle encrypted multi-chan backup and attempts to recover any funds\nremaining within the channel. 
If we are able to unpack the backup, then the\nnew channel will be shown under listchannels, as well as pending channels.", - "operationId": "RestoreChannelBackups", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/lnrpcRestoreBackupResponse" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/lnrpcRestoreChanBackupRequest" - } - } - ], - "tags": [ - "Lightning" - ] - } - }, - "/v1/channels/backup/subscribe": { - "get": { - "summary": "SubscribeChannelBackups allows a client to sub-subscribe to the most up to\ndate information concerning the state of all channel backups. Each time a\nnew channel is added, we return the new set of channels, along with a\nmulti-chan backup containing the backup info for all channels. Each time a\nchannel is closed, we send a new update, which contains new new chan back\nups, but the updated set of encrypted multi-chan backups with the closed\nchannel(s) removed.", - "operationId": "SubscribeChannelBackups", - "responses": { - "200": { - "description": "A successful response.(streaming responses)", - "schema": { - "type": "object", - "properties": { - "result": { - "$ref": "#/definitions/lnrpcChanBackupSnapshot" - }, - "error": { - "$ref": "#/definitions/runtimeStreamError" - } - }, - "title": "Stream result of lnrpcChanBackupSnapshot" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "tags": [ - "Lightning" - ] - } - }, - "/v1/channels/backup/verify": { - "post": { - "summary": "VerifyChanBackup allows a caller to verify the integrity of a channel backup\nsnapshot. 
This method will accept either a packed Single or a packed Multi.\nSpecifying both will result in an error.", - "operationId": "VerifyChanBackup", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/lnrpcVerifyChanBackupResponse" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/lnrpcChanBackupSnapshot" - } - } - ], - "tags": [ - "Lightning" - ] - } - }, - "/v1/channels/backup/{chan_point.funding_txid_str}/{chan_point.output_index}": { - "get": { - "summary": "lncli: `exportchanbackup`\nExportChannelBackup attempts to return an encrypted static channel backup\nfor the target channel identified by it channel point. The backup is\nencrypted with a key generated from the aezeed seed of the user. The\nreturned backup can either be restored using the RestoreChannelBackup\nmethod once lnd is running, or via the InitWallet and UnlockWallet methods\nfrom the WalletUnlocker service.", - "operationId": "ExportChannelBackup", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/lnrpcChannelBackup" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "chan_point.funding_txid_str", - "description": "Hex-encoded string representing the byte-reversed hash of the funding\ntransaction.", - "in": "path", - "required": true, - "type": "string" - }, - { - "name": "chan_point.output_index", - "description": "The index of the output of the funding transaction", - "in": "path", - "required": true, - "type": "integer", - "format": "int64" - }, - { - "name": "chan_point.funding_txid_bytes", - "description": "Txid of the funding transaction. 
When using REST, this field must be\nencoded as base64.", - "in": "query", - "required": false, - "type": "string", - "format": "byte" - } - ], - "tags": [ - "Lightning" - ] - } - }, - "/v1/channels/closed": { - "get": { - "summary": "lncli: `closedchannels`\nClosedChannels returns a description of all the closed channels that\nthis node was a participant in.", - "operationId": "ClosedChannels", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/lnrpcClosedChannelsResponse" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "cooperative", - "in": "query", - "required": false, - "type": "boolean", - "format": "boolean" - }, - { - "name": "local_force", - "in": "query", - "required": false, - "type": "boolean", - "format": "boolean" - }, - { - "name": "remote_force", - "in": "query", - "required": false, - "type": "boolean", - "format": "boolean" - }, - { - "name": "breach", - "in": "query", - "required": false, - "type": "boolean", - "format": "boolean" - }, - { - "name": "funding_canceled", - "in": "query", - "required": false, - "type": "boolean", - "format": "boolean" - }, - { - "name": "abandoned", - "in": "query", - "required": false, - "type": "boolean", - "format": "boolean" - } - ], - "tags": [ - "Lightning" - ] - } - }, - "/v1/channels/pending": { - "get": { - "summary": "lncli: `pendingchannels`\nPendingChannels returns a list of all the channels that are currently\nconsidered \"pending\". 
A channel is pending if it has finished the funding\nworkflow and is waiting for confirmations for the funding txn, or is in the\nprocess of closure, either initiated cooperatively or non-cooperatively.", - "operationId": "PendingChannels", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/lnrpcPendingChannelsResponse" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "tags": [ - "Lightning" - ] - } - }, - "/v1/channels/stream": { - "post": { - "summary": "lncli: `openchannel`\nOpenChannel attempts to open a singly funded channel specified in the\nrequest to a remote peer. Users are able to specify a target number of\nblocks that the funding transaction should be confirmed in, or a manual fee\nrate to us for the funding transaction. If neither are specified, then a\nlax block confirmation target is used. Each OpenStatusUpdate will return\nthe pending channel ID of the in-progress channel. 
Depending on the\narguments specified in the OpenChannelRequest, this pending channel ID can\nthen be used to manually progress the channel funding flow.", - "operationId": "OpenChannel", - "responses": { - "200": { - "description": "A successful response.(streaming responses)", - "schema": { - "type": "object", - "properties": { - "result": { - "$ref": "#/definitions/lnrpcOpenStatusUpdate" - }, - "error": { - "$ref": "#/definitions/runtimeStreamError" - } - }, - "title": "Stream result of lnrpcOpenStatusUpdate" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/lnrpcOpenChannelRequest" - } - } - ], - "tags": [ - "Lightning" - ] - } - }, - "/v1/channels/subscribe": { - "get": { - "summary": "SubscribeChannelEvents creates a uni-directional stream from the server to\nthe client in which any updates relevant to the state of the channels are\nsent over. 
Events include new active channels, inactive channels, and closed\nchannels.", - "operationId": "SubscribeChannelEvents", - "responses": { - "200": { - "description": "A successful response.(streaming responses)", - "schema": { - "type": "object", - "properties": { - "result": { - "$ref": "#/definitions/lnrpcChannelEventUpdate" - }, - "error": { - "$ref": "#/definitions/runtimeStreamError" - } - }, - "title": "Stream result of lnrpcChannelEventUpdate" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "tags": [ - "Lightning" - ] - } - }, - "/v1/channels/transactions": { - "post": { - "summary": "SendPaymentSync is the synchronous non-streaming version of SendPayment.\nThis RPC is intended to be consumed by clients of the REST proxy.\nAdditionally, this RPC expects the destination's public key and the payment\nhash (if any) to be encoded as hex strings.", - "operationId": "SendPaymentSync", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/lnrpcSendResponse" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/lnrpcSendRequest" - } - } - ], - "tags": [ - "Lightning" - ] - } - }, - "/v1/channels/transactions/route": { - "post": { - "summary": "SendToRouteSync is a synchronous version of SendToRoute. 
It Will block\nuntil the payment either fails or succeeds.", - "operationId": "SendToRouteSync", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/lnrpcSendResponse" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/lnrpcSendToRouteRequest" - } - } - ], - "tags": [ - "Lightning" - ] - } - }, - "/v1/channels/{channel_point.funding_txid_str}/{channel_point.output_index}": { - "delete": { - "summary": "lncli: `closechannel`\nCloseChannel attempts to close an active channel identified by its channel\noutpoint (ChannelPoint). The actions of this method can additionally be\naugmented to attempt a force close after a timeout period in the case of an\ninactive peer. If a non-force close (cooperative closure) is requested,\nthen the user can specify either a target number of blocks until the\nclosure transaction is confirmed, or a manual fee rate. 
If neither are\nspecified, then a default lax, block confirmation target is used.", - "operationId": "CloseChannel", - "responses": { - "200": { - "description": "A successful response.(streaming responses)", - "schema": { - "type": "object", - "properties": { - "result": { - "$ref": "#/definitions/lnrpcCloseStatusUpdate" - }, - "error": { - "$ref": "#/definitions/runtimeStreamError" - } - }, - "title": "Stream result of lnrpcCloseStatusUpdate" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "channel_point.funding_txid_str", - "description": "Hex-encoded string representing the byte-reversed hash of the funding\ntransaction.", - "in": "path", - "required": true, - "type": "string" - }, - { - "name": "channel_point.output_index", - "description": "The index of the output of the funding transaction", - "in": "path", - "required": true, - "type": "integer", - "format": "int64" - }, - { - "name": "channel_point.funding_txid_bytes", - "description": "Txid of the funding transaction. When using REST, this field must be\nencoded as base64.", - "in": "query", - "required": false, - "type": "string", - "format": "byte" - }, - { - "name": "force", - "description": "If true, then the channel will be closed forcibly. 
This means the\ncurrent commitment transaction will be signed and broadcast.", - "in": "query", - "required": false, - "type": "boolean", - "format": "boolean" - }, - { - "name": "target_conf", - "description": "The target number of blocks that the closure transaction should be\nconfirmed by.", - "in": "query", - "required": false, - "type": "integer", - "format": "int32" - }, - { - "name": "sat_per_byte", - "description": "A manual fee rate set in sat/byte that should be used when crafting the\nclosure transaction.", - "in": "query", - "required": false, - "type": "string", - "format": "int64" - }, - { - "name": "delivery_address", - "description": "An optional address to send funds to in the case of a cooperative close.\nIf the channel was opened with an upfront shutdown script and this field\nis set, the request to close will fail because the channel must pay out\nto the upfront shutdown addresss.", - "in": "query", - "required": false, - "type": "string" - } - ], - "tags": [ - "Lightning" - ] - } - }, - "/v1/chanpolicy": { - "post": { - "summary": "lncli: `updatechanpolicy`\nUpdateChannelPolicy allows the caller to update the fee schedule and\nchannel policies for all channels globally, or a particular channel.", - "operationId": "UpdateChannelPolicy", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/lnrpcPolicyUpdateResponse" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/lnrpcPolicyUpdateRequest" - } - } - ], - "tags": [ - "Lightning" - ] - } - }, - "/v1/debuglevel": { - "post": { - "summary": "lncli: `debuglevel`\nDebugLevel allows a caller to programmatically set the logging verbosity of\nlnd. 
The logging can be targeted according to a coarse daemon-wide logging\nlevel, or in a granular fashion to specify the logging for a target\nsub-system.", - "operationId": "DebugLevel", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/lnrpcDebugLevelResponse" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/lnrpcDebugLevelRequest" - } - } - ], - "tags": [ - "Lightning" - ] - } - }, - "/v1/fees": { - "get": { - "summary": "lncli: `feereport`\nFeeReport allows the caller to obtain a report detailing the current fee\nschedule enforced by the node globally for each channel.", - "operationId": "FeeReport", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/lnrpcFeeReportResponse" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "tags": [ - "Lightning" - ] - } - }, - "/v1/funding/step": { - "post": { - "summary": "FundingStateStep is an advanced funding related call that allows the caller\nto either execute some preparatory steps for a funding workflow, or\nmanually progress a funding workflow. The primary way a funding flow is\nidentified is via its pending channel ID. 
As an example, this method can be\nused to specify that we're expecting a funding flow for a particular\npending channel ID, for which we need to use specific parameters.\nAlternatively, this can be used to interactively drive PSBT signing for\nfunding for partially complete funding transactions.", - "operationId": "FundingStateStep", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/lnrpcFundingStateStepResp" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/lnrpcFundingTransitionMsg" - } - } - ], - "tags": [ - "Lightning" - ] - } - }, - "/v1/getinfo": { - "get": { - "summary": "lncli: `getinfo`\nGetInfo returns general information concerning the lightning node including\nit's identity pubkey, alias, the chains it is connected to, and information\nconcerning the number of open+pending channels.", - "operationId": "GetInfo", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/lnrpcGetInfoResponse" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "tags": [ - "Lightning" - ] - } - }, - "/v1/getrecoveryinfo": { - "get": { - "summary": "* lncli: `getrecoveryinfo`\nGetRecoveryInfo returns information concerning the recovery mode including\nwhether it's in a recovery mode, whether the recovery is finished, and the\nprogress made so far.", - "operationId": "GetRecoveryInfo", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/lnrpcGetRecoveryInfoResponse" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "tags": [ - 
"Lightning" - ] - } - }, - "/v1/graph": { - "get": { - "summary": "lncli: `describegraph`\nDescribeGraph returns a description of the latest graph state from the\npoint of view of the node. The graph information is partitioned into two\ncomponents: all the nodes/vertexes, and all the edges that connect the\nvertexes themselves. As this is a directed graph, the edges also contain\nthe node directional specific routing policy which includes: the time lock\ndelta, fee information, etc.", - "operationId": "DescribeGraph", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/lnrpcChannelGraph" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "include_unannounced", - "description": "Whether unannounced channels are included in the response or not. If set,\nunannounced channels are included. Unannounced channels are both private\nchannels, and public channels that are not yet announced to the network.", - "in": "query", - "required": false, - "type": "boolean", - "format": "boolean" - } - ], - "tags": [ - "Lightning" - ] - } - }, - "/v1/graph/edge/{chan_id}": { - "get": { - "summary": "lncli: `getchaninfo`\nGetChanInfo returns the latest authenticated network announcement for the\ngiven channel identified by its channel ID: an 8-byte integer which\nuniquely identifies the location of transaction's funding output within the\nblockchain.", - "operationId": "GetChanInfo", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/lnrpcChannelEdge" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "chan_id", - "description": "The unique channel ID for the channel. 
The first 3 bytes are the block\nheight, the next 3 the index within the block, and the last 2 bytes are the\noutput index for the channel.", - "in": "path", - "required": true, - "type": "string", - "format": "uint64" - } - ], - "tags": [ - "Lightning" - ] - } - }, - "/v1/graph/info": { - "get": { - "summary": "lncli: `getnetworkinfo`\nGetNetworkInfo returns some basic stats about the known channel graph from\nthe point of view of the node.", - "operationId": "GetNetworkInfo", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/lnrpcNetworkInfo" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "tags": [ - "Lightning" - ] - } - }, - "/v1/graph/node/{pub_key}": { - "get": { - "summary": "lncli: `getnodeinfo`\nGetNodeInfo returns the latest advertised, aggregated, and authenticated\nchannel information for the specified node identified by its public key.", - "operationId": "GetNodeInfo", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/lnrpcNodeInfo" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "pub_key", - "description": "The 33-byte hex-encoded compressed public of the target node", - "in": "path", - "required": true, - "type": "string" - }, - { - "name": "include_channels", - "description": "If true, will include all known channels associated with the node.", - "in": "query", - "required": false, - "type": "boolean", - "format": "boolean" - } - ], - "tags": [ - "Lightning" - ] - } - }, - "/v1/graph/nodemetrics": { - "get": { - "summary": "lncli: `getnodemetrics`\nGetNodeMetrics returns node metrics calculated from the graph. 
Currently\nthe only supported metric is betweenness centrality of individual nodes.", - "operationId": "GetNodeMetrics", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/lnrpcNodeMetricsResponse" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "types", - "description": "The requested node metrics.", - "in": "query", - "required": false, - "type": "array", - "items": { - "type": "string", - "enum": [ - "UNKNOWN", - "BETWEENNESS_CENTRALITY" - ] - }, - "collectionFormat": "multi" - } - ], - "tags": [ - "Lightning" - ] - } - }, - "/v1/graph/routes/{pub_key}/{amt}": { - "get": { - "summary": "lncli: `queryroutes`\nQueryRoutes attempts to query the daemon's Channel Router for a possible\nroute to a target destination capable of carrying a specific amount of\nsatoshis. The returned route contains the full details required to craft and\nsend an HTLC, also including the necessary information that should be\npresent within the Sphinx packet encapsulated within the HTLC.", - "description": "When using REST, the `dest_custom_records` map type can be set by appending\n`\u0026dest_custom_records[\u003crecord_number\u003e]=\u003crecord_data_base64_url_encoded\u003e`\nto the URL. 
Unfortunately this map type doesn't appear in the REST API\ndocumentation because of a bug in the grpc-gateway library.", - "operationId": "QueryRoutes", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/lnrpcQueryRoutesResponse" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "pub_key", - "description": "The 33-byte hex-encoded public key for the payment destination", - "in": "path", - "required": true, - "type": "string" - }, - { - "name": "amt", - "description": "The amount to send expressed in satoshis.\n\nThe fields amt and amt_msat are mutually exclusive.", - "in": "path", - "required": true, - "type": "string", - "format": "int64" - }, - { - "name": "amt_msat", - "description": "The amount to send expressed in millisatoshis.\n\nThe fields amt and amt_msat are mutually exclusive.", - "in": "query", - "required": false, - "type": "string", - "format": "int64" - }, - { - "name": "final_cltv_delta", - "description": "An optional CLTV delta from the current height that should be used for the\ntimelock of the final hop. Note that unlike SendPayment, QueryRoutes does\nnot add any additional block padding on top of final_ctlv_delta. 
This\npadding of a few blocks needs to be added manually or otherwise failures may\nhappen when a block comes in while the payment is in flight.", - "in": "query", - "required": false, - "type": "integer", - "format": "int32" - }, - { - "name": "fee_limit.fixed", - "description": "The fee limit expressed as a fixed amount of satoshis.\n\nThe fields fixed and fixed_msat are mutually exclusive.", - "in": "query", - "required": false, - "type": "string", - "format": "int64" - }, - { - "name": "fee_limit.fixed_msat", - "description": "The fee limit expressed as a fixed amount of millisatoshis.\n\nThe fields fixed and fixed_msat are mutually exclusive.", - "in": "query", - "required": false, - "type": "string", - "format": "int64" - }, - { - "name": "fee_limit.percent", - "description": "The fee limit expressed as a percentage of the payment amount.", - "in": "query", - "required": false, - "type": "string", - "format": "int64" - }, - { - "name": "ignored_nodes", - "description": "A list of nodes to ignore during path finding. When using REST, these fields\nmust be encoded as base64.", - "in": "query", - "required": false, - "type": "array", - "items": { - "type": "string", - "format": "byte" - }, - "collectionFormat": "multi" - }, - { - "name": "source_pub_key", - "description": "The source node where the request route should originated from. If empty,\nself is assumed.", - "in": "query", - "required": false, - "type": "string" - }, - { - "name": "use_mission_control", - "description": "If set to true, edge probabilities from mission control will be used to get\nthe optimal route.", - "in": "query", - "required": false, - "type": "boolean", - "format": "boolean" - }, - { - "name": "cltv_limit", - "description": "An optional maximum total time lock for the route. If the source is empty or\nourselves, this should not exceed lnd's `--max-cltv-expiry` setting. 
If\nzero, then the value of `--max-cltv-expiry` is used as the limit.", - "in": "query", - "required": false, - "type": "integer", - "format": "int64" - }, - { - "name": "outgoing_chan_id", - "description": "The channel id of the channel that must be taken to the first hop. If zero,\nany channel may be used.", - "in": "query", - "required": false, - "type": "string", - "format": "uint64" - }, - { - "name": "last_hop_pubkey", - "description": "The pubkey of the last hop of the route. If empty, any hop may be used.", - "in": "query", - "required": false, - "type": "string", - "format": "byte" - }, - { - "name": "dest_features", - "description": "Features assumed to be supported by the final node. All transitive feature\ndependencies must also be set properly. For a given feature bit pair, either\noptional or remote may be set, but not both. If this field is nil or empty,\nthe router will try to load destination features from the graph as a\nfallback.", - "in": "query", - "required": false, - "type": "array", - "items": { - "type": "string", - "enum": [ - "DATALOSS_PROTECT_REQ", - "DATALOSS_PROTECT_OPT", - "INITIAL_ROUING_SYNC", - "UPFRONT_SHUTDOWN_SCRIPT_REQ", - "UPFRONT_SHUTDOWN_SCRIPT_OPT", - "GOSSIP_QUERIES_REQ", - "GOSSIP_QUERIES_OPT", - "TLV_ONION_REQ", - "TLV_ONION_OPT", - "EXT_GOSSIP_QUERIES_REQ", - "EXT_GOSSIP_QUERIES_OPT", - "STATIC_REMOTE_KEY_REQ", - "STATIC_REMOTE_KEY_OPT", - "PAYMENT_ADDR_REQ", - "PAYMENT_ADDR_OPT", - "MPP_REQ", - "MPP_OPT" - ] - }, - "collectionFormat": "multi" - } - ], - "tags": [ - "Lightning" - ] - } - }, - "/v1/graph/subscribe": { - "get": { - "summary": "SubscribeChannelGraph launches a streaming RPC that allows the caller to\nreceive notifications upon any changes to the channel graph topology from\nthe point of view of the responding node. 
Events notified include: new\nnodes coming online, nodes updating their authenticated attributes, new\nchannels being advertised, updates in the routing policy for a directional\nchannel edge, and when channels are closed on-chain.", - "operationId": "SubscribeChannelGraph", - "responses": { - "200": { - "description": "A successful response.(streaming responses)", - "schema": { - "type": "object", - "properties": { - "result": { - "$ref": "#/definitions/lnrpcGraphTopologyUpdate" - }, - "error": { - "$ref": "#/definitions/runtimeStreamError" - } - }, - "title": "Stream result of lnrpcGraphTopologyUpdate" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "tags": [ - "Lightning" - ] - } - }, - "/v1/invoice/{r_hash_str}": { - "get": { - "summary": "lncli: `lookupinvoice`\nLookupInvoice attempts to look up an invoice according to its payment hash.\nThe passed payment hash *must* be exactly 32 bytes, if not, an error is\nreturned.", - "operationId": "LookupInvoice", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/lnrpcInvoice" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "r_hash_str", - "description": "The hex-encoded payment hash of the invoice to be looked up. The passed\npayment hash must be exactly 32 bytes, otherwise an error is returned.\nDeprecated now that the REST gateway supports base64 encoding of bytes\nfields.", - "in": "path", - "required": true, - "type": "string" - }, - { - "name": "r_hash", - "description": "The payment hash of the invoice to be looked up. 
When using REST, this field\nmust be encoded as base64.", - "in": "query", - "required": false, - "type": "string", - "format": "byte" - } - ], - "tags": [ - "Lightning" - ] - } - }, - "/v1/invoices": { - "get": { - "summary": "lncli: `listinvoices`\nListInvoices returns a list of all the invoices currently stored within the\ndatabase. Any active debug invoices are ignored. It has full support for\npaginated responses, allowing users to query for specific invoices through\ntheir add_index. This can be done by using either the first_index_offset or\nlast_index_offset fields included in the response as the index_offset of the\nnext request. By default, the first 100 invoices created will be returned.\nBackwards pagination is also supported through the Reversed flag.", - "operationId": "ListInvoices", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/lnrpcListInvoiceResponse" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "pending_only", - "description": "If set, only invoices that are not settled and not canceled will be returned\nin the response.", - "in": "query", - "required": false, - "type": "boolean", - "format": "boolean" - }, - { - "name": "index_offset", - "description": "The index of an invoice that will be used as either the start or end of a\nquery to determine which invoices should be returned in the response.", - "in": "query", - "required": false, - "type": "string", - "format": "uint64" - }, - { - "name": "num_max_invoices", - "description": "The max number of invoices to return in the response to this query.", - "in": "query", - "required": false, - "type": "string", - "format": "uint64" - }, - { - "name": "reversed", - "description": "If set, the invoices returned will result from seeking backwards from the\nspecified index offset. 
This can be used to paginate backwards.", - "in": "query", - "required": false, - "type": "boolean", - "format": "boolean" - } - ], - "tags": [ - "Lightning" - ] - }, - "post": { - "summary": "lncli: `addinvoice`\nAddInvoice attempts to add a new invoice to the invoice database. Any\nduplicated invoices are rejected, therefore all invoices *must* have a\nunique payment preimage.", - "operationId": "AddInvoice", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/lnrpcAddInvoiceResponse" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/lnrpcInvoice" - } - } - ], - "tags": [ - "Lightning" - ] - } - }, - "/v1/invoices/subscribe": { - "get": { - "summary": "SubscribeInvoices returns a uni-directional stream (server -\u003e client) for\nnotifying the client of newly added/settled invoices. The caller can\noptionally specify the add_index and/or the settle_index. If the add_index\nis specified, then we'll first start by sending add invoice events for all\ninvoices with an add_index greater than the specified value. If the\nsettle_index is specified, the next, we'll send out all settle events for\ninvoices with a settle_index greater than the specified value. One or both\nof these fields can be set. 
If no fields are set, then we'll only send out\nthe latest add/settle events.", - "operationId": "SubscribeInvoices", - "responses": { - "200": { - "description": "A successful response.(streaming responses)", - "schema": { - "type": "object", - "properties": { - "result": { - "$ref": "#/definitions/lnrpcInvoice" - }, - "error": { - "$ref": "#/definitions/runtimeStreamError" - } - }, - "title": "Stream result of lnrpcInvoice" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "add_index", - "description": "If specified (non-zero), then we'll first start by sending out\nnotifications for all added indexes with an add_index greater than this\nvalue. This allows callers to catch up on any events they missed while they\nweren't connected to the streaming RPC.", - "in": "query", - "required": false, - "type": "string", - "format": "uint64" - }, - { - "name": "settle_index", - "description": "If specified (non-zero), then we'll first start by sending out\nnotifications for all settled indexes with an settle_index greater than\nthis value. This allows callers to catch up on any events they missed while\nthey weren't connected to the streaming RPC.", - "in": "query", - "required": false, - "type": "string", - "format": "uint64" - } - ], - "tags": [ - "Lightning" - ] - } - }, - "/v1/macaroon": { - "post": { - "summary": "lncli: `bakemacaroon`\nBakeMacaroon allows the creation of a new macaroon with custom read and\nwrite permissions. 
No first-party caveats are added since this can be done\noffline.", - "operationId": "BakeMacaroon", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/lnrpcBakeMacaroonResponse" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/lnrpcBakeMacaroonRequest" - } - } - ], - "tags": [ - "Lightning" - ] - } - }, - "/v1/macaroon/ids": { - "get": { - "summary": "lncli: `listmacaroonids`\nListMacaroonIDs returns all root key IDs that are in use.", - "operationId": "ListMacaroonIDs", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/lnrpcListMacaroonIDsResponse" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "tags": [ - "Lightning" - ] - } - }, - "/v1/macaroon/permissions": { - "get": { - "summary": "lncli: `listpermissions`\nListPermissions lists all RPC method URIs and their required macaroon\npermissions to access them.", - "operationId": "ListPermissions", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/lnrpcListPermissionsResponse" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "tags": [ - "Lightning" - ] - } - }, - "/v1/macaroon/{root_key_id}": { - "delete": { - "summary": "lncli: `deletemacaroonid`\nDeleteMacaroonID deletes the specified macaroon ID and invalidates all\nmacaroons derived from that ID.", - "operationId": "DeleteMacaroonID", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/lnrpcDeleteMacaroonIDResponse" - } - }, - "default": { - 
"description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "root_key_id", - "description": "The root key ID to be removed.", - "in": "path", - "required": true, - "type": "string", - "format": "uint64" - } - ], - "tags": [ - "Lightning" - ] - } - }, - "/v1/newaddress": { - "get": { - "summary": "lncli: `newaddress`\nNewAddress creates a new address under control of the local wallet.", - "operationId": "NewAddress", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/lnrpcNewAddressResponse" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "type", - "description": "The address type.", - "in": "query", - "required": false, - "type": "string", - "enum": [ - "WITNESS_PUBKEY_HASH", - "NESTED_PUBKEY_HASH", - "UNUSED_WITNESS_PUBKEY_HASH", - "UNUSED_NESTED_PUBKEY_HASH" - ], - "default": "WITNESS_PUBKEY_HASH" - } - ], - "tags": [ - "Lightning" - ] - } - }, - "/v1/payments": { - "get": { - "summary": "lncli: `listpayments`\nListPayments returns a list of all outgoing payments.", - "operationId": "ListPayments", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/lnrpcListPaymentsResponse" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "include_incomplete", - "description": "If true, then return payments that have not yet fully completed. This means\nthat pending payments, as well as failed payments will show up if this\nfield is set to true. 
This flag doesn't change the meaning of the indices,\nwhich are tied to individual payments.", - "in": "query", - "required": false, - "type": "boolean", - "format": "boolean" - }, - { - "name": "index_offset", - "description": "The index of a payment that will be used as either the start or end of a\nquery to determine which payments should be returned in the response. The\nindex_offset is exclusive. In the case of a zero index_offset, the query\nwill start with the oldest payment when paginating forwards, or will end\nwith the most recent payment when paginating backwards.", - "in": "query", - "required": false, - "type": "string", - "format": "uint64" - }, - { - "name": "max_payments", - "description": "The maximal number of payments returned in the response to this query.", - "in": "query", - "required": false, - "type": "string", - "format": "uint64" - }, - { - "name": "reversed", - "description": "If set, the payments returned will result from seeking backwards from the\nspecified index offset. This can be used to paginate backwards. 
The order\nof the returned payments is always oldest first (ascending index order).", - "in": "query", - "required": false, - "type": "boolean", - "format": "boolean" - } - ], - "tags": [ - "Lightning" - ] - }, - "delete": { - "summary": "DeleteAllPayments deletes all outgoing payments from DB.", - "operationId": "DeleteAllPayments", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/lnrpcDeleteAllPaymentsResponse" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "tags": [ - "Lightning" - ] - } - }, - "/v1/payreq/{pay_req}": { - "get": { - "summary": "lncli: `decodepayreq`\nDecodePayReq takes an encoded payment request string and attempts to decode\nit, returning a full description of the conditions encoded within the\npayment request.", - "operationId": "DecodePayReq", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/lnrpcPayReq" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "pay_req", - "description": "The payment request string to be decoded", - "in": "path", - "required": true, - "type": "string" - } - ], - "tags": [ - "Lightning" - ] - } - }, - "/v1/peers": { - "get": { - "summary": "lncli: `listpeers`\nListPeers returns a verbose listing of all currently active peers.", - "operationId": "ListPeers", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/lnrpcListPeersResponse" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "latest_error", - "description": "If true, only the last error that our peer sent us will be returned with\nthe peer's information, 
rather than the full set of historic errors we have\nstored.", - "in": "query", - "required": false, - "type": "boolean", - "format": "boolean" - } - ], - "tags": [ - "Lightning" - ] - }, - "post": { - "summary": "lncli: `connect`\nConnectPeer attempts to establish a connection to a remote peer. This is at\nthe networking level, and is used for communication between nodes. This is\ndistinct from establishing a channel with a peer.", - "operationId": "ConnectPeer", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/lnrpcConnectPeerResponse" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/lnrpcConnectPeerRequest" - } - } - ], - "tags": [ - "Lightning" - ] - } - }, - "/v1/peers/subscribe": { - "get": { - "summary": "SubscribePeerEvents creates a uni-directional stream from the server to\nthe client in which any events relevant to the state of peers are sent\nover. Events include peers going online and offline.", - "operationId": "SubscribePeerEvents", - "responses": { - "200": { - "description": "A successful response.(streaming responses)", - "schema": { - "type": "object", - "properties": { - "result": { - "$ref": "#/definitions/lnrpcPeerEvent" - }, - "error": { - "$ref": "#/definitions/runtimeStreamError" - } - }, - "title": "Stream result of lnrpcPeerEvent" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "tags": [ - "Lightning" - ] - } - }, - "/v1/peers/{pub_key}": { - "delete": { - "summary": "lncli: `disconnect`\nDisconnectPeer attempts to disconnect one peer from another identified by a\ngiven pubKey. 
In the case that we currently have a pending or active channel\nwith the target peer, then this action will be not be allowed.", - "operationId": "DisconnectPeer", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/lnrpcDisconnectPeerResponse" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "pub_key", - "description": "The pubkey of the node to disconnect from", - "in": "path", - "required": true, - "type": "string" - } - ], - "tags": [ - "Lightning" - ] - } - }, - "/v1/signmessage": { - "post": { - "summary": "lncli: `signmessage`\nSignMessage signs a message with this node's private key. The returned\nsignature string is `zbase32` encoded and pubkey recoverable, meaning that\nonly the message digest and signature are needed for verification.", - "operationId": "SignMessage", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/lnrpcSignMessageResponse" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/lnrpcSignMessageRequest" - } - } - ], - "tags": [ - "Lightning" - ] - } - }, - "/v1/stop": { - "post": { - "summary": "lncli: `stop`\nStopDaemon will send a shutdown request to the interrupt handler, triggering\na graceful shutdown of the daemon.", - "operationId": "StopDaemon", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/lnrpcStopResponse" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - 
"$ref": "#/definitions/lnrpcStopRequest" - } - } - ], - "tags": [ - "Lightning" - ] - } - }, - "/v1/switch": { - "post": { - "summary": "lncli: `fwdinghistory`\nForwardingHistory allows the caller to query the htlcswitch for a record of\nall HTLCs forwarded within the target time range, and integer offset\nwithin that time range. If no time-range is specified, then the first chunk\nof the past 24 hrs of forwarding history are returned.", - "description": "A list of forwarding events are returned. The size of each forwarding event\nis 40 bytes, and the max message size able to be returned in gRPC is 4 MiB.\nAs a result each message can only contain 50k entries. Each response has\nthe index offset of the last entry. The index offset can be provided to the\nrequest to allow the caller to skip a series of records.", - "operationId": "ForwardingHistory", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/lnrpcForwardingHistoryResponse" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/lnrpcForwardingHistoryRequest" - } - } - ], - "tags": [ - "Lightning" - ] - } - }, - "/v1/transactions": { - "get": { - "summary": "lncli: `listchaintxns`\nGetTransactions returns a list describing all the known transactions\nrelevant to the wallet.", - "operationId": "GetTransactions", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/lnrpcTransactionDetails" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "start_height", - "description": "The height from which to list transactions, inclusive. 
If this value is\ngreater than end_height, transactions will be read in reverse.", - "in": "query", - "required": false, - "type": "integer", - "format": "int32" - }, - { - "name": "end_height", - "description": "The height until which to list transactions, inclusive. To include\nunconfirmed transactions, this value should be set to -1, which will\nreturn transactions from start_height until the current chain tip and\nunconfirmed transactions. If no end_height is provided, the call will\ndefault to this option.", - "in": "query", - "required": false, - "type": "integer", - "format": "int32" - }, - { - "name": "txns_limit", - "in": "query", - "required": false, - "type": "integer", - "format": "int32" - }, - { - "name": "txns_skip", - "in": "query", - "required": false, - "type": "integer", - "format": "int32" - }, - { - "name": "coinbase", - "in": "query", - "required": false, - "type": "integer", - "format": "int32" - } - ], - "tags": [ - "Lightning" - ] - }, - "post": { - "summary": "lncli: `sendcoins`\nSendCoins executes a request to send coins to a particular address. Unlike\nSendMany, this RPC call only allows creating a single output at a time. 
If\nneither target_conf, or sat_per_byte are set, then the internal wallet will\nconsult its fee model to determine a fee for the default confirmation\ntarget.", - "operationId": "SendCoins", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/lnrpcSendCoinsResponse" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/lnrpcSendCoinsRequest" - } - } - ], - "tags": [ - "Lightning" - ] - } - }, - "/v1/transactions/fee": { - "get": { - "summary": "lncli: `estimatefee`\nEstimateFee asks the chain backend to estimate the fee rate and total fees\nfor a transaction that pays to multiple specified outputs.", - "description": "When using REST, the `AddrToAmount` map type can be set by appending\n`\u0026AddrToAmount[\u003caddress\u003e]=\u003camount_to_send\u003e` to the URL. Unfortunately this\nmap type doesn't appear in the REST API documentation because of a bug in\nthe grpc-gateway library.", - "operationId": "EstimateFee", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/lnrpcEstimateFeeResponse" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "target_conf", - "description": "The target number of blocks that this transaction should be confirmed\nby.", - "in": "query", - "required": false, - "type": "integer", - "format": "int32" - } - ], - "tags": [ - "Lightning" - ] - } - }, - "/v1/transactions/many": { - "post": { - "summary": "lncli: `sendmany`\nSendMany handles a request for a transaction that creates multiple specified\noutputs in parallel. 
If neither target_conf, or sat_per_byte are set, then\nthe internal wallet will consult its fee model to determine a fee for the\ndefault confirmation target.", - "operationId": "SendMany", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/lnrpcSendManyResponse" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/lnrpcSendManyRequest" - } - } - ], - "tags": [ - "Lightning" - ] - } - }, - "/v1/transactions/subscribe": { - "get": { - "summary": "SubscribeTransactions creates a uni-directional stream from the server to\nthe client in which any newly discovered transactions relevant to the\nwallet are sent over.", - "operationId": "SubscribeTransactions", - "responses": { - "200": { - "description": "A successful response.(streaming responses)", - "schema": { - "type": "object", - "properties": { - "result": { - "$ref": "#/definitions/lnrpcTransaction" - }, - "error": { - "$ref": "#/definitions/runtimeStreamError" - } - }, - "title": "Stream result of lnrpcTransaction" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "start_height", - "description": "The height from which to list transactions, inclusive. If this value is\ngreater than end_height, transactions will be read in reverse.", - "in": "query", - "required": false, - "type": "integer", - "format": "int32" - }, - { - "name": "end_height", - "description": "The height until which to list transactions, inclusive. To include\nunconfirmed transactions, this value should be set to -1, which will\nreturn transactions from start_height until the current chain tip and\nunconfirmed transactions. 
If no end_height is provided, the call will\ndefault to this option.", - "in": "query", - "required": false, - "type": "integer", - "format": "int32" - }, - { - "name": "txns_limit", - "in": "query", - "required": false, - "type": "integer", - "format": "int32" - }, - { - "name": "txns_skip", - "in": "query", - "required": false, - "type": "integer", - "format": "int32" - }, - { - "name": "coinbase", - "in": "query", - "required": false, - "type": "integer", - "format": "int32" - } - ], - "tags": [ - "Lightning" - ] - } - }, - "/v1/utxos": { - "get": { - "summary": "lncli: `listunspent`\nDeprecated, use walletrpc.ListUnspent instead.", - "description": "ListUnspent returns a list of all utxos spendable by the wallet with a\nnumber of confirmations between the specified minimum and maximum.", - "operationId": "ListUnspent", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/lnrpcListUnspentResponse" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "min_confs", - "description": "The minimum number of confirmations to be included.", - "in": "query", - "required": false, - "type": "integer", - "format": "int32" - }, - { - "name": "max_confs", - "description": "The maximum number of confirmations to be included.", - "in": "query", - "required": false, - "type": "integer", - "format": "int32" - } - ], - "tags": [ - "Lightning" - ] - } - }, - "/v1/verifymessage": { - "post": { - "summary": "lncli: `verifymessage`\nVerifyMessage verifies a signature over a msg. The signature must be\nzbase32 encoded and signed by an active node in the resident node's\nchannel database. 
In addition to returning the validity of the signature,\nVerifyMessage also returns the recovered pubkey from the signature.", - "operationId": "VerifyMessage", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/lnrpcVerifyMessageResponse" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/lnrpcVerifyMessageRequest" - } - } - ], - "tags": [ - "Lightning" - ] - } - } - }, - "definitions": { - "ChannelCloseSummaryClosureType": { - "type": "string", - "enum": [ - "COOPERATIVE_CLOSE", - "LOCAL_FORCE_CLOSE", - "REMOTE_FORCE_CLOSE", - "BREACH_CLOSE", - "FUNDING_CANCELED", - "ABANDONED" - ], - "default": "COOPERATIVE_CLOSE" - }, - "ChannelEventUpdateUpdateType": { - "type": "string", - "enum": [ - "OPEN_CHANNEL", - "CLOSED_CHANNEL", - "ACTIVE_CHANNEL", - "INACTIVE_CHANNEL", - "PENDING_OPEN_CHANNEL" - ], - "default": "OPEN_CHANNEL" - }, - "FailureFailureCode": { - "type": "string", - "enum": [ - "RESERVED", - "INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS", - "INCORRECT_PAYMENT_AMOUNT", - "FINAL_INCORRECT_CLTV_EXPIRY", - "FINAL_INCORRECT_HTLC_AMOUNT", - "FINAL_EXPIRY_TOO_SOON", - "INVALID_REALM", - "EXPIRY_TOO_SOON", - "INVALID_ONION_VERSION", - "INVALID_ONION_HMAC", - "INVALID_ONION_KEY", - "AMOUNT_BELOW_MINIMUM", - "FEE_INSUFFICIENT", - "INCORRECT_CLTV_EXPIRY", - "CHANNEL_DISABLED", - "TEMPORARY_CHANNEL_FAILURE", - "REQUIRED_NODE_FEATURE_MISSING", - "REQUIRED_CHANNEL_FEATURE_MISSING", - "UNKNOWN_NEXT_PEER", - "TEMPORARY_NODE_FAILURE", - "PERMANENT_NODE_FAILURE", - "PERMANENT_CHANNEL_FAILURE", - "EXPIRY_TOO_FAR", - "MPP_TIMEOUT", - "INTERNAL_FAILURE", - "UNKNOWN_FAILURE", - "UNREADABLE_FAILURE" - ], - "default": "RESERVED", - "description": " - RESERVED: The numbers assigned in this enumeration match the failure codes 
as\ndefined in BOLT #4. Because protobuf 3 requires enums to start with 0,\na RESERVED value is added.\n - INTERNAL_FAILURE: An internal error occurred.\n - UNKNOWN_FAILURE: The error source is known, but the failure itself couldn't be decoded.\n - UNREADABLE_FAILURE: An unreadable failure result is returned if the received failure message\ncannot be decrypted. In that case the error source is unknown." - }, - "ForceClosedChannelAnchorState": { - "type": "string", - "enum": [ - "LIMBO", - "RECOVERED", - "LOST" - ], - "default": "LIMBO" - }, - "HTLCAttemptHTLCStatus": { - "type": "string", - "enum": [ - "IN_FLIGHT", - "SUCCEEDED", - "FAILED" - ], - "default": "IN_FLIGHT" - }, - "InvoiceInvoiceState": { - "type": "string", - "enum": [ - "OPEN", - "SETTLED", - "CANCELED", - "ACCEPTED" - ], - "default": "OPEN" - }, - "PaymentPaymentStatus": { - "type": "string", - "enum": [ - "UNKNOWN", - "IN_FLIGHT", - "SUCCEEDED", - "FAILED" - ], - "default": "UNKNOWN" - }, - "PeerEventEventType": { - "type": "string", - "enum": [ - "PEER_ONLINE", - "PEER_OFFLINE" - ], - "default": "PEER_ONLINE" - }, - "PeerSyncType": { - "type": "string", - "enum": [ - "UNKNOWN_SYNC", - "ACTIVE_SYNC", - "PASSIVE_SYNC" - ], - "default": "UNKNOWN_SYNC", - "description": " - UNKNOWN_SYNC: Denotes that we cannot determine the peer's current sync type.\n - ACTIVE_SYNC: Denotes that we are actively receiving new graph updates from the peer.\n - PASSIVE_SYNC: Denotes that we are not receiving new graph updates from the peer." 
- }, - "PendingChannelsResponseClosedChannel": { - "type": "object", - "properties": { - "channel": { - "$ref": "#/definitions/PendingChannelsResponsePendingChannel", - "title": "The pending channel to be closed" - }, - "closing_txid": { - "type": "string", - "title": "The transaction id of the closing transaction" - } - } - }, - "PendingChannelsResponseCommitments": { - "type": "object", - "properties": { - "local_txid": { - "type": "string", - "description": "Hash of the local version of the commitment tx." - }, - "remote_txid": { - "type": "string", - "description": "Hash of the remote version of the commitment tx." - }, - "remote_pending_txid": { - "type": "string", - "description": "Hash of the remote pending version of the commitment tx." - }, - "local_commit_fee_sat": { - "type": "string", - "format": "uint64", - "description": "The amount in satoshis calculated to be paid in fees for the local\ncommitment." - }, - "remote_commit_fee_sat": { - "type": "string", - "format": "uint64", - "description": "The amount in satoshis calculated to be paid in fees for the remote\ncommitment." - }, - "remote_pending_commit_fee_sat": { - "type": "string", - "format": "uint64", - "description": "The amount in satoshis calculated to be paid in fees for the remote\npending commitment." 
- } - } - }, - "PendingChannelsResponseForceClosedChannel": { - "type": "object", - "properties": { - "channel": { - "$ref": "#/definitions/PendingChannelsResponsePendingChannel", - "title": "The pending channel to be force closed" - }, - "closing_txid": { - "type": "string", - "title": "The transaction id of the closing transaction" - }, - "limbo_balance": { - "type": "string", - "format": "int64", - "title": "The balance in satoshis encumbered in this pending channel" - }, - "maturity_height": { - "type": "integer", - "format": "int64", - "title": "The height at which funds can be swept into the wallet" - }, - "blocks_til_maturity": { - "type": "integer", - "format": "int32", - "description": "Remaining # of blocks until the commitment output can be swept.\nNegative values indicate how many blocks have passed since becoming\nmature." - }, - "recovered_balance": { - "type": "string", - "format": "int64", - "title": "The total value of funds successfully recovered from this channel" - }, - "pending_htlcs": { - "type": "array", - "items": { - "$ref": "#/definitions/lnrpcPendingHTLC" - } - }, - "anchor": { - "$ref": "#/definitions/ForceClosedChannelAnchorState" - } - } - }, - "PendingChannelsResponsePendingChannel": { - "type": "object", - "properties": { - "remote_node_pub": { - "type": "string" - }, - "channel_point": { - "type": "string" - }, - "capacity": { - "type": "string", - "format": "int64" - }, - "local_balance": { - "type": "string", - "format": "int64" - }, - "remote_balance": { - "type": "string", - "format": "int64" - }, - "local_chan_reserve_sat": { - "type": "string", - "format": "int64", - "description": "The minimum satoshis this node is required to reserve in its\nbalance." - }, - "remote_chan_reserve_sat": { - "type": "string", - "format": "int64", - "description": "The minimum satoshis the other node is required to reserve in its\nbalance." 
- }, - "initiator": { - "$ref": "#/definitions/lnrpcInitiator", - "description": "The party that initiated opening the channel." - }, - "commitment_type": { - "$ref": "#/definitions/lnrpcCommitmentType", - "description": "The commitment type used by this channel." - } - } - }, - "PendingChannelsResponsePendingOpenChannel": { - "type": "object", - "properties": { - "channel": { - "$ref": "#/definitions/PendingChannelsResponsePendingChannel", - "title": "The pending channel" - }, - "confirmation_height": { - "type": "integer", - "format": "int64", - "title": "The height at which this channel will be confirmed" - }, - "commit_fee": { - "type": "string", - "format": "int64", - "description": "The amount calculated to be paid in fees for the current set of\ncommitment transactions. The fee amount is persisted with the channel\nin order to allow the fee amount to be removed and recalculated with\neach channel state update, including updates that happen after a system\nrestart." - }, - "commit_weight": { - "type": "string", - "format": "int64", - "title": "The weight of the commitment transaction" - }, - "fee_per_kw": { - "type": "string", - "format": "int64", - "description": "The required number of satoshis per kilo-weight that the requester will\npay at all times, for both the funding transaction and commitment\ntransaction. This value can later be updated once the channel is open." - } - } - }, - "PendingChannelsResponseWaitingCloseChannel": { - "type": "object", - "properties": { - "channel": { - "$ref": "#/definitions/PendingChannelsResponsePendingChannel", - "title": "The pending channel waiting for closing tx to confirm" - }, - "limbo_balance": { - "type": "string", - "format": "int64", - "title": "The balance in satoshis encumbered in this channel" - }, - "commitments": { - "$ref": "#/definitions/PendingChannelsResponseCommitments", - "description": "A list of valid commitment transactions. Any of these can confirm at\nthis point." 
- } - } - }, - "lnrpcAbandonChannelResponse": { - "type": "object" - }, - "lnrpcAddInvoiceResponse": { - "type": "object", - "properties": { - "r_hash": { - "type": "string", - "format": "byte" - }, - "payment_request": { - "type": "string", - "description": "A bare-bones invoice for a payment within the Lightning Network. With the\ndetails of the invoice, the sender has all the data necessary to send a\npayment to the recipient." - }, - "add_index": { - "type": "string", - "format": "uint64", - "description": "The \"add\" index of this invoice. Each newly created invoice will increment\nthis index making it monotonically increasing. Callers to the\nSubscribeInvoices call can use this to instantly get notified of all added\ninvoices with an add_index greater than this one." - } - } - }, - "lnrpcAddressType": { - "type": "string", - "enum": [ - "WITNESS_PUBKEY_HASH", - "NESTED_PUBKEY_HASH", - "UNUSED_WITNESS_PUBKEY_HASH", - "UNUSED_NESTED_PUBKEY_HASH" - ], - "default": "WITNESS_PUBKEY_HASH", - "description": "- `p2wkh`: Pay to witness key hash (`WITNESS_PUBKEY_HASH` = 0)\n- `np2wkh`: Pay to nested witness key hash (`NESTED_PUBKEY_HASH` = 1)", - "title": "`AddressType` has to be one of:" - }, - "lnrpcAmount": { - "type": "object", - "properties": { - "sat": { - "type": "string", - "format": "uint64", - "description": "Value denominated in satoshis." - }, - "msat": { - "type": "string", - "format": "uint64", - "description": "Value denominated in milli-satoshis." - } - } - }, - "lnrpcBakeMacaroonRequest": { - "type": "object", - "properties": { - "permissions": { - "type": "array", - "items": { - "$ref": "#/definitions/lnrpcMacaroonPermission" - }, - "description": "The list of permissions the new macaroon should grant." - }, - "root_key_id": { - "type": "string", - "format": "uint64", - "description": "The root key ID used to create the macaroon, must be a positive integer." 
- } - } - }, - "lnrpcBakeMacaroonResponse": { - "type": "object", - "properties": { - "macaroon": { - "type": "string", - "description": "The hex encoded macaroon, serialized in binary format." - } - } - }, - "lnrpcChain": { - "type": "object", - "properties": { - "chain": { - "type": "string", - "title": "The blockchain the node is on (eg bitcoin, litecoin)" - }, - "network": { - "type": "string", - "title": "The network the node is on (eg regtest, testnet, mainnet)" - } - } - }, - "lnrpcChanBackupSnapshot": { - "type": "object", - "properties": { - "single_chan_backups": { - "$ref": "#/definitions/lnrpcChannelBackups", - "description": "The set of new channels that have been added since the last channel backup\nsnapshot was requested." - }, - "multi_chan_backup": { - "$ref": "#/definitions/lnrpcMultiChanBackup", - "description": "A multi-channel backup that covers all open channels currently known to\nlnd." - } - } - }, - "lnrpcChanPointShim": { - "type": "object", - "properties": { - "amt": { - "type": "string", - "format": "int64", - "description": "The size of the pre-crafted output to be used as the channel point for this\nchannel funding." - }, - "chan_point": { - "$ref": "#/definitions/lnrpcChannelPoint", - "description": "The target channel point to refrence in created commitment transactions." - }, - "local_key": { - "$ref": "#/definitions/lnrpcKeyDescriptor", - "description": "Our local key to use when creating the multi-sig output." - }, - "remote_key": { - "type": "string", - "format": "byte", - "description": "The key of the remote party to use when creating the multi-sig output." - }, - "pending_chan_id": { - "type": "string", - "format": "byte", - "description": "If non-zero, then this will be used as the pending channel ID on the wire\nprotocol to initate the funding request. This is an optional field, and\nshould only be set if the responder is already expecting a specific pending\nchannel ID." 
- }, - "thaw_height": { - "type": "integer", - "format": "int64", - "description": "This uint32 indicates if this channel is to be considered 'frozen'. A frozen\nchannel does not allow a cooperative channel close by the initiator. The\nthaw_height is the height that this restriction stops applying to the\nchannel. The height can be interpreted in two ways: as a relative height if\nthe value is less than 500,000, or as an absolute height otherwise." - } - } - }, - "lnrpcChannel": { - "type": "object", - "properties": { - "active": { - "type": "boolean", - "format": "boolean", - "title": "Whether this channel is active or not" - }, - "remote_pubkey": { - "type": "string", - "title": "The identity pubkey of the remote node" - }, - "channel_point": { - "type": "string", - "description": "The outpoint (txid:index) of the funding transaction. With this value, Bob\nwill be able to generate a signature for Alice's version of the commitment\ntransaction." - }, - "chan_id": { - "type": "string", - "format": "uint64", - "description": "The unique channel ID for the channel. The first 3 bytes are the block\nheight, the next 3 the index within the block, and the last 2 bytes are the\noutput index for the channel." - }, - "capacity": { - "type": "string", - "format": "int64", - "title": "The total amount of funds held in this channel" - }, - "local_balance": { - "type": "string", - "format": "int64", - "title": "This node's current balance in this channel" - }, - "remote_balance": { - "type": "string", - "format": "int64", - "title": "The counterparty's current balance in this channel" - }, - "commit_fee": { - "type": "string", - "format": "int64", - "description": "The amount calculated to be paid in fees for the current set of commitment\ntransactions. The fee amount is persisted with the channel in order to\nallow the fee amount to be removed and recalculated with each channel state\nupdate, including updates that happen after a system restart." 
- }, - "commit_weight": { - "type": "string", - "format": "int64", - "title": "The weight of the commitment transaction" - }, - "fee_per_kw": { - "type": "string", - "format": "int64", - "description": "The required number of satoshis per kilo-weight that the requester will pay\nat all times, for both the funding transaction and commitment transaction.\nThis value can later be updated once the channel is open." - }, - "unsettled_balance": { - "type": "string", - "format": "int64", - "title": "The unsettled balance in this channel" - }, - "total_satoshis_sent": { - "type": "string", - "format": "int64", - "description": "The total number of satoshis we've sent within this channel." - }, - "total_satoshis_received": { - "type": "string", - "format": "int64", - "description": "The total number of satoshis we've received within this channel." - }, - "num_updates": { - "type": "string", - "format": "uint64", - "description": "The total number of updates conducted within this channel." - }, - "pending_htlcs": { - "type": "array", - "items": { - "$ref": "#/definitions/lnrpcHTLC" - }, - "description": "The list of active, uncleared HTLCs currently pending within the channel." - }, - "csv_delay": { - "type": "integer", - "format": "int64", - "description": "Deprecated. The CSV delay expressed in relative blocks. If the channel is\nforce closed, we will need to wait for this many blocks before we can regain\nour funds." - }, - "private": { - "type": "boolean", - "format": "boolean", - "description": "Whether this channel is advertised to the network or not." - }, - "initiator": { - "type": "boolean", - "format": "boolean", - "description": "True if we were the ones that created the channel." - }, - "chan_status_flags": { - "type": "string", - "description": "A set of flags showing the current state of the channel." - }, - "local_chan_reserve_sat": { - "type": "string", - "format": "int64", - "description": "Deprecated. 
The minimum satoshis this node is required to reserve in its\nbalance." - }, - "remote_chan_reserve_sat": { - "type": "string", - "format": "int64", - "description": "Deprecated. The minimum satoshis the other node is required to reserve in\nits balance." - }, - "static_remote_key": { - "type": "boolean", - "format": "boolean", - "description": "Deprecated. Use commitment_type." - }, - "commitment_type": { - "$ref": "#/definitions/lnrpcCommitmentType", - "description": "The commitment type used by this channel." - }, - "lifetime": { - "type": "string", - "format": "int64", - "description": "The number of seconds that the channel has been monitored by the channel\nscoring system. Scores are currently not persisted, so this value may be\nless than the lifetime of the channel [EXPERIMENTAL]." - }, - "uptime": { - "type": "string", - "format": "int64", - "description": "The number of seconds that the remote peer has been observed as being online\nby the channel scoring system over the lifetime of the channel\n[EXPERIMENTAL]." - }, - "close_address": { - "type": "string", - "description": "Close address is the address that we will enforce payout to on cooperative\nclose if the channel was opened utilizing option upfront shutdown. This\nvalue can be set on channel open by setting close_address in an open channel\nrequest. If this value is not set, you can still choose a payout address by\ncooperatively closing with the delivery_address field set." - }, - "push_amount_sat": { - "type": "string", - "format": "uint64", - "description": "The amount that the initiator of the channel optionally pushed to the remote\nparty on channel open. This amount will be zero if the channel initiator did\nnot push any funds to the remote peer. If the initiator field is true, we\npushed this amount to our peer, if it is false, the remote peer pushed this\namount to us." 
- }, - "thaw_height": { - "type": "integer", - "format": "int64", - "description": "This uint32 indicates if this channel is to be considered 'frozen'. A\nfrozen channel doest not allow a cooperative channel close by the\ninitiator. The thaw_height is the height that this restriction stops\napplying to the channel. This field is optional, not setting it or using a\nvalue of zero will mean the channel has no additional restrictions. The\nheight can be interpreted in two ways: as a relative height if the value is\nless than 500,000, or as an absolute height otherwise." - }, - "local_constraints": { - "$ref": "#/definitions/lnrpcChannelConstraints", - "description": "List constraints for the local node." - }, - "remote_constraints": { - "$ref": "#/definitions/lnrpcChannelConstraints", - "description": "List constraints for the remote node." - } - } - }, - "lnrpcChannelAcceptRequest": { - "type": "object", - "properties": { - "node_pubkey": { - "type": "string", - "format": "byte", - "description": "The pubkey of the node that wishes to open an inbound channel." - }, - "chain_hash": { - "type": "string", - "format": "byte", - "description": "The hash of the genesis block that the proposed channel resides in." - }, - "pending_chan_id": { - "type": "string", - "format": "byte", - "description": "The pending channel id." - }, - "funding_amt": { - "type": "string", - "format": "uint64", - "description": "The funding amount in satoshis that initiator wishes to use in the\nchannel." - }, - "push_amt": { - "type": "string", - "format": "uint64", - "description": "The push amount of the proposed channel in millisatoshis." - }, - "dust_limit": { - "type": "string", - "format": "uint64", - "description": "The dust limit of the initiator's commitment tx." - }, - "max_value_in_flight": { - "type": "string", - "format": "uint64", - "description": "The maximum amount of coins in millisatoshis that can be pending in this\nchannel." 
- }, - "channel_reserve": { - "type": "string", - "format": "uint64", - "description": "The minimum amount of satoshis the initiator requires us to have at all\ntimes." - }, - "min_htlc": { - "type": "string", - "format": "uint64", - "description": "The smallest HTLC in millisatoshis that the initiator will accept." - }, - "fee_per_kw": { - "type": "string", - "format": "uint64", - "description": "The initial fee rate that the initiator suggests for both commitment\ntransactions." - }, - "csv_delay": { - "type": "integer", - "format": "int64", - "description": "The number of blocks to use for the relative time lock in the pay-to-self\noutput of both commitment transactions." - }, - "max_accepted_htlcs": { - "type": "integer", - "format": "int64", - "description": "The total number of incoming HTLC's that the initiator will accept." - }, - "channel_flags": { - "type": "integer", - "format": "int64", - "description": "A bit-field which the initiator uses to specify proposed channel\nbehavior." - } - } - }, - "lnrpcChannelBackup": { - "type": "object", - "properties": { - "chan_point": { - "$ref": "#/definitions/lnrpcChannelPoint", - "description": "Identifies the channel that this backup belongs to." - }, - "chan_backup": { - "type": "string", - "format": "byte", - "description": "Is an encrypted single-chan backup. this can be passed to\nRestoreChannelBackups, or the WalletUnlocker Init and Unlock methods in\norder to trigger the recovery protocol. When using REST, this field must be\nencoded as base64." - } - } - }, - "lnrpcChannelBackups": { - "type": "object", - "properties": { - "chan_backups": { - "type": "array", - "items": { - "$ref": "#/definitions/lnrpcChannelBackup" - }, - "description": "A set of single-chan static channel backups." - } - } - }, - "lnrpcChannelBalanceResponse": { - "type": "object", - "properties": { - "balance": { - "type": "string", - "format": "int64", - "title": "Deprecated. 
Sum of channels balances denominated in satoshis" - }, - "pending_open_balance": { - "type": "string", - "format": "int64", - "title": "Deprecated. Sum of channels pending balances denominated in satoshis" - }, - "local_balance": { - "$ref": "#/definitions/lnrpcAmount", - "description": "Sum of channels local balances." - }, - "remote_balance": { - "$ref": "#/definitions/lnrpcAmount", - "description": "Sum of channels remote balances." - }, - "unsettled_local_balance": { - "$ref": "#/definitions/lnrpcAmount", - "description": "Sum of channels local unsettled balances." - }, - "unsettled_remote_balance": { - "$ref": "#/definitions/lnrpcAmount", - "description": "Sum of channels remote unsettled balances." - }, - "pending_open_local_balance": { - "$ref": "#/definitions/lnrpcAmount", - "description": "Sum of channels pending local balances." - }, - "pending_open_remote_balance": { - "$ref": "#/definitions/lnrpcAmount", - "description": "Sum of channels pending remote balances." - } - } - }, - "lnrpcChannelCloseSummary": { - "type": "object", - "properties": { - "channel_point": { - "type": "string", - "description": "The outpoint (txid:index) of the funding transaction." - }, - "chan_id": { - "type": "string", - "format": "uint64", - "description": "The unique channel ID for the channel." - }, - "chain_hash": { - "type": "string", - "description": "The hash of the genesis block that this channel resides within." - }, - "closing_tx_hash": { - "type": "string", - "description": "The txid of the transaction which ultimately closed this channel." - }, - "remote_pubkey": { - "type": "string", - "description": "Public key of the remote peer that we formerly had a channel with." - }, - "capacity": { - "type": "string", - "format": "int64", - "description": "Total capacity of the channel." - }, - "close_height": { - "type": "integer", - "format": "int64", - "description": "Height at which the funding transaction was spent." 
- }, - "settled_balance": { - "type": "string", - "format": "int64", - "title": "Settled balance at the time of channel closure" - }, - "time_locked_balance": { - "type": "string", - "format": "int64", - "title": "The sum of all the time-locked outputs at the time of channel closure" - }, - "close_type": { - "$ref": "#/definitions/ChannelCloseSummaryClosureType", - "description": "Details on how the channel was closed." - }, - "open_initiator": { - "$ref": "#/definitions/lnrpcInitiator", - "description": "Open initiator is the party that initiated opening the channel. Note that\nthis value may be unknown if the channel was closed before we migrated to\nstore open channel information after close." - }, - "close_initiator": { - "$ref": "#/definitions/lnrpcInitiator", - "description": "Close initiator indicates which party initiated the close. This value will\nbe unknown for channels that were cooperatively closed before we started\ntracking cooperative close initiators. Note that this indicates which party\ninitiated a close, and it is possible for both to initiate cooperative or\nforce closes, although only one party's close will be confirmed on chain." - }, - "resolutions": { - "type": "array", - "items": { - "$ref": "#/definitions/lnrpcResolution" - } - } - } - }, - "lnrpcChannelCloseUpdate": { - "type": "object", - "properties": { - "closing_txid": { - "type": "string", - "format": "byte" - }, - "success": { - "type": "boolean", - "format": "boolean" - } - } - }, - "lnrpcChannelConstraints": { - "type": "object", - "properties": { - "csv_delay": { - "type": "integer", - "format": "int64", - "description": "The CSV delay expressed in relative blocks. If the channel is force closed,\nwe will need to wait for this many blocks before we can regain our funds." - }, - "chan_reserve_sat": { - "type": "string", - "format": "uint64", - "description": "The minimum satoshis this node is required to reserve in its balance." 
- }, - "dust_limit_sat": { - "type": "string", - "format": "uint64", - "description": "The dust limit (in satoshis) of the initiator's commitment tx." - }, - "max_pending_amt_msat": { - "type": "string", - "format": "uint64", - "description": "The maximum amount of coins in millisatoshis that can be pending in this\nchannel." - }, - "min_htlc_msat": { - "type": "string", - "format": "uint64", - "description": "The smallest HTLC in millisatoshis that the initiator will accept." - }, - "max_accepted_htlcs": { - "type": "integer", - "format": "int64", - "description": "The total number of incoming HTLC's that the initiator will accept." - } - } - }, - "lnrpcChannelEdge": { - "type": "object", - "properties": { - "channel_id": { - "type": "string", - "format": "uint64", - "description": "The unique channel ID for the channel. The first 3 bytes are the block\nheight, the next 3 the index within the block, and the last 2 bytes are the\noutput index for the channel." - }, - "chan_point": { - "type": "string" - }, - "last_update": { - "type": "integer", - "format": "int64" - }, - "node1_pub": { - "type": "string" - }, - "node2_pub": { - "type": "string" - }, - "capacity": { - "type": "string", - "format": "int64" - }, - "node1_policy": { - "$ref": "#/definitions/lnrpcRoutingPolicy" - }, - "node2_policy": { - "$ref": "#/definitions/lnrpcRoutingPolicy" - } - }, - "description": "A fully authenticated channel along with all its unique attributes.\nOnce an authenticated channel announcement has been processed on the network,\nthen an instance of ChannelEdgeInfo encapsulating the channels attributes is\nstored. The other portions relevant to routing policy of a channel are stored\nwithin a ChannelEdgePolicy for each direction of the channel." - }, - "lnrpcChannelEdgeUpdate": { - "type": "object", - "properties": { - "chan_id": { - "type": "string", - "format": "uint64", - "description": "The unique channel ID for the channel. 
The first 3 bytes are the block\nheight, the next 3 the index within the block, and the last 2 bytes are the\noutput index for the channel." - }, - "chan_point": { - "$ref": "#/definitions/lnrpcChannelPoint" - }, - "capacity": { - "type": "string", - "format": "int64" - }, - "routing_policy": { - "$ref": "#/definitions/lnrpcRoutingPolicy" - }, - "advertising_node": { - "type": "string" - }, - "connecting_node": { - "type": "string" - } - } - }, - "lnrpcChannelEventUpdate": { - "type": "object", - "properties": { - "open_channel": { - "$ref": "#/definitions/lnrpcChannel" - }, - "closed_channel": { - "$ref": "#/definitions/lnrpcChannelCloseSummary" - }, - "active_channel": { - "$ref": "#/definitions/lnrpcChannelPoint" - }, - "inactive_channel": { - "$ref": "#/definitions/lnrpcChannelPoint" - }, - "pending_open_channel": { - "$ref": "#/definitions/lnrpcPendingUpdate" - }, - "type": { - "$ref": "#/definitions/ChannelEventUpdateUpdateType" - } - } - }, - "lnrpcChannelFeeReport": { - "type": "object", - "properties": { - "chan_id": { - "type": "string", - "format": "uint64", - "description": "The short channel id that this fee report belongs to." - }, - "channel_point": { - "type": "string", - "description": "The channel that this fee report belongs to." - }, - "base_fee_msat": { - "type": "string", - "format": "int64", - "description": "The base fee charged regardless of the number of milli-satoshis sent." - }, - "fee_per_mil": { - "type": "string", - "format": "int64", - "description": "The amount charged per milli-satoshis transferred expressed in\nmillionths of a satoshi." - }, - "fee_rate": { - "type": "number", - "format": "double", - "description": "The effective fee rate in milli-satoshis. Computed by dividing the\nfee_per_mil value by 1 million." 
- } - } - }, - "lnrpcChannelGraph": { - "type": "object", - "properties": { - "nodes": { - "type": "array", - "items": { - "$ref": "#/definitions/lnrpcLightningNode" - }, - "title": "The list of `LightningNode`s in this channel graph" - }, - "edges": { - "type": "array", - "items": { - "$ref": "#/definitions/lnrpcChannelEdge" - }, - "title": "The list of `ChannelEdge`s in this channel graph" - } - }, - "description": "Returns a new instance of the directed channel graph." - }, - "lnrpcChannelOpenUpdate": { - "type": "object", - "properties": { - "channel_point": { - "$ref": "#/definitions/lnrpcChannelPoint" - } - } - }, - "lnrpcChannelPoint": { - "type": "object", - "properties": { - "funding_txid_bytes": { - "type": "string", - "format": "byte", - "description": "Txid of the funding transaction. When using REST, this field must be\nencoded as base64." - }, - "funding_txid_str": { - "type": "string", - "description": "Hex-encoded string representing the byte-reversed hash of the funding\ntransaction." - }, - "output_index": { - "type": "integer", - "format": "int64", - "title": "The index of the output of the funding transaction" - } - } - }, - "lnrpcChannelUpdate": { - "type": "object", - "properties": { - "signature": { - "type": "string", - "format": "byte", - "description": "The signature that validates the announced data and proves the ownership\nof node id." - }, - "chain_hash": { - "type": "string", - "format": "byte", - "description": "The target chain that this channel was opened within. This value\nshould be the genesis hash of the target chain. Along with the short\nchannel ID, this uniquely identifies the channel globally in a\nblockchain." - }, - "chan_id": { - "type": "string", - "format": "uint64", - "description": "The unique description of the funding transaction." 
- }, - "timestamp": { - "type": "integer", - "format": "int64", - "description": "A timestamp that allows ordering in the case of multiple announcements.\nWe should ignore the message if timestamp is not greater than the\nlast-received." - }, - "message_flags": { - "type": "integer", - "format": "int64", - "description": "The bitfield that describes whether optional fields are present in this\nupdate. Currently, the least-significant bit must be set to 1 if the\noptional field MaxHtlc is present." - }, - "channel_flags": { - "type": "integer", - "format": "int64", - "description": "The bitfield that describes additional meta-data concerning how the\nupdate is to be interpreted. Currently, the least-significant bit must be\nset to 0 if the creating node corresponds to the first node in the\npreviously sent channel announcement and 1 otherwise. If the second bit\nis set, then the channel is set to be disabled." - }, - "time_lock_delta": { - "type": "integer", - "format": "int64", - "description": "The minimum number of blocks this node requires to be added to the expiry\nof HTLCs. This is a security parameter determined by the node operator.\nThis value represents the required gap between the time locks of the\nincoming and outgoing HTLC's set to this node." - }, - "htlc_minimum_msat": { - "type": "string", - "format": "uint64", - "description": "The minimum HTLC value which will be accepted." - }, - "base_fee": { - "type": "integer", - "format": "int64", - "description": "The base fee that must be used for incoming HTLC's to this particular\nchannel. This value will be tacked onto the required for a payment\nindependent of the size of the payment." - }, - "fee_rate": { - "type": "integer", - "format": "int64", - "description": "The fee rate that will be charged per millionth of a satoshi." - }, - "htlc_maximum_msat": { - "type": "string", - "format": "uint64", - "description": "The maximum HTLC value which will be accepted." 
- }, - "extra_opaque_data": { - "type": "string", - "format": "byte", - "description": "The set of data that was appended to this message, some of which we may\nnot actually know how to iterate or parse. By holding onto this data, we\nensure that we're able to properly validate the set of signatures that\ncover these new fields, and ensure we're able to make upgrades to the\nnetwork in a forwards compatible manner." - } - } - }, - "lnrpcCloseStatusUpdate": { - "type": "object", - "properties": { - "close_pending": { - "$ref": "#/definitions/lnrpcPendingUpdate" - }, - "chan_close": { - "$ref": "#/definitions/lnrpcChannelCloseUpdate" - } - } - }, - "lnrpcClosedChannelUpdate": { - "type": "object", - "properties": { - "chan_id": { - "type": "string", - "format": "uint64", - "description": "The unique channel ID for the channel. The first 3 bytes are the block\nheight, the next 3 the index within the block, and the last 2 bytes are the\noutput index for the channel." - }, - "capacity": { - "type": "string", - "format": "int64" - }, - "closed_height": { - "type": "integer", - "format": "int64" - }, - "chan_point": { - "$ref": "#/definitions/lnrpcChannelPoint" - } - } - }, - "lnrpcClosedChannelsResponse": { - "type": "object", - "properties": { - "channels": { - "type": "array", - "items": { - "$ref": "#/definitions/lnrpcChannelCloseSummary" - } - } - } - }, - "lnrpcCommitmentType": { - "type": "string", - "enum": [ - "LEGACY", - "STATIC_REMOTE_KEY", - "ANCHORS", - "UNKNOWN_COMMITMENT_TYPE" - ], - "default": "LEGACY", - "description": " - LEGACY: A channel using the legacy commitment format having tweaked to_remote\nkeys.\n - STATIC_REMOTE_KEY: A channel that uses the modern commitment format where the key in the\noutput of the remote party does not change each state. 
This makes back\nup and recovery easier as when the channel is closed, the funds go\ndirectly to that key.\n - ANCHORS: A channel that uses a commitment format that has anchor outputs on the\ncommitments, allowing fee bumping after a force close transaction has\nbeen broadcast.\n - UNKNOWN_COMMITMENT_TYPE: Returned when the commitment type isn't known or unavailable." - }, - "lnrpcConnectPeerRequest": { - "type": "object", - "properties": { - "addr": { - "$ref": "#/definitions/lnrpcLightningAddress", - "title": "Lightning address of the peer, in the format `\u003cpubkey\u003e@host`" - }, - "perm": { - "type": "boolean", - "format": "boolean", - "description": "If set, the daemon will attempt to persistently connect to the target\npeer. Otherwise, the call will be synchronous." - }, - "timeout": { - "type": "string", - "format": "uint64", - "description": "The connection timeout value (in seconds) for this request. It won't affect\nother requests." - } - } - }, - "lnrpcConnectPeerResponse": { - "type": "object" - }, - "lnrpcDebugLevelRequest": { - "type": "object", - "properties": { - "show": { - "type": "boolean", - "format": "boolean" - }, - "level_spec": { - "type": "string" - } - } - }, - "lnrpcDebugLevelResponse": { - "type": "object", - "properties": { - "sub_systems": { - "type": "string" - } - } - }, - "lnrpcDeleteAllPaymentsResponse": { - "type": "object" - }, - "lnrpcDeleteMacaroonIDResponse": { - "type": "object", - "properties": { - "deleted": { - "type": "boolean", - "format": "boolean", - "description": "A boolean indicates that the deletion is successful." - } - } - }, - "lnrpcDisconnectPeerResponse": { - "type": "object" - }, - "lnrpcEdgeLocator": { - "type": "object", - "properties": { - "channel_id": { - "type": "string", - "format": "uint64", - "description": "The short channel id of this edge." - }, - "direction_reverse": { - "type": "boolean", - "format": "boolean", - "description": "The direction of this edge. 
If direction_reverse is false, the direction\nof this edge is from the channel endpoint with the lexicographically smaller\npub key to the endpoint with the larger pub key. If direction_reverse is\nis true, the edge goes the other way." - } - } - }, - "lnrpcEstimateFeeResponse": { - "type": "object", - "properties": { - "fee_sat": { - "type": "string", - "format": "int64", - "description": "The total fee in satoshis." - }, - "feerate_sat_per_byte": { - "type": "string", - "format": "int64", - "description": "The fee rate in satoshi/byte." - } - } - }, - "lnrpcFailure": { - "type": "object", - "properties": { - "code": { - "$ref": "#/definitions/FailureFailureCode", - "title": "Failure code as defined in the Lightning spec" - }, - "channel_update": { - "$ref": "#/definitions/lnrpcChannelUpdate", - "description": "An optional channel update message." - }, - "htlc_msat": { - "type": "string", - "format": "uint64", - "description": "A failure type-dependent htlc value." - }, - "onion_sha_256": { - "type": "string", - "format": "byte", - "description": "The sha256 sum of the onion payload." - }, - "cltv_expiry": { - "type": "integer", - "format": "int64", - "description": "A failure type-dependent cltv expiry value." - }, - "flags": { - "type": "integer", - "format": "int64", - "description": "A failure type-dependent flags value." - }, - "failure_source_index": { - "type": "integer", - "format": "int64", - "description": "The position in the path of the intermediate or final node that generated\nthe failure message. Position zero is the sender node." - }, - "height": { - "type": "integer", - "format": "int64", - "description": "A failure type-dependent block height." 
- } - } - }, - "lnrpcFeature": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "is_required": { - "type": "boolean", - "format": "boolean" - }, - "is_known": { - "type": "boolean", - "format": "boolean" - } - } - }, - "lnrpcFeatureBit": { - "type": "string", - "enum": [ - "DATALOSS_PROTECT_REQ", - "DATALOSS_PROTECT_OPT", - "INITIAL_ROUING_SYNC", - "UPFRONT_SHUTDOWN_SCRIPT_REQ", - "UPFRONT_SHUTDOWN_SCRIPT_OPT", - "GOSSIP_QUERIES_REQ", - "GOSSIP_QUERIES_OPT", - "TLV_ONION_REQ", - "TLV_ONION_OPT", - "EXT_GOSSIP_QUERIES_REQ", - "EXT_GOSSIP_QUERIES_OPT", - "STATIC_REMOTE_KEY_REQ", - "STATIC_REMOTE_KEY_OPT", - "PAYMENT_ADDR_REQ", - "PAYMENT_ADDR_OPT", - "MPP_REQ", - "MPP_OPT" - ], - "default": "DATALOSS_PROTECT_REQ" - }, - "lnrpcFeeLimit": { - "type": "object", - "properties": { - "fixed": { - "type": "string", - "format": "int64", - "description": "The fee limit expressed as a fixed amount of satoshis.\n\nThe fields fixed and fixed_msat are mutually exclusive." - }, - "fixed_msat": { - "type": "string", - "format": "int64", - "description": "The fee limit expressed as a fixed amount of millisatoshis.\n\nThe fields fixed and fixed_msat are mutually exclusive." - }, - "percent": { - "type": "string", - "format": "int64", - "description": "The fee limit expressed as a percentage of the payment amount." - } - } - }, - "lnrpcFeeReportResponse": { - "type": "object", - "properties": { - "channel_fees": { - "type": "array", - "items": { - "$ref": "#/definitions/lnrpcChannelFeeReport" - }, - "description": "An array of channel fee reports which describes the current fee schedule\nfor each channel." - }, - "day_fee_sum": { - "type": "string", - "format": "uint64", - "description": "The total amount of fee revenue (in satoshis) the switch has collected\nover the past 24 hrs." 
- }, - "week_fee_sum": { - "type": "string", - "format": "uint64", - "description": "The total amount of fee revenue (in satoshis) the switch has collected\nover the past 1 week." - }, - "month_fee_sum": { - "type": "string", - "format": "uint64", - "description": "The total amount of fee revenue (in satoshis) the switch has collected\nover the past 1 month." - } - } - }, - "lnrpcFloatMetric": { - "type": "object", - "properties": { - "value": { - "type": "number", - "format": "double", - "description": "Arbitrary float value." - }, - "normalized_value": { - "type": "number", - "format": "double", - "description": "The value normalized to [0,1] or [-1,1]." - } - } - }, - "lnrpcForwardingEvent": { - "type": "object", - "properties": { - "timestamp": { - "type": "string", - "format": "uint64", - "description": "Timestamp is the time (unix epoch offset) that this circuit was\ncompleted." - }, - "chan_id_in": { - "type": "string", - "format": "uint64", - "description": "The incoming channel ID that carried the HTLC that created the circuit." - }, - "chan_id_out": { - "type": "string", - "format": "uint64", - "description": "The outgoing channel ID that carried the preimage that completed the\ncircuit." - }, - "amt_in": { - "type": "string", - "format": "uint64", - "description": "The total amount (in satoshis) of the incoming HTLC that created half\nthe circuit." - }, - "amt_out": { - "type": "string", - "format": "uint64", - "description": "The total amount (in satoshis) of the outgoing HTLC that created the\nsecond half of the circuit." - }, - "fee": { - "type": "string", - "format": "uint64", - "description": "The total fee (in satoshis) that this payment circuit carried." - }, - "fee_msat": { - "type": "string", - "format": "uint64", - "description": "The total fee (in milli-satoshis) that this payment circuit carried." 
- }, - "amt_in_msat": { - "type": "string", - "format": "uint64", - "description": "The total amount (in milli-satoshis) of the incoming HTLC that created\nhalf the circuit." - }, - "amt_out_msat": { - "type": "string", - "format": "uint64", - "description": "The total amount (in milli-satoshis) of the outgoing HTLC that created\nthe second half of the circuit." - } - } - }, - "lnrpcForwardingHistoryRequest": { - "type": "object", - "properties": { - "start_time": { - "type": "string", - "format": "uint64", - "description": "Start time is the starting point of the forwarding history request. All\nrecords beyond this point will be included, respecting the end time, and\nthe index offset." - }, - "end_time": { - "type": "string", - "format": "uint64", - "description": "End time is the end point of the forwarding history request. The\nresponse will carry at most 50k records between the start time and the\nend time. The index offset can be used to implement pagination." - }, - "index_offset": { - "type": "integer", - "format": "int64", - "description": "Index offset is the offset in the time series to start at. As each\nresponse can only contain 50k records, callers can use this to skip\naround within a packed time series." - }, - "num_max_events": { - "type": "integer", - "format": "int64", - "description": "The max number of events to return in the response to this query." - } - } - }, - "lnrpcForwardingHistoryResponse": { - "type": "object", - "properties": { - "forwarding_events": { - "type": "array", - "items": { - "$ref": "#/definitions/lnrpcForwardingEvent" - }, - "description": "A list of forwarding events from the time slice of the time series\nspecified in the request." - }, - "last_offset_index": { - "type": "integer", - "format": "int64", - "description": "The index of the last time in the set of returned forwarding events. Can\nbe used to seek further, pagination style." 
- } - } - }, - "lnrpcFundingPsbtFinalize": { - "type": "object", - "properties": { - "signed_psbt": { - "type": "string", - "format": "byte", - "description": "The funded PSBT that contains all witness data to send the exact channel\ncapacity amount to the PK script returned in the open channel message in a\nprevious step. Cannot be set at the same time as final_raw_tx." - }, - "pending_chan_id": { - "type": "string", - "format": "byte", - "description": "The pending channel ID of the channel to get the PSBT for." - }, - "final_raw_tx": { - "type": "string", - "format": "byte", - "description": "As an alternative to the signed PSBT with all witness data, the final raw\nwire format transaction can also be specified directly. Cannot be set at the\nsame time as signed_psbt." - } - } - }, - "lnrpcFundingPsbtVerify": { - "type": "object", - "properties": { - "funded_psbt": { - "type": "string", - "format": "byte", - "description": "The funded but not yet signed PSBT that sends the exact channel capacity\namount to the PK script returned in the open channel message in a previous\nstep." - }, - "pending_chan_id": { - "type": "string", - "format": "byte", - "description": "The pending channel ID of the channel to get the PSBT for." - } - } - }, - "lnrpcFundingShim": { - "type": "object", - "properties": { - "chan_point_shim": { - "$ref": "#/definitions/lnrpcChanPointShim", - "description": "A channel shim where the channel point was fully constructed outside\nof lnd's wallet and the transaction might already be published." - }, - "psbt_shim": { - "$ref": "#/definitions/lnrpcPsbtShim", - "description": "A channel shim that uses a PSBT to fund and sign the channel funding\ntransaction." - } - } - }, - "lnrpcFundingShimCancel": { - "type": "object", - "properties": { - "pending_chan_id": { - "type": "string", - "format": "byte", - "description": "The pending channel ID of the channel to cancel the funding shim for." 
- } - } - }, - "lnrpcFundingStateStepResp": { - "type": "object" - }, - "lnrpcFundingTransitionMsg": { - "type": "object", - "properties": { - "shim_register": { - "$ref": "#/definitions/lnrpcFundingShim", - "description": "The funding shim to register. This should be used before any\nchannel funding has began by the remote party, as it is intended as a\npreparatory step for the full channel funding." - }, - "shim_cancel": { - "$ref": "#/definitions/lnrpcFundingShimCancel", - "description": "Used to cancel an existing registered funding shim." - }, - "psbt_verify": { - "$ref": "#/definitions/lnrpcFundingPsbtVerify", - "description": "Used to continue a funding flow that was initiated to be executed\nthrough a PSBT. This step verifies that the PSBT contains the correct\noutputs to fund the channel." - }, - "psbt_finalize": { - "$ref": "#/definitions/lnrpcFundingPsbtFinalize", - "description": "Used to continue a funding flow that was initiated to be executed\nthrough a PSBT. This step finalizes the funded and signed PSBT, finishes\nnegotiation with the peer and finally publishes the resulting funding\ntransaction." 
- } - } - }, - "lnrpcGetAddressBalancesResponse": { - "type": "object", - "properties": { - "addrs": { - "type": "array", - "items": { - "$ref": "#/definitions/lnrpcGetAddressBalancesResponseAddr" - } - } - } - }, - "lnrpcGetAddressBalancesResponseAddr": { - "type": "object", - "properties": { - "address": { - "type": "string", - "title": "The address which has this balance" - }, - "total": { - "type": "number", - "format": "double", - "title": "Total balance in coins" - }, - "stotal": { - "type": "string", - "format": "int64", - "title": "Total balance (atomic units)" - }, - "spendable": { - "type": "number", - "format": "double", - "title": "Balance which is currently spendable (coins)" - }, - "sspendable": { - "type": "string", - "format": "int64", - "title": "Balance which is currently spendable (atomic units)" - }, - "immaturereward": { - "type": "number", - "format": "double", - "title": "Mined coins which have not yet matured (coins)" - }, - "simmaturereward": { - "type": "string", - "format": "int64", - "title": "Mined coins which have not yet matured (atomic units)" - }, - "unconfirmed": { - "type": "number", - "format": "double", - "title": "Unconfirmed balance in coins" - }, - "sunconfirmed": { - "type": "string", - "format": "int64", - "title": "Unconfirmed balance in atomic units" - }, - "outputcount": { - "type": "integer", - "format": "int32", - "title": "The number of transaction outputs which make up the balance" - } - } - }, - "lnrpcGetInfoResponse": { - "type": "object", - "properties": { - "version": { - "type": "string", - "description": "The version of the LND software that the node is running." - }, - "commit_hash": { - "type": "string", - "description": "The SHA1 commit hash that the daemon is compiled with." - }, - "identity_pubkey": { - "type": "string", - "description": "The identity pubkey of the current node." - }, - "alias": { - "type": "string", - "title": "If applicable, the alias of the current node, e.g. 
\"bob\"" - }, - "color": { - "type": "string", - "title": "The color of the current node in hex code format" - }, - "num_pending_channels": { - "type": "integer", - "format": "int64", - "title": "Number of pending channels" - }, - "num_active_channels": { - "type": "integer", - "format": "int64", - "title": "Number of active channels" - }, - "num_inactive_channels": { - "type": "integer", - "format": "int64", - "title": "Number of inactive channels" - }, - "num_peers": { - "type": "integer", - "format": "int64", - "title": "Number of peers" - }, - "block_height": { - "type": "integer", - "format": "int64", - "title": "The node's current view of the height of the best block" - }, - "block_hash": { - "type": "string", - "title": "The node's current view of the hash of the best block" - }, - "best_header_timestamp": { - "type": "string", - "format": "int64", - "title": "Timestamp of the block best known to the wallet" - }, - "synced_to_chain": { - "type": "boolean", - "format": "boolean", - "title": "Whether the wallet's view is synced to the main chain" - }, - "synced_to_graph": { - "type": "boolean", - "format": "boolean", - "description": "Whether we consider ourselves synced with the public channel graph." - }, - "testnet": { - "type": "boolean", - "format": "boolean", - "title": "Whether the current node is connected to testnet. This field is\ndeprecated and the network field should be used instead" - }, - "chains": { - "type": "array", - "items": { - "$ref": "#/definitions/lnrpcChain" - }, - "title": "A list of active chains the node is connected to" - }, - "uris": { - "type": "array", - "items": { - "type": "string" - }, - "description": "The URIs of the current node." - }, - "features": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/lnrpcFeature" - }, - "description": "Features that our node has advertised in our init message, node\nannouncements and invoices." 
- } - } - }, - "lnrpcGetRecoveryInfoResponse": { - "type": "object", - "properties": { - "recovery_mode": { - "type": "boolean", - "format": "boolean", - "title": "Whether the wallet is in recovery mode" - }, - "recovery_finished": { - "type": "boolean", - "format": "boolean", - "title": "Whether the wallet recovery progress is finished" - }, - "progress": { - "type": "number", - "format": "double", - "description": "The recovery progress, ranging from 0 to 1." - } - } - }, - "lnrpcGraphTopologyUpdate": { - "type": "object", - "properties": { - "node_updates": { - "type": "array", - "items": { - "$ref": "#/definitions/lnrpcNodeUpdate" - } - }, - "channel_updates": { - "type": "array", - "items": { - "$ref": "#/definitions/lnrpcChannelEdgeUpdate" - } - }, - "closed_chans": { - "type": "array", - "items": { - "$ref": "#/definitions/lnrpcClosedChannelUpdate" - } - } - } - }, - "lnrpcHTLC": { - "type": "object", - "properties": { - "incoming": { - "type": "boolean", - "format": "boolean" - }, - "amount": { - "type": "string", - "format": "int64" - }, - "hash_lock": { - "type": "string", - "format": "byte" - }, - "expiration_height": { - "type": "integer", - "format": "int64" - }, - "htlc_index": { - "type": "string", - "format": "uint64", - "description": "Index identifying the htlc on the channel." - }, - "forwarding_channel": { - "type": "string", - "format": "uint64", - "description": "If this HTLC is involved in a forwarding operation, this field indicates\nthe forwarding channel. For an outgoing htlc, it is the incoming channel.\nFor an incoming htlc, it is the outgoing channel. When the htlc\noriginates from this node or this node is the final destination,\nforwarding_channel will be zero. The forwarding channel will also be zero\nfor htlcs that need to be forwarded but don't have a forwarding decision\npersisted yet." 
- }, - "forwarding_htlc_index": { - "type": "string", - "format": "uint64", - "description": "Index identifying the htlc on the forwarding channel." - } - } - }, - "lnrpcHTLCAttempt": { - "type": "object", - "properties": { - "status": { - "$ref": "#/definitions/HTLCAttemptHTLCStatus", - "description": "The status of the HTLC." - }, - "route": { - "$ref": "#/definitions/lnrpcRoute", - "description": "The route taken by this HTLC." - }, - "attempt_time_ns": { - "type": "string", - "format": "int64", - "description": "The time in UNIX nanoseconds at which this HTLC was sent." - }, - "resolve_time_ns": { - "type": "string", - "format": "int64", - "description": "The time in UNIX nanoseconds at which this HTLC was settled or failed.\nThis value will not be set if the HTLC is still IN_FLIGHT." - }, - "failure": { - "$ref": "#/definitions/lnrpcFailure", - "description": "Detailed htlc failure info." - }, - "preimage": { - "type": "string", - "format": "byte", - "description": "The preimage that was used to settle the HTLC." - } - } - }, - "lnrpcHop": { - "type": "object", - "properties": { - "chan_id": { - "type": "string", - "format": "uint64", - "description": "The unique channel ID for the channel. The first 3 bytes are the block\nheight, the next 3 the index within the block, and the last 2 bytes are the\noutput index for the channel." - }, - "chan_capacity": { - "type": "string", - "format": "int64" - }, - "amt_to_forward": { - "type": "string", - "format": "int64" - }, - "fee": { - "type": "string", - "format": "int64" - }, - "expiry": { - "type": "integer", - "format": "int64" - }, - "amt_to_forward_msat": { - "type": "string", - "format": "int64" - }, - "fee_msat": { - "type": "string", - "format": "int64" - }, - "pub_key": { - "type": "string", - "description": "An optional public key of the hop. If the public key is given, the payment\ncan be executed without relying on a copy of the channel graph." 
- }, - "tlv_payload": { - "type": "boolean", - "format": "boolean", - "description": "If set to true, then this hop will be encoded using the new variable length\nTLV format. Note that if any custom tlv_records below are specified, then\nthis field MUST be set to true for them to be encoded properly." - }, - "mpp_record": { - "$ref": "#/definitions/lnrpcMPPRecord", - "description": "An optional TLV record that signals the use of an MPP payment. If present,\nthe receiver will enforce that that the same mpp_record is included in the\nfinal hop payload of all non-zero payments in the HTLC set. If empty, a\nregular single-shot payment is or was attempted." - }, - "custom_records": { - "type": "object", - "additionalProperties": { - "type": "string", - "format": "byte" - }, - "description": "An optional set of key-value TLV records. This is useful within the context\nof the SendToRoute call as it allows callers to specify arbitrary K-V pairs\nto drop off at each hop within the onion." - } - } - }, - "lnrpcHopHint": { - "type": "object", - "properties": { - "node_id": { - "type": "string", - "description": "The public key of the node at the start of the channel." - }, - "chan_id": { - "type": "string", - "format": "uint64", - "description": "The unique identifier of the channel." - }, - "fee_base_msat": { - "type": "integer", - "format": "int64", - "description": "The base fee of the channel denominated in millisatoshis." - }, - "fee_proportional_millionths": { - "type": "integer", - "format": "int64", - "description": "The fee rate of the channel for sending one satoshi across it denominated in\nmillionths of a satoshi." - }, - "cltv_expiry_delta": { - "type": "integer", - "format": "int64", - "description": "The time-lock delta of the channel." 
- } - } - }, - "lnrpcInitiator": { - "type": "string", - "enum": [ - "INITIATOR_UNKNOWN", - "INITIATOR_LOCAL", - "INITIATOR_REMOTE", - "INITIATOR_BOTH" - ], - "default": "INITIATOR_UNKNOWN" - }, - "lnrpcInvoice": { - "type": "object", - "properties": { - "memo": { - "type": "string", - "description": "An optional memo to attach along with the invoice. Used for record keeping\npurposes for the invoice's creator, and will also be set in the description\nfield of the encoded payment request if the description_hash field is not\nbeing used." - }, - "r_preimage": { - "type": "string", - "format": "byte", - "description": "The hex-encoded preimage (32 byte) which will allow settling an incoming\nHTLC payable to this preimage. When using REST, this field must be encoded\nas base64." - }, - "r_hash": { - "type": "string", - "format": "byte", - "description": "The hash of the preimage. When using REST, this field must be encoded as\nbase64." - }, - "value": { - "type": "string", - "format": "int64", - "description": "The fields value and value_msat are mutually exclusive.", - "title": "The value of this invoice in satoshis" - }, - "value_msat": { - "type": "string", - "format": "int64", - "description": "The fields value and value_msat are mutually exclusive.", - "title": "The value of this invoice in millisatoshis" - }, - "settled": { - "type": "boolean", - "format": "boolean", - "title": "Whether this invoice has been fulfilled" - }, - "creation_date": { - "type": "string", - "format": "int64", - "title": "When this invoice was created" - }, - "settle_date": { - "type": "string", - "format": "int64", - "title": "When this invoice was settled" - }, - "payment_request": { - "type": "string", - "description": "A bare-bones invoice for a payment within the Lightning Network. With the\ndetails of the invoice, the sender has all the data necessary to send a\npayment to the recipient." 
- }, - "description_hash": { - "type": "string", - "format": "byte", - "description": "Hash (SHA-256) of a description of the payment. Used if the description of\npayment (memo) is too long to naturally fit within the description field\nof an encoded payment request. When using REST, this field must be encoded\nas base64." - }, - "expiry": { - "type": "string", - "format": "int64", - "description": "Payment request expiry time in seconds. Default is 3600 (1 hour)." - }, - "fallback_addr": { - "type": "string", - "description": "Fallback on-chain address." - }, - "cltv_expiry": { - "type": "string", - "format": "uint64", - "description": "Delta to use for the time-lock of the CLTV extended to the final hop." - }, - "route_hints": { - "type": "array", - "items": { - "$ref": "#/definitions/lnrpcRouteHint" - }, - "description": "Route hints that can each be individually used to assist in reaching the\ninvoice's destination." - }, - "private": { - "type": "boolean", - "format": "boolean", - "description": "Whether this invoice should include routing hints for private channels." - }, - "add_index": { - "type": "string", - "format": "uint64", - "description": "The \"add\" index of this invoice. Each newly created invoice will increment\nthis index making it monotonically increasing. Callers to the\nSubscribeInvoices call can use this to instantly get notified of all added\ninvoices with an add_index greater than this one." - }, - "settle_index": { - "type": "string", - "format": "uint64", - "description": "The \"settle\" index of this invoice. Each newly settled invoice will\nincrement this index making it monotonically increasing. Callers to the\nSubscribeInvoices call can use this to instantly get notified of all\nsettled invoices with an settle_index greater than this one." - }, - "amt_paid": { - "type": "string", - "format": "int64", - "description": "Deprecated, use amt_paid_sat or amt_paid_msat." 
- }, - "amt_paid_sat": { - "type": "string", - "format": "int64", - "description": "The amount that was accepted for this invoice, in satoshis. This will ONLY\nbe set if this invoice has been settled. We provide this field as if the\ninvoice was created with a zero value, then we need to record what amount\nwas ultimately accepted. Additionally, it's possible that the sender paid\nMORE that was specified in the original invoice. So we'll record that here\nas well." - }, - "amt_paid_msat": { - "type": "string", - "format": "int64", - "description": "The amount that was accepted for this invoice, in millisatoshis. This will\nONLY be set if this invoice has been settled. We provide this field as if\nthe invoice was created with a zero value, then we need to record what\namount was ultimately accepted. Additionally, it's possible that the sender\npaid MORE that was specified in the original invoice. So we'll record that\nhere as well." - }, - "state": { - "$ref": "#/definitions/InvoiceInvoiceState", - "description": "The state the invoice is in." - }, - "htlcs": { - "type": "array", - "items": { - "$ref": "#/definitions/lnrpcInvoiceHTLC" - }, - "description": "List of HTLCs paying to this invoice [EXPERIMENTAL]." - }, - "features": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/lnrpcFeature" - }, - "description": "List of features advertised on the invoice." - }, - "is_keysend": { - "type": "boolean", - "format": "boolean", - "description": "Indicates if this invoice was a spontaneous payment that arrived via keysend\n[EXPERIMENTAL]." - } - } - }, - "lnrpcInvoiceHTLC": { - "type": "object", - "properties": { - "chan_id": { - "type": "string", - "format": "uint64", - "description": "Short channel id over which the htlc was received." - }, - "htlc_index": { - "type": "string", - "format": "uint64", - "description": "Index identifying the htlc on the channel." 
- }, - "amt_msat": { - "type": "string", - "format": "uint64", - "description": "The amount of the htlc in msat." - }, - "accept_height": { - "type": "integer", - "format": "int32", - "description": "Block height at which this htlc was accepted." - }, - "accept_time": { - "type": "string", - "format": "int64", - "description": "Time at which this htlc was accepted." - }, - "resolve_time": { - "type": "string", - "format": "int64", - "description": "Time at which this htlc was settled or canceled." - }, - "expiry_height": { - "type": "integer", - "format": "int32", - "description": "Block height at which this htlc expires." - }, - "state": { - "$ref": "#/definitions/lnrpcInvoiceHTLCState", - "description": "Current state the htlc is in." - }, - "custom_records": { - "type": "object", - "additionalProperties": { - "type": "string", - "format": "byte" - }, - "description": "Custom tlv records." - }, - "mpp_total_amt_msat": { - "type": "string", - "format": "uint64", - "description": "The total amount of the mpp payment in msat." - } - }, - "title": "Details of an HTLC that paid to an invoice" - }, - "lnrpcInvoiceHTLCState": { - "type": "string", - "enum": [ - "ACCEPTED", - "SETTLED", - "CANCELED" - ], - "default": "ACCEPTED" - }, - "lnrpcKeyDescriptor": { - "type": "object", - "properties": { - "raw_key_bytes": { - "type": "string", - "format": "byte", - "description": "The raw bytes of the key being identified." - }, - "key_loc": { - "$ref": "#/definitions/lnrpcKeyLocator", - "description": "The key locator that identifies which key to use for signing." - } - } - }, - "lnrpcKeyLocator": { - "type": "object", - "properties": { - "key_family": { - "type": "integer", - "format": "int32", - "description": "The family of key being identified." - }, - "key_index": { - "type": "integer", - "format": "int32", - "description": "The precise index of the key being identified." 
- } - } - }, - "lnrpcLightningAddress": { - "type": "object", - "properties": { - "pubkey": { - "type": "string", - "title": "The identity pubkey of the Lightning node" - }, - "host": { - "type": "string", - "title": "The network location of the lightning node, e.g. `69.69.69.69:1337` or\n`localhost:10011`" - } - } - }, - "lnrpcLightningNode": { - "type": "object", - "properties": { - "last_update": { - "type": "integer", - "format": "int64" - }, - "pub_key": { - "type": "string" - }, - "alias": { - "type": "string" - }, - "addresses": { - "type": "array", - "items": { - "$ref": "#/definitions/lnrpcNodeAddress" - } - }, - "color": { - "type": "string" - }, - "features": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/lnrpcFeature" - } - } - }, - "description": "An individual vertex/node within the channel graph. A node is\nconnected to other nodes by one or more channel edges emanating from it. As the\ngraph is directed, a node will also have an incoming edge attached to it for\neach outgoing edge." - }, - "lnrpcListChannelsResponse": { - "type": "object", - "properties": { - "channels": { - "type": "array", - "items": { - "$ref": "#/definitions/lnrpcChannel" - }, - "title": "The list of active channels" - } - } - }, - "lnrpcListInvoiceResponse": { - "type": "object", - "properties": { - "invoices": { - "type": "array", - "items": { - "$ref": "#/definitions/lnrpcInvoice" - }, - "description": "A list of invoices from the time slice of the time series specified in the\nrequest." - }, - "last_index_offset": { - "type": "string", - "format": "uint64", - "description": "The index of the last item in the set of returned invoices. This can be used\nto seek further, pagination style." - }, - "first_index_offset": { - "type": "string", - "format": "uint64", - "description": "The index of the last item in the set of returned invoices. This can be used\nto seek backwards, pagination style." 
- } - } - }, - "lnrpcListMacaroonIDsResponse": { - "type": "object", - "properties": { - "root_key_ids": { - "type": "array", - "items": { - "type": "string", - "format": "uint64" - }, - "description": "The list of root key IDs that are in use." - } - } - }, - "lnrpcListPaymentsResponse": { - "type": "object", - "properties": { - "payments": { - "type": "array", - "items": { - "$ref": "#/definitions/lnrpcPayment" - }, - "title": "The list of payments" - }, - "first_index_offset": { - "type": "string", - "format": "uint64", - "description": "The index of the first item in the set of returned payments. This can be\nused as the index_offset to continue seeking backwards in the next request." - }, - "last_index_offset": { - "type": "string", - "format": "uint64", - "description": "The index of the last item in the set of returned payments. This can be used\nas the index_offset to continue seeking forwards in the next request." - } - } - }, - "lnrpcListPeersResponse": { - "type": "object", - "properties": { - "peers": { - "type": "array", - "items": { - "$ref": "#/definitions/lnrpcPeer" - }, - "title": "The list of currently connected peers" - } - } - }, - "lnrpcListPermissionsResponse": { - "type": "object", - "properties": { - "method_permissions": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/lnrpcMacaroonPermissionList" - }, - "description": "A map between all RPC method URIs and their required macaroon permissions to\naccess them." - } - } - }, - "lnrpcListUnspentResponse": { - "type": "object", - "properties": { - "utxos": { - "type": "array", - "items": { - "$ref": "#/definitions/lnrpcUtxo" - }, - "title": "A list of utxos" - } - } - }, - "lnrpcMPPRecord": { - "type": "object", - "properties": { - "payment_addr": { - "type": "string", - "format": "byte", - "description": "A unique, random identifier used to authenticate the sender as the intended\npayer of a multi-path payment. 
The payment_addr must be the same for all\nsubpayments, and match the payment_addr provided in the receiver's invoice.\nThe same payment_addr must be used on all subpayments." - }, - "total_amt_msat": { - "type": "string", - "format": "int64", - "description": "The total amount in milli-satoshis being sent as part of a larger multi-path\npayment. The caller is responsible for ensuring subpayments to the same node\nand payment_hash sum exactly to total_amt_msat. The same\ntotal_amt_msat must be used on all subpayments." - } - } - }, - "lnrpcMacaroonPermission": { - "type": "object", - "properties": { - "entity": { - "type": "string", - "description": "The entity a permission grants access to." - }, - "action": { - "type": "string", - "description": "The action that is granted." - } - } - }, - "lnrpcMacaroonPermissionList": { - "type": "object", - "properties": { - "permissions": { - "type": "array", - "items": { - "$ref": "#/definitions/lnrpcMacaroonPermission" - }, - "description": "A list of macaroon permissions." - } - } - }, - "lnrpcMultiChanBackup": { - "type": "object", - "properties": { - "chan_points": { - "type": "array", - "items": { - "$ref": "#/definitions/lnrpcChannelPoint" - }, - "description": "Is the set of all channels that are included in this multi-channel backup." - }, - "multi_chan_backup": { - "type": "string", - "format": "byte", - "description": "A single encrypted blob containing all the static channel backups of the\nchannel listed above. This can be stored as a single file or blob, and\nsafely be replaced with any prior/future versions. When using REST, this\nfield must be encoded as base64." 
- } - } - }, - "lnrpcNetworkInfo": { - "type": "object", - "properties": { - "graph_diameter": { - "type": "integer", - "format": "int64" - }, - "avg_out_degree": { - "type": "number", - "format": "double" - }, - "max_out_degree": { - "type": "integer", - "format": "int64" - }, - "num_nodes": { - "type": "integer", - "format": "int64" - }, - "num_channels": { - "type": "integer", - "format": "int64" - }, - "total_network_capacity": { - "type": "string", - "format": "int64" - }, - "avg_channel_size": { - "type": "number", - "format": "double" - }, - "min_channel_size": { - "type": "string", - "format": "int64" - }, - "max_channel_size": { - "type": "string", - "format": "int64" - }, - "median_channel_size_sat": { - "type": "string", - "format": "int64" - }, - "num_zombie_chans": { - "type": "string", - "format": "uint64", - "description": "The number of edges marked as zombies." - } - } - }, - "lnrpcNewAddressResponse": { - "type": "object", - "properties": { - "address": { - "type": "string", - "title": "The newly generated wallet address" - } - } - }, - "lnrpcNodeAddress": { - "type": "object", - "properties": { - "network": { - "type": "string" - }, - "addr": { - "type": "string" - } - } - }, - "lnrpcNodeInfo": { - "type": "object", - "properties": { - "node": { - "$ref": "#/definitions/lnrpcLightningNode", - "description": "An individual vertex/node within the channel graph. A node is\nconnected to other nodes by one or more channel edges emanating from it. As\nthe graph is directed, a node will also have an incoming edge attached to\nit for each outgoing edge." - }, - "num_channels": { - "type": "integer", - "format": "int64", - "description": "The total number of channels for the node." - }, - "total_capacity": { - "type": "string", - "format": "int64", - "description": "The sum of all channels capacity for the node, denominated in satoshis." 
- }, - "channels": { - "type": "array", - "items": { - "$ref": "#/definitions/lnrpcChannelEdge" - }, - "description": "A list of all public channels for the node." - } - } - }, - "lnrpcNodeMetricType": { - "type": "string", - "enum": [ - "UNKNOWN", - "BETWEENNESS_CENTRALITY" - ], - "default": "UNKNOWN" - }, - "lnrpcNodeMetricsResponse": { - "type": "object", - "properties": { - "betweenness_centrality": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/lnrpcFloatMetric" - }, - "description": "Betweenness centrality is the sum of the ratio of shortest paths that pass\nthrough the node for each pair of nodes in the graph (not counting paths\nstarting or ending at this node).\nMap of node pubkey to betweenness centrality of the node. Normalized\nvalues are in the [0,1] closed interval." - } - } - }, - "lnrpcNodePair": { - "type": "object", - "properties": { - "from": { - "type": "string", - "format": "byte", - "description": "The sending node of the pair. When using REST, this field must be encoded as\nbase64." - }, - "to": { - "type": "string", - "format": "byte", - "description": "The receiving node of the pair. When using REST, this field must be encoded\nas base64." - } - } - }, - "lnrpcNodeUpdate": { - "type": "object", - "properties": { - "addresses": { - "type": "array", - "items": { - "type": "string" - } - }, - "identity_key": { - "type": "string" - }, - "global_features": { - "type": "string", - "format": "byte" - }, - "alias": { - "type": "string" - }, - "color": { - "type": "string" - } - } - }, - "lnrpcOpenChannelRequest": { - "type": "object", - "properties": { - "node_pubkey": { - "type": "string", - "format": "byte", - "description": "The pubkey of the node to open a channel with. When using REST, this field\nmust be encoded as base64." - }, - "node_pubkey_string": { - "type": "string", - "description": "The hex encoded pubkey of the node to open a channel with. 
Deprecated now\nthat the REST gateway supports base64 encoding of bytes fields." - }, - "local_funding_amount": { - "type": "string", - "format": "int64", - "title": "The number of satoshis the wallet should commit to the channel" - }, - "push_sat": { - "type": "string", - "format": "int64", - "title": "The number of satoshis to push to the remote side as part of the initial\ncommitment state" - }, - "target_conf": { - "type": "integer", - "format": "int32", - "description": "The target number of blocks that the funding transaction should be\nconfirmed by." - }, - "sat_per_byte": { - "type": "string", - "format": "int64", - "description": "A manual fee rate set in sat/byte that should be used when crafting the\nfunding transaction." - }, - "private": { - "type": "boolean", - "format": "boolean", - "description": "Whether this channel should be private, not announced to the greater\nnetwork." - }, - "min_htlc_msat": { - "type": "string", - "format": "int64", - "description": "The minimum value in millisatoshi we will require for incoming HTLCs on\nthe channel." - }, - "remote_csv_delay": { - "type": "integer", - "format": "int64", - "description": "The delay we require on the remote's commitment transaction. If this is\nnot set, it will be scaled automatically with the channel size." - }, - "min_confs": { - "type": "integer", - "format": "int32", - "description": "The minimum number of confirmations each one of your outputs used for\nthe funding transaction must satisfy." - }, - "spend_unconfirmed": { - "type": "boolean", - "format": "boolean", - "description": "Whether unconfirmed outputs should be used as inputs for the funding\ntransaction." - }, - "close_address": { - "type": "string", - "description": "Close address is an optional address which specifies the address to which\nfunds should be paid out to upon cooperative close. This field may only be\nset if the peer supports the option upfront feature bit (call listpeers\nto check). 
The remote peer will only accept cooperative closes to this\naddress if it is set.\n\nNote: If this value is set on channel creation, you will *not* be able to\ncooperatively close out to a different address." - }, - "funding_shim": { - "$ref": "#/definitions/lnrpcFundingShim", - "description": "Funding shims are an optional argument that allow the caller to intercept\ncertain funding functionality. For example, a shim can be provided to use a\nparticular key for the commitment key (ideally cold) rather than use one\nthat is generated by the wallet as normal, or signal that signing will be\ncarried out in an interactive manner (PSBT based)." - }, - "remote_max_value_in_flight_msat": { - "type": "string", - "format": "uint64", - "description": "The maximum amount of coins in millisatoshi that can be pending within\nthe channel. It only applies to the remote party." - }, - "remote_max_htlcs": { - "type": "integer", - "format": "int64", - "description": "The maximum number of concurrent HTLCs we will allow the remote party to add\nto the commitment transaction." - }, - "max_local_csv": { - "type": "integer", - "format": "int64", - "description": "Max local csv is the maximum csv delay we will allow for our own commitment\ntransaction." - } - } - }, - "lnrpcOpenStatusUpdate": { - "type": "object", - "properties": { - "chan_pending": { - "$ref": "#/definitions/lnrpcPendingUpdate", - "description": "Signals that the channel is now fully negotiated and the funding\ntransaction published." - }, - "chan_open": { - "$ref": "#/definitions/lnrpcChannelOpenUpdate", - "description": "Signals that the channel's funding transaction has now reached the\nrequired number of confirmations on chain and can be used." - }, - "psbt_fund": { - "$ref": "#/definitions/lnrpcReadyForPsbtFunding", - "description": "Signals that the funding process has been suspended and the construction\nof a PSBT that funds the channel PK script is now required." 
- }, - "pending_chan_id": { - "type": "string", - "format": "byte", - "description": "The pending channel ID of the created channel. This value may be used to\nfurther the funding flow manually via the FundingStateStep method." - } - } - }, - "lnrpcOutPoint": { - "type": "object", - "properties": { - "txid_bytes": { - "type": "string", - "format": "byte", - "description": "Raw bytes representing the transaction id." - }, - "txid_str": { - "type": "string", - "description": "Reversed, hex-encoded string representing the transaction id." - }, - "output_index": { - "type": "integer", - "format": "int64", - "description": "The index of the output on the transaction." - } - } - }, - "lnrpcPayReq": { - "type": "object", - "properties": { - "destination": { - "type": "string" - }, - "payment_hash": { - "type": "string" - }, - "num_satoshis": { - "type": "string", - "format": "int64" - }, - "timestamp": { - "type": "string", - "format": "int64" - }, - "expiry": { - "type": "string", - "format": "int64" - }, - "description": { - "type": "string" - }, - "description_hash": { - "type": "string" - }, - "fallback_addr": { - "type": "string" - }, - "cltv_expiry": { - "type": "string", - "format": "int64" - }, - "route_hints": { - "type": "array", - "items": { - "$ref": "#/definitions/lnrpcRouteHint" - } - }, - "payment_addr": { - "type": "string", - "format": "byte" - }, - "num_msat": { - "type": "string", - "format": "int64" - }, - "features": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/lnrpcFeature" - } - } - } - }, - "lnrpcPayment": { - "type": "object", - "properties": { - "payment_hash": { - "type": "string", - "title": "The payment hash" - }, - "value": { - "type": "string", - "format": "int64", - "description": "Deprecated, use value_sat or value_msat." 
- }, - "creation_date": { - "type": "string", - "format": "int64", - "title": "Deprecated, use creation_time_ns" - }, - "fee": { - "type": "string", - "format": "int64", - "description": "Deprecated, use fee_sat or fee_msat." - }, - "payment_preimage": { - "type": "string", - "title": "The payment preimage" - }, - "value_sat": { - "type": "string", - "format": "int64", - "title": "The value of the payment in satoshis" - }, - "value_msat": { - "type": "string", - "format": "int64", - "title": "The value of the payment in milli-satoshis" - }, - "payment_request": { - "type": "string", - "description": "The optional payment request being fulfilled." - }, - "status": { - "$ref": "#/definitions/PaymentPaymentStatus", - "description": "The status of the payment." - }, - "fee_sat": { - "type": "string", - "format": "int64", - "title": "The fee paid for this payment in satoshis" - }, - "fee_msat": { - "type": "string", - "format": "int64", - "title": "The fee paid for this payment in milli-satoshis" - }, - "creation_time_ns": { - "type": "string", - "format": "int64", - "description": "The time in UNIX nanoseconds at which the payment was created." - }, - "htlcs": { - "type": "array", - "items": { - "$ref": "#/definitions/lnrpcHTLCAttempt" - }, - "description": "The HTLCs made in attempt to settle the payment." - }, - "payment_index": { - "type": "string", - "format": "uint64", - "description": "The creation index of this payment. Each payment can be uniquely identified\nby this index, which may not strictly increment by 1 for payments made in\nolder versions of lnd." 
- }, - "failure_reason": { - "$ref": "#/definitions/lnrpcPaymentFailureReason" - } - } - }, - "lnrpcPaymentFailureReason": { - "type": "string", - "enum": [ - "FAILURE_REASON_NONE", - "FAILURE_REASON_TIMEOUT", - "FAILURE_REASON_NO_ROUTE", - "FAILURE_REASON_ERROR", - "FAILURE_REASON_INCORRECT_PAYMENT_DETAILS", - "FAILURE_REASON_INSUFFICIENT_BALANCE" - ], - "default": "FAILURE_REASON_NONE", - "description": " - FAILURE_REASON_NONE: Payment isn't failed (yet).\n - FAILURE_REASON_TIMEOUT: There are more routes to try, but the payment timeout was exceeded.\n - FAILURE_REASON_NO_ROUTE: All possible routes were tried and failed permanently. Or were no\nroutes to the destination at all.\n - FAILURE_REASON_ERROR: A non-recoverable error has occured.\n - FAILURE_REASON_INCORRECT_PAYMENT_DETAILS: Payment details incorrect (unknown hash, invalid amt or\ninvalid final cltv delta)\n - FAILURE_REASON_INSUFFICIENT_BALANCE: Insufficient local balance." - }, - "lnrpcPeer": { - "type": "object", - "properties": { - "pub_key": { - "type": "string", - "title": "The identity pubkey of the peer" - }, - "address": { - "type": "string", - "title": "Network address of the peer; eg `127.0.0.1:10011`" - }, - "bytes_sent": { - "type": "string", - "format": "uint64", - "title": "Bytes of data transmitted to this peer" - }, - "bytes_recv": { - "type": "string", - "format": "uint64", - "title": "Bytes of data transmitted from this peer" - }, - "sat_sent": { - "type": "string", - "format": "int64", - "title": "Satoshis sent to this peer" - }, - "sat_recv": { - "type": "string", - "format": "int64", - "title": "Satoshis received from this peer" - }, - "inbound": { - "type": "boolean", - "format": "boolean", - "title": "A channel is inbound if the counterparty initiated the channel" - }, - "ping_time": { - "type": "string", - "format": "int64", - "title": "Ping time to this peer" - }, - "sync_type": { - "$ref": "#/definitions/PeerSyncType", - "description": "The type of sync we are currently 
performing with this peer." - }, - "features": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/lnrpcFeature" - }, - "description": "Features advertised by the remote peer in their init message." - }, - "errors": { - "type": "array", - "items": { - "$ref": "#/definitions/lnrpcTimestampedError" - }, - "description": "The latest errors received from our peer with timestamps, limited to the 10\nmost recent errors. These errors are tracked across peer connections, but\nare not persisted across lnd restarts. Note that these errors are only\nstored for peers that we have channels open with, to prevent peers from\nspamming us with errors at no cost." - }, - "flap_count": { - "type": "integer", - "format": "int32", - "description": "The number of times we have recorded this peer going offline or coming\nonline, recorded across restarts. Note that this value is decreased over\ntime if the peer has not recently flapped, so that we can forgive peers\nwith historically high flap counts." - }, - "last_flap_ns": { - "type": "string", - "format": "int64", - "description": "The timestamp of the last flap we observed for this peer. If this value is\nzero, we have not observed any flaps for this peer." - } - } - }, - "lnrpcPeerEvent": { - "type": "object", - "properties": { - "pub_key": { - "type": "string", - "description": "The identity pubkey of the peer." 
- }, - "type": { - "$ref": "#/definitions/PeerEventEventType" - } - } - }, - "lnrpcPendingChannelsResponse": { - "type": "object", - "properties": { - "total_limbo_balance": { - "type": "string", - "format": "int64", - "title": "The balance in satoshis encumbered in pending channels" - }, - "pending_open_channels": { - "type": "array", - "items": { - "$ref": "#/definitions/PendingChannelsResponsePendingOpenChannel" - }, - "title": "Channels pending opening" - }, - "pending_closing_channels": { - "type": "array", - "items": { - "$ref": "#/definitions/PendingChannelsResponseClosedChannel" - }, - "description": "Deprecated: Channels pending closing previously contained cooperatively\nclosed channels with a single confirmation. These channels are now\nconsidered closed from the time we see them on chain." - }, - "pending_force_closing_channels": { - "type": "array", - "items": { - "$ref": "#/definitions/PendingChannelsResponseForceClosedChannel" - }, - "title": "Channels pending force closing" - }, - "waiting_close_channels": { - "type": "array", - "items": { - "$ref": "#/definitions/PendingChannelsResponseWaitingCloseChannel" - }, - "title": "Channels waiting for closing tx to confirm" - } - } - }, - "lnrpcPendingHTLC": { - "type": "object", - "properties": { - "incoming": { - "type": "boolean", - "format": "boolean", - "title": "The direction within the channel that the htlc was sent" - }, - "amount": { - "type": "string", - "format": "int64", - "title": "The total value of the htlc" - }, - "outpoint": { - "type": "string", - "title": "The final output to be swept back to the user's wallet" - }, - "maturity_height": { - "type": "integer", - "format": "int64", - "title": "The next block height at which we can spend the current stage" - }, - "blocks_til_maturity": { - "type": "integer", - "format": "int32", - "description": "The number of blocks remaining until the current stage can be swept.\nNegative values indicate how many blocks have passed since 
becoming\nmature." - }, - "stage": { - "type": "integer", - "format": "int64", - "title": "Indicates whether the htlc is in its first or second stage of recovery" - } - } - }, - "lnrpcPendingUpdate": { - "type": "object", - "properties": { - "txid": { - "type": "string", - "format": "byte" - }, - "output_index": { - "type": "integer", - "format": "int64" - } - } - }, - "lnrpcPolicyUpdateRequest": { - "type": "object", - "properties": { - "global": { - "type": "boolean", - "format": "boolean", - "description": "If set, then this update applies to all currently active channels." - }, - "chan_point": { - "$ref": "#/definitions/lnrpcChannelPoint", - "description": "If set, this update will target a specific channel." - }, - "base_fee_msat": { - "type": "string", - "format": "int64", - "description": "The base fee charged regardless of the number of milli-satoshis sent." - }, - "fee_rate": { - "type": "number", - "format": "double", - "description": "The effective fee rate in milli-satoshis. The precision of this value\ngoes up to 6 decimal places, so 1e-6." - }, - "time_lock_delta": { - "type": "integer", - "format": "int64", - "description": "The required timelock delta for HTLCs forwarded over the channel." - }, - "max_htlc_msat": { - "type": "string", - "format": "uint64", - "description": "If set, the maximum HTLC size in milli-satoshis. If unset, the maximum\nHTLC will be unchanged." - }, - "min_htlc_msat": { - "type": "string", - "format": "uint64", - "description": "The minimum HTLC size in milli-satoshis. Only applied if\nmin_htlc_msat_specified is true." - }, - "min_htlc_msat_specified": { - "type": "boolean", - "format": "boolean", - "description": "If true, min_htlc_msat is applied." 
- } - } - }, - "lnrpcPolicyUpdateResponse": { - "type": "object" - }, - "lnrpcPsbtShim": { - "type": "object", - "properties": { - "pending_chan_id": { - "type": "string", - "format": "byte", - "description": "A unique identifier of 32 random bytes that will be used as the pending\nchannel ID to identify the PSBT state machine when interacting with it and\non the wire protocol to initiate the funding request." - }, - "base_psbt": { - "type": "string", - "format": "byte", - "description": "An optional base PSBT the new channel output will be added to. If this is\nnon-empty, it must be a binary serialized PSBT." - }, - "no_publish": { - "type": "boolean", - "format": "boolean", - "description": "If a channel should be part of a batch (multiple channel openings in one\ntransaction), it can be dangerous if the whole batch transaction is\npublished too early before all channel opening negotiations are completed.\nThis flag prevents this particular channel from broadcasting the transaction\nafter the negotiation with the remote peer. In a batch of channel openings\nthis flag should be set to true for every channel but the very last." - } - } - }, - "lnrpcQueryRoutesResponse": { - "type": "object", - "properties": { - "routes": { - "type": "array", - "items": { - "$ref": "#/definitions/lnrpcRoute" - }, - "description": "The route that results from the path finding operation. This is still a\nrepeated field to retain backwards compatibility." - }, - "success_prob": { - "type": "number", - "format": "double", - "title": "The success probability of the returned route based on the current mission\ncontrol state. [EXPERIMENTAL]" - } - } - }, - "lnrpcReadyForPsbtFunding": { - "type": "object", - "properties": { - "funding_address": { - "type": "string", - "description": "The P2WSH address of the channel funding multisig address that the below\nspecified amount in satoshis needs to be sent to." 
- }, - "funding_amount": { - "type": "string", - "format": "int64", - "description": "The exact amount in satoshis that needs to be sent to the above address to\nfund the pending channel." - }, - "psbt": { - "type": "string", - "format": "byte", - "description": "A raw PSBT that contains the pending channel output. If a base PSBT was\nprovided in the PsbtShim, this is the base PSBT with one additional output.\nIf no base PSBT was specified, this is an otherwise empty PSBT with exactly\none output." - } - } - }, - "lnrpcResolution": { - "type": "object", - "properties": { - "resolution_type": { - "$ref": "#/definitions/lnrpcResolutionType", - "description": "The type of output we are resolving." - }, - "outcome": { - "$ref": "#/definitions/lnrpcResolutionOutcome", - "description": "The outcome of our on chain action that resolved the outpoint." - }, - "outpoint": { - "$ref": "#/definitions/lnrpcOutPoint", - "description": "The outpoint that was spent by the resolution." - }, - "amount_sat": { - "type": "string", - "format": "uint64", - "description": "The amount that was claimed by the resolution." - }, - "sweep_txid": { - "type": "string", - "description": "The hex-encoded transaction ID of the sweep transaction that spent the\noutput." - } - } - }, - "lnrpcResolutionOutcome": { - "type": "string", - "enum": [ - "OUTCOME_UNKNOWN", - "CLAIMED", - "UNCLAIMED", - "ABANDONED", - "FIRST_STAGE", - "TIMEOUT" - ], - "default": "OUTCOME_UNKNOWN", - "description": " - OUTCOME_UNKNOWN: Outcome unknown.\n - CLAIMED: An output was claimed on chain.\n - UNCLAIMED: An output was left unclaimed on chain.\n - ABANDONED: ResolverOutcomeAbandoned indicates that an output that we did not\nclaim on chain, for example an anchor that we did not sweep and a\nthird party claimed on chain, or a htlc that we could not decode\nso left unclaimed.\n - FIRST_STAGE: If we force closed our channel, our htlcs need to be claimed in two\nstages. 
This outcome represents the broadcast of a timeout or success\ntransaction for this two stage htlc claim.\n - TIMEOUT: A htlc was timed out on chain." - }, - "lnrpcResolutionType": { - "type": "string", - "enum": [ - "TYPE_UNKNOWN", - "ANCHOR", - "INCOMING_HTLC", - "OUTGOING_HTLC", - "COMMIT" - ], - "default": "TYPE_UNKNOWN", - "description": " - ANCHOR: We resolved an anchor output.\n - INCOMING_HTLC: We are resolving an incoming htlc on chain. This if this htlc is\nclaimed, we swept the incoming htlc with the preimage. If it is timed\nout, our peer swept the timeout path.\n - OUTGOING_HTLC: We are resolving an outgoing htlc on chain. If this htlc is claimed,\nthe remote party swept the htlc with the preimage. If it is timed out,\nwe swept it with the timeout path.\n - COMMIT: We force closed and need to sweep our time locked commitment output." - }, - "lnrpcRestoreBackupResponse": { - "type": "object" - }, - "lnrpcRestoreChanBackupRequest": { - "type": "object", - "properties": { - "chan_backups": { - "$ref": "#/definitions/lnrpcChannelBackups", - "description": "The channels to restore as a list of channel/backup pairs." - }, - "multi_chan_backup": { - "type": "string", - "format": "byte", - "description": "The channels to restore in the packed multi backup format. When using\nREST, this field must be encoded as base64." - } - } - }, - "lnrpcRoute": { - "type": "object", - "properties": { - "total_time_lock": { - "type": "integer", - "format": "int64", - "description": "The cumulative (final) time lock across the entire route. This is the CLTV\nvalue that should be extended to the first hop in the route. All other hops\nwill decrement the time-lock as advertised, leaving enough time for all\nhops to wait for or present the payment preimage to complete the payment." - }, - "total_fees": { - "type": "string", - "format": "int64", - "description": "The sum of the fees paid at each hop within the final route. 
In the case\nof a one-hop payment, this value will be zero as we don't need to pay a fee\nto ourselves." - }, - "total_amt": { - "type": "string", - "format": "int64", - "description": "The total amount of funds required to complete a payment over this route.\nThis value includes the cumulative fees at each hop. As a result, the HTLC\nextended to the first-hop in the route will need to have at least this many\nsatoshis, otherwise the route will fail at an intermediate node due to an\ninsufficient amount of fees." - }, - "hops": { - "type": "array", - "items": { - "$ref": "#/definitions/lnrpcHop" - }, - "description": "Contains details concerning the specific forwarding details at each hop." - }, - "total_fees_msat": { - "type": "string", - "format": "int64", - "description": "The total fees in millisatoshis." - }, - "total_amt_msat": { - "type": "string", - "format": "int64", - "description": "The total amount in millisatoshis." - } - }, - "description": "A path through the channel graph which runs over one or more channels in\nsuccession. This struct carries all the information required to craft the\nSphinx onion packet, and send the payment along the first hop in the path. A\nroute is only selected as valid if all the channels have sufficient capacity to\ncarry the initial payment amount after fees are accounted for." - }, - "lnrpcRouteHint": { - "type": "object", - "properties": { - "hop_hints": { - "type": "array", - "items": { - "$ref": "#/definitions/lnrpcHopHint" - }, - "description": "A list of hop hints that when chained together can assist in reaching a\nspecific destination." 
- } - } - }, - "lnrpcRoutingPolicy": { - "type": "object", - "properties": { - "time_lock_delta": { - "type": "integer", - "format": "int64" - }, - "min_htlc": { - "type": "string", - "format": "int64" - }, - "fee_base_msat": { - "type": "string", - "format": "int64" - }, - "fee_rate_milli_msat": { - "type": "string", - "format": "int64" - }, - "disabled": { - "type": "boolean", - "format": "boolean" - }, - "max_htlc_msat": { - "type": "string", - "format": "uint64" - }, - "last_update": { - "type": "integer", - "format": "int64" - } - } - }, - "lnrpcSendCoinsRequest": { - "type": "object", - "properties": { - "addr": { - "type": "string", - "title": "The address to send coins to" - }, - "amount": { - "type": "string", - "format": "int64", - "title": "The amount in satoshis to send" - }, - "target_conf": { - "type": "integer", - "format": "int32", - "description": "The target number of blocks that this transaction should be confirmed\nby." - }, - "sat_per_byte": { - "type": "string", - "format": "int64", - "description": "A manual fee rate set in sat/byte that should be used when crafting the\ntransaction." - }, - "send_all": { - "type": "boolean", - "format": "boolean", - "description": "If set, then the amount field will be ignored, and lnd will attempt to\nsend all the coins under control of the internal wallet to the specified\naddress." - }, - "label": { - "type": "string", - "description": "An optional label for the transaction, limited to 500 characters." - }, - "min_confs": { - "type": "integer", - "format": "int32", - "description": "The minimum number of confirmations each one of your outputs used for\nthe transaction must satisfy." - }, - "spend_unconfirmed": { - "type": "boolean", - "format": "boolean", - "description": "Whether unconfirmed outputs should be used as inputs for the transaction." 
- } - } - }, - "lnrpcSendCoinsResponse": { - "type": "object", - "properties": { - "txid": { - "type": "string", - "title": "The transaction ID of the transaction" - } - } - }, - "lnrpcSendManyRequest": { - "type": "object", - "properties": { - "AddrToAmount": { - "type": "object", - "additionalProperties": { - "type": "string", - "format": "int64" - }, - "title": "The map from addresses to amounts" - }, - "target_conf": { - "type": "integer", - "format": "int32", - "description": "The target number of blocks that this transaction should be confirmed\nby." - }, - "sat_per_byte": { - "type": "string", - "format": "int64", - "description": "A manual fee rate set in sat/byte that should be used when crafting the\ntransaction." - }, - "label": { - "type": "string", - "description": "An optional label for the transaction, limited to 500 characters." - }, - "min_confs": { - "type": "integer", - "format": "int32", - "description": "The minimum number of confirmations each one of your outputs used for\nthe transaction must satisfy." - }, - "spend_unconfirmed": { - "type": "boolean", - "format": "boolean", - "description": "Whether unconfirmed outputs should be used as inputs for the transaction." - } - } - }, - "lnrpcSendManyResponse": { - "type": "object", - "properties": { - "txid": { - "type": "string", - "title": "The id of the transaction" - } - } - }, - "lnrpcSendRequest": { - "type": "object", - "properties": { - "dest": { - "type": "string", - "format": "byte", - "description": "The identity pubkey of the payment recipient. When using REST, this field\nmust be encoded as base64." - }, - "dest_string": { - "type": "string", - "description": "The hex-encoded identity pubkey of the payment recipient. Deprecated now\nthat the REST gateway supports base64 encoding of bytes fields." - }, - "amt": { - "type": "string", - "format": "int64", - "description": "The amount to send expressed in satoshis.\n\nThe fields amt and amt_msat are mutually exclusive." 
- }, - "amt_msat": { - "type": "string", - "format": "int64", - "description": "The amount to send expressed in millisatoshis.\n\nThe fields amt and amt_msat are mutually exclusive." - }, - "payment_hash": { - "type": "string", - "format": "byte", - "description": "The hash to use within the payment's HTLC. When using REST, this field\nmust be encoded as base64." - }, - "payment_hash_string": { - "type": "string", - "description": "The hex-encoded hash to use within the payment's HTLC. Deprecated now\nthat the REST gateway supports base64 encoding of bytes fields." - }, - "payment_request": { - "type": "string", - "description": "A bare-bones invoice for a payment within the Lightning Network. With the\ndetails of the invoice, the sender has all the data necessary to send a\npayment to the recipient." - }, - "final_cltv_delta": { - "type": "integer", - "format": "int32", - "description": "The CLTV delta from the current height that should be used to set the\ntimelock for the final hop." - }, - "fee_limit": { - "$ref": "#/definitions/lnrpcFeeLimit", - "description": "The maximum number of satoshis that will be paid as a fee of the payment.\nThis value can be represented either as a percentage of the amount being\nsent, or as a fixed amount of the maximum fee the user is willing the pay to\nsend the payment." - }, - "outgoing_chan_id": { - "type": "string", - "format": "uint64", - "description": "The channel id of the channel that must be taken to the first hop. If zero,\nany channel may be used." - }, - "last_hop_pubkey": { - "type": "string", - "format": "byte", - "description": "The pubkey of the last hop of the route. If empty, any hop may be used." - }, - "cltv_limit": { - "type": "integer", - "format": "int64", - "description": "An optional maximum total time lock for the route. This should not exceed\nlnd's `--max-cltv-expiry` setting. If zero, then the value of\n`--max-cltv-expiry` is enforced." 
- }, - "dest_custom_records": { - "type": "object", - "additionalProperties": { - "type": "string", - "format": "byte" - }, - "description": "An optional field that can be used to pass an arbitrary set of TLV records\nto a peer which understands the new records. This can be used to pass\napplication specific data during the payment attempt. Record types are\nrequired to be in the custom range \u003e= 65536. When using REST, the values\nmust be encoded as base64." - }, - "allow_self_payment": { - "type": "boolean", - "format": "boolean", - "description": "If set, circular payments to self are permitted." - }, - "dest_features": { - "type": "array", - "items": { - "$ref": "#/definitions/lnrpcFeatureBit" - }, - "description": "Features assumed to be supported by the final node. All transitive feature\ndependencies must also be set properly. For a given feature bit pair, either\noptional or remote may be set, but not both. If this field is nil or empty,\nthe router will try to load destination features from the graph as a\nfallback." - } - } - }, - "lnrpcSendResponse": { - "type": "object", - "properties": { - "payment_error": { - "type": "string" - }, - "payment_preimage": { - "type": "string", - "format": "byte" - }, - "payment_route": { - "$ref": "#/definitions/lnrpcRoute" - }, - "payment_hash": { - "type": "string", - "format": "byte" - } - } - }, - "lnrpcSendToRouteRequest": { - "type": "object", - "properties": { - "payment_hash": { - "type": "string", - "format": "byte", - "description": "The payment hash to use for the HTLC. When using REST, this field must be\nencoded as base64." - }, - "payment_hash_string": { - "type": "string", - "description": "An optional hex-encoded payment hash to be used for the HTLC. Deprecated now\nthat the REST gateway supports base64 encoding of bytes fields." - }, - "route": { - "$ref": "#/definitions/lnrpcRoute", - "description": "Route that should be used to attempt to complete the payment." 
- } - } - }, - "lnrpcSignMessageRequest": { - "type": "object", - "properties": { - "msg": { - "type": "string", - "format": "byte", - "description": "The message to be signed. When using REST, this field must be encoded as\nbase64." - } - } - }, - "lnrpcSignMessageResponse": { - "type": "object", - "properties": { - "signature": { - "type": "string", - "title": "The signature for the given message" - } - } - }, - "lnrpcStopRequest": { - "type": "object" - }, - "lnrpcStopResponse": { - "type": "object" - }, - "lnrpcTimestampedError": { - "type": "object", - "properties": { - "timestamp": { - "type": "string", - "format": "uint64", - "description": "The unix timestamp in seconds when the error occurred." - }, - "error": { - "type": "string", - "description": "The string representation of the error sent by our peer." - } - } - }, - "lnrpcTransaction": { - "type": "object", - "properties": { - "tx_hash": { - "type": "string", - "title": "The transaction hash" - }, - "amount": { - "type": "string", - "format": "int64", - "title": "The transaction amount, denominated in satoshis" - }, - "num_confirmations": { - "type": "integer", - "format": "int32", - "title": "The number of confirmations" - }, - "block_hash": { - "type": "string", - "title": "The hash of the block this transaction was included in" - }, - "block_height": { - "type": "integer", - "format": "int32", - "title": "The height of the block this transaction was included in" - }, - "time_stamp": { - "type": "string", - "format": "int64", - "title": "Timestamp of this transaction" - }, - "total_fees": { - "type": "string", - "format": "int64", - "title": "Fees paid for this transaction" - }, - "dest_addresses": { - "type": "array", - "items": { - "type": "string" - }, - "title": "Addresses that received funds for this transaction" - }, - "raw_tx_hex": { - "type": "string", - "description": "The raw transaction hex." 
- }, - "label": { - "type": "string", - "description": "A label that was optionally set on transaction broadcast." - } - } - }, - "lnrpcTransactionDetails": { - "type": "object", - "properties": { - "transactions": { - "type": "array", - "items": { - "$ref": "#/definitions/lnrpcTransaction" - }, - "description": "The list of transactions relevant to the wallet." - } - } - }, - "lnrpcUtxo": { - "type": "object", - "properties": { - "address_type": { - "$ref": "#/definitions/lnrpcAddressType", - "title": "The type of address" - }, - "address": { - "type": "string", - "title": "The address" - }, - "amount_sat": { - "type": "string", - "format": "int64", - "title": "The value of the unspent coin in satoshis" - }, - "pk_script": { - "type": "string", - "title": "The pkscript in hex" - }, - "outpoint": { - "$ref": "#/definitions/lnrpcOutPoint", - "title": "The outpoint in format txid:n" - }, - "confirmations": { - "type": "string", - "format": "int64", - "title": "The number of confirmations for the Utxo" - } - } - }, - "lnrpcVerifyChanBackupResponse": { - "type": "object" - }, - "lnrpcVerifyMessageRequest": { - "type": "object", - "properties": { - "msg": { - "type": "string", - "format": "byte", - "description": "The message over which the signature is to be verified. When using REST,\nthis field must be encoded as base64." 
- }, - "signature": { - "type": "string", - "title": "The signature to be verified over the given message" - } - } - }, - "lnrpcVerifyMessageResponse": { - "type": "object", - "properties": { - "valid": { - "type": "boolean", - "format": "boolean", - "title": "Whether the signature was valid over the given message" - }, - "pubkey": { - "type": "string", - "title": "The pubkey recovered from the signature" - } - } - }, - "lnrpcWalletBalanceResponse": { - "type": "object", - "properties": { - "total_balance": { - "type": "string", - "format": "int64", - "title": "The balance of the wallet" - }, - "confirmed_balance": { - "type": "string", - "format": "int64", - "title": "The confirmed balance of a wallet(with \u003e= 1 confirmations)" - }, - "unconfirmed_balance": { - "type": "string", - "format": "int64", - "title": "The unconfirmed balance of a wallet(with 0 confirmations)" - } - } - }, - "protobufAny": { - "type": "object", - "properties": { - "type_url": { - "type": "string" - }, - "value": { - "type": "string", - "format": "byte" - } - } - }, - "runtimeError": { - "type": "object", - "properties": { - "error": { - "type": "string" - }, - "code": { - "type": "integer", - "format": "int32" - }, - "message": { - "type": "string" - }, - "details": { - "type": "array", - "items": { - "$ref": "#/definitions/protobufAny" - } - } - } - }, - "runtimeStreamError": { - "type": "object", - "properties": { - "grpc_code": { - "type": "integer", - "format": "int32" - }, - "http_code": { - "type": "integer", - "format": "int32" - }, - "message": { - "type": "string" - }, - "http_status": { - "type": "string" - }, - "details": { - "type": "array", - "items": { - "$ref": "#/definitions/protobufAny" - } - } - } - } - } -} diff --git a/lnd/lnrpc/rpc_utils.go b/lnd/lnrpc/rpc_utils.go deleted file mode 100644 index e471f004..00000000 --- a/lnd/lnrpc/rpc_utils.go +++ /dev/null @@ -1,90 +0,0 @@ -package lnrpc - -import ( - "encoding/hex" - "sort" - - 
"github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/lnwallet" -) - -// RPCTransactionDetails returns a set of rpc transaction details. -func RPCTransactionDetails(txns []*lnwallet.TransactionDetail) *TransactionDetails { - txDetails := &TransactionDetails{ - Transactions: make([]*Transaction, len(txns)), - } - - for i, tx := range txns { - var destAddresses []string - for _, destAddress := range tx.DestAddresses { - destAddresses = append(destAddresses, destAddress.EncodeAddress()) - } - - // We also get unconfirmed transactions, so BlockHash can be - // nil. - blockHash := "" - if tx.BlockHash != nil { - blockHash = tx.BlockHash.String() - } - - txDetails.Transactions[i] = &Transaction{ - TxHash: tx.Hash.String(), - Amount: int64(tx.Value), - NumConfirmations: tx.NumConfirmations, - BlockHash: blockHash, - BlockHeight: tx.BlockHeight, - TimeStamp: tx.Timestamp, - TotalFees: tx.TotalFees, - DestAddresses: destAddresses, - RawTxHex: hex.EncodeToString(tx.RawTx), - Label: tx.Label, - } - } - - // Sort transactions by number of confirmations rather than height so - // that unconfirmed transactions (height =0; confirmations =-1) will - // follow the most recently set of confirmed transactions. If we sort - // by height, unconfirmed transactions will follow our oldest - // transactions, because they have lower block heights. - sort.Slice(txDetails.Transactions, func(i, j int) bool { - return txDetails.Transactions[i].NumConfirmations < - txDetails.Transactions[j].NumConfirmations - }) - - return txDetails -} - -// ExtractMinConfs extracts the minimum number of confirmations that each -// output used to fund a transaction should satisfy. -func ExtractMinConfs(minConfs int32, spendUnconfirmed bool) (int32, er.R) { - switch { - // Ensure that the MinConfs parameter is non-negative. 
- case minConfs < 0: - return 0, er.New("minimum number of confirmations must " + - "be a non-negative number") - - // The transaction should not be funded with unconfirmed outputs - // unless explicitly specified by SpendUnconfirmed. We do this to - // provide sane defaults to the OpenChannel RPC, as otherwise, if the - // MinConfs field isn't explicitly set by the caller, we'll use - // unconfirmed outputs without the caller being aware. - case minConfs == 0 && !spendUnconfirmed: - return 1, nil - - // In the event that the caller set MinConfs > 0 and SpendUnconfirmed to - // true, we'll return an error to indicate the conflict. - case minConfs > 0 && spendUnconfirmed: - return 0, er.New("SpendUnconfirmed set to true with " + - "MinConfs > 0") - - // The funding transaction of the new channel to be created can be - // funded with unconfirmed outputs. - case spendUnconfirmed: - return 0, nil - - // If none of the above cases matched, we'll return the value set - // explicitly by the caller. - default: - return minConfs, nil - } -} diff --git a/lnd/lnrpc/signrpc/config_active.go b/lnd/lnrpc/signrpc/config_active.go deleted file mode 100644 index 43c837a7..00000000 --- a/lnd/lnrpc/signrpc/config_active.go +++ /dev/null @@ -1,38 +0,0 @@ -// +build signrpc - -package signrpc - -import ( - "github.com/pkt-cash/pktd/lnd/input" - "github.com/pkt-cash/pktd/lnd/keychain" - "github.com/pkt-cash/pktd/lnd/macaroons" -) - -// Config is the primary configuration struct for the signer RPC server. It -// contains all the items required for the signer rpc server to carry out its -// duties. The fields with struct tags are meant to be parsed as normal -// configuration options, while if able to be populated, the latter fields MUST -// also be specified. -type Config struct { - // SignerMacPath is the path for the signer macaroon. If unspecified - // then we assume that the macaroon will be found under the network - // directory, named DefaultSignerMacFilename. 
- SignerMacPath string `long:"signermacaroonpath" description:"Path to the signer macaroon"` - - // NetworkDir is the main network directory wherein the signer rpc - // server will find the macaroon named DefaultSignerMacFilename. - NetworkDir string - - // MacService is the main macaroon service that we'll use to handle - // authentication for the signer rpc server. - MacService *macaroons.Service - - // Signer is the signer instance that backs the signer RPC server. The - // job of the signer RPC server is simply to proxy valid requests to - // the active signer instance. - Signer input.Signer - - // KeyRing is an interface that the signer will use to derive any keys - // for signing messages. - KeyRing keychain.SecretKeyRing -} diff --git a/lnd/lnrpc/signrpc/config_default.go b/lnd/lnrpc/signrpc/config_default.go deleted file mode 100644 index 7698f19f..00000000 --- a/lnd/lnrpc/signrpc/config_default.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build !signrpc - -package signrpc - -// Config is empty for non-signrpc builds. -type Config struct{} diff --git a/lnd/lnrpc/signrpc/driver.go b/lnd/lnrpc/signrpc/driver.go deleted file mode 100644 index c0f4a8c1..00000000 --- a/lnd/lnrpc/signrpc/driver.go +++ /dev/null @@ -1,71 +0,0 @@ -// +build signrpc - -package signrpc - -import ( - "fmt" - - "github.com/pkt-cash/pktd/lnd/lnrpc" -) - -// createNewSubServer is a helper method that will create the new signer sub -// server given the main config dispatcher method. If we're unable to find the -// config that is meant for us in the config dispatcher, then we'll exit with -// an error. -func createNewSubServer(configRegistry lnrpc.SubServerConfigDispatcher) ( - lnrpc.SubServer, lnrpc.MacaroonPerms, er.R) { - - // We'll attempt to look up the config that we expect, according to our - // subServerName name. If we can't find this, then we'll exit with an - // error, as we're unable to properly initialize ourselves without this - // config. 
- signServerConf, ok := configRegistry.FetchConfig(subServerName) - if !ok { - return nil, nil, er.Errorf("unable to find config for "+ - "subserver type %s", subServerName) - } - - // Now that we've found an object mapping to our service name, we'll - // ensure that it's the type we need. - config, ok := signServerConf.(*Config) - if !ok { - return nil, nil, er.Errorf("wrong type of config for "+ - "subserver %s, expected %T got %T", subServerName, - &Config{}, signServerConf) - } - - // Before we try to make the new signer service instance, we'll perform - // some sanity checks on the arguments to ensure that they're useable. - - switch { - // If the macaroon service is set (we should use macaroons), then - // ensure that we know where to look for them, or create them if not - // found. - case config.MacService != nil && config.NetworkDir == "": - return nil, nil, er.Errorf("NetworkDir must be set to create " + - "Signrpc") - case config.Signer == nil: - return nil, nil, er.Errorf("Signer must be set to create " + - "Signrpc") - } - - return New(config) -} - -func init() { - subServer := &lnrpc.SubServerDriver{ - SubServerName: subServerName, - New: func(c lnrpc.SubServerConfigDispatcher) ( - lnrpc.SubServer, lnrpc.MacaroonPerms, er.R) { - - return createNewSubServer(c) - }, - } - - // If the build tag is active, then we'll register ourselves as a - // sub-RPC server within the global lnrpc package namespace. - if err := lnrpc.RegisterSubServer(subServer); err != nil { - panic(fmt.Sprintf("failed to register sub server driver '%s': %v", - subServerName, err)) - } -} diff --git a/lnd/lnrpc/signrpc/signer.pb.go b/lnd/lnrpc/signrpc/signer.pb.go deleted file mode 100644 index a5769290..00000000 --- a/lnd/lnrpc/signrpc/signer.pb.go +++ /dev/null @@ -1,1153 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// source: signrpc/signer.proto - -package signrpc - -import ( - context "context" - fmt "fmt" - proto "github.com/golang/protobuf/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -type KeyLocator struct { - // The family of key being identified. - KeyFamily int32 `protobuf:"varint,1,opt,name=key_family,json=keyFamily,proto3" json:"key_family,omitempty"` - // The precise index of the key being identified. - KeyIndex int32 `protobuf:"varint,2,opt,name=key_index,json=keyIndex,proto3" json:"key_index,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *KeyLocator) Reset() { *m = KeyLocator{} } -func (m *KeyLocator) String() string { return proto.CompactTextString(m) } -func (*KeyLocator) ProtoMessage() {} -func (*KeyLocator) Descriptor() ([]byte, []int) { - return fileDescriptor_4ecd772f6c7ffacf, []int{0} -} - -func (m *KeyLocator) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_KeyLocator.Unmarshal(m, b) -} -func (m *KeyLocator) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_KeyLocator.Marshal(b, m, deterministic) -} -func (m *KeyLocator) XXX_Merge(src proto.Message) { - xxx_messageInfo_KeyLocator.Merge(m, src) -} -func (m *KeyLocator) XXX_Size() int { - return xxx_messageInfo_KeyLocator.Size(m) -} -func (m *KeyLocator) XXX_DiscardUnknown() { - xxx_messageInfo_KeyLocator.DiscardUnknown(m) -} - 
-var xxx_messageInfo_KeyLocator proto.InternalMessageInfo - -func (m *KeyLocator) GetKeyFamily() int32 { - if m != nil { - return m.KeyFamily - } - return 0 -} - -func (m *KeyLocator) GetKeyIndex() int32 { - if m != nil { - return m.KeyIndex - } - return 0 -} - -type KeyDescriptor struct { - // - //The raw bytes of the key being identified. Either this or the KeyLocator - //must be specified. - RawKeyBytes []byte `protobuf:"bytes,1,opt,name=raw_key_bytes,json=rawKeyBytes,proto3" json:"raw_key_bytes,omitempty"` - // - //The key locator that identifies which key to use for signing. Either this - //or the raw bytes of the target key must be specified. - KeyLoc *KeyLocator `protobuf:"bytes,2,opt,name=key_loc,json=keyLoc,proto3" json:"key_loc,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *KeyDescriptor) Reset() { *m = KeyDescriptor{} } -func (m *KeyDescriptor) String() string { return proto.CompactTextString(m) } -func (*KeyDescriptor) ProtoMessage() {} -func (*KeyDescriptor) Descriptor() ([]byte, []int) { - return fileDescriptor_4ecd772f6c7ffacf, []int{1} -} - -func (m *KeyDescriptor) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_KeyDescriptor.Unmarshal(m, b) -} -func (m *KeyDescriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_KeyDescriptor.Marshal(b, m, deterministic) -} -func (m *KeyDescriptor) XXX_Merge(src proto.Message) { - xxx_messageInfo_KeyDescriptor.Merge(m, src) -} -func (m *KeyDescriptor) XXX_Size() int { - return xxx_messageInfo_KeyDescriptor.Size(m) -} -func (m *KeyDescriptor) XXX_DiscardUnknown() { - xxx_messageInfo_KeyDescriptor.DiscardUnknown(m) -} - -var xxx_messageInfo_KeyDescriptor proto.InternalMessageInfo - -func (m *KeyDescriptor) GetRawKeyBytes() []byte { - if m != nil { - return m.RawKeyBytes - } - return nil -} - -func (m *KeyDescriptor) GetKeyLoc() *KeyLocator { - if m != nil { - return m.KeyLoc - 
} - return nil -} - -type TxOut struct { - // The value of the output being spent. - Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` - // The script of the output being spent. - PkScript []byte `protobuf:"bytes,2,opt,name=pk_script,json=pkScript,proto3" json:"pk_script,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *TxOut) Reset() { *m = TxOut{} } -func (m *TxOut) String() string { return proto.CompactTextString(m) } -func (*TxOut) ProtoMessage() {} -func (*TxOut) Descriptor() ([]byte, []int) { - return fileDescriptor_4ecd772f6c7ffacf, []int{2} -} - -func (m *TxOut) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_TxOut.Unmarshal(m, b) -} -func (m *TxOut) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_TxOut.Marshal(b, m, deterministic) -} -func (m *TxOut) XXX_Merge(src proto.Message) { - xxx_messageInfo_TxOut.Merge(m, src) -} -func (m *TxOut) XXX_Size() int { - return xxx_messageInfo_TxOut.Size(m) -} -func (m *TxOut) XXX_DiscardUnknown() { - xxx_messageInfo_TxOut.DiscardUnknown(m) -} - -var xxx_messageInfo_TxOut proto.InternalMessageInfo - -func (m *TxOut) GetValue() int64 { - if m != nil { - return m.Value - } - return 0 -} - -func (m *TxOut) GetPkScript() []byte { - if m != nil { - return m.PkScript - } - return nil -} - -type SignDescriptor struct { - // - //A descriptor that precisely describes *which* key to use for signing. This - //may provide the raw public key directly, or require the Signer to re-derive - //the key according to the populated derivation path. - // - //Note that if the key descriptor was obtained through walletrpc.DeriveKey, - //then the key locator MUST always be provided, since the derived keys are not - //persisted unlike with DeriveNextKey. 
- KeyDesc *KeyDescriptor `protobuf:"bytes,1,opt,name=key_desc,json=keyDesc,proto3" json:"key_desc,omitempty"` - // - //A scalar value that will be added to the private key corresponding to the - //above public key to obtain the private key to be used to sign this input. - //This value is typically derived via the following computation: - // - // derivedKey = privkey + sha256(perCommitmentPoint || pubKey) mod N - SingleTweak []byte `protobuf:"bytes,2,opt,name=single_tweak,json=singleTweak,proto3" json:"single_tweak,omitempty"` - // - //A private key that will be used in combination with its corresponding - //private key to derive the private key that is to be used to sign the target - //input. Within the Lightning protocol, this value is typically the - //commitment secret from a previously revoked commitment transaction. This - //value is in combination with two hash values, and the original private key - //to derive the private key to be used when signing. - // - // k = (privKey*sha256(pubKey || tweakPub) + - //tweakPriv*sha256(tweakPub || pubKey)) mod N - DoubleTweak []byte `protobuf:"bytes,3,opt,name=double_tweak,json=doubleTweak,proto3" json:"double_tweak,omitempty"` - // - //The full script required to properly redeem the output. This field will - //only be populated if a p2wsh or a p2sh output is being signed. - WitnessScript []byte `protobuf:"bytes,4,opt,name=witness_script,json=witnessScript,proto3" json:"witness_script,omitempty"` - // - //A description of the output being spent. The value and script MUST be - //provided. - Output *TxOut `protobuf:"bytes,5,opt,name=output,proto3" json:"output,omitempty"` - // - //The target sighash type that should be used when generating the final - //sighash, and signature. - Sighash uint32 `protobuf:"varint,7,opt,name=sighash,proto3" json:"sighash,omitempty"` - // - //The target input within the transaction that should be signed. 
- InputIndex int32 `protobuf:"varint,8,opt,name=input_index,json=inputIndex,proto3" json:"input_index,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SignDescriptor) Reset() { *m = SignDescriptor{} } -func (m *SignDescriptor) String() string { return proto.CompactTextString(m) } -func (*SignDescriptor) ProtoMessage() {} -func (*SignDescriptor) Descriptor() ([]byte, []int) { - return fileDescriptor_4ecd772f6c7ffacf, []int{3} -} - -func (m *SignDescriptor) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SignDescriptor.Unmarshal(m, b) -} -func (m *SignDescriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SignDescriptor.Marshal(b, m, deterministic) -} -func (m *SignDescriptor) XXX_Merge(src proto.Message) { - xxx_messageInfo_SignDescriptor.Merge(m, src) -} -func (m *SignDescriptor) XXX_Size() int { - return xxx_messageInfo_SignDescriptor.Size(m) -} -func (m *SignDescriptor) XXX_DiscardUnknown() { - xxx_messageInfo_SignDescriptor.DiscardUnknown(m) -} - -var xxx_messageInfo_SignDescriptor proto.InternalMessageInfo - -func (m *SignDescriptor) GetKeyDesc() *KeyDescriptor { - if m != nil { - return m.KeyDesc - } - return nil -} - -func (m *SignDescriptor) GetSingleTweak() []byte { - if m != nil { - return m.SingleTweak - } - return nil -} - -func (m *SignDescriptor) GetDoubleTweak() []byte { - if m != nil { - return m.DoubleTweak - } - return nil -} - -func (m *SignDescriptor) GetWitnessScript() []byte { - if m != nil { - return m.WitnessScript - } - return nil -} - -func (m *SignDescriptor) GetOutput() *TxOut { - if m != nil { - return m.Output - } - return nil -} - -func (m *SignDescriptor) GetSighash() uint32 { - if m != nil { - return m.Sighash - } - return 0 -} - -func (m *SignDescriptor) GetInputIndex() int32 { - if m != nil { - return m.InputIndex - } - return 0 -} - -type SignReq struct { - // The raw bytes of the 
transaction to be signed. - RawTxBytes []byte `protobuf:"bytes,1,opt,name=raw_tx_bytes,json=rawTxBytes,proto3" json:"raw_tx_bytes,omitempty"` - // A set of sign descriptors, for each input to be signed. - SignDescs []*SignDescriptor `protobuf:"bytes,2,rep,name=sign_descs,json=signDescs,proto3" json:"sign_descs,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SignReq) Reset() { *m = SignReq{} } -func (m *SignReq) String() string { return proto.CompactTextString(m) } -func (*SignReq) ProtoMessage() {} -func (*SignReq) Descriptor() ([]byte, []int) { - return fileDescriptor_4ecd772f6c7ffacf, []int{4} -} - -func (m *SignReq) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SignReq.Unmarshal(m, b) -} -func (m *SignReq) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SignReq.Marshal(b, m, deterministic) -} -func (m *SignReq) XXX_Merge(src proto.Message) { - xxx_messageInfo_SignReq.Merge(m, src) -} -func (m *SignReq) XXX_Size() int { - return xxx_messageInfo_SignReq.Size(m) -} -func (m *SignReq) XXX_DiscardUnknown() { - xxx_messageInfo_SignReq.DiscardUnknown(m) -} - -var xxx_messageInfo_SignReq proto.InternalMessageInfo - -func (m *SignReq) GetRawTxBytes() []byte { - if m != nil { - return m.RawTxBytes - } - return nil -} - -func (m *SignReq) GetSignDescs() []*SignDescriptor { - if m != nil { - return m.SignDescs - } - return nil -} - -type SignResp struct { - // - //A set of signatures realized in a fixed 64-byte format ordered in ascending - //input order. 
- RawSigs [][]byte `protobuf:"bytes,1,rep,name=raw_sigs,json=rawSigs,proto3" json:"raw_sigs,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SignResp) Reset() { *m = SignResp{} } -func (m *SignResp) String() string { return proto.CompactTextString(m) } -func (*SignResp) ProtoMessage() {} -func (*SignResp) Descriptor() ([]byte, []int) { - return fileDescriptor_4ecd772f6c7ffacf, []int{5} -} - -func (m *SignResp) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SignResp.Unmarshal(m, b) -} -func (m *SignResp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SignResp.Marshal(b, m, deterministic) -} -func (m *SignResp) XXX_Merge(src proto.Message) { - xxx_messageInfo_SignResp.Merge(m, src) -} -func (m *SignResp) XXX_Size() int { - return xxx_messageInfo_SignResp.Size(m) -} -func (m *SignResp) XXX_DiscardUnknown() { - xxx_messageInfo_SignResp.DiscardUnknown(m) -} - -var xxx_messageInfo_SignResp proto.InternalMessageInfo - -func (m *SignResp) GetRawSigs() [][]byte { - if m != nil { - return m.RawSigs - } - return nil -} - -type InputScript struct { - // The serializes witness stack for the specified input. - Witness [][]byte `protobuf:"bytes,1,rep,name=witness,proto3" json:"witness,omitempty"` - // - //The optional sig script for the specified witness that will only be set if - //the input specified is a nested p2sh witness program. 
- SigScript []byte `protobuf:"bytes,2,opt,name=sig_script,json=sigScript,proto3" json:"sig_script,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *InputScript) Reset() { *m = InputScript{} } -func (m *InputScript) String() string { return proto.CompactTextString(m) } -func (*InputScript) ProtoMessage() {} -func (*InputScript) Descriptor() ([]byte, []int) { - return fileDescriptor_4ecd772f6c7ffacf, []int{6} -} - -func (m *InputScript) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_InputScript.Unmarshal(m, b) -} -func (m *InputScript) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_InputScript.Marshal(b, m, deterministic) -} -func (m *InputScript) XXX_Merge(src proto.Message) { - xxx_messageInfo_InputScript.Merge(m, src) -} -func (m *InputScript) XXX_Size() int { - return xxx_messageInfo_InputScript.Size(m) -} -func (m *InputScript) XXX_DiscardUnknown() { - xxx_messageInfo_InputScript.DiscardUnknown(m) -} - -var xxx_messageInfo_InputScript proto.InternalMessageInfo - -func (m *InputScript) GetWitness() [][]byte { - if m != nil { - return m.Witness - } - return nil -} - -func (m *InputScript) GetSigScript() []byte { - if m != nil { - return m.SigScript - } - return nil -} - -type InputScriptResp struct { - // The set of fully valid input scripts requested. 
- InputScripts []*InputScript `protobuf:"bytes,1,rep,name=input_scripts,json=inputScripts,proto3" json:"input_scripts,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *InputScriptResp) Reset() { *m = InputScriptResp{} } -func (m *InputScriptResp) String() string { return proto.CompactTextString(m) } -func (*InputScriptResp) ProtoMessage() {} -func (*InputScriptResp) Descriptor() ([]byte, []int) { - return fileDescriptor_4ecd772f6c7ffacf, []int{7} -} - -func (m *InputScriptResp) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_InputScriptResp.Unmarshal(m, b) -} -func (m *InputScriptResp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_InputScriptResp.Marshal(b, m, deterministic) -} -func (m *InputScriptResp) XXX_Merge(src proto.Message) { - xxx_messageInfo_InputScriptResp.Merge(m, src) -} -func (m *InputScriptResp) XXX_Size() int { - return xxx_messageInfo_InputScriptResp.Size(m) -} -func (m *InputScriptResp) XXX_DiscardUnknown() { - xxx_messageInfo_InputScriptResp.DiscardUnknown(m) -} - -var xxx_messageInfo_InputScriptResp proto.InternalMessageInfo - -func (m *InputScriptResp) GetInputScripts() []*InputScript { - if m != nil { - return m.InputScripts - } - return nil -} - -type SignMessageReq struct { - // The message to be signed. - Msg []byte `protobuf:"bytes,1,opt,name=msg,proto3" json:"msg,omitempty"` - // The key locator that identifies which key to use for signing. 
- KeyLoc *KeyLocator `protobuf:"bytes,2,opt,name=key_loc,json=keyLoc,proto3" json:"key_loc,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SignMessageReq) Reset() { *m = SignMessageReq{} } -func (m *SignMessageReq) String() string { return proto.CompactTextString(m) } -func (*SignMessageReq) ProtoMessage() {} -func (*SignMessageReq) Descriptor() ([]byte, []int) { - return fileDescriptor_4ecd772f6c7ffacf, []int{8} -} - -func (m *SignMessageReq) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SignMessageReq.Unmarshal(m, b) -} -func (m *SignMessageReq) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SignMessageReq.Marshal(b, m, deterministic) -} -func (m *SignMessageReq) XXX_Merge(src proto.Message) { - xxx_messageInfo_SignMessageReq.Merge(m, src) -} -func (m *SignMessageReq) XXX_Size() int { - return xxx_messageInfo_SignMessageReq.Size(m) -} -func (m *SignMessageReq) XXX_DiscardUnknown() { - xxx_messageInfo_SignMessageReq.DiscardUnknown(m) -} - -var xxx_messageInfo_SignMessageReq proto.InternalMessageInfo - -func (m *SignMessageReq) GetMsg() []byte { - if m != nil { - return m.Msg - } - return nil -} - -func (m *SignMessageReq) GetKeyLoc() *KeyLocator { - if m != nil { - return m.KeyLoc - } - return nil -} - -type SignMessageResp struct { - // - //The signature for the given message in the fixed-size LN wire format. 
- Signature []byte `protobuf:"bytes,1,opt,name=signature,proto3" json:"signature,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SignMessageResp) Reset() { *m = SignMessageResp{} } -func (m *SignMessageResp) String() string { return proto.CompactTextString(m) } -func (*SignMessageResp) ProtoMessage() {} -func (*SignMessageResp) Descriptor() ([]byte, []int) { - return fileDescriptor_4ecd772f6c7ffacf, []int{9} -} - -func (m *SignMessageResp) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SignMessageResp.Unmarshal(m, b) -} -func (m *SignMessageResp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SignMessageResp.Marshal(b, m, deterministic) -} -func (m *SignMessageResp) XXX_Merge(src proto.Message) { - xxx_messageInfo_SignMessageResp.Merge(m, src) -} -func (m *SignMessageResp) XXX_Size() int { - return xxx_messageInfo_SignMessageResp.Size(m) -} -func (m *SignMessageResp) XXX_DiscardUnknown() { - xxx_messageInfo_SignMessageResp.DiscardUnknown(m) -} - -var xxx_messageInfo_SignMessageResp proto.InternalMessageInfo - -func (m *SignMessageResp) GetSignature() []byte { - if m != nil { - return m.Signature - } - return nil -} - -type VerifyMessageReq struct { - // The message over which the signature is to be verified. - Msg []byte `protobuf:"bytes,1,opt,name=msg,proto3" json:"msg,omitempty"` - // - //The fixed-size LN wire encoded signature to be verified over the given - //message. - Signature []byte `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` - // The public key the signature has to be valid for. 
- Pubkey []byte `protobuf:"bytes,3,opt,name=pubkey,proto3" json:"pubkey,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *VerifyMessageReq) Reset() { *m = VerifyMessageReq{} } -func (m *VerifyMessageReq) String() string { return proto.CompactTextString(m) } -func (*VerifyMessageReq) ProtoMessage() {} -func (*VerifyMessageReq) Descriptor() ([]byte, []int) { - return fileDescriptor_4ecd772f6c7ffacf, []int{10} -} - -func (m *VerifyMessageReq) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VerifyMessageReq.Unmarshal(m, b) -} -func (m *VerifyMessageReq) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VerifyMessageReq.Marshal(b, m, deterministic) -} -func (m *VerifyMessageReq) XXX_Merge(src proto.Message) { - xxx_messageInfo_VerifyMessageReq.Merge(m, src) -} -func (m *VerifyMessageReq) XXX_Size() int { - return xxx_messageInfo_VerifyMessageReq.Size(m) -} -func (m *VerifyMessageReq) XXX_DiscardUnknown() { - xxx_messageInfo_VerifyMessageReq.DiscardUnknown(m) -} - -var xxx_messageInfo_VerifyMessageReq proto.InternalMessageInfo - -func (m *VerifyMessageReq) GetMsg() []byte { - if m != nil { - return m.Msg - } - return nil -} - -func (m *VerifyMessageReq) GetSignature() []byte { - if m != nil { - return m.Signature - } - return nil -} - -func (m *VerifyMessageReq) GetPubkey() []byte { - if m != nil { - return m.Pubkey - } - return nil -} - -type VerifyMessageResp struct { - // Whether the signature was valid over the given message. 
- Valid bool `protobuf:"varint,1,opt,name=valid,proto3" json:"valid,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *VerifyMessageResp) Reset() { *m = VerifyMessageResp{} } -func (m *VerifyMessageResp) String() string { return proto.CompactTextString(m) } -func (*VerifyMessageResp) ProtoMessage() {} -func (*VerifyMessageResp) Descriptor() ([]byte, []int) { - return fileDescriptor_4ecd772f6c7ffacf, []int{11} -} - -func (m *VerifyMessageResp) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VerifyMessageResp.Unmarshal(m, b) -} -func (m *VerifyMessageResp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VerifyMessageResp.Marshal(b, m, deterministic) -} -func (m *VerifyMessageResp) XXX_Merge(src proto.Message) { - xxx_messageInfo_VerifyMessageResp.Merge(m, src) -} -func (m *VerifyMessageResp) XXX_Size() int { - return xxx_messageInfo_VerifyMessageResp.Size(m) -} -func (m *VerifyMessageResp) XXX_DiscardUnknown() { - xxx_messageInfo_VerifyMessageResp.DiscardUnknown(m) -} - -var xxx_messageInfo_VerifyMessageResp proto.InternalMessageInfo - -func (m *VerifyMessageResp) GetValid() bool { - if m != nil { - return m.Valid - } - return false -} - -type SharedKeyRequest struct { - // The ephemeral public key to use for the DH key derivation. - EphemeralPubkey []byte `protobuf:"bytes,1,opt,name=ephemeral_pubkey,json=ephemeralPubkey,proto3" json:"ephemeral_pubkey,omitempty"` - // - //Deprecated. The optional key locator of the local key that should be used. - //If this parameter is not set then the node's identity private key will be - //used. - KeyLoc *KeyLocator `protobuf:"bytes,2,opt,name=key_loc,json=keyLoc,proto3" json:"key_loc,omitempty"` // Deprecated: Do not use. - // - //A key descriptor describes the key used for performing ECDH. 
Either a key - //locator or a raw public key is expected, if neither is supplied, defaults to - //the node's identity private key. - KeyDesc *KeyDescriptor `protobuf:"bytes,3,opt,name=key_desc,json=keyDesc,proto3" json:"key_desc,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SharedKeyRequest) Reset() { *m = SharedKeyRequest{} } -func (m *SharedKeyRequest) String() string { return proto.CompactTextString(m) } -func (*SharedKeyRequest) ProtoMessage() {} -func (*SharedKeyRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_4ecd772f6c7ffacf, []int{12} -} - -func (m *SharedKeyRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SharedKeyRequest.Unmarshal(m, b) -} -func (m *SharedKeyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SharedKeyRequest.Marshal(b, m, deterministic) -} -func (m *SharedKeyRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SharedKeyRequest.Merge(m, src) -} -func (m *SharedKeyRequest) XXX_Size() int { - return xxx_messageInfo_SharedKeyRequest.Size(m) -} -func (m *SharedKeyRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SharedKeyRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_SharedKeyRequest proto.InternalMessageInfo - -func (m *SharedKeyRequest) GetEphemeralPubkey() []byte { - if m != nil { - return m.EphemeralPubkey - } - return nil -} - -// Deprecated: Do not use. -func (m *SharedKeyRequest) GetKeyLoc() *KeyLocator { - if m != nil { - return m.KeyLoc - } - return nil -} - -func (m *SharedKeyRequest) GetKeyDesc() *KeyDescriptor { - if m != nil { - return m.KeyDesc - } - return nil -} - -type SharedKeyResponse struct { - // The shared public key, hashed with sha256. 
- SharedKey []byte `protobuf:"bytes,1,opt,name=shared_key,json=sharedKey,proto3" json:"shared_key,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SharedKeyResponse) Reset() { *m = SharedKeyResponse{} } -func (m *SharedKeyResponse) String() string { return proto.CompactTextString(m) } -func (*SharedKeyResponse) ProtoMessage() {} -func (*SharedKeyResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_4ecd772f6c7ffacf, []int{13} -} - -func (m *SharedKeyResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SharedKeyResponse.Unmarshal(m, b) -} -func (m *SharedKeyResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SharedKeyResponse.Marshal(b, m, deterministic) -} -func (m *SharedKeyResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_SharedKeyResponse.Merge(m, src) -} -func (m *SharedKeyResponse) XXX_Size() int { - return xxx_messageInfo_SharedKeyResponse.Size(m) -} -func (m *SharedKeyResponse) XXX_DiscardUnknown() { - xxx_messageInfo_SharedKeyResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_SharedKeyResponse proto.InternalMessageInfo - -func (m *SharedKeyResponse) GetSharedKey() []byte { - if m != nil { - return m.SharedKey - } - return nil -} - -func init() { - proto.RegisterType((*KeyLocator)(nil), "signrpc.KeyLocator") - proto.RegisterType((*KeyDescriptor)(nil), "signrpc.KeyDescriptor") - proto.RegisterType((*TxOut)(nil), "signrpc.TxOut") - proto.RegisterType((*SignDescriptor)(nil), "signrpc.SignDescriptor") - proto.RegisterType((*SignReq)(nil), "signrpc.SignReq") - proto.RegisterType((*SignResp)(nil), "signrpc.SignResp") - proto.RegisterType((*InputScript)(nil), "signrpc.InputScript") - proto.RegisterType((*InputScriptResp)(nil), "signrpc.InputScriptResp") - proto.RegisterType((*SignMessageReq)(nil), "signrpc.SignMessageReq") - proto.RegisterType((*SignMessageResp)(nil), "signrpc.SignMessageResp") - 
proto.RegisterType((*VerifyMessageReq)(nil), "signrpc.VerifyMessageReq") - proto.RegisterType((*VerifyMessageResp)(nil), "signrpc.VerifyMessageResp") - proto.RegisterType((*SharedKeyRequest)(nil), "signrpc.SharedKeyRequest") - proto.RegisterType((*SharedKeyResponse)(nil), "signrpc.SharedKeyResponse") -} - -func init() { proto.RegisterFile("signrpc/signer.proto", fileDescriptor_4ecd772f6c7ffacf) } - -var fileDescriptor_4ecd772f6c7ffacf = []byte{ - // 771 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0xdd, 0x6e, 0xeb, 0x44, - 0x10, 0x56, 0x12, 0xf2, 0xd3, 0x71, 0xd2, 0xa4, 0x4b, 0x75, 0xf0, 0x29, 0x20, 0x82, 0xa5, 0x83, - 0x72, 0xd0, 0x21, 0x81, 0x80, 0x90, 0xe0, 0x0a, 0x95, 0xaa, 0x6a, 0x95, 0xa2, 0x56, 0x4e, 0xc5, - 0x45, 0x6f, 0xac, 0x8d, 0x33, 0x75, 0x56, 0x4e, 0xec, 0xad, 0xd7, 0x6e, 0xe2, 0xe7, 0xe0, 0x0d, - 0x78, 0x26, 0x1e, 0x08, 0xed, 0x4f, 0x12, 0x3b, 0x05, 0x54, 0x2e, 0xa2, 0xec, 0x7c, 0x3b, 0xfb, - 0xcd, 0x97, 0x6f, 0x66, 0xb3, 0x70, 0x2a, 0x58, 0x10, 0x25, 0xdc, 0x1f, 0xc9, 0x6f, 0x4c, 0x86, - 0x3c, 0x89, 0xd3, 0x98, 0x34, 0x0d, 0xea, 0x5c, 0x01, 0x4c, 0x30, 0xbf, 0x89, 0x7d, 0x9a, 0xc6, - 0x09, 0xf9, 0x1c, 0x20, 0xc4, 0xdc, 0x7b, 0xa4, 0x2b, 0xb6, 0xcc, 0xed, 0x4a, 0xbf, 0x32, 0xa8, - 0xbb, 0x47, 0x21, 0xe6, 0x97, 0x0a, 0x20, 0x9f, 0x82, 0x0c, 0x3c, 0x16, 0xcd, 0x71, 0x63, 0x57, - 0xd5, 0x6e, 0x2b, 0xc4, 0xfc, 0x5a, 0xc6, 0x0e, 0x85, 0xce, 0x04, 0xf3, 0x0b, 0x14, 0x7e, 0xc2, - 0xb8, 0x24, 0x73, 0xa0, 0x93, 0xd0, 0xb5, 0x27, 0x4f, 0xcc, 0xf2, 0x14, 0x85, 0xe2, 0x6b, 0xbb, - 0x56, 0x42, 0xd7, 0x13, 0xcc, 0xcf, 0x25, 0x44, 0x3e, 0x40, 0x53, 0xee, 0x2f, 0x63, 0x5f, 0xf1, - 0x59, 0xe3, 0x8f, 0x87, 0x46, 0xd9, 0x70, 0x2f, 0xcb, 0x6d, 0x84, 0x6a, 0xed, 0xfc, 0x0c, 0xf5, - 0xfb, 0xcd, 0x6d, 0x96, 0x92, 0x53, 0xa8, 0x3f, 0xd3, 0x65, 0x86, 0x8a, 0xb2, 0xe6, 0xea, 0x40, - 0xca, 0xe3, 0xa1, 0xa7, 0xeb, 0x2b, 0xba, 0xb6, 0xdb, 0xe2, 0xe1, 0x54, 0xc5, 0xce, 0x1f, 0x55, - 0x38, 0x9e, 0xb2, 0x20, 0x2a, 
0x08, 0xfc, 0x0e, 0xa4, 0x7a, 0x6f, 0x8e, 0xc2, 0x57, 0x44, 0xd6, - 0xf8, 0x4d, 0xb1, 0xfa, 0x3e, 0xd3, 0x95, 0x22, 0x65, 0x48, 0xbe, 0x84, 0xb6, 0x60, 0x51, 0xb0, - 0x44, 0x2f, 0x5d, 0x23, 0x0d, 0x4d, 0x15, 0x4b, 0x63, 0xf7, 0x12, 0x92, 0x29, 0xf3, 0x38, 0x9b, - 0xed, 0x52, 0x6a, 0x3a, 0x45, 0x63, 0x3a, 0xe5, 0x1d, 0x1c, 0xaf, 0x59, 0x1a, 0xa1, 0x10, 0x5b, - 0xb5, 0x1f, 0xa9, 0xa4, 0x8e, 0x41, 0xb5, 0x64, 0xf2, 0x15, 0x34, 0xe2, 0x2c, 0xe5, 0x59, 0x6a, - 0xd7, 0x95, 0xba, 0xe3, 0x9d, 0x3a, 0xe5, 0x82, 0x6b, 0x76, 0x89, 0x0d, 0xb2, 0x9d, 0x0b, 0x2a, - 0x16, 0x76, 0xb3, 0x5f, 0x19, 0x74, 0xdc, 0x6d, 0x48, 0xbe, 0x00, 0x8b, 0x45, 0x3c, 0x4b, 0x4d, - 0xcb, 0x5a, 0xaa, 0x65, 0xa0, 0x20, 0xdd, 0x34, 0x1f, 0x9a, 0xd2, 0x14, 0x17, 0x9f, 0x48, 0x1f, - 0xda, 0xb2, 0x5d, 0xe9, 0xa6, 0xd4, 0x2d, 0x48, 0xe8, 0xfa, 0x7e, 0xa3, 0x9b, 0xf5, 0x23, 0x80, - 0x14, 0xa0, 0x0c, 0x13, 0x76, 0xb5, 0x5f, 0x1b, 0x58, 0xe3, 0x4f, 0x76, 0x9a, 0xca, 0xe6, 0xba, - 0x47, 0xc2, 0xc4, 0xc2, 0x79, 0x07, 0x2d, 0x5d, 0x44, 0x70, 0xf2, 0x16, 0x5a, 0xb2, 0x8a, 0x60, - 0x81, 0xac, 0x50, 0x1b, 0xb4, 0xdd, 0x66, 0x42, 0xd7, 0x53, 0x16, 0x08, 0xe7, 0x12, 0xac, 0x6b, - 0xa9, 0xcc, 0xfc, 0x7a, 0x1b, 0x9a, 0xc6, 0x8e, 0x6d, 0xa2, 0x09, 0xe5, 0x94, 0x0a, 0x16, 0x94, - 0x1b, 0x2d, 0xcb, 0x99, 0x4e, 0xdf, 0x40, 0xb7, 0xc0, 0xa3, 0xaa, 0xfe, 0x04, 0x1d, 0xed, 0x83, - 0x3e, 0xa3, 0x19, 0xad, 0xf1, 0xe9, 0x4e, 0x7c, 0xf1, 0x40, 0x9b, 0xed, 0x03, 0xe1, 0xdc, 0xe9, - 0xb1, 0xf9, 0x0d, 0x85, 0xa0, 0x01, 0x4a, 0xa3, 0x7a, 0x50, 0x5b, 0x89, 0xc0, 0xf8, 0x23, 0x97, - 0xff, 0x73, 0x8a, 0x47, 0xd0, 0x2d, 0x31, 0x0a, 0x4e, 0x3e, 0x03, 0x65, 0x17, 0x4d, 0xb3, 0x04, - 0x0d, 0xf1, 0x1e, 0x70, 0x1e, 0xa0, 0xf7, 0x3b, 0x26, 0xec, 0x31, 0xff, 0x4f, 0x11, 0x25, 0x8e, - 0xea, 0x01, 0x07, 0x79, 0x03, 0x0d, 0x9e, 0xcd, 0x42, 0xcc, 0xcd, 0x3c, 0x9a, 0xc8, 0x79, 0x0f, - 0x27, 0x07, 0xdc, 0x82, 0x9b, 0xeb, 0xc5, 0xe6, 0x8a, 0xbe, 0xe5, 0xea, 0xc0, 0xf9, 0xb3, 0x02, - 0xbd, 0xe9, 0x82, 0x26, 0x38, 0x9f, 0x60, 0xee, 0xe2, 0x53, 0x86, 
0x22, 0x25, 0xef, 0xa1, 0x87, - 0x7c, 0x81, 0x2b, 0x4c, 0xe8, 0xd2, 0x33, 0x15, 0xb4, 0xa8, 0xee, 0x0e, 0xbf, 0x53, 0x30, 0xf9, - 0xf6, 0x35, 0x2e, 0x9d, 0x57, 0xed, 0xca, 0xd6, 0xa9, 0xd2, 0x05, 0xad, 0xbd, 0xea, 0x82, 0x3a, - 0x63, 0x38, 0x29, 0x68, 0x14, 0x3c, 0x8e, 0x04, 0xaa, 0x81, 0x51, 0xa0, 0xb7, 0x97, 0x77, 0x24, - 0xb6, 0x69, 0xe3, 0xbf, 0xaa, 0xd0, 0x98, 0xaa, 0x7f, 0x47, 0xf2, 0x03, 0x74, 0xe4, 0xea, 0x56, - 0x5d, 0x2c, 0x97, 0xae, 0x49, 0xaf, 0x34, 0xdf, 0x2e, 0x3e, 0x9d, 0x9d, 0x1c, 0x20, 0x82, 0x93, - 0x5f, 0x80, 0xfc, 0x1a, 0xaf, 0x78, 0x96, 0x62, 0x71, 0x80, 0x5f, 0x1e, 0xb5, 0xff, 0x71, 0xde, - 0x34, 0x83, 0x55, 0x98, 0x09, 0x52, 0xbe, 0x55, 0xfb, 0xb6, 0x17, 0x18, 0x0e, 0x47, 0xe8, 0x12, - 0x3a, 0xa5, 0x46, 0x92, 0xb7, 0xbb, 0xd4, 0xc3, 0xe1, 0x39, 0x3b, 0xfb, 0xb7, 0x2d, 0xc1, 0xc9, - 0x15, 0x74, 0x2f, 0x30, 0x61, 0xcf, 0xb8, 0xb3, 0xb1, 0xc0, 0x74, 0xd8, 0xfe, 0x02, 0xd3, 0x0b, - 0xd7, 0xcf, 0x3f, 0x3c, 0x7c, 0x1d, 0xb0, 0x74, 0x91, 0xcd, 0x86, 0x7e, 0xbc, 0x1a, 0xf1, 0x30, - 0xfd, 0xc6, 0xa7, 0x62, 0x21, 0x17, 0xf3, 0xd1, 0x32, 0x92, 0x9f, 0xed, 0xb3, 0x94, 0x70, 0x7f, - 0xd6, 0x50, 0x0f, 0xd3, 0xf7, 0x7f, 0x07, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x6d, 0xfa, 0x21, 0xb0, - 0x06, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// SignerClient is the client API for Signer service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type SignerClient interface { - // - //SignOutputRaw is a method that can be used to generated a signature for a - //set of inputs/outputs to a transaction. 
Each request specifies details - //concerning how the outputs should be signed, which keys they should be - //signed with, and also any optional tweaks. The return value is a fixed - //64-byte signature (the same format as we use on the wire in Lightning). - // - //If we are unable to sign using the specified keys, then an error will be - //returned. - SignOutputRaw(ctx context.Context, in *SignReq, opts ...grpc.CallOption) (*SignResp, error) - // - //ComputeInputScript generates a complete InputIndex for the passed - //transaction with the signature as defined within the passed SignDescriptor. - //This method should be capable of generating the proper input script for - //both regular p2wkh output and p2wkh outputs nested within a regular p2sh - //output. - // - //Note that when using this method to sign inputs belonging to the wallet, - //the only items of the SignDescriptor that need to be populated are pkScript - //in the TxOut field, the value in that same field, and finally the input - //index. - ComputeInputScript(ctx context.Context, in *SignReq, opts ...grpc.CallOption) (*InputScriptResp, error) - // - //SignMessage signs a message with the key specified in the key locator. The - //returned signature is fixed-size LN wire format encoded. - // - //The main difference to SignMessage in the main RPC is that a specific key is - //used to sign the message instead of the node identity private key. - SignMessage(ctx context.Context, in *SignMessageReq, opts ...grpc.CallOption) (*SignMessageResp, error) - // - //VerifyMessage verifies a signature over a message using the public key - //provided. The signature must be fixed-size LN wire format encoded. - // - //The main difference to VerifyMessage in the main RPC is that the public key - //used to sign the message does not have to be a node known to the network. 
- VerifyMessage(ctx context.Context, in *VerifyMessageReq, opts ...grpc.CallOption) (*VerifyMessageResp, error) - // - //DeriveSharedKey returns a shared secret key by performing Diffie-Hellman key - //derivation between the ephemeral public key in the request and the node's - //key specified in the key_desc parameter. Either a key locator or a raw - //public key is expected in the key_desc, if neither is supplied, defaults to - //the node's identity private key: - //P_shared = privKeyNode * ephemeralPubkey - //The resulting shared public key is serialized in the compressed format and - //hashed with sha256, resulting in the final key length of 256bit. - DeriveSharedKey(ctx context.Context, in *SharedKeyRequest, opts ...grpc.CallOption) (*SharedKeyResponse, error) -} - -type signerClient struct { - cc *grpc.ClientConn -} - -func NewSignerClient(cc *grpc.ClientConn) SignerClient { - return &signerClient{cc} -} - -func (c *signerClient) SignOutputRaw(ctx context.Context, in *SignReq, opts ...grpc.CallOption) (*SignResp, error) { - out := new(SignResp) - err := c.cc.Invoke(ctx, "/signrpc.Signer/SignOutputRaw", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *signerClient) ComputeInputScript(ctx context.Context, in *SignReq, opts ...grpc.CallOption) (*InputScriptResp, error) { - out := new(InputScriptResp) - err := c.cc.Invoke(ctx, "/signrpc.Signer/ComputeInputScript", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *signerClient) SignMessage(ctx context.Context, in *SignMessageReq, opts ...grpc.CallOption) (*SignMessageResp, error) { - out := new(SignMessageResp) - err := c.cc.Invoke(ctx, "/signrpc.Signer/SignMessage", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *signerClient) VerifyMessage(ctx context.Context, in *VerifyMessageReq, opts ...grpc.CallOption) (*VerifyMessageResp, error) { - out := new(VerifyMessageResp) - err := c.cc.Invoke(ctx, "/signrpc.Signer/VerifyMessage", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *signerClient) DeriveSharedKey(ctx context.Context, in *SharedKeyRequest, opts ...grpc.CallOption) (*SharedKeyResponse, error) { - out := new(SharedKeyResponse) - err := c.cc.Invoke(ctx, "/signrpc.Signer/DeriveSharedKey", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// SignerServer is the server API for Signer service. -type SignerServer interface { - // - //SignOutputRaw is a method that can be used to generated a signature for a - //set of inputs/outputs to a transaction. Each request specifies details - //concerning how the outputs should be signed, which keys they should be - //signed with, and also any optional tweaks. The return value is a fixed - //64-byte signature (the same format as we use on the wire in Lightning). - // - //If we are unable to sign using the specified keys, then an error will be - //returned. - SignOutputRaw(context.Context, *SignReq) (*SignResp, error) - // - //ComputeInputScript generates a complete InputIndex for the passed - //transaction with the signature as defined within the passed SignDescriptor. - //This method should be capable of generating the proper input script for - //both regular p2wkh output and p2wkh outputs nested within a regular p2sh - //output. - // - //Note that when using this method to sign inputs belonging to the wallet, - //the only items of the SignDescriptor that need to be populated are pkScript - //in the TxOut field, the value in that same field, and finally the input - //index. 
- ComputeInputScript(context.Context, *SignReq) (*InputScriptResp, error) - // - //SignMessage signs a message with the key specified in the key locator. The - //returned signature is fixed-size LN wire format encoded. - // - //The main difference to SignMessage in the main RPC is that a specific key is - //used to sign the message instead of the node identity private key. - SignMessage(context.Context, *SignMessageReq) (*SignMessageResp, error) - // - //VerifyMessage verifies a signature over a message using the public key - //provided. The signature must be fixed-size LN wire format encoded. - // - //The main difference to VerifyMessage in the main RPC is that the public key - //used to sign the message does not have to be a node known to the network. - VerifyMessage(context.Context, *VerifyMessageReq) (*VerifyMessageResp, error) - // - //DeriveSharedKey returns a shared secret key by performing Diffie-Hellman key - //derivation between the ephemeral public key in the request and the node's - //key specified in the key_desc parameter. Either a key locator or a raw - //public key is expected in the key_desc, if neither is supplied, defaults to - //the node's identity private key: - //P_shared = privKeyNode * ephemeralPubkey - //The resulting shared public key is serialized in the compressed format and - //hashed with sha256, resulting in the final key length of 256bit. - DeriveSharedKey(context.Context, *SharedKeyRequest) (*SharedKeyResponse, error) -} - -// UnimplementedSignerServer can be embedded to have forward compatible implementations. 
-type UnimplementedSignerServer struct { -} - -func (*UnimplementedSignerServer) SignOutputRaw(ctx context.Context, req *SignReq) (*SignResp, error) { - return nil, status.Errorf(codes.Unimplemented, "method SignOutputRaw not implemented") -} -func (*UnimplementedSignerServer) ComputeInputScript(ctx context.Context, req *SignReq) (*InputScriptResp, error) { - return nil, status.Errorf(codes.Unimplemented, "method ComputeInputScript not implemented") -} -func (*UnimplementedSignerServer) SignMessage(ctx context.Context, req *SignMessageReq) (*SignMessageResp, error) { - return nil, status.Errorf(codes.Unimplemented, "method SignMessage not implemented") -} -func (*UnimplementedSignerServer) VerifyMessage(ctx context.Context, req *VerifyMessageReq) (*VerifyMessageResp, error) { - return nil, status.Errorf(codes.Unimplemented, "method VerifyMessage not implemented") -} -func (*UnimplementedSignerServer) DeriveSharedKey(ctx context.Context, req *SharedKeyRequest) (*SharedKeyResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeriveSharedKey not implemented") -} - -func RegisterSignerServer(s *grpc.Server, srv SignerServer) { - s.RegisterService(&_Signer_serviceDesc, srv) -} - -func _Signer_SignOutputRaw_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SignReq) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SignerServer).SignOutputRaw(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/signrpc.Signer/SignOutputRaw", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SignerServer).SignOutputRaw(ctx, req.(*SignReq)) - } - return interceptor(ctx, in, info, handler) -} - -func _Signer_ComputeInputScript_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, 
error) { - in := new(SignReq) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SignerServer).ComputeInputScript(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/signrpc.Signer/ComputeInputScript", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SignerServer).ComputeInputScript(ctx, req.(*SignReq)) - } - return interceptor(ctx, in, info, handler) -} - -func _Signer_SignMessage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SignMessageReq) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SignerServer).SignMessage(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/signrpc.Signer/SignMessage", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SignerServer).SignMessage(ctx, req.(*SignMessageReq)) - } - return interceptor(ctx, in, info, handler) -} - -func _Signer_VerifyMessage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(VerifyMessageReq) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SignerServer).VerifyMessage(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/signrpc.Signer/VerifyMessage", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SignerServer).VerifyMessage(ctx, req.(*VerifyMessageReq)) - } - return interceptor(ctx, in, info, handler) -} - -func _Signer_DeriveSharedKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SharedKeyRequest) - if err := dec(in); err != nil { - return nil, err - } - 
if interceptor == nil { - return srv.(SignerServer).DeriveSharedKey(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/signrpc.Signer/DeriveSharedKey", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SignerServer).DeriveSharedKey(ctx, req.(*SharedKeyRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Signer_serviceDesc = grpc.ServiceDesc{ - ServiceName: "signrpc.Signer", - HandlerType: (*SignerServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "SignOutputRaw", - Handler: _Signer_SignOutputRaw_Handler, - }, - { - MethodName: "ComputeInputScript", - Handler: _Signer_ComputeInputScript_Handler, - }, - { - MethodName: "SignMessage", - Handler: _Signer_SignMessage_Handler, - }, - { - MethodName: "VerifyMessage", - Handler: _Signer_VerifyMessage_Handler, - }, - { - MethodName: "DeriveSharedKey", - Handler: _Signer_DeriveSharedKey_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "signrpc/signer.proto", -} diff --git a/lnd/lnrpc/signrpc/signer.pb.gw.go b/lnd/lnrpc/signrpc/signer.pb.gw.go deleted file mode 100644 index 0247c1a0..00000000 --- a/lnd/lnrpc/signrpc/signer.pb.gw.go +++ /dev/null @@ -1,475 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. -// source: signrpc/signer.proto - -/* -Package signrpc is a reverse proxy. - -It translates gRPC into RESTful JSON APIs. 
-*/ -package signrpc - -import ( - "context" - "io" - "net/http" - - "github.com/golang/protobuf/descriptor" - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/status" -) - -// Suppress "imported and not used" errors -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray -var _ = descriptor.ForMessage - -func request_Signer_SignOutputRaw_0(ctx context.Context, marshaler runtime.Marshaler, client SignerClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq SignReq - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.SignOutputRaw(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Signer_SignOutputRaw_0(ctx context.Context, marshaler runtime.Marshaler, server SignerServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq SignReq - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.SignOutputRaw(ctx, &protoReq) - return msg, metadata, 
err - -} - -func request_Signer_ComputeInputScript_0(ctx context.Context, marshaler runtime.Marshaler, client SignerClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq SignReq - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.ComputeInputScript(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Signer_ComputeInputScript_0(ctx context.Context, marshaler runtime.Marshaler, server SignerServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq SignReq - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.ComputeInputScript(ctx, &protoReq) - return msg, metadata, err - -} - -func request_Signer_SignMessage_0(ctx context.Context, marshaler runtime.Marshaler, client SignerClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq SignMessageReq - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return 
nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.SignMessage(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Signer_SignMessage_0(ctx context.Context, marshaler runtime.Marshaler, server SignerServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq SignMessageReq - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.SignMessage(ctx, &protoReq) - return msg, metadata, err - -} - -func request_Signer_VerifyMessage_0(ctx context.Context, marshaler runtime.Marshaler, client SignerClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq VerifyMessageReq - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.VerifyMessage(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Signer_VerifyMessage_0(ctx context.Context, marshaler runtime.Marshaler, server SignerServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq VerifyMessageReq - var metadata runtime.ServerMetadata - - newReader, berr := 
utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.VerifyMessage(ctx, &protoReq) - return msg, metadata, err - -} - -func request_Signer_DeriveSharedKey_0(ctx context.Context, marshaler runtime.Marshaler, client SignerClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq SharedKeyRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.DeriveSharedKey(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Signer_DeriveSharedKey_0(ctx context.Context, marshaler runtime.Marshaler, server SignerServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq SharedKeyRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.DeriveSharedKey(ctx, &protoReq) - return msg, metadata, err - -} - -// RegisterSignerHandlerServer registers the http handlers for service Signer to "mux". -// UnaryRPC :call SignerServer directly. 
-// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -func RegisterSignerHandlerServer(ctx context.Context, mux *runtime.ServeMux, server SignerServer) error { - - mux.Handle("POST", pattern_Signer_SignOutputRaw_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Signer_SignOutputRaw_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Signer_SignOutputRaw_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Signer_ComputeInputScript_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Signer_ComputeInputScript_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Signer_ComputeInputScript_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_Signer_SignMessage_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Signer_SignMessage_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Signer_SignMessage_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Signer_VerifyMessage_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Signer_VerifyMessage_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Signer_VerifyMessage_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_Signer_DeriveSharedKey_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Signer_DeriveSharedKey_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Signer_DeriveSharedKey_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -// RegisterSignerHandlerFromEndpoint is same as RegisterSignerHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterSignerHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) - if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterSignerHandler(ctx, mux, conn) -} - -// RegisterSignerHandler registers the http handlers for service Signer to "mux". -// The handlers forward requests to the grpc endpoint over "conn". 
-func RegisterSignerHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterSignerHandlerClient(ctx, mux, NewSignerClient(conn)) -} - -// RegisterSignerHandlerClient registers the http handlers for service Signer -// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "SignerClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "SignerClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "SignerClient" to call the correct interceptors. -func RegisterSignerHandlerClient(ctx context.Context, mux *runtime.ServeMux, client SignerClient) error { - - mux.Handle("POST", pattern_Signer_SignOutputRaw_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Signer_SignOutputRaw_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Signer_SignOutputRaw_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_Signer_ComputeInputScript_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Signer_ComputeInputScript_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Signer_ComputeInputScript_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Signer_SignMessage_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Signer_SignMessage_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Signer_SignMessage_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_Signer_VerifyMessage_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Signer_VerifyMessage_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Signer_VerifyMessage_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Signer_DeriveSharedKey_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Signer_DeriveSharedKey_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Signer_DeriveSharedKey_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - return nil -} - -var ( - pattern_Signer_SignOutputRaw_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v2", "signer", "signraw"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Signer_ComputeInputScript_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v2", "signer", "inputscript"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Signer_SignMessage_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v2", "signer", "signmessage"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Signer_VerifyMessage_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v2", "signer", "verifymessage"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Signer_DeriveSharedKey_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v2", "signer", "sharedkey"}, "", runtime.AssumeColonVerbOpt(true))) -) - -var ( - forward_Signer_SignOutputRaw_0 = runtime.ForwardResponseMessage - - forward_Signer_ComputeInputScript_0 = runtime.ForwardResponseMessage - - forward_Signer_SignMessage_0 = runtime.ForwardResponseMessage - - forward_Signer_VerifyMessage_0 = runtime.ForwardResponseMessage - - forward_Signer_DeriveSharedKey_0 = runtime.ForwardResponseMessage -) diff --git a/lnd/lnrpc/signrpc/signer.proto b/lnd/lnrpc/signrpc/signer.proto deleted file mode 100644 index 71c15b87..00000000 --- a/lnd/lnrpc/signrpc/signer.proto +++ /dev/null @@ -1,241 +0,0 @@ -syntax = "proto3"; - -package signrpc; - -option go_package = "github.com/pkt-cash/pktd/lnd/lnrpc/signrpc"; - -// Signer is a service that gives access to the signing functionality of the -// daemon's wallet. -service Signer { - /* - SignOutputRaw is a method that can be used to generated a signature for a - set of inputs/outputs to a transaction. 
Each request specifies details - concerning how the outputs should be signed, which keys they should be - signed with, and also any optional tweaks. The return value is a fixed - 64-byte signature (the same format as we use on the wire in Lightning). - - If we are unable to sign using the specified keys, then an error will be - returned. - */ - rpc SignOutputRaw (SignReq) returns (SignResp); - - /* - ComputeInputScript generates a complete InputIndex for the passed - transaction with the signature as defined within the passed SignDescriptor. - This method should be capable of generating the proper input script for - both regular p2wkh output and p2wkh outputs nested within a regular p2sh - output. - - Note that when using this method to sign inputs belonging to the wallet, - the only items of the SignDescriptor that need to be populated are pkScript - in the TxOut field, the value in that same field, and finally the input - index. - */ - rpc ComputeInputScript (SignReq) returns (InputScriptResp); - - /* - SignMessage signs a message with the key specified in the key locator. The - returned signature is fixed-size LN wire format encoded. - - The main difference to SignMessage in the main RPC is that a specific key is - used to sign the message instead of the node identity private key. - */ - rpc SignMessage (SignMessageReq) returns (SignMessageResp); - - /* - VerifyMessage verifies a signature over a message using the public key - provided. The signature must be fixed-size LN wire format encoded. - - The main difference to VerifyMessage in the main RPC is that the public key - used to sign the message does not have to be a node known to the network. - */ - rpc VerifyMessage (VerifyMessageReq) returns (VerifyMessageResp); - - /* - DeriveSharedKey returns a shared secret key by performing Diffie-Hellman key - derivation between the ephemeral public key in the request and the node's - key specified in the key_desc parameter. 
Either a key locator or a raw - public key is expected in the key_desc, if neither is supplied, defaults to - the node's identity private key: - P_shared = privKeyNode * ephemeralPubkey - The resulting shared public key is serialized in the compressed format and - hashed with sha256, resulting in the final key length of 256bit. - */ - rpc DeriveSharedKey (SharedKeyRequest) returns (SharedKeyResponse); -} - -message KeyLocator { - // The family of key being identified. - int32 key_family = 1; - - // The precise index of the key being identified. - int32 key_index = 2; -} - -message KeyDescriptor { - /* - The raw bytes of the key being identified. Either this or the KeyLocator - must be specified. - */ - bytes raw_key_bytes = 1; - - /* - The key locator that identifies which key to use for signing. Either this - or the raw bytes of the target key must be specified. - */ - KeyLocator key_loc = 2; -} - -message TxOut { - // The value of the output being spent. - int64 value = 1; - - // The script of the output being spent. - bytes pk_script = 2; -} - -message SignDescriptor { - /* - A descriptor that precisely describes *which* key to use for signing. This - may provide the raw public key directly, or require the Signer to re-derive - the key according to the populated derivation path. - - Note that if the key descriptor was obtained through walletrpc.DeriveKey, - then the key locator MUST always be provided, since the derived keys are not - persisted unlike with DeriveNextKey. - */ - KeyDescriptor key_desc = 1; - - /* - A scalar value that will be added to the private key corresponding to the - above public key to obtain the private key to be used to sign this input. 
- This value is typically derived via the following computation: - - * derivedKey = privkey + sha256(perCommitmentPoint || pubKey) mod N - */ - bytes single_tweak = 2; - - /* - A private key that will be used in combination with its corresponding - private key to derive the private key that is to be used to sign the target - input. Within the Lightning protocol, this value is typically the - commitment secret from a previously revoked commitment transaction. This - value is in combination with two hash values, and the original private key - to derive the private key to be used when signing. - - * k = (privKey*sha256(pubKey || tweakPub) + - tweakPriv*sha256(tweakPub || pubKey)) mod N - */ - bytes double_tweak = 3; - - /* - The full script required to properly redeem the output. This field will - only be populated if a p2wsh or a p2sh output is being signed. - */ - bytes witness_script = 4; - - /* - A description of the output being spent. The value and script MUST be - provided. - */ - TxOut output = 5; - - /* - The target sighash type that should be used when generating the final - sighash, and signature. - */ - uint32 sighash = 7; - - /* - The target input within the transaction that should be signed. - */ - int32 input_index = 8; -} - -message SignReq { - // The raw bytes of the transaction to be signed. - bytes raw_tx_bytes = 1; - - // A set of sign descriptors, for each input to be signed. - repeated SignDescriptor sign_descs = 2; -} - -message SignResp { - /* - A set of signatures realized in a fixed 64-byte format ordered in ascending - input order. - */ - repeated bytes raw_sigs = 1; -} - -message InputScript { - // The serializes witness stack for the specified input. - repeated bytes witness = 1; - - /* - The optional sig script for the specified witness that will only be set if - the input specified is a nested p2sh witness program. - */ - bytes sig_script = 2; -} - -message InputScriptResp { - // The set of fully valid input scripts requested. 
- repeated InputScript input_scripts = 1; -} - -message SignMessageReq { - // The message to be signed. - bytes msg = 1; - - // The key locator that identifies which key to use for signing. - KeyLocator key_loc = 2; -} -message SignMessageResp { - /* - The signature for the given message in the fixed-size LN wire format. - */ - bytes signature = 1; -} - -message VerifyMessageReq { - // The message over which the signature is to be verified. - bytes msg = 1; - - /* - The fixed-size LN wire encoded signature to be verified over the given - message. - */ - bytes signature = 2; - - // The public key the signature has to be valid for. - bytes pubkey = 3; -} -message VerifyMessageResp { - // Whether the signature was valid over the given message. - bool valid = 1; -} - -message SharedKeyRequest { - // The ephemeral public key to use for the DH key derivation. - bytes ephemeral_pubkey = 1; - - /* - Deprecated. The optional key locator of the local key that should be used. - If this parameter is not set then the node's identity private key will be - used. - */ - KeyLocator key_loc = 2 [deprecated = true]; - - /* - A key descriptor describes the key used for performing ECDH. Either a key - locator or a raw public key is expected, if neither is supplied, defaults to - the node's identity private key. - */ - KeyDescriptor key_desc = 3; -} - -message SharedKeyResponse { - // The shared public key, hashed with sha256. 
- bytes shared_key = 1; -} diff --git a/lnd/lnrpc/signrpc/signer.swagger.json b/lnd/lnrpc/signrpc/signer.swagger.json deleted file mode 100644 index 10e5fc26..00000000 --- a/lnd/lnrpc/signrpc/signer.swagger.json +++ /dev/null @@ -1,443 +0,0 @@ -{ - "swagger": "2.0", - "info": { - "title": "signrpc/signer.proto", - "version": "version not set" - }, - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "paths": { - "/v2/signer/inputscript": { - "post": { - "summary": "ComputeInputScript generates a complete InputIndex for the passed\ntransaction with the signature as defined within the passed SignDescriptor.\nThis method should be capable of generating the proper input script for\nboth regular p2wkh output and p2wkh outputs nested within a regular p2sh\noutput.", - "description": "Note that when using this method to sign inputs belonging to the wallet,\nthe only items of the SignDescriptor that need to be populated are pkScript\nin the TxOut field, the value in that same field, and finally the input\nindex.", - "operationId": "Signer_ComputeInputScript", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/signrpcInputScriptResp" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/signrpcSignReq" - } - } - ], - "tags": [ - "Signer" - ] - } - }, - "/v2/signer/sharedkey": { - "post": { - "summary": "DeriveSharedKey returns a shared secret key by performing Diffie-Hellman key\nderivation between the ephemeral public key in the request and the node's\nkey specified in the key_desc parameter. 
Either a key locator or a raw\npublic key is expected in the key_desc, if neither is supplied, defaults to\nthe node's identity private key:\nP_shared = privKeyNode * ephemeralPubkey\nThe resulting shared public key is serialized in the compressed format and\nhashed with sha256, resulting in the final key length of 256bit.", - "operationId": "Signer_DeriveSharedKey", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/signrpcSharedKeyResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/signrpcSharedKeyRequest" - } - } - ], - "tags": [ - "Signer" - ] - } - }, - "/v2/signer/signmessage": { - "post": { - "summary": "SignMessage signs a message with the key specified in the key locator. The\nreturned signature is fixed-size LN wire format encoded.", - "description": "The main difference to SignMessage in the main RPC is that a specific key is\nused to sign the message instead of the node identity private key.", - "operationId": "Signer_SignMessage", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/signrpcSignMessageResp" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/signrpcSignMessageReq" - } - } - ], - "tags": [ - "Signer" - ] - } - }, - "/v2/signer/signraw": { - "post": { - "summary": "SignOutputRaw is a method that can be used to generated a signature for a\nset of inputs/outputs to a transaction. 
Each request specifies details\nconcerning how the outputs should be signed, which keys they should be\nsigned with, and also any optional tweaks. The return value is a fixed\n64-byte signature (the same format as we use on the wire in Lightning).", - "description": "If we are unable to sign using the specified keys, then an error will be\nreturned.", - "operationId": "Signer_SignOutputRaw", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/signrpcSignResp" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/signrpcSignReq" - } - } - ], - "tags": [ - "Signer" - ] - } - }, - "/v2/signer/verifymessage": { - "post": { - "summary": "VerifyMessage verifies a signature over a message using the public key\nprovided. The signature must be fixed-size LN wire format encoded.", - "description": "The main difference to VerifyMessage in the main RPC is that the public key\nused to sign the message does not have to be a node known to the network.", - "operationId": "Signer_VerifyMessage", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/signrpcVerifyMessageResp" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/signrpcVerifyMessageReq" - } - } - ], - "tags": [ - "Signer" - ] - } - } - }, - "definitions": { - "protobufAny": { - "type": "object", - "properties": { - "type_url": { - "type": "string" - }, - "value": { - "type": "string", - "format": "byte" - } - } - }, - "runtimeError": { - "type": "object", - "properties": { - "error": { - "type": "string" - }, - 
"code": { - "type": "integer", - "format": "int32" - }, - "message": { - "type": "string" - }, - "details": { - "type": "array", - "items": { - "$ref": "#/definitions/protobufAny" - } - } - } - }, - "signrpcInputScript": { - "type": "object", - "properties": { - "witness": { - "type": "array", - "items": { - "type": "string", - "format": "byte" - }, - "description": "The serializes witness stack for the specified input." - }, - "sig_script": { - "type": "string", - "format": "byte", - "description": "The optional sig script for the specified witness that will only be set if\nthe input specified is a nested p2sh witness program." - } - } - }, - "signrpcInputScriptResp": { - "type": "object", - "properties": { - "input_scripts": { - "type": "array", - "items": { - "$ref": "#/definitions/signrpcInputScript" - }, - "description": "The set of fully valid input scripts requested." - } - } - }, - "signrpcKeyDescriptor": { - "type": "object", - "properties": { - "raw_key_bytes": { - "type": "string", - "format": "byte", - "description": "The raw bytes of the key being identified. Either this or the KeyLocator\nmust be specified." - }, - "key_loc": { - "$ref": "#/definitions/signrpcKeyLocator", - "description": "The key locator that identifies which key to use for signing. Either this\nor the raw bytes of the target key must be specified." - } - } - }, - "signrpcKeyLocator": { - "type": "object", - "properties": { - "key_family": { - "type": "integer", - "format": "int32", - "description": "The family of key being identified." - }, - "key_index": { - "type": "integer", - "format": "int32", - "description": "The precise index of the key being identified." - } - } - }, - "signrpcSharedKeyRequest": { - "type": "object", - "properties": { - "ephemeral_pubkey": { - "type": "string", - "format": "byte", - "description": "The ephemeral public key to use for the DH key derivation." - }, - "key_loc": { - "$ref": "#/definitions/signrpcKeyLocator", - "description": "Deprecated. 
The optional key locator of the local key that should be used.\nIf this parameter is not set then the node's identity private key will be\nused." - }, - "key_desc": { - "$ref": "#/definitions/signrpcKeyDescriptor", - "description": "A key descriptor describes the key used for performing ECDH. Either a key\nlocator or a raw public key is expected, if neither is supplied, defaults to\nthe node's identity private key." - } - } - }, - "signrpcSharedKeyResponse": { - "type": "object", - "properties": { - "shared_key": { - "type": "string", - "format": "byte", - "description": "The shared public key, hashed with sha256." - } - } - }, - "signrpcSignDescriptor": { - "type": "object", - "properties": { - "key_desc": { - "$ref": "#/definitions/signrpcKeyDescriptor", - "description": "A descriptor that precisely describes *which* key to use for signing. This\nmay provide the raw public key directly, or require the Signer to re-derive\nthe key according to the populated derivation path.\n\nNote that if the key descriptor was obtained through walletrpc.DeriveKey,\nthen the key locator MUST always be provided, since the derived keys are not\npersisted unlike with DeriveNextKey." - }, - "single_tweak": { - "type": "string", - "format": "byte", - "description": "derivedKey = privkey + sha256(perCommitmentPoint || pubKey) mod N", - "title": "A scalar value that will be added to the private key corresponding to the\nabove public key to obtain the private key to be used to sign this input.\nThis value is typically derived via the following computation:" - }, - "double_tweak": { - "type": "string", - "format": "byte", - "description": "A private key that will be used in combination with its corresponding\nprivate key to derive the private key that is to be used to sign the target\ninput. Within the Lightning protocol, this value is typically the\ncommitment secret from a previously revoked commitment transaction. 
This\nvalue is in combination with two hash values, and the original private key\nto derive the private key to be used when signing.\n\nk = (privKey*sha256(pubKey || tweakPub) +\ntweakPriv*sha256(tweakPub || pubKey)) mod N" - }, - "witness_script": { - "type": "string", - "format": "byte", - "description": "The full script required to properly redeem the output. This field will\nonly be populated if a p2wsh or a p2sh output is being signed." - }, - "output": { - "$ref": "#/definitions/signrpcTxOut", - "description": "A description of the output being spent. The value and script MUST be\nprovided." - }, - "sighash": { - "type": "integer", - "format": "int64", - "description": "The target sighash type that should be used when generating the final\nsighash, and signature." - }, - "input_index": { - "type": "integer", - "format": "int32", - "description": "The target input within the transaction that should be signed." - } - } - }, - "signrpcSignMessageReq": { - "type": "object", - "properties": { - "msg": { - "type": "string", - "format": "byte", - "description": "The message to be signed." - }, - "key_loc": { - "$ref": "#/definitions/signrpcKeyLocator", - "description": "The key locator that identifies which key to use for signing." - } - } - }, - "signrpcSignMessageResp": { - "type": "object", - "properties": { - "signature": { - "type": "string", - "format": "byte", - "description": "The signature for the given message in the fixed-size LN wire format." - } - } - }, - "signrpcSignReq": { - "type": "object", - "properties": { - "raw_tx_bytes": { - "type": "string", - "format": "byte", - "description": "The raw bytes of the transaction to be signed." - }, - "sign_descs": { - "type": "array", - "items": { - "$ref": "#/definitions/signrpcSignDescriptor" - }, - "description": "A set of sign descriptors, for each input to be signed." 
- } - } - }, - "signrpcSignResp": { - "type": "object", - "properties": { - "raw_sigs": { - "type": "array", - "items": { - "type": "string", - "format": "byte" - }, - "description": "A set of signatures realized in a fixed 64-byte format ordered in ascending\ninput order." - } - } - }, - "signrpcTxOut": { - "type": "object", - "properties": { - "value": { - "type": "string", - "format": "int64", - "description": "The value of the output being spent." - }, - "pk_script": { - "type": "string", - "format": "byte", - "description": "The script of the output being spent." - } - } - }, - "signrpcVerifyMessageReq": { - "type": "object", - "properties": { - "msg": { - "type": "string", - "format": "byte", - "description": "The message over which the signature is to be verified." - }, - "signature": { - "type": "string", - "format": "byte", - "description": "The fixed-size LN wire encoded signature to be verified over the given\nmessage." - }, - "pubkey": { - "type": "string", - "format": "byte", - "description": "The public key the signature has to be valid for." - } - } - }, - "signrpcVerifyMessageResp": { - "type": "object", - "properties": { - "valid": { - "type": "boolean", - "description": "Whether the signature was valid over the given message." 
- } - } - } - } -} diff --git a/lnd/lnrpc/signrpc/signer_server.go b/lnd/lnrpc/signrpc/signer_server.go deleted file mode 100644 index aa14b441..00000000 --- a/lnd/lnrpc/signrpc/signer_server.go +++ /dev/null @@ -1,608 +0,0 @@ -// +build signrpc - -package signrpc - -import ( - "bytes" - "context" - "fmt" - "io/ioutil" - "os" - "path/filepath" - - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/lnd/input" - "github.com/pkt-cash/pktd/lnd/keychain" - "github.com/pkt-cash/pktd/lnd/lnrpc" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/macaroons" - "github.com/pkt-cash/pktd/txscript" - "github.com/pkt-cash/pktd/txscript/params" - "github.com/pkt-cash/pktd/wire" - "google.golang.org/grpc" - "gopkg.in/macaroon-bakery.v2/bakery" -) - -const ( - // subServerName is the name of the sub rpc server. We'll use this name - // to register ourselves, and we also require that the main - // SubServerConfigDispatcher instance recognize this as the name of the - // config file that we need. - subServerName = "SignRPC" -) - -var ( - // macaroonOps are the set of capabilities that our minted macaroon (if - // it doesn't already exist) will have. - macaroonOps = []bakery.Op{ - { - Entity: "signer", - Action: "generate", - }, - { - Entity: "signer", - Action: "read", - }, - } - - // macPermissions maps RPC calls to the permissions they require. 
- macPermissions = map[string][]bakery.Op{ - "/signrpc.Signer/SignOutputRaw": {{ - Entity: "signer", - Action: "generate", - }}, - "/signrpc.Signer/ComputeInputScript": {{ - Entity: "signer", - Action: "generate", - }}, - "/signrpc.Signer/SignMessage": {{ - Entity: "signer", - Action: "generate", - }}, - "/signrpc.Signer/VerifyMessage": {{ - Entity: "signer", - Action: "read", - }}, - "/signrpc.Signer/DeriveSharedKey": {{ - Entity: "signer", - Action: "generate", - }}, - } - - // DefaultSignerMacFilename is the default name of the signer macaroon - // that we expect to find via a file handle within the main - // configuration file in this package. - DefaultSignerMacFilename = "signer.macaroon" -) - -// Server is a sub-server of the main RPC server: the signer RPC. This sub RPC -// server allows external callers to access the full signing capabilities of -// lnd. This allows callers to create custom protocols, external to lnd, even -// backed by multiple distinct lnd across independent failure domains. -type Server struct { - cfg *Config -} - -// A compile time check to ensure that Server fully implements the SignerServer -// gRPC service. -var _ SignerServer = (*Server)(nil) - -// New returns a new instance of the signrpc Signer sub-server. We also return -// the set of permissions for the macaroons that we may create within this -// method. If the macaroons we need aren't found in the filepath, then we'll -// create them on start up. If we're unable to locate, or create the macaroons -// we need, then we'll return with an error. -func New(cfg *Config) (*Server, lnrpc.MacaroonPerms, er.R) { - // If the path of the signer macaroon wasn't generated, then we'll - // assume that it's found at the default network directory. - if cfg.SignerMacPath == "" { - cfg.SignerMacPath = filepath.Join( - cfg.NetworkDir, DefaultSignerMacFilename, - ) - } - - // Now that we know the full path of the signer macaroon, we can check - // to see if we need to create it or not. 
If stateless_init is set - // then we don't write the macaroons. - macFilePath := cfg.SignerMacPath - if cfg.MacService != nil && !cfg.MacService.StatelessInit && - !lnrpc.FileExists(macFilePath) { - - log.Infof("Making macaroons for Signer RPC Server at: %v", - macFilePath) - - // At this point, we know that the signer macaroon doesn't yet, - // exist, so we need to create it with the help of the main - // macaroon service. - signerMac, err := cfg.MacService.NewMacaroon( - context.Background(), macaroons.DefaultRootKeyID, - macaroonOps..., - ) - if err != nil { - return nil, nil, err - } - signerMacBytes, err := signerMac.M().MarshalBinary() - if err != nil { - return nil, nil, err - } - err = ioutil.WriteFile(macFilePath, signerMacBytes, 0644) - if err != nil { - _ = os.Remove(macFilePath) - return nil, nil, err - } - } - - signerServer := &Server{ - cfg: cfg, - } - - return signerServer, macPermissions, nil -} - -// Start launches any helper goroutines required for the rpcServer to function. -// -// NOTE: This is part of the lnrpc.SubServer interface. -func (s *Server) Start() er.R { - return nil -} - -// Stop signals any active goroutines for a graceful closure. -// -// NOTE: This is part of the lnrpc.SubServer interface. -func (s *Server) Stop() er.R { - return nil -} - -// Name returns a unique string representation of the sub-server. This can be -// used to identify the sub-server and also de-duplicate them. -// -// NOTE: This is part of the lnrpc.SubServer interface. -func (s *Server) Name() string { - return subServerName -} - -// RegisterWithRootServer will be called by the root gRPC server to direct a -// sub RPC server to register itself with the main gRPC root server. Until this -// is called, each sub-server won't be able to have -// requests routed towards it. -// -// NOTE: This is part of the lnrpc.SubServer interface. 
-func (s *Server) RegisterWithRootServer(grpcServer *grpc.Server) er.R { - // We make sure that we register it with the main gRPC server to ensure - // all our methods are routed properly. - RegisterSignerServer(grpcServer, s) - - log.Debugf("Signer RPC server successfully register with root gRPC " + - "server") - - return nil -} - -// RegisterWithRestServer will be called by the root REST mux to direct a sub -// RPC server to register itself with the main REST mux server. Until this is -// called, each sub-server won't be able to have requests routed towards it. -// -// NOTE: This is part of the lnrpc.SubServer interface. -func (s *Server) RegisterWithRestServer(ctx context.Context, - mux *runtime.ServeMux, dest string, opts []grpc.DialOption) er.R { - - // We make sure that we register it with the main REST server to ensure - // all our methods are routed properly. - err := RegisterSignerHandlerFromEndpoint(ctx, mux, dest, opts) - if err != nil { - log.Errorf("Could not register Signer REST server "+ - "with root REST server: %v", err) - return err - } - - log.Debugf("Signer REST server successfully registered with " + - "root REST server") - return nil -} - -// SignOutputRaw generates a signature for the passed transaction according to -// the data within the passed SignReq. If we're unable to find the keys that -// correspond to the KeyLocators in the SignReq then we'll return an error. -// Additionally, if the user doesn't provide the set of required parameters, or -// provides an invalid transaction, then we'll return with an error. -// -// NOTE: The resulting signature should be void of a sighash byte. -func (s *Server) SignOutputRaw(ctx context.Context, in *SignReq) (*SignResp, er.R) { - - switch { - // If the client doesn't specify a transaction, then there's nothing to - // sign, so we'll exit early. 
- case len(in.RawTxBytes) == 0: - return nil, er.Errorf("a transaction to sign MUST be " + - "passed in") - - // If the client doesn't tell us *how* to sign the transaction, then we - // can't sign anything, so we'll exit early. - case len(in.SignDescs) == 0: - return nil, er.Errorf("at least one SignDescs MUST be " + - "passed in") - } - - // Now that we know we have an actual transaction to decode, we'll - // deserialize it into something that we can properly utilize. - var ( - txToSign wire.MsgTx - err error - ) - txReader := bytes.NewReader(in.RawTxBytes) - if err := txToSign.Deserialize(txReader); err != nil { - return nil, er.Errorf("unable to decode tx: %v", err) - } - - sigHashCache := txscript.NewTxSigHashes(&txToSign) - - log.Debugf("Generating sigs for %v inputs: ", len(in.SignDescs)) - - // With the transaction deserialized, we'll now convert sign descs so - // we can feed it into the actual signer. - signDescs := make([]*input.SignDescriptor, 0, len(in.SignDescs)) - for _, signDesc := range in.SignDescs { - keyDesc := signDesc.KeyDesc - - // The caller can either specify the key using the raw pubkey, - // or the description of the key. We'll still attempt to parse - // both if both were provided however, to ensure the underlying - // SignOutputRaw has as much information as possible. - var ( - targetPubKey *btcec.PublicKey - keyLoc keychain.KeyLocator - ) - - // If this method doesn't return nil, then we know that user is - // attempting to include a raw serialized pub key. - if keyDesc.GetRawKeyBytes() != nil { - targetPubKey, err = parseRawKeyBytes( - keyDesc.GetRawKeyBytes(), - ) - if err != nil { - return nil, err - } - } - - // Similarly, if they specified a key locator, then we'll parse - // that as well. 
- if keyDesc.GetKeyLoc() != nil { - protoLoc := keyDesc.GetKeyLoc() - keyLoc = keychain.KeyLocator{ - Family: keychain.KeyFamily( - protoLoc.KeyFamily, - ), - Index: uint32(protoLoc.KeyIndex), - } - } - - // If a witness script isn't passed, then we can't proceed, as - // in the p2wsh case, we can't properly generate the sighash. - if len(signDesc.WitnessScript) == 0 { - // TODO(roasbeef): if regualr p2wkh, then at times - // internally we allow script to go by - return nil, er.Errorf("witness script MUST be " + - "specified") - } - - // If the users provided a double tweak, then we'll need to - // parse that out now to ensure their input is properly signed. - var tweakPrivKey *btcec.PrivateKey - if len(signDesc.DoubleTweak) != 0 { - tweakPrivKey, _ = btcec.PrivKeyFromBytes( - btcec.S256(), signDesc.DoubleTweak, - ) - } - - // Finally, with verification and parsing complete, we can - // construct the final sign descriptor to generate the proper - // signature for this input. - signDescs = append(signDescs, &input.SignDescriptor{ - KeyDesc: keychain.KeyDescriptor{ - KeyLocator: keyLoc, - PubKey: targetPubKey, - }, - SingleTweak: signDesc.SingleTweak, - DoubleTweak: tweakPrivKey, - WitnessScript: signDesc.WitnessScript, - Output: &wire.TxOut{ - Value: signDesc.Output.Value, - PkScript: signDesc.Output.PkScript, - }, - HashType: params.SigHashType(signDesc.Sighash), - SigHashes: sigHashCache, - InputIndex: int(signDesc.InputIndex), - }) - } - - // Now that we've mapped all the proper sign descriptors, we can - // request signatures for each of them, passing in the transaction to - // be signed. 
- numSigs := len(in.SignDescs) - resp := &SignResp{ - RawSigs: make([][]byte, numSigs), - } - for i, signDesc := range signDescs { - sig, err := s.cfg.Signer.SignOutputRaw(&txToSign, signDesc) - if err != nil { - log.Errorf("unable to generate sig for input "+ - "#%v: %v", i, err) - - return nil, err - } - - resp.RawSigs[i] = sig.Serialize() - } - - return resp, nil -} - -// ComputeInputScript generates a complete InputIndex for the passed -// transaction with the signature as defined within the passed SignDescriptor. -// This method should be capable of generating the proper input script for both -// regular p2wkh output and p2wkh outputs nested within a regular p2sh output. -// -// Note that when using this method to sign inputs belonging to the wallet, the -// only items of the SignDescriptor that need to be populated are pkScript in -// the TxOut field, the value in that same field, and finally the input index. -func (s *Server) ComputeInputScript(ctx context.Context, - in *SignReq) (*InputScriptResp, er.R) { - - switch { - // If the client doesn't specify a transaction, then there's nothing to - // sign, so we'll exit early. - case len(in.RawTxBytes) == 0: - return nil, er.Errorf("a transaction to sign MUST be " + - "passed in") - - // If the client doesn't tell us *how* to sign the transaction, then we - // can't sign anything, so we'll exit early. - case len(in.SignDescs) == 0: - return nil, er.Errorf("at least one SignDescs MUST be " + - "passed in") - } - - // Now that we know we have an actual transaction to decode, we'll - // deserialize it into something that we can properly utilize. 
- var txToSign wire.MsgTx - txReader := bytes.NewReader(in.RawTxBytes) - if err := txToSign.Deserialize(txReader); err != nil { - return nil, er.Errorf("unable to decode tx: %v", err) - } - - sigHashCache := txscript.NewTxSigHashes(&txToSign) - - signDescs := make([]*input.SignDescriptor, 0, len(in.SignDescs)) - for _, signDesc := range in.SignDescs { - // For this method, the only fields that we care about are the - // hash type, and the information concerning the output as we - // only know how to provide full witnesses for outputs that we - // solely control. - signDescs = append(signDescs, &input.SignDescriptor{ - Output: &wire.TxOut{ - Value: signDesc.Output.Value, - PkScript: signDesc.Output.PkScript, - }, - HashType: params.SigHashType(signDesc.Sighash), - SigHashes: sigHashCache, - InputIndex: int(signDesc.InputIndex), - }) - } - - // With all of our signDescs assembled, we can now generate a valid - // input script for each of them, and collate the responses to return - // back to the caller. - numWitnesses := len(in.SignDescs) - resp := &InputScriptResp{ - InputScripts: make([]*InputScript, numWitnesses), - } - for i, signDesc := range signDescs { - inputScript, err := s.cfg.Signer.ComputeInputScript( - &txToSign, signDesc, - ) - if err != nil { - return nil, err - } - - resp.InputScripts[i] = &InputScript{ - Witness: inputScript.Witness, - SigScript: inputScript.SigScript, - } - } - - return resp, nil -} - -// SignMessage signs a message with the key specified in the key locator. The -// returned signature is fixed-size LN wire format encoded. -func (s *Server) SignMessage(ctx context.Context, - in *SignMessageReq) (*SignMessageResp, er.R) { - - if in.Msg == nil { - return nil, er.Errorf("a message to sign MUST be passed in") - } - if in.KeyLoc == nil { - return nil, er.Errorf("a key locator MUST be passed in") - } - - // Describe the private key we'll be using for signing. 
- keyDescriptor := keychain.KeyDescriptor{ - KeyLocator: keychain.KeyLocator{ - Family: keychain.KeyFamily(in.KeyLoc.KeyFamily), - Index: uint32(in.KeyLoc.KeyIndex), - }, - } - - // The signature is over the sha256 hash of the message. - var digest [32]byte - copy(digest[:], chainhash.HashB(in.Msg)) - - // Create the raw ECDSA signature first and convert it to the final wire - // format after. - sig, err := s.cfg.KeyRing.SignDigest(keyDescriptor, digest) - if err != nil { - return nil, er.Errorf("can't sign the hash: %v", err) - } - wireSig, err := lnwire.NewSigFromSignature(sig) - if err != nil { - return nil, er.Errorf("can't convert to wire format: %v", err) - } - return &SignMessageResp{ - Signature: wireSig.ToSignatureBytes(), - }, nil -} - -// VerifyMessage verifies a signature over a message using the public key -// provided. The signature must be fixed-size LN wire format encoded. -func (s *Server) VerifyMessage(ctx context.Context, - in *VerifyMessageReq) (*VerifyMessageResp, er.R) { - - if in.Msg == nil { - return nil, er.Errorf("a message to verify MUST be passed in") - } - if in.Signature == nil { - return nil, er.Errorf("a signature to verify MUST be passed " + - "in") - } - if in.Pubkey == nil { - return nil, er.Errorf("a pubkey to verify MUST be passed in") - } - pubkey, err := btcec.ParsePubKey(in.Pubkey, btcec.S256()) - if err != nil { - return nil, er.Errorf("unable to parse pubkey: %v", err) - } - - // The signature must be fixed-size LN wire format encoded. - wireSig, err := lnwire.NewSigFromRawSignature(in.Signature) - if err != nil { - return nil, er.Errorf("failed to decode signature: %v", err) - } - sig, err := wireSig.ToSignature() - if err != nil { - return nil, er.Errorf("failed to convert from wire format: %v", - err) - } - - // The signature is over the sha256 hash of the message. 
- digest := chainhash.HashB(in.Msg) - valid := sig.Verify(digest, pubkey) - return &VerifyMessageResp{ - Valid: valid, - }, nil -} - -// DeriveSharedKey returns a shared secret key by performing Diffie-Hellman key -// derivation between the ephemeral public key in the request and the node's -// key specified in the key_desc parameter. Either a key locator or a raw public -// key is expected in the key_desc, if neither is supplied, defaults to the -// node's identity private key. The old key_loc parameter in the request -// shouldn't be used anymore. -// The resulting shared public key is serialized in the compressed format and -// hashed with sha256, resulting in the final key length of 256bit. -func (s *Server) DeriveSharedKey(_ context.Context, in *SharedKeyRequest) ( - *SharedKeyResponse, er.R) { - - // Check that EphemeralPubkey is valid. - ephemeralPubkey, err := parseRawKeyBytes(in.EphemeralPubkey) - if err != nil { - return nil, er.Errorf("error in ephemeral pubkey: %v", err) - } - if ephemeralPubkey == nil { - return nil, er.Errorf("must provide ephemeral pubkey") - } - - // Check for backward compatibility. The caller either specifies the old - // key_loc field, or the new key_desc field, but not both. - if in.KeyDesc != nil && in.KeyLoc != nil { - return nil, er.Errorf("use either key_desc or key_loc") - } - - // When key_desc is used, the key_desc.key_loc is expected as the caller - // needs to specify the KeyFamily. - if in.KeyDesc != nil && in.KeyDesc.KeyLoc == nil { - return nil, er.Errorf("when setting key_desc the field " + - "key_desc.key_loc must also be set") - } - - // We extract two params, rawKeyBytes and keyLoc. Notice their initial - // values will be overwritten if not using the deprecated RPC param. - var rawKeyBytes []byte - keyLoc := in.KeyLoc - if in.KeyDesc != nil { - keyLoc = in.KeyDesc.GetKeyLoc() - rawKeyBytes = in.KeyDesc.GetRawKeyBytes() - } - - // When no keyLoc is supplied, defaults to the node's identity private - // key. 
- if keyLoc == nil { - keyLoc = &KeyLocator{ - KeyFamily: int32(keychain.KeyFamilyNodeKey), - KeyIndex: 0, - } - } - - // Check the caller is using either the key index or the raw public key - // to perform the ECDH, we can't have both. - if rawKeyBytes != nil && keyLoc.KeyIndex != 0 { - return nil, er.Errorf("use either raw_key_bytes or key_index") - } - - // Check the raw public key is valid. Notice that if the rawKeyBytes is - // empty, the parseRawKeyBytes won't return an error, a nil - // *btcec.PublicKey is returned instead. - pk, err := parseRawKeyBytes(rawKeyBytes) - if err != nil { - return nil, er.Errorf("error in raw pubkey: %v", err) - } - - // Create a key descriptor. When the KeyIndex is not specified, it uses - // the empty value 0, and when the raw public key is not specified, the - // pk is nil. - keyDescriptor := keychain.KeyDescriptor{ - KeyLocator: keychain.KeyLocator{ - Family: keychain.KeyFamily(keyLoc.KeyFamily), - Index: uint32(keyLoc.KeyIndex), - }, - PubKey: pk, - } - - // Derive the shared key using ECDH and hashing the serialized - // compressed shared point. - sharedKeyHash, err := s.cfg.KeyRing.ECDH(keyDescriptor, ephemeralPubkey) - if err != nil { - err := er.Errorf("unable to derive shared key: %v", err) - log.Error(err) - return nil, err - } - - return &SharedKeyResponse{SharedKey: sharedKeyHash[:]}, nil -} - -// parseRawKeyBytes checks that the provided raw public key is valid and returns -// the public key. A nil public key is returned if the length of the rawKeyBytes -// is zero. -func parseRawKeyBytes(rawKeyBytes []byte) (*btcec.PublicKey, er.R) { - switch { - - case len(rawKeyBytes) == 33: - // If a proper raw key was provided, then we'll attempt - // to decode and parse it. - return btcec.ParsePubKey( - rawKeyBytes, btcec.S256(), - ) - - case len(rawKeyBytes) == 0: - // No key is provided, return nil. 
- return nil, nil - - default: - // If the user provided a raw key, but it's of the - // wrong length, then we'll return with an error. - return nil, er.Errorf("pubkey must be " + - "serialized in compressed format if " + - "specified") - } -} diff --git a/lnd/lnrpc/sub_server.go b/lnd/lnrpc/sub_server.go deleted file mode 100644 index a5e9a612..00000000 --- a/lnd/lnrpc/sub_server.go +++ /dev/null @@ -1,140 +0,0 @@ -package lnrpc - -import ( - "context" - "sync" - - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/pkt-cash/pktd/btcutil/er" - "google.golang.org/grpc" - "gopkg.in/macaroon-bakery.v2/bakery" -) - -// MacaroonPerms is a map from the FullMethod of an invoked gRPC command. It -// maps the set of operations that the macaroon presented with the command MUST -// satisfy. With this map, all sub-servers are able to communicate to the -// primary macaroon service what type of macaroon must be passed with each -// method present on the service of the sub-server. -type MacaroonPerms map[string][]bakery.Op - -// SubServer is a child server of the main lnrpc gRPC server. Sub-servers allow -// lnd to expose discrete services that can be used with or independent of the -// main RPC server. The main rpcserver will create, start, stop, and manage -// each sub-server in a generalized manner. -type SubServer interface { - // Start starts the sub-server and all goroutines it needs to operate. - Start() er.R - - // Stop signals that the sub-server should wrap up any lingering - // requests, and being a graceful shutdown. - Stop() er.R - - // Name returns a unique string representation of the sub-server. This - // can be used to identify the sub-server and also de-duplicate them. - Name() string - - // RegisterWithRootServer will be called by the root gRPC server to - // direct a sub RPC server to register itself with the main gRPC root - // server. Until this is called, each sub-server won't be able to have - // requests routed towards it. 
- RegisterWithRootServer(*grpc.Server) er.R - - // RegisterWithRestServer will be called by the root REST mux to direct - // a sub RPC server to register itself with the main REST mux server. - // Until this is called, each sub-server won't be able to have requests - // routed towards it. - RegisterWithRestServer(context.Context, *runtime.ServeMux, string, - []grpc.DialOption) er.R -} - -// SubServerConfigDispatcher is an interface that all sub-servers will use to -// dynamically locate their configuration files. This abstraction will allow -// the primary RPC sever to initialize all sub-servers in a generic manner -// without knowing of each individual sub server. -type SubServerConfigDispatcher interface { - // FetchConfig attempts to locate an existing configuration file mapped - // to the target sub-server. If we're unable to find a config file - // matching the subServerName name, then false will be returned for the - // second parameter. - FetchConfig(subServerName string) (interface{}, bool) -} - -// SubServerDriver is a template struct that allows the root server to create a -// sub-server with minimal knowledge. The root server only need a fully -// populated SubServerConfigDispatcher and with the aide of the -// RegisterSubServers method, it's able to create and initialize all -// sub-servers. -type SubServerDriver struct { - // SubServerName is the full name of a sub-sever. - // - // NOTE: This MUST be unique. - SubServerName string - - // New creates, and fully initializes a new sub-server instance with - // the aide of the SubServerConfigDispatcher. This closure should - // return the SubServer, ready for action, along with the set of - // macaroon permissions that the sub-server wishes to pass on to the - // root server for all methods routed towards it. - New func(subCfgs SubServerConfigDispatcher) (SubServer, MacaroonPerms, er.R) -} - -var ( - // subServers is a package level global variable that houses all the - // registered sub-servers. 
- subServers = make(map[string]*SubServerDriver) - - // registerMtx is a mutex that protects access to the above subServer - // map. - registerMtx sync.Mutex -) - -// RegisteredSubServers returns all registered sub-servers. -// -// NOTE: This function is safe for concurrent access. -func RegisteredSubServers() []*SubServerDriver { - registerMtx.Lock() - defer registerMtx.Unlock() - - drivers := make([]*SubServerDriver, 0, len(subServers)) - for _, driver := range subServers { - drivers = append(drivers, driver) - } - - return drivers -} - -// RegisterSubServer should be called by a sub-server within its package's -// init() method to register its existence with the main sub-server map. Each -// sub-server, if active, is meant to register via this method in their init() -// method. This allows callers to easily initialize and register all -// sub-servers without knowing any details beyond that the fact that they -// satisfy the necessary interfaces. -// -// NOTE: This function is safe for concurrent access. -func RegisterSubServer(driver *SubServerDriver) er.R { - registerMtx.Lock() - defer registerMtx.Unlock() - - if _, ok := subServers[driver.SubServerName]; ok { - return er.Errorf("subserver already registered") - } - - subServers[driver.SubServerName] = driver - - return nil -} - -// SupportedServers returns slice of the names of all registered sub-servers. -// -// NOTE: This function is safe for concurrent access. 
-func SupportedServers() []string { - registerMtx.Lock() - defer registerMtx.Unlock() - - supportedSubServers := make([]string, 0, len(subServers)) - for driverName := range subServers { - supportedSubServers = append(supportedSubServers, driverName) - } - - return supportedSubServers -} diff --git a/lnd/lnrpc/verrpc/driver.go b/lnd/lnrpc/verrpc/driver.go deleted file mode 100644 index 27a3e04d..00000000 --- a/lnd/lnrpc/verrpc/driver.go +++ /dev/null @@ -1,26 +0,0 @@ -package verrpc - -import ( - "fmt" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/lnrpc" -) - -func init() { - subServer := &lnrpc.SubServerDriver{ - SubServerName: subServerName, - New: func(c lnrpc.SubServerConfigDispatcher) (lnrpc.SubServer, - lnrpc.MacaroonPerms, er.R) { - - return &Server{}, macPermissions, nil - }, - } - - // We'll register ourselves as a sub-RPC server within the global lnrpc - // package namespace. - if err := lnrpc.RegisterSubServer(subServer); err != nil { - panic(fmt.Sprintf("failed to register sub server driver '%s': %v", - subServerName, err)) - } -} diff --git a/lnd/lnrpc/verrpc/server.go b/lnd/lnrpc/verrpc/server.go deleted file mode 100644 index 7bb81b4b..00000000 --- a/lnd/lnrpc/verrpc/server.go +++ /dev/null @@ -1,95 +0,0 @@ -package verrpc - -import ( - "context" - - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/pktconfig/version" - "github.com/pkt-cash/pktd/pktlog/log" - "google.golang.org/grpc" - "gopkg.in/macaroon-bakery.v2/bakery" -) - -const subServerName = "VersionRPC" - -var macPermissions = map[string][]bakery.Op{ - "/verrpc.Versioner/GetVersion": {{ - Entity: "info", - Action: "read", - }}, -} - -// Server is an rpc server that supports querying for information about the -// running binary. -type Server struct{} - -// Start launches any helper goroutines required for the rpcServer to function. -// -// NOTE: This is part of the lnrpc.SubServer interface. 
-func (s *Server) Start() er.R { - return nil -} - -// Stop signals any active goroutines for a graceful closure. -// -// NOTE: This is part of the lnrpc.SubServer interface. -func (s *Server) Stop() er.R { - return nil -} - -// Name returns a unique string representation of the sub-server. This can be -// used to identify the sub-server and also de-duplicate them. -// -// NOTE: This is part of the lnrpc.SubServer interface. -func (s *Server) Name() string { - return subServerName -} - -// RegisterWithRootServer will be called by the root gRPC server to direct a -// sub RPC server to register itself with the main gRPC root server. Until this -// is called, each sub-server won't be able to have requests routed towards it. -// -// NOTE: This is part of the lnrpc.SubServer interface. -func (s *Server) RegisterWithRootServer(grpcServer *grpc.Server) er.R { - RegisterVersionerServer(grpcServer, s) - - log.Debugf("Versioner RPC server successfully registered with root " + - "gRPC server") - - return nil -} - -// RegisterWithRestServer will be called by the root REST mux to direct a sub -// RPC server to register itself with the main REST mux server. Until this is -// called, each sub-server won't be able to have requests routed towards it. -// -// NOTE: This is part of the lnrpc.SubServer interface. -func (s *Server) RegisterWithRestServer(ctx context.Context, - mux *runtime.ServeMux, dest string, opts []grpc.DialOption) er.R { - - // We make sure that we register it with the main REST server to ensure - // all our methods are routed properly. - err := RegisterVersionerHandlerFromEndpoint(ctx, mux, dest, opts) - if err != nil { - log.Errorf("Could not register Versioner REST server "+ - "with root REST server: %v", err) - return er.E(err) - } - - log.Debugf("Versioner REST server successfully registered with " + - "root REST server") - return nil -} - -// GetVersion returns information about the compiled binary. 
-func (s *Server) GetVersion(_ context.Context, - _ *VersionRequest) (*Version, error) { - - return &Version{ - Version: version.Version(), - AppMajor: uint32(version.AppMajorVersion()), - AppMinor: uint32(version.AppMinorVersion()), - AppPatch: uint32(version.AppPatchVersion()), - }, nil -} diff --git a/lnd/lnrpc/verrpc/verrpc.pb.go b/lnd/lnrpc/verrpc/verrpc.pb.go deleted file mode 100644 index f1ab1945..00000000 --- a/lnd/lnrpc/verrpc/verrpc.pb.go +++ /dev/null @@ -1,284 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: verrpc/verrpc.proto - -package verrpc - -import ( - context "context" - fmt "fmt" - proto "github.com/golang/protobuf/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -type VersionRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *VersionRequest) Reset() { *m = VersionRequest{} } -func (m *VersionRequest) String() string { return proto.CompactTextString(m) } -func (*VersionRequest) ProtoMessage() {} -func (*VersionRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_494312204cefa0e6, []int{0} -} - -func (m *VersionRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VersionRequest.Unmarshal(m, b) -} -func (m *VersionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VersionRequest.Marshal(b, m, deterministic) -} -func (m *VersionRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_VersionRequest.Merge(m, src) -} -func (m *VersionRequest) XXX_Size() int { - return xxx_messageInfo_VersionRequest.Size(m) -} -func (m *VersionRequest) XXX_DiscardUnknown() { - xxx_messageInfo_VersionRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_VersionRequest proto.InternalMessageInfo - -type Version struct { - // A verbose description of the daemon's commit. - Commit string `protobuf:"bytes,1,opt,name=commit,proto3" json:"commit,omitempty"` - // The SHA1 commit hash that the daemon is compiled with. - CommitHash string `protobuf:"bytes,2,opt,name=commit_hash,json=commitHash,proto3" json:"commit_hash,omitempty"` - // The semantic version. - Version string `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"` - // The major application version. - AppMajor uint32 `protobuf:"varint,4,opt,name=app_major,json=appMajor,proto3" json:"app_major,omitempty"` - // The minor application version. - AppMinor uint32 `protobuf:"varint,5,opt,name=app_minor,json=appMinor,proto3" json:"app_minor,omitempty"` - // The application patch number. 
- AppPatch uint32 `protobuf:"varint,6,opt,name=app_patch,json=appPatch,proto3" json:"app_patch,omitempty"` - // The application pre-release modifier, possibly empty. - AppPreRelease string `protobuf:"bytes,7,opt,name=app_pre_release,json=appPreRelease,proto3" json:"app_pre_release,omitempty"` - // The list of build tags that were supplied during compilation. - BuildTags []string `protobuf:"bytes,8,rep,name=build_tags,json=buildTags,proto3" json:"build_tags,omitempty"` - // The version of go that compiled the executable. - GoVersion string `protobuf:"bytes,9,opt,name=go_version,json=goVersion,proto3" json:"go_version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Version) Reset() { *m = Version{} } -func (m *Version) String() string { return proto.CompactTextString(m) } -func (*Version) ProtoMessage() {} -func (*Version) Descriptor() ([]byte, []int) { - return fileDescriptor_494312204cefa0e6, []int{1} -} - -func (m *Version) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Version.Unmarshal(m, b) -} -func (m *Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Version.Marshal(b, m, deterministic) -} -func (m *Version) XXX_Merge(src proto.Message) { - xxx_messageInfo_Version.Merge(m, src) -} -func (m *Version) XXX_Size() int { - return xxx_messageInfo_Version.Size(m) -} -func (m *Version) XXX_DiscardUnknown() { - xxx_messageInfo_Version.DiscardUnknown(m) -} - -var xxx_messageInfo_Version proto.InternalMessageInfo - -func (m *Version) GetCommit() string { - if m != nil { - return m.Commit - } - return "" -} - -func (m *Version) GetCommitHash() string { - if m != nil { - return m.CommitHash - } - return "" -} - -func (m *Version) GetVersion() string { - if m != nil { - return m.Version - } - return "" -} - -func (m *Version) GetAppMajor() uint32 { - if m != nil { - return m.AppMajor - } - return 0 -} - -func (m *Version) 
GetAppMinor() uint32 { - if m != nil { - return m.AppMinor - } - return 0 -} - -func (m *Version) GetAppPatch() uint32 { - if m != nil { - return m.AppPatch - } - return 0 -} - -func (m *Version) GetAppPreRelease() string { - if m != nil { - return m.AppPreRelease - } - return "" -} - -func (m *Version) GetBuildTags() []string { - if m != nil { - return m.BuildTags - } - return nil -} - -func (m *Version) GetGoVersion() string { - if m != nil { - return m.GoVersion - } - return "" -} - -func init() { - proto.RegisterType((*VersionRequest)(nil), "verrpc.VersionRequest") - proto.RegisterType((*Version)(nil), "verrpc.Version") -} - -func init() { proto.RegisterFile("verrpc/verrpc.proto", fileDescriptor_494312204cefa0e6) } - -var fileDescriptor_494312204cefa0e6 = []byte{ - // 294 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x91, 0xd1, 0x4a, 0xc3, 0x30, - 0x18, 0x85, 0xd9, 0xa6, 0xdd, 0xf2, 0xcb, 0x9c, 0x44, 0x90, 0xa0, 0x88, 0x65, 0x17, 0x52, 0x11, - 0x5b, 0x50, 0x7c, 0x81, 0xdd, 0xe8, 0x8d, 0x20, 0x45, 0xbc, 0xf0, 0xa6, 0xa4, 0x5d, 0x68, 0xaa, - 0x6d, 0x13, 0x93, 0x74, 0xef, 0xe2, 0xdb, 0x4a, 0x92, 0xb6, 0x43, 0x2f, 0x4a, 0xcf, 0x39, 0x5f, - 0xf8, 0x13, 0xce, 0x0f, 0xa7, 0x3b, 0xa6, 0x94, 0x2c, 0x12, 0xff, 0x8b, 0xa5, 0x12, 0x46, 0xe0, - 0xc0, 0xbb, 0xf5, 0x09, 0x1c, 0xbf, 0x33, 0xa5, 0x2b, 0xd1, 0xa6, 0xec, 0xbb, 0x63, 0xda, 0xac, - 0x7f, 0xa6, 0x30, 0xef, 0x23, 0x7c, 0x06, 0x41, 0x21, 0x9a, 0xa6, 0x32, 0x64, 0x12, 0x4e, 0x22, - 0x94, 0xf6, 0x0e, 0x5f, 0xc1, 0x91, 0x57, 0x19, 0xa7, 0x9a, 0x93, 0xa9, 0x83, 0xe0, 0xa3, 0x67, - 0xaa, 0x39, 0x26, 0x30, 0xdf, 0xf9, 0x19, 0x64, 0xe6, 0xe0, 0x60, 0xf1, 0x05, 0x20, 0x2a, 0x65, - 0xd6, 0xd0, 0x4f, 0xa1, 0xc8, 0x41, 0x38, 0x89, 0x96, 0xe9, 0x82, 0x4a, 0xf9, 0x62, 0xfd, 0x08, - 0xab, 0x56, 0x28, 0x72, 0xb8, 0x87, 0xd6, 0x0f, 0x50, 0x52, 0x53, 0x70, 0x12, 0x8c, 0xf0, 0xd5, - 0x7a, 0x7c, 0x0d, 0x2b, 0x07, 0x15, 0xcb, 0x14, 0xab, 0x19, 0xd5, 0x8c, 0xcc, 0xdd, 0xc5, 
0x4b, - 0x7b, 0x44, 0xb1, 0xd4, 0x87, 0xf8, 0x12, 0x20, 0xef, 0xaa, 0x7a, 0x9b, 0x19, 0x5a, 0x6a, 0xb2, - 0x08, 0x67, 0x11, 0x4a, 0x91, 0x4b, 0xde, 0x68, 0xa9, 0x2d, 0x2e, 0x45, 0x36, 0x3c, 0x1d, 0xb9, - 0x09, 0xa8, 0x14, 0x7d, 0x1f, 0xf7, 0x1b, 0x40, 0xbd, 0x64, 0x0a, 0x3f, 0x02, 0x3c, 0x31, 0x33, - 0x56, 0x15, 0xf7, 0xfd, 0xfe, 0xad, 0xf3, 0x7c, 0xf5, 0x2f, 0xdf, 0xdc, 0x7e, 0xdc, 0x94, 0x95, - 0xe1, 0x5d, 0x1e, 0x17, 0xa2, 0x49, 0xe4, 0x97, 0xb9, 0x2b, 0xa8, 0xe6, 0x56, 0x6c, 0x93, 0xba, - 0xb5, 0xdf, 0x7e, 0x59, 0x79, 0xe0, 0xb6, 0xf5, 0xf0, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x17, 0xf2, - 0x96, 0x3c, 0xc4, 0x01, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// VersionerClient is the client API for Versioner service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type VersionerClient interface { - // lncli: `version` - //GetVersion returns the current version and build information of the running - //daemon. - GetVersion(ctx context.Context, in *VersionRequest, opts ...grpc.CallOption) (*Version, error) -} - -type versionerClient struct { - cc *grpc.ClientConn -} - -func NewVersionerClient(cc *grpc.ClientConn) VersionerClient { - return &versionerClient{cc} -} - -func (c *versionerClient) GetVersion(ctx context.Context, in *VersionRequest, opts ...grpc.CallOption) (*Version, error) { - out := new(Version) - err := c.cc.Invoke(ctx, "/verrpc.Versioner/GetVersion", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// VersionerServer is the server API for Versioner service. 
-type VersionerServer interface { - // lncli: `version` - //GetVersion returns the current version and build information of the running - //daemon. - GetVersion(context.Context, *VersionRequest) (*Version, error) -} - -// UnimplementedVersionerServer can be embedded to have forward compatible implementations. -type UnimplementedVersionerServer struct { -} - -func (*UnimplementedVersionerServer) GetVersion(ctx context.Context, req *VersionRequest) (*Version, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetVersion not implemented") -} - -func RegisterVersionerServer(s *grpc.Server, srv VersionerServer) { - s.RegisterService(&_Versioner_serviceDesc, srv) -} - -func _Versioner_GetVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(VersionRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(VersionerServer).GetVersion(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/verrpc.Versioner/GetVersion", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(VersionerServer).GetVersion(ctx, req.(*VersionRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Versioner_serviceDesc = grpc.ServiceDesc{ - ServiceName: "verrpc.Versioner", - HandlerType: (*VersionerServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "GetVersion", - Handler: _Versioner_GetVersion_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "verrpc/verrpc.proto", -} diff --git a/lnd/lnrpc/verrpc/verrpc.pb.gw.go b/lnd/lnrpc/verrpc/verrpc.pb.gw.go deleted file mode 100644 index d6f6d597..00000000 --- a/lnd/lnrpc/verrpc/verrpc.pb.gw.go +++ /dev/null @@ -1,147 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. -// source: verrpc/verrpc.proto - -/* -Package verrpc is a reverse proxy. 
- -It translates gRPC into RESTful JSON APIs. -*/ -package verrpc - -import ( - "context" - "io" - "net/http" - - "github.com/golang/protobuf/descriptor" - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/status" -) - -// Suppress "imported and not used" errors -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray -var _ = descriptor.ForMessage - -func request_Versioner_GetVersion_0(ctx context.Context, marshaler runtime.Marshaler, client VersionerClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq VersionRequest - var metadata runtime.ServerMetadata - - msg, err := client.GetVersion(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Versioner_GetVersion_0(ctx context.Context, marshaler runtime.Marshaler, server VersionerServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq VersionRequest - var metadata runtime.ServerMetadata - - msg, err := server.GetVersion(ctx, &protoReq) - return msg, metadata, err - -} - -// RegisterVersionerHandlerServer registers the http handlers for service Versioner to "mux". -// UnaryRPC :call VersionerServer directly. -// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. 
-func RegisterVersionerHandlerServer(ctx context.Context, mux *runtime.ServeMux, server VersionerServer) error { - - mux.Handle("GET", pattern_Versioner_GetVersion_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Versioner_GetVersion_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Versioner_GetVersion_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -// RegisterVersionerHandlerFromEndpoint is same as RegisterVersionerHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterVersionerHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) - if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterVersionerHandler(ctx, mux, conn) -} - -// RegisterVersionerHandler registers the http handlers for service Versioner to "mux". -// The handlers forward requests to the grpc endpoint over "conn". 
-func RegisterVersionerHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterVersionerHandlerClient(ctx, mux, NewVersionerClient(conn)) -} - -// RegisterVersionerHandlerClient registers the http handlers for service Versioner -// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "VersionerClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "VersionerClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "VersionerClient" to call the correct interceptors. -func RegisterVersionerHandlerClient(ctx context.Context, mux *runtime.ServeMux, client VersionerClient) error { - - mux.Handle("GET", pattern_Versioner_GetVersion_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Versioner_GetVersion_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Versioner_GetVersion_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - return nil -} - -var ( - pattern_Versioner_GetVersion_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v2", "versioner", "version"}, "", runtime.AssumeColonVerbOpt(true))) -) - -var ( - forward_Versioner_GetVersion_0 = runtime.ForwardResponseMessage -) diff --git a/lnd/lnrpc/verrpc/verrpc.proto b/lnd/lnrpc/verrpc/verrpc.proto deleted file mode 100644 index 6607ab3f..00000000 --- a/lnd/lnrpc/verrpc/verrpc.proto +++ /dev/null @@ -1,47 +0,0 @@ -syntax = "proto3"; - -package verrpc; - -option go_package = "github.com/pkt-cash/pktd/lnd/lnrpc/verrpc"; - -// Versioner is a service that can be used to get information about the version -// and build information of the running daemon. -service Versioner { - /* lncli: `version` - GetVersion returns the current version and build information of the running - daemon. - */ - rpc GetVersion (VersionRequest) returns (Version); -} - -message VersionRequest { -} - -message Version { - // A verbose description of the daemon's commit. - string commit = 1; - - // The SHA1 commit hash that the daemon is compiled with. - string commit_hash = 2; - - // The semantic version. - string version = 3; - - // The major application version. - uint32 app_major = 4; - - // The minor application version. - uint32 app_minor = 5; - - // The application patch number. - uint32 app_patch = 6; - - // The application pre-release modifier, possibly empty. - string app_pre_release = 7; - - // The list of build tags that were supplied during compilation. - repeated string build_tags = 8; - - // The version of go that compiled the executable. 
- string go_version = 9; -} diff --git a/lnd/lnrpc/verrpc/verrpc.swagger.json b/lnd/lnrpc/verrpc/verrpc.swagger.json deleted file mode 100644 index b5626d90..00000000 --- a/lnd/lnrpc/verrpc/verrpc.swagger.json +++ /dev/null @@ -1,120 +0,0 @@ -{ - "swagger": "2.0", - "info": { - "title": "verrpc/verrpc.proto", - "version": "version not set" - }, - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "paths": { - "/v2/versioner/version": { - "get": { - "summary": "lncli: `version`\nGetVersion returns the current version and build information of the running\ndaemon.", - "operationId": "Versioner_GetVersion", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/verrpcVersion" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "tags": [ - "Versioner" - ] - } - } - }, - "definitions": { - "protobufAny": { - "type": "object", - "properties": { - "type_url": { - "type": "string" - }, - "value": { - "type": "string", - "format": "byte" - } - } - }, - "runtimeError": { - "type": "object", - "properties": { - "error": { - "type": "string" - }, - "code": { - "type": "integer", - "format": "int32" - }, - "message": { - "type": "string" - }, - "details": { - "type": "array", - "items": { - "$ref": "#/definitions/protobufAny" - } - } - } - }, - "verrpcVersion": { - "type": "object", - "properties": { - "commit": { - "type": "string", - "description": "A verbose description of the daemon's commit." - }, - "commit_hash": { - "type": "string", - "description": "The SHA1 commit hash that the daemon is compiled with." - }, - "version": { - "type": "string", - "description": "The semantic version." - }, - "app_major": { - "type": "integer", - "format": "int64", - "description": "The major application version." 
- }, - "app_minor": { - "type": "integer", - "format": "int64", - "description": "The minor application version." - }, - "app_patch": { - "type": "integer", - "format": "int64", - "description": "The application patch number." - }, - "app_pre_release": { - "type": "string", - "description": "The application pre-release modifier, possibly empty." - }, - "build_tags": { - "type": "array", - "items": { - "type": "string" - }, - "description": "The list of build tags that were supplied during compilation." - }, - "go_version": { - "type": "string", - "description": "The version of go that compiled the executable." - } - } - } - } -} diff --git a/lnd/lnrpc/walletrpc/config_active.go b/lnd/lnrpc/walletrpc/config_active.go deleted file mode 100644 index bd2579c4..00000000 --- a/lnd/lnrpc/walletrpc/config_active.go +++ /dev/null @@ -1,62 +0,0 @@ -// +build walletrpc - -package walletrpc - -import ( - "github.com/pkt-cash/pktd/chaincfg" - "github.com/pkt-cash/pktd/lnd/keychain" - "github.com/pkt-cash/pktd/lnd/lnwallet" - "github.com/pkt-cash/pktd/lnd/lnwallet/chainfee" - "github.com/pkt-cash/pktd/lnd/macaroons" - "github.com/pkt-cash/pktd/lnd/sweep" -) - -// Config is the primary configuration struct for the WalletKit RPC server. It -// contains all the items required for the signer rpc server to carry out its -// duties. The fields with struct tags are meant to be parsed as normal -// configuration options, while if able to be populated, the latter fields MUST -// also be specified. -type Config struct { - // WalletKitMacPath is the path for the signer macaroon. If unspecified - // then we assume that the macaroon will be found under the network - // directory, named DefaultWalletKitMacFilename. - WalletKitMacPath string `long:"walletkitmacaroonpath" description:"Path to the wallet kit macaroon"` - - // NetworkDir is the main network directory wherein the signer rpc - // server will find the macaroon named DefaultWalletKitMacFilename. 
- NetworkDir string - - // MacService is the main macaroon service that we'll use to handle - // authentication for the signer rpc server. - MacService *macaroons.Service - - // FeeEstimator is an instance of the primary fee estimator instance - // the WalletKit will use to respond to fee estimation requests. - FeeEstimator chainfee.Estimator - - // Wallet is the primary wallet that the WalletKit will use to proxy - // any relevant requests to. - Wallet lnwallet.WalletController - - // CoinSelectionLocker allows the caller to perform an operation, which - // is synchronized with all coin selection attempts. This can be used - // when an operation requires that all coin selection operations cease - // forward progress. Think of this as an exclusive lock on coin - // selection operations. - CoinSelectionLocker sweep.CoinSelectionLocker - - // KeyRing is an interface that the WalletKit will use to derive any - // keys due to incoming client requests. - KeyRing keychain.KeyRing - - // Sweeper is the central batching engine of lnd. It is responsible for - // sweeping inputs in batches back into the wallet. - Sweeper *sweep.UtxoSweeper - - // Chain is an interface that the WalletKit will use to determine state - // about the backing chain of the wallet. - Chain lnwallet.BlockChainIO - - // ChainParams are the parameters of the wallet's backing chain. - ChainParams *chaincfg.Params -} diff --git a/lnd/lnrpc/walletrpc/config_default.go b/lnd/lnrpc/walletrpc/config_default.go deleted file mode 100644 index 16bbbcc8..00000000 --- a/lnd/lnrpc/walletrpc/config_default.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !walletrpc - -package walletrpc - -// Config is the primary configuration struct for the WalletKit RPC server. -// When the server isn't active (via the build flag), callers outside this -// package will see this shell of a config file. 
-type Config struct{} diff --git a/lnd/lnrpc/walletrpc/driver.go b/lnd/lnrpc/walletrpc/driver.go deleted file mode 100644 index 9d0bcf38..00000000 --- a/lnd/lnrpc/walletrpc/driver.go +++ /dev/null @@ -1,81 +0,0 @@ -// +build walletrpc - -package walletrpc - -import ( - "fmt" - - "github.com/pkt-cash/pktd/lnd/lnrpc" -) - -// createNewSubServer is a helper method that will create the new WalletKit RPC -// sub server given the main config dispatcher method. If we're unable to find -// the config that is meant for us in the config dispatcher, then we'll exit -// with an error. -func createNewSubServer(configRegistry lnrpc.SubServerConfigDispatcher) (lnrpc.SubServer, lnrpc.MacaroonPerms, er.R) { - // We'll attempt to look up the config that we expect, according to our - // subServerName name. If we can't find this, then we'll exit with an - // error, as we're unable to properly initialize ourselves without this - // config. - walletKitServerConf, ok := configRegistry.FetchConfig(subServerName) - if !ok { - return nil, nil, er.Errorf("unable to find config for "+ - "subserver type %s", subServerName) - } - - // Now that we've found an object mapping to our service name, we'll - // ensure that it's the type we need. - config, ok := walletKitServerConf.(*Config) - if !ok { - return nil, nil, er.Errorf("wrong type of config for "+ - "subserver %s, expected %T got %T", subServerName, - &Config{}, walletKitServerConf) - } - - // Before we try to make the new WalletKit service instance, we'll - // perform some sanity checks on the arguments to ensure that they're - // useable. 
- switch { - case config.MacService != nil && config.NetworkDir == "": - return nil, nil, er.Errorf("NetworkDir must be set to " + - "create WalletKit RPC server") - - case config.FeeEstimator == nil: - return nil, nil, er.Errorf("FeeEstimator must be set to " + - "create WalletKit RPC server") - - case config.Wallet == nil: - return nil, nil, er.Errorf("Wallet must be set to create " + - "WalletKit RPC server") - - case config.KeyRing == nil: - return nil, nil, er.Errorf("KeyRing must be set to create " + - "WalletKit RPC server") - - case config.Sweeper == nil: - return nil, nil, er.Errorf("Sweeper must be set to create " + - "WalletKit RPC server") - - case config.Chain == nil: - return nil, nil, er.Errorf("Chain must be set to create " + - "WalletKit RPC server") - } - - return New(config) -} - -func init() { - subServer := &lnrpc.SubServerDriver{ - SubServerName: subServerName, - New: func(c lnrpc.SubServerConfigDispatcher) (lnrpc.SubServer, lnrpc.MacaroonPerms, er.R) { - return createNewSubServer(c) - }, - } - - // If the build tag is active, then we'll register ourselves as a - // sub-RPC server within the global lnrpc package namespace. - if err := lnrpc.RegisterSubServer(subServer); err != nil { - panic(fmt.Sprintf("failed to register sub server driver '%s': %v", - subServerName, err)) - } -} diff --git a/lnd/lnrpc/walletrpc/psbt.go b/lnd/lnrpc/walletrpc/psbt.go deleted file mode 100644 index a8a8d845..00000000 --- a/lnd/lnrpc/walletrpc/psbt.go +++ /dev/null @@ -1,89 +0,0 @@ -// +build walletrpc - -package walletrpc - -import ( - "fmt" - "math" - "time" - - "github.com/pkt-cash/pktd/wire" - "github.com/pkt-cash/pktd/btcutil/psbt" - "github.com/pkt-cash/pktd/pktwallet/wtxmgr" - "github.com/pkt-cash/pktd/lnd/lnwallet" -) - -const ( - defaultMinConf = 1 - defaultMaxConf = math.MaxInt32 -) - -// utxoLock is a type that contains an outpoint of an UTXO and its lock lease -// information. 
-type utxoLock struct { - lockID wtxmgr.LockID - outpoint wire.OutPoint - expiration time.Time -} - -// verifyInputsUnspent checks that all inputs are contained in the list of -// known, non-locked UTXOs given. -func verifyInputsUnspent(inputs []*wire.TxIn, utxos []*lnwallet.Utxo) er.R { - // TODO(guggero): Pass in UTXOs as a map to make lookup more efficient. - for idx, txIn := range inputs { - found := false - for _, u := range utxos { - if u.OutPoint == txIn.PreviousOutPoint { - found = true - break - } - } - - if !found { - return er.Errorf("input %d not found in list of non-"+ - "locked UTXO", idx) - } - } - - return nil -} - -// lockInputs requests a lock lease for all inputs specified in a PSBT packet -// by using the internal, static lock ID of lnd's wallet. -func lockInputs(w lnwallet.WalletController, packet *psbt.Packet) ([]*utxoLock, - error) { - - locks := make([]*utxoLock, len(packet.UnsignedTx.TxIn)) - for idx, rawInput := range packet.UnsignedTx.TxIn { - lock := &utxoLock{ - lockID: LndInternalLockID, - outpoint: rawInput.PreviousOutPoint, - } - - expiration, err := w.LeaseOutput(lock.lockID, lock.outpoint) - if err != nil { - // If we run into a problem with locking one output, we - // should try to unlock those that we successfully - // locked so far. If that fails as well, there's not - // much we can do. - for i := 0; i < idx; i++ { - op := locks[i].outpoint - if err := w.ReleaseOutput( - LndInternalLockID, op, - ); err != nil { - - log.Errorf("could not release the "+ - "lock on %v: %v", op, err) - } - } - - return nil, er.Errorf("could not lease a lock on "+ - "UTXO: %v", err) - } - - lock.expiration = expiration - locks[idx] = lock - } - - return locks, nil -} diff --git a/lnd/lnrpc/walletrpc/walletkit.pb.go b/lnd/lnrpc/walletrpc/walletkit.pb.go deleted file mode 100644 index 8fa12e71..00000000 --- a/lnd/lnrpc/walletrpc/walletkit.pb.go +++ /dev/null @@ -1,2722 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// source: walletrpc/walletkit.proto - -package walletrpc - -import ( - context "context" - fmt "fmt" - proto "github.com/golang/protobuf/proto" - lnrpc "github.com/pkt-cash/pktd/lnd/lnrpc" - signrpc "github.com/pkt-cash/pktd/lnd/lnrpc/signrpc" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -type WitnessType int32 - -const ( - WitnessType_UNKNOWN_WITNESS WitnessType = 0 - // - //A witness that allows us to spend the output of a commitment transaction - //after a relative lock-time lockout. - WitnessType_COMMITMENT_TIME_LOCK WitnessType = 1 - // - //A witness that allows us to spend a settled no-delay output immediately on a - //counterparty's commitment transaction. - WitnessType_COMMITMENT_NO_DELAY WitnessType = 2 - // - //A witness that allows us to sweep the settled output of a malicious - //counterparty's who broadcasts a revoked commitment transaction. - WitnessType_COMMITMENT_REVOKE WitnessType = 3 - // - //A witness that allows us to sweep an HTLC which we offered to the remote - //party in the case that they broadcast a revoked commitment state. - WitnessType_HTLC_OFFERED_REVOKE WitnessType = 4 - // - //A witness that allows us to sweep an HTLC output sent to us in the case that - //the remote party broadcasts a revoked commitment state. - WitnessType_HTLC_ACCEPTED_REVOKE WitnessType = 5 - // - //A witness that allows us to sweep an HTLC output that we extended to a - //party, but was never fulfilled. 
This HTLC output isn't directly on the - //commitment transaction, but is the result of a confirmed second-level HTLC - //transaction. As a result, we can only spend this after a CSV delay. - WitnessType_HTLC_OFFERED_TIMEOUT_SECOND_LEVEL WitnessType = 6 - // - //A witness that allows us to sweep an HTLC output that was offered to us, and - //for which we have a payment preimage. This HTLC output isn't directly on our - //commitment transaction, but is the result of confirmed second-level HTLC - //transaction. As a result, we can only spend this after a CSV delay. - WitnessType_HTLC_ACCEPTED_SUCCESS_SECOND_LEVEL WitnessType = 7 - // - //A witness that allows us to sweep an HTLC that we offered to the remote - //party which lies in the commitment transaction of the remote party. We can - //spend this output after the absolute CLTV timeout of the HTLC as passed. - WitnessType_HTLC_OFFERED_REMOTE_TIMEOUT WitnessType = 8 - // - //A witness that allows us to sweep an HTLC that was offered to us by the - //remote party. We use this witness in the case that the remote party goes to - //chain, and we know the pre-image to the HTLC. We can sweep this without any - //additional timeout. - WitnessType_HTLC_ACCEPTED_REMOTE_SUCCESS WitnessType = 9 - // - //A witness that allows us to sweep an HTLC from the remote party's commitment - //transaction in the case that the broadcast a revoked commitment, but then - //also immediately attempt to go to the second level to claim the HTLC. - WitnessType_HTLC_SECOND_LEVEL_REVOKE WitnessType = 10 - // - //A witness type that allows us to spend a regular p2wkh output that's sent to - //an output which is under complete control of the backing wallet. - WitnessType_WITNESS_KEY_HASH WitnessType = 11 - // - //A witness type that allows us to sweep an output that sends to a nested P2SH - //script that pays to a key solely under our control. 
- WitnessType_NESTED_WITNESS_KEY_HASH WitnessType = 12 - // - //A witness type that allows us to spend our anchor on the commitment - //transaction. - WitnessType_COMMITMENT_ANCHOR WitnessType = 13 -) - -var WitnessType_name = map[int32]string{ - 0: "UNKNOWN_WITNESS", - 1: "COMMITMENT_TIME_LOCK", - 2: "COMMITMENT_NO_DELAY", - 3: "COMMITMENT_REVOKE", - 4: "HTLC_OFFERED_REVOKE", - 5: "HTLC_ACCEPTED_REVOKE", - 6: "HTLC_OFFERED_TIMEOUT_SECOND_LEVEL", - 7: "HTLC_ACCEPTED_SUCCESS_SECOND_LEVEL", - 8: "HTLC_OFFERED_REMOTE_TIMEOUT", - 9: "HTLC_ACCEPTED_REMOTE_SUCCESS", - 10: "HTLC_SECOND_LEVEL_REVOKE", - 11: "WITNESS_KEY_HASH", - 12: "NESTED_WITNESS_KEY_HASH", - 13: "COMMITMENT_ANCHOR", -} - -var WitnessType_value = map[string]int32{ - "UNKNOWN_WITNESS": 0, - "COMMITMENT_TIME_LOCK": 1, - "COMMITMENT_NO_DELAY": 2, - "COMMITMENT_REVOKE": 3, - "HTLC_OFFERED_REVOKE": 4, - "HTLC_ACCEPTED_REVOKE": 5, - "HTLC_OFFERED_TIMEOUT_SECOND_LEVEL": 6, - "HTLC_ACCEPTED_SUCCESS_SECOND_LEVEL": 7, - "HTLC_OFFERED_REMOTE_TIMEOUT": 8, - "HTLC_ACCEPTED_REMOTE_SUCCESS": 9, - "HTLC_SECOND_LEVEL_REVOKE": 10, - "WITNESS_KEY_HASH": 11, - "NESTED_WITNESS_KEY_HASH": 12, - "COMMITMENT_ANCHOR": 13, -} - -func (x WitnessType) String() string { - return proto.EnumName(WitnessType_name, int32(x)) -} - -func (WitnessType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_6cc6942ac78249e5, []int{0} -} - -type ListUnspentRequest struct { - // The minimum number of confirmations to be included. - MinConfs int32 `protobuf:"varint,1,opt,name=min_confs,json=minConfs,proto3" json:"min_confs,omitempty"` - // The maximum number of confirmations to be included. 
- MaxConfs int32 `protobuf:"varint,2,opt,name=max_confs,json=maxConfs,proto3" json:"max_confs,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListUnspentRequest) Reset() { *m = ListUnspentRequest{} } -func (m *ListUnspentRequest) String() string { return proto.CompactTextString(m) } -func (*ListUnspentRequest) ProtoMessage() {} -func (*ListUnspentRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_6cc6942ac78249e5, []int{0} -} - -func (m *ListUnspentRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListUnspentRequest.Unmarshal(m, b) -} -func (m *ListUnspentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListUnspentRequest.Marshal(b, m, deterministic) -} -func (m *ListUnspentRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListUnspentRequest.Merge(m, src) -} -func (m *ListUnspentRequest) XXX_Size() int { - return xxx_messageInfo_ListUnspentRequest.Size(m) -} -func (m *ListUnspentRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ListUnspentRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ListUnspentRequest proto.InternalMessageInfo - -func (m *ListUnspentRequest) GetMinConfs() int32 { - if m != nil { - return m.MinConfs - } - return 0 -} - -func (m *ListUnspentRequest) GetMaxConfs() int32 { - if m != nil { - return m.MaxConfs - } - return 0 -} - -type ListUnspentResponse struct { - // A list of utxos satisfying the specified number of confirmations. 
- Utxos []*lnrpc.Utxo `protobuf:"bytes,1,rep,name=utxos,proto3" json:"utxos,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListUnspentResponse) Reset() { *m = ListUnspentResponse{} } -func (m *ListUnspentResponse) String() string { return proto.CompactTextString(m) } -func (*ListUnspentResponse) ProtoMessage() {} -func (*ListUnspentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_6cc6942ac78249e5, []int{1} -} - -func (m *ListUnspentResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListUnspentResponse.Unmarshal(m, b) -} -func (m *ListUnspentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListUnspentResponse.Marshal(b, m, deterministic) -} -func (m *ListUnspentResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListUnspentResponse.Merge(m, src) -} -func (m *ListUnspentResponse) XXX_Size() int { - return xxx_messageInfo_ListUnspentResponse.Size(m) -} -func (m *ListUnspentResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ListUnspentResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ListUnspentResponse proto.InternalMessageInfo - -func (m *ListUnspentResponse) GetUtxos() []*lnrpc.Utxo { - if m != nil { - return m.Utxos - } - return nil -} - -type LeaseOutputRequest struct { - // - //An ID of 32 random bytes that must be unique for each distinct application - //using this RPC which will be used to bound the output lease to. - Id []byte `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - // The identifying outpoint of the output being leased. 
- Outpoint *lnrpc.OutPoint `protobuf:"bytes,2,opt,name=outpoint,proto3" json:"outpoint,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LeaseOutputRequest) Reset() { *m = LeaseOutputRequest{} } -func (m *LeaseOutputRequest) String() string { return proto.CompactTextString(m) } -func (*LeaseOutputRequest) ProtoMessage() {} -func (*LeaseOutputRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_6cc6942ac78249e5, []int{2} -} - -func (m *LeaseOutputRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LeaseOutputRequest.Unmarshal(m, b) -} -func (m *LeaseOutputRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LeaseOutputRequest.Marshal(b, m, deterministic) -} -func (m *LeaseOutputRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaseOutputRequest.Merge(m, src) -} -func (m *LeaseOutputRequest) XXX_Size() int { - return xxx_messageInfo_LeaseOutputRequest.Size(m) -} -func (m *LeaseOutputRequest) XXX_DiscardUnknown() { - xxx_messageInfo_LeaseOutputRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_LeaseOutputRequest proto.InternalMessageInfo - -func (m *LeaseOutputRequest) GetId() []byte { - if m != nil { - return m.Id - } - return nil -} - -func (m *LeaseOutputRequest) GetOutpoint() *lnrpc.OutPoint { - if m != nil { - return m.Outpoint - } - return nil -} - -type LeaseOutputResponse struct { - // - //The absolute expiration of the output lease represented as a unix timestamp. 
- Expiration uint64 `protobuf:"varint,1,opt,name=expiration,proto3" json:"expiration,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LeaseOutputResponse) Reset() { *m = LeaseOutputResponse{} } -func (m *LeaseOutputResponse) String() string { return proto.CompactTextString(m) } -func (*LeaseOutputResponse) ProtoMessage() {} -func (*LeaseOutputResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_6cc6942ac78249e5, []int{3} -} - -func (m *LeaseOutputResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LeaseOutputResponse.Unmarshal(m, b) -} -func (m *LeaseOutputResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LeaseOutputResponse.Marshal(b, m, deterministic) -} -func (m *LeaseOutputResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaseOutputResponse.Merge(m, src) -} -func (m *LeaseOutputResponse) XXX_Size() int { - return xxx_messageInfo_LeaseOutputResponse.Size(m) -} -func (m *LeaseOutputResponse) XXX_DiscardUnknown() { - xxx_messageInfo_LeaseOutputResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_LeaseOutputResponse proto.InternalMessageInfo - -func (m *LeaseOutputResponse) GetExpiration() uint64 { - if m != nil { - return m.Expiration - } - return 0 -} - -type ReleaseOutputRequest struct { - // The unique ID that was used to lock the output. - Id []byte `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - // The identifying outpoint of the output being released. 
- Outpoint *lnrpc.OutPoint `protobuf:"bytes,2,opt,name=outpoint,proto3" json:"outpoint,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ReleaseOutputRequest) Reset() { *m = ReleaseOutputRequest{} } -func (m *ReleaseOutputRequest) String() string { return proto.CompactTextString(m) } -func (*ReleaseOutputRequest) ProtoMessage() {} -func (*ReleaseOutputRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_6cc6942ac78249e5, []int{4} -} - -func (m *ReleaseOutputRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ReleaseOutputRequest.Unmarshal(m, b) -} -func (m *ReleaseOutputRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ReleaseOutputRequest.Marshal(b, m, deterministic) -} -func (m *ReleaseOutputRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReleaseOutputRequest.Merge(m, src) -} -func (m *ReleaseOutputRequest) XXX_Size() int { - return xxx_messageInfo_ReleaseOutputRequest.Size(m) -} -func (m *ReleaseOutputRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ReleaseOutputRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ReleaseOutputRequest proto.InternalMessageInfo - -func (m *ReleaseOutputRequest) GetId() []byte { - if m != nil { - return m.Id - } - return nil -} - -func (m *ReleaseOutputRequest) GetOutpoint() *lnrpc.OutPoint { - if m != nil { - return m.Outpoint - } - return nil -} - -type ReleaseOutputResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ReleaseOutputResponse) Reset() { *m = ReleaseOutputResponse{} } -func (m *ReleaseOutputResponse) String() string { return proto.CompactTextString(m) } -func (*ReleaseOutputResponse) ProtoMessage() {} -func (*ReleaseOutputResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_6cc6942ac78249e5, []int{5} -} - -func (m *ReleaseOutputResponse) 
XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ReleaseOutputResponse.Unmarshal(m, b) -} -func (m *ReleaseOutputResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ReleaseOutputResponse.Marshal(b, m, deterministic) -} -func (m *ReleaseOutputResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReleaseOutputResponse.Merge(m, src) -} -func (m *ReleaseOutputResponse) XXX_Size() int { - return xxx_messageInfo_ReleaseOutputResponse.Size(m) -} -func (m *ReleaseOutputResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ReleaseOutputResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ReleaseOutputResponse proto.InternalMessageInfo - -type KeyReq struct { - // - //Is the key finger print of the root pubkey that this request is targeting. - //This allows the WalletKit to possibly serve out keys for multiple HD chains - //via public derivation. - KeyFingerPrint int32 `protobuf:"varint,1,opt,name=key_finger_print,json=keyFingerPrint,proto3" json:"key_finger_print,omitempty"` - // - //The target key family to derive a key from. In other contexts, this is - //known as the "account". 
- KeyFamily int32 `protobuf:"varint,2,opt,name=key_family,json=keyFamily,proto3" json:"key_family,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *KeyReq) Reset() { *m = KeyReq{} } -func (m *KeyReq) String() string { return proto.CompactTextString(m) } -func (*KeyReq) ProtoMessage() {} -func (*KeyReq) Descriptor() ([]byte, []int) { - return fileDescriptor_6cc6942ac78249e5, []int{6} -} - -func (m *KeyReq) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_KeyReq.Unmarshal(m, b) -} -func (m *KeyReq) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_KeyReq.Marshal(b, m, deterministic) -} -func (m *KeyReq) XXX_Merge(src proto.Message) { - xxx_messageInfo_KeyReq.Merge(m, src) -} -func (m *KeyReq) XXX_Size() int { - return xxx_messageInfo_KeyReq.Size(m) -} -func (m *KeyReq) XXX_DiscardUnknown() { - xxx_messageInfo_KeyReq.DiscardUnknown(m) -} - -var xxx_messageInfo_KeyReq proto.InternalMessageInfo - -func (m *KeyReq) GetKeyFingerPrint() int32 { - if m != nil { - return m.KeyFingerPrint - } - return 0 -} - -func (m *KeyReq) GetKeyFamily() int32 { - if m != nil { - return m.KeyFamily - } - return 0 -} - -type AddrRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AddrRequest) Reset() { *m = AddrRequest{} } -func (m *AddrRequest) String() string { return proto.CompactTextString(m) } -func (*AddrRequest) ProtoMessage() {} -func (*AddrRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_6cc6942ac78249e5, []int{7} -} - -func (m *AddrRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_AddrRequest.Unmarshal(m, b) -} -func (m *AddrRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AddrRequest.Marshal(b, m, deterministic) -} -func (m *AddrRequest) XXX_Merge(src proto.Message) { - 
xxx_messageInfo_AddrRequest.Merge(m, src) -} -func (m *AddrRequest) XXX_Size() int { - return xxx_messageInfo_AddrRequest.Size(m) -} -func (m *AddrRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AddrRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_AddrRequest proto.InternalMessageInfo - -type AddrResponse struct { - // - //The address encoded using a bech32 format. - Addr string `protobuf:"bytes,1,opt,name=addr,proto3" json:"addr,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AddrResponse) Reset() { *m = AddrResponse{} } -func (m *AddrResponse) String() string { return proto.CompactTextString(m) } -func (*AddrResponse) ProtoMessage() {} -func (*AddrResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_6cc6942ac78249e5, []int{8} -} - -func (m *AddrResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_AddrResponse.Unmarshal(m, b) -} -func (m *AddrResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AddrResponse.Marshal(b, m, deterministic) -} -func (m *AddrResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AddrResponse.Merge(m, src) -} -func (m *AddrResponse) XXX_Size() int { - return xxx_messageInfo_AddrResponse.Size(m) -} -func (m *AddrResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AddrResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_AddrResponse proto.InternalMessageInfo - -func (m *AddrResponse) GetAddr() string { - if m != nil { - return m.Addr - } - return "" -} - -type Transaction struct { - // - //The raw serialized transaction. - TxHex []byte `protobuf:"bytes,1,opt,name=tx_hex,json=txHex,proto3" json:"tx_hex,omitempty"` - // - //An optional label to save with the transaction. Limited to 500 characters. 
- Label string `protobuf:"bytes,2,opt,name=label,proto3" json:"label,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Transaction) Reset() { *m = Transaction{} } -func (m *Transaction) String() string { return proto.CompactTextString(m) } -func (*Transaction) ProtoMessage() {} -func (*Transaction) Descriptor() ([]byte, []int) { - return fileDescriptor_6cc6942ac78249e5, []int{9} -} - -func (m *Transaction) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Transaction.Unmarshal(m, b) -} -func (m *Transaction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Transaction.Marshal(b, m, deterministic) -} -func (m *Transaction) XXX_Merge(src proto.Message) { - xxx_messageInfo_Transaction.Merge(m, src) -} -func (m *Transaction) XXX_Size() int { - return xxx_messageInfo_Transaction.Size(m) -} -func (m *Transaction) XXX_DiscardUnknown() { - xxx_messageInfo_Transaction.DiscardUnknown(m) -} - -var xxx_messageInfo_Transaction proto.InternalMessageInfo - -func (m *Transaction) GetTxHex() []byte { - if m != nil { - return m.TxHex - } - return nil -} - -func (m *Transaction) GetLabel() string { - if m != nil { - return m.Label - } - return "" -} - -type PublishResponse struct { - // - //If blank, then no error occurred and the transaction was successfully - //published. If not the empty string, then a string representation of the - //broadcast error. 
- // - //TODO(roasbeef): map to a proper enum type - PublishError string `protobuf:"bytes,1,opt,name=publish_error,json=publishError,proto3" json:"publish_error,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PublishResponse) Reset() { *m = PublishResponse{} } -func (m *PublishResponse) String() string { return proto.CompactTextString(m) } -func (*PublishResponse) ProtoMessage() {} -func (*PublishResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_6cc6942ac78249e5, []int{10} -} - -func (m *PublishResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PublishResponse.Unmarshal(m, b) -} -func (m *PublishResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PublishResponse.Marshal(b, m, deterministic) -} -func (m *PublishResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_PublishResponse.Merge(m, src) -} -func (m *PublishResponse) XXX_Size() int { - return xxx_messageInfo_PublishResponse.Size(m) -} -func (m *PublishResponse) XXX_DiscardUnknown() { - xxx_messageInfo_PublishResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_PublishResponse proto.InternalMessageInfo - -func (m *PublishResponse) GetPublishError() string { - if m != nil { - return m.PublishError - } - return "" -} - -type SendOutputsRequest struct { - // - //The number of satoshis per kilo weight that should be used when crafting - //this transaction. - SatPerKw int64 `protobuf:"varint,1,opt,name=sat_per_kw,json=satPerKw,proto3" json:"sat_per_kw,omitempty"` - // - //A slice of the outputs that should be created in the transaction produced. - Outputs []*signrpc.TxOut `protobuf:"bytes,2,rep,name=outputs,proto3" json:"outputs,omitempty"` - // An optional label for the transaction, limited to 500 characters. 
- Label string `protobuf:"bytes,3,opt,name=label,proto3" json:"label,omitempty"` - // The minimum number of confirmations each one of your outputs used for - // the transaction must satisfy. - MinConfs int32 `protobuf:"varint,4,opt,name=min_confs,json=minConfs,proto3" json:"min_confs,omitempty"` - // Whether unconfirmed outputs should be used as inputs for the transaction. - SpendUnconfirmed bool `protobuf:"varint,5,opt,name=spend_unconfirmed,json=spendUnconfirmed,proto3" json:"spend_unconfirmed,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SendOutputsRequest) Reset() { *m = SendOutputsRequest{} } -func (m *SendOutputsRequest) String() string { return proto.CompactTextString(m) } -func (*SendOutputsRequest) ProtoMessage() {} -func (*SendOutputsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_6cc6942ac78249e5, []int{11} -} - -func (m *SendOutputsRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SendOutputsRequest.Unmarshal(m, b) -} -func (m *SendOutputsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SendOutputsRequest.Marshal(b, m, deterministic) -} -func (m *SendOutputsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SendOutputsRequest.Merge(m, src) -} -func (m *SendOutputsRequest) XXX_Size() int { - return xxx_messageInfo_SendOutputsRequest.Size(m) -} -func (m *SendOutputsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SendOutputsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_SendOutputsRequest proto.InternalMessageInfo - -func (m *SendOutputsRequest) GetSatPerKw() int64 { - if m != nil { - return m.SatPerKw - } - return 0 -} - -func (m *SendOutputsRequest) GetOutputs() []*signrpc.TxOut { - if m != nil { - return m.Outputs - } - return nil -} - -func (m *SendOutputsRequest) GetLabel() string { - if m != nil { - return m.Label - } - return "" -} - -func (m *SendOutputsRequest) 
GetMinConfs() int32 { - if m != nil { - return m.MinConfs - } - return 0 -} - -func (m *SendOutputsRequest) GetSpendUnconfirmed() bool { - if m != nil { - return m.SpendUnconfirmed - } - return false -} - -type SendOutputsResponse struct { - // - //The serialized transaction sent out on the network. - RawTx []byte `protobuf:"bytes,1,opt,name=raw_tx,json=rawTx,proto3" json:"raw_tx,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SendOutputsResponse) Reset() { *m = SendOutputsResponse{} } -func (m *SendOutputsResponse) String() string { return proto.CompactTextString(m) } -func (*SendOutputsResponse) ProtoMessage() {} -func (*SendOutputsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_6cc6942ac78249e5, []int{12} -} - -func (m *SendOutputsResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SendOutputsResponse.Unmarshal(m, b) -} -func (m *SendOutputsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SendOutputsResponse.Marshal(b, m, deterministic) -} -func (m *SendOutputsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_SendOutputsResponse.Merge(m, src) -} -func (m *SendOutputsResponse) XXX_Size() int { - return xxx_messageInfo_SendOutputsResponse.Size(m) -} -func (m *SendOutputsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_SendOutputsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_SendOutputsResponse proto.InternalMessageInfo - -func (m *SendOutputsResponse) GetRawTx() []byte { - if m != nil { - return m.RawTx - } - return nil -} - -type EstimateFeeRequest struct { - // - //The number of confirmations to shoot for when estimating the fee. 
- ConfTarget int32 `protobuf:"varint,1,opt,name=conf_target,json=confTarget,proto3" json:"conf_target,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *EstimateFeeRequest) Reset() { *m = EstimateFeeRequest{} } -func (m *EstimateFeeRequest) String() string { return proto.CompactTextString(m) } -func (*EstimateFeeRequest) ProtoMessage() {} -func (*EstimateFeeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_6cc6942ac78249e5, []int{13} -} - -func (m *EstimateFeeRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EstimateFeeRequest.Unmarshal(m, b) -} -func (m *EstimateFeeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EstimateFeeRequest.Marshal(b, m, deterministic) -} -func (m *EstimateFeeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_EstimateFeeRequest.Merge(m, src) -} -func (m *EstimateFeeRequest) XXX_Size() int { - return xxx_messageInfo_EstimateFeeRequest.Size(m) -} -func (m *EstimateFeeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_EstimateFeeRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_EstimateFeeRequest proto.InternalMessageInfo - -func (m *EstimateFeeRequest) GetConfTarget() int32 { - if m != nil { - return m.ConfTarget - } - return 0 -} - -type EstimateFeeResponse struct { - // - //The amount of satoshis per kw that should be used in order to reach the - //confirmation target in the request. 
- SatPerKw int64 `protobuf:"varint,1,opt,name=sat_per_kw,json=satPerKw,proto3" json:"sat_per_kw,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *EstimateFeeResponse) Reset() { *m = EstimateFeeResponse{} } -func (m *EstimateFeeResponse) String() string { return proto.CompactTextString(m) } -func (*EstimateFeeResponse) ProtoMessage() {} -func (*EstimateFeeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_6cc6942ac78249e5, []int{14} -} - -func (m *EstimateFeeResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EstimateFeeResponse.Unmarshal(m, b) -} -func (m *EstimateFeeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EstimateFeeResponse.Marshal(b, m, deterministic) -} -func (m *EstimateFeeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_EstimateFeeResponse.Merge(m, src) -} -func (m *EstimateFeeResponse) XXX_Size() int { - return xxx_messageInfo_EstimateFeeResponse.Size(m) -} -func (m *EstimateFeeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_EstimateFeeResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_EstimateFeeResponse proto.InternalMessageInfo - -func (m *EstimateFeeResponse) GetSatPerKw() int64 { - if m != nil { - return m.SatPerKw - } - return 0 -} - -type PendingSweep struct { - // The outpoint of the output we're attempting to sweep. - Outpoint *lnrpc.OutPoint `protobuf:"bytes,1,opt,name=outpoint,proto3" json:"outpoint,omitempty"` - // The witness type of the output we're attempting to sweep. - WitnessType WitnessType `protobuf:"varint,2,opt,name=witness_type,json=witnessType,proto3,enum=walletrpc.WitnessType" json:"witness_type,omitempty"` - // The value of the output we're attempting to sweep. - AmountSat uint32 `protobuf:"varint,3,opt,name=amount_sat,json=amountSat,proto3" json:"amount_sat,omitempty"` - // - //The fee rate we'll use to sweep the output. 
The fee rate is only determined - //once a sweeping transaction for the output is created, so it's possible for - //this to be 0 before this. - SatPerByte uint32 `protobuf:"varint,4,opt,name=sat_per_byte,json=satPerByte,proto3" json:"sat_per_byte,omitempty"` - // The number of broadcast attempts we've made to sweep the output. - BroadcastAttempts uint32 `protobuf:"varint,5,opt,name=broadcast_attempts,json=broadcastAttempts,proto3" json:"broadcast_attempts,omitempty"` - // - //The next height of the chain at which we'll attempt to broadcast the - //sweep transaction of the output. - NextBroadcastHeight uint32 `protobuf:"varint,6,opt,name=next_broadcast_height,json=nextBroadcastHeight,proto3" json:"next_broadcast_height,omitempty"` - // The requested confirmation target for this output. - RequestedConfTarget uint32 `protobuf:"varint,8,opt,name=requested_conf_target,json=requestedConfTarget,proto3" json:"requested_conf_target,omitempty"` - // The requested fee rate, expressed in sat/byte, for this output. - RequestedSatPerByte uint32 `protobuf:"varint,9,opt,name=requested_sat_per_byte,json=requestedSatPerByte,proto3" json:"requested_sat_per_byte,omitempty"` - // - //Whether this input must be force-swept. This means that it is swept even - //if it has a negative yield. 
- Force bool `protobuf:"varint,7,opt,name=force,proto3" json:"force,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PendingSweep) Reset() { *m = PendingSweep{} } -func (m *PendingSweep) String() string { return proto.CompactTextString(m) } -func (*PendingSweep) ProtoMessage() {} -func (*PendingSweep) Descriptor() ([]byte, []int) { - return fileDescriptor_6cc6942ac78249e5, []int{15} -} - -func (m *PendingSweep) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PendingSweep.Unmarshal(m, b) -} -func (m *PendingSweep) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PendingSweep.Marshal(b, m, deterministic) -} -func (m *PendingSweep) XXX_Merge(src proto.Message) { - xxx_messageInfo_PendingSweep.Merge(m, src) -} -func (m *PendingSweep) XXX_Size() int { - return xxx_messageInfo_PendingSweep.Size(m) -} -func (m *PendingSweep) XXX_DiscardUnknown() { - xxx_messageInfo_PendingSweep.DiscardUnknown(m) -} - -var xxx_messageInfo_PendingSweep proto.InternalMessageInfo - -func (m *PendingSweep) GetOutpoint() *lnrpc.OutPoint { - if m != nil { - return m.Outpoint - } - return nil -} - -func (m *PendingSweep) GetWitnessType() WitnessType { - if m != nil { - return m.WitnessType - } - return WitnessType_UNKNOWN_WITNESS -} - -func (m *PendingSweep) GetAmountSat() uint32 { - if m != nil { - return m.AmountSat - } - return 0 -} - -func (m *PendingSweep) GetSatPerByte() uint32 { - if m != nil { - return m.SatPerByte - } - return 0 -} - -func (m *PendingSweep) GetBroadcastAttempts() uint32 { - if m != nil { - return m.BroadcastAttempts - } - return 0 -} - -func (m *PendingSweep) GetNextBroadcastHeight() uint32 { - if m != nil { - return m.NextBroadcastHeight - } - return 0 -} - -func (m *PendingSweep) GetRequestedConfTarget() uint32 { - if m != nil { - return m.RequestedConfTarget - } - return 0 -} - -func (m *PendingSweep) GetRequestedSatPerByte() uint32 { 
- if m != nil { - return m.RequestedSatPerByte - } - return 0 -} - -func (m *PendingSweep) GetForce() bool { - if m != nil { - return m.Force - } - return false -} - -type PendingSweepsRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PendingSweepsRequest) Reset() { *m = PendingSweepsRequest{} } -func (m *PendingSweepsRequest) String() string { return proto.CompactTextString(m) } -func (*PendingSweepsRequest) ProtoMessage() {} -func (*PendingSweepsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_6cc6942ac78249e5, []int{16} -} - -func (m *PendingSweepsRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PendingSweepsRequest.Unmarshal(m, b) -} -func (m *PendingSweepsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PendingSweepsRequest.Marshal(b, m, deterministic) -} -func (m *PendingSweepsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_PendingSweepsRequest.Merge(m, src) -} -func (m *PendingSweepsRequest) XXX_Size() int { - return xxx_messageInfo_PendingSweepsRequest.Size(m) -} -func (m *PendingSweepsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_PendingSweepsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_PendingSweepsRequest proto.InternalMessageInfo - -type PendingSweepsResponse struct { - // - //The set of outputs currently being swept by lnd's central batching engine. 
- PendingSweeps []*PendingSweep `protobuf:"bytes,1,rep,name=pending_sweeps,json=pendingSweeps,proto3" json:"pending_sweeps,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PendingSweepsResponse) Reset() { *m = PendingSweepsResponse{} } -func (m *PendingSweepsResponse) String() string { return proto.CompactTextString(m) } -func (*PendingSweepsResponse) ProtoMessage() {} -func (*PendingSweepsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_6cc6942ac78249e5, []int{17} -} - -func (m *PendingSweepsResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PendingSweepsResponse.Unmarshal(m, b) -} -func (m *PendingSweepsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PendingSweepsResponse.Marshal(b, m, deterministic) -} -func (m *PendingSweepsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_PendingSweepsResponse.Merge(m, src) -} -func (m *PendingSweepsResponse) XXX_Size() int { - return xxx_messageInfo_PendingSweepsResponse.Size(m) -} -func (m *PendingSweepsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_PendingSweepsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_PendingSweepsResponse proto.InternalMessageInfo - -func (m *PendingSweepsResponse) GetPendingSweeps() []*PendingSweep { - if m != nil { - return m.PendingSweeps - } - return nil -} - -type BumpFeeRequest struct { - // The input we're attempting to bump the fee of. - Outpoint *lnrpc.OutPoint `protobuf:"bytes,1,opt,name=outpoint,proto3" json:"outpoint,omitempty"` - // The target number of blocks that the input should be spent within. - TargetConf uint32 `protobuf:"varint,2,opt,name=target_conf,json=targetConf,proto3" json:"target_conf,omitempty"` - // - //The fee rate, expressed in sat/byte, that should be used to spend the input - //with. 
- SatPerByte uint32 `protobuf:"varint,3,opt,name=sat_per_byte,json=satPerByte,proto3" json:"sat_per_byte,omitempty"` - // - //Whether this input must be force-swept. This means that it is swept even - //if it has a negative yield. - Force bool `protobuf:"varint,4,opt,name=force,proto3" json:"force,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BumpFeeRequest) Reset() { *m = BumpFeeRequest{} } -func (m *BumpFeeRequest) String() string { return proto.CompactTextString(m) } -func (*BumpFeeRequest) ProtoMessage() {} -func (*BumpFeeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_6cc6942ac78249e5, []int{18} -} - -func (m *BumpFeeRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BumpFeeRequest.Unmarshal(m, b) -} -func (m *BumpFeeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BumpFeeRequest.Marshal(b, m, deterministic) -} -func (m *BumpFeeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_BumpFeeRequest.Merge(m, src) -} -func (m *BumpFeeRequest) XXX_Size() int { - return xxx_messageInfo_BumpFeeRequest.Size(m) -} -func (m *BumpFeeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_BumpFeeRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_BumpFeeRequest proto.InternalMessageInfo - -func (m *BumpFeeRequest) GetOutpoint() *lnrpc.OutPoint { - if m != nil { - return m.Outpoint - } - return nil -} - -func (m *BumpFeeRequest) GetTargetConf() uint32 { - if m != nil { - return m.TargetConf - } - return 0 -} - -func (m *BumpFeeRequest) GetSatPerByte() uint32 { - if m != nil { - return m.SatPerByte - } - return 0 -} - -func (m *BumpFeeRequest) GetForce() bool { - if m != nil { - return m.Force - } - return false -} - -type BumpFeeResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BumpFeeResponse) Reset() { *m = 
BumpFeeResponse{} } -func (m *BumpFeeResponse) String() string { return proto.CompactTextString(m) } -func (*BumpFeeResponse) ProtoMessage() {} -func (*BumpFeeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_6cc6942ac78249e5, []int{19} -} - -func (m *BumpFeeResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BumpFeeResponse.Unmarshal(m, b) -} -func (m *BumpFeeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BumpFeeResponse.Marshal(b, m, deterministic) -} -func (m *BumpFeeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_BumpFeeResponse.Merge(m, src) -} -func (m *BumpFeeResponse) XXX_Size() int { - return xxx_messageInfo_BumpFeeResponse.Size(m) -} -func (m *BumpFeeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_BumpFeeResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_BumpFeeResponse proto.InternalMessageInfo - -type ListSweepsRequest struct { - // - //Retrieve the full sweep transaction details. If false, only the sweep txids - //will be returned. Note that some sweeps that LND publishes will have been - //replaced-by-fee, so will not be included in this output. 
- Verbose bool `protobuf:"varint,1,opt,name=verbose,proto3" json:"verbose,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListSweepsRequest) Reset() { *m = ListSweepsRequest{} } -func (m *ListSweepsRequest) String() string { return proto.CompactTextString(m) } -func (*ListSweepsRequest) ProtoMessage() {} -func (*ListSweepsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_6cc6942ac78249e5, []int{20} -} - -func (m *ListSweepsRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListSweepsRequest.Unmarshal(m, b) -} -func (m *ListSweepsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListSweepsRequest.Marshal(b, m, deterministic) -} -func (m *ListSweepsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListSweepsRequest.Merge(m, src) -} -func (m *ListSweepsRequest) XXX_Size() int { - return xxx_messageInfo_ListSweepsRequest.Size(m) -} -func (m *ListSweepsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ListSweepsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ListSweepsRequest proto.InternalMessageInfo - -func (m *ListSweepsRequest) GetVerbose() bool { - if m != nil { - return m.Verbose - } - return false -} - -type ListSweepsResponse struct { - // Types that are valid to be assigned to Sweeps: - // *ListSweepsResponse_TransactionDetails - // *ListSweepsResponse_TransactionIds - Sweeps isListSweepsResponse_Sweeps `protobuf_oneof:"sweeps"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListSweepsResponse) Reset() { *m = ListSweepsResponse{} } -func (m *ListSweepsResponse) String() string { return proto.CompactTextString(m) } -func (*ListSweepsResponse) ProtoMessage() {} -func (*ListSweepsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_6cc6942ac78249e5, []int{21} -} - -func (m *ListSweepsResponse) 
XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListSweepsResponse.Unmarshal(m, b) -} -func (m *ListSweepsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListSweepsResponse.Marshal(b, m, deterministic) -} -func (m *ListSweepsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListSweepsResponse.Merge(m, src) -} -func (m *ListSweepsResponse) XXX_Size() int { - return xxx_messageInfo_ListSweepsResponse.Size(m) -} -func (m *ListSweepsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ListSweepsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ListSweepsResponse proto.InternalMessageInfo - -type isListSweepsResponse_Sweeps interface { - isListSweepsResponse_Sweeps() -} - -type ListSweepsResponse_TransactionDetails struct { - TransactionDetails *lnrpc.TransactionDetails `protobuf:"bytes,1,opt,name=transaction_details,json=transactionDetails,proto3,oneof"` -} - -type ListSweepsResponse_TransactionIds struct { - TransactionIds *ListSweepsResponse_TransactionIDs `protobuf:"bytes,2,opt,name=transaction_ids,json=transactionIds,proto3,oneof"` -} - -func (*ListSweepsResponse_TransactionDetails) isListSweepsResponse_Sweeps() {} - -func (*ListSweepsResponse_TransactionIds) isListSweepsResponse_Sweeps() {} - -func (m *ListSweepsResponse) GetSweeps() isListSweepsResponse_Sweeps { - if m != nil { - return m.Sweeps - } - return nil -} - -func (m *ListSweepsResponse) GetTransactionDetails() *lnrpc.TransactionDetails { - if x, ok := m.GetSweeps().(*ListSweepsResponse_TransactionDetails); ok { - return x.TransactionDetails - } - return nil -} - -func (m *ListSweepsResponse) GetTransactionIds() *ListSweepsResponse_TransactionIDs { - if x, ok := m.GetSweeps().(*ListSweepsResponse_TransactionIds); ok { - return x.TransactionIds - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. 
-func (*ListSweepsResponse) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*ListSweepsResponse_TransactionDetails)(nil), - (*ListSweepsResponse_TransactionIds)(nil), - } -} - -type ListSweepsResponse_TransactionIDs struct { - // - //Reversed, hex-encoded string representing the transaction ids of the - //sweeps that our node has broadcast. Note that these transactions may - //not have confirmed yet, we record sweeps on broadcast, not confirmation. - TransactionIds []string `protobuf:"bytes,1,rep,name=transaction_ids,json=transactionIds,proto3" json:"transaction_ids,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListSweepsResponse_TransactionIDs) Reset() { *m = ListSweepsResponse_TransactionIDs{} } -func (m *ListSweepsResponse_TransactionIDs) String() string { return proto.CompactTextString(m) } -func (*ListSweepsResponse_TransactionIDs) ProtoMessage() {} -func (*ListSweepsResponse_TransactionIDs) Descriptor() ([]byte, []int) { - return fileDescriptor_6cc6942ac78249e5, []int{21, 0} -} - -func (m *ListSweepsResponse_TransactionIDs) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListSweepsResponse_TransactionIDs.Unmarshal(m, b) -} -func (m *ListSweepsResponse_TransactionIDs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListSweepsResponse_TransactionIDs.Marshal(b, m, deterministic) -} -func (m *ListSweepsResponse_TransactionIDs) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListSweepsResponse_TransactionIDs.Merge(m, src) -} -func (m *ListSweepsResponse_TransactionIDs) XXX_Size() int { - return xxx_messageInfo_ListSweepsResponse_TransactionIDs.Size(m) -} -func (m *ListSweepsResponse_TransactionIDs) XXX_DiscardUnknown() { - xxx_messageInfo_ListSweepsResponse_TransactionIDs.DiscardUnknown(m) -} - -var xxx_messageInfo_ListSweepsResponse_TransactionIDs proto.InternalMessageInfo - -func (m 
*ListSweepsResponse_TransactionIDs) GetTransactionIds() []string { - if m != nil { - return m.TransactionIds - } - return nil -} - -type LabelTransactionRequest struct { - // The txid of the transaction to label. - Txid []byte `protobuf:"bytes,1,opt,name=txid,proto3" json:"txid,omitempty"` - // The label to add to the transaction, limited to 500 characters. - Label string `protobuf:"bytes,2,opt,name=label,proto3" json:"label,omitempty"` - // Whether to overwrite the existing label, if it is present. - Overwrite bool `protobuf:"varint,3,opt,name=overwrite,proto3" json:"overwrite,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LabelTransactionRequest) Reset() { *m = LabelTransactionRequest{} } -func (m *LabelTransactionRequest) String() string { return proto.CompactTextString(m) } -func (*LabelTransactionRequest) ProtoMessage() {} -func (*LabelTransactionRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_6cc6942ac78249e5, []int{22} -} - -func (m *LabelTransactionRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LabelTransactionRequest.Unmarshal(m, b) -} -func (m *LabelTransactionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LabelTransactionRequest.Marshal(b, m, deterministic) -} -func (m *LabelTransactionRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelTransactionRequest.Merge(m, src) -} -func (m *LabelTransactionRequest) XXX_Size() int { - return xxx_messageInfo_LabelTransactionRequest.Size(m) -} -func (m *LabelTransactionRequest) XXX_DiscardUnknown() { - xxx_messageInfo_LabelTransactionRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_LabelTransactionRequest proto.InternalMessageInfo - -func (m *LabelTransactionRequest) GetTxid() []byte { - if m != nil { - return m.Txid - } - return nil -} - -func (m *LabelTransactionRequest) GetLabel() string { - if m != nil { - return m.Label - } - 
return "" -} - -func (m *LabelTransactionRequest) GetOverwrite() bool { - if m != nil { - return m.Overwrite - } - return false -} - -type LabelTransactionResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LabelTransactionResponse) Reset() { *m = LabelTransactionResponse{} } -func (m *LabelTransactionResponse) String() string { return proto.CompactTextString(m) } -func (*LabelTransactionResponse) ProtoMessage() {} -func (*LabelTransactionResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_6cc6942ac78249e5, []int{23} -} - -func (m *LabelTransactionResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LabelTransactionResponse.Unmarshal(m, b) -} -func (m *LabelTransactionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LabelTransactionResponse.Marshal(b, m, deterministic) -} -func (m *LabelTransactionResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelTransactionResponse.Merge(m, src) -} -func (m *LabelTransactionResponse) XXX_Size() int { - return xxx_messageInfo_LabelTransactionResponse.Size(m) -} -func (m *LabelTransactionResponse) XXX_DiscardUnknown() { - xxx_messageInfo_LabelTransactionResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_LabelTransactionResponse proto.InternalMessageInfo - -type FundPsbtRequest struct { - // Types that are valid to be assigned to Template: - // *FundPsbtRequest_Psbt - // *FundPsbtRequest_Raw - Template isFundPsbtRequest_Template `protobuf_oneof:"template"` - // Types that are valid to be assigned to Fees: - // *FundPsbtRequest_TargetConf - // *FundPsbtRequest_SatPerVbyte - Fees isFundPsbtRequest_Fees `protobuf_oneof:"fees"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FundPsbtRequest) Reset() { *m = FundPsbtRequest{} } -func (m *FundPsbtRequest) String() string { return 
proto.CompactTextString(m) } -func (*FundPsbtRequest) ProtoMessage() {} -func (*FundPsbtRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_6cc6942ac78249e5, []int{24} -} - -func (m *FundPsbtRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FundPsbtRequest.Unmarshal(m, b) -} -func (m *FundPsbtRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FundPsbtRequest.Marshal(b, m, deterministic) -} -func (m *FundPsbtRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_FundPsbtRequest.Merge(m, src) -} -func (m *FundPsbtRequest) XXX_Size() int { - return xxx_messageInfo_FundPsbtRequest.Size(m) -} -func (m *FundPsbtRequest) XXX_DiscardUnknown() { - xxx_messageInfo_FundPsbtRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_FundPsbtRequest proto.InternalMessageInfo - -type isFundPsbtRequest_Template interface { - isFundPsbtRequest_Template() -} - -type FundPsbtRequest_Psbt struct { - Psbt []byte `protobuf:"bytes,1,opt,name=psbt,proto3,oneof"` -} - -type FundPsbtRequest_Raw struct { - Raw *TxTemplate `protobuf:"bytes,2,opt,name=raw,proto3,oneof"` -} - -func (*FundPsbtRequest_Psbt) isFundPsbtRequest_Template() {} - -func (*FundPsbtRequest_Raw) isFundPsbtRequest_Template() {} - -func (m *FundPsbtRequest) GetTemplate() isFundPsbtRequest_Template { - if m != nil { - return m.Template - } - return nil -} - -func (m *FundPsbtRequest) GetPsbt() []byte { - if x, ok := m.GetTemplate().(*FundPsbtRequest_Psbt); ok { - return x.Psbt - } - return nil -} - -func (m *FundPsbtRequest) GetRaw() *TxTemplate { - if x, ok := m.GetTemplate().(*FundPsbtRequest_Raw); ok { - return x.Raw - } - return nil -} - -type isFundPsbtRequest_Fees interface { - isFundPsbtRequest_Fees() -} - -type FundPsbtRequest_TargetConf struct { - TargetConf uint32 `protobuf:"varint,3,opt,name=target_conf,json=targetConf,proto3,oneof"` -} - -type FundPsbtRequest_SatPerVbyte struct { - SatPerVbyte uint32 
`protobuf:"varint,4,opt,name=sat_per_vbyte,json=satPerVbyte,proto3,oneof"` -} - -func (*FundPsbtRequest_TargetConf) isFundPsbtRequest_Fees() {} - -func (*FundPsbtRequest_SatPerVbyte) isFundPsbtRequest_Fees() {} - -func (m *FundPsbtRequest) GetFees() isFundPsbtRequest_Fees { - if m != nil { - return m.Fees - } - return nil -} - -func (m *FundPsbtRequest) GetTargetConf() uint32 { - if x, ok := m.GetFees().(*FundPsbtRequest_TargetConf); ok { - return x.TargetConf - } - return 0 -} - -func (m *FundPsbtRequest) GetSatPerVbyte() uint32 { - if x, ok := m.GetFees().(*FundPsbtRequest_SatPerVbyte); ok { - return x.SatPerVbyte - } - return 0 -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*FundPsbtRequest) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*FundPsbtRequest_Psbt)(nil), - (*FundPsbtRequest_Raw)(nil), - (*FundPsbtRequest_TargetConf)(nil), - (*FundPsbtRequest_SatPerVbyte)(nil), - } -} - -type FundPsbtResponse struct { - // - //The funded but not yet signed PSBT packet. - FundedPsbt []byte `protobuf:"bytes,1,opt,name=funded_psbt,json=fundedPsbt,proto3" json:"funded_psbt,omitempty"` - // - //The index of the added change output or -1 if no change was left over. - ChangeOutputIndex int32 `protobuf:"varint,2,opt,name=change_output_index,json=changeOutputIndex,proto3" json:"change_output_index,omitempty"` - // - //The list of lock leases that were acquired for the inputs in the funded PSBT - //packet. 
- LockedUtxos []*UtxoLease `protobuf:"bytes,3,rep,name=locked_utxos,json=lockedUtxos,proto3" json:"locked_utxos,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FundPsbtResponse) Reset() { *m = FundPsbtResponse{} } -func (m *FundPsbtResponse) String() string { return proto.CompactTextString(m) } -func (*FundPsbtResponse) ProtoMessage() {} -func (*FundPsbtResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_6cc6942ac78249e5, []int{25} -} - -func (m *FundPsbtResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FundPsbtResponse.Unmarshal(m, b) -} -func (m *FundPsbtResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FundPsbtResponse.Marshal(b, m, deterministic) -} -func (m *FundPsbtResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_FundPsbtResponse.Merge(m, src) -} -func (m *FundPsbtResponse) XXX_Size() int { - return xxx_messageInfo_FundPsbtResponse.Size(m) -} -func (m *FundPsbtResponse) XXX_DiscardUnknown() { - xxx_messageInfo_FundPsbtResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_FundPsbtResponse proto.InternalMessageInfo - -func (m *FundPsbtResponse) GetFundedPsbt() []byte { - if m != nil { - return m.FundedPsbt - } - return nil -} - -func (m *FundPsbtResponse) GetChangeOutputIndex() int32 { - if m != nil { - return m.ChangeOutputIndex - } - return 0 -} - -func (m *FundPsbtResponse) GetLockedUtxos() []*UtxoLease { - if m != nil { - return m.LockedUtxos - } - return nil -} - -type TxTemplate struct { - // - //An optional list of inputs to use. Every input must be an UTXO known to the - //wallet that has not been locked before. The sum of all inputs must be - //sufficiently greater than the sum of all outputs to pay a miner fee with the - //fee rate specified in the parent message. 
- // - //If no inputs are specified, coin selection will be performed instead and - //inputs of sufficient value will be added to the resulting PSBT. - Inputs []*lnrpc.OutPoint `protobuf:"bytes,1,rep,name=inputs,proto3" json:"inputs,omitempty"` - // - //A map of all addresses and the amounts to send to in the funded PSBT. - Outputs map[string]uint64 `protobuf:"bytes,2,rep,name=outputs,proto3" json:"outputs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *TxTemplate) Reset() { *m = TxTemplate{} } -func (m *TxTemplate) String() string { return proto.CompactTextString(m) } -func (*TxTemplate) ProtoMessage() {} -func (*TxTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_6cc6942ac78249e5, []int{26} -} - -func (m *TxTemplate) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_TxTemplate.Unmarshal(m, b) -} -func (m *TxTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_TxTemplate.Marshal(b, m, deterministic) -} -func (m *TxTemplate) XXX_Merge(src proto.Message) { - xxx_messageInfo_TxTemplate.Merge(m, src) -} -func (m *TxTemplate) XXX_Size() int { - return xxx_messageInfo_TxTemplate.Size(m) -} -func (m *TxTemplate) XXX_DiscardUnknown() { - xxx_messageInfo_TxTemplate.DiscardUnknown(m) -} - -var xxx_messageInfo_TxTemplate proto.InternalMessageInfo - -func (m *TxTemplate) GetInputs() []*lnrpc.OutPoint { - if m != nil { - return m.Inputs - } - return nil -} - -func (m *TxTemplate) GetOutputs() map[string]uint64 { - if m != nil { - return m.Outputs - } - return nil -} - -type UtxoLease struct { - // - //A 32 byte random ID that identifies the lease. - Id []byte `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - // The identifying outpoint of the output being leased. 
- Outpoint *lnrpc.OutPoint `protobuf:"bytes,2,opt,name=outpoint,proto3" json:"outpoint,omitempty"` - // - //The absolute expiration of the output lease represented as a unix timestamp. - Expiration uint64 `protobuf:"varint,3,opt,name=expiration,proto3" json:"expiration,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UtxoLease) Reset() { *m = UtxoLease{} } -func (m *UtxoLease) String() string { return proto.CompactTextString(m) } -func (*UtxoLease) ProtoMessage() {} -func (*UtxoLease) Descriptor() ([]byte, []int) { - return fileDescriptor_6cc6942ac78249e5, []int{27} -} - -func (m *UtxoLease) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_UtxoLease.Unmarshal(m, b) -} -func (m *UtxoLease) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_UtxoLease.Marshal(b, m, deterministic) -} -func (m *UtxoLease) XXX_Merge(src proto.Message) { - xxx_messageInfo_UtxoLease.Merge(m, src) -} -func (m *UtxoLease) XXX_Size() int { - return xxx_messageInfo_UtxoLease.Size(m) -} -func (m *UtxoLease) XXX_DiscardUnknown() { - xxx_messageInfo_UtxoLease.DiscardUnknown(m) -} - -var xxx_messageInfo_UtxoLease proto.InternalMessageInfo - -func (m *UtxoLease) GetId() []byte { - if m != nil { - return m.Id - } - return nil -} - -func (m *UtxoLease) GetOutpoint() *lnrpc.OutPoint { - if m != nil { - return m.Outpoint - } - return nil -} - -func (m *UtxoLease) GetExpiration() uint64 { - if m != nil { - return m.Expiration - } - return 0 -} - -type FinalizePsbtRequest struct { - // - //A PSBT that should be signed and finalized. The PSBT must contain all - //required inputs, outputs, UTXO data and partial signatures of all other - //signers. 
- FundedPsbt []byte `protobuf:"bytes,1,opt,name=funded_psbt,json=fundedPsbt,proto3" json:"funded_psbt,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FinalizePsbtRequest) Reset() { *m = FinalizePsbtRequest{} } -func (m *FinalizePsbtRequest) String() string { return proto.CompactTextString(m) } -func (*FinalizePsbtRequest) ProtoMessage() {} -func (*FinalizePsbtRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_6cc6942ac78249e5, []int{28} -} - -func (m *FinalizePsbtRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FinalizePsbtRequest.Unmarshal(m, b) -} -func (m *FinalizePsbtRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FinalizePsbtRequest.Marshal(b, m, deterministic) -} -func (m *FinalizePsbtRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_FinalizePsbtRequest.Merge(m, src) -} -func (m *FinalizePsbtRequest) XXX_Size() int { - return xxx_messageInfo_FinalizePsbtRequest.Size(m) -} -func (m *FinalizePsbtRequest) XXX_DiscardUnknown() { - xxx_messageInfo_FinalizePsbtRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_FinalizePsbtRequest proto.InternalMessageInfo - -func (m *FinalizePsbtRequest) GetFundedPsbt() []byte { - if m != nil { - return m.FundedPsbt - } - return nil -} - -type FinalizePsbtResponse struct { - // The fully signed and finalized transaction in PSBT format. - SignedPsbt []byte `protobuf:"bytes,1,opt,name=signed_psbt,json=signedPsbt,proto3" json:"signed_psbt,omitempty"` - // The fully signed and finalized transaction in the raw wire format. 
- RawFinalTx []byte `protobuf:"bytes,2,opt,name=raw_final_tx,json=rawFinalTx,proto3" json:"raw_final_tx,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FinalizePsbtResponse) Reset() { *m = FinalizePsbtResponse{} } -func (m *FinalizePsbtResponse) String() string { return proto.CompactTextString(m) } -func (*FinalizePsbtResponse) ProtoMessage() {} -func (*FinalizePsbtResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_6cc6942ac78249e5, []int{29} -} - -func (m *FinalizePsbtResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FinalizePsbtResponse.Unmarshal(m, b) -} -func (m *FinalizePsbtResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FinalizePsbtResponse.Marshal(b, m, deterministic) -} -func (m *FinalizePsbtResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_FinalizePsbtResponse.Merge(m, src) -} -func (m *FinalizePsbtResponse) XXX_Size() int { - return xxx_messageInfo_FinalizePsbtResponse.Size(m) -} -func (m *FinalizePsbtResponse) XXX_DiscardUnknown() { - xxx_messageInfo_FinalizePsbtResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_FinalizePsbtResponse proto.InternalMessageInfo - -func (m *FinalizePsbtResponse) GetSignedPsbt() []byte { - if m != nil { - return m.SignedPsbt - } - return nil -} - -func (m *FinalizePsbtResponse) GetRawFinalTx() []byte { - if m != nil { - return m.RawFinalTx - } - return nil -} - -func init() { - proto.RegisterEnum("walletrpc.WitnessType", WitnessType_name, WitnessType_value) - proto.RegisterType((*ListUnspentRequest)(nil), "walletrpc.ListUnspentRequest") - proto.RegisterType((*ListUnspentResponse)(nil), "walletrpc.ListUnspentResponse") - proto.RegisterType((*LeaseOutputRequest)(nil), "walletrpc.LeaseOutputRequest") - proto.RegisterType((*LeaseOutputResponse)(nil), "walletrpc.LeaseOutputResponse") - proto.RegisterType((*ReleaseOutputRequest)(nil), 
"walletrpc.ReleaseOutputRequest") - proto.RegisterType((*ReleaseOutputResponse)(nil), "walletrpc.ReleaseOutputResponse") - proto.RegisterType((*KeyReq)(nil), "walletrpc.KeyReq") - proto.RegisterType((*AddrRequest)(nil), "walletrpc.AddrRequest") - proto.RegisterType((*AddrResponse)(nil), "walletrpc.AddrResponse") - proto.RegisterType((*Transaction)(nil), "walletrpc.Transaction") - proto.RegisterType((*PublishResponse)(nil), "walletrpc.PublishResponse") - proto.RegisterType((*SendOutputsRequest)(nil), "walletrpc.SendOutputsRequest") - proto.RegisterType((*SendOutputsResponse)(nil), "walletrpc.SendOutputsResponse") - proto.RegisterType((*EstimateFeeRequest)(nil), "walletrpc.EstimateFeeRequest") - proto.RegisterType((*EstimateFeeResponse)(nil), "walletrpc.EstimateFeeResponse") - proto.RegisterType((*PendingSweep)(nil), "walletrpc.PendingSweep") - proto.RegisterType((*PendingSweepsRequest)(nil), "walletrpc.PendingSweepsRequest") - proto.RegisterType((*PendingSweepsResponse)(nil), "walletrpc.PendingSweepsResponse") - proto.RegisterType((*BumpFeeRequest)(nil), "walletrpc.BumpFeeRequest") - proto.RegisterType((*BumpFeeResponse)(nil), "walletrpc.BumpFeeResponse") - proto.RegisterType((*ListSweepsRequest)(nil), "walletrpc.ListSweepsRequest") - proto.RegisterType((*ListSweepsResponse)(nil), "walletrpc.ListSweepsResponse") - proto.RegisterType((*ListSweepsResponse_TransactionIDs)(nil), "walletrpc.ListSweepsResponse.TransactionIDs") - proto.RegisterType((*LabelTransactionRequest)(nil), "walletrpc.LabelTransactionRequest") - proto.RegisterType((*LabelTransactionResponse)(nil), "walletrpc.LabelTransactionResponse") - proto.RegisterType((*FundPsbtRequest)(nil), "walletrpc.FundPsbtRequest") - proto.RegisterType((*FundPsbtResponse)(nil), "walletrpc.FundPsbtResponse") - proto.RegisterType((*TxTemplate)(nil), "walletrpc.TxTemplate") - proto.RegisterMapType((map[string]uint64)(nil), "walletrpc.TxTemplate.OutputsEntry") - proto.RegisterType((*UtxoLease)(nil), "walletrpc.UtxoLease") - 
proto.RegisterType((*FinalizePsbtRequest)(nil), "walletrpc.FinalizePsbtRequest") - proto.RegisterType((*FinalizePsbtResponse)(nil), "walletrpc.FinalizePsbtResponse") -} - -func init() { proto.RegisterFile("walletrpc/walletkit.proto", fileDescriptor_6cc6942ac78249e5) } - -var fileDescriptor_6cc6942ac78249e5 = []byte{ - // 1792 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x58, 0xef, 0x6e, 0x22, 0xc9, - 0x11, 0x5f, 0x0c, 0xc6, 0x50, 0x80, 0x8d, 0x1b, 0xbc, 0x66, 0x59, 0xef, 0xd9, 0x3b, 0x97, 0xe4, - 0x9c, 0xdc, 0x2e, 0x2b, 0x79, 0x75, 0x97, 0x3d, 0x27, 0x8a, 0x62, 0xe3, 0xb1, 0xb0, 0xc0, 0xe0, - 0x6b, 0xf0, 0x5a, 0x9b, 0x7c, 0x18, 0x0d, 0x4c, 0xdb, 0x1e, 0x19, 0x66, 0xe6, 0x66, 0x1a, 0x33, - 0xe4, 0x53, 0x9e, 0x22, 0xd2, 0x49, 0x79, 0x87, 0x7b, 0x81, 0x3c, 0x50, 0x1e, 0x23, 0xea, 0x3f, - 0x0c, 0x3d, 0x80, 0x77, 0x15, 0x25, 0x1f, 0x56, 0x3b, 0x5d, 0xbf, 0xaa, 0x5f, 0x57, 0x57, 0x15, - 0x5d, 0xd5, 0x86, 0x17, 0x13, 0x73, 0x38, 0x24, 0xd4, 0xf7, 0x06, 0xef, 0xc4, 0xd7, 0x83, 0x4d, - 0x6b, 0x9e, 0xef, 0x52, 0x17, 0x65, 0x23, 0xa8, 0x9a, 0xf5, 0xbd, 0x81, 0x90, 0x56, 0xcb, 0x81, - 0x7d, 0xe7, 0x30, 0x75, 0xf6, 0x3f, 0xf1, 0x85, 0x54, 0x6b, 0x03, 0x6a, 0xd9, 0x01, 0xbd, 0x76, - 0x02, 0x8f, 0x38, 0x14, 0x93, 0x9f, 0xc6, 0x24, 0xa0, 0xe8, 0x25, 0x64, 0x47, 0xb6, 0x63, 0x0c, - 0x5c, 0xe7, 0x36, 0xa8, 0x24, 0x0e, 0x12, 0x87, 0xeb, 0x38, 0x33, 0xb2, 0x9d, 0x3a, 0x5b, 0x73, - 0xd0, 0x0c, 0x25, 0xb8, 0x26, 0x41, 0x33, 0xe4, 0xa0, 0xf6, 0x01, 0x4a, 0x31, 0xbe, 0xc0, 0x73, - 0x9d, 0x80, 0xa0, 0xd7, 0xb0, 0x3e, 0xa6, 0xa1, 0xcb, 0xc8, 0x92, 0x87, 0xb9, 0xa3, 0x5c, 0x6d, - 0xc8, 0x5c, 0xa9, 0x5d, 0xd3, 0xd0, 0xc5, 0x02, 0xd1, 0x7e, 0x04, 0xd4, 0x22, 0x66, 0x40, 0x3a, - 0x63, 0xea, 0x8d, 0x23, 0x4f, 0x36, 0x61, 0xcd, 0xb6, 0xb8, 0x0b, 0x79, 0xbc, 0x66, 0x5b, 0xe8, - 0x5b, 0xc8, 0xb8, 0x63, 0xea, 0xb9, 0xb6, 0x43, 0xf9, 0xde, 0xb9, 0xa3, 0x2d, 0xc9, 0xd5, 0x19, - 0xd3, 0x2b, 0x26, 0xc6, 0x91, 0x82, 0xf6, 0x1d, 0x94, 0x62, 
0x94, 0xd2, 0x99, 0xaf, 0x00, 0x48, - 0xe8, 0xd9, 0xbe, 0x49, 0x6d, 0xd7, 0xe1, 0xdc, 0x29, 0xac, 0x48, 0xb4, 0x2e, 0x94, 0x31, 0x19, - 0xfe, 0x9f, 0x7d, 0xd9, 0x85, 0x9d, 0x05, 0x52, 0xe1, 0x8d, 0xf6, 0x23, 0xa4, 0x9b, 0x64, 0x8a, - 0xc9, 0x4f, 0xe8, 0x10, 0x8a, 0x0f, 0x64, 0x6a, 0xdc, 0xda, 0xce, 0x1d, 0xf1, 0x0d, 0xcf, 0x67, - 0xbc, 0x22, 0xf8, 0x9b, 0x0f, 0x64, 0x7a, 0xce, 0xc5, 0x57, 0x4c, 0x8a, 0x5e, 0x01, 0x70, 0x4d, - 0x73, 0x64, 0x0f, 0xa7, 0x32, 0x07, 0x59, 0xa6, 0xc3, 0x05, 0x5a, 0x01, 0x72, 0x27, 0x96, 0xe5, - 0x4b, 0xbf, 0x35, 0x0d, 0xf2, 0x62, 0x29, 0xcf, 0x8f, 0x20, 0x65, 0x5a, 0x96, 0xcf, 0xb9, 0xb3, - 0x98, 0x7f, 0x6b, 0xc7, 0x90, 0xeb, 0xf9, 0xa6, 0x13, 0x98, 0x03, 0x16, 0x02, 0xb4, 0x03, 0x69, - 0x1a, 0x1a, 0xf7, 0x24, 0x94, 0xc7, 0x5d, 0xa7, 0x61, 0x83, 0x84, 0xa8, 0x0c, 0xeb, 0x43, 0xb3, - 0x4f, 0x86, 0x7c, 0xcb, 0x2c, 0x16, 0x0b, 0xed, 0x7b, 0xd8, 0xba, 0x1a, 0xf7, 0x87, 0x76, 0x70, - 0x1f, 0x6d, 0xf1, 0x35, 0x14, 0x3c, 0x21, 0x32, 0x88, 0xef, 0xbb, 0xb3, 0xbd, 0xf2, 0x52, 0xa8, - 0x33, 0x99, 0xf6, 0xaf, 0x04, 0xa0, 0x2e, 0x71, 0x2c, 0x11, 0x90, 0x60, 0x16, 0xe6, 0x3d, 0x80, - 0xc0, 0xa4, 0x86, 0x47, 0x7c, 0xe3, 0x61, 0xc2, 0x0d, 0x93, 0x38, 0x13, 0x98, 0xf4, 0x8a, 0xf8, - 0xcd, 0x09, 0x3a, 0x84, 0x0d, 0x57, 0xe8, 0x57, 0xd6, 0x78, 0x2d, 0x6d, 0xd6, 0x64, 0x61, 0xd7, - 0x7a, 0x61, 0x67, 0x4c, 0xf1, 0x0c, 0x9e, 0x3b, 0x9b, 0x54, 0x9c, 0x8d, 0x97, 0x76, 0x6a, 0xa1, - 0xb4, 0xbf, 0x85, 0x6d, 0x56, 0xb7, 0x96, 0x31, 0x76, 0x98, 0x82, 0xed, 0x8f, 0x88, 0x55, 0x59, - 0x3f, 0x48, 0x1c, 0x66, 0x70, 0x91, 0x03, 0xd7, 0x73, 0xb9, 0xf6, 0x06, 0x4a, 0x31, 0xef, 0xe5, - 0xd1, 0x77, 0x20, 0xed, 0x9b, 0x13, 0x83, 0x46, 0xa1, 0xf3, 0xcd, 0x49, 0x2f, 0xd4, 0xbe, 0x03, - 0xa4, 0x07, 0xd4, 0x1e, 0x99, 0x94, 0x9c, 0x13, 0x32, 0x3b, 0xeb, 0x3e, 0xe4, 0x18, 0xa1, 0x41, - 0x4d, 0xff, 0x8e, 0xcc, 0xb2, 0x0d, 0x4c, 0xd4, 0xe3, 0x12, 0xed, 0x3d, 0x94, 0x62, 0x66, 0x72, - 0x93, 0xcf, 0xc6, 0x48, 0xfb, 0x39, 0x09, 0xf9, 0x2b, 0xe2, 0x58, 0xb6, 0x73, 0xd7, 0x9d, 0x10, - 
0xe2, 0xc5, 0x2a, 0x35, 0xf1, 0x85, 0x4a, 0x45, 0x3f, 0x40, 0x7e, 0x62, 0x53, 0x87, 0x04, 0x81, - 0x41, 0xa7, 0x1e, 0xe1, 0xb9, 0xde, 0x3c, 0x7a, 0x5e, 0x8b, 0x6e, 0x95, 0xda, 0x8d, 0x80, 0x7b, - 0x53, 0x8f, 0xe0, 0xdc, 0x64, 0xbe, 0x60, 0x75, 0x69, 0x8e, 0xdc, 0xb1, 0x43, 0x8d, 0xc0, 0xa4, - 0x3c, 0xee, 0x05, 0x9c, 0x15, 0x92, 0xae, 0x49, 0xd1, 0x01, 0xe4, 0x67, 0x5e, 0xf7, 0xa7, 0x94, - 0xf0, 0xf0, 0x17, 0x30, 0x08, 0xbf, 0x4f, 0xa7, 0x94, 0xa0, 0xb7, 0x80, 0xfa, 0xbe, 0x6b, 0x5a, - 0x03, 0x33, 0xa0, 0x86, 0x49, 0x29, 0x19, 0x79, 0x34, 0xe0, 0x19, 0x28, 0xe0, 0xed, 0x08, 0x39, - 0x91, 0x00, 0x3a, 0x82, 0x1d, 0x87, 0x84, 0xd4, 0x98, 0xdb, 0xdc, 0x13, 0xfb, 0xee, 0x9e, 0x56, - 0xd2, 0xdc, 0xa2, 0xc4, 0xc0, 0xd3, 0x19, 0xd6, 0xe0, 0x10, 0xb3, 0xf1, 0x45, 0xf4, 0x89, 0x65, - 0xa8, 0xc1, 0xcf, 0x08, 0x9b, 0x08, 0xac, 0x47, 0x59, 0x40, 0xef, 0xe1, 0xf9, 0xdc, 0x26, 0x76, - 0x84, 0xec, 0x82, 0x51, 0x77, 0x7e, 0x96, 0x32, 0xac, 0xdf, 0xba, 0xfe, 0x80, 0x54, 0x36, 0x78, - 0x01, 0x89, 0x85, 0xf6, 0x1c, 0xca, 0x6a, 0x6a, 0x66, 0x55, 0xaf, 0xdd, 0xc0, 0xce, 0x82, 0x5c, - 0xa6, 0xfa, 0x4f, 0xb0, 0xe9, 0x09, 0xc0, 0x08, 0x38, 0x22, 0xef, 0xd0, 0x5d, 0x25, 0x21, 0xaa, - 0x25, 0x2e, 0x78, 0x2a, 0x8f, 0xf6, 0x8f, 0x04, 0x6c, 0x9e, 0x8e, 0x47, 0x9e, 0x52, 0x75, 0xff, - 0x55, 0x39, 0xec, 0x43, 0x4e, 0x04, 0x88, 0x07, 0x8b, 0x57, 0x43, 0x01, 0x83, 0x10, 0xb1, 0x10, - 0x2d, 0x65, 0x35, 0xb9, 0x94, 0xd5, 0x28, 0x12, 0x29, 0x35, 0x12, 0xdb, 0xb0, 0x15, 0xf9, 0x25, - 0xef, 0xc2, 0xb7, 0xb0, 0xcd, 0xba, 0x47, 0x2c, 0x32, 0xa8, 0x02, 0x1b, 0x8f, 0xc4, 0xef, 0xbb, - 0x01, 0xe1, 0xce, 0x66, 0xf0, 0x6c, 0xa9, 0xfd, 0x7d, 0x4d, 0x74, 0xaf, 0x85, 0x88, 0xb5, 0xa0, - 0x44, 0xe7, 0x77, 0x99, 0x61, 0x11, 0x6a, 0xda, 0xc3, 0x40, 0x9e, 0xf4, 0x85, 0x3c, 0xa9, 0x72, - 0xdb, 0x9d, 0x09, 0x85, 0xc6, 0x33, 0x8c, 0xe8, 0x92, 0x14, 0xdd, 0xc0, 0x96, 0xca, 0x66, 0x5b, - 0x81, 0xbc, 0xec, 0xdf, 0x28, 0x09, 0x58, 0xf6, 0x42, 0xdd, 0xe0, 0xe2, 0x8c, 0x91, 0x6f, 0x2a, - 0x34, 0x17, 0x56, 0x50, 0xfd, 0x01, 
0x36, 0xe3, 0x3a, 0xe8, 0x9b, 0xe5, 0xad, 0x58, 0xae, 0xb3, - 0x8b, 0xa6, 0xa7, 0x19, 0x48, 0x8b, 0x5a, 0xd0, 0x4c, 0xd8, 0x6d, 0xb1, 0x7b, 0x4d, 0x61, 0x9a, - 0xc5, 0x0d, 0x41, 0x8a, 0x86, 0x51, 0xc3, 0xe2, 0xdf, 0xab, 0x2f, 0x70, 0xb4, 0x07, 0x59, 0xf7, - 0x91, 0xf8, 0x13, 0xdf, 0x96, 0xe9, 0xcb, 0xe0, 0xb9, 0x40, 0xab, 0x42, 0x65, 0x79, 0x0b, 0x99, - 0xb0, 0x5f, 0x12, 0xb0, 0x75, 0x3e, 0x76, 0xac, 0xab, 0xa0, 0x1f, 0xb5, 0xc9, 0x32, 0xa4, 0xbc, - 0xa0, 0x2f, 0x2a, 0x2b, 0xdf, 0x78, 0x86, 0xf9, 0x0a, 0xfd, 0x16, 0x92, 0xbe, 0x39, 0x91, 0xa1, - 0xdb, 0x51, 0x42, 0xd7, 0x0b, 0x7b, 0x64, 0xe4, 0x0d, 0x4d, 0x4a, 0x1a, 0xcf, 0x30, 0xd3, 0x41, - 0xaf, 0xe3, 0x15, 0xc7, 0xeb, 0xa9, 0x91, 0x88, 0xd5, 0xdc, 0xaf, 0xa0, 0x30, 0xab, 0xb9, 0xc7, - 0xf9, 0x55, 0xd2, 0x48, 0xe0, 0x9c, 0x28, 0xbb, 0x8f, 0x4c, 0x78, 0x0a, 0x90, 0xa1, 0x92, 0xfb, - 0x34, 0x0d, 0xa9, 0x5b, 0x42, 0x02, 0xed, 0x9f, 0x09, 0x28, 0xce, 0x3d, 0x96, 0x15, 0xb3, 0x0f, - 0xb9, 0xdb, 0xb1, 0x63, 0x11, 0xcb, 0x98, 0x7b, 0x8e, 0x41, 0x88, 0x98, 0x22, 0xaa, 0x41, 0x69, - 0x70, 0x6f, 0x3a, 0x77, 0xc4, 0x10, 0xdd, 0xc5, 0xb0, 0x1d, 0x8b, 0x84, 0xb2, 0xf3, 0x6e, 0x0b, - 0x48, 0x34, 0x82, 0x0b, 0x06, 0xa0, 0xdf, 0x43, 0x7e, 0xe8, 0x0e, 0x1e, 0x88, 0x65, 0x88, 0xb1, - 0x27, 0xc9, 0x7f, 0xb2, 0x65, 0xe5, 0xd8, 0x6c, 0xf4, 0xe1, 0xc3, 0x09, 0xce, 0x09, 0xcd, 0x6b, - 0x3e, 0x05, 0xfd, 0x92, 0x00, 0x98, 0x47, 0x04, 0x7d, 0x03, 0x69, 0xdb, 0xe1, 0xcd, 0x4e, 0xfc, - 0xe8, 0x97, 0x7e, 0xa7, 0x12, 0x46, 0x7f, 0x5c, 0x6c, 0x8b, 0xda, 0xca, 0x10, 0xd7, 0x64, 0xb7, - 0xd2, 0x1d, 0xea, 0x4f, 0xa3, 0x56, 0x59, 0x3d, 0x86, 0xbc, 0x0a, 0xa0, 0x22, 0x24, 0x1f, 0xc8, - 0x54, 0x36, 0x6d, 0xf6, 0xc9, 0x0a, 0xe7, 0xd1, 0x1c, 0x8e, 0x45, 0x37, 0x48, 0x61, 0xb1, 0x38, - 0x5e, 0xfb, 0x90, 0xd0, 0xee, 0x21, 0x1b, 0x9d, 0xe5, 0x7f, 0x1a, 0x91, 0x16, 0xe6, 0xb2, 0xe4, - 0xd2, 0x5c, 0xf6, 0x3d, 0x94, 0xce, 0x6d, 0xc7, 0x1c, 0xda, 0x7f, 0x23, 0x6a, 0xbd, 0x7d, 0x29, - 0x79, 0xda, 0x27, 0x28, 0xc7, 0xed, 0xe6, 0x59, 0xe7, 0xb3, 0x70, 0xdc, 
0x50, 0x88, 0x78, 0xd6, - 0x0f, 0x20, 0xcf, 0x5a, 0xf9, 0x2d, 0x33, 0x66, 0x0d, 0x7d, 0x4d, 0x68, 0xf8, 0xe6, 0x84, 0xf3, - 0xf5, 0xc2, 0xdf, 0xfd, 0x9c, 0x84, 0x9c, 0xd2, 0x0d, 0x51, 0x09, 0xb6, 0xae, 0xdb, 0xcd, 0x76, - 0xe7, 0xa6, 0x6d, 0xdc, 0x5c, 0xf4, 0xda, 0x7a, 0xb7, 0x5b, 0x7c, 0x86, 0x2a, 0x50, 0xae, 0x77, - 0x2e, 0x2f, 0x2f, 0x7a, 0x97, 0x7a, 0xbb, 0x67, 0xf4, 0x2e, 0x2e, 0x75, 0xa3, 0xd5, 0xa9, 0x37, - 0x8b, 0x09, 0xb4, 0x0b, 0x25, 0x05, 0x69, 0x77, 0x8c, 0x33, 0xbd, 0x75, 0xf2, 0xa9, 0xb8, 0x86, - 0x76, 0x60, 0x5b, 0x01, 0xb0, 0xfe, 0xb1, 0xd3, 0xd4, 0x8b, 0x49, 0xa6, 0xdf, 0xe8, 0xb5, 0xea, - 0x46, 0xe7, 0xfc, 0x5c, 0xc7, 0xfa, 0xd9, 0x0c, 0x48, 0xb1, 0x2d, 0x38, 0x70, 0x52, 0xaf, 0xeb, - 0x57, 0xbd, 0x39, 0xb2, 0x8e, 0x7e, 0x0d, 0xaf, 0x63, 0x26, 0x6c, 0xfb, 0xce, 0x75, 0xcf, 0xe8, - 0xea, 0xf5, 0x4e, 0xfb, 0xcc, 0x68, 0xe9, 0x1f, 0xf5, 0x56, 0x31, 0x8d, 0x7e, 0x03, 0x5a, 0x9c, - 0xa0, 0x7b, 0x5d, 0xaf, 0xeb, 0xdd, 0x6e, 0x5c, 0x6f, 0x03, 0xed, 0xc3, 0xcb, 0x05, 0x0f, 0x2e, - 0x3b, 0x3d, 0x7d, 0xc6, 0x5a, 0xcc, 0xa0, 0x03, 0xd8, 0x5b, 0xf4, 0x84, 0x6b, 0x48, 0xbe, 0x62, - 0x16, 0xed, 0x41, 0x85, 0x6b, 0xa8, 0xcc, 0x33, 0x7f, 0x01, 0x95, 0xa1, 0x28, 0x23, 0x67, 0x34, - 0xf5, 0x4f, 0x46, 0xe3, 0xa4, 0xdb, 0x28, 0xe6, 0xd0, 0x4b, 0xd8, 0x6d, 0xeb, 0x5d, 0x46, 0xb7, - 0x04, 0xe6, 0x17, 0x82, 0x75, 0xd2, 0xae, 0x37, 0x3a, 0xb8, 0x58, 0x38, 0xfa, 0x77, 0x06, 0xb2, - 0x37, 0xfc, 0x37, 0xd0, 0xb4, 0x29, 0x6a, 0x41, 0x4e, 0x79, 0x98, 0xa0, 0x57, 0x0b, 0x97, 0x77, - 0xfc, 0x01, 0x54, 0xfd, 0xea, 0x29, 0x38, 0x6a, 0x31, 0x39, 0xe5, 0x65, 0x11, 0x67, 0x5b, 0x7a, - 0x38, 0xc4, 0xd9, 0x56, 0x3c, 0x48, 0x30, 0x14, 0x62, 0x6f, 0x03, 0xb4, 0xaf, 0x18, 0xac, 0x7a, - 0x8a, 0x54, 0x0f, 0x9e, 0x56, 0x90, 0x9c, 0xc7, 0x50, 0x38, 0x23, 0xbe, 0xfd, 0x48, 0xda, 0x24, - 0xa4, 0x4d, 0x32, 0x45, 0xdb, 0x8a, 0x89, 0x78, 0x70, 0x54, 0x9f, 0x47, 0xa3, 0x73, 0x93, 0x4c, - 0xcf, 0x48, 0x30, 0xf0, 0x6d, 0x8f, 0xba, 0x3e, 0xfa, 0x00, 0x59, 0x61, 0xcb, 0xec, 0x4a, 0xaa, - 0x52, 0xcb, 
0x1d, 0x98, 0xd4, 0xf5, 0x9f, 0xb4, 0xfc, 0x03, 0x64, 0xd8, 0x7e, 0xec, 0xb9, 0x81, - 0xd4, 0x89, 0x51, 0x79, 0x8e, 0x54, 0x77, 0x97, 0xe4, 0xd2, 0xe5, 0x06, 0x20, 0xf9, 0x8e, 0x50, - 0x9f, 0x22, 0x2a, 0x8d, 0x22, 0xaf, 0x56, 0xd5, 0xf9, 0x67, 0xe1, 0xf9, 0xd1, 0x82, 0x9c, 0x32, - 0x9a, 0xc7, 0xd2, 0xb3, 0xfc, 0xe0, 0x88, 0xa5, 0x67, 0xd5, 0x44, 0xdf, 0x82, 0x9c, 0x32, 0x83, - 0xc7, 0xd8, 0x96, 0x47, 0xfa, 0x18, 0xdb, 0xaa, 0xd1, 0x1d, 0x43, 0x21, 0x36, 0xe8, 0xc5, 0x92, - 0xbd, 0x6a, 0x34, 0x8c, 0x25, 0x7b, 0xf5, 0x8c, 0xf8, 0x67, 0xd8, 0x90, 0xa3, 0x14, 0x7a, 0xa1, - 0x28, 0xc7, 0xc7, 0xbe, 0x58, 0xc4, 0x16, 0x26, 0x2f, 0x74, 0x01, 0x30, 0x9f, 0x61, 0xd0, 0xde, - 0x13, 0xa3, 0x8d, 0xe0, 0x79, 0xf5, 0xd9, 0xc1, 0x07, 0xfd, 0x15, 0x8a, 0x8b, 0xf3, 0x02, 0x52, - 0xbb, 0xd1, 0x13, 0xf3, 0x4a, 0xf5, 0xeb, 0xcf, 0xea, 0x48, 0xf2, 0x3a, 0x64, 0x66, 0xdd, 0x1b, - 0xa9, 0xe7, 0x59, 0x18, 0x42, 0xaa, 0x2f, 0x57, 0x62, 0x92, 0xa4, 0x03, 0x79, 0xb5, 0x21, 0x20, - 0x35, 0x65, 0x2b, 0x3a, 0x4c, 0x75, 0xff, 0x49, 0x5c, 0x10, 0x9e, 0xd6, 0xfe, 0xf2, 0xe6, 0xce, - 0xa6, 0xf7, 0xe3, 0x7e, 0x6d, 0xe0, 0x8e, 0xde, 0x79, 0x0f, 0xf4, 0xed, 0xc0, 0x0c, 0xee, 0xd9, - 0x87, 0xf5, 0x6e, 0xe8, 0xb0, 0x7f, 0xf3, 0xbf, 0xd3, 0xf8, 0xde, 0xa0, 0x9f, 0xe6, 0x7f, 0x7c, - 0x79, 0xff, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x9a, 0x37, 0x78, 0x02, 0xc5, 0x11, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// WalletKitClient is the client API for WalletKit service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
-type WalletKitClient interface { - // - //ListUnspent returns a list of all utxos spendable by the wallet with a - //number of confirmations between the specified minimum and maximum. - ListUnspent(ctx context.Context, in *ListUnspentRequest, opts ...grpc.CallOption) (*ListUnspentResponse, error) - // - //LeaseOutput locks an output to the given ID, preventing it from being - //available for any future coin selection attempts. The absolute time of the - //lock's expiration is returned. The expiration of the lock can be extended by - //successive invocations of this RPC. Outputs can be unlocked before their - //expiration through `ReleaseOutput`. - LeaseOutput(ctx context.Context, in *LeaseOutputRequest, opts ...grpc.CallOption) (*LeaseOutputResponse, error) - // - //ReleaseOutput unlocks an output, allowing it to be available for coin - //selection if it remains unspent. The ID should match the one used to - //originally lock the output. - ReleaseOutput(ctx context.Context, in *ReleaseOutputRequest, opts ...grpc.CallOption) (*ReleaseOutputResponse, error) - // - //DeriveNextKey attempts to derive the *next* key within the key family - //(account in BIP43) specified. This method should return the next external - //child within this branch. - DeriveNextKey(ctx context.Context, in *KeyReq, opts ...grpc.CallOption) (*signrpc.KeyDescriptor, error) - // - //DeriveKey attempts to derive an arbitrary key specified by the passed - //KeyLocator. - DeriveKey(ctx context.Context, in *signrpc.KeyLocator, opts ...grpc.CallOption) (*signrpc.KeyDescriptor, error) - // - //NextAddr returns the next unused address within the wallet. - NextAddr(ctx context.Context, in *AddrRequest, opts ...grpc.CallOption) (*AddrResponse, error) - // - //PublishTransaction attempts to publish the passed transaction to the - //network. Once this returns without an error, the wallet will continually - //attempt to re-broadcast the transaction on start up, until it enters the - //chain. 
- PublishTransaction(ctx context.Context, in *Transaction, opts ...grpc.CallOption) (*PublishResponse, error) - // - //SendOutputs is similar to the existing sendmany call in Bitcoind, and - //allows the caller to create a transaction that sends to several outputs at - //once. This is ideal when wanting to batch create a set of transactions. - SendOutputs(ctx context.Context, in *SendOutputsRequest, opts ...grpc.CallOption) (*SendOutputsResponse, error) - // - //EstimateFee attempts to query the internal fee estimator of the wallet to - //determine the fee (in sat/kw) to attach to a transaction in order to - //achieve the confirmation target. - EstimateFee(ctx context.Context, in *EstimateFeeRequest, opts ...grpc.CallOption) (*EstimateFeeResponse, error) - // - //PendingSweeps returns lists of on-chain outputs that lnd is currently - //attempting to sweep within its central batching engine. Outputs with similar - //fee rates are batched together in order to sweep them within a single - //transaction. - // - //NOTE: Some of the fields within PendingSweepsRequest are not guaranteed to - //remain supported. This is an advanced API that depends on the internals of - //the UtxoSweeper, so things may change. - PendingSweeps(ctx context.Context, in *PendingSweepsRequest, opts ...grpc.CallOption) (*PendingSweepsResponse, error) - // - //BumpFee bumps the fee of an arbitrary input within a transaction. This RPC - //takes a different approach than bitcoind's bumpfee command. lnd has a - //central batching engine in which inputs with similar fee rates are batched - //together to save on transaction fees. Due to this, we cannot rely on - //bumping the fee on a specific transaction, since transactions can change at - //any point with the addition of new inputs. The list of inputs that - //currently exist within lnd's central batching engine can be retrieved - //through the PendingSweeps RPC. 
- // - //When bumping the fee of an input that currently exists within lnd's central - //batching engine, a higher fee transaction will be created that replaces the - //lower fee transaction through the Replace-By-Fee (RBF) policy. If it - // - //This RPC also serves useful when wanting to perform a Child-Pays-For-Parent - //(CPFP), where the child transaction pays for its parent's fee. This can be - //done by specifying an outpoint within the low fee transaction that is under - //the control of the wallet. - // - //The fee preference can be expressed either as a specific fee rate or a delta - //of blocks in which the output should be swept on-chain within. If a fee - //preference is not explicitly specified, then an error is returned. - // - //Note that this RPC currently doesn't perform any validation checks on the - //fee preference being provided. For now, the responsibility of ensuring that - //the new fee preference is sufficient is delegated to the user. - BumpFee(ctx context.Context, in *BumpFeeRequest, opts ...grpc.CallOption) (*BumpFeeResponse, error) - // - //ListSweeps returns a list of the sweep transactions our node has produced. - //Note that these sweeps may not be confirmed yet, as we record sweeps on - //broadcast, not confirmation. - ListSweeps(ctx context.Context, in *ListSweepsRequest, opts ...grpc.CallOption) (*ListSweepsResponse, error) - // - //LabelTransaction adds a label to a transaction. If the transaction already - //has a label the call will fail unless the overwrite bool is set. This will - //overwrite the exiting transaction label. Labels must not be empty, and - //cannot exceed 500 characters. - LabelTransaction(ctx context.Context, in *LabelTransactionRequest, opts ...grpc.CallOption) (*LabelTransactionResponse, error) - // - //FundPsbt creates a fully populated PSBT that contains enough inputs to fund - //the outputs specified in the template. 
There are two ways of specifying a - //template: Either by passing in a PSBT with at least one output declared or - //by passing in a raw TxTemplate message. - // - //If there are no inputs specified in the template, coin selection is - //performed automatically. If the template does contain any inputs, it is - //assumed that full coin selection happened externally and no additional - //inputs are added. If the specified inputs aren't enough to fund the outputs - //with the given fee rate, an error is returned. - // - //After either selecting or verifying the inputs, all input UTXOs are locked - //with an internal app ID. - // - //NOTE: If this method returns without an error, it is the caller's - //responsibility to either spend the locked UTXOs (by finalizing and then - //publishing the transaction) or to unlock/release the locked UTXOs in case of - //an error on the caller's side. - FundPsbt(ctx context.Context, in *FundPsbtRequest, opts ...grpc.CallOption) (*FundPsbtResponse, error) - // - //FinalizePsbt expects a partial transaction with all inputs and outputs fully - //declared and tries to sign all inputs that belong to the wallet. Lnd must be - //the last signer of the transaction. That means, if there are any unsigned - //non-witness inputs or inputs without UTXO information attached or inputs - //without witness data that do not belong to lnd's wallet, this method will - //fail. If no error is returned, the PSBT is ready to be extracted and the - //final TX within to be broadcast. - // - //NOTE: This method does NOT publish the transaction once finalized. It is the - //caller's responsibility to either publish the transaction on success or - //unlock/release any locked UTXOs in case of an error in this method. 
- FinalizePsbt(ctx context.Context, in *FinalizePsbtRequest, opts ...grpc.CallOption) (*FinalizePsbtResponse, error) -} - -type walletKitClient struct { - cc *grpc.ClientConn -} - -func NewWalletKitClient(cc *grpc.ClientConn) WalletKitClient { - return &walletKitClient{cc} -} - -func (c *walletKitClient) ListUnspent(ctx context.Context, in *ListUnspentRequest, opts ...grpc.CallOption) (*ListUnspentResponse, error) { - out := new(ListUnspentResponse) - err := c.cc.Invoke(ctx, "/walletrpc.WalletKit/ListUnspent", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *walletKitClient) LeaseOutput(ctx context.Context, in *LeaseOutputRequest, opts ...grpc.CallOption) (*LeaseOutputResponse, error) { - out := new(LeaseOutputResponse) - err := c.cc.Invoke(ctx, "/walletrpc.WalletKit/LeaseOutput", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *walletKitClient) ReleaseOutput(ctx context.Context, in *ReleaseOutputRequest, opts ...grpc.CallOption) (*ReleaseOutputResponse, error) { - out := new(ReleaseOutputResponse) - err := c.cc.Invoke(ctx, "/walletrpc.WalletKit/ReleaseOutput", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *walletKitClient) DeriveNextKey(ctx context.Context, in *KeyReq, opts ...grpc.CallOption) (*signrpc.KeyDescriptor, error) { - out := new(signrpc.KeyDescriptor) - err := c.cc.Invoke(ctx, "/walletrpc.WalletKit/DeriveNextKey", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *walletKitClient) DeriveKey(ctx context.Context, in *signrpc.KeyLocator, opts ...grpc.CallOption) (*signrpc.KeyDescriptor, error) { - out := new(signrpc.KeyDescriptor) - err := c.cc.Invoke(ctx, "/walletrpc.WalletKit/DeriveKey", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *walletKitClient) NextAddr(ctx context.Context, in *AddrRequest, opts ...grpc.CallOption) (*AddrResponse, error) { - out := new(AddrResponse) - err := c.cc.Invoke(ctx, "/walletrpc.WalletKit/NextAddr", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *walletKitClient) PublishTransaction(ctx context.Context, in *Transaction, opts ...grpc.CallOption) (*PublishResponse, error) { - out := new(PublishResponse) - err := c.cc.Invoke(ctx, "/walletrpc.WalletKit/PublishTransaction", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *walletKitClient) SendOutputs(ctx context.Context, in *SendOutputsRequest, opts ...grpc.CallOption) (*SendOutputsResponse, error) { - out := new(SendOutputsResponse) - err := c.cc.Invoke(ctx, "/walletrpc.WalletKit/SendOutputs", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *walletKitClient) EstimateFee(ctx context.Context, in *EstimateFeeRequest, opts ...grpc.CallOption) (*EstimateFeeResponse, error) { - out := new(EstimateFeeResponse) - err := c.cc.Invoke(ctx, "/walletrpc.WalletKit/EstimateFee", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *walletKitClient) PendingSweeps(ctx context.Context, in *PendingSweepsRequest, opts ...grpc.CallOption) (*PendingSweepsResponse, error) { - out := new(PendingSweepsResponse) - err := c.cc.Invoke(ctx, "/walletrpc.WalletKit/PendingSweeps", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *walletKitClient) BumpFee(ctx context.Context, in *BumpFeeRequest, opts ...grpc.CallOption) (*BumpFeeResponse, error) { - out := new(BumpFeeResponse) - err := c.cc.Invoke(ctx, "/walletrpc.WalletKit/BumpFee", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *walletKitClient) ListSweeps(ctx context.Context, in *ListSweepsRequest, opts ...grpc.CallOption) (*ListSweepsResponse, error) { - out := new(ListSweepsResponse) - err := c.cc.Invoke(ctx, "/walletrpc.WalletKit/ListSweeps", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *walletKitClient) LabelTransaction(ctx context.Context, in *LabelTransactionRequest, opts ...grpc.CallOption) (*LabelTransactionResponse, error) { - out := new(LabelTransactionResponse) - err := c.cc.Invoke(ctx, "/walletrpc.WalletKit/LabelTransaction", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *walletKitClient) FundPsbt(ctx context.Context, in *FundPsbtRequest, opts ...grpc.CallOption) (*FundPsbtResponse, error) { - out := new(FundPsbtResponse) - err := c.cc.Invoke(ctx, "/walletrpc.WalletKit/FundPsbt", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *walletKitClient) FinalizePsbt(ctx context.Context, in *FinalizePsbtRequest, opts ...grpc.CallOption) (*FinalizePsbtResponse, error) { - out := new(FinalizePsbtResponse) - err := c.cc.Invoke(ctx, "/walletrpc.WalletKit/FinalizePsbt", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// WalletKitServer is the server API for WalletKit service. -type WalletKitServer interface { - // - //ListUnspent returns a list of all utxos spendable by the wallet with a - //number of confirmations between the specified minimum and maximum. - ListUnspent(context.Context, *ListUnspentRequest) (*ListUnspentResponse, error) - // - //LeaseOutput locks an output to the given ID, preventing it from being - //available for any future coin selection attempts. The absolute time of the - //lock's expiration is returned. The expiration of the lock can be extended by - //successive invocations of this RPC. 
Outputs can be unlocked before their - //expiration through `ReleaseOutput`. - LeaseOutput(context.Context, *LeaseOutputRequest) (*LeaseOutputResponse, error) - // - //ReleaseOutput unlocks an output, allowing it to be available for coin - //selection if it remains unspent. The ID should match the one used to - //originally lock the output. - ReleaseOutput(context.Context, *ReleaseOutputRequest) (*ReleaseOutputResponse, error) - // - //DeriveNextKey attempts to derive the *next* key within the key family - //(account in BIP43) specified. This method should return the next external - //child within this branch. - DeriveNextKey(context.Context, *KeyReq) (*signrpc.KeyDescriptor, error) - // - //DeriveKey attempts to derive an arbitrary key specified by the passed - //KeyLocator. - DeriveKey(context.Context, *signrpc.KeyLocator) (*signrpc.KeyDescriptor, error) - // - //NextAddr returns the next unused address within the wallet. - NextAddr(context.Context, *AddrRequest) (*AddrResponse, error) - // - //PublishTransaction attempts to publish the passed transaction to the - //network. Once this returns without an error, the wallet will continually - //attempt to re-broadcast the transaction on start up, until it enters the - //chain. - PublishTransaction(context.Context, *Transaction) (*PublishResponse, error) - // - //SendOutputs is similar to the existing sendmany call in Bitcoind, and - //allows the caller to create a transaction that sends to several outputs at - //once. This is ideal when wanting to batch create a set of transactions. - SendOutputs(context.Context, *SendOutputsRequest) (*SendOutputsResponse, error) - // - //EstimateFee attempts to query the internal fee estimator of the wallet to - //determine the fee (in sat/kw) to attach to a transaction in order to - //achieve the confirmation target. 
- EstimateFee(context.Context, *EstimateFeeRequest) (*EstimateFeeResponse, error) - // - //PendingSweeps returns lists of on-chain outputs that lnd is currently - //attempting to sweep within its central batching engine. Outputs with similar - //fee rates are batched together in order to sweep them within a single - //transaction. - // - //NOTE: Some of the fields within PendingSweepsRequest are not guaranteed to - //remain supported. This is an advanced API that depends on the internals of - //the UtxoSweeper, so things may change. - PendingSweeps(context.Context, *PendingSweepsRequest) (*PendingSweepsResponse, error) - // - //BumpFee bumps the fee of an arbitrary input within a transaction. This RPC - //takes a different approach than bitcoind's bumpfee command. lnd has a - //central batching engine in which inputs with similar fee rates are batched - //together to save on transaction fees. Due to this, we cannot rely on - //bumping the fee on a specific transaction, since transactions can change at - //any point with the addition of new inputs. The list of inputs that - //currently exist within lnd's central batching engine can be retrieved - //through the PendingSweeps RPC. - // - //When bumping the fee of an input that currently exists within lnd's central - //batching engine, a higher fee transaction will be created that replaces the - //lower fee transaction through the Replace-By-Fee (RBF) policy. If it - // - //This RPC also serves useful when wanting to perform a Child-Pays-For-Parent - //(CPFP), where the child transaction pays for its parent's fee. This can be - //done by specifying an outpoint within the low fee transaction that is under - //the control of the wallet. - // - //The fee preference can be expressed either as a specific fee rate or a delta - //of blocks in which the output should be swept on-chain within. If a fee - //preference is not explicitly specified, then an error is returned. 
- // - //Note that this RPC currently doesn't perform any validation checks on the - //fee preference being provided. For now, the responsibility of ensuring that - //the new fee preference is sufficient is delegated to the user. - BumpFee(context.Context, *BumpFeeRequest) (*BumpFeeResponse, error) - // - //ListSweeps returns a list of the sweep transactions our node has produced. - //Note that these sweeps may not be confirmed yet, as we record sweeps on - //broadcast, not confirmation. - ListSweeps(context.Context, *ListSweepsRequest) (*ListSweepsResponse, error) - // - //LabelTransaction adds a label to a transaction. If the transaction already - //has a label the call will fail unless the overwrite bool is set. This will - //overwrite the exiting transaction label. Labels must not be empty, and - //cannot exceed 500 characters. - LabelTransaction(context.Context, *LabelTransactionRequest) (*LabelTransactionResponse, error) - // - //FundPsbt creates a fully populated PSBT that contains enough inputs to fund - //the outputs specified in the template. There are two ways of specifying a - //template: Either by passing in a PSBT with at least one output declared or - //by passing in a raw TxTemplate message. - // - //If there are no inputs specified in the template, coin selection is - //performed automatically. If the template does contain any inputs, it is - //assumed that full coin selection happened externally and no additional - //inputs are added. If the specified inputs aren't enough to fund the outputs - //with the given fee rate, an error is returned. - // - //After either selecting or verifying the inputs, all input UTXOs are locked - //with an internal app ID. - // - //NOTE: If this method returns without an error, it is the caller's - //responsibility to either spend the locked UTXOs (by finalizing and then - //publishing the transaction) or to unlock/release the locked UTXOs in case of - //an error on the caller's side. 
- FundPsbt(context.Context, *FundPsbtRequest) (*FundPsbtResponse, error) - // - //FinalizePsbt expects a partial transaction with all inputs and outputs fully - //declared and tries to sign all inputs that belong to the wallet. Lnd must be - //the last signer of the transaction. That means, if there are any unsigned - //non-witness inputs or inputs without UTXO information attached or inputs - //without witness data that do not belong to lnd's wallet, this method will - //fail. If no error is returned, the PSBT is ready to be extracted and the - //final TX within to be broadcast. - // - //NOTE: This method does NOT publish the transaction once finalized. It is the - //caller's responsibility to either publish the transaction on success or - //unlock/release any locked UTXOs in case of an error in this method. - FinalizePsbt(context.Context, *FinalizePsbtRequest) (*FinalizePsbtResponse, error) -} - -// UnimplementedWalletKitServer can be embedded to have forward compatible implementations. 
-type UnimplementedWalletKitServer struct { -} - -func (*UnimplementedWalletKitServer) ListUnspent(ctx context.Context, req *ListUnspentRequest) (*ListUnspentResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListUnspent not implemented") -} -func (*UnimplementedWalletKitServer) LeaseOutput(ctx context.Context, req *LeaseOutputRequest) (*LeaseOutputResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method LeaseOutput not implemented") -} -func (*UnimplementedWalletKitServer) ReleaseOutput(ctx context.Context, req *ReleaseOutputRequest) (*ReleaseOutputResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ReleaseOutput not implemented") -} -func (*UnimplementedWalletKitServer) DeriveNextKey(ctx context.Context, req *KeyReq) (*signrpc.KeyDescriptor, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeriveNextKey not implemented") -} -func (*UnimplementedWalletKitServer) DeriveKey(ctx context.Context, req *signrpc.KeyLocator) (*signrpc.KeyDescriptor, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeriveKey not implemented") -} -func (*UnimplementedWalletKitServer) NextAddr(ctx context.Context, req *AddrRequest) (*AddrResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method NextAddr not implemented") -} -func (*UnimplementedWalletKitServer) PublishTransaction(ctx context.Context, req *Transaction) (*PublishResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method PublishTransaction not implemented") -} -func (*UnimplementedWalletKitServer) SendOutputs(ctx context.Context, req *SendOutputsRequest) (*SendOutputsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method SendOutputs not implemented") -} -func (*UnimplementedWalletKitServer) EstimateFee(ctx context.Context, req *EstimateFeeRequest) (*EstimateFeeResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method EstimateFee not implemented") -} -func 
(*UnimplementedWalletKitServer) PendingSweeps(ctx context.Context, req *PendingSweepsRequest) (*PendingSweepsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method PendingSweeps not implemented") -} -func (*UnimplementedWalletKitServer) BumpFee(ctx context.Context, req *BumpFeeRequest) (*BumpFeeResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method BumpFee not implemented") -} -func (*UnimplementedWalletKitServer) ListSweeps(ctx context.Context, req *ListSweepsRequest) (*ListSweepsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListSweeps not implemented") -} -func (*UnimplementedWalletKitServer) LabelTransaction(ctx context.Context, req *LabelTransactionRequest) (*LabelTransactionResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method LabelTransaction not implemented") -} -func (*UnimplementedWalletKitServer) FundPsbt(ctx context.Context, req *FundPsbtRequest) (*FundPsbtResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method FundPsbt not implemented") -} -func (*UnimplementedWalletKitServer) FinalizePsbt(ctx context.Context, req *FinalizePsbtRequest) (*FinalizePsbtResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method FinalizePsbt not implemented") -} - -func RegisterWalletKitServer(s *grpc.Server, srv WalletKitServer) { - s.RegisterService(&_WalletKit_serviceDesc, srv) -} - -func _WalletKit_ListUnspent_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListUnspentRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(WalletKitServer).ListUnspent(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/walletrpc.WalletKit/ListUnspent", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(WalletKitServer).ListUnspent(ctx, 
req.(*ListUnspentRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _WalletKit_LeaseOutput_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(LeaseOutputRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(WalletKitServer).LeaseOutput(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/walletrpc.WalletKit/LeaseOutput", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(WalletKitServer).LeaseOutput(ctx, req.(*LeaseOutputRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _WalletKit_ReleaseOutput_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ReleaseOutputRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(WalletKitServer).ReleaseOutput(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/walletrpc.WalletKit/ReleaseOutput", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(WalletKitServer).ReleaseOutput(ctx, req.(*ReleaseOutputRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _WalletKit_DeriveNextKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(KeyReq) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(WalletKitServer).DeriveNextKey(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/walletrpc.WalletKit/DeriveNextKey", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(WalletKitServer).DeriveNextKey(ctx, req.(*KeyReq)) - } - return interceptor(ctx, 
in, info, handler) -} - -func _WalletKit_DeriveKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(signrpc.KeyLocator) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(WalletKitServer).DeriveKey(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/walletrpc.WalletKit/DeriveKey", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(WalletKitServer).DeriveKey(ctx, req.(*signrpc.KeyLocator)) - } - return interceptor(ctx, in, info, handler) -} - -func _WalletKit_NextAddr_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AddrRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(WalletKitServer).NextAddr(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/walletrpc.WalletKit/NextAddr", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(WalletKitServer).NextAddr(ctx, req.(*AddrRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _WalletKit_PublishTransaction_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Transaction) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(WalletKitServer).PublishTransaction(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/walletrpc.WalletKit/PublishTransaction", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(WalletKitServer).PublishTransaction(ctx, req.(*Transaction)) - } - return interceptor(ctx, in, info, handler) -} - -func _WalletKit_SendOutputs_Handler(srv 
interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SendOutputsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(WalletKitServer).SendOutputs(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/walletrpc.WalletKit/SendOutputs", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(WalletKitServer).SendOutputs(ctx, req.(*SendOutputsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _WalletKit_EstimateFee_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(EstimateFeeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(WalletKitServer).EstimateFee(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/walletrpc.WalletKit/EstimateFee", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(WalletKitServer).EstimateFee(ctx, req.(*EstimateFeeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _WalletKit_PendingSweeps_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(PendingSweepsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(WalletKitServer).PendingSweeps(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/walletrpc.WalletKit/PendingSweeps", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(WalletKitServer).PendingSweeps(ctx, req.(*PendingSweepsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _WalletKit_BumpFee_Handler(srv interface{}, ctx context.Context, dec 
func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(BumpFeeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(WalletKitServer).BumpFee(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/walletrpc.WalletKit/BumpFee", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(WalletKitServer).BumpFee(ctx, req.(*BumpFeeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _WalletKit_ListSweeps_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListSweepsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(WalletKitServer).ListSweeps(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/walletrpc.WalletKit/ListSweeps", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(WalletKitServer).ListSweeps(ctx, req.(*ListSweepsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _WalletKit_LabelTransaction_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(LabelTransactionRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(WalletKitServer).LabelTransaction(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/walletrpc.WalletKit/LabelTransaction", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(WalletKitServer).LabelTransaction(ctx, req.(*LabelTransactionRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _WalletKit_FundPsbt_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(FundPsbtRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(WalletKitServer).FundPsbt(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/walletrpc.WalletKit/FundPsbt", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(WalletKitServer).FundPsbt(ctx, req.(*FundPsbtRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _WalletKit_FinalizePsbt_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(FinalizePsbtRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(WalletKitServer).FinalizePsbt(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/walletrpc.WalletKit/FinalizePsbt", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(WalletKitServer).FinalizePsbt(ctx, req.(*FinalizePsbtRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _WalletKit_serviceDesc = grpc.ServiceDesc{ - ServiceName: "walletrpc.WalletKit", - HandlerType: (*WalletKitServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "ListUnspent", - Handler: _WalletKit_ListUnspent_Handler, - }, - { - MethodName: "LeaseOutput", - Handler: _WalletKit_LeaseOutput_Handler, - }, - { - MethodName: "ReleaseOutput", - Handler: _WalletKit_ReleaseOutput_Handler, - }, - { - MethodName: "DeriveNextKey", - Handler: _WalletKit_DeriveNextKey_Handler, - }, - { - MethodName: "DeriveKey", - Handler: _WalletKit_DeriveKey_Handler, - }, - { - MethodName: "NextAddr", - Handler: _WalletKit_NextAddr_Handler, - }, - { - MethodName: "PublishTransaction", - Handler: _WalletKit_PublishTransaction_Handler, - }, - { - MethodName: "SendOutputs", - Handler: _WalletKit_SendOutputs_Handler, - }, - { - 
MethodName: "EstimateFee", - Handler: _WalletKit_EstimateFee_Handler, - }, - { - MethodName: "PendingSweeps", - Handler: _WalletKit_PendingSweeps_Handler, - }, - { - MethodName: "BumpFee", - Handler: _WalletKit_BumpFee_Handler, - }, - { - MethodName: "ListSweeps", - Handler: _WalletKit_ListSweeps_Handler, - }, - { - MethodName: "LabelTransaction", - Handler: _WalletKit_LabelTransaction_Handler, - }, - { - MethodName: "FundPsbt", - Handler: _WalletKit_FundPsbt_Handler, - }, - { - MethodName: "FinalizePsbt", - Handler: _WalletKit_FinalizePsbt_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "walletrpc/walletkit.proto", -} diff --git a/lnd/lnrpc/walletrpc/walletkit.pb.gw.go b/lnd/lnrpc/walletrpc/walletkit.pb.gw.go deleted file mode 100644 index 84ca347a..00000000 --- a/lnd/lnrpc/walletrpc/walletkit.pb.gw.go +++ /dev/null @@ -1,1258 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. -// source: walletrpc/walletkit.proto - -/* -Package walletrpc is a reverse proxy. - -It translates gRPC into RESTful JSON APIs. 
-*/ -package walletrpc - -import ( - "context" - "io" - "net/http" - - "github.com/golang/protobuf/descriptor" - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "github.com/pkt-cash/pktd/lnd/lnrpc/signrpc" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/status" -) - -// Suppress "imported and not used" errors -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray -var _ = descriptor.ForMessage - -var ( - filter_WalletKit_ListUnspent_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_WalletKit_ListUnspent_0(ctx context.Context, marshaler runtime.Marshaler, client WalletKitClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ListUnspentRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_WalletKit_ListUnspent_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.ListUnspent(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_WalletKit_ListUnspent_0(ctx context.Context, marshaler runtime.Marshaler, server WalletKitServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ListUnspentRequest - var metadata runtime.ServerMetadata - - if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_WalletKit_ListUnspent_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err 
:= server.ListUnspent(ctx, &protoReq) - return msg, metadata, err - -} - -func request_WalletKit_LeaseOutput_0(ctx context.Context, marshaler runtime.Marshaler, client WalletKitClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq LeaseOutputRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.LeaseOutput(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_WalletKit_LeaseOutput_0(ctx context.Context, marshaler runtime.Marshaler, server WalletKitServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq LeaseOutputRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.LeaseOutput(ctx, &protoReq) - return msg, metadata, err - -} - -func request_WalletKit_ReleaseOutput_0(ctx context.Context, marshaler runtime.Marshaler, client WalletKitClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ReleaseOutputRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := 
marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.ReleaseOutput(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_WalletKit_ReleaseOutput_0(ctx context.Context, marshaler runtime.Marshaler, server WalletKitServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ReleaseOutputRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.ReleaseOutput(ctx, &protoReq) - return msg, metadata, err - -} - -func request_WalletKit_DeriveNextKey_0(ctx context.Context, marshaler runtime.Marshaler, client WalletKitClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq KeyReq - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.DeriveNextKey(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_WalletKit_DeriveNextKey_0(ctx context.Context, marshaler runtime.Marshaler, server WalletKitServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var 
protoReq KeyReq - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.DeriveNextKey(ctx, &protoReq) - return msg, metadata, err - -} - -func request_WalletKit_DeriveKey_0(ctx context.Context, marshaler runtime.Marshaler, client WalletKitClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq signrpc.KeyLocator - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.DeriveKey(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_WalletKit_DeriveKey_0(ctx context.Context, marshaler runtime.Marshaler, server WalletKitServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq signrpc.KeyLocator - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.DeriveKey(ctx, &protoReq) - return msg, metadata, err - -} - -func request_WalletKit_NextAddr_0(ctx context.Context, marshaler 
runtime.Marshaler, client WalletKitClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AddrRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.NextAddr(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_WalletKit_NextAddr_0(ctx context.Context, marshaler runtime.Marshaler, server WalletKitServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AddrRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.NextAddr(ctx, &protoReq) - return msg, metadata, err - -} - -func request_WalletKit_PublishTransaction_0(ctx context.Context, marshaler runtime.Marshaler, client WalletKitClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq Transaction - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := 
client.PublishTransaction(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_WalletKit_PublishTransaction_0(ctx context.Context, marshaler runtime.Marshaler, server WalletKitServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq Transaction - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.PublishTransaction(ctx, &protoReq) - return msg, metadata, err - -} - -func request_WalletKit_SendOutputs_0(ctx context.Context, marshaler runtime.Marshaler, client WalletKitClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq SendOutputsRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.SendOutputs(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_WalletKit_SendOutputs_0(ctx context.Context, marshaler runtime.Marshaler, server WalletKitServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq SendOutputsRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, 
metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.SendOutputs(ctx, &protoReq) - return msg, metadata, err - -} - -func request_WalletKit_EstimateFee_0(ctx context.Context, marshaler runtime.Marshaler, client WalletKitClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq EstimateFeeRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["conf_target"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "conf_target") - } - - protoReq.ConfTarget, err = runtime.Int32(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "conf_target", err) - } - - msg, err := client.EstimateFee(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_WalletKit_EstimateFee_0(ctx context.Context, marshaler runtime.Marshaler, server WalletKitServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq EstimateFeeRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["conf_target"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "conf_target") - } - - protoReq.ConfTarget, err = runtime.Int32(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "conf_target", err) - } - - msg, err := server.EstimateFee(ctx, &protoReq) - return msg, metadata, err - -} - -func request_WalletKit_PendingSweeps_0(ctx 
context.Context, marshaler runtime.Marshaler, client WalletKitClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq PendingSweepsRequest - var metadata runtime.ServerMetadata - - msg, err := client.PendingSweeps(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_WalletKit_PendingSweeps_0(ctx context.Context, marshaler runtime.Marshaler, server WalletKitServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq PendingSweepsRequest - var metadata runtime.ServerMetadata - - msg, err := server.PendingSweeps(ctx, &protoReq) - return msg, metadata, err - -} - -func request_WalletKit_BumpFee_0(ctx context.Context, marshaler runtime.Marshaler, client WalletKitClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq BumpFeeRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.BumpFee(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_WalletKit_BumpFee_0(ctx context.Context, marshaler runtime.Marshaler, server WalletKitServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq BumpFeeRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := 
marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.BumpFee(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_WalletKit_ListSweeps_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_WalletKit_ListSweeps_0(ctx context.Context, marshaler runtime.Marshaler, client WalletKitClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ListSweepsRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_WalletKit_ListSweeps_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.ListSweeps(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_WalletKit_ListSweeps_0(ctx context.Context, marshaler runtime.Marshaler, server WalletKitServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ListSweepsRequest - var metadata runtime.ServerMetadata - - if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_WalletKit_ListSweeps_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.ListSweeps(ctx, &protoReq) - return msg, metadata, err - -} - -func request_WalletKit_LabelTransaction_0(ctx context.Context, marshaler runtime.Marshaler, client WalletKitClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq LabelTransactionRequest - var metadata runtime.ServerMetadata - - newReader, berr := 
utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.LabelTransaction(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_WalletKit_LabelTransaction_0(ctx context.Context, marshaler runtime.Marshaler, server WalletKitServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq LabelTransactionRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.LabelTransaction(ctx, &protoReq) - return msg, metadata, err - -} - -func request_WalletKit_FundPsbt_0(ctx context.Context, marshaler runtime.Marshaler, client WalletKitClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq FundPsbtRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.FundPsbt(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_WalletKit_FundPsbt_0(ctx context.Context, 
marshaler runtime.Marshaler, server WalletKitServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq FundPsbtRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.FundPsbt(ctx, &protoReq) - return msg, metadata, err - -} - -func request_WalletKit_FinalizePsbt_0(ctx context.Context, marshaler runtime.Marshaler, client WalletKitClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq FinalizePsbtRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.FinalizePsbt(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_WalletKit_FinalizePsbt_0(ctx context.Context, marshaler runtime.Marshaler, server WalletKitServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq FinalizePsbtRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, 
"%v", err) - } - - msg, err := server.FinalizePsbt(ctx, &protoReq) - return msg, metadata, err - -} - -// RegisterWalletKitHandlerServer registers the http handlers for service WalletKit to "mux". -// UnaryRPC :call WalletKitServer directly. -// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -func RegisterWalletKitHandlerServer(ctx context.Context, mux *runtime.ServeMux, server WalletKitServer) error { - - mux.Handle("POST", pattern_WalletKit_ListUnspent_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_WalletKit_ListUnspent_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_WalletKit_ListUnspent_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_WalletKit_LeaseOutput_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_WalletKit_LeaseOutput_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_WalletKit_LeaseOutput_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_WalletKit_ReleaseOutput_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_WalletKit_ReleaseOutput_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_WalletKit_ReleaseOutput_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_WalletKit_DeriveNextKey_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_WalletKit_DeriveNextKey_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_WalletKit_DeriveNextKey_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_WalletKit_DeriveKey_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_WalletKit_DeriveKey_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_WalletKit_DeriveKey_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_WalletKit_NextAddr_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_WalletKit_NextAddr_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_WalletKit_NextAddr_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_WalletKit_PublishTransaction_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_WalletKit_PublishTransaction_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_WalletKit_PublishTransaction_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_WalletKit_SendOutputs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_WalletKit_SendOutputs_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_WalletKit_SendOutputs_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_WalletKit_EstimateFee_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_WalletKit_EstimateFee_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_WalletKit_EstimateFee_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_WalletKit_PendingSweeps_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_WalletKit_PendingSweeps_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_WalletKit_PendingSweeps_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_WalletKit_BumpFee_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_WalletKit_BumpFee_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_WalletKit_BumpFee_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_WalletKit_ListSweeps_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_WalletKit_ListSweeps_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_WalletKit_ListSweeps_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_WalletKit_LabelTransaction_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_WalletKit_LabelTransaction_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_WalletKit_LabelTransaction_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_WalletKit_FundPsbt_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_WalletKit_FundPsbt_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_WalletKit_FundPsbt_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_WalletKit_FinalizePsbt_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_WalletKit_FinalizePsbt_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_WalletKit_FinalizePsbt_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -// RegisterWalletKitHandlerFromEndpoint is same as RegisterWalletKitHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterWalletKitHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) 
- if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterWalletKitHandler(ctx, mux, conn) -} - -// RegisterWalletKitHandler registers the http handlers for service WalletKit to "mux". -// The handlers forward requests to the grpc endpoint over "conn". -func RegisterWalletKitHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterWalletKitHandlerClient(ctx, mux, NewWalletKitClient(conn)) -} - -// RegisterWalletKitHandlerClient registers the http handlers for service WalletKit -// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "WalletKitClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "WalletKitClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "WalletKitClient" to call the correct interceptors. 
-func RegisterWalletKitHandlerClient(ctx context.Context, mux *runtime.ServeMux, client WalletKitClient) error { - - mux.Handle("POST", pattern_WalletKit_ListUnspent_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_WalletKit_ListUnspent_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_WalletKit_ListUnspent_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_WalletKit_LeaseOutput_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_WalletKit_LeaseOutput_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_WalletKit_LeaseOutput_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_WalletKit_ReleaseOutput_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_WalletKit_ReleaseOutput_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_WalletKit_ReleaseOutput_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_WalletKit_DeriveNextKey_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_WalletKit_DeriveNextKey_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_WalletKit_DeriveNextKey_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_WalletKit_DeriveKey_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_WalletKit_DeriveKey_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_WalletKit_DeriveKey_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_WalletKit_NextAddr_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_WalletKit_NextAddr_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_WalletKit_NextAddr_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_WalletKit_PublishTransaction_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_WalletKit_PublishTransaction_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_WalletKit_PublishTransaction_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_WalletKit_SendOutputs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_WalletKit_SendOutputs_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_WalletKit_SendOutputs_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_WalletKit_EstimateFee_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_WalletKit_EstimateFee_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_WalletKit_EstimateFee_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_WalletKit_PendingSweeps_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_WalletKit_PendingSweeps_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_WalletKit_PendingSweeps_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_WalletKit_BumpFee_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_WalletKit_BumpFee_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_WalletKit_BumpFee_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_WalletKit_ListSweeps_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_WalletKit_ListSweeps_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_WalletKit_ListSweeps_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_WalletKit_LabelTransaction_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_WalletKit_LabelTransaction_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_WalletKit_LabelTransaction_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_WalletKit_FundPsbt_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_WalletKit_FundPsbt_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_WalletKit_FundPsbt_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_WalletKit_FinalizePsbt_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_WalletKit_FinalizePsbt_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_WalletKit_FinalizePsbt_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -var ( - pattern_WalletKit_ListUnspent_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v2", "wallet", "utxos"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_WalletKit_LeaseOutput_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v2", "wallet", "utxos", "lease"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_WalletKit_ReleaseOutput_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v2", "wallet", "utxos", "release"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_WalletKit_DeriveNextKey_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v2", "wallet", "key", "next"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_WalletKit_DeriveKey_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v2", "wallet", "key"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_WalletKit_NextAddr_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v2", "wallet", "address", "next"}, "", runtime.AssumeColonVerbOpt(true))) - - 
pattern_WalletKit_PublishTransaction_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v2", "wallet", "tx"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_WalletKit_SendOutputs_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v2", "wallet", "send"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_WalletKit_EstimateFee_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"v2", "wallet", "estimatefee", "conf_target"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_WalletKit_PendingSweeps_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v2", "wallet", "sweeps", "pending"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_WalletKit_BumpFee_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v2", "wallet", "bumpfee"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_WalletKit_ListSweeps_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v2", "wallet", "sweeps"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_WalletKit_LabelTransaction_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v2", "wallet", "tx", "label"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_WalletKit_FundPsbt_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v2", "wallet", "psbt", "fund"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_WalletKit_FinalizePsbt_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v2", "wallet", "psbt", "finalize"}, "", runtime.AssumeColonVerbOpt(true))) -) - -var ( - forward_WalletKit_ListUnspent_0 = runtime.ForwardResponseMessage - - forward_WalletKit_LeaseOutput_0 = runtime.ForwardResponseMessage - - forward_WalletKit_ReleaseOutput_0 = runtime.ForwardResponseMessage - - forward_WalletKit_DeriveNextKey_0 = 
runtime.ForwardResponseMessage - - forward_WalletKit_DeriveKey_0 = runtime.ForwardResponseMessage - - forward_WalletKit_NextAddr_0 = runtime.ForwardResponseMessage - - forward_WalletKit_PublishTransaction_0 = runtime.ForwardResponseMessage - - forward_WalletKit_SendOutputs_0 = runtime.ForwardResponseMessage - - forward_WalletKit_EstimateFee_0 = runtime.ForwardResponseMessage - - forward_WalletKit_PendingSweeps_0 = runtime.ForwardResponseMessage - - forward_WalletKit_BumpFee_0 = runtime.ForwardResponseMessage - - forward_WalletKit_ListSweeps_0 = runtime.ForwardResponseMessage - - forward_WalletKit_LabelTransaction_0 = runtime.ForwardResponseMessage - - forward_WalletKit_FundPsbt_0 = runtime.ForwardResponseMessage - - forward_WalletKit_FinalizePsbt_0 = runtime.ForwardResponseMessage -) diff --git a/lnd/lnrpc/walletrpc/walletkit.proto b/lnd/lnrpc/walletrpc/walletkit.proto deleted file mode 100644 index b61b0c90..00000000 --- a/lnd/lnrpc/walletrpc/walletkit.proto +++ /dev/null @@ -1,601 +0,0 @@ -syntax = "proto3"; - -import "rpc.proto"; -import "signrpc/signer.proto"; - -package walletrpc; - -option go_package = "github.com/pkt-cash/pktd/lnd/lnrpc/walletrpc"; - -// WalletKit is a service that gives access to the core functionalities of the -// daemon's wallet. -service WalletKit { - /* - ListUnspent returns a list of all utxos spendable by the wallet with a - number of confirmations between the specified minimum and maximum. - */ - rpc ListUnspent (ListUnspentRequest) returns (ListUnspentResponse); - - /* - LeaseOutput locks an output to the given ID, preventing it from being - available for any future coin selection attempts. The absolute time of the - lock's expiration is returned. The expiration of the lock can be extended by - successive invocations of this RPC. Outputs can be unlocked before their - expiration through `ReleaseOutput`. 
- */ - rpc LeaseOutput (LeaseOutputRequest) returns (LeaseOutputResponse); - - /* - ReleaseOutput unlocks an output, allowing it to be available for coin - selection if it remains unspent. The ID should match the one used to - originally lock the output. - */ - rpc ReleaseOutput (ReleaseOutputRequest) returns (ReleaseOutputResponse); - - /* - DeriveNextKey attempts to derive the *next* key within the key family - (account in BIP43) specified. This method should return the next external - child within this branch. - */ - rpc DeriveNextKey (KeyReq) returns (signrpc.KeyDescriptor); - - /* - DeriveKey attempts to derive an arbitrary key specified by the passed - KeyLocator. - */ - rpc DeriveKey (signrpc.KeyLocator) returns (signrpc.KeyDescriptor); - - /* - NextAddr returns the next unused address within the wallet. - */ - rpc NextAddr (AddrRequest) returns (AddrResponse); - - /* - PublishTransaction attempts to publish the passed transaction to the - network. Once this returns without an error, the wallet will continually - attempt to re-broadcast the transaction on start up, until it enters the - chain. - */ - rpc PublishTransaction (Transaction) returns (PublishResponse); - - /* - SendOutputs is similar to the existing sendmany call in Bitcoind, and - allows the caller to create a transaction that sends to several outputs at - once. This is ideal when wanting to batch create a set of transactions. - */ - rpc SendOutputs (SendOutputsRequest) returns (SendOutputsResponse); - - /* - EstimateFee attempts to query the internal fee estimator of the wallet to - determine the fee (in sat/kw) to attach to a transaction in order to - achieve the confirmation target. - */ - rpc EstimateFee (EstimateFeeRequest) returns (EstimateFeeResponse); - - /* - PendingSweeps returns lists of on-chain outputs that lnd is currently - attempting to sweep within its central batching engine. 
Outputs with similar - fee rates are batched together in order to sweep them within a single - transaction. - - NOTE: Some of the fields within PendingSweepsRequest are not guaranteed to - remain supported. This is an advanced API that depends on the internals of - the UtxoSweeper, so things may change. - */ - rpc PendingSweeps (PendingSweepsRequest) returns (PendingSweepsResponse); - - /* - BumpFee bumps the fee of an arbitrary input within a transaction. This RPC - takes a different approach than bitcoind's bumpfee command. lnd has a - central batching engine in which inputs with similar fee rates are batched - together to save on transaction fees. Due to this, we cannot rely on - bumping the fee on a specific transaction, since transactions can change at - any point with the addition of new inputs. The list of inputs that - currently exist within lnd's central batching engine can be retrieved - through the PendingSweeps RPC. - - When bumping the fee of an input that currently exists within lnd's central - batching engine, a higher fee transaction will be created that replaces the - lower fee transaction through the Replace-By-Fee (RBF) policy. If it - - This RPC also serves useful when wanting to perform a Child-Pays-For-Parent - (CPFP), where the child transaction pays for its parent's fee. This can be - done by specifying an outpoint within the low fee transaction that is under - the control of the wallet. - - The fee preference can be expressed either as a specific fee rate or a delta - of blocks in which the output should be swept on-chain within. If a fee - preference is not explicitly specified, then an error is returned. - - Note that this RPC currently doesn't perform any validation checks on the - fee preference being provided. For now, the responsibility of ensuring that - the new fee preference is sufficient is delegated to the user. 
- */ - rpc BumpFee (BumpFeeRequest) returns (BumpFeeResponse); - - /* - ListSweeps returns a list of the sweep transactions our node has produced. - Note that these sweeps may not be confirmed yet, as we record sweeps on - broadcast, not confirmation. - */ - rpc ListSweeps (ListSweepsRequest) returns (ListSweepsResponse); - - /* - LabelTransaction adds a label to a transaction. If the transaction already - has a label the call will fail unless the overwrite bool is set. This will - overwrite the exiting transaction label. Labels must not be empty, and - cannot exceed 500 characters. - */ - rpc LabelTransaction (LabelTransactionRequest) - returns (LabelTransactionResponse); - - /* - FundPsbt creates a fully populated PSBT that contains enough inputs to fund - the outputs specified in the template. There are two ways of specifying a - template: Either by passing in a PSBT with at least one output declared or - by passing in a raw TxTemplate message. - - If there are no inputs specified in the template, coin selection is - performed automatically. If the template does contain any inputs, it is - assumed that full coin selection happened externally and no additional - inputs are added. If the specified inputs aren't enough to fund the outputs - with the given fee rate, an error is returned. - - After either selecting or verifying the inputs, all input UTXOs are locked - with an internal app ID. - - NOTE: If this method returns without an error, it is the caller's - responsibility to either spend the locked UTXOs (by finalizing and then - publishing the transaction) or to unlock/release the locked UTXOs in case of - an error on the caller's side. - */ - rpc FundPsbt (FundPsbtRequest) returns (FundPsbtResponse); - - /* - FinalizePsbt expects a partial transaction with all inputs and outputs fully - declared and tries to sign all inputs that belong to the wallet. Lnd must be - the last signer of the transaction. 
That means, if there are any unsigned - non-witness inputs or inputs without UTXO information attached or inputs - without witness data that do not belong to lnd's wallet, this method will - fail. If no error is returned, the PSBT is ready to be extracted and the - final TX within to be broadcast. - - NOTE: This method does NOT publish the transaction once finalized. It is the - caller's responsibility to either publish the transaction on success or - unlock/release any locked UTXOs in case of an error in this method. - */ - rpc FinalizePsbt (FinalizePsbtRequest) returns (FinalizePsbtResponse); -} - -message ListUnspentRequest { - // The minimum number of confirmations to be included. - int32 min_confs = 1; - - // The maximum number of confirmations to be included. - int32 max_confs = 2; -} - -message ListUnspentResponse { - // A list of utxos satisfying the specified number of confirmations. - repeated lnrpc.Utxo utxos = 1; -} - -message LeaseOutputRequest { - /* - An ID of 32 random bytes that must be unique for each distinct application - using this RPC which will be used to bound the output lease to. - */ - bytes id = 1; - - // The identifying outpoint of the output being leased. - lnrpc.OutPoint outpoint = 2; -} - -message LeaseOutputResponse { - /* - The absolute expiration of the output lease represented as a unix timestamp. - */ - uint64 expiration = 1; -} - -message ReleaseOutputRequest { - // The unique ID that was used to lock the output. - bytes id = 1; - - // The identifying outpoint of the output being released. - lnrpc.OutPoint outpoint = 2; -} - -message ReleaseOutputResponse { -} - -message KeyReq { - /* - Is the key finger print of the root pubkey that this request is targeting. - This allows the WalletKit to possibly serve out keys for multiple HD chains - via public derivation. - */ - int32 key_finger_print = 1; - - /* - The target key family to derive a key from. In other contexts, this is - known as the "account". 
- */ - int32 key_family = 2; -} - -message AddrRequest { - // No fields, as we always give out a p2wkh address. -} -message AddrResponse { - /* - The address encoded using a bech32 format. - */ - string addr = 1; -} - -message Transaction { - /* - The raw serialized transaction. - */ - bytes tx_hex = 1; - - /* - An optional label to save with the transaction. Limited to 500 characters. - */ - string label = 2; -} -message PublishResponse { - /* - If blank, then no error occurred and the transaction was successfully - published. If not the empty string, then a string representation of the - broadcast error. - - TODO(roasbeef): map to a proper enum type - */ - string publish_error = 1; -} - -message SendOutputsRequest { - /* - The number of satoshis per kilo weight that should be used when crafting - this transaction. - */ - int64 sat_per_kw = 1; - - /* - A slice of the outputs that should be created in the transaction produced. - */ - repeated signrpc.TxOut outputs = 2; - - // An optional label for the transaction, limited to 500 characters. - string label = 3; - - // The minimum number of confirmations each one of your outputs used for - // the transaction must satisfy. - int32 min_confs = 4; - - // Whether unconfirmed outputs should be used as inputs for the transaction. - bool spend_unconfirmed = 5; -} -message SendOutputsResponse { - /* - The serialized transaction sent out on the network. - */ - bytes raw_tx = 1; -} - -message EstimateFeeRequest { - /* - The number of confirmations to shoot for when estimating the fee. - */ - int32 conf_target = 1; -} -message EstimateFeeResponse { - /* - The amount of satoshis per kw that should be used in order to reach the - confirmation target in the request. - */ - int64 sat_per_kw = 1; -} - -enum WitnessType { - UNKNOWN_WITNESS = 0; - - /* - A witness that allows us to spend the output of a commitment transaction - after a relative lock-time lockout. 
- */ - COMMITMENT_TIME_LOCK = 1; - - /* - A witness that allows us to spend a settled no-delay output immediately on a - counterparty's commitment transaction. - */ - COMMITMENT_NO_DELAY = 2; - - /* - A witness that allows us to sweep the settled output of a malicious - counterparty's who broadcasts a revoked commitment transaction. - */ - COMMITMENT_REVOKE = 3; - - /* - A witness that allows us to sweep an HTLC which we offered to the remote - party in the case that they broadcast a revoked commitment state. - */ - HTLC_OFFERED_REVOKE = 4; - - /* - A witness that allows us to sweep an HTLC output sent to us in the case that - the remote party broadcasts a revoked commitment state. - */ - HTLC_ACCEPTED_REVOKE = 5; - - /* - A witness that allows us to sweep an HTLC output that we extended to a - party, but was never fulfilled. This HTLC output isn't directly on the - commitment transaction, but is the result of a confirmed second-level HTLC - transaction. As a result, we can only spend this after a CSV delay. - */ - HTLC_OFFERED_TIMEOUT_SECOND_LEVEL = 6; - - /* - A witness that allows us to sweep an HTLC output that was offered to us, and - for which we have a payment preimage. This HTLC output isn't directly on our - commitment transaction, but is the result of confirmed second-level HTLC - transaction. As a result, we can only spend this after a CSV delay. - */ - HTLC_ACCEPTED_SUCCESS_SECOND_LEVEL = 7; - - /* - A witness that allows us to sweep an HTLC that we offered to the remote - party which lies in the commitment transaction of the remote party. We can - spend this output after the absolute CLTV timeout of the HTLC as passed. - */ - HTLC_OFFERED_REMOTE_TIMEOUT = 8; - - /* - A witness that allows us to sweep an HTLC that was offered to us by the - remote party. We use this witness in the case that the remote party goes to - chain, and we know the pre-image to the HTLC. We can sweep this without any - additional timeout. 
- */ - HTLC_ACCEPTED_REMOTE_SUCCESS = 9; - - /* - A witness that allows us to sweep an HTLC from the remote party's commitment - transaction in the case that the broadcast a revoked commitment, but then - also immediately attempt to go to the second level to claim the HTLC. - */ - HTLC_SECOND_LEVEL_REVOKE = 10; - - /* - A witness type that allows us to spend a regular p2wkh output that's sent to - an output which is under complete control of the backing wallet. - */ - WITNESS_KEY_HASH = 11; - - /* - A witness type that allows us to sweep an output that sends to a nested P2SH - script that pays to a key solely under our control. - */ - NESTED_WITNESS_KEY_HASH = 12; - - /* - A witness type that allows us to spend our anchor on the commitment - transaction. - */ - COMMITMENT_ANCHOR = 13; -} - -message PendingSweep { - // The outpoint of the output we're attempting to sweep. - lnrpc.OutPoint outpoint = 1; - - // The witness type of the output we're attempting to sweep. - WitnessType witness_type = 2; - - // The value of the output we're attempting to sweep. - uint32 amount_sat = 3; - - /* - The fee rate we'll use to sweep the output. The fee rate is only determined - once a sweeping transaction for the output is created, so it's possible for - this to be 0 before this. - */ - uint32 sat_per_byte = 4; - - // The number of broadcast attempts we've made to sweep the output. - uint32 broadcast_attempts = 5; - - /* - The next height of the chain at which we'll attempt to broadcast the - sweep transaction of the output. - */ - uint32 next_broadcast_height = 6; - - // The requested confirmation target for this output. - uint32 requested_conf_target = 8; - - // The requested fee rate, expressed in sat/byte, for this output. - uint32 requested_sat_per_byte = 9; - - /* - Whether this input must be force-swept. This means that it is swept even - if it has a negative yield. 
- */ - bool force = 7; -} - -message PendingSweepsRequest { -} - -message PendingSweepsResponse { - /* - The set of outputs currently being swept by lnd's central batching engine. - */ - repeated PendingSweep pending_sweeps = 1; -} - -message BumpFeeRequest { - // The input we're attempting to bump the fee of. - lnrpc.OutPoint outpoint = 1; - - // The target number of blocks that the input should be spent within. - uint32 target_conf = 2; - - /* - The fee rate, expressed in sat/byte, that should be used to spend the input - with. - */ - uint32 sat_per_byte = 3; - - /* - Whether this input must be force-swept. This means that it is swept even - if it has a negative yield. - */ - bool force = 4; -} - -message BumpFeeResponse { -} - -message ListSweepsRequest { - /* - Retrieve the full sweep transaction details. If false, only the sweep txids - will be returned. Note that some sweeps that LND publishes will have been - replaced-by-fee, so will not be included in this output. - */ - bool verbose = 1; -} - -message ListSweepsResponse { - message TransactionIDs { - /* - Reversed, hex-encoded string representing the transaction ids of the - sweeps that our node has broadcast. Note that these transactions may - not have confirmed yet, we record sweeps on broadcast, not confirmation. - */ - repeated string transaction_ids = 1; - } - - oneof sweeps { - lnrpc.TransactionDetails transaction_details = 1; - TransactionIDs transaction_ids = 2; - } -} - -message LabelTransactionRequest { - // The txid of the transaction to label. - bytes txid = 1; - - // The label to add to the transaction, limited to 500 characters. - string label = 2; - - // Whether to overwrite the existing label, if it is present. - bool overwrite = 3; -} - -message LabelTransactionResponse { -} - -message FundPsbtRequest { - oneof template { - /* - Use an existing PSBT packet as the template for the funded PSBT. - - The packet must contain at least one non-dust output. 
If one or more - inputs are specified, no coin selection is performed. In that case every - input must be an UTXO known to the wallet that has not been locked - before. The sum of all inputs must be sufficiently greater than the sum - of all outputs to pay a miner fee with the specified fee rate. A change - output is added to the PSBT if necessary. - */ - bytes psbt = 1; - - /* - Use the outputs and optional inputs from this raw template. - */ - TxTemplate raw = 2; - } - - oneof fees { - /* - The target number of blocks that the transaction should be confirmed in. - */ - uint32 target_conf = 3; - - /* - The fee rate, expressed in sat/vbyte, that should be used to spend the - input with. - */ - uint32 sat_per_vbyte = 4; - } -} -message FundPsbtResponse { - /* - The funded but not yet signed PSBT packet. - */ - bytes funded_psbt = 1; - - /* - The index of the added change output or -1 if no change was left over. - */ - int32 change_output_index = 2; - - /* - The list of lock leases that were acquired for the inputs in the funded PSBT - packet. - */ - repeated UtxoLease locked_utxos = 3; -} - -message TxTemplate { - /* - An optional list of inputs to use. Every input must be an UTXO known to the - wallet that has not been locked before. The sum of all inputs must be - sufficiently greater than the sum of all outputs to pay a miner fee with the - fee rate specified in the parent message. - - If no inputs are specified, coin selection will be performed instead and - inputs of sufficient value will be added to the resulting PSBT. - */ - repeated lnrpc.OutPoint inputs = 1; - - /* - A map of all addresses and the amounts to send to in the funded PSBT. - */ - map outputs = 2; -} - -message UtxoLease { - /* - A 32 byte random ID that identifies the lease. - */ - bytes id = 1; - - // The identifying outpoint of the output being leased. - lnrpc.OutPoint outpoint = 2; - - /* - The absolute expiration of the output lease represented as a unix timestamp. 
- */ - uint64 expiration = 3; -} - -message FinalizePsbtRequest { - /* - A PSBT that should be signed and finalized. The PSBT must contain all - required inputs, outputs, UTXO data and partial signatures of all other - signers. - */ - bytes funded_psbt = 1; -} -message FinalizePsbtResponse { - // The fully signed and finalized transaction in PSBT format. - bytes signed_psbt = 1; - - // The fully signed and finalized transaction in the raw wire format. - bytes raw_final_tx = 2; -} diff --git a/lnd/lnrpc/walletrpc/walletkit.swagger.json b/lnd/lnrpc/walletrpc/walletkit.swagger.json deleted file mode 100644 index 8e93d227..00000000 --- a/lnd/lnrpc/walletrpc/walletkit.swagger.json +++ /dev/null @@ -1,1112 +0,0 @@ -{ - "swagger": "2.0", - "info": { - "title": "walletrpc/walletkit.proto", - "version": "version not set" - }, - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "paths": { - "/v2/wallet/address/next": { - "post": { - "summary": "NextAddr returns the next unused address within the wallet.", - "operationId": "WalletKit_NextAddr", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/walletrpcAddrResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/walletrpcAddrRequest" - } - } - ], - "tags": [ - "WalletKit" - ] - } - }, - "/v2/wallet/bumpfee": { - "post": { - "summary": "BumpFee bumps the fee of an arbitrary input within a transaction. This RPC\ntakes a different approach than bitcoind's bumpfee command. lnd has a\ncentral batching engine in which inputs with similar fee rates are batched\ntogether to save on transaction fees. 
Due to this, we cannot rely on\nbumping the fee on a specific transaction, since transactions can change at\nany point with the addition of new inputs. The list of inputs that\ncurrently exist within lnd's central batching engine can be retrieved\nthrough the PendingSweeps RPC.", - "description": "When bumping the fee of an input that currently exists within lnd's central\nbatching engine, a higher fee transaction will be created that replaces the\nlower fee transaction through the Replace-By-Fee (RBF) policy. If it\n\nThis RPC also serves useful when wanting to perform a Child-Pays-For-Parent\n(CPFP), where the child transaction pays for its parent's fee. This can be\ndone by specifying an outpoint within the low fee transaction that is under\nthe control of the wallet.\n\nThe fee preference can be expressed either as a specific fee rate or a delta\nof blocks in which the output should be swept on-chain within. If a fee\npreference is not explicitly specified, then an error is returned.\n\nNote that this RPC currently doesn't perform any validation checks on the\nfee preference being provided. 
For now, the responsibility of ensuring that\nthe new fee preference is sufficient is delegated to the user.", - "operationId": "WalletKit_BumpFee", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/walletrpcBumpFeeResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/walletrpcBumpFeeRequest" - } - } - ], - "tags": [ - "WalletKit" - ] - } - }, - "/v2/wallet/estimatefee/{conf_target}": { - "get": { - "summary": "EstimateFee attempts to query the internal fee estimator of the wallet to\ndetermine the fee (in sat/kw) to attach to a transaction in order to\nachieve the confirmation target.", - "operationId": "WalletKit_EstimateFee", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/walletrpcEstimateFeeResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "conf_target", - "description": "The number of confirmations to shoot for when estimating the fee.", - "in": "path", - "required": true, - "type": "integer", - "format": "int32" - } - ], - "tags": [ - "WalletKit" - ] - } - }, - "/v2/wallet/key": { - "post": { - "summary": "DeriveKey attempts to derive an arbitrary key specified by the passed\nKeyLocator.", - "operationId": "WalletKit_DeriveKey", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/signrpcKeyDescriptor" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": 
"#/definitions/signrpcKeyLocator" - } - } - ], - "tags": [ - "WalletKit" - ] - } - }, - "/v2/wallet/key/next": { - "post": { - "summary": "DeriveNextKey attempts to derive the *next* key within the key family\n(account in BIP43) specified. This method should return the next external\nchild within this branch.", - "operationId": "WalletKit_DeriveNextKey", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/signrpcKeyDescriptor" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/walletrpcKeyReq" - } - } - ], - "tags": [ - "WalletKit" - ] - } - }, - "/v2/wallet/psbt/finalize": { - "post": { - "summary": "FinalizePsbt expects a partial transaction with all inputs and outputs fully\ndeclared and tries to sign all inputs that belong to the wallet. Lnd must be\nthe last signer of the transaction. That means, if there are any unsigned\nnon-witness inputs or inputs without UTXO information attached or inputs\nwithout witness data that do not belong to lnd's wallet, this method will\nfail. If no error is returned, the PSBT is ready to be extracted and the\nfinal TX within to be broadcast.", - "description": "NOTE: This method does NOT publish the transaction once finalized. 
It is the\ncaller's responsibility to either publish the transaction on success or\nunlock/release any locked UTXOs in case of an error in this method.", - "operationId": "WalletKit_FinalizePsbt", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/walletrpcFinalizePsbtResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/walletrpcFinalizePsbtRequest" - } - } - ], - "tags": [ - "WalletKit" - ] - } - }, - "/v2/wallet/psbt/fund": { - "post": { - "summary": "FundPsbt creates a fully populated PSBT that contains enough inputs to fund\nthe outputs specified in the template. There are two ways of specifying a\ntemplate: Either by passing in a PSBT with at least one output declared or\nby passing in a raw TxTemplate message.", - "description": "If there are no inputs specified in the template, coin selection is\nperformed automatically. If the template does contain any inputs, it is\nassumed that full coin selection happened externally and no additional\ninputs are added. 
If the specified inputs aren't enough to fund the outputs\nwith the given fee rate, an error is returned.\n\nAfter either selecting or verifying the inputs, all input UTXOs are locked\nwith an internal app ID.\n\nNOTE: If this method returns without an error, it is the caller's\nresponsibility to either spend the locked UTXOs (by finalizing and then\npublishing the transaction) or to unlock/release the locked UTXOs in case of\nan error on the caller's side.", - "operationId": "WalletKit_FundPsbt", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/walletrpcFundPsbtResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/walletrpcFundPsbtRequest" - } - } - ], - "tags": [ - "WalletKit" - ] - } - }, - "/v2/wallet/send": { - "post": { - "summary": "SendOutputs is similar to the existing sendmany call in Bitcoind, and\nallows the caller to create a transaction that sends to several outputs at\nonce. 
This is ideal when wanting to batch create a set of transactions.", - "operationId": "WalletKit_SendOutputs", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/walletrpcSendOutputsResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/walletrpcSendOutputsRequest" - } - } - ], - "tags": [ - "WalletKit" - ] - } - }, - "/v2/wallet/sweeps": { - "get": { - "summary": "ListSweeps returns a list of the sweep transactions our node has produced.\nNote that these sweeps may not be confirmed yet, as we record sweeps on\nbroadcast, not confirmation.", - "operationId": "WalletKit_ListSweeps", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/walletrpcListSweepsResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "verbose", - "description": "Retrieve the full sweep transaction details. If false, only the sweep txids\nwill be returned. Note that some sweeps that LND publishes will have been\nreplaced-by-fee, so will not be included in this output.", - "in": "query", - "required": false, - "type": "boolean" - } - ], - "tags": [ - "WalletKit" - ] - } - }, - "/v2/wallet/sweeps/pending": { - "get": { - "summary": "PendingSweeps returns lists of on-chain outputs that lnd is currently\nattempting to sweep within its central batching engine. Outputs with similar\nfee rates are batched together in order to sweep them within a single\ntransaction.", - "description": "NOTE: Some of the fields within PendingSweepsRequest are not guaranteed to\nremain supported. 
This is an advanced API that depends on the internals of\nthe UtxoSweeper, so things may change.", - "operationId": "WalletKit_PendingSweeps", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/walletrpcPendingSweepsResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "tags": [ - "WalletKit" - ] - } - }, - "/v2/wallet/tx": { - "post": { - "summary": "PublishTransaction attempts to publish the passed transaction to the\nnetwork. Once this returns without an error, the wallet will continually\nattempt to re-broadcast the transaction on start up, until it enters the\nchain.", - "operationId": "WalletKit_PublishTransaction", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/walletrpcPublishResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/walletrpcTransaction" - } - } - ], - "tags": [ - "WalletKit" - ] - } - }, - "/v2/wallet/tx/label": { - "post": { - "summary": "LabelTransaction adds a label to a transaction. If the transaction already\nhas a label the call will fail unless the overwrite bool is set. This will\noverwrite the exiting transaction label. 
Labels must not be empty, and\ncannot exceed 500 characters.", - "operationId": "WalletKit_LabelTransaction", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/walletrpcLabelTransactionResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/walletrpcLabelTransactionRequest" - } - } - ], - "tags": [ - "WalletKit" - ] - } - }, - "/v2/wallet/utxos": { - "post": { - "summary": "ListUnspent returns a list of all utxos spendable by the wallet with a\nnumber of confirmations between the specified minimum and maximum.", - "operationId": "WalletKit_ListUnspent", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/walletrpcListUnspentResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "tags": [ - "WalletKit" - ] - } - }, - "/v2/wallet/utxos/lease": { - "post": { - "summary": "LeaseOutput locks an output to the given ID, preventing it from being\navailable for any future coin selection attempts. The absolute time of the\nlock's expiration is returned. The expiration of the lock can be extended by\nsuccessive invocations of this RPC. 
Outputs can be unlocked before their\nexpiration through `ReleaseOutput`.", - "operationId": "WalletKit_LeaseOutput", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/walletrpcLeaseOutputResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/walletrpcLeaseOutputRequest" - } - } - ], - "tags": [ - "WalletKit" - ] - } - }, - "/v2/wallet/utxos/release": { - "post": { - "summary": "ReleaseOutput unlocks an output, allowing it to be available for coin\nselection if it remains unspent. The ID should match the one used to\noriginally lock the output.", - "operationId": "WalletKit_ReleaseOutput", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/walletrpcReleaseOutputResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/walletrpcReleaseOutputRequest" - } - } - ], - "tags": [ - "WalletKit" - ] - } - } - }, - "definitions": { - "ListSweepsResponseTransactionIDs": { - "type": "object", - "properties": { - "transaction_ids": { - "type": "array", - "items": { - "type": "string" - }, - "description": "Reversed, hex-encoded string representing the transaction ids of the\nsweeps that our node has broadcast. Note that these transactions may\nnot have confirmed yet, we record sweeps on broadcast, not confirmation." 
- } - } - }, - "lnrpcAddressType": { - "type": "string", - "enum": [ - "WITNESS_PUBKEY_HASH", - "NESTED_PUBKEY_HASH", - "UNUSED_WITNESS_PUBKEY_HASH", - "UNUSED_NESTED_PUBKEY_HASH" - ], - "default": "WITNESS_PUBKEY_HASH", - "description": "- `p2wkh`: Pay to witness key hash (`WITNESS_PUBKEY_HASH` = 0)\n- `np2wkh`: Pay to nested witness key hash (`NESTED_PUBKEY_HASH` = 1)", - "title": "`AddressType` has to be one of:" - }, - "lnrpcOutPoint": { - "type": "object", - "properties": { - "txid_bytes": { - "type": "string", - "format": "byte", - "description": "Raw bytes representing the transaction id." - }, - "txid_str": { - "type": "string", - "description": "Reversed, hex-encoded string representing the transaction id." - }, - "output_index": { - "type": "integer", - "format": "int64", - "description": "The index of the output on the transaction." - } - } - }, - "lnrpcTransaction": { - "type": "object", - "properties": { - "tx_hash": { - "type": "string", - "title": "The transaction hash" - }, - "amount": { - "type": "string", - "format": "int64", - "title": "The transaction amount, denominated in satoshis" - }, - "num_confirmations": { - "type": "integer", - "format": "int32", - "title": "The number of confirmations" - }, - "block_hash": { - "type": "string", - "title": "The hash of the block this transaction was included in" - }, - "block_height": { - "type": "integer", - "format": "int32", - "title": "The height of the block this transaction was included in" - }, - "time_stamp": { - "type": "string", - "format": "int64", - "title": "Timestamp of this transaction" - }, - "total_fees": { - "type": "string", - "format": "int64", - "title": "Fees paid for this transaction" - }, - "dest_addresses": { - "type": "array", - "items": { - "type": "string" - }, - "title": "Addresses that received funds for this transaction" - }, - "raw_tx_hex": { - "type": "string", - "description": "The raw transaction hex." 
- }, - "label": { - "type": "string", - "description": "A label that was optionally set on transaction broadcast." - } - } - }, - "lnrpcTransactionDetails": { - "type": "object", - "properties": { - "transactions": { - "type": "array", - "items": { - "$ref": "#/definitions/lnrpcTransaction" - }, - "description": "The list of transactions relevant to the wallet." - } - } - }, - "lnrpcUtxo": { - "type": "object", - "properties": { - "address_type": { - "$ref": "#/definitions/lnrpcAddressType", - "title": "The type of address" - }, - "address": { - "type": "string", - "title": "The address" - }, - "amount_sat": { - "type": "string", - "format": "int64", - "title": "The value of the unspent coin in satoshis" - }, - "pk_script": { - "type": "string", - "title": "The pkscript in hex" - }, - "outpoint": { - "$ref": "#/definitions/lnrpcOutPoint", - "title": "The outpoint in format txid:n" - }, - "confirmations": { - "type": "string", - "format": "int64", - "title": "The number of confirmations for the Utxo" - } - } - }, - "protobufAny": { - "type": "object", - "properties": { - "type_url": { - "type": "string" - }, - "value": { - "type": "string", - "format": "byte" - } - } - }, - "runtimeError": { - "type": "object", - "properties": { - "error": { - "type": "string" - }, - "code": { - "type": "integer", - "format": "int32" - }, - "message": { - "type": "string" - }, - "details": { - "type": "array", - "items": { - "$ref": "#/definitions/protobufAny" - } - } - } - }, - "signrpcKeyDescriptor": { - "type": "object", - "properties": { - "raw_key_bytes": { - "type": "string", - "format": "byte", - "description": "The raw bytes of the key being identified. Either this or the KeyLocator\nmust be specified." - }, - "key_loc": { - "$ref": "#/definitions/signrpcKeyLocator", - "description": "The key locator that identifies which key to use for signing. Either this\nor the raw bytes of the target key must be specified." 
- } - } - }, - "signrpcKeyLocator": { - "type": "object", - "properties": { - "key_family": { - "type": "integer", - "format": "int32", - "description": "The family of key being identified." - }, - "key_index": { - "type": "integer", - "format": "int32", - "description": "The precise index of the key being identified." - } - } - }, - "signrpcTxOut": { - "type": "object", - "properties": { - "value": { - "type": "string", - "format": "int64", - "description": "The value of the output being spent." - }, - "pk_script": { - "type": "string", - "format": "byte", - "description": "The script of the output being spent." - } - } - }, - "walletrpcAddrRequest": { - "type": "object" - }, - "walletrpcAddrResponse": { - "type": "object", - "properties": { - "addr": { - "type": "string", - "description": "The address encoded using a bech32 format." - } - } - }, - "walletrpcBumpFeeRequest": { - "type": "object", - "properties": { - "outpoint": { - "$ref": "#/definitions/lnrpcOutPoint", - "description": "The input we're attempting to bump the fee of." - }, - "target_conf": { - "type": "integer", - "format": "int64", - "description": "The target number of blocks that the input should be spent within." - }, - "sat_per_byte": { - "type": "integer", - "format": "int64", - "description": "The fee rate, expressed in sat/byte, that should be used to spend the input\nwith." - }, - "force": { - "type": "boolean", - "description": "Whether this input must be force-swept. This means that it is swept even\nif it has a negative yield." - } - } - }, - "walletrpcBumpFeeResponse": { - "type": "object" - }, - "walletrpcEstimateFeeResponse": { - "type": "object", - "properties": { - "sat_per_kw": { - "type": "string", - "format": "int64", - "description": "The amount of satoshis per kw that should be used in order to reach the\nconfirmation target in the request." 
- } - } - }, - "walletrpcFinalizePsbtRequest": { - "type": "object", - "properties": { - "funded_psbt": { - "type": "string", - "format": "byte", - "description": "A PSBT that should be signed and finalized. The PSBT must contain all\nrequired inputs, outputs, UTXO data and partial signatures of all other\nsigners." - } - } - }, - "walletrpcFinalizePsbtResponse": { - "type": "object", - "properties": { - "signed_psbt": { - "type": "string", - "format": "byte", - "description": "The fully signed and finalized transaction in PSBT format." - }, - "raw_final_tx": { - "type": "string", - "format": "byte", - "description": "The fully signed and finalized transaction in the raw wire format." - } - } - }, - "walletrpcFundPsbtRequest": { - "type": "object", - "properties": { - "psbt": { - "type": "string", - "format": "byte", - "description": "Use an existing PSBT packet as the template for the funded PSBT.\n\nThe packet must contain at least one non-dust output. If one or more\ninputs are specified, no coin selection is performed. In that case every\ninput must be an UTXO known to the wallet that has not been locked\nbefore. The sum of all inputs must be sufficiently greater than the sum\nof all outputs to pay a miner fee with the specified fee rate. A change\noutput is added to the PSBT if necessary." - }, - "raw": { - "$ref": "#/definitions/walletrpcTxTemplate", - "description": "Use the outputs and optional inputs from this raw template." - }, - "target_conf": { - "type": "integer", - "format": "int64", - "description": "The target number of blocks that the transaction should be confirmed in." - }, - "sat_per_vbyte": { - "type": "integer", - "format": "int64", - "description": "The fee rate, expressed in sat/vbyte, that should be used to spend the\ninput with." - } - } - }, - "walletrpcFundPsbtResponse": { - "type": "object", - "properties": { - "funded_psbt": { - "type": "string", - "format": "byte", - "description": "The funded but not yet signed PSBT packet." 
- }, - "change_output_index": { - "type": "integer", - "format": "int32", - "description": "The index of the added change output or -1 if no change was left over." - }, - "locked_utxos": { - "type": "array", - "items": { - "$ref": "#/definitions/walletrpcUtxoLease" - }, - "description": "The list of lock leases that were acquired for the inputs in the funded PSBT\npacket." - } - } - }, - "walletrpcKeyReq": { - "type": "object", - "properties": { - "key_finger_print": { - "type": "integer", - "format": "int32", - "description": "Is the key finger print of the root pubkey that this request is targeting.\nThis allows the WalletKit to possibly serve out keys for multiple HD chains\nvia public derivation." - }, - "key_family": { - "type": "integer", - "format": "int32", - "description": "The target key family to derive a key from. In other contexts, this is\nknown as the \"account\"." - } - } - }, - "walletrpcLabelTransactionRequest": { - "type": "object", - "properties": { - "txid": { - "type": "string", - "format": "byte", - "description": "The txid of the transaction to label." - }, - "label": { - "type": "string", - "description": "The label to add to the transaction, limited to 500 characters." - }, - "overwrite": { - "type": "boolean", - "description": "Whether to overwrite the existing label, if it is present." - } - } - }, - "walletrpcLabelTransactionResponse": { - "type": "object" - }, - "walletrpcLeaseOutputRequest": { - "type": "object", - "properties": { - "id": { - "type": "string", - "format": "byte", - "description": "An ID of 32 random bytes that must be unique for each distinct application\nusing this RPC which will be used to bound the output lease to." - }, - "outpoint": { - "$ref": "#/definitions/lnrpcOutPoint", - "description": "The identifying outpoint of the output being leased." 
- } - } - }, - "walletrpcLeaseOutputResponse": { - "type": "object", - "properties": { - "expiration": { - "type": "string", - "format": "uint64", - "description": "The absolute expiration of the output lease represented as a unix timestamp." - } - } - }, - "walletrpcListSweepsResponse": { - "type": "object", - "properties": { - "transaction_details": { - "$ref": "#/definitions/lnrpcTransactionDetails" - }, - "transaction_ids": { - "$ref": "#/definitions/ListSweepsResponseTransactionIDs" - } - } - }, - "walletrpcListUnspentResponse": { - "type": "object", - "properties": { - "utxos": { - "type": "array", - "items": { - "$ref": "#/definitions/lnrpcUtxo" - }, - "description": "A list of utxos satisfying the specified number of confirmations." - } - } - }, - "walletrpcPendingSweep": { - "type": "object", - "properties": { - "outpoint": { - "$ref": "#/definitions/lnrpcOutPoint", - "description": "The outpoint of the output we're attempting to sweep." - }, - "witness_type": { - "$ref": "#/definitions/walletrpcWitnessType", - "description": "The witness type of the output we're attempting to sweep." - }, - "amount_sat": { - "type": "integer", - "format": "int64", - "description": "The value of the output we're attempting to sweep." - }, - "sat_per_byte": { - "type": "integer", - "format": "int64", - "description": "The fee rate we'll use to sweep the output. The fee rate is only determined\nonce a sweeping transaction for the output is created, so it's possible for\nthis to be 0 before this." - }, - "broadcast_attempts": { - "type": "integer", - "format": "int64", - "description": "The number of broadcast attempts we've made to sweep the output." - }, - "next_broadcast_height": { - "type": "integer", - "format": "int64", - "description": "The next height of the chain at which we'll attempt to broadcast the\nsweep transaction of the output." 
- }, - "requested_conf_target": { - "type": "integer", - "format": "int64", - "description": "The requested confirmation target for this output." - }, - "requested_sat_per_byte": { - "type": "integer", - "format": "int64", - "description": "The requested fee rate, expressed in sat/byte, for this output." - }, - "force": { - "type": "boolean", - "description": "Whether this input must be force-swept. This means that it is swept even\nif it has a negative yield." - } - } - }, - "walletrpcPendingSweepsResponse": { - "type": "object", - "properties": { - "pending_sweeps": { - "type": "array", - "items": { - "$ref": "#/definitions/walletrpcPendingSweep" - }, - "description": "The set of outputs currently being swept by lnd's central batching engine." - } - } - }, - "walletrpcPublishResponse": { - "type": "object", - "properties": { - "publish_error": { - "type": "string", - "description": "If blank, then no error occurred and the transaction was successfully\npublished. If not the empty string, then a string representation of the\nbroadcast error.\n\nTODO(roasbeef): map to a proper enum type" - } - } - }, - "walletrpcReleaseOutputRequest": { - "type": "object", - "properties": { - "id": { - "type": "string", - "format": "byte", - "description": "The unique ID that was used to lock the output." - }, - "outpoint": { - "$ref": "#/definitions/lnrpcOutPoint", - "description": "The identifying outpoint of the output being released." - } - } - }, - "walletrpcReleaseOutputResponse": { - "type": "object" - }, - "walletrpcSendOutputsRequest": { - "type": "object", - "properties": { - "sat_per_kw": { - "type": "string", - "format": "int64", - "description": "The number of satoshis per kilo weight that should be used when crafting\nthis transaction." - }, - "outputs": { - "type": "array", - "items": { - "$ref": "#/definitions/signrpcTxOut" - }, - "description": "A slice of the outputs that should be created in the transaction produced." 
- }, - "label": { - "type": "string", - "description": "An optional label for the transaction, limited to 500 characters." - }, - "min_confs": { - "type": "integer", - "format": "int32", - "description": "The minimum number of confirmations each one of your outputs used for\nthe transaction must satisfy." - }, - "spend_unconfirmed": { - "type": "boolean", - "description": "Whether unconfirmed outputs should be used as inputs for the transaction." - } - } - }, - "walletrpcSendOutputsResponse": { - "type": "object", - "properties": { - "raw_tx": { - "type": "string", - "format": "byte", - "description": "The serialized transaction sent out on the network." - } - } - }, - "walletrpcTransaction": { - "type": "object", - "properties": { - "tx_hex": { - "type": "string", - "format": "byte", - "description": "The raw serialized transaction." - }, - "label": { - "type": "string", - "description": "An optional label to save with the transaction. Limited to 500 characters." - } - } - }, - "walletrpcTxTemplate": { - "type": "object", - "properties": { - "inputs": { - "type": "array", - "items": { - "$ref": "#/definitions/lnrpcOutPoint" - }, - "description": "An optional list of inputs to use. Every input must be an UTXO known to the\nwallet that has not been locked before. The sum of all inputs must be\nsufficiently greater than the sum of all outputs to pay a miner fee with the\nfee rate specified in the parent message.\n\nIf no inputs are specified, coin selection will be performed instead and\ninputs of sufficient value will be added to the resulting PSBT." - }, - "outputs": { - "type": "object", - "additionalProperties": { - "type": "string", - "format": "uint64" - }, - "description": "A map of all addresses and the amounts to send to in the funded PSBT." - } - } - }, - "walletrpcUtxoLease": { - "type": "object", - "properties": { - "id": { - "type": "string", - "format": "byte", - "description": "A 32 byte random ID that identifies the lease." 
- }, - "outpoint": { - "$ref": "#/definitions/lnrpcOutPoint", - "description": "The identifying outpoint of the output being leased." - }, - "expiration": { - "type": "string", - "format": "uint64", - "description": "The absolute expiration of the output lease represented as a unix timestamp." - } - } - }, - "walletrpcWitnessType": { - "type": "string", - "enum": [ - "UNKNOWN_WITNESS", - "COMMITMENT_TIME_LOCK", - "COMMITMENT_NO_DELAY", - "COMMITMENT_REVOKE", - "HTLC_OFFERED_REVOKE", - "HTLC_ACCEPTED_REVOKE", - "HTLC_OFFERED_TIMEOUT_SECOND_LEVEL", - "HTLC_ACCEPTED_SUCCESS_SECOND_LEVEL", - "HTLC_OFFERED_REMOTE_TIMEOUT", - "HTLC_ACCEPTED_REMOTE_SUCCESS", - "HTLC_SECOND_LEVEL_REVOKE", - "WITNESS_KEY_HASH", - "NESTED_WITNESS_KEY_HASH", - "COMMITMENT_ANCHOR" - ], - "default": "UNKNOWN_WITNESS", - "description": " - COMMITMENT_TIME_LOCK: A witness that allows us to spend the output of a commitment transaction\nafter a relative lock-time lockout.\n - COMMITMENT_NO_DELAY: A witness that allows us to spend a settled no-delay output immediately on a\ncounterparty's commitment transaction.\n - COMMITMENT_REVOKE: A witness that allows us to sweep the settled output of a malicious\ncounterparty's who broadcasts a revoked commitment transaction.\n - HTLC_OFFERED_REVOKE: A witness that allows us to sweep an HTLC which we offered to the remote\nparty in the case that they broadcast a revoked commitment state.\n - HTLC_ACCEPTED_REVOKE: A witness that allows us to sweep an HTLC output sent to us in the case that\nthe remote party broadcasts a revoked commitment state.\n - HTLC_OFFERED_TIMEOUT_SECOND_LEVEL: A witness that allows us to sweep an HTLC output that we extended to a\nparty, but was never fulfilled. This HTLC output isn't directly on the\ncommitment transaction, but is the result of a confirmed second-level HTLC\ntransaction. 
As a result, we can only spend this after a CSV delay.\n - HTLC_ACCEPTED_SUCCESS_SECOND_LEVEL: A witness that allows us to sweep an HTLC output that was offered to us, and\nfor which we have a payment preimage. This HTLC output isn't directly on our\ncommitment transaction, but is the result of confirmed second-level HTLC\ntransaction. As a result, we can only spend this after a CSV delay.\n - HTLC_OFFERED_REMOTE_TIMEOUT: A witness that allows us to sweep an HTLC that we offered to the remote\nparty which lies in the commitment transaction of the remote party. We can\nspend this output after the absolute CLTV timeout of the HTLC as passed.\n - HTLC_ACCEPTED_REMOTE_SUCCESS: A witness that allows us to sweep an HTLC that was offered to us by the\nremote party. We use this witness in the case that the remote party goes to\nchain, and we know the pre-image to the HTLC. We can sweep this without any\nadditional timeout.\n - HTLC_SECOND_LEVEL_REVOKE: A witness that allows us to sweep an HTLC from the remote party's commitment\ntransaction in the case that the broadcast a revoked commitment, but then\nalso immediately attempt to go to the second level to claim the HTLC.\n - WITNESS_KEY_HASH: A witness type that allows us to spend a regular p2wkh output that's sent to\nan output which is under complete control of the backing wallet.\n - NESTED_WITNESS_KEY_HASH: A witness type that allows us to sweep an output that sends to a nested P2SH\nscript that pays to a key solely under our control.\n - COMMITMENT_ANCHOR: A witness type that allows us to spend our anchor on the commitment\ntransaction." 
- } - } -} diff --git a/lnd/lnrpc/walletrpc/walletkit_server.go b/lnd/lnrpc/walletrpc/walletkit_server.go deleted file mode 100644 index 9c97f40a..00000000 --- a/lnd/lnrpc/walletrpc/walletkit_server.go +++ /dev/null @@ -1,1120 +0,0 @@ -// +build walletrpc - -package walletrpc - -import ( - "bytes" - "context" - "io/ioutil" - "os" - "path/filepath" - "time" - - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/psbt" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/lnd/input" - "github.com/pkt-cash/pktd/lnd/keychain" - "github.com/pkt-cash/pktd/lnd/labels" - "github.com/pkt-cash/pktd/lnd/lnrpc" - "github.com/pkt-cash/pktd/lnd/lnrpc/signrpc" - "github.com/pkt-cash/pktd/lnd/lnwallet" - "github.com/pkt-cash/pktd/lnd/lnwallet/btcwallet" - "github.com/pkt-cash/pktd/lnd/lnwallet/chainfee" - "github.com/pkt-cash/pktd/lnd/macaroons" - "github.com/pkt-cash/pktd/lnd/sweep" - "github.com/pkt-cash/pktd/pktwallet/wtxmgr" - "github.com/pkt-cash/pktd/txscript" - "github.com/pkt-cash/pktd/wire" - "google.golang.org/grpc" - "gopkg.in/macaroon-bakery.v2/bakery" -) - -const ( - // subServerName is the name of the sub rpc server. We'll use this name - // to register ourselves, and we also require that the main - // SubServerConfigDispatcher instance recognize as the name of our - subServerName = "WalletKitRPC" -) - -var ( - // macaroonOps are the set of capabilities that our minted macaroon (if - // it doesn't already exist) will have. - macaroonOps = []bakery.Op{ - { - Entity: "address", - Action: "write", - }, - { - Entity: "address", - Action: "read", - }, - { - Entity: "onchain", - Action: "write", - }, - { - Entity: "onchain", - Action: "read", - }, - } - - // macPermissions maps RPC calls to the permissions they require. 
- macPermissions = map[string][]bakery.Op{ - "/walletrpc.WalletKit/DeriveNextKey": {{ - Entity: "address", - Action: "read", - }}, - "/walletrpc.WalletKit/DeriveKey": {{ - Entity: "address", - Action: "read", - }}, - "/walletrpc.WalletKit/NextAddr": {{ - Entity: "address", - Action: "read", - }}, - "/walletrpc.WalletKit/PublishTransaction": {{ - Entity: "onchain", - Action: "write", - }}, - "/walletrpc.WalletKit/SendOutputs": {{ - Entity: "onchain", - Action: "write", - }}, - "/walletrpc.WalletKit/EstimateFee": {{ - Entity: "onchain", - Action: "read", - }}, - "/walletrpc.WalletKit/PendingSweeps": {{ - Entity: "onchain", - Action: "read", - }}, - "/walletrpc.WalletKit/BumpFee": {{ - Entity: "onchain", - Action: "write", - }}, - "/walletrpc.WalletKit/ListSweeps": {{ - Entity: "onchain", - Action: "read", - }}, - "/walletrpc.WalletKit/LabelTransaction": {{ - Entity: "onchain", - Action: "write", - }}, - "/walletrpc.WalletKit/LeaseOutput": {{ - Entity: "onchain", - Action: "write", - }}, - "/walletrpc.WalletKit/ReleaseOutput": {{ - Entity: "onchain", - Action: "write", - }}, - "/walletrpc.WalletKit/ListUnspent": {{ - Entity: "onchain", - Action: "read", - }}, - "/walletrpc.WalletKit/FundPsbt": {{ - Entity: "onchain", - Action: "write", - }}, - "/walletrpc.WalletKit/FinalizePsbt": {{ - Entity: "onchain", - Action: "write", - }}, - } - - // DefaultWalletKitMacFilename is the default name of the wallet kit - // macaroon that we expect to find via a file handle within the main - // configuration file in this package. - DefaultWalletKitMacFilename = "walletkit.macaroon" - - // LndInternalLockID is the binary representation of the SHA256 hash of - // the string "lnd-internal-lock-id" and is used for UTXO lock leases to - // identify that we ourselves are locking an UTXO, for example when - // giving out a funded PSBT. The ID corresponds to the hex value of - // ede19a92ed321a4705f8a1cccc1d4f6182545d4bb4fae08bd5937831b7e38f98. 
- LndInternalLockID = wtxmgr.LockID{ - 0xed, 0xe1, 0x9a, 0x92, 0xed, 0x32, 0x1a, 0x47, - 0x05, 0xf8, 0xa1, 0xcc, 0xcc, 0x1d, 0x4f, 0x61, - 0x82, 0x54, 0x5d, 0x4b, 0xb4, 0xfa, 0xe0, 0x8b, - 0xd5, 0x93, 0x78, 0x31, 0xb7, 0xe3, 0x8f, 0x98, - } -) - -// ErrZeroLabel is returned when an attempt is made to label a transaction with -// an empty label. -var ErrZeroLabel = Err.CodeWithDetail("ErrZeroLabel", "cannot label transaction with empty label") - -// WalletKit is a sub-RPC server that exposes a tool kit which allows clients -// to execute common wallet operations. This includes requesting new addresses, -// keys (for contracts!), and publishing transactions. -type WalletKit struct { - cfg *Config -} - -// A compile time check to ensure that WalletKit fully implements the -// WalletKitServer gRPC service. -var _ WalletKitServer = (*WalletKit)(nil) - -// New creates a new instance of the WalletKit sub-RPC server. -func New(cfg *Config) (*WalletKit, lnrpc.MacaroonPerms, er.R) { - // If the path of the wallet kit macaroon wasn't specified, then we'll - // assume that it's found at the default network directory. - if cfg.WalletKitMacPath == "" { - cfg.WalletKitMacPath = filepath.Join( - cfg.NetworkDir, DefaultWalletKitMacFilename, - ) - } - - // Now that we know the full path of the wallet kit macaroon, we can - // check to see if we need to create it or not. If stateless_init is set - // then we don't write the macaroons. - macFilePath := cfg.WalletKitMacPath - if cfg.MacService != nil && !cfg.MacService.StatelessInit && - !lnrpc.FileExists(macFilePath) { - - log.Infof("Baking macaroons for WalletKit RPC Server at: %v", - macFilePath) - - // At this point, we know that the wallet kit macaroon doesn't - // yet, exist, so we need to create it with the help of the - // main macaroon service. 
- walletKitMac, err := cfg.MacService.NewMacaroon( - context.Background(), macaroons.DefaultRootKeyID, - macaroonOps..., - ) - if err != nil { - return nil, nil, err - } - walletKitMacBytes, err := walletKitMac.M().MarshalBinary() - if err != nil { - return nil, nil, err - } - err = ioutil.WriteFile(macFilePath, walletKitMacBytes, 0644) - if err != nil { - _ = os.Remove(macFilePath) - return nil, nil, err - } - } - - walletKit := &WalletKit{ - cfg: cfg, - } - - return walletKit, macPermissions, nil -} - -// Start launches any helper goroutines required for the sub-server to function. -// -// NOTE: This is part of the lnrpc.SubServer interface. -func (w *WalletKit) Start() er.R { - return nil -} - -// Stop signals any active goroutines for a graceful closure. -// -// NOTE: This is part of the lnrpc.SubServer interface. -func (w *WalletKit) Stop() er.R { - return nil -} - -// Name returns a unique string representation of the sub-server. This can be -// used to identify the sub-server and also de-duplicate them. -// -// NOTE: This is part of the lnrpc.SubServer interface. -func (w *WalletKit) Name() string { - return subServerName -} - -// RegisterWithRootServer will be called by the root gRPC server to direct a -// sub RPC server to register itself with the main gRPC root server. Until this -// is called, each sub-server won't be able to have requests routed towards it. -// -// NOTE: This is part of the lnrpc.SubServer interface. -func (w *WalletKit) RegisterWithRootServer(grpcServer *grpc.Server) er.R { - // We make sure that we register it with the main gRPC server to ensure - // all our methods are routed properly. - RegisterWalletKitServer(grpcServer, w) - - log.Debugf("WalletKit RPC server successfully registered with " + - "root gRPC server") - - return nil -} - -// RegisterWithRestServer will be called by the root REST mux to direct a sub -// RPC server to register itself with the main REST mux server. 
Until this is -// called, each sub-server won't be able to have requests routed towards it. -// -// NOTE: This is part of the lnrpc.SubServer interface. -func (w *WalletKit) RegisterWithRestServer(ctx context.Context, - mux *runtime.ServeMux, dest string, opts []grpc.DialOption) er.R { - - // We make sure that we register it with the main REST server to ensure - // all our methods are routed properly. - err := RegisterWalletKitHandlerFromEndpoint(ctx, mux, dest, opts) - if err != nil { - log.Errorf("Could not register WalletKit REST server "+ - "with root REST server: %v", err) - return err - } - - log.Debugf("WalletKit REST server successfully registered with " + - "root REST server") - return nil -} - -// ListUnspent returns useful information about each unspent output owned by the -// wallet, as reported by the underlying `ListUnspentWitness`; the information -// returned is: outpoint, amount in satoshis, address, address type, -// scriptPubKey in hex and number of confirmations. The result is filtered to -// contain outputs whose number of confirmations is between a -// minimum and maximum number of confirmations specified by the user, with 0 -// meaning unconfirmed. -func (w *WalletKit) ListUnspent(ctx context.Context, - req *ListUnspentRequest) (*ListUnspentResponse, er.R) { - - // Validate the confirmation arguments. - minConfs, maxConfs, err := lnrpc.ParseConfs(req.MinConfs, req.MaxConfs) - if err != nil { - return nil, err - } - - // With our arguments validated, we'll query the internal wallet for - // the set of UTXOs that match our query. - // - // We'll acquire the global coin selection lock to ensure there aren't - // any other concurrent processes attempting to lock any UTXOs which may - // be shown available to us. 
- var utxos []*lnwallet.Utxo - err = w.cfg.CoinSelectionLocker.WithCoinSelectLock(func() er.R { - utxos, err = w.cfg.Wallet.ListUnspentWitness(minConfs, maxConfs) - return err - }) - if err != nil { - return nil, err - } - - rpcUtxos, err := lnrpc.MarshalUtxos(utxos, w.cfg.ChainParams) - if err != nil { - return nil, err - } - - return &ListUnspentResponse{ - Utxos: rpcUtxos, - }, nil -} - -// LeaseOutput locks an output to the given ID, preventing it from being -// available for any future coin selection attempts. The absolute time of the -// lock's expiration is returned. The expiration of the lock can be extended by -// successive invocations of this call. Outputs can be unlocked before their -// expiration through `ReleaseOutput`. -// -// If the output is not known, wtxmgr.ErrUnknownOutput is returned. If the -// output has already been locked to a different ID, then -// wtxmgr.ErrOutputAlreadyLocked is returned. -func (w *WalletKit) LeaseOutput(ctx context.Context, - req *LeaseOutputRequest) (*LeaseOutputResponse, er.R) { - - if len(req.Id) != 32 { - return nil, er.New("id must be 32 random bytes") - } - var lockID wtxmgr.LockID - copy(lockID[:], req.Id) - - // Don't allow ID's of 32 bytes, but all zeros. - if lockID == (wtxmgr.LockID{}) { - return nil, er.New("id must be 32 random bytes") - } - - // Don't allow our internal ID to be used externally for locking. Only - // unlocking is allowed. - if lockID == LndInternalLockID { - return nil, er.New("reserved id cannot be used") - } - - op, err := unmarshallOutPoint(req.Outpoint) - if err != nil { - return nil, err - } - - // Acquire the global coin selection lock to ensure there aren't any - // other concurrent processes attempting to lease the same UTXO. 
- var expiration time.Time - err = w.cfg.CoinSelectionLocker.WithCoinSelectLock(func() er.R { - expiration, err = w.cfg.Wallet.LeaseOutput(lockID, *op) - return err - }) - if err != nil { - return nil, err - } - - return &LeaseOutputResponse{ - Expiration: uint64(expiration.Unix()), - }, nil -} - -// ReleaseOutput unlocks an output, allowing it to be available for coin -// selection if it remains unspent. The ID should match the one used to -// originally lock the output. -func (w *WalletKit) ReleaseOutput(ctx context.Context, - req *ReleaseOutputRequest) (*ReleaseOutputResponse, er.R) { - - if len(req.Id) != 32 { - return nil, er.New("id must be 32 random bytes") - } - var lockID wtxmgr.LockID - copy(lockID[:], req.Id) - - op, err := unmarshallOutPoint(req.Outpoint) - if err != nil { - return nil, err - } - - // Acquire the global coin selection lock to maintain consistency as - // it's acquired when we initially leased the output. - err = w.cfg.CoinSelectionLocker.WithCoinSelectLock(func() er.R { - return w.cfg.Wallet.ReleaseOutput(lockID, *op) - }) - if err != nil { - return nil, err - } - - return &ReleaseOutputResponse{}, nil -} - -// DeriveNextKey attempts to derive the *next* key within the key family -// (account in BIP43) specified. This method should return the next external -// child within this branch. -func (w *WalletKit) DeriveNextKey(ctx context.Context, - req *KeyReq) (*signrpc.KeyDescriptor, er.R) { - - nextKeyDesc, err := w.cfg.KeyRing.DeriveNextKey( - keychain.KeyFamily(req.KeyFamily), - ) - if err != nil { - return nil, err - } - - return &signrpc.KeyDescriptor{ - KeyLoc: &signrpc.KeyLocator{ - KeyFamily: int32(nextKeyDesc.Family), - KeyIndex: int32(nextKeyDesc.Index), - }, - RawKeyBytes: nextKeyDesc.PubKey.SerializeCompressed(), - }, nil -} - -// DeriveKey attempts to derive an arbitrary key specified by the passed -// KeyLocator. 
-func (w *WalletKit) DeriveKey(ctx context.Context, - req *signrpc.KeyLocator) (*signrpc.KeyDescriptor, er.R) { - - keyDesc, err := w.cfg.KeyRing.DeriveKey(keychain.KeyLocator{ - Family: keychain.KeyFamily(req.KeyFamily), - Index: uint32(req.KeyIndex), - }) - if err != nil { - return nil, err - } - - return &signrpc.KeyDescriptor{ - KeyLoc: &signrpc.KeyLocator{ - KeyFamily: int32(keyDesc.Family), - KeyIndex: int32(keyDesc.Index), - }, - RawKeyBytes: keyDesc.PubKey.SerializeCompressed(), - }, nil -} - -// NextAddr returns the next unused address within the wallet. -func (w *WalletKit) NextAddr(ctx context.Context, - req *AddrRequest) (*AddrResponse, er.R) { - - addr, err := w.cfg.Wallet.NewAddress(lnwallet.WitnessPubKey, false) - if err != nil { - return nil, err - } - - return &AddrResponse{ - Addr: addr.String(), - }, nil -} - -// Attempts to publish the passed transaction to the network. Once this returns -// without an error, the wallet will continually attempt to re-broadcast the -// transaction on start up, until it enters the chain. -func (w *WalletKit) PublishTransaction(ctx context.Context, - req *Transaction) (*PublishResponse, er.R) { - - switch { - // If the client doesn't specify a transaction, then there's nothing to - // publish. - case len(req.TxHex) == 0: - return nil, er.Errorf("must provide a transaction to " + - "publish") - } - - tx := &wire.MsgTx{} - txReader := bytes.NewReader(req.TxHex) - if err := tx.Deserialize(txReader); err != nil { - return nil, err - } - - label, err := labels.ValidateAPI(req.Label) - if err != nil { - return nil, err - } - - err = w.cfg.Wallet.PublishTransaction(tx, label) - if err != nil { - return nil, err - } - - return &PublishResponse{}, nil -} - -// SendOutputs is similar to the existing sendmany call in Bitcoind, and allows -// the caller to create a transaction that sends to several outputs at once. -// This is ideal when wanting to batch create a set of transactions. 
-func (w *WalletKit) SendOutputs(ctx context.Context, - req *SendOutputsRequest) (*SendOutputsResponse, er.R) { - - switch { - // If the client didn't specify any outputs to create, then we can't - // proceed . - case len(req.Outputs) == 0: - return nil, er.Errorf("must specify at least one output " + - "to create") - } - - // Before we can request this transaction to be created, we'll need to - // amp the protos back into the format that the internal wallet will - // recognize. - outputsToCreate := make([]*wire.TxOut, 0, len(req.Outputs)) - for _, output := range req.Outputs { - outputsToCreate = append(outputsToCreate, &wire.TxOut{ - Value: output.Value, - PkScript: output.PkScript, - }) - } - - // Then, we'll extract the minimum number of confirmations that each - // output we use to fund the transaction should satisfy. - minConfs, err := lnrpc.ExtractMinConfs(req.MinConfs, req.SpendUnconfirmed) - if err != nil { - return nil, err - } - - label, err := labels.ValidateAPI(req.Label) - if err != nil { - return nil, err - } - - // Now that we have the outputs mapped, we can request that the wallet - // attempt to create this transaction. - tx, err := w.cfg.Wallet.SendOutputs( - outputsToCreate, chainfee.SatPerKWeight(req.SatPerKw), minConfs, label, - ) - if err != nil { - return nil, err - } - - var b bytes.Buffer - if err := tx.Serialize(&b); err != nil { - return nil, err - } - - return &SendOutputsResponse{ - RawTx: b.Bytes(), - }, nil -} - -// EstimateFee attempts to query the internal fee estimator of the wallet to -// determine the fee (in sat/kw) to attach to a transaction in order to achieve -// the confirmation target. -func (w *WalletKit) EstimateFee(ctx context.Context, - req *EstimateFeeRequest) (*EstimateFeeResponse, er.R) { - - switch { - // A confirmation target of zero doesn't make any sense. Similarly, we - // reject confirmation targets of 1 as they're unreasonable. 
- case req.ConfTarget == 0 || req.ConfTarget == 1: - return nil, er.Errorf("confirmation target must be greater " + - "than 1") - } - - satPerKw, err := w.cfg.FeeEstimator.EstimateFeePerKW( - uint32(req.ConfTarget), - ) - if err != nil { - return nil, err - } - - return &EstimateFeeResponse{ - SatPerKw: int64(satPerKw), - }, nil -} - -// PendingSweeps returns lists of on-chain outputs that lnd is currently -// attempting to sweep within its central batching engine. Outputs with similar -// fee rates are batched together in order to sweep them within a single -// transaction. The fee rate of each sweeping transaction is determined by -// taking the average fee rate of all the outputs it's trying to sweep. -func (w *WalletKit) PendingSweeps(ctx context.Context, - in *PendingSweepsRequest) (*PendingSweepsResponse, er.R) { - - // Retrieve all of the outputs the UtxoSweeper is currently trying to - // sweep. - pendingInputs, err := w.cfg.Sweeper.PendingInputs() - if err != nil { - return nil, err - } - - // Convert them into their respective RPC format. 
- rpcPendingSweeps := make([]*PendingSweep, 0, len(pendingInputs)) - for _, pendingInput := range pendingInputs { - var witnessType WitnessType - switch pendingInput.WitnessType { - case input.CommitmentTimeLock: - witnessType = WitnessType_COMMITMENT_TIME_LOCK - case input.CommitmentNoDelay: - witnessType = WitnessType_COMMITMENT_NO_DELAY - case input.CommitmentRevoke: - witnessType = WitnessType_COMMITMENT_REVOKE - case input.HtlcOfferedRevoke: - witnessType = WitnessType_HTLC_OFFERED_REVOKE - case input.HtlcAcceptedRevoke: - witnessType = WitnessType_HTLC_ACCEPTED_REVOKE - case input.HtlcOfferedTimeoutSecondLevel: - witnessType = WitnessType_HTLC_OFFERED_TIMEOUT_SECOND_LEVEL - case input.HtlcAcceptedSuccessSecondLevel: - witnessType = WitnessType_HTLC_ACCEPTED_SUCCESS_SECOND_LEVEL - case input.HtlcOfferedRemoteTimeout: - witnessType = WitnessType_HTLC_OFFERED_REMOTE_TIMEOUT - case input.HtlcAcceptedRemoteSuccess: - witnessType = WitnessType_HTLC_ACCEPTED_REMOTE_SUCCESS - case input.HtlcSecondLevelRevoke: - witnessType = WitnessType_HTLC_SECOND_LEVEL_REVOKE - case input.WitnessKeyHash: - witnessType = WitnessType_WITNESS_KEY_HASH - case input.NestedWitnessKeyHash: - witnessType = WitnessType_NESTED_WITNESS_KEY_HASH - case input.CommitmentAnchor: - witnessType = WitnessType_COMMITMENT_ANCHOR - default: - log.Warnf("Unhandled witness type %v for input %v", - pendingInput.WitnessType, pendingInput.OutPoint) - } - - op := &lnrpc.OutPoint{ - TxidBytes: pendingInput.OutPoint.Hash[:], - OutputIndex: pendingInput.OutPoint.Index, - } - amountSat := uint32(pendingInput.Amount) - satPerByte := uint32(pendingInput.LastFeeRate.FeePerKVByte() / 1000) - broadcastAttempts := uint32(pendingInput.BroadcastAttempts) - nextBroadcastHeight := uint32(pendingInput.NextBroadcastHeight) - - requestedFee := pendingInput.Params.Fee - requestedFeeRate := uint32(requestedFee.FeeRate.FeePerKVByte() / 1000) - - rpcPendingSweeps = append(rpcPendingSweeps, &PendingSweep{ - Outpoint: op, - 
WitnessType: witnessType, - AmountSat: amountSat, - SatPerByte: satPerByte, - BroadcastAttempts: broadcastAttempts, - NextBroadcastHeight: nextBroadcastHeight, - RequestedSatPerByte: requestedFeeRate, - RequestedConfTarget: requestedFee.ConfTarget, - Force: pendingInput.Params.Force, - }) - } - - return &PendingSweepsResponse{ - PendingSweeps: rpcPendingSweeps, - }, nil -} - -// unmarshallOutPoint converts an outpoint from its lnrpc type to its canonical -// type. -func unmarshallOutPoint(op *lnrpc.OutPoint) (*wire.OutPoint, er.R) { - if op == nil { - return nil, er.Errorf("empty outpoint provided") - } - - var hash chainhash.Hash - switch { - case len(op.TxidBytes) == 0 && len(op.TxidStr) == 0: - fallthrough - - case len(op.TxidBytes) != 0 && len(op.TxidStr) != 0: - return nil, er.Errorf("either TxidBytes or TxidStr must be " + - "specified, but not both") - - // The hash was provided as raw bytes. - case len(op.TxidBytes) != 0: - copy(hash[:], op.TxidBytes) - - // The hash was provided as a hex-encoded string. - case len(op.TxidStr) != 0: - h, err := chainhash.NewHashFromStr(op.TxidStr) - if err != nil { - return nil, err - } - hash = *h - } - - return &wire.OutPoint{ - Hash: hash, - Index: op.OutputIndex, - }, nil -} - -// BumpFee allows bumping the fee rate of an arbitrary input. A fee preference -// can be expressed either as a specific fee rate or a delta of blocks in which -// the output should be swept on-chain within. If a fee preference is not -// explicitly specified, then an error is returned. The status of the input -// sweep can be checked through the PendingSweeps RPC. -func (w *WalletKit) BumpFee(ctx context.Context, - in *BumpFeeRequest) (*BumpFeeResponse, er.R) { - - // Parse the outpoint from the request. - op, err := unmarshallOutPoint(in.Outpoint) - if err != nil { - return nil, err - } - - // Construct the request's fee preference. 
- satPerKw := chainfee.SatPerKVByte(in.SatPerByte * 1000).FeePerKWeight() - feePreference := sweep.FeePreference{ - ConfTarget: uint32(in.TargetConf), - FeeRate: satPerKw, - } - - // We'll attempt to bump the fee of the input through the UtxoSweeper. - // If it is currently attempting to sweep the input, then it'll simply - // bump its fee, which will result in a replacement transaction (RBF) - // being broadcast. If it is not aware of the input however, - // lnwallet.ErrNotMine is returned. - params := sweep.ParamsUpdate{ - Fee: feePreference, - Force: in.Force, - } - - _, err = w.cfg.Sweeper.UpdateParams(*op, params) - switch err { - case nil: - return &BumpFeeResponse{}, nil - case lnwallet.ErrNotMine: - break - default: - return nil, err - } - - log.Debugf("Attempting to CPFP outpoint %s", op) - - // Since we're unable to perform a bump through RBF, we'll assume the - // user is attempting to bump an unconfirmed transaction's fee rate by - // sweeping an output within it under control of the wallet with a - // higher fee rate, essentially performing a Child-Pays-For-Parent - // (CPFP). - // - // We'll gather all of the information required by the UtxoSweeper in - // order to sweep the output. - utxo, err := w.cfg.Wallet.FetchInputInfo(op) - if err != nil { - return nil, err - } - - // We're only able to bump the fee of unconfirmed transactions. 
- if utxo.Confirmations > 0 { - return nil, er.New("unable to bump fee of a confirmed " + - "transaction") - } - - var witnessType input.WitnessType - switch utxo.AddressType { - case lnwallet.WitnessPubKey: - witnessType = input.WitnessKeyHash - case lnwallet.NestedWitnessPubKey: - witnessType = input.NestedWitnessKeyHash - default: - return nil, er.Errorf("unknown input witness %v", op) - } - - signDesc := &input.SignDescriptor{ - Output: &wire.TxOut{ - PkScript: utxo.PkScript, - Value: int64(utxo.Value), - }, - HashType: params.SigHashAll, - } - - // We'll use the current height as the height hint since we're dealing - // with an unconfirmed transaction. - _, currentHeight, err := w.cfg.Chain.GetBestBlock() - if err != nil { - return nil, er.Errorf("unable to retrieve current height: %v", - err) - } - - input := input.NewBaseInput(op, witnessType, signDesc, uint32(currentHeight)) - if _, err = w.cfg.Sweeper.SweepInput(input, sweep.Params{Fee: feePreference}); err != nil { - return nil, err - } - - return &BumpFeeResponse{}, nil -} - -// ListSweeps returns a list of the sweeps that our node has published. -func (w *WalletKit) ListSweeps(ctx context.Context, - in *ListSweepsRequest) (*ListSweepsResponse, er.R) { - - sweeps, err := w.cfg.Sweeper.ListSweeps() - if err != nil { - return nil, err - } - - sweepTxns := make(map[string]bool) - for _, sweep := range sweeps { - sweepTxns[sweep.String()] = true - } - - // Some of our sweeps could have been replaced by fee, or dropped out - // of the mempool. Here, we lookup our wallet transactions so that we - // can match our list of sweeps against the list of transactions that - // the wallet is still tracking. 
- transactions, err := w.cfg.Wallet.ListTransactionDetails( - 0, btcwallet.UnconfirmedHeight, - ) - if err != nil { - return nil, err - } - - var ( - txids []string - txDetails []*lnwallet.TransactionDetail - ) - - for _, tx := range transactions { - _, ok := sweepTxns[tx.Hash.String()] - if !ok { - continue - } - - // Add the txid or full tx details depending on whether we want - // verbose output or not. - if in.Verbose { - txDetails = append(txDetails, tx) - } else { - txids = append(txids, tx.Hash.String()) - } - } - - if in.Verbose { - return &ListSweepsResponse{ - Sweeps: &ListSweepsResponse_TransactionDetails{ - TransactionDetails: lnrpc.RPCTransactionDetails( - txDetails, - ), - }, - }, nil - } - - return &ListSweepsResponse{ - Sweeps: &ListSweepsResponse_TransactionIds{ - TransactionIds: &ListSweepsResponse_TransactionIDs{ - TransactionIds: txids, - }, - }, - }, nil -} - -// LabelTransaction adds a label to a transaction. -func (w *WalletKit) LabelTransaction(ctx context.Context, - req *LabelTransactionRequest) (*LabelTransactionResponse, er.R) { - - // Check that the label provided in non-zero. - if len(req.Label) == 0 { - return nil, ErrZeroLabel.Default() - } - - // Validate the length of the non-zero label. We do not need to use the - // label returned here, because the original is non-zero so will not - // be replaced. - if _, err := labels.ValidateAPI(req.Label); err != nil { - return nil, err - } - - hash, err := chainhash.NewHash(req.Txid) - if err != nil { - return nil, err - } - - err = w.cfg.Wallet.LabelTransaction(*hash, req.Label, req.Overwrite) - return &LabelTransactionResponse{}, err -} - -// FundPsbt creates a fully populated PSBT that contains enough inputs to fund -// the outputs specified in the template. There are two ways of specifying a -// template: Either by passing in a PSBT with at least one output declared or -// by passing in a raw TxTemplate message. 
If there are no inputs specified in -// the template, coin selection is performed automatically. If the template does -// contain any inputs, it is assumed that full coin selection happened -// externally and no additional inputs are added. If the specified inputs aren't -// enough to fund the outputs with the given fee rate, an error is returned. -// After either selecting or verifying the inputs, all input UTXOs are locked -// with an internal app ID. -// -// NOTE: If this method returns without an error, it is the caller's -// responsibility to either spend the locked UTXOs (by finalizing and then -// publishing the transaction) or to unlock/release the locked UTXOs in case of -// an error on the caller's side. -func (w *WalletKit) FundPsbt(_ context.Context, - req *FundPsbtRequest) (*FundPsbtResponse, er.R) { - - var ( - err error - packet *psbt.Packet - feeSatPerKW chainfee.SatPerKWeight - locks []*utxoLock - rawPsbt bytes.Buffer - ) - - // There are two ways a user can specify what we call the template (a - // list of inputs and outputs to use in the PSBT): Either as a PSBT - // packet directly or as a special RPC message. Find out which one the - // user wants to use, they are mutually exclusive. - switch { - // The template is specified as a PSBT. All we have to do is parse it. - case req.GetPsbt() != nil: - r := bytes.NewReader(req.GetPsbt()) - packet, err = psbt.NewFromRawBytes(r, false) - if err != nil { - return nil, er.Errorf("could not parse PSBT: %v", err) - } - - // The template is specified as a RPC message. We need to create a new - // PSBT and copy the RPC information over. 
- case req.GetRaw() != nil: - tpl := req.GetRaw() - if len(tpl.Outputs) == 0 { - return nil, er.Errorf("no outputs specified") - } - - txOut := make([]*wire.TxOut, 0, len(tpl.Outputs)) - for addrStr, amt := range tpl.Outputs { - addr, err := btcutil.DecodeAddress( - addrStr, w.cfg.ChainParams, - ) - if err != nil { - return nil, er.Errorf("error parsing address "+ - "%s for network %s: %v", addrStr, - w.cfg.ChainParams.Name, err) - } - pkScript, err := txscript.PayToAddrScript(addr) - if err != nil { - return nil, er.Errorf("error getting pk "+ - "script for address %s: %v", addrStr, - err) - } - - txOut = append(txOut, &wire.TxOut{ - Value: int64(amt), - PkScript: pkScript, - }) - } - - txIn := make([]*wire.OutPoint, len(tpl.Inputs)) - for idx, in := range tpl.Inputs { - op, err := unmarshallOutPoint(in) - if err != nil { - return nil, er.Errorf("error parsing "+ - "outpoint: %v", err) - } - txIn[idx] = op - } - - sequences := make([]uint32, len(txIn)) - packet, err = psbt.New(txIn, txOut, 2, 0, sequences) - if err != nil { - return nil, er.Errorf("could not create PSBT: %v", err) - } - - default: - return nil, er.Errorf("transaction template missing, need " + - "to specify either PSBT or raw TX template") - } - - // Determine the desired transaction fee. - switch { - // Estimate the fee by the target number of blocks to confirmation. - case req.GetTargetConf() != 0: - targetConf := req.GetTargetConf() - if targetConf < 2 { - return nil, er.Errorf("confirmation target must be " + - "greater than 1") - } - - feeSatPerKW, err = w.cfg.FeeEstimator.EstimateFeePerKW( - targetConf, - ) - if err != nil { - return nil, er.Errorf("could not estimate fee: %v", - err) - } - - // Convert the fee to sat/kW from the specified sat/vByte. 
- case req.GetSatPerVbyte() != 0: - feeSatPerKW = chainfee.SatPerKVByte( - req.GetSatPerVbyte() * 1000, - ).FeePerKWeight() - - default: - return nil, er.Errorf("fee definition missing, need to " + - "specify either target_conf or set_per_vbyte") - } - - // The RPC parsing part is now over. Several of the following operations - // require us to hold the global coin selection lock so we do the rest - // of the tasks while holding the lock. The result is a list of locked - // UTXOs. - changeIndex := int32(-1) - err = w.cfg.CoinSelectionLocker.WithCoinSelectLock(func() er.R { - // In case the user did specify inputs, we need to make sure - // they are known to us, still unspent and not yet locked. - if len(packet.UnsignedTx.TxIn) > 0 { - // Get a list of all unspent witness outputs. - utxos, err := w.cfg.Wallet.ListUnspentWitness( - defaultMinConf, defaultMaxConf, - ) - if err != nil { - return err - } - - // Validate all inputs against our known list of UTXOs - // now. - err = verifyInputsUnspent(packet.UnsignedTx.TxIn, utxos) - if err != nil { - return err - } - } - - // We made sure the input from the user is as sane as possible. - // We can now ask the wallet to fund the TX. This will not yet - // lock any coins but might still change the wallet DB by - // generating a new change address. - changeIndex, err = w.cfg.Wallet.FundPsbt(packet, feeSatPerKW) - if err != nil { - return er.Errorf("wallet couldn't fund PSBT: %v", err) - } - - // Make sure we can properly serialize the packet. If this goes - // wrong then something isn't right with the inputs and we - // probably shouldn't try to lock any of them. - err = packet.Serialize(&rawPsbt) - if err != nil { - return er.Errorf("error serializing funded PSBT: %v", - err) - } - - // Now we have obtained a set of coins that can be used to fund - // the TX. Let's lock them to be sure they aren't spent by the - // time the PSBT is published. This is the action we do here - // that could cause an error. 
Therefore if some of the UTXOs - // cannot be locked, the rollback of the other's locks also - // happens in this function. If we ever need to do more after - // this function, we need to extract the rollback needs to be - // extracted into a defer. - locks, err = lockInputs(w.cfg.Wallet, packet) - if err != nil { - return er.Errorf("could not lock inputs: %v", err) - } - - return nil - }) - if err != nil { - return nil, err - } - - // Convert the lock leases to the RPC format. - rpcLocks := make([]*UtxoLease, len(locks)) - for idx, lock := range locks { - rpcLocks[idx] = &UtxoLease{ - Id: lock.lockID[:], - Outpoint: &lnrpc.OutPoint{ - TxidBytes: lock.outpoint.Hash[:], - TxidStr: lock.outpoint.Hash.String(), - OutputIndex: lock.outpoint.Index, - }, - Expiration: uint64(lock.expiration.Unix()), - } - } - - return &FundPsbtResponse{ - FundedPsbt: rawPsbt.Bytes(), - ChangeOutputIndex: changeIndex, - LockedUtxos: rpcLocks, - }, nil -} - -// FinalizePsbt expects a partial transaction with all inputs and outputs fully -// declared and tries to sign all inputs that belong to the wallet. Lnd must be -// the last signer of the transaction. That means, if there are any unsigned -// non-witness inputs or inputs without UTXO information attached or inputs -// without witness data that do not belong to lnd's wallet, this method will -// fail. If no error is returned, the PSBT is ready to be extracted and the -// final TX within to be broadcast. -// -// NOTE: This method does NOT publish the transaction once finalized. It is the -// caller's responsibility to either publish the transaction on success or -// unlock/release any locked UTXOs in case of an error in this method. -func (w *WalletKit) FinalizePsbt(_ context.Context, - req *FinalizePsbtRequest) (*FinalizePsbtResponse, er.R) { - - // Parse the funded PSBT. No additional checks are required at this - // level as the wallet will perform all of them. 
- packet, err := psbt.NewFromRawBytes( - bytes.NewReader(req.FundedPsbt), false, - ) - if err != nil { - return nil, er.Errorf("error parsing PSBT: %v", err) - } - - // Let the wallet do the heavy lifting. This will sign all inputs that - // we have the UTXO for. If some inputs can't be signed and don't have - // witness data attached, this will fail. - err = w.cfg.Wallet.FinalizePsbt(packet) - if err != nil { - return nil, er.Errorf("error finalizing PSBT: %v", err) - } - - var ( - finalPsbtBytes bytes.Buffer - finalTxBytes bytes.Buffer - ) - - // Serialize the finalized PSBT in both the packet and wire format. - err = packet.Serialize(&finalPsbtBytes) - if err != nil { - return nil, er.Errorf("error serializing PSBT: %v", err) - } - finalTx, err := psbt.Extract(packet) - if err != nil { - return nil, er.Errorf("unable to extract final TX: %v", err) - } - err = finalTx.Serialize(&finalTxBytes) - if err != nil { - return nil, er.Errorf("error serializing final TX: %v", err) - } - - return &FinalizePsbtResponse{ - SignedPsbt: finalPsbtBytes.Bytes(), - RawFinalTx: finalTxBytes.Bytes(), - }, nil -} diff --git a/lnd/lnrpc/walletunlocker.pb.go b/lnd/lnrpc/walletunlocker.pb.go deleted file mode 100644 index 53e6dae5..00000000 --- a/lnd/lnrpc/walletunlocker.pb.go +++ /dev/null @@ -1,828 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: walletunlocker.proto - -package lnrpc - -import ( - context "context" - fmt "fmt" - proto "github.com/golang/protobuf/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. 
-// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -type GenSeedRequest struct { - // - //aezeed_passphrase is an optional user provided passphrase that will be used - //to encrypt the generated aezeed cipher seed. When using REST, this field - //must be encoded as base64. - AezeedPassphrase []byte `protobuf:"bytes,1,opt,name=aezeed_passphrase,json=aezeedPassphrase,proto3" json:"aezeed_passphrase,omitempty"` - // - //seed_entropy is an optional 16-bytes generated via CSPRNG. If not - //specified, then a fresh set of randomness will be used to create the seed. - //When using REST, this field must be encoded as base64. - SeedEntropy []byte `protobuf:"bytes,2,opt,name=seed_entropy,json=seedEntropy,proto3" json:"seed_entropy,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GenSeedRequest) Reset() { *m = GenSeedRequest{} } -func (m *GenSeedRequest) String() string { return proto.CompactTextString(m) } -func (*GenSeedRequest) ProtoMessage() {} -func (*GenSeedRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_76e3ed10ed53e4fd, []int{0} -} - -func (m *GenSeedRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GenSeedRequest.Unmarshal(m, b) -} -func (m *GenSeedRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GenSeedRequest.Marshal(b, m, deterministic) -} -func (m *GenSeedRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GenSeedRequest.Merge(m, src) -} -func (m *GenSeedRequest) XXX_Size() int { - return xxx_messageInfo_GenSeedRequest.Size(m) -} -func (m *GenSeedRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GenSeedRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GenSeedRequest proto.InternalMessageInfo - -func (m *GenSeedRequest) GetAezeedPassphrase() []byte { - if m != 
nil { - return m.AezeedPassphrase - } - return nil -} - -func (m *GenSeedRequest) GetSeedEntropy() []byte { - if m != nil { - return m.SeedEntropy - } - return nil -} - -type GenSeedResponse struct { - // - //cipher_seed_mnemonic is a 24-word mnemonic that encodes a prior aezeed - //cipher seed obtained by the user. This field is optional, as if not - //provided, then the daemon will generate a new cipher seed for the user. - //Otherwise, then the daemon will attempt to recover the wallet state linked - //to this cipher seed. - CipherSeedMnemonic []string `protobuf:"bytes,1,rep,name=cipher_seed_mnemonic,json=cipherSeedMnemonic,proto3" json:"cipher_seed_mnemonic,omitempty"` - // - //enciphered_seed are the raw aezeed cipher seed bytes. This is the raw - //cipher text before run through our mnemonic encoding scheme. - EncipheredSeed []byte `protobuf:"bytes,2,opt,name=enciphered_seed,json=encipheredSeed,proto3" json:"enciphered_seed,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GenSeedResponse) Reset() { *m = GenSeedResponse{} } -func (m *GenSeedResponse) String() string { return proto.CompactTextString(m) } -func (*GenSeedResponse) ProtoMessage() {} -func (*GenSeedResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_76e3ed10ed53e4fd, []int{1} -} - -func (m *GenSeedResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GenSeedResponse.Unmarshal(m, b) -} -func (m *GenSeedResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GenSeedResponse.Marshal(b, m, deterministic) -} -func (m *GenSeedResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GenSeedResponse.Merge(m, src) -} -func (m *GenSeedResponse) XXX_Size() int { - return xxx_messageInfo_GenSeedResponse.Size(m) -} -func (m *GenSeedResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GenSeedResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GenSeedResponse 
proto.InternalMessageInfo - -func (m *GenSeedResponse) GetCipherSeedMnemonic() []string { - if m != nil { - return m.CipherSeedMnemonic - } - return nil -} - -func (m *GenSeedResponse) GetEncipheredSeed() []byte { - if m != nil { - return m.EncipheredSeed - } - return nil -} - -type InitWalletRequest struct { - // - //wallet_password is the passphrase that should be used to encrypt the - //wallet. This MUST be at least 8 chars in length. After creation, this - //password is required to unlock the daemon. When using REST, this field - //must be encoded as base64. - WalletPassword []byte `protobuf:"bytes,1,opt,name=wallet_password,json=walletPassword,proto3" json:"wallet_password,omitempty"` - // - //cipher_seed_mnemonic is a 24-word mnemonic that encodes a prior aezeed - //cipher seed obtained by the user. This may have been generated by the - //GenSeed method, or be an existing seed. - CipherSeedMnemonic []string `protobuf:"bytes,2,rep,name=cipher_seed_mnemonic,json=cipherSeedMnemonic,proto3" json:"cipher_seed_mnemonic,omitempty"` - // - //aezeed_passphrase is an optional user provided passphrase that will be used - //to encrypt the generated aezeed cipher seed. When using REST, this field - //must be encoded as base64. - AezeedPassphrase []byte `protobuf:"bytes,3,opt,name=aezeed_passphrase,json=aezeedPassphrase,proto3" json:"aezeed_passphrase,omitempty"` - // - //recovery_window is an optional argument specifying the address lookahead - //when restoring a wallet seed. The recovery window applies to each - //individual branch of the BIP44 derivation paths. Supplying a recovery - //window of zero indicates that no addresses should be recovered, such after - //the first initialization of the wallet. - RecoveryWindow int32 `protobuf:"varint,4,opt,name=recovery_window,json=recoveryWindow,proto3" json:"recovery_window,omitempty"` - // - //channel_backups is an optional argument that allows clients to recover the - //settled funds within a set of channels. 
This should be populated if the - //user was unable to close out all channels and sweep funds before partial or - //total data loss occurred. If specified, then after on-chain recovery of - //funds, lnd begin to carry out the data loss recovery protocol in order to - //recover the funds in each channel from a remote force closed transaction. - ChannelBackups *ChanBackupSnapshot `protobuf:"bytes,5,opt,name=channel_backups,json=channelBackups,proto3" json:"channel_backups,omitempty"` - // - //stateless_init is an optional argument instructing the daemon NOT to create - //any *.macaroon files in its filesystem. If this parameter is set, then the - //admin macaroon returned in the response MUST be stored by the caller of the - //RPC as otherwise all access to the daemon will be lost! - StatelessInit bool `protobuf:"varint,6,opt,name=stateless_init,json=statelessInit,proto3" json:"stateless_init,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *InitWalletRequest) Reset() { *m = InitWalletRequest{} } -func (m *InitWalletRequest) String() string { return proto.CompactTextString(m) } -func (*InitWalletRequest) ProtoMessage() {} -func (*InitWalletRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_76e3ed10ed53e4fd, []int{2} -} - -func (m *InitWalletRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_InitWalletRequest.Unmarshal(m, b) -} -func (m *InitWalletRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_InitWalletRequest.Marshal(b, m, deterministic) -} -func (m *InitWalletRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_InitWalletRequest.Merge(m, src) -} -func (m *InitWalletRequest) XXX_Size() int { - return xxx_messageInfo_InitWalletRequest.Size(m) -} -func (m *InitWalletRequest) XXX_DiscardUnknown() { - xxx_messageInfo_InitWalletRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_InitWalletRequest 
proto.InternalMessageInfo - -func (m *InitWalletRequest) GetWalletPassword() []byte { - if m != nil { - return m.WalletPassword - } - return nil -} - -func (m *InitWalletRequest) GetCipherSeedMnemonic() []string { - if m != nil { - return m.CipherSeedMnemonic - } - return nil -} - -func (m *InitWalletRequest) GetAezeedPassphrase() []byte { - if m != nil { - return m.AezeedPassphrase - } - return nil -} - -func (m *InitWalletRequest) GetRecoveryWindow() int32 { - if m != nil { - return m.RecoveryWindow - } - return 0 -} - -func (m *InitWalletRequest) GetChannelBackups() *ChanBackupSnapshot { - if m != nil { - return m.ChannelBackups - } - return nil -} - -func (m *InitWalletRequest) GetStatelessInit() bool { - if m != nil { - return m.StatelessInit - } - return false -} - -type InitWalletResponse struct { - // - //The binary serialized admin macaroon that can be used to access the daemon - //after creating the wallet. If the stateless_init parameter was set to true, - //this is the ONLY copy of the macaroon and MUST be stored safely by the - //caller. Otherwise a copy of this macaroon is also persisted on disk by the - //daemon, together with other macaroon files. 
- AdminMacaroon []byte `protobuf:"bytes,1,opt,name=admin_macaroon,json=adminMacaroon,proto3" json:"admin_macaroon,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *InitWalletResponse) Reset() { *m = InitWalletResponse{} } -func (m *InitWalletResponse) String() string { return proto.CompactTextString(m) } -func (*InitWalletResponse) ProtoMessage() {} -func (*InitWalletResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_76e3ed10ed53e4fd, []int{3} -} - -func (m *InitWalletResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_InitWalletResponse.Unmarshal(m, b) -} -func (m *InitWalletResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_InitWalletResponse.Marshal(b, m, deterministic) -} -func (m *InitWalletResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_InitWalletResponse.Merge(m, src) -} -func (m *InitWalletResponse) XXX_Size() int { - return xxx_messageInfo_InitWalletResponse.Size(m) -} -func (m *InitWalletResponse) XXX_DiscardUnknown() { - xxx_messageInfo_InitWalletResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_InitWalletResponse proto.InternalMessageInfo - -func (m *InitWalletResponse) GetAdminMacaroon() []byte { - if m != nil { - return m.AdminMacaroon - } - return nil -} - -type UnlockWalletRequest struct { - // - //wallet_password should be the current valid passphrase for the daemon. This - //will be required to decrypt on-disk material that the daemon requires to - //function properly. When using REST, this field must be encoded as base64. - WalletPassword []byte `protobuf:"bytes,1,opt,name=wallet_password,json=walletPassword,proto3" json:"wallet_password,omitempty"` - // - //recovery_window is an optional argument specifying the address lookahead - //when restoring a wallet seed. The recovery window applies to each - //individual branch of the BIP44 derivation paths. 
Supplying a recovery - //window of zero indicates that no addresses should be recovered, such after - //the first initialization of the wallet. - RecoveryWindow int32 `protobuf:"varint,2,opt,name=recovery_window,json=recoveryWindow,proto3" json:"recovery_window,omitempty"` - // - //channel_backups is an optional argument that allows clients to recover the - //settled funds within a set of channels. This should be populated if the - //user was unable to close out all channels and sweep funds before partial or - //total data loss occurred. If specified, then after on-chain recovery of - //funds, lnd begin to carry out the data loss recovery protocol in order to - //recover the funds in each channel from a remote force closed transaction. - ChannelBackups *ChanBackupSnapshot `protobuf:"bytes,3,opt,name=channel_backups,json=channelBackups,proto3" json:"channel_backups,omitempty"` - // - //stateless_init is an optional argument instructing the daemon NOT to create - //any *.macaroon files in its file system. 
- StatelessInit bool `protobuf:"varint,4,opt,name=stateless_init,json=statelessInit,proto3" json:"stateless_init,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UnlockWalletRequest) Reset() { *m = UnlockWalletRequest{} } -func (m *UnlockWalletRequest) String() string { return proto.CompactTextString(m) } -func (*UnlockWalletRequest) ProtoMessage() {} -func (*UnlockWalletRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_76e3ed10ed53e4fd, []int{4} -} - -func (m *UnlockWalletRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_UnlockWalletRequest.Unmarshal(m, b) -} -func (m *UnlockWalletRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_UnlockWalletRequest.Marshal(b, m, deterministic) -} -func (m *UnlockWalletRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_UnlockWalletRequest.Merge(m, src) -} -func (m *UnlockWalletRequest) XXX_Size() int { - return xxx_messageInfo_UnlockWalletRequest.Size(m) -} -func (m *UnlockWalletRequest) XXX_DiscardUnknown() { - xxx_messageInfo_UnlockWalletRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_UnlockWalletRequest proto.InternalMessageInfo - -func (m *UnlockWalletRequest) GetWalletPassword() []byte { - if m != nil { - return m.WalletPassword - } - return nil -} - -func (m *UnlockWalletRequest) GetRecoveryWindow() int32 { - if m != nil { - return m.RecoveryWindow - } - return 0 -} - -func (m *UnlockWalletRequest) GetChannelBackups() *ChanBackupSnapshot { - if m != nil { - return m.ChannelBackups - } - return nil -} - -func (m *UnlockWalletRequest) GetStatelessInit() bool { - if m != nil { - return m.StatelessInit - } - return false -} - -type UnlockWalletResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UnlockWalletResponse) Reset() { *m = UnlockWalletResponse{} } -func (m 
*UnlockWalletResponse) String() string { return proto.CompactTextString(m) } -func (*UnlockWalletResponse) ProtoMessage() {} -func (*UnlockWalletResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_76e3ed10ed53e4fd, []int{5} -} - -func (m *UnlockWalletResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_UnlockWalletResponse.Unmarshal(m, b) -} -func (m *UnlockWalletResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_UnlockWalletResponse.Marshal(b, m, deterministic) -} -func (m *UnlockWalletResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_UnlockWalletResponse.Merge(m, src) -} -func (m *UnlockWalletResponse) XXX_Size() int { - return xxx_messageInfo_UnlockWalletResponse.Size(m) -} -func (m *UnlockWalletResponse) XXX_DiscardUnknown() { - xxx_messageInfo_UnlockWalletResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_UnlockWalletResponse proto.InternalMessageInfo - -type ChangePasswordRequest struct { - // - //current_password should be the current valid passphrase used to unlock the - //daemon. When using REST, this field must be encoded as base64. - CurrentPassword []byte `protobuf:"bytes,1,opt,name=current_password,json=currentPassword,proto3" json:"current_password,omitempty"` - // - //new_password should be the new passphrase that will be needed to unlock the - //daemon. When using REST, this field must be encoded as base64. - NewPassword []byte `protobuf:"bytes,2,opt,name=new_password,json=newPassword,proto3" json:"new_password,omitempty"` - // - //stateless_init is an optional argument instructing the daemon NOT to create - //any *.macaroon files in its filesystem. If this parameter is set, then the - //admin macaroon returned in the response MUST be stored by the caller of the - //RPC as otherwise all access to the daemon will be lost! 
- StatelessInit bool `protobuf:"varint,3,opt,name=stateless_init,json=statelessInit,proto3" json:"stateless_init,omitempty"` - // - //new_macaroon_root_key is an optional argument instructing the daemon to - //rotate the macaroon root key when set to true. This will invalidate all - //previously generated macaroons. - NewMacaroonRootKey bool `protobuf:"varint,4,opt,name=new_macaroon_root_key,json=newMacaroonRootKey,proto3" json:"new_macaroon_root_key,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ChangePasswordRequest) Reset() { *m = ChangePasswordRequest{} } -func (m *ChangePasswordRequest) String() string { return proto.CompactTextString(m) } -func (*ChangePasswordRequest) ProtoMessage() {} -func (*ChangePasswordRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_76e3ed10ed53e4fd, []int{6} -} - -func (m *ChangePasswordRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ChangePasswordRequest.Unmarshal(m, b) -} -func (m *ChangePasswordRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ChangePasswordRequest.Marshal(b, m, deterministic) -} -func (m *ChangePasswordRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ChangePasswordRequest.Merge(m, src) -} -func (m *ChangePasswordRequest) XXX_Size() int { - return xxx_messageInfo_ChangePasswordRequest.Size(m) -} -func (m *ChangePasswordRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ChangePasswordRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ChangePasswordRequest proto.InternalMessageInfo - -func (m *ChangePasswordRequest) GetCurrentPassword() []byte { - if m != nil { - return m.CurrentPassword - } - return nil -} - -func (m *ChangePasswordRequest) GetNewPassword() []byte { - if m != nil { - return m.NewPassword - } - return nil -} - -func (m *ChangePasswordRequest) GetStatelessInit() bool { - if m != nil { - return m.StatelessInit - } - return 
false -} - -func (m *ChangePasswordRequest) GetNewMacaroonRootKey() bool { - if m != nil { - return m.NewMacaroonRootKey - } - return false -} - -type ChangePasswordResponse struct { - // - //The binary serialized admin macaroon that can be used to access the daemon - //after rotating the macaroon root key. If both the stateless_init and - //new_macaroon_root_key parameter were set to true, this is the ONLY copy of - //the macaroon that was created from the new root key and MUST be stored - //safely by the caller. Otherwise a copy of this macaroon is also persisted on - //disk by the daemon, together with other macaroon files. - AdminMacaroon []byte `protobuf:"bytes,1,opt,name=admin_macaroon,json=adminMacaroon,proto3" json:"admin_macaroon,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ChangePasswordResponse) Reset() { *m = ChangePasswordResponse{} } -func (m *ChangePasswordResponse) String() string { return proto.CompactTextString(m) } -func (*ChangePasswordResponse) ProtoMessage() {} -func (*ChangePasswordResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_76e3ed10ed53e4fd, []int{7} -} - -func (m *ChangePasswordResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ChangePasswordResponse.Unmarshal(m, b) -} -func (m *ChangePasswordResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ChangePasswordResponse.Marshal(b, m, deterministic) -} -func (m *ChangePasswordResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ChangePasswordResponse.Merge(m, src) -} -func (m *ChangePasswordResponse) XXX_Size() int { - return xxx_messageInfo_ChangePasswordResponse.Size(m) -} -func (m *ChangePasswordResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ChangePasswordResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ChangePasswordResponse proto.InternalMessageInfo - -func (m *ChangePasswordResponse) GetAdminMacaroon() 
[]byte { - if m != nil { - return m.AdminMacaroon - } - return nil -} - -func init() { - proto.RegisterType((*GenSeedRequest)(nil), "lnrpc.GenSeedRequest") - proto.RegisterType((*GenSeedResponse)(nil), "lnrpc.GenSeedResponse") - proto.RegisterType((*InitWalletRequest)(nil), "lnrpc.InitWalletRequest") - proto.RegisterType((*InitWalletResponse)(nil), "lnrpc.InitWalletResponse") - proto.RegisterType((*UnlockWalletRequest)(nil), "lnrpc.UnlockWalletRequest") - proto.RegisterType((*UnlockWalletResponse)(nil), "lnrpc.UnlockWalletResponse") - proto.RegisterType((*ChangePasswordRequest)(nil), "lnrpc.ChangePasswordRequest") - proto.RegisterType((*ChangePasswordResponse)(nil), "lnrpc.ChangePasswordResponse") -} - -func init() { proto.RegisterFile("walletunlocker.proto", fileDescriptor_76e3ed10ed53e4fd) } - -var fileDescriptor_76e3ed10ed53e4fd = []byte{ - // 599 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x94, 0xdf, 0x6a, 0xd4, 0x4e, - 0x14, 0xc7, 0xc9, 0x6e, 0xdb, 0xdf, 0xaf, 0xa7, 0xdb, 0x6c, 0x3b, 0xb6, 0x25, 0x8d, 0x0a, 0xdb, - 0x60, 0xe9, 0x8a, 0xb8, 0xd5, 0x7a, 0x23, 0x78, 0x21, 0x56, 0xa4, 0x88, 0x14, 0x4a, 0x4a, 0x29, - 0x78, 0x13, 0xa7, 0x93, 0x43, 0x13, 0x36, 0x3b, 0x33, 0xce, 0xcc, 0x1a, 0xd6, 0xf7, 0xf1, 0xda, - 0x47, 0xf0, 0x1d, 0x7c, 0x22, 0x49, 0x32, 0xd9, 0x76, 0xbb, 0x59, 0xf0, 0xcf, 0x45, 0x20, 0x7c, - 0xce, 0x39, 0x33, 0xe7, 0xfb, 0x3d, 0x33, 0x03, 0x5b, 0x39, 0xcd, 0x32, 0x34, 0x63, 0x9e, 0x09, - 0x36, 0x44, 0x35, 0x90, 0x4a, 0x18, 0x41, 0x96, 0x33, 0xae, 0x24, 0xf3, 0x57, 0x95, 0x64, 0x15, - 0x09, 0x3e, 0x81, 0x7b, 0x82, 0xfc, 0x1c, 0x31, 0x0e, 0xf1, 0xf3, 0x18, 0xb5, 0x21, 0x4f, 0x60, - 0x93, 0xe2, 0x57, 0xc4, 0x38, 0x92, 0x54, 0x6b, 0x99, 0x28, 0xaa, 0xd1, 0x73, 0x7a, 0x4e, 0xbf, - 0x13, 0x6e, 0x54, 0x81, 0xb3, 0x29, 0x27, 0x7b, 0xd0, 0xd1, 0x45, 0x2a, 0x72, 0xa3, 0x84, 0x9c, - 0x78, 0xad, 0x32, 0x6f, 0xad, 0x60, 0xef, 0x2a, 0x14, 0x64, 0xd0, 0x9d, 0xee, 0xa0, 0xa5, 0xe0, - 0x1a, 0xc9, 0x33, 
0xd8, 0x62, 0xa9, 0x4c, 0x50, 0x45, 0x65, 0xf1, 0x88, 0xe3, 0x48, 0xf0, 0x94, - 0x79, 0x4e, 0xaf, 0xdd, 0x5f, 0x0d, 0x49, 0x15, 0x2b, 0x2a, 0x4e, 0x6d, 0x84, 0x1c, 0x40, 0x17, - 0x79, 0xc5, 0x31, 0x2e, 0xab, 0xec, 0x56, 0xee, 0x0d, 0x2e, 0x0a, 0x82, 0xef, 0x2d, 0xd8, 0x7c, - 0xcf, 0x53, 0x73, 0x59, 0xca, 0xaf, 0x35, 0x1d, 0x40, 0xb7, 0xf2, 0xa3, 0xd4, 0x94, 0x0b, 0x15, - 0x5b, 0x45, 0x6e, 0x85, 0xcf, 0x2c, 0x5d, 0xd8, 0x59, 0x6b, 0x61, 0x67, 0x8d, 0x76, 0xb5, 0x17, - 0xd8, 0x75, 0x00, 0x5d, 0x85, 0x4c, 0x7c, 0x41, 0x35, 0x89, 0xf2, 0x94, 0xc7, 0x22, 0xf7, 0x96, - 0x7a, 0x4e, 0x7f, 0x39, 0x74, 0x6b, 0x7c, 0x59, 0x52, 0x72, 0x0c, 0x5d, 0x96, 0x50, 0xce, 0x31, - 0x8b, 0xae, 0x28, 0x1b, 0x8e, 0xa5, 0xf6, 0x96, 0x7b, 0x4e, 0x7f, 0xed, 0x68, 0x77, 0x50, 0x8e, - 0x70, 0xf0, 0x36, 0xa1, 0xfc, 0xb8, 0x8c, 0x9c, 0x73, 0x2a, 0x75, 0x22, 0x4c, 0xe8, 0xda, 0x8a, - 0x0a, 0x6b, 0xb2, 0x0f, 0xae, 0x36, 0xd4, 0x60, 0x86, 0x5a, 0x47, 0x29, 0x4f, 0x8d, 0xb7, 0xd2, - 0x73, 0xfa, 0xff, 0x87, 0xeb, 0x53, 0x5a, 0x18, 0x15, 0xbc, 0x02, 0x72, 0xdb, 0x30, 0x3b, 0xa2, - 0x7d, 0x70, 0x69, 0x3c, 0x4a, 0x79, 0x34, 0xa2, 0x8c, 0x2a, 0x21, 0xb8, 0x35, 0x6c, 0xbd, 0xa4, - 0xa7, 0x16, 0x06, 0x3f, 0x1d, 0xb8, 0x77, 0x51, 0x9e, 0xb1, 0xbf, 0x34, 0xbc, 0xc1, 0x91, 0xd6, - 0xef, 0x3a, 0xd2, 0xfe, 0x77, 0x47, 0x96, 0x9a, 0x1c, 0xd9, 0x81, 0xad, 0x59, 0x4d, 0x95, 0x27, - 0xc1, 0x0f, 0x07, 0xb6, 0x8b, 0x5d, 0xae, 0xb1, 0x6e, 0xbf, 0x96, 0xfb, 0x18, 0x36, 0xd8, 0x58, - 0x29, 0xe4, 0x73, 0x7a, 0xbb, 0x96, 0x4f, 0x05, 0xef, 0x41, 0x87, 0x63, 0x7e, 0x93, 0x66, 0x6f, - 0x0c, 0xc7, 0x7c, 0x9a, 0x32, 0xdf, 0x66, 0xbb, 0xa1, 0x4d, 0xf2, 0x1c, 0xb6, 0x8b, 0x95, 0xea, - 0x01, 0x45, 0x4a, 0x08, 0x13, 0x0d, 0x71, 0x62, 0x45, 0x11, 0x8e, 0x79, 0x3d, 0xa7, 0x50, 0x08, - 0xf3, 0x01, 0x27, 0xc1, 0x6b, 0xd8, 0xb9, 0x2b, 0xe0, 0x8f, 0xe6, 0x7d, 0xf4, 0xad, 0x05, 0x6e, - 0xe5, 0xca, 0x85, 0x7d, 0x59, 0xc8, 0x4b, 0xf8, 0xcf, 0xde, 0x6f, 0xb2, 0x6d, 0x47, 0x31, 0xfb, - 0xa2, 0xf8, 0x3b, 0x77, 0xb1, 0xdd, 0xf3, 0x0d, 0xc0, 
0xcd, 0xc9, 0x23, 0x9e, 0xcd, 0x9a, 0xbb, - 0xbd, 0xfe, 0x6e, 0x43, 0xc4, 0x2e, 0x71, 0x02, 0x9d, 0xdb, 0xa3, 0x22, 0xbe, 0x4d, 0x6d, 0x38, - 0x93, 0xfe, 0xfd, 0xc6, 0x98, 0x5d, 0xe8, 0x14, 0xdc, 0x59, 0x67, 0xc8, 0x83, 0x5b, 0xe7, 0x6a, - 0x6e, 0xe2, 0xfe, 0xc3, 0x05, 0xd1, 0x6a, 0xb9, 0xe3, 0x47, 0x1f, 0x83, 0xeb, 0xd4, 0x24, 0xe3, - 0xab, 0x01, 0x13, 0xa3, 0x43, 0x39, 0x34, 0x4f, 0x19, 0xd5, 0x49, 0xf1, 0x13, 0x1f, 0x66, 0xbc, - 0xf8, 0x94, 0x64, 0x57, 0x2b, 0xe5, 0x1b, 0xfc, 0xe2, 0x57, 0x00, 0x00, 0x00, 0xff, 0xff, 0xeb, - 0x10, 0x06, 0xea, 0xad, 0x05, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// WalletUnlockerClient is the client API for WalletUnlocker service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type WalletUnlockerClient interface { - // - //GenSeed is the first method that should be used to instantiate a new lnd - //instance. This method allows a caller to generate a new aezeed cipher seed - //given an optional passphrase. If provided, the passphrase will be necessary - //to decrypt the cipherseed to expose the internal wallet seed. - // - //Once the cipherseed is obtained and verified by the user, the InitWallet - //method should be used to commit the newly generated seed, and create the - //wallet. - GenSeed(ctx context.Context, in *GenSeedRequest, opts ...grpc.CallOption) (*GenSeedResponse, error) - // - //InitWallet is used when lnd is starting up for the first time to fully - //initialize the daemon and its internal wallet. At the very least a wallet - //password must be provided. This will be used to encrypt sensitive material - //on disk. 
- // - //In the case of a recovery scenario, the user can also specify their aezeed - //mnemonic and passphrase. If set, then the daemon will use this prior state - //to initialize its internal wallet. - // - //Alternatively, this can be used along with the GenSeed RPC to obtain a - //seed, then present it to the user. Once it has been verified by the user, - //the seed can be fed into this RPC in order to commit the new wallet. - InitWallet(ctx context.Context, in *InitWalletRequest, opts ...grpc.CallOption) (*InitWalletResponse, error) - // lncli: `unlock` - //UnlockWallet is used at startup of lnd to provide a password to unlock - //the wallet database. - UnlockWallet(ctx context.Context, in *UnlockWalletRequest, opts ...grpc.CallOption) (*UnlockWalletResponse, error) - // lncli: `changepassword` - //ChangePassword changes the password of the encrypted wallet. This will - //automatically unlock the wallet database if successful. - ChangePassword(ctx context.Context, in *ChangePasswordRequest, opts ...grpc.CallOption) (*ChangePasswordResponse, error) -} - -type walletUnlockerClient struct { - cc *grpc.ClientConn -} - -func NewWalletUnlockerClient(cc *grpc.ClientConn) WalletUnlockerClient { - return &walletUnlockerClient{cc} -} - -func (c *walletUnlockerClient) GenSeed(ctx context.Context, in *GenSeedRequest, opts ...grpc.CallOption) (*GenSeedResponse, error) { - out := new(GenSeedResponse) - err := c.cc.Invoke(ctx, "/lnrpc.WalletUnlocker/GenSeed", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *walletUnlockerClient) InitWallet(ctx context.Context, in *InitWalletRequest, opts ...grpc.CallOption) (*InitWalletResponse, error) { - out := new(InitWalletResponse) - err := c.cc.Invoke(ctx, "/lnrpc.WalletUnlocker/InitWallet", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *walletUnlockerClient) UnlockWallet(ctx context.Context, in *UnlockWalletRequest, opts ...grpc.CallOption) (*UnlockWalletResponse, error) { - out := new(UnlockWalletResponse) - err := c.cc.Invoke(ctx, "/lnrpc.WalletUnlocker/UnlockWallet", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *walletUnlockerClient) ChangePassword(ctx context.Context, in *ChangePasswordRequest, opts ...grpc.CallOption) (*ChangePasswordResponse, error) { - out := new(ChangePasswordResponse) - err := c.cc.Invoke(ctx, "/lnrpc.WalletUnlocker/ChangePassword", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// WalletUnlockerServer is the server API for WalletUnlocker service. -type WalletUnlockerServer interface { - // - //GenSeed is the first method that should be used to instantiate a new lnd - //instance. This method allows a caller to generate a new aezeed cipher seed - //given an optional passphrase. If provided, the passphrase will be necessary - //to decrypt the cipherseed to expose the internal wallet seed. - // - //Once the cipherseed is obtained and verified by the user, the InitWallet - //method should be used to commit the newly generated seed, and create the - //wallet. - GenSeed(context.Context, *GenSeedRequest) (*GenSeedResponse, error) - // - //InitWallet is used when lnd is starting up for the first time to fully - //initialize the daemon and its internal wallet. At the very least a wallet - //password must be provided. This will be used to encrypt sensitive material - //on disk. - // - //In the case of a recovery scenario, the user can also specify their aezeed - //mnemonic and passphrase. If set, then the daemon will use this prior state - //to initialize its internal wallet. - // - //Alternatively, this can be used along with the GenSeed RPC to obtain a - //seed, then present it to the user. 
Once it has been verified by the user, - //the seed can be fed into this RPC in order to commit the new wallet. - InitWallet(context.Context, *InitWalletRequest) (*InitWalletResponse, error) - // lncli: `unlock` - //UnlockWallet is used at startup of lnd to provide a password to unlock - //the wallet database. - UnlockWallet(context.Context, *UnlockWalletRequest) (*UnlockWalletResponse, error) - // lncli: `changepassword` - //ChangePassword changes the password of the encrypted wallet. This will - //automatically unlock the wallet database if successful. - ChangePassword(context.Context, *ChangePasswordRequest) (*ChangePasswordResponse, error) -} - -// UnimplementedWalletUnlockerServer can be embedded to have forward compatible implementations. -type UnimplementedWalletUnlockerServer struct { -} - -func (*UnimplementedWalletUnlockerServer) GenSeed(ctx context.Context, req *GenSeedRequest) (*GenSeedResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GenSeed not implemented") -} -func (*UnimplementedWalletUnlockerServer) InitWallet(ctx context.Context, req *InitWalletRequest) (*InitWalletResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method InitWallet not implemented") -} -func (*UnimplementedWalletUnlockerServer) UnlockWallet(ctx context.Context, req *UnlockWalletRequest) (*UnlockWalletResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method UnlockWallet not implemented") -} -func (*UnimplementedWalletUnlockerServer) ChangePassword(ctx context.Context, req *ChangePasswordRequest) (*ChangePasswordResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ChangePassword not implemented") -} - -func RegisterWalletUnlockerServer(s *grpc.Server, srv WalletUnlockerServer) { - s.RegisterService(&_WalletUnlocker_serviceDesc, srv) -} - -func _WalletUnlocker_GenSeed_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, 
error) { - in := new(GenSeedRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(WalletUnlockerServer).GenSeed(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lnrpc.WalletUnlocker/GenSeed", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(WalletUnlockerServer).GenSeed(ctx, req.(*GenSeedRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _WalletUnlocker_InitWallet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(InitWalletRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(WalletUnlockerServer).InitWallet(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lnrpc.WalletUnlocker/InitWallet", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(WalletUnlockerServer).InitWallet(ctx, req.(*InitWalletRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _WalletUnlocker_UnlockWallet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UnlockWalletRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(WalletUnlockerServer).UnlockWallet(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lnrpc.WalletUnlocker/UnlockWallet", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(WalletUnlockerServer).UnlockWallet(ctx, req.(*UnlockWalletRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _WalletUnlocker_ChangePassword_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := 
new(ChangePasswordRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(WalletUnlockerServer).ChangePassword(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/lnrpc.WalletUnlocker/ChangePassword", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(WalletUnlockerServer).ChangePassword(ctx, req.(*ChangePasswordRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _WalletUnlocker_serviceDesc = grpc.ServiceDesc{ - ServiceName: "lnrpc.WalletUnlocker", - HandlerType: (*WalletUnlockerServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "GenSeed", - Handler: _WalletUnlocker_GenSeed_Handler, - }, - { - MethodName: "InitWallet", - Handler: _WalletUnlocker_InitWallet_Handler, - }, - { - MethodName: "UnlockWallet", - Handler: _WalletUnlocker_UnlockWallet_Handler, - }, - { - MethodName: "ChangePassword", - Handler: _WalletUnlocker_ChangePassword_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "walletunlocker.proto", -} diff --git a/lnd/lnrpc/walletunlocker.pb.gw.go b/lnd/lnrpc/walletunlocker.pb.gw.go deleted file mode 100644 index 06a0ce90..00000000 --- a/lnd/lnrpc/walletunlocker.pb.gw.go +++ /dev/null @@ -1,396 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. -// source: walletunlocker.proto - -/* -Package lnrpc is a reverse proxy. - -It translates gRPC into RESTful JSON APIs. 
-*/ -package lnrpc - -import ( - "context" - "io" - "net/http" - - "github.com/golang/protobuf/descriptor" - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/status" -) - -// Suppress "imported and not used" errors -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray -var _ = descriptor.ForMessage - -var ( - filter_WalletUnlocker_GenSeed_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_WalletUnlocker_GenSeed_0(ctx context.Context, marshaler runtime.Marshaler, client WalletUnlockerClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq GenSeedRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_WalletUnlocker_GenSeed_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.GenSeed(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_WalletUnlocker_GenSeed_0(ctx context.Context, marshaler runtime.Marshaler, server WalletUnlockerServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq GenSeedRequest - var metadata runtime.ServerMetadata - - if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_WalletUnlocker_GenSeed_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.GenSeed(ctx, &protoReq) - return msg, 
metadata, err - -} - -func request_WalletUnlocker_InitWallet_0(ctx context.Context, marshaler runtime.Marshaler, client WalletUnlockerClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq InitWalletRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.InitWallet(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_WalletUnlocker_InitWallet_0(ctx context.Context, marshaler runtime.Marshaler, server WalletUnlockerServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq InitWalletRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.InitWallet(ctx, &protoReq) - return msg, metadata, err - -} - -func request_WalletUnlocker_UnlockWallet_0(ctx context.Context, marshaler runtime.Marshaler, client WalletUnlockerClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq UnlockWalletRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := 
marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.UnlockWallet(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_WalletUnlocker_UnlockWallet_0(ctx context.Context, marshaler runtime.Marshaler, server WalletUnlockerServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq UnlockWalletRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.UnlockWallet(ctx, &protoReq) - return msg, metadata, err - -} - -func request_WalletUnlocker_ChangePassword_0(ctx context.Context, marshaler runtime.Marshaler, client WalletUnlockerClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ChangePasswordRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.ChangePassword(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_WalletUnlocker_ChangePassword_0(ctx context.Context, marshaler runtime.Marshaler, server WalletUnlockerServer, req *http.Request, pathParams map[string]string) 
(proto.Message, runtime.ServerMetadata, error) { - var protoReq ChangePasswordRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.ChangePassword(ctx, &protoReq) - return msg, metadata, err - -} - -// RegisterWalletUnlockerHandlerServer registers the http handlers for service WalletUnlocker to "mux". -// UnaryRPC :call WalletUnlockerServer directly. -// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -func RegisterWalletUnlockerHandlerServer(ctx context.Context, mux *runtime.ServeMux, server WalletUnlockerServer) error { - - mux.Handle("GET", pattern_WalletUnlocker_GenSeed_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_WalletUnlocker_GenSeed_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_WalletUnlocker_GenSeed_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_WalletUnlocker_InitWallet_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_WalletUnlocker_InitWallet_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_WalletUnlocker_InitWallet_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_WalletUnlocker_UnlockWallet_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_WalletUnlocker_UnlockWallet_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_WalletUnlocker_UnlockWallet_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_WalletUnlocker_ChangePassword_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_WalletUnlocker_ChangePassword_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_WalletUnlocker_ChangePassword_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -// RegisterWalletUnlockerHandlerFromEndpoint is same as RegisterWalletUnlockerHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterWalletUnlockerHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) - if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterWalletUnlockerHandler(ctx, mux, conn) -} - -// RegisterWalletUnlockerHandler registers the http handlers for service WalletUnlocker to "mux". -// The handlers forward requests to the grpc endpoint over "conn". 
-func RegisterWalletUnlockerHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterWalletUnlockerHandlerClient(ctx, mux, NewWalletUnlockerClient(conn)) -} - -// RegisterWalletUnlockerHandlerClient registers the http handlers for service WalletUnlocker -// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "WalletUnlockerClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "WalletUnlockerClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "WalletUnlockerClient" to call the correct interceptors. -func RegisterWalletUnlockerHandlerClient(ctx context.Context, mux *runtime.ServeMux, client WalletUnlockerClient) error { - - mux.Handle("GET", pattern_WalletUnlocker_GenSeed_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_WalletUnlocker_GenSeed_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_WalletUnlocker_GenSeed_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_WalletUnlocker_InitWallet_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_WalletUnlocker_InitWallet_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_WalletUnlocker_InitWallet_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_WalletUnlocker_UnlockWallet_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_WalletUnlocker_UnlockWallet_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_WalletUnlocker_UnlockWallet_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_WalletUnlocker_ChangePassword_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_WalletUnlocker_ChangePassword_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_WalletUnlocker_ChangePassword_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -var ( - pattern_WalletUnlocker_GenSeed_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "genseed"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_WalletUnlocker_InitWallet_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "initwallet"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_WalletUnlocker_UnlockWallet_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "unlockwallet"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_WalletUnlocker_ChangePassword_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "changepassword"}, "", runtime.AssumeColonVerbOpt(true))) -) - -var ( - forward_WalletUnlocker_GenSeed_0 = runtime.ForwardResponseMessage - - forward_WalletUnlocker_InitWallet_0 = runtime.ForwardResponseMessage - - forward_WalletUnlocker_UnlockWallet_0 = runtime.ForwardResponseMessage - - forward_WalletUnlocker_ChangePassword_0 = runtime.ForwardResponseMessage -) diff --git a/lnd/lnrpc/walletunlocker.proto b/lnd/lnrpc/walletunlocker.proto deleted file mode 100644 index 2844f671..00000000 --- 
a/lnd/lnrpc/walletunlocker.proto +++ /dev/null @@ -1,238 +0,0 @@ -syntax = "proto3"; - -import "rpc.proto"; - -package lnrpc; - -option go_package = "github.com/pkt-cash/pktd/lnd/lnrpc"; - -/* - * Comments in this file will be directly parsed into the API - * Documentation as descriptions of the associated method, message, or field. - * These descriptions should go right above the definition of the object, and - * can be in either block or // comment format. - * - * An RPC method can be matched to an lncli command by placing a line in the - * beginning of the description in exactly the following format: - * lncli: `methodname` - * - * Failure to specify the exact name of the command will cause documentation - * generation to fail. - * - * More information on how exactly the gRPC documentation is generated from - * this proto file can be found here: - * https://github.com/lightninglabs/lightning-api - */ - -// WalletUnlocker is a service that is used to set up a wallet password for -// lnd at first startup, and unlock a previously set up wallet. -service WalletUnlocker { - /* - GenSeed is the first method that should be used to instantiate a new lnd - instance. This method allows a caller to generate a new aezeed cipher seed - given an optional passphrase. If provided, the passphrase will be necessary - to decrypt the cipherseed to expose the internal wallet seed. - - Once the cipherseed is obtained and verified by the user, the InitWallet - method should be used to commit the newly generated seed, and create the - wallet. - */ - rpc GenSeed (GenSeedRequest) returns (GenSeedResponse); - - /* - InitWallet is used when lnd is starting up for the first time to fully - initialize the daemon and its internal wallet. At the very least a wallet - password must be provided. This will be used to encrypt sensitive material - on disk. - - In the case of a recovery scenario, the user can also specify their aezeed - mnemonic and passphrase. 
If set, then the daemon will use this prior state - to initialize its internal wallet. - - Alternatively, this can be used along with the GenSeed RPC to obtain a - seed, then present it to the user. Once it has been verified by the user, - the seed can be fed into this RPC in order to commit the new wallet. - */ - rpc InitWallet (InitWalletRequest) returns (InitWalletResponse); - - /* lncli: `unlock` - UnlockWallet is used at startup of lnd to provide a password to unlock - the wallet database. - */ - rpc UnlockWallet (UnlockWalletRequest) returns (UnlockWalletResponse); - - /* lncli: `changepassword` - ChangePassword changes the password of the encrypted wallet. This will - automatically unlock the wallet database if successful. - */ - rpc ChangePassword (ChangePasswordRequest) returns (ChangePasswordResponse); -} - -message GenSeedRequest { - /* - aezeed_passphrase is an optional user provided passphrase that will be used - to encrypt the generated aezeed cipher seed. When using REST, this field - must be encoded as base64. - */ - bytes aezeed_passphrase = 1; - - /* - seed_entropy is an optional 16-bytes generated via CSPRNG. If not - specified, then a fresh set of randomness will be used to create the seed. - When using REST, this field must be encoded as base64. - */ - bytes seed_entropy = 2; -} -message GenSeedResponse { - /* - cipher_seed_mnemonic is a 24-word mnemonic that encodes a prior aezeed - cipher seed obtained by the user. This field is optional, as if not - provided, then the daemon will generate a new cipher seed for the user. - Otherwise, then the daemon will attempt to recover the wallet state linked - to this cipher seed. - */ - repeated string cipher_seed_mnemonic = 1; - - /* - enciphered_seed are the raw aezeed cipher seed bytes. This is the raw - cipher text before run through our mnemonic encoding scheme. 
- */ - bytes enciphered_seed = 2; -} - -message InitWalletRequest { - /* - wallet_password is the passphrase that should be used to encrypt the - wallet. This MUST be at least 8 chars in length. After creation, this - password is required to unlock the daemon. When using REST, this field - must be encoded as base64. - */ - bytes wallet_password = 1; - - /* - cipher_seed_mnemonic is a 24-word mnemonic that encodes a prior aezeed - cipher seed obtained by the user. This may have been generated by the - GenSeed method, or be an existing seed. - */ - repeated string cipher_seed_mnemonic = 2; - - /* - aezeed_passphrase is an optional user provided passphrase that will be used - to encrypt the generated aezeed cipher seed. When using REST, this field - must be encoded as base64. - */ - bytes aezeed_passphrase = 3; - - /* - recovery_window is an optional argument specifying the address lookahead - when restoring a wallet seed. The recovery window applies to each - individual branch of the BIP44 derivation paths. Supplying a recovery - window of zero indicates that no addresses should be recovered, such after - the first initialization of the wallet. - */ - int32 recovery_window = 4; - - /* - channel_backups is an optional argument that allows clients to recover the - settled funds within a set of channels. This should be populated if the - user was unable to close out all channels and sweep funds before partial or - total data loss occurred. If specified, then after on-chain recovery of - funds, lnd begin to carry out the data loss recovery protocol in order to - recover the funds in each channel from a remote force closed transaction. - */ - ChanBackupSnapshot channel_backups = 5; - - /* - stateless_init is an optional argument instructing the daemon NOT to create - any *.macaroon files in its filesystem. 
If this parameter is set, then the - admin macaroon returned in the response MUST be stored by the caller of the - RPC as otherwise all access to the daemon will be lost! - */ - bool stateless_init = 6; -} -message InitWalletResponse { - /* - The binary serialized admin macaroon that can be used to access the daemon - after creating the wallet. If the stateless_init parameter was set to true, - this is the ONLY copy of the macaroon and MUST be stored safely by the - caller. Otherwise a copy of this macaroon is also persisted on disk by the - daemon, together with other macaroon files. - */ - bytes admin_macaroon = 1; -} - -message UnlockWalletRequest { - /* - wallet_password should be the current valid passphrase for the daemon. This - will be required to decrypt on-disk material that the daemon requires to - function properly. When using REST, this field must be encoded as base64. - */ - bytes wallet_password = 1; - - /* - recovery_window is an optional argument specifying the address lookahead - when restoring a wallet seed. The recovery window applies to each - individual branch of the BIP44 derivation paths. Supplying a recovery - window of zero indicates that no addresses should be recovered, such after - the first initialization of the wallet. - */ - int32 recovery_window = 2; - - /* - channel_backups is an optional argument that allows clients to recover the - settled funds within a set of channels. This should be populated if the - user was unable to close out all channels and sweep funds before partial or - total data loss occurred. If specified, then after on-chain recovery of - funds, lnd begin to carry out the data loss recovery protocol in order to - recover the funds in each channel from a remote force closed transaction. - */ - ChanBackupSnapshot channel_backups = 3; - - /* - stateless_init is an optional argument instructing the daemon NOT to create - any *.macaroon files in its file system. 
- */ - bool stateless_init = 4; -} -message UnlockWalletResponse { -} - -message ChangePasswordRequest { - /* - current_password should be the current valid passphrase used to unlock the - daemon. When using REST, this field must be encoded as base64. - */ - bytes current_password = 1; - - /* - new_password should be the new passphrase that will be needed to unlock the - daemon. When using REST, this field must be encoded as base64. - */ - bytes new_password = 2; - - /* - stateless_init is an optional argument instructing the daemon NOT to create - any *.macaroon files in its filesystem. If this parameter is set, then the - admin macaroon returned in the response MUST be stored by the caller of the - RPC as otherwise all access to the daemon will be lost! - */ - bool stateless_init = 3; - - /* - new_macaroon_root_key is an optional argument instructing the daemon to - rotate the macaroon root key when set to true. This will invalidate all - previously generated macaroons. - */ - bool new_macaroon_root_key = 4; -} -message ChangePasswordResponse { - /* - The binary serialized admin macaroon that can be used to access the daemon - after rotating the macaroon root key. If both the stateless_init and - new_macaroon_root_key parameter were set to true, this is the ONLY copy of - the macaroon that was created from the new root key and MUST be stored - safely by the caller. Otherwise a copy of this macaroon is also persisted on - disk by the daemon, together with other macaroon files. 
- */ - bytes admin_macaroon = 1; -} diff --git a/lnd/lnrpc/walletunlocker.swagger.json b/lnd/lnrpc/walletunlocker.swagger.json deleted file mode 100644 index 1071ed8f..00000000 --- a/lnd/lnrpc/walletunlocker.swagger.json +++ /dev/null @@ -1,392 +0,0 @@ -{ - "swagger": "2.0", - "info": { - "title": "walletunlocker.proto", - "version": "version not set" - }, - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "paths": { - "/v1/changepassword": { - "post": { - "summary": "lncli: `changepassword`\nChangePassword changes the password of the encrypted wallet. This will\nautomatically unlock the wallet database if successful.", - "operationId": "ChangePassword", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/lnrpcChangePasswordResponse" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/lnrpcChangePasswordRequest" - } - } - ], - "tags": [ - "WalletUnlocker" - ] - } - }, - "/v1/genseed": { - "get": { - "summary": "GenSeed is the first method that should be used to instantiate a new lnd\ninstance. This method allows a caller to generate a new aezeed cipher seed\ngiven an optional passphrase. 
If provided, the passphrase will be necessary\nto decrypt the cipherseed to expose the internal wallet seed.", - "description": "Once the cipherseed is obtained and verified by the user, the InitWallet\nmethod should be used to commit the newly generated seed, and create the\nwallet.", - "operationId": "GenSeed", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/lnrpcGenSeedResponse" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "aezeed_passphrase", - "description": "aezeed_passphrase is an optional user provided passphrase that will be used\nto encrypt the generated aezeed cipher seed. When using REST, this field\nmust be encoded as base64.", - "in": "query", - "required": false, - "type": "string", - "format": "byte" - }, - { - "name": "seed_entropy", - "description": "seed_entropy is an optional 16-bytes generated via CSPRNG. If not\nspecified, then a fresh set of randomness will be used to create the seed.\nWhen using REST, this field must be encoded as base64.", - "in": "query", - "required": false, - "type": "string", - "format": "byte" - } - ], - "tags": [ - "WalletUnlocker" - ] - } - }, - "/v1/initwallet": { - "post": { - "summary": "InitWallet is used when lnd is starting up for the first time to fully\ninitialize the daemon and its internal wallet. At the very least a wallet\npassword must be provided. This will be used to encrypt sensitive material\non disk.", - "description": "In the case of a recovery scenario, the user can also specify their aezeed\nmnemonic and passphrase. If set, then the daemon will use this prior state\nto initialize its internal wallet.\n\nAlternatively, this can be used along with the GenSeed RPC to obtain a\nseed, then present it to the user. 
Once it has been verified by the user,\nthe seed can be fed into this RPC in order to commit the new wallet.", - "operationId": "InitWallet", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/lnrpcInitWalletResponse" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/lnrpcInitWalletRequest" - } - } - ], - "tags": [ - "WalletUnlocker" - ] - } - }, - "/v1/unlockwallet": { - "post": { - "summary": "lncli: `unlock`\nUnlockWallet is used at startup of lnd to provide a password to unlock\nthe wallet database.", - "operationId": "UnlockWallet", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/lnrpcUnlockWalletResponse" - } - }, - "default": { - "description": "An unexpected error response", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/lnrpcUnlockWalletRequest" - } - } - ], - "tags": [ - "WalletUnlocker" - ] - } - } - }, - "definitions": { - "lnrpcChanBackupSnapshot": { - "type": "object", - "properties": { - "single_chan_backups": { - "$ref": "#/definitions/lnrpcChannelBackups", - "description": "The set of new channels that have been added since the last channel backup\nsnapshot was requested." - }, - "multi_chan_backup": { - "$ref": "#/definitions/lnrpcMultiChanBackup", - "description": "A multi-channel backup that covers all open channels currently known to\nlnd." - } - } - }, - "lnrpcChangePasswordRequest": { - "type": "object", - "properties": { - "current_password": { - "type": "string", - "format": "byte", - "description": "current_password should be the current valid passphrase used to unlock the\ndaemon. 
When using REST, this field must be encoded as base64." - }, - "new_password": { - "type": "string", - "format": "byte", - "description": "new_password should be the new passphrase that will be needed to unlock the\ndaemon. When using REST, this field must be encoded as base64." - }, - "stateless_init": { - "type": "boolean", - "format": "boolean", - "title": "stateless_init is an optional argument instructing the daemon NOT to create\nany *.macaroon files in its filesystem. If this parameter is set, then the\nadmin macaroon returned in the response MUST be stored by the caller of the\nRPC as otherwise all access to the daemon will be lost!" - }, - "new_macaroon_root_key": { - "type": "boolean", - "format": "boolean", - "description": "new_macaroon_root_key is an optional argument instructing the daemon to\nrotate the macaroon root key when set to true. This will invalidate all\npreviously generated macaroons." - } - } - }, - "lnrpcChangePasswordResponse": { - "type": "object", - "properties": { - "admin_macaroon": { - "type": "string", - "format": "byte", - "description": "The binary serialized admin macaroon that can be used to access the daemon\nafter rotating the macaroon root key. If both the stateless_init and\nnew_macaroon_root_key parameter were set to true, this is the ONLY copy of\nthe macaroon that was created from the new root key and MUST be stored\nsafely by the caller. Otherwise a copy of this macaroon is also persisted on\ndisk by the daemon, together with other macaroon files." - } - } - }, - "lnrpcChannelBackup": { - "type": "object", - "properties": { - "chan_point": { - "$ref": "#/definitions/lnrpcChannelPoint", - "description": "Identifies the channel that this backup belongs to." - }, - "chan_backup": { - "type": "string", - "format": "byte", - "description": "Is an encrypted single-chan backup. this can be passed to\nRestoreChannelBackups, or the WalletUnlocker Init and Unlock methods in\norder to trigger the recovery protocol. 
When using REST, this field must be\nencoded as base64." - } - } - }, - "lnrpcChannelBackups": { - "type": "object", - "properties": { - "chan_backups": { - "type": "array", - "items": { - "$ref": "#/definitions/lnrpcChannelBackup" - }, - "description": "A set of single-chan static channel backups." - } - } - }, - "lnrpcChannelPoint": { - "type": "object", - "properties": { - "funding_txid_bytes": { - "type": "string", - "format": "byte", - "description": "Txid of the funding transaction. When using REST, this field must be\nencoded as base64." - }, - "funding_txid_str": { - "type": "string", - "description": "Hex-encoded string representing the byte-reversed hash of the funding\ntransaction." - }, - "output_index": { - "type": "integer", - "format": "int64", - "title": "The index of the output of the funding transaction" - } - } - }, - "lnrpcGenSeedResponse": { - "type": "object", - "properties": { - "cipher_seed_mnemonic": { - "type": "array", - "items": { - "type": "string" - }, - "description": "cipher_seed_mnemonic is a 24-word mnemonic that encodes a prior aezeed\ncipher seed obtained by the user. This field is optional, as if not\nprovided, then the daemon will generate a new cipher seed for the user.\nOtherwise, then the daemon will attempt to recover the wallet state linked\nto this cipher seed." - }, - "enciphered_seed": { - "type": "string", - "format": "byte", - "description": "enciphered_seed are the raw aezeed cipher seed bytes. This is the raw\ncipher text before run through our mnemonic encoding scheme." - } - } - }, - "lnrpcInitWalletRequest": { - "type": "object", - "properties": { - "wallet_password": { - "type": "string", - "format": "byte", - "description": "wallet_password is the passphrase that should be used to encrypt the\nwallet. This MUST be at least 8 chars in length. After creation, this\npassword is required to unlock the daemon. When using REST, this field\nmust be encoded as base64." 
- }, - "cipher_seed_mnemonic": { - "type": "array", - "items": { - "type": "string" - }, - "description": "cipher_seed_mnemonic is a 24-word mnemonic that encodes a prior aezeed\ncipher seed obtained by the user. This may have been generated by the\nGenSeed method, or be an existing seed." - }, - "aezeed_passphrase": { - "type": "string", - "format": "byte", - "description": "aezeed_passphrase is an optional user provided passphrase that will be used\nto encrypt the generated aezeed cipher seed. When using REST, this field\nmust be encoded as base64." - }, - "recovery_window": { - "type": "integer", - "format": "int32", - "description": "recovery_window is an optional argument specifying the address lookahead\nwhen restoring a wallet seed. The recovery window applies to each\nindividual branch of the BIP44 derivation paths. Supplying a recovery\nwindow of zero indicates that no addresses should be recovered, such after\nthe first initialization of the wallet." - }, - "channel_backups": { - "$ref": "#/definitions/lnrpcChanBackupSnapshot", - "description": "channel_backups is an optional argument that allows clients to recover the\nsettled funds within a set of channels. This should be populated if the\nuser was unable to close out all channels and sweep funds before partial or\ntotal data loss occurred. If specified, then after on-chain recovery of\nfunds, lnd begin to carry out the data loss recovery protocol in order to\nrecover the funds in each channel from a remote force closed transaction." - }, - "stateless_init": { - "type": "boolean", - "format": "boolean", - "title": "stateless_init is an optional argument instructing the daemon NOT to create\nany *.macaroon files in its filesystem. If this parameter is set, then the\nadmin macaroon returned in the response MUST be stored by the caller of the\nRPC as otherwise all access to the daemon will be lost!" 
- } - } - }, - "lnrpcInitWalletResponse": { - "type": "object", - "properties": { - "admin_macaroon": { - "type": "string", - "format": "byte", - "description": "The binary serialized admin macaroon that can be used to access the daemon\nafter creating the wallet. If the stateless_init parameter was set to true,\nthis is the ONLY copy of the macaroon and MUST be stored safely by the\ncaller. Otherwise a copy of this macaroon is also persisted on disk by the\ndaemon, together with other macaroon files." - } - } - }, - "lnrpcMultiChanBackup": { - "type": "object", - "properties": { - "chan_points": { - "type": "array", - "items": { - "$ref": "#/definitions/lnrpcChannelPoint" - }, - "description": "Is the set of all channels that are included in this multi-channel backup." - }, - "multi_chan_backup": { - "type": "string", - "format": "byte", - "description": "A single encrypted blob containing all the static channel backups of the\nchannel listed above. This can be stored as a single file or blob, and\nsafely be replaced with any prior/future versions. When using REST, this\nfield must be encoded as base64." - } - } - }, - "lnrpcUnlockWalletRequest": { - "type": "object", - "properties": { - "wallet_password": { - "type": "string", - "format": "byte", - "description": "wallet_password should be the current valid passphrase for the daemon. This\nwill be required to decrypt on-disk material that the daemon requires to\nfunction properly. When using REST, this field must be encoded as base64." - }, - "recovery_window": { - "type": "integer", - "format": "int32", - "description": "recovery_window is an optional argument specifying the address lookahead\nwhen restoring a wallet seed. The recovery window applies to each\nindividual branch of the BIP44 derivation paths. Supplying a recovery\nwindow of zero indicates that no addresses should be recovered, such after\nthe first initialization of the wallet." 
- }, - "channel_backups": { - "$ref": "#/definitions/lnrpcChanBackupSnapshot", - "description": "channel_backups is an optional argument that allows clients to recover the\nsettled funds within a set of channels. This should be populated if the\nuser was unable to close out all channels and sweep funds before partial or\ntotal data loss occurred. If specified, then after on-chain recovery of\nfunds, lnd begin to carry out the data loss recovery protocol in order to\nrecover the funds in each channel from a remote force closed transaction." - }, - "stateless_init": { - "type": "boolean", - "format": "boolean", - "description": "stateless_init is an optional argument instructing the daemon NOT to create\nany *.macaroon files in its file system." - } - } - }, - "lnrpcUnlockWalletResponse": { - "type": "object" - }, - "protobufAny": { - "type": "object", - "properties": { - "type_url": { - "type": "string" - }, - "value": { - "type": "string", - "format": "byte" - } - } - }, - "runtimeError": { - "type": "object", - "properties": { - "error": { - "type": "string" - }, - "code": { - "type": "integer", - "format": "int32" - }, - "message": { - "type": "string" - }, - "details": { - "type": "array", - "items": { - "$ref": "#/definitions/protobufAny" - } - } - } - } - } -} diff --git a/lnd/lnrpc/watchtowerrpc/config_active.go b/lnd/lnrpc/watchtowerrpc/config_active.go deleted file mode 100644 index 22381626..00000000 --- a/lnd/lnrpc/watchtowerrpc/config_active.go +++ /dev/null @@ -1,16 +0,0 @@ -// +build watchtowerrpc - -package watchtowerrpc - -// Config is the primary configuration struct for the watchtower RPC server. It -// contains all items required for the RPC server to carry out its duties. The -// fields with struct tags are meant to parsed as normal configuration options, -// while if able to be populated, the latter fields MUST also be specified. -type Config struct { - // Active indicates if the watchtower is enabled. 
- Active bool - - // Tower is the active watchtower which serves as the primary source for - // information presented via RPC. - Tower WatchtowerBackend -} diff --git a/lnd/lnrpc/watchtowerrpc/config_default.go b/lnd/lnrpc/watchtowerrpc/config_default.go deleted file mode 100644 index ac40e0bf..00000000 --- a/lnd/lnrpc/watchtowerrpc/config_default.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build !watchtowerrpc - -package watchtowerrpc - -// Config is empty for non-watchtowerrpc builds. -type Config struct{} diff --git a/lnd/lnrpc/watchtowerrpc/driver.go b/lnd/lnrpc/watchtowerrpc/driver.go deleted file mode 100644 index 74eafcfe..00000000 --- a/lnd/lnrpc/watchtowerrpc/driver.go +++ /dev/null @@ -1,55 +0,0 @@ -// +build watchtowerrpc - -package watchtowerrpc - -import ( - "fmt" - - "github.com/pkt-cash/pktd/lnd/lnrpc" -) - -// createNewSubServer is a helper method that will create the new sub server -// given the main config dispatcher method. If we're unable to find the config -// that is meant for us in the config dispatcher, then we'll exit with an -// error. -func createNewSubServer(configRegistry lnrpc.SubServerConfigDispatcher) ( - lnrpc.SubServer, lnrpc.MacaroonPerms, er.R) { - - // We'll attempt to look up the config that we expect, according to our - // subServerName name. If we can't find this, then we'll exit with an - // error, as we're unable to properly initialize ourselves without this - // config. - subServerConf, ok := configRegistry.FetchConfig(subServerName) - if !ok { - return nil, nil, er.Errorf("unable to find config for "+ - "subserver type %s", subServerName) - } - - // Now that we've found an object mapping to our service name, we'll - // ensure that it's the type we need. 
- config, ok := subServerConf.(*Config) - if !ok { - return nil, nil, er.Errorf("wrong type of config for "+ - "subserver %s, expected %T got %T", subServerName, - &Config{}, subServerConf) - } - - return New(config) -} - -func init() { - subServer := &lnrpc.SubServerDriver{ - SubServerName: subServerName, - New: func(c lnrpc.SubServerConfigDispatcher) (lnrpc.SubServer, - lnrpc.MacaroonPerms, er.R) { - return createNewSubServer(c) - }, - } - - // If the build tag is active, then we'll register ourselves as a - // sub-RPC server within the global lnrpc package namespace. - if err := lnrpc.RegisterSubServer(subServer); err != nil { - panic(fmt.Sprintf("failed to register sub server driver "+ - "'%s': %v", subServerName, err)) - } -} diff --git a/lnd/lnrpc/watchtowerrpc/handler.go b/lnd/lnrpc/watchtowerrpc/handler.go deleted file mode 100644 index 2a046f66..00000000 --- a/lnd/lnrpc/watchtowerrpc/handler.go +++ /dev/null @@ -1,154 +0,0 @@ -// +build watchtowerrpc - -package watchtowerrpc - -import ( - "context" - fmt "fmt" - - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/lnrpc" - "google.golang.org/grpc" - "gopkg.in/macaroon-bakery.v2/bakery" -) - -const ( - // subServerName is the name of the sub rpc server. We'll use this name - // to register ourselves, and we also require that the main - // SubServerConfigDispatcher instance recognizes it as the name of our - // RPC service. - subServerName = "WatchtowerRPC" -) - -var ( - // macPermissions maps RPC calls to the permissions they require. - macPermissions = map[string][]bakery.Op{ - "/watchtowerrpc.Watchtower/GetInfo": {{ - Entity: "info", - Action: "read", - }}, - } - - // ErrTowerNotActive signals that RPC calls cannot be processed because - // the watchtower is not active. 
- ErrTowerNotActive = er.GenericErrorType.CodeWithDetail("ErrTowerNotActive", "watchtower not active") -) - -// Handler is the RPC server we'll use to interact with the backing active -// watchtower. -type Handler struct { - cfg Config -} - -// A compile time check to ensure that Handler fully implements the Handler gRPC -// service. -var _ WatchtowerServer = (*Handler)(nil) - -// New returns a new instance of the Watchtower sub-server. We also return the -// set of permissions for the macaroons that we may create within this method. -// If the macaroons we need aren't found in the filepath, then we'll create them -// on start up. If we're unable to locate, or create the macaroons we need, then -// we'll return with an error. -func New(cfg *Config) (*Handler, lnrpc.MacaroonPerms, er.R) { - return &Handler{*cfg}, macPermissions, nil -} - -// Start launches any helper goroutines required for the Handler to function. -// -// NOTE: This is part of the lnrpc.SubServer interface. -func (c *Handler) Start() er.R { - return nil -} - -// Stop signals any active goroutines for a graceful closure. -// -// NOTE: This is part of the lnrpc.SubServer interface. -func (c *Handler) Stop() er.R { - return nil -} - -// Name returns a unique string representation of the sub-server. This can be -// used to identify the sub-server and also de-duplicate them. -// -// NOTE: This is part of the lnrpc.SubServer interface. -func (c *Handler) Name() string { - return subServerName -} - -// RegisterWithRootServer will be called by the root gRPC server to direct a sub -// RPC server to register itself with the main gRPC root server. Until this is -// called, each sub-server won't be able to have requests routed towards it. -// -// NOTE: This is part of the lnrpc.SubServer interface. -func (c *Handler) RegisterWithRootServer(grpcServer *grpc.Server) er.R { - // We make sure that we register it with the main gRPC server to ensure - // all our methods are routed properly. 
- RegisterWatchtowerServer(grpcServer, c) - - log.Debugf("Watchtower RPC server successfully register with root " + - "gRPC server") - - return nil -} - -// RegisterWithRestServer will be called by the root REST mux to direct a sub -// RPC server to register itself with the main REST mux server. Until this is -// called, each sub-server won't be able to have requests routed towards it. -// -// NOTE: This is part of the lnrpc.SubServer interface. -func (c *Handler) RegisterWithRestServer(ctx context.Context, - mux *runtime.ServeMux, dest string, opts []grpc.DialOption) er.R { - - // We make sure that we register it with the main REST server to ensure - // all our methods are routed properly. - err := RegisterWatchtowerHandlerFromEndpoint(ctx, mux, dest, opts) - if err != nil { - log.Errorf("Could not register Watchtower REST server "+ - "with root REST server: %v", err) - return err - } - - log.Debugf("Watchtower REST server successfully registered with " + - "root REST server") - return nil -} - -// AddTower adds a new watchtower reachable at the given address and considers -// it for new sessions. If the watchtower already exists, then any new addresses -// included will be considered when dialing it for session negotiations and -// backups. -func (c *Handler) GetInfo(ctx context.Context, - req *GetInfoRequest) (*GetInfoResponse, er.R) { - - if err := c.isActive(); err != nil { - return nil, err - } - - pubkey := c.cfg.Tower.PubKey().SerializeCompressed() - - var listeners []string - for _, addr := range c.cfg.Tower.ListeningAddrs() { - listeners = append(listeners, addr.String()) - } - - var uris []string - for _, addr := range c.cfg.Tower.ExternalIPs() { - uris = append(uris, fmt.Sprintf("%x@%v", pubkey, addr)) - } - - return &GetInfoResponse{ - Pubkey: pubkey, - Listeners: listeners, - Uris: uris, - }, nil -} - -// isActive returns nil if the tower backend is initialized, and the Handler can -// proccess RPC requests. 
-func (c *Handler) isActive() er.R { - if c.cfg.Active { - return nil - } - return ErrTowerNotActive.Default() -} diff --git a/lnd/lnrpc/watchtowerrpc/interface.go b/lnd/lnrpc/watchtowerrpc/interface.go deleted file mode 100644 index 23a2d144..00000000 --- a/lnd/lnrpc/watchtowerrpc/interface.go +++ /dev/null @@ -1,23 +0,0 @@ -package watchtowerrpc - -import ( - "net" - - "github.com/pkt-cash/pktd/btcec" -) - -// WatchtowerBackend abstracts access to the watchtower information that is -// served via RPC connections. -type WatchtowerBackend interface { - // PubKey returns the public key for the watchtower used to - // authentication and encrypt traffic with clients. - PubKey() *btcec.PublicKey - - // ListeningAddrs returns the listening addresses where the watchtower - // server can accept client connections. - ListeningAddrs() []net.Addr - - // ExternalIPs returns the addresses where the watchtower can be reached - // by clients externally. - ExternalIPs() []net.Addr -} diff --git a/lnd/lnrpc/watchtowerrpc/watchtower.pb.go b/lnd/lnrpc/watchtowerrpc/watchtower.pb.go deleted file mode 100644 index d4b350ce..00000000 --- a/lnd/lnrpc/watchtowerrpc/watchtower.pb.go +++ /dev/null @@ -1,226 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: watchtowerrpc/watchtower.proto - -package watchtowerrpc - -import ( - context "context" - fmt "fmt" - proto "github.com/golang/protobuf/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -type GetInfoRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetInfoRequest) Reset() { *m = GetInfoRequest{} } -func (m *GetInfoRequest) String() string { return proto.CompactTextString(m) } -func (*GetInfoRequest) ProtoMessage() {} -func (*GetInfoRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9f019c0e859ad3d6, []int{0} -} - -func (m *GetInfoRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetInfoRequest.Unmarshal(m, b) -} -func (m *GetInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetInfoRequest.Marshal(b, m, deterministic) -} -func (m *GetInfoRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetInfoRequest.Merge(m, src) -} -func (m *GetInfoRequest) XXX_Size() int { - return xxx_messageInfo_GetInfoRequest.Size(m) -} -func (m *GetInfoRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetInfoRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetInfoRequest proto.InternalMessageInfo - -type GetInfoResponse struct { - // The public key of the watchtower. - Pubkey []byte `protobuf:"bytes,1,opt,name=pubkey,proto3" json:"pubkey,omitempty"` - // The listening addresses of the watchtower. - Listeners []string `protobuf:"bytes,2,rep,name=listeners,proto3" json:"listeners,omitempty"` - // The URIs of the watchtower. 
- Uris []string `protobuf:"bytes,3,rep,name=uris,proto3" json:"uris,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetInfoResponse) Reset() { *m = GetInfoResponse{} } -func (m *GetInfoResponse) String() string { return proto.CompactTextString(m) } -func (*GetInfoResponse) ProtoMessage() {} -func (*GetInfoResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_9f019c0e859ad3d6, []int{1} -} - -func (m *GetInfoResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetInfoResponse.Unmarshal(m, b) -} -func (m *GetInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetInfoResponse.Marshal(b, m, deterministic) -} -func (m *GetInfoResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetInfoResponse.Merge(m, src) -} -func (m *GetInfoResponse) XXX_Size() int { - return xxx_messageInfo_GetInfoResponse.Size(m) -} -func (m *GetInfoResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetInfoResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetInfoResponse proto.InternalMessageInfo - -func (m *GetInfoResponse) GetPubkey() []byte { - if m != nil { - return m.Pubkey - } - return nil -} - -func (m *GetInfoResponse) GetListeners() []string { - if m != nil { - return m.Listeners - } - return nil -} - -func (m *GetInfoResponse) GetUris() []string { - if m != nil { - return m.Uris - } - return nil -} - -func init() { - proto.RegisterType((*GetInfoRequest)(nil), "watchtowerrpc.GetInfoRequest") - proto.RegisterType((*GetInfoResponse)(nil), "watchtowerrpc.GetInfoResponse") -} - -func init() { proto.RegisterFile("watchtowerrpc/watchtower.proto", fileDescriptor_9f019c0e859ad3d6) } - -var fileDescriptor_9f019c0e859ad3d6 = []byte{ - // 205 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x2b, 0x4f, 0x2c, 0x49, - 0xce, 0x28, 0xc9, 0x2f, 0x4f, 0x2d, 0x2a, 0x2a, 0x48, 
0xd6, 0x47, 0xf0, 0xf4, 0x0a, 0x8a, 0xf2, - 0x4b, 0xf2, 0x85, 0x78, 0x51, 0xe4, 0x95, 0x04, 0xb8, 0xf8, 0xdc, 0x53, 0x4b, 0x3c, 0xf3, 0xd2, - 0xf2, 0x83, 0x52, 0x0b, 0x4b, 0x53, 0x8b, 0x4b, 0x94, 0xa2, 0xb9, 0xf8, 0xe1, 0x22, 0xc5, 0x05, - 0xf9, 0x79, 0xc5, 0xa9, 0x42, 0x62, 0x5c, 0x6c, 0x05, 0xa5, 0x49, 0xd9, 0xa9, 0x95, 0x12, 0x8c, - 0x0a, 0x8c, 0x1a, 0x3c, 0x41, 0x50, 0x9e, 0x90, 0x0c, 0x17, 0x67, 0x4e, 0x66, 0x71, 0x49, 0x6a, - 0x5e, 0x6a, 0x51, 0xb1, 0x04, 0x93, 0x02, 0xb3, 0x06, 0x67, 0x10, 0x42, 0x40, 0x48, 0x88, 0x8b, - 0xa5, 0xb4, 0x28, 0xb3, 0x58, 0x82, 0x19, 0x2c, 0x01, 0x66, 0x1b, 0x85, 0x71, 0x71, 0x85, 0xc3, - 0xed, 0x17, 0xf2, 0xe0, 0x62, 0x87, 0x5a, 0x25, 0x24, 0xab, 0x87, 0xe2, 0x2e, 0x3d, 0x54, 0x47, - 0x49, 0xc9, 0xe1, 0x92, 0x86, 0xb8, 0xd0, 0xc9, 0x28, 0xca, 0x20, 0x3d, 0xb3, 0x24, 0xa3, 0x34, - 0x49, 0x2f, 0x39, 0x3f, 0x57, 0xbf, 0x20, 0xbb, 0x44, 0x37, 0x39, 0xb1, 0x38, 0x03, 0xc4, 0x48, - 0xd1, 0xcf, 0xc9, 0x03, 0x61, 0xd4, 0xc0, 0x28, 0x2a, 0x48, 0x4e, 0x62, 0x03, 0x07, 0x88, 0x31, - 0x20, 0x00, 0x00, 0xff, 0xff, 0xc8, 0x28, 0x83, 0x0d, 0x32, 0x01, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// WatchtowerClient is the client API for Watchtower service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type WatchtowerClient interface { - // lncli: tower info - //GetInfo returns general information concerning the companion watchtower - //including its public key and URIs where the server is currently - //listening for clients. 
- GetInfo(ctx context.Context, in *GetInfoRequest, opts ...grpc.CallOption) (*GetInfoResponse, error) -} - -type watchtowerClient struct { - cc *grpc.ClientConn -} - -func NewWatchtowerClient(cc *grpc.ClientConn) WatchtowerClient { - return &watchtowerClient{cc} -} - -func (c *watchtowerClient) GetInfo(ctx context.Context, in *GetInfoRequest, opts ...grpc.CallOption) (*GetInfoResponse, error) { - out := new(GetInfoResponse) - err := c.cc.Invoke(ctx, "/watchtowerrpc.Watchtower/GetInfo", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// WatchtowerServer is the server API for Watchtower service. -type WatchtowerServer interface { - // lncli: tower info - //GetInfo returns general information concerning the companion watchtower - //including its public key and URIs where the server is currently - //listening for clients. - GetInfo(context.Context, *GetInfoRequest) (*GetInfoResponse, error) -} - -// UnimplementedWatchtowerServer can be embedded to have forward compatible implementations. 
-type UnimplementedWatchtowerServer struct { -} - -func (*UnimplementedWatchtowerServer) GetInfo(ctx context.Context, req *GetInfoRequest) (*GetInfoResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetInfo not implemented") -} - -func RegisterWatchtowerServer(s *grpc.Server, srv WatchtowerServer) { - s.RegisterService(&_Watchtower_serviceDesc, srv) -} - -func _Watchtower_GetInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetInfoRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(WatchtowerServer).GetInfo(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/watchtowerrpc.Watchtower/GetInfo", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(WatchtowerServer).GetInfo(ctx, req.(*GetInfoRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Watchtower_serviceDesc = grpc.ServiceDesc{ - ServiceName: "watchtowerrpc.Watchtower", - HandlerType: (*WatchtowerServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "GetInfo", - Handler: _Watchtower_GetInfo_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "watchtowerrpc/watchtower.proto", -} diff --git a/lnd/lnrpc/watchtowerrpc/watchtower.pb.gw.go b/lnd/lnrpc/watchtowerrpc/watchtower.pb.gw.go deleted file mode 100644 index 5fecdb72..00000000 --- a/lnd/lnrpc/watchtowerrpc/watchtower.pb.gw.go +++ /dev/null @@ -1,147 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. -// source: watchtowerrpc/watchtower.proto - -/* -Package watchtowerrpc is a reverse proxy. - -It translates gRPC into RESTful JSON APIs. 
-*/ -package watchtowerrpc - -import ( - "context" - "io" - "net/http" - - "github.com/golang/protobuf/descriptor" - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/status" -) - -// Suppress "imported and not used" errors -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray -var _ = descriptor.ForMessage - -func request_Watchtower_GetInfo_0(ctx context.Context, marshaler runtime.Marshaler, client WatchtowerClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq GetInfoRequest - var metadata runtime.ServerMetadata - - msg, err := client.GetInfo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Watchtower_GetInfo_0(ctx context.Context, marshaler runtime.Marshaler, server WatchtowerServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq GetInfoRequest - var metadata runtime.ServerMetadata - - msg, err := server.GetInfo(ctx, &protoReq) - return msg, metadata, err - -} - -// RegisterWatchtowerHandlerServer registers the http handlers for service Watchtower to "mux". -// UnaryRPC :call WatchtowerServer directly. -// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. 
-func RegisterWatchtowerHandlerServer(ctx context.Context, mux *runtime.ServeMux, server WatchtowerServer) error { - - mux.Handle("GET", pattern_Watchtower_GetInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Watchtower_GetInfo_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Watchtower_GetInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -// RegisterWatchtowerHandlerFromEndpoint is same as RegisterWatchtowerHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterWatchtowerHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) - if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterWatchtowerHandler(ctx, mux, conn) -} - -// RegisterWatchtowerHandler registers the http handlers for service Watchtower to "mux". -// The handlers forward requests to the grpc endpoint over "conn". 
-func RegisterWatchtowerHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterWatchtowerHandlerClient(ctx, mux, NewWatchtowerClient(conn)) -} - -// RegisterWatchtowerHandlerClient registers the http handlers for service Watchtower -// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "WatchtowerClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "WatchtowerClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "WatchtowerClient" to call the correct interceptors. -func RegisterWatchtowerHandlerClient(ctx context.Context, mux *runtime.ServeMux, client WatchtowerClient) error { - - mux.Handle("GET", pattern_Watchtower_GetInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Watchtower_GetInfo_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Watchtower_GetInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - return nil -} - -var ( - pattern_Watchtower_GetInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v2", "watchtower", "server"}, "", runtime.AssumeColonVerbOpt(true))) -) - -var ( - forward_Watchtower_GetInfo_0 = runtime.ForwardResponseMessage -) diff --git a/lnd/lnrpc/watchtowerrpc/watchtower.proto b/lnd/lnrpc/watchtowerrpc/watchtower.proto deleted file mode 100644 index ea752764..00000000 --- a/lnd/lnrpc/watchtowerrpc/watchtower.proto +++ /dev/null @@ -1,30 +0,0 @@ -syntax = "proto3"; - -package watchtowerrpc; - -option go_package = "github.com/pkt-cash/pktd/lnd/lnrpc/watchtowerrpc"; - -// Watchtower is a service that grants access to the watchtower server -// functionality of the daemon. -service Watchtower { - /* lncli: tower info - GetInfo returns general information concerning the companion watchtower - including its public key and URIs where the server is currently - listening for clients. - */ - rpc GetInfo (GetInfoRequest) returns (GetInfoResponse); -} - -message GetInfoRequest { -} - -message GetInfoResponse { - // The public key of the watchtower. - bytes pubkey = 1; - - // The listening addresses of the watchtower. - repeated string listeners = 2; - - // The URIs of the watchtower. 
- repeated string uris = 3; -} diff --git a/lnd/lnrpc/watchtowerrpc/watchtower.swagger.json b/lnd/lnrpc/watchtowerrpc/watchtower.swagger.json deleted file mode 100644 index 20b79cb0..00000000 --- a/lnd/lnrpc/watchtowerrpc/watchtower.swagger.json +++ /dev/null @@ -1,97 +0,0 @@ -{ - "swagger": "2.0", - "info": { - "title": "watchtowerrpc/watchtower.proto", - "version": "version not set" - }, - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "paths": { - "/v2/watchtower/server": { - "get": { - "summary": "lncli: tower info\nGetInfo returns general information concerning the companion watchtower\nincluding its public key and URIs where the server is currently\nlistening for clients.", - "operationId": "Watchtower_GetInfo", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/watchtowerrpcGetInfoResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "tags": [ - "Watchtower" - ] - } - } - }, - "definitions": { - "protobufAny": { - "type": "object", - "properties": { - "type_url": { - "type": "string" - }, - "value": { - "type": "string", - "format": "byte" - } - } - }, - "runtimeError": { - "type": "object", - "properties": { - "error": { - "type": "string" - }, - "code": { - "type": "integer", - "format": "int32" - }, - "message": { - "type": "string" - }, - "details": { - "type": "array", - "items": { - "$ref": "#/definitions/protobufAny" - } - } - } - }, - "watchtowerrpcGetInfoResponse": { - "type": "object", - "properties": { - "pubkey": { - "type": "string", - "format": "byte", - "description": "The public key of the watchtower." - }, - "listeners": { - "type": "array", - "items": { - "type": "string" - }, - "description": "The listening addresses of the watchtower." 
- }, - "uris": { - "type": "array", - "items": { - "type": "string" - }, - "description": "The URIs of the watchtower." - } - } - } - } -} diff --git a/lnd/lnrpc/websocket_proxy.go b/lnd/lnrpc/websocket_proxy.go deleted file mode 100644 index 451336ed..00000000 --- a/lnd/lnrpc/websocket_proxy.go +++ /dev/null @@ -1,352 +0,0 @@ -// The code in this file is a heavily modified version of -// https://github.com/tmc/grpc-websocket-proxy/ - -package lnrpc - -import ( - "bufio" - "io" - "net/http" - "net/textproto" - "strings" - - "github.com/gorilla/websocket" - "github.com/pkt-cash/pktd/pktlog/log" - "golang.org/x/net/context" -) - -const ( - // MethodOverrideParam is the GET query parameter that specifies what - // HTTP request method should be used for the forwarded REST request. - // This is necessary because the WebSocket API specifies that a - // handshake request must always be done through a GET request. - MethodOverrideParam = "method" - - // HeaderWebSocketProtocol is the name of the WebSocket protocol - // exchange header field that we use to transport additional header - // fields. - HeaderWebSocketProtocol = "Sec-Websocket-Protocol" - - // WebSocketProtocolDelimiter is the delimiter we use between the - // additional header field and its value. We use the plus symbol because - // the default delimiters aren't allowed in the protocol names. - WebSocketProtocolDelimiter = "+" -) - -var ( - // defaultHeadersToForward is a map of all HTTP header fields that are - // forwarded by default. The keys must be in the canonical MIME header - // format. - defaultHeadersToForward = map[string]bool{ - "Origin": true, - "Referer": true, - "Grpc-Metadata-Macaroon": true, - } - - // defaultProtocolsToAllow are additional header fields that we allow - // to be transported inside of the Sec-Websocket-Protocol field to be - // forwarded to the backend. 
- defaultProtocolsToAllow = map[string]bool{ - "Grpc-Metadata-Macaroon": true, - } -) - -// NewWebSocketProxy attempts to expose the underlying handler as a response- -// streaming WebSocket stream with newline-delimited JSON as the content -// encoding. -func NewWebSocketProxy(h http.Handler) http.Handler { - p := &WebsocketProxy{ - backend: h, - upgrader: &websocket.Upgrader{ - ReadBufferSize: 1024, - WriteBufferSize: 1024, - CheckOrigin: func(r *http.Request) bool { - return true - }, - }, - } - return p -} - -// WebsocketProxy provides websocket transport upgrade to compatible endpoints. -type WebsocketProxy struct { - backend http.Handler - upgrader *websocket.Upgrader -} - -// ServeHTTP handles the incoming HTTP request. If the request is an -// "upgradeable" WebSocket request (identified by header fields), then the -// WS proxy handles the request. Otherwise the request is passed directly to the -// underlying REST proxy. -func (p *WebsocketProxy) ServeHTTP(w http.ResponseWriter, r *http.Request) { - if !websocket.IsWebSocketUpgrade(r) { - p.backend.ServeHTTP(w, r) - return - } - p.upgradeToWebSocketProxy(w, r) -} - -// upgradeToWebSocketProxy upgrades the incoming request to a WebSocket, reads -// one incoming message then streams all responses until either the client or -// server quit the connection. 
-func (p *WebsocketProxy) upgradeToWebSocketProxy(w http.ResponseWriter, - r *http.Request) { - - conn, err := p.upgrader.Upgrade(w, r, nil) - if err != nil { - log.Errorf("error upgrading websocket:", err) - return - } - defer func() { - err := conn.Close() - if err != nil && !IsClosedConnError(err) { - log.Errorf("WS: error closing upgraded conn: %v", - err) - } - }() - - ctx, cancelFn := context.WithCancel(context.Background()) - defer cancelFn() - - requestForwarder := newRequestForwardingReader() - request, err := http.NewRequestWithContext( - r.Context(), r.Method, r.URL.String(), requestForwarder, - ) - if err != nil { - log.Errorf("WS: error preparing request:", err) - return - } - - // Allow certain headers to be forwarded, either from source headers - // or the special Sec-Websocket-Protocol header field. - forwardHeaders(r.Header, request.Header) - - // Also allow the target request method to be overwritten, as all - // WebSocket establishment calls MUST be GET requests. - if m := r.URL.Query().Get(MethodOverrideParam); m != "" { - request.Method = m - } - - responseForwarder := newResponseForwardingWriter() - go func() { - <-ctx.Done() - responseForwarder.Close() - }() - - go func() { - defer cancelFn() - p.backend.ServeHTTP(responseForwarder, request) - }() - - // Read loop: Take messages from websocket and write to http request. - go func() { - defer cancelFn() - for { - select { - case <-ctx.Done(): - return - default: - } - - _, payload, err := conn.ReadMessage() - if err != nil { - if IsClosedConnError(err) { - log.Tracef("WS: socket "+ - "closed: %v", err) - return - } - log.Errorf("error reading message: %v", - err) - return - } - _, err = requestForwarder.Write(payload) - if err != nil { - log.Errorf("WS: error writing message "+ - "to upstream http server: %v", err) - return - } - _, _ = requestForwarder.Write([]byte{'\n'}) - - // We currently only support server-streaming messages. 
- // Therefore we close the request body after the first - // incoming message to trigger a response. - requestForwarder.CloseWriter() - } - }() - - // Write loop: Take messages from the response forwarder and write them - // to the WebSocket. - for responseForwarder.Scan() { - if len(responseForwarder.Bytes()) == 0 { - log.Errorf("WS: empty scan: %v", - responseForwarder.Err()) - - continue - } - - err = conn.WriteMessage( - websocket.TextMessage, responseForwarder.Bytes(), - ) - if err != nil { - log.Errorf("WS: error writing message: %v", err) - return - } - } - if err := responseForwarder.Err(); err != nil && !IsClosedConnError(err) { - log.Errorf("WS: scanner err: %v", err) - } -} - -// forwardHeaders forwards certain allowed header fields from the source request -// to the target request. Because browsers are limited in what header fields -// they can send on the WebSocket setup call, we also allow additional fields to -// be transported in the special Sec-Websocket-Protocol field. -func forwardHeaders(source, target http.Header) { - // Forward allowed header fields directly. - for header := range source { - headerName := textproto.CanonicalMIMEHeaderKey(header) - forward, ok := defaultHeadersToForward[headerName] - if ok && forward { - target.Set(headerName, source.Get(header)) - } - } - - // Browser aren't allowed to set custom header fields on WebSocket - // requests. We need to allow them to submit the macaroon as a WS - // protocol, which is the only allowed header. Set any "protocols" we - // declare valid as header fields on the forwarded request. - protocol := source.Get(HeaderWebSocketProtocol) - for key := range defaultProtocolsToAllow { - if strings.HasPrefix(protocol, key) { - // The format is "+". We know the - // protocol string starts with the name so we only need - // to set the value. 
- values := strings.Split( - protocol, WebSocketProtocolDelimiter, - ) - target.Set(key, values[1]) - } - } -} - -// newRequestForwardingReader creates a new request forwarding pipe. -func newRequestForwardingReader() *requestForwardingReader { - r, w := io.Pipe() - return &requestForwardingReader{ - Reader: r, - Writer: w, - pipeR: r, - pipeW: w, - } -} - -// requestForwardingReader is a wrapper around io.Pipe that embeds both the -// io.Reader and io.Writer interface and can be closed. -type requestForwardingReader struct { - io.Reader - io.Writer - - pipeR *io.PipeReader - pipeW *io.PipeWriter -} - -// CloseWriter closes the underlying pipe writer. -func (r *requestForwardingReader) CloseWriter() { - _ = r.pipeW.CloseWithError(io.EOF) -} - -// newResponseForwardingWriter creates a new http.ResponseWriter that intercepts -// what's written to it and presents it through a bufio.Scanner interface. -func newResponseForwardingWriter() *responseForwardingWriter { - r, w := io.Pipe() - return &responseForwardingWriter{ - Writer: w, - Scanner: bufio.NewScanner(r), - pipeR: r, - pipeW: w, - header: http.Header{}, - closed: make(chan bool, 1), - } -} - -// responseForwardingWriter is a type that implements the http.ResponseWriter -// interface but internally forwards what's written to the writer through a pipe -// so it can easily be read again through the bufio.Scanner interface. -type responseForwardingWriter struct { - io.Writer - *bufio.Scanner - - pipeR *io.PipeReader - pipeW *io.PipeWriter - - header http.Header - code int - closed chan bool -} - -// Write writes the given bytes to the internal pipe. -// -// NOTE: This is part of the http.ResponseWriter interface. -func (w *responseForwardingWriter) Write(b []byte) (int, error) { - return w.Writer.Write(b) -} - -// Header returns the HTTP header fields intercepted so far. -// -// NOTE: This is part of the http.ResponseWriter interface. 
-func (w *responseForwardingWriter) Header() http.Header { - return w.header -} - -// WriteHeader indicates that the header part of the response is now finished -// and sets the response code. -// -// NOTE: This is part of the http.ResponseWriter interface. -func (w *responseForwardingWriter) WriteHeader(code int) { - w.code = code -} - -// CloseNotify returns a channel that indicates if a connection was closed. -// -// NOTE: This is part of the http.CloseNotifier interface. -func (w *responseForwardingWriter) CloseNotify() <-chan bool { - return w.closed -} - -// Flush empties all buffers. We implement this to indicate to our backend that -// we support flushing our content. There is no actual implementation because -// all writes happen immediately, there is no internal buffering. -// -// NOTE: This is part of the http.Flusher interface. -func (w *responseForwardingWriter) Flush() {} - -func (w *responseForwardingWriter) Close() { - _ = w.pipeR.CloseWithError(io.EOF) - _ = w.pipeW.CloseWithError(io.EOF) - w.closed <- true -} - -// IsClosedConnError is a helper function that returns true if the given error -// is an error indicating we are using a closed connection. 
-func IsClosedConnError(err error) bool { - if err == nil { - return false - } - if err == http.ErrServerClosed { - return true - } - - str := err.Error() - if strings.Contains(str, "use of closed network connection") { - return true - } - if strings.Contains(str, "closed pipe") { - return true - } - if strings.Contains(str, "broken pipe") { - return true - } - return websocket.IsCloseError( - err, websocket.CloseNormalClosure, websocket.CloseGoingAway, - ) -} diff --git a/lnd/lnrpc/wtclientrpc/config.go b/lnd/lnrpc/wtclientrpc/config.go deleted file mode 100644 index 32881c2b..00000000 --- a/lnd/lnrpc/wtclientrpc/config.go +++ /dev/null @@ -1,25 +0,0 @@ -package wtclientrpc - -import ( - "github.com/pkt-cash/pktd/lnd/lncfg" - "github.com/pkt-cash/pktd/lnd/watchtower/wtclient" -) - -// Config is the primary configuration struct for the watchtower RPC server. It -// contains all the items required for the RPC server to carry out its duties. -// The fields with struct tags are meant to be parsed as normal configuration -// options, while if able to be populated, the latter fields MUST also be -// specified. -type Config struct { - // Active indicates if the watchtower client is enabled. - Active bool - - // Client is the backing watchtower client that we'll interact with - // through the watchtower RPC subserver. - Client wtclient.Client - - // Resolver is a custom resolver that will be used to resolve watchtower - // addresses to ensure we don't leak any information when running over - // non-clear networks, e.g. Tor, etc. 
- Resolver lncfg.TCPResolver -} diff --git a/lnd/lnrpc/wtclientrpc/driver.go b/lnd/lnrpc/wtclientrpc/driver.go deleted file mode 100644 index ff5788f2..00000000 --- a/lnd/lnrpc/wtclientrpc/driver.go +++ /dev/null @@ -1,61 +0,0 @@ -package wtclientrpc - -import ( - "fmt" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/lnrpc" -) - -// createNewSubServer is a helper method that will create the new sub server -// given the main config dispatcher method. If we're unable to find the config -// that is meant for us in the config dispatcher, then we'll exit with an -// error. -func createNewSubServer(configRegistry lnrpc.SubServerConfigDispatcher) ( - lnrpc.SubServer, lnrpc.MacaroonPerms, er.R) { - - // We'll attempt to look up the config that we expect, according to our - // subServerName name. If we can't find this, then we'll exit with an - // error, as we're unable to properly initialize ourselves without this - // config. - subServerConf, ok := configRegistry.FetchConfig(subServerName) - if !ok { - return nil, nil, er.Errorf("unable to find config for "+ - "subserver type %s", subServerName) - } - - // Now that we've found an object mapping to our service name, we'll - // ensure that it's the type we need. - config, ok := subServerConf.(*Config) - if !ok { - return nil, nil, er.Errorf("wrong type of config for "+ - "subserver %s, expected %T got %T", subServerName, - &Config{}, subServerConf) - } - - // Before we try to make the new service instance, we'll perform - // some sanity checks on the arguments to ensure that they're useable. 
- switch { - case config.Resolver == nil: - return nil, nil, er.New("a lncfg.TCPResolver is required") - } - - return New(config) -} - -func init() { - subServer := &lnrpc.SubServerDriver{ - SubServerName: subServerName, - New: func(c lnrpc.SubServerConfigDispatcher) (lnrpc.SubServer, - lnrpc.MacaroonPerms, er.R) { - return createNewSubServer(c) - }, - } - - // If the build tag is active, then we'll register ourselves as a - // sub-RPC server within the global lnrpc package namespace. - if err := lnrpc.RegisterSubServer(subServer); err != nil { - panic(fmt.Sprintf("failed to register sub server driver "+ - "'%s': %v", subServerName, err)) - } -} diff --git a/lnd/lnrpc/wtclientrpc/wtclient.go b/lnd/lnrpc/wtclientrpc/wtclient.go deleted file mode 100644 index 9ab67016..00000000 --- a/lnd/lnrpc/wtclientrpc/wtclient.go +++ /dev/null @@ -1,327 +0,0 @@ -package wtclientrpc - -import ( - "context" - "net" - "strconv" - - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/lncfg" - "github.com/pkt-cash/pktd/lnd/lnrpc" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/watchtower" - "github.com/pkt-cash/pktd/lnd/watchtower/wtclient" - "github.com/pkt-cash/pktd/pktlog/log" - "google.golang.org/grpc" - "gopkg.in/macaroon-bakery.v2/bakery" -) - -const ( - // subServerName is the name of the sub rpc server. We'll use this name - // to register ourselves, and we also require that the main - // SubServerConfigDispatcher instance recognizes it as the name of our - // RPC service. - subServerName = "WatchtowerClientRPC" -) - -var ( - // macPermissions maps RPC calls to the permissions they require. - // - // TODO(wilmer): create tower macaroon? 
- macPermissions = map[string][]bakery.Op{ - "/wtclientrpc.WatchtowerClient/AddTower": {{ - Entity: "offchain", - Action: "write", - }}, - "/wtclientrpc.WatchtowerClient/RemoveTower": {{ - Entity: "offchain", - Action: "write", - }}, - "/wtclientrpc.WatchtowerClient/ListTowers": {{ - Entity: "offchain", - Action: "read", - }}, - "/wtclientrpc.WatchtowerClient/GetTowerInfo": {{ - Entity: "offchain", - Action: "read", - }}, - "/wtclientrpc.WatchtowerClient/Stats": {{ - Entity: "offchain", - Action: "read", - }}, - "/wtclientrpc.WatchtowerClient/Policy": {{ - Entity: "offchain", - Action: "read", - }}, - } - - // ErrWtclientNotActive signals that RPC calls cannot be processed - // because the watchtower client is not active. - ErrWtclientNotActive = er.GenericErrorType.CodeWithDetail("ErrWtclientNotActive", - "watchtower client not active") -) - -// WatchtowerClient is the RPC server we'll use to interact with the backing -// active watchtower client. -// -// TODO(wilmer): better name? -type WatchtowerClient struct { - cfg Config -} - -// A compile time check to ensure that WatchtowerClient fully implements the -// WatchtowerClientWatchtowerClient gRPC service. -var _ WatchtowerClientServer = (*WatchtowerClient)(nil) - -// New returns a new instance of the wtclientrpc WatchtowerClient sub-server. -// We also return the set of permissions for the macaroons that we may create -// within this method. If the macaroons we need aren't found in the filepath, -// then we'll create them on start up. If we're unable to locate, or create the -// macaroons we need, then we'll return with an error. -func New(cfg *Config) (*WatchtowerClient, lnrpc.MacaroonPerms, er.R) { - return &WatchtowerClient{*cfg}, macPermissions, nil -} - -// Start launches any helper goroutines required for the WatchtowerClient to -// function. -// -// NOTE: This is part of the lnrpc.SubWatchtowerClient interface. 
-func (c *WatchtowerClient) Start() er.R { - return nil -} - -// Stop signals any active goroutines for a graceful closure. -// -// NOTE: This is part of the lnrpc.SubServer interface. -func (c *WatchtowerClient) Stop() er.R { - return nil -} - -// Name returns a unique string representation of the sub-server. This can be -// used to identify the sub-server and also de-duplicate them. -// -// NOTE: This is part of the lnrpc.SubServer interface. -func (c *WatchtowerClient) Name() string { - return subServerName -} - -// RegisterWithRootServer will be called by the root gRPC server to direct a sub -// RPC server to register itself with the main gRPC root server. Until this is -// called, each sub-server won't be able to have requests routed towards it. -// -// NOTE: This is part of the lnrpc.SubServer interface. -func (c *WatchtowerClient) RegisterWithRootServer(grpcServer *grpc.Server) er.R { - // We make sure that we register it with the main gRPC server to ensure - // all our methods are routed properly. - RegisterWatchtowerClientServer(grpcServer, c) - - log.Debugf("WatchtowerClient RPC server successfully registered " + - "with root gRPC server") - - return nil -} - -// RegisterWithRestServer will be called by the root REST mux to direct a sub -// RPC server to register itself with the main REST mux server. Until this is -// called, each sub-server won't be able to have requests routed towards it. -// -// NOTE: This is part of the lnrpc.SubServer interface. -func (c *WatchtowerClient) RegisterWithRestServer(ctx context.Context, - mux *runtime.ServeMux, dest string, opts []grpc.DialOption) er.R { - - // We make sure that we register it with the main REST server to ensure - // all our methods are routed properly. - err := RegisterWatchtowerClientHandlerFromEndpoint(ctx, mux, dest, opts) - if err != nil { - return er.E(err) - } - - return nil -} - -// isActive returns nil if the watchtower client is initialized so that we can -// process RPC requests. 
-func (c *WatchtowerClient) isActive() er.R { - if c.cfg.Active { - return nil - } - return ErrWtclientNotActive.Default() -} - -// AddTower adds a new watchtower reachable at the given address and considers -// it for new sessions. If the watchtower already exists, then any new addresses -// included will be considered when dialing it for session negotiations and -// backups. -func (c *WatchtowerClient) AddTower(ctx context.Context, - req *AddTowerRequest) (*AddTowerResponse, error) { - - if err := c.isActive(); err != nil { - return nil, er.Native(err) - } - - pubKey, err := btcec.ParsePubKey(req.Pubkey, btcec.S256()) - if err != nil { - return nil, er.Native(err) - } - addr, errr := lncfg.ParseAddressString( - req.Address, strconv.Itoa(watchtower.DefaultPeerPort), - c.cfg.Resolver, - ) - if errr != nil { - return nil, er.Native(er.Errorf("invalid address %v: %v", req.Address, errr)) - } - - towerAddr := &lnwire.NetAddress{ - IdentityKey: pubKey, - Address: addr, - } - if err := c.cfg.Client.AddTower(towerAddr); err != nil { - return nil, er.Native(err) - } - - return &AddTowerResponse{}, nil -} - -// RemoveTower removes a watchtower from being considered for future session -// negotiations and from being used for any subsequent backups until it's added -// again. If an address is provided, then this RPC only serves as a way of -// removing the address from the watchtower instead. 
-func (c *WatchtowerClient) RemoveTower(ctx context.Context, - req *RemoveTowerRequest) (*RemoveTowerResponse, error) { - - if err := c.isActive(); err != nil { - return nil, er.Native(err) - } - - pubKey, err := btcec.ParsePubKey(req.Pubkey, btcec.S256()) - if err != nil { - return nil, er.Native(err) - } - - var addr net.Addr - if req.Address != "" { - addr, err = lncfg.ParseAddressString( - req.Address, strconv.Itoa(watchtower.DefaultPeerPort), - c.cfg.Resolver, - ) - if err != nil { - return nil, er.Native(er.Errorf("unable to parse tower "+ - "address %v: %v", req.Address, err)) - } - } - - if err := c.cfg.Client.RemoveTower(pubKey, addr); err != nil { - return nil, er.Native(err) - } - - return &RemoveTowerResponse{}, nil -} - -// ListTowers returns the list of watchtowers registered with the client. -func (c *WatchtowerClient) ListTowers(ctx context.Context, - req *ListTowersRequest) (*ListTowersResponse, error) { - - if err := c.isActive(); err != nil { - return nil, er.Native(err) - } - - towers, err := c.cfg.Client.RegisteredTowers() - if err != nil { - return nil, er.Native(err) - } - - rpcTowers := make([]*Tower, 0, len(towers)) - for _, tower := range towers { - rpcTower := marshallTower(tower, req.IncludeSessions) - rpcTowers = append(rpcTowers, rpcTower) - } - - return &ListTowersResponse{Towers: rpcTowers}, nil -} - -// GetTowerInfo retrieves information for a registered watchtower. -func (c *WatchtowerClient) GetTowerInfo(ctx context.Context, - req *GetTowerInfoRequest) (*Tower, error) { - - if err := c.isActive(); err != nil { - return nil, er.Native(err) - } - - pubKey, err := btcec.ParsePubKey(req.Pubkey, btcec.S256()) - if err != nil { - return nil, er.Native(err) - } - - tower, err := c.cfg.Client.LookupTower(pubKey) - if err != nil { - return nil, er.Native(err) - } - - return marshallTower(tower, req.IncludeSessions), nil -} - -// Stats returns the in-memory statistics of the client since startup. 
-func (c *WatchtowerClient) Stats(ctx context.Context, - req *StatsRequest) (*StatsResponse, error) { - - if err := c.isActive(); err != nil { - return nil, er.Native(err) - } - - stats := c.cfg.Client.Stats() - return &StatsResponse{ - NumBackups: uint32(stats.NumTasksAccepted), - NumFailedBackups: uint32(stats.NumTasksIneligible), - NumPendingBackups: uint32(stats.NumTasksReceived), - NumSessionsAcquired: uint32(stats.NumSessionsAcquired), - NumSessionsExhausted: uint32(stats.NumSessionsExhausted), - }, nil -} - -// Policy returns the active watchtower client policy configuration. -func (c *WatchtowerClient) Policy(ctx context.Context, - req *PolicyRequest) (*PolicyResponse, error) { - - if err := c.isActive(); err != nil { - return nil, er.Native(err) - } - - policy := c.cfg.Client.Policy() - return &PolicyResponse{ - MaxUpdates: uint32(policy.MaxUpdates), - SweepSatPerByte: uint32(policy.SweepFeeRate.FeePerKVByte() / 1000), - }, nil -} - -// marshallTower converts a client registered watchtower into its corresponding -// RPC type. 
-func marshallTower(tower *wtclient.RegisteredTower, includeSessions bool) *Tower { - rpcAddrs := make([]string, 0, len(tower.Addresses)) - for _, addr := range tower.Addresses { - rpcAddrs = append(rpcAddrs, addr.String()) - } - - var rpcSessions []*TowerSession - if includeSessions { - rpcSessions = make([]*TowerSession, 0, len(tower.Sessions)) - for _, session := range tower.Sessions { - satPerByte := session.Policy.SweepFeeRate.FeePerKVByte() / 1000 - rpcSessions = append(rpcSessions, &TowerSession{ - NumBackups: uint32(len(session.AckedUpdates)), - NumPendingBackups: uint32(len(session.CommittedUpdates)), - MaxBackups: uint32(session.Policy.MaxUpdates), - SweepSatPerByte: uint32(satPerByte), - }) - } - } - - return &Tower{ - Pubkey: tower.IdentityKey.SerializeCompressed(), - Addresses: rpcAddrs, - ActiveSessionCandidate: tower.ActiveSessionCandidate, - NumSessions: uint32(len(tower.Sessions)), - Sessions: rpcSessions, - } -} diff --git a/lnd/lnrpc/wtclientrpc/wtclient.pb.go b/lnd/lnrpc/wtclientrpc/wtclient.pb.go deleted file mode 100644 index 57acb504..00000000 --- a/lnd/lnrpc/wtclientrpc/wtclient.pb.go +++ /dev/null @@ -1,1016 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: wtclientrpc/wtclient.proto - -package wtclientrpc - -import ( - context "context" - fmt "fmt" - proto "github.com/golang/protobuf/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -type AddTowerRequest struct { - // The identifying public key of the watchtower to add. - Pubkey []byte `protobuf:"bytes,1,opt,name=pubkey,proto3" json:"pubkey,omitempty"` - // A network address the watchtower is reachable over. - Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AddTowerRequest) Reset() { *m = AddTowerRequest{} } -func (m *AddTowerRequest) String() string { return proto.CompactTextString(m) } -func (*AddTowerRequest) ProtoMessage() {} -func (*AddTowerRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b5f4e7d95a641af2, []int{0} -} - -func (m *AddTowerRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_AddTowerRequest.Unmarshal(m, b) -} -func (m *AddTowerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AddTowerRequest.Marshal(b, m, deterministic) -} -func (m *AddTowerRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AddTowerRequest.Merge(m, src) -} -func (m *AddTowerRequest) XXX_Size() int { - return xxx_messageInfo_AddTowerRequest.Size(m) -} -func (m *AddTowerRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AddTowerRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_AddTowerRequest proto.InternalMessageInfo - -func (m *AddTowerRequest) GetPubkey() []byte { - if m != nil { - return m.Pubkey - } - return nil -} - -func (m *AddTowerRequest) GetAddress() string { - if m != nil { - return m.Address - } - return "" -} - -type AddTowerResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AddTowerResponse) Reset() { *m = AddTowerResponse{} } -func (m *AddTowerResponse) String() string { return proto.CompactTextString(m) } -func (*AddTowerResponse) 
ProtoMessage() {} -func (*AddTowerResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_b5f4e7d95a641af2, []int{1} -} - -func (m *AddTowerResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_AddTowerResponse.Unmarshal(m, b) -} -func (m *AddTowerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AddTowerResponse.Marshal(b, m, deterministic) -} -func (m *AddTowerResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AddTowerResponse.Merge(m, src) -} -func (m *AddTowerResponse) XXX_Size() int { - return xxx_messageInfo_AddTowerResponse.Size(m) -} -func (m *AddTowerResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AddTowerResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_AddTowerResponse proto.InternalMessageInfo - -type RemoveTowerRequest struct { - // The identifying public key of the watchtower to remove. - Pubkey []byte `protobuf:"bytes,1,opt,name=pubkey,proto3" json:"pubkey,omitempty"` - // - //If set, then the record for this address will be removed, indicating that is - //is stale. Otherwise, the watchtower will no longer be used for future - //session negotiations and backups. 
- Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RemoveTowerRequest) Reset() { *m = RemoveTowerRequest{} } -func (m *RemoveTowerRequest) String() string { return proto.CompactTextString(m) } -func (*RemoveTowerRequest) ProtoMessage() {} -func (*RemoveTowerRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b5f4e7d95a641af2, []int{2} -} - -func (m *RemoveTowerRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RemoveTowerRequest.Unmarshal(m, b) -} -func (m *RemoveTowerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RemoveTowerRequest.Marshal(b, m, deterministic) -} -func (m *RemoveTowerRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_RemoveTowerRequest.Merge(m, src) -} -func (m *RemoveTowerRequest) XXX_Size() int { - return xxx_messageInfo_RemoveTowerRequest.Size(m) -} -func (m *RemoveTowerRequest) XXX_DiscardUnknown() { - xxx_messageInfo_RemoveTowerRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_RemoveTowerRequest proto.InternalMessageInfo - -func (m *RemoveTowerRequest) GetPubkey() []byte { - if m != nil { - return m.Pubkey - } - return nil -} - -func (m *RemoveTowerRequest) GetAddress() string { - if m != nil { - return m.Address - } - return "" -} - -type RemoveTowerResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RemoveTowerResponse) Reset() { *m = RemoveTowerResponse{} } -func (m *RemoveTowerResponse) String() string { return proto.CompactTextString(m) } -func (*RemoveTowerResponse) ProtoMessage() {} -func (*RemoveTowerResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_b5f4e7d95a641af2, []int{3} -} - -func (m *RemoveTowerResponse) XXX_Unmarshal(b []byte) error { - return 
xxx_messageInfo_RemoveTowerResponse.Unmarshal(m, b) -} -func (m *RemoveTowerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RemoveTowerResponse.Marshal(b, m, deterministic) -} -func (m *RemoveTowerResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_RemoveTowerResponse.Merge(m, src) -} -func (m *RemoveTowerResponse) XXX_Size() int { - return xxx_messageInfo_RemoveTowerResponse.Size(m) -} -func (m *RemoveTowerResponse) XXX_DiscardUnknown() { - xxx_messageInfo_RemoveTowerResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_RemoveTowerResponse proto.InternalMessageInfo - -type GetTowerInfoRequest struct { - // The identifying public key of the watchtower to retrieve information for. - Pubkey []byte `protobuf:"bytes,1,opt,name=pubkey,proto3" json:"pubkey,omitempty"` - // Whether we should include sessions with the watchtower in the response. - IncludeSessions bool `protobuf:"varint,2,opt,name=include_sessions,json=includeSessions,proto3" json:"include_sessions,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetTowerInfoRequest) Reset() { *m = GetTowerInfoRequest{} } -func (m *GetTowerInfoRequest) String() string { return proto.CompactTextString(m) } -func (*GetTowerInfoRequest) ProtoMessage() {} -func (*GetTowerInfoRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b5f4e7d95a641af2, []int{4} -} - -func (m *GetTowerInfoRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetTowerInfoRequest.Unmarshal(m, b) -} -func (m *GetTowerInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetTowerInfoRequest.Marshal(b, m, deterministic) -} -func (m *GetTowerInfoRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetTowerInfoRequest.Merge(m, src) -} -func (m *GetTowerInfoRequest) XXX_Size() int { - return xxx_messageInfo_GetTowerInfoRequest.Size(m) -} -func (m 
*GetTowerInfoRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetTowerInfoRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetTowerInfoRequest proto.InternalMessageInfo - -func (m *GetTowerInfoRequest) GetPubkey() []byte { - if m != nil { - return m.Pubkey - } - return nil -} - -func (m *GetTowerInfoRequest) GetIncludeSessions() bool { - if m != nil { - return m.IncludeSessions - } - return false -} - -type TowerSession struct { - // - //The total number of successful backups that have been made to the - //watchtower session. - NumBackups uint32 `protobuf:"varint,1,opt,name=num_backups,json=numBackups,proto3" json:"num_backups,omitempty"` - // - //The total number of backups in the session that are currently pending to be - //acknowledged by the watchtower. - NumPendingBackups uint32 `protobuf:"varint,2,opt,name=num_pending_backups,json=numPendingBackups,proto3" json:"num_pending_backups,omitempty"` - // The maximum number of backups allowed by the watchtower session. - MaxBackups uint32 `protobuf:"varint,3,opt,name=max_backups,json=maxBackups,proto3" json:"max_backups,omitempty"` - // - //The fee rate, in satoshis per vbyte, that will be used by the watchtower for - //the justice transaction in the event of a channel breach. 
- SweepSatPerByte uint32 `protobuf:"varint,4,opt,name=sweep_sat_per_byte,json=sweepSatPerByte,proto3" json:"sweep_sat_per_byte,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *TowerSession) Reset() { *m = TowerSession{} } -func (m *TowerSession) String() string { return proto.CompactTextString(m) } -func (*TowerSession) ProtoMessage() {} -func (*TowerSession) Descriptor() ([]byte, []int) { - return fileDescriptor_b5f4e7d95a641af2, []int{5} -} - -func (m *TowerSession) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_TowerSession.Unmarshal(m, b) -} -func (m *TowerSession) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_TowerSession.Marshal(b, m, deterministic) -} -func (m *TowerSession) XXX_Merge(src proto.Message) { - xxx_messageInfo_TowerSession.Merge(m, src) -} -func (m *TowerSession) XXX_Size() int { - return xxx_messageInfo_TowerSession.Size(m) -} -func (m *TowerSession) XXX_DiscardUnknown() { - xxx_messageInfo_TowerSession.DiscardUnknown(m) -} - -var xxx_messageInfo_TowerSession proto.InternalMessageInfo - -func (m *TowerSession) GetNumBackups() uint32 { - if m != nil { - return m.NumBackups - } - return 0 -} - -func (m *TowerSession) GetNumPendingBackups() uint32 { - if m != nil { - return m.NumPendingBackups - } - return 0 -} - -func (m *TowerSession) GetMaxBackups() uint32 { - if m != nil { - return m.MaxBackups - } - return 0 -} - -func (m *TowerSession) GetSweepSatPerByte() uint32 { - if m != nil { - return m.SweepSatPerByte - } - return 0 -} - -type Tower struct { - // The identifying public key of the watchtower. - Pubkey []byte `protobuf:"bytes,1,opt,name=pubkey,proto3" json:"pubkey,omitempty"` - // The list of addresses the watchtower is reachable over. - Addresses []string `protobuf:"bytes,2,rep,name=addresses,proto3" json:"addresses,omitempty"` - // Whether the watchtower is currently a candidate for new sessions. 
- ActiveSessionCandidate bool `protobuf:"varint,3,opt,name=active_session_candidate,json=activeSessionCandidate,proto3" json:"active_session_candidate,omitempty"` - // The number of sessions that have been negotiated with the watchtower. - NumSessions uint32 `protobuf:"varint,4,opt,name=num_sessions,json=numSessions,proto3" json:"num_sessions,omitempty"` - // The list of sessions that have been negotiated with the watchtower. - Sessions []*TowerSession `protobuf:"bytes,5,rep,name=sessions,proto3" json:"sessions,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Tower) Reset() { *m = Tower{} } -func (m *Tower) String() string { return proto.CompactTextString(m) } -func (*Tower) ProtoMessage() {} -func (*Tower) Descriptor() ([]byte, []int) { - return fileDescriptor_b5f4e7d95a641af2, []int{6} -} - -func (m *Tower) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Tower.Unmarshal(m, b) -} -func (m *Tower) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Tower.Marshal(b, m, deterministic) -} -func (m *Tower) XXX_Merge(src proto.Message) { - xxx_messageInfo_Tower.Merge(m, src) -} -func (m *Tower) XXX_Size() int { - return xxx_messageInfo_Tower.Size(m) -} -func (m *Tower) XXX_DiscardUnknown() { - xxx_messageInfo_Tower.DiscardUnknown(m) -} - -var xxx_messageInfo_Tower proto.InternalMessageInfo - -func (m *Tower) GetPubkey() []byte { - if m != nil { - return m.Pubkey - } - return nil -} - -func (m *Tower) GetAddresses() []string { - if m != nil { - return m.Addresses - } - return nil -} - -func (m *Tower) GetActiveSessionCandidate() bool { - if m != nil { - return m.ActiveSessionCandidate - } - return false -} - -func (m *Tower) GetNumSessions() uint32 { - if m != nil { - return m.NumSessions - } - return 0 -} - -func (m *Tower) GetSessions() []*TowerSession { - if m != nil { - return m.Sessions - } - return nil -} - -type ListTowersRequest 
struct { - // Whether we should include sessions with the watchtower in the response. - IncludeSessions bool `protobuf:"varint,1,opt,name=include_sessions,json=includeSessions,proto3" json:"include_sessions,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListTowersRequest) Reset() { *m = ListTowersRequest{} } -func (m *ListTowersRequest) String() string { return proto.CompactTextString(m) } -func (*ListTowersRequest) ProtoMessage() {} -func (*ListTowersRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b5f4e7d95a641af2, []int{7} -} - -func (m *ListTowersRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListTowersRequest.Unmarshal(m, b) -} -func (m *ListTowersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListTowersRequest.Marshal(b, m, deterministic) -} -func (m *ListTowersRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListTowersRequest.Merge(m, src) -} -func (m *ListTowersRequest) XXX_Size() int { - return xxx_messageInfo_ListTowersRequest.Size(m) -} -func (m *ListTowersRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ListTowersRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ListTowersRequest proto.InternalMessageInfo - -func (m *ListTowersRequest) GetIncludeSessions() bool { - if m != nil { - return m.IncludeSessions - } - return false -} - -type ListTowersResponse struct { - // The list of watchtowers available for new backups. 
- Towers []*Tower `protobuf:"bytes,1,rep,name=towers,proto3" json:"towers,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListTowersResponse) Reset() { *m = ListTowersResponse{} } -func (m *ListTowersResponse) String() string { return proto.CompactTextString(m) } -func (*ListTowersResponse) ProtoMessage() {} -func (*ListTowersResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_b5f4e7d95a641af2, []int{8} -} - -func (m *ListTowersResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListTowersResponse.Unmarshal(m, b) -} -func (m *ListTowersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListTowersResponse.Marshal(b, m, deterministic) -} -func (m *ListTowersResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListTowersResponse.Merge(m, src) -} -func (m *ListTowersResponse) XXX_Size() int { - return xxx_messageInfo_ListTowersResponse.Size(m) -} -func (m *ListTowersResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ListTowersResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ListTowersResponse proto.InternalMessageInfo - -func (m *ListTowersResponse) GetTowers() []*Tower { - if m != nil { - return m.Towers - } - return nil -} - -type StatsRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StatsRequest) Reset() { *m = StatsRequest{} } -func (m *StatsRequest) String() string { return proto.CompactTextString(m) } -func (*StatsRequest) ProtoMessage() {} -func (*StatsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b5f4e7d95a641af2, []int{9} -} - -func (m *StatsRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StatsRequest.Unmarshal(m, b) -} -func (m *StatsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StatsRequest.Marshal(b, m, 
deterministic) -} -func (m *StatsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_StatsRequest.Merge(m, src) -} -func (m *StatsRequest) XXX_Size() int { - return xxx_messageInfo_StatsRequest.Size(m) -} -func (m *StatsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_StatsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_StatsRequest proto.InternalMessageInfo - -type StatsResponse struct { - // - //The total number of backups made to all active and exhausted watchtower - //sessions. - NumBackups uint32 `protobuf:"varint,1,opt,name=num_backups,json=numBackups,proto3" json:"num_backups,omitempty"` - // - //The total number of backups that are pending to be acknowledged by all - //active and exhausted watchtower sessions. - NumPendingBackups uint32 `protobuf:"varint,2,opt,name=num_pending_backups,json=numPendingBackups,proto3" json:"num_pending_backups,omitempty"` - // - //The total number of backups that all active and exhausted watchtower - //sessions have failed to acknowledge. - NumFailedBackups uint32 `protobuf:"varint,3,opt,name=num_failed_backups,json=numFailedBackups,proto3" json:"num_failed_backups,omitempty"` - // The total number of new sessions made to watchtowers. - NumSessionsAcquired uint32 `protobuf:"varint,4,opt,name=num_sessions_acquired,json=numSessionsAcquired,proto3" json:"num_sessions_acquired,omitempty"` - // The total number of watchtower sessions that have been exhausted. 
- NumSessionsExhausted uint32 `protobuf:"varint,5,opt,name=num_sessions_exhausted,json=numSessionsExhausted,proto3" json:"num_sessions_exhausted,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StatsResponse) Reset() { *m = StatsResponse{} } -func (m *StatsResponse) String() string { return proto.CompactTextString(m) } -func (*StatsResponse) ProtoMessage() {} -func (*StatsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_b5f4e7d95a641af2, []int{10} -} - -func (m *StatsResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StatsResponse.Unmarshal(m, b) -} -func (m *StatsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StatsResponse.Marshal(b, m, deterministic) -} -func (m *StatsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_StatsResponse.Merge(m, src) -} -func (m *StatsResponse) XXX_Size() int { - return xxx_messageInfo_StatsResponse.Size(m) -} -func (m *StatsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_StatsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_StatsResponse proto.InternalMessageInfo - -func (m *StatsResponse) GetNumBackups() uint32 { - if m != nil { - return m.NumBackups - } - return 0 -} - -func (m *StatsResponse) GetNumPendingBackups() uint32 { - if m != nil { - return m.NumPendingBackups - } - return 0 -} - -func (m *StatsResponse) GetNumFailedBackups() uint32 { - if m != nil { - return m.NumFailedBackups - } - return 0 -} - -func (m *StatsResponse) GetNumSessionsAcquired() uint32 { - if m != nil { - return m.NumSessionsAcquired - } - return 0 -} - -func (m *StatsResponse) GetNumSessionsExhausted() uint32 { - if m != nil { - return m.NumSessionsExhausted - } - return 0 -} - -type PolicyRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PolicyRequest) Reset() { *m = 
PolicyRequest{} } -func (m *PolicyRequest) String() string { return proto.CompactTextString(m) } -func (*PolicyRequest) ProtoMessage() {} -func (*PolicyRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b5f4e7d95a641af2, []int{11} -} - -func (m *PolicyRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PolicyRequest.Unmarshal(m, b) -} -func (m *PolicyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PolicyRequest.Marshal(b, m, deterministic) -} -func (m *PolicyRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_PolicyRequest.Merge(m, src) -} -func (m *PolicyRequest) XXX_Size() int { - return xxx_messageInfo_PolicyRequest.Size(m) -} -func (m *PolicyRequest) XXX_DiscardUnknown() { - xxx_messageInfo_PolicyRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_PolicyRequest proto.InternalMessageInfo - -type PolicyResponse struct { - // - //The maximum number of updates each session we negotiate with watchtowers - //should allow. - MaxUpdates uint32 `protobuf:"varint,1,opt,name=max_updates,json=maxUpdates,proto3" json:"max_updates,omitempty"` - // - //The fee rate, in satoshis per vbyte, that will be used by watchtowers for - //justice transactions in response to channel breaches. 
- SweepSatPerByte uint32 `protobuf:"varint,2,opt,name=sweep_sat_per_byte,json=sweepSatPerByte,proto3" json:"sweep_sat_per_byte,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PolicyResponse) Reset() { *m = PolicyResponse{} } -func (m *PolicyResponse) String() string { return proto.CompactTextString(m) } -func (*PolicyResponse) ProtoMessage() {} -func (*PolicyResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_b5f4e7d95a641af2, []int{12} -} - -func (m *PolicyResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PolicyResponse.Unmarshal(m, b) -} -func (m *PolicyResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PolicyResponse.Marshal(b, m, deterministic) -} -func (m *PolicyResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_PolicyResponse.Merge(m, src) -} -func (m *PolicyResponse) XXX_Size() int { - return xxx_messageInfo_PolicyResponse.Size(m) -} -func (m *PolicyResponse) XXX_DiscardUnknown() { - xxx_messageInfo_PolicyResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_PolicyResponse proto.InternalMessageInfo - -func (m *PolicyResponse) GetMaxUpdates() uint32 { - if m != nil { - return m.MaxUpdates - } - return 0 -} - -func (m *PolicyResponse) GetSweepSatPerByte() uint32 { - if m != nil { - return m.SweepSatPerByte - } - return 0 -} - -func init() { - proto.RegisterType((*AddTowerRequest)(nil), "wtclientrpc.AddTowerRequest") - proto.RegisterType((*AddTowerResponse)(nil), "wtclientrpc.AddTowerResponse") - proto.RegisterType((*RemoveTowerRequest)(nil), "wtclientrpc.RemoveTowerRequest") - proto.RegisterType((*RemoveTowerResponse)(nil), "wtclientrpc.RemoveTowerResponse") - proto.RegisterType((*GetTowerInfoRequest)(nil), "wtclientrpc.GetTowerInfoRequest") - proto.RegisterType((*TowerSession)(nil), "wtclientrpc.TowerSession") - proto.RegisterType((*Tower)(nil), "wtclientrpc.Tower") - 
proto.RegisterType((*ListTowersRequest)(nil), "wtclientrpc.ListTowersRequest") - proto.RegisterType((*ListTowersResponse)(nil), "wtclientrpc.ListTowersResponse") - proto.RegisterType((*StatsRequest)(nil), "wtclientrpc.StatsRequest") - proto.RegisterType((*StatsResponse)(nil), "wtclientrpc.StatsResponse") - proto.RegisterType((*PolicyRequest)(nil), "wtclientrpc.PolicyRequest") - proto.RegisterType((*PolicyResponse)(nil), "wtclientrpc.PolicyResponse") -} - -func init() { proto.RegisterFile("wtclientrpc/wtclient.proto", fileDescriptor_b5f4e7d95a641af2) } - -var fileDescriptor_b5f4e7d95a641af2 = []byte{ - // 678 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0x4d, 0x6f, 0xd3, 0x40, - 0x10, 0x95, 0x1b, 0x12, 0xd2, 0x49, 0xda, 0xa4, 0x1b, 0x5a, 0x19, 0x53, 0x48, 0xf0, 0x29, 0x7c, - 0xb9, 0xa8, 0x80, 0xc4, 0xa9, 0xa2, 0x2d, 0xb4, 0x42, 0x02, 0x29, 0x72, 0x41, 0x20, 0x0e, 0x58, - 0x1b, 0x7b, 0xdb, 0x58, 0x8d, 0xd7, 0xae, 0x77, 0xdd, 0x26, 0x3f, 0x8a, 0x9f, 0xc1, 0x0f, 0xe0, - 0xdf, 0x70, 0x44, 0x5e, 0xef, 0x3a, 0x76, 0xeb, 0x88, 0x03, 0x1c, 0x22, 0xd9, 0xf3, 0xde, 0x3e, - 0x8f, 0xdf, 0xbc, 0x8c, 0xc1, 0xb8, 0xe2, 0xee, 0xd4, 0x27, 0x94, 0xc7, 0x91, 0xbb, 0xa3, 0xae, - 0xad, 0x28, 0x0e, 0x79, 0x88, 0x5a, 0x05, 0xcc, 0x3c, 0x84, 0xce, 0xbe, 0xe7, 0x7d, 0x0a, 0xaf, - 0x48, 0x6c, 0x93, 0x8b, 0x84, 0x30, 0x8e, 0xb6, 0xa0, 0x11, 0x25, 0xe3, 0x73, 0x32, 0xd7, 0xb5, - 0x81, 0x36, 0x6c, 0xdb, 0xf2, 0x0e, 0xe9, 0x70, 0x1b, 0x7b, 0x5e, 0x4c, 0x18, 0xd3, 0x57, 0x06, - 0xda, 0x70, 0xd5, 0x56, 0xb7, 0x26, 0x82, 0xee, 0x42, 0x84, 0x45, 0x21, 0x65, 0xc4, 0x3c, 0x02, - 0x64, 0x93, 0x20, 0xbc, 0x24, 0xff, 0xa8, 0xbd, 0x09, 0xbd, 0x92, 0x8e, 0x94, 0xff, 0x0a, 0xbd, - 0x63, 0xc2, 0x45, 0xed, 0x3d, 0x3d, 0x0d, 0xff, 0xa6, 0xff, 0x08, 0xba, 0x3e, 0x75, 0xa7, 0x89, - 0x47, 0x1c, 0x46, 0x18, 0xf3, 0x43, 0x9a, 0x3d, 0xa8, 0x69, 0x77, 0x64, 0xfd, 0x44, 0x96, 0xcd, - 0x1f, 0x1a, 0xb4, 0x85, 0xae, 0xac, 0xa0, 0x3e, 0xb4, 0x68, 0x12, 
0x38, 0x63, 0xec, 0x9e, 0x27, - 0x11, 0x13, 0xc2, 0x6b, 0x36, 0xd0, 0x24, 0x38, 0xc8, 0x2a, 0xc8, 0x82, 0x5e, 0x4a, 0x88, 0x08, - 0xf5, 0x7c, 0x7a, 0x96, 0x13, 0x57, 0x04, 0x71, 0x83, 0x26, 0xc1, 0x28, 0x43, 0x14, 0xbf, 0x0f, - 0xad, 0x00, 0xcf, 0x72, 0x5e, 0x2d, 0x13, 0x0c, 0xf0, 0x4c, 0x11, 0x9e, 0x00, 0x62, 0x57, 0x84, - 0x44, 0x0e, 0xc3, 0xdc, 0x89, 0x48, 0xec, 0x8c, 0xe7, 0x9c, 0xe8, 0xb7, 0x04, 0xaf, 0x23, 0x90, - 0x13, 0xcc, 0x47, 0x24, 0x3e, 0x98, 0x73, 0x62, 0xfe, 0xd2, 0xa0, 0x2e, 0xfa, 0x5d, 0xfa, 0xf2, - 0xdb, 0xb0, 0x2a, 0xdd, 0x24, 0x69, 0x57, 0xb5, 0xe1, 0xaa, 0xbd, 0x28, 0xa0, 0xd7, 0xa0, 0x63, - 0x97, 0xfb, 0x97, 0xb9, 0x33, 0x8e, 0x8b, 0xa9, 0xe7, 0x7b, 0x98, 0x13, 0xd1, 0x5a, 0xd3, 0xde, - 0xca, 0x70, 0xe9, 0xc7, 0xa1, 0x42, 0xd1, 0x43, 0x68, 0xa7, 0xef, 0x9d, 0x1b, 0x9a, 0x35, 0x98, - 0x9a, 0xa5, 0xcc, 0x44, 0xaf, 0xa0, 0x99, 0xc3, 0xf5, 0x41, 0x6d, 0xd8, 0xda, 0xbd, 0x6b, 0x15, - 0xe2, 0x67, 0x15, 0x8d, 0xb6, 0x73, 0xaa, 0xb9, 0x07, 0x1b, 0x1f, 0x7c, 0x96, 0x8d, 0x97, 0xa9, - 0xd9, 0x56, 0xcd, 0x50, 0xab, 0x9e, 0xe1, 0x1b, 0x40, 0xc5, 0xf3, 0x59, 0x66, 0xd0, 0x63, 0x68, - 0x70, 0x51, 0xd1, 0x35, 0xd1, 0x0a, 0xba, 0xd9, 0x8a, 0x2d, 0x19, 0xe6, 0x3a, 0xb4, 0x4f, 0x38, - 0xe6, 0xea, 0xe1, 0xe6, 0x6f, 0x0d, 0xd6, 0x64, 0x41, 0xaa, 0xfd, 0xf7, 0x58, 0x3c, 0x05, 0x94, - 0xf2, 0x4f, 0xb1, 0x3f, 0x25, 0xde, 0xb5, 0x74, 0x74, 0x69, 0x12, 0x1c, 0x09, 0x40, 0xb1, 0x77, - 0x61, 0xb3, 0x68, 0xbe, 0x83, 0xdd, 0x8b, 0xc4, 0x8f, 0x89, 0x27, 0xa7, 0xd0, 0x2b, 0x4c, 0x61, - 0x5f, 0x42, 0xe8, 0x25, 0x6c, 0x95, 0xce, 0x90, 0xd9, 0x04, 0x27, 0x8c, 0x13, 0x4f, 0xaf, 0x8b, - 0x43, 0x77, 0x0a, 0x87, 0xde, 0x29, 0xcc, 0xec, 0xc0, 0xda, 0x28, 0x9c, 0xfa, 0xee, 0x5c, 0x79, - 0xf1, 0x1d, 0xd6, 0x55, 0x61, 0xe1, 0x45, 0x9a, 0xe8, 0x24, 0x4a, 0x73, 0x91, 0x7b, 0x11, 0xe0, - 0xd9, 0xe7, 0xac, 0xb2, 0x24, 0xd1, 0x2b, 0x95, 0x89, 0xde, 0xfd, 0x59, 0x83, 0xee, 0x17, 0xcc, - 0xdd, 0x89, 0x98, 0xc5, 0xa1, 0x98, 0x10, 0x3a, 0x86, 0xa6, 0xda, 0x31, 0x68, 0xbb, 0x34, 0xb8, - 0x6b, 
0xfb, 0xcb, 0xb8, 0xbf, 0x04, 0x95, 0xbd, 0x8e, 0xa0, 0x55, 0x58, 0x28, 0xa8, 0x5f, 0x62, - 0xdf, 0x5c, 0x59, 0xc6, 0x60, 0x39, 0x41, 0x2a, 0x7e, 0x04, 0x58, 0xa4, 0x0d, 0x3d, 0x28, 0xf1, - 0x6f, 0xc4, 0xd8, 0xe8, 0x2f, 0xc5, 0xa5, 0xdc, 0x5b, 0x68, 0x17, 0x57, 0x1b, 0x2a, 0x37, 0x50, - 0xb1, 0xf5, 0x8c, 0x8a, 0x20, 0xa3, 0x3d, 0xa8, 0x8b, 0xbc, 0xa2, 0xf2, 0x1f, 0xae, 0x18, 0x6a, - 0xc3, 0xa8, 0x82, 0x64, 0x17, 0xfb, 0xd0, 0xc8, 0x86, 0x8c, 0xca, 0xac, 0x52, 0x14, 0x8c, 0x7b, - 0x95, 0x58, 0x26, 0x71, 0xf0, 0xfc, 0x9b, 0x75, 0xe6, 0xf3, 0x49, 0x32, 0xb6, 0xdc, 0x30, 0xd8, - 0x89, 0xce, 0xf9, 0x33, 0x17, 0xb3, 0x49, 0x7a, 0xe1, 0xed, 0x4c, 0x69, 0xfa, 0x2b, 0x7e, 0x9d, - 0xe2, 0xc8, 0x1d, 0x37, 0xc4, 0x17, 0xea, 0xc5, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7d, 0x78, - 0xe9, 0x85, 0xbf, 0x06, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// WatchtowerClientClient is the client API for WatchtowerClient service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type WatchtowerClientClient interface { - // - //AddTower adds a new watchtower reachable at the given address and - //considers it for new sessions. If the watchtower already exists, then - //any new addresses included will be considered when dialing it for - //session negotiations and backups. - AddTower(ctx context.Context, in *AddTowerRequest, opts ...grpc.CallOption) (*AddTowerResponse, error) - // - //RemoveTower removes a watchtower from being considered for future session - //negotiations and from being used for any subsequent backups until it's added - //again. 
If an address is provided, then this RPC only serves as a way of - //removing the address from the watchtower instead. - RemoveTower(ctx context.Context, in *RemoveTowerRequest, opts ...grpc.CallOption) (*RemoveTowerResponse, error) - // ListTowers returns the list of watchtowers registered with the client. - ListTowers(ctx context.Context, in *ListTowersRequest, opts ...grpc.CallOption) (*ListTowersResponse, error) - // GetTowerInfo retrieves information for a registered watchtower. - GetTowerInfo(ctx context.Context, in *GetTowerInfoRequest, opts ...grpc.CallOption) (*Tower, error) - // Stats returns the in-memory statistics of the client since startup. - Stats(ctx context.Context, in *StatsRequest, opts ...grpc.CallOption) (*StatsResponse, error) - // Policy returns the active watchtower client policy configuration. - Policy(ctx context.Context, in *PolicyRequest, opts ...grpc.CallOption) (*PolicyResponse, error) -} - -type watchtowerClientClient struct { - cc *grpc.ClientConn -} - -func NewWatchtowerClientClient(cc *grpc.ClientConn) WatchtowerClientClient { - return &watchtowerClientClient{cc} -} - -func (c *watchtowerClientClient) AddTower(ctx context.Context, in *AddTowerRequest, opts ...grpc.CallOption) (*AddTowerResponse, error) { - out := new(AddTowerResponse) - err := c.cc.Invoke(ctx, "/wtclientrpc.WatchtowerClient/AddTower", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *watchtowerClientClient) RemoveTower(ctx context.Context, in *RemoveTowerRequest, opts ...grpc.CallOption) (*RemoveTowerResponse, error) { - out := new(RemoveTowerResponse) - err := c.cc.Invoke(ctx, "/wtclientrpc.WatchtowerClient/RemoveTower", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *watchtowerClientClient) ListTowers(ctx context.Context, in *ListTowersRequest, opts ...grpc.CallOption) (*ListTowersResponse, error) { - out := new(ListTowersResponse) - err := c.cc.Invoke(ctx, "/wtclientrpc.WatchtowerClient/ListTowers", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *watchtowerClientClient) GetTowerInfo(ctx context.Context, in *GetTowerInfoRequest, opts ...grpc.CallOption) (*Tower, error) { - out := new(Tower) - err := c.cc.Invoke(ctx, "/wtclientrpc.WatchtowerClient/GetTowerInfo", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *watchtowerClientClient) Stats(ctx context.Context, in *StatsRequest, opts ...grpc.CallOption) (*StatsResponse, error) { - out := new(StatsResponse) - err := c.cc.Invoke(ctx, "/wtclientrpc.WatchtowerClient/Stats", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *watchtowerClientClient) Policy(ctx context.Context, in *PolicyRequest, opts ...grpc.CallOption) (*PolicyResponse, error) { - out := new(PolicyResponse) - err := c.cc.Invoke(ctx, "/wtclientrpc.WatchtowerClient/Policy", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// WatchtowerClientServer is the server API for WatchtowerClient service. -type WatchtowerClientServer interface { - // - //AddTower adds a new watchtower reachable at the given address and - //considers it for new sessions. If the watchtower already exists, then - //any new addresses included will be considered when dialing it for - //session negotiations and backups. - AddTower(context.Context, *AddTowerRequest) (*AddTowerResponse, error) - // - //RemoveTower removes a watchtower from being considered for future session - //negotiations and from being used for any subsequent backups until it's added - //again. 
If an address is provided, then this RPC only serves as a way of - //removing the address from the watchtower instead. - RemoveTower(context.Context, *RemoveTowerRequest) (*RemoveTowerResponse, error) - // ListTowers returns the list of watchtowers registered with the client. - ListTowers(context.Context, *ListTowersRequest) (*ListTowersResponse, error) - // GetTowerInfo retrieves information for a registered watchtower. - GetTowerInfo(context.Context, *GetTowerInfoRequest) (*Tower, error) - // Stats returns the in-memory statistics of the client since startup. - Stats(context.Context, *StatsRequest) (*StatsResponse, error) - // Policy returns the active watchtower client policy configuration. - Policy(context.Context, *PolicyRequest) (*PolicyResponse, error) -} - -// UnimplementedWatchtowerClientServer can be embedded to have forward compatible implementations. -type UnimplementedWatchtowerClientServer struct { -} - -func (*UnimplementedWatchtowerClientServer) AddTower(ctx context.Context, req *AddTowerRequest) (*AddTowerResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method AddTower not implemented") -} -func (*UnimplementedWatchtowerClientServer) RemoveTower(ctx context.Context, req *RemoveTowerRequest) (*RemoveTowerResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method RemoveTower not implemented") -} -func (*UnimplementedWatchtowerClientServer) ListTowers(ctx context.Context, req *ListTowersRequest) (*ListTowersResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListTowers not implemented") -} -func (*UnimplementedWatchtowerClientServer) GetTowerInfo(ctx context.Context, req *GetTowerInfoRequest) (*Tower, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetTowerInfo not implemented") -} -func (*UnimplementedWatchtowerClientServer) Stats(ctx context.Context, req *StatsRequest) (*StatsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Stats not 
implemented") -} -func (*UnimplementedWatchtowerClientServer) Policy(ctx context.Context, req *PolicyRequest) (*PolicyResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Policy not implemented") -} - -func RegisterWatchtowerClientServer(s *grpc.Server, srv WatchtowerClientServer) { - s.RegisterService(&_WatchtowerClient_serviceDesc, srv) -} - -func _WatchtowerClient_AddTower_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AddTowerRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(WatchtowerClientServer).AddTower(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/wtclientrpc.WatchtowerClient/AddTower", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(WatchtowerClientServer).AddTower(ctx, req.(*AddTowerRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _WatchtowerClient_RemoveTower_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RemoveTowerRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(WatchtowerClientServer).RemoveTower(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/wtclientrpc.WatchtowerClient/RemoveTower", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(WatchtowerClientServer).RemoveTower(ctx, req.(*RemoveTowerRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _WatchtowerClient_ListTowers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListTowersRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - 
return srv.(WatchtowerClientServer).ListTowers(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/wtclientrpc.WatchtowerClient/ListTowers", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(WatchtowerClientServer).ListTowers(ctx, req.(*ListTowersRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _WatchtowerClient_GetTowerInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetTowerInfoRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(WatchtowerClientServer).GetTowerInfo(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/wtclientrpc.WatchtowerClient/GetTowerInfo", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(WatchtowerClientServer).GetTowerInfo(ctx, req.(*GetTowerInfoRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _WatchtowerClient_Stats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(StatsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(WatchtowerClientServer).Stats(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/wtclientrpc.WatchtowerClient/Stats", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(WatchtowerClientServer).Stats(ctx, req.(*StatsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _WatchtowerClient_Policy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(PolicyRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return 
srv.(WatchtowerClientServer).Policy(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/wtclientrpc.WatchtowerClient/Policy", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(WatchtowerClientServer).Policy(ctx, req.(*PolicyRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _WatchtowerClient_serviceDesc = grpc.ServiceDesc{ - ServiceName: "wtclientrpc.WatchtowerClient", - HandlerType: (*WatchtowerClientServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "AddTower", - Handler: _WatchtowerClient_AddTower_Handler, - }, - { - MethodName: "RemoveTower", - Handler: _WatchtowerClient_RemoveTower_Handler, - }, - { - MethodName: "ListTowers", - Handler: _WatchtowerClient_ListTowers_Handler, - }, - { - MethodName: "GetTowerInfo", - Handler: _WatchtowerClient_GetTowerInfo_Handler, - }, - { - MethodName: "Stats", - Handler: _WatchtowerClient_Stats_Handler, - }, - { - MethodName: "Policy", - Handler: _WatchtowerClient_Policy_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "wtclientrpc/wtclient.proto", -} diff --git a/lnd/lnrpc/wtclientrpc/wtclient.pb.gw.go b/lnd/lnrpc/wtclientrpc/wtclient.pb.gw.go deleted file mode 100644 index 533bd9f7..00000000 --- a/lnd/lnrpc/wtclientrpc/wtclient.pb.gw.go +++ /dev/null @@ -1,590 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. -// source: wtclientrpc/wtclient.proto - -/* -Package wtclientrpc is a reverse proxy. - -It translates gRPC into RESTful JSON APIs. 
-*/ -package wtclientrpc - -import ( - "context" - "io" - "net/http" - - "github.com/golang/protobuf/descriptor" - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/status" -) - -// Suppress "imported and not used" errors -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray -var _ = descriptor.ForMessage - -func request_WatchtowerClient_AddTower_0(ctx context.Context, marshaler runtime.Marshaler, client WatchtowerClientClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AddTowerRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.AddTower(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_WatchtowerClient_AddTower_0(ctx context.Context, marshaler runtime.Marshaler, server WatchtowerClientServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AddTowerRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := 
server.AddTower(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_WatchtowerClient_RemoveTower_0 = &utilities.DoubleArray{Encoding: map[string]int{"pubkey": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} -) - -func request_WatchtowerClient_RemoveTower_0(ctx context.Context, marshaler runtime.Marshaler, client WatchtowerClientClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq RemoveTowerRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["pubkey"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "pubkey") - } - - protoReq.Pubkey, err = runtime.Bytes(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "pubkey", err) - } - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_WatchtowerClient_RemoveTower_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.RemoveTower(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_WatchtowerClient_RemoveTower_0(ctx context.Context, marshaler runtime.Marshaler, server WatchtowerClientServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq RemoveTowerRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["pubkey"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "pubkey") - } - - protoReq.Pubkey, err = runtime.Bytes(val) - - if err != nil { - return nil, metadata, 
status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "pubkey", err) - } - - if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_WatchtowerClient_RemoveTower_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.RemoveTower(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_WatchtowerClient_ListTowers_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_WatchtowerClient_ListTowers_0(ctx context.Context, marshaler runtime.Marshaler, client WatchtowerClientClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ListTowersRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_WatchtowerClient_ListTowers_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.ListTowers(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_WatchtowerClient_ListTowers_0(ctx context.Context, marshaler runtime.Marshaler, server WatchtowerClientServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ListTowersRequest - var metadata runtime.ServerMetadata - - if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_WatchtowerClient_ListTowers_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.ListTowers(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_WatchtowerClient_GetTowerInfo_0 = &utilities.DoubleArray{Encoding: map[string]int{"pubkey": 0}, Base: 
[]int{1, 1, 0}, Check: []int{0, 1, 2}} -) - -func request_WatchtowerClient_GetTowerInfo_0(ctx context.Context, marshaler runtime.Marshaler, client WatchtowerClientClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq GetTowerInfoRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["pubkey"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "pubkey") - } - - protoReq.Pubkey, err = runtime.Bytes(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "pubkey", err) - } - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_WatchtowerClient_GetTowerInfo_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.GetTowerInfo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_WatchtowerClient_GetTowerInfo_0(ctx context.Context, marshaler runtime.Marshaler, server WatchtowerClientServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq GetTowerInfoRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["pubkey"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "pubkey") - } - - protoReq.Pubkey, err = runtime.Bytes(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "pubkey", err) - } - - if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), 
filter_WatchtowerClient_GetTowerInfo_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.GetTowerInfo(ctx, &protoReq) - return msg, metadata, err - -} - -func request_WatchtowerClient_Stats_0(ctx context.Context, marshaler runtime.Marshaler, client WatchtowerClientClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq StatsRequest - var metadata runtime.ServerMetadata - - msg, err := client.Stats(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_WatchtowerClient_Stats_0(ctx context.Context, marshaler runtime.Marshaler, server WatchtowerClientServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq StatsRequest - var metadata runtime.ServerMetadata - - msg, err := server.Stats(ctx, &protoReq) - return msg, metadata, err - -} - -func request_WatchtowerClient_Policy_0(ctx context.Context, marshaler runtime.Marshaler, client WatchtowerClientClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq PolicyRequest - var metadata runtime.ServerMetadata - - msg, err := client.Policy(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_WatchtowerClient_Policy_0(ctx context.Context, marshaler runtime.Marshaler, server WatchtowerClientServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq PolicyRequest - var metadata runtime.ServerMetadata - - msg, err := server.Policy(ctx, &protoReq) - return msg, metadata, err - -} - -// RegisterWatchtowerClientHandlerServer registers the http handlers for service WatchtowerClient to "mux". -// UnaryRPC :call WatchtowerClientServer directly. 
-// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -func RegisterWatchtowerClientHandlerServer(ctx context.Context, mux *runtime.ServeMux, server WatchtowerClientServer) error { - - mux.Handle("POST", pattern_WatchtowerClient_AddTower_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_WatchtowerClient_AddTower_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_WatchtowerClient_AddTower_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("DELETE", pattern_WatchtowerClient_RemoveTower_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_WatchtowerClient_RemoveTower_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_WatchtowerClient_RemoveTower_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_WatchtowerClient_ListTowers_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_WatchtowerClient_ListTowers_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_WatchtowerClient_ListTowers_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_WatchtowerClient_GetTowerInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_WatchtowerClient_GetTowerInfo_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_WatchtowerClient_GetTowerInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_WatchtowerClient_Stats_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_WatchtowerClient_Stats_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_WatchtowerClient_Stats_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_WatchtowerClient_Policy_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_WatchtowerClient_Policy_0(rctx, inboundMarshaler, server, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_WatchtowerClient_Policy_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -// RegisterWatchtowerClientHandlerFromEndpoint is same as RegisterWatchtowerClientHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. 
-func RegisterWatchtowerClientHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) - if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterWatchtowerClientHandler(ctx, mux, conn) -} - -// RegisterWatchtowerClientHandler registers the http handlers for service WatchtowerClient to "mux". -// The handlers forward requests to the grpc endpoint over "conn". -func RegisterWatchtowerClientHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterWatchtowerClientHandlerClient(ctx, mux, NewWatchtowerClientClient(conn)) -} - -// RegisterWatchtowerClientHandlerClient registers the http handlers for service WatchtowerClient -// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "WatchtowerClientClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "WatchtowerClientClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "WatchtowerClientClient" to call the correct interceptors. 
-func RegisterWatchtowerClientHandlerClient(ctx context.Context, mux *runtime.ServeMux, client WatchtowerClientClient) error { - - mux.Handle("POST", pattern_WatchtowerClient_AddTower_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_WatchtowerClient_AddTower_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_WatchtowerClient_AddTower_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("DELETE", pattern_WatchtowerClient_RemoveTower_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_WatchtowerClient_RemoveTower_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_WatchtowerClient_RemoveTower_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_WatchtowerClient_ListTowers_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_WatchtowerClient_ListTowers_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_WatchtowerClient_ListTowers_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_WatchtowerClient_GetTowerInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_WatchtowerClient_GetTowerInfo_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_WatchtowerClient_GetTowerInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_WatchtowerClient_Stats_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_WatchtowerClient_Stats_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_WatchtowerClient_Stats_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_WatchtowerClient_Policy_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_WatchtowerClient_Policy_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_WatchtowerClient_Policy_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - return nil -} - -var ( - pattern_WatchtowerClient_AddTower_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v2", "watchtower", "client"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_WatchtowerClient_RemoveTower_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"v2", "watchtower", "client", "pubkey"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_WatchtowerClient_ListTowers_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v2", "watchtower", "client"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_WatchtowerClient_GetTowerInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"v2", "watchtower", "client", "info", "pubkey"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_WatchtowerClient_Stats_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v2", "watchtower", "client", "stats"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_WatchtowerClient_Policy_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v2", "watchtower", "client", "policy"}, "", runtime.AssumeColonVerbOpt(true))) -) - -var ( - forward_WatchtowerClient_AddTower_0 = runtime.ForwardResponseMessage - - forward_WatchtowerClient_RemoveTower_0 = runtime.ForwardResponseMessage - - forward_WatchtowerClient_ListTowers_0 = runtime.ForwardResponseMessage - - forward_WatchtowerClient_GetTowerInfo_0 = runtime.ForwardResponseMessage - - forward_WatchtowerClient_Stats_0 = runtime.ForwardResponseMessage - - forward_WatchtowerClient_Policy_0 = runtime.ForwardResponseMessage -) diff --git a/lnd/lnrpc/wtclientrpc/wtclient.proto b/lnd/lnrpc/wtclientrpc/wtclient.proto deleted file mode 100644 index a908bcae..00000000 --- a/lnd/lnrpc/wtclientrpc/wtclient.proto +++ /dev/null @@ -1,167 +0,0 @@ -syntax = "proto3"; - -package wtclientrpc; - -option go_package 
= "github.com/pkt-cash/pktd/lnd/lnrpc/wtclientrpc"; - -// WatchtowerClient is a service that grants access to the watchtower client -// functionality of the daemon. -service WatchtowerClient { - /* - AddTower adds a new watchtower reachable at the given address and - considers it for new sessions. If the watchtower already exists, then - any new addresses included will be considered when dialing it for - session negotiations and backups. - */ - rpc AddTower (AddTowerRequest) returns (AddTowerResponse); - - /* - RemoveTower removes a watchtower from being considered for future session - negotiations and from being used for any subsequent backups until it's added - again. If an address is provided, then this RPC only serves as a way of - removing the address from the watchtower instead. - */ - rpc RemoveTower (RemoveTowerRequest) returns (RemoveTowerResponse); - - // ListTowers returns the list of watchtowers registered with the client. - rpc ListTowers (ListTowersRequest) returns (ListTowersResponse); - - // GetTowerInfo retrieves information for a registered watchtower. - rpc GetTowerInfo (GetTowerInfoRequest) returns (Tower); - - // Stats returns the in-memory statistics of the client since startup. - rpc Stats (StatsRequest) returns (StatsResponse); - - // Policy returns the active watchtower client policy configuration. - rpc Policy (PolicyRequest) returns (PolicyResponse); -} - -message AddTowerRequest { - // The identifying public key of the watchtower to add. - bytes pubkey = 1; - - // A network address the watchtower is reachable over. - string address = 2; -} - -message AddTowerResponse { -} - -message RemoveTowerRequest { - // The identifying public key of the watchtower to remove. - bytes pubkey = 1; - - /* - If set, then the record for this address will be removed, indicating that is - is stale. Otherwise, the watchtower will no longer be used for future - session negotiations and backups. 
- */ - string address = 2; -} - -message RemoveTowerResponse { -} - -message GetTowerInfoRequest { - // The identifying public key of the watchtower to retrieve information for. - bytes pubkey = 1; - - // Whether we should include sessions with the watchtower in the response. - bool include_sessions = 2; -} - -message TowerSession { - /* - The total number of successful backups that have been made to the - watchtower session. - */ - uint32 num_backups = 1; - - /* - The total number of backups in the session that are currently pending to be - acknowledged by the watchtower. - */ - uint32 num_pending_backups = 2; - - // The maximum number of backups allowed by the watchtower session. - uint32 max_backups = 3; - - /* - The fee rate, in satoshis per vbyte, that will be used by the watchtower for - the justice transaction in the event of a channel breach. - */ - uint32 sweep_sat_per_byte = 4; -} - -message Tower { - // The identifying public key of the watchtower. - bytes pubkey = 1; - - // The list of addresses the watchtower is reachable over. - repeated string addresses = 2; - - // Whether the watchtower is currently a candidate for new sessions. - bool active_session_candidate = 3; - - // The number of sessions that have been negotiated with the watchtower. - uint32 num_sessions = 4; - - // The list of sessions that have been negotiated with the watchtower. - repeated TowerSession sessions = 5; -} - -message ListTowersRequest { - // Whether we should include sessions with the watchtower in the response. - bool include_sessions = 1; -} - -message ListTowersResponse { - // The list of watchtowers available for new backups. - repeated Tower towers = 1; -} - -message StatsRequest { -} - -message StatsResponse { - /* - The total number of backups made to all active and exhausted watchtower - sessions. - */ - uint32 num_backups = 1; - - /* - The total number of backups that are pending to be acknowledged by all - active and exhausted watchtower sessions. 
- */ - uint32 num_pending_backups = 2; - - /* - The total number of backups that all active and exhausted watchtower - sessions have failed to acknowledge. - */ - uint32 num_failed_backups = 3; - - // The total number of new sessions made to watchtowers. - uint32 num_sessions_acquired = 4; - - // The total number of watchtower sessions that have been exhausted. - uint32 num_sessions_exhausted = 5; -} - -message PolicyRequest { -} - -message PolicyResponse { - /* - The maximum number of updates each session we negotiate with watchtowers - should allow. - */ - uint32 max_updates = 1; - - /* - The fee rate, in satoshis per vbyte, that will be used by watchtowers for - justice transactions in response to channel breaches. - */ - uint32 sweep_sat_per_byte = 2; -} diff --git a/lnd/lnrpc/wtclientrpc/wtclient.swagger.json b/lnd/lnrpc/wtclientrpc/wtclient.swagger.json deleted file mode 100644 index 84234b01..00000000 --- a/lnd/lnrpc/wtclientrpc/wtclient.swagger.json +++ /dev/null @@ -1,374 +0,0 @@ -{ - "swagger": "2.0", - "info": { - "title": "wtclientrpc/wtclient.proto", - "version": "version not set" - }, - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "paths": { - "/v2/watchtower/client": { - "get": { - "summary": "ListTowers returns the list of watchtowers registered with the client.", - "operationId": "WatchtowerClient_ListTowers", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/wtclientrpcListTowersResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "include_sessions", - "description": "Whether we should include sessions with the watchtower in the response.", - "in": "query", - "required": false, - "type": "boolean" - } - ], - "tags": [ - "WatchtowerClient" - ] - }, - "post": { - "summary": "AddTower adds a new watchtower reachable at the 
given address and\nconsiders it for new sessions. If the watchtower already exists, then\nany new addresses included will be considered when dialing it for\nsession negotiations and backups.", - "operationId": "WatchtowerClient_AddTower", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/wtclientrpcAddTowerResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/wtclientrpcAddTowerRequest" - } - } - ], - "tags": [ - "WatchtowerClient" - ] - } - }, - "/v2/watchtower/client/info/{pubkey}": { - "get": { - "summary": "GetTowerInfo retrieves information for a registered watchtower.", - "operationId": "WatchtowerClient_GetTowerInfo", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/wtclientrpcTower" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "pubkey", - "description": "The identifying public key of the watchtower to retrieve information for.", - "in": "path", - "required": true, - "type": "string", - "format": "byte" - }, - { - "name": "include_sessions", - "description": "Whether we should include sessions with the watchtower in the response.", - "in": "query", - "required": false, - "type": "boolean" - } - ], - "tags": [ - "WatchtowerClient" - ] - } - }, - "/v2/watchtower/client/policy": { - "get": { - "summary": "Policy returns the active watchtower client policy configuration.", - "operationId": "WatchtowerClient_Policy", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/wtclientrpcPolicyResponse" - } - }, - "default": { - "description": "An unexpected error 
response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "tags": [ - "WatchtowerClient" - ] - } - }, - "/v2/watchtower/client/stats": { - "get": { - "summary": "Stats returns the in-memory statistics of the client since startup.", - "operationId": "WatchtowerClient_Stats", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/wtclientrpcStatsResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "tags": [ - "WatchtowerClient" - ] - } - }, - "/v2/watchtower/client/{pubkey}": { - "delete": { - "summary": "RemoveTower removes a watchtower from being considered for future session\nnegotiations and from being used for any subsequent backups until it's added\nagain. If an address is provided, then this RPC only serves as a way of\nremoving the address from the watchtower instead.", - "operationId": "WatchtowerClient_RemoveTower", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/wtclientrpcRemoveTowerResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "pubkey", - "description": "The identifying public key of the watchtower to remove.", - "in": "path", - "required": true, - "type": "string", - "format": "byte" - }, - { - "name": "address", - "description": "If set, then the record for this address will be removed, indicating that is\nis stale. 
Otherwise, the watchtower will no longer be used for future\nsession negotiations and backups.", - "in": "query", - "required": false, - "type": "string" - } - ], - "tags": [ - "WatchtowerClient" - ] - } - } - }, - "definitions": { - "protobufAny": { - "type": "object", - "properties": { - "type_url": { - "type": "string" - }, - "value": { - "type": "string", - "format": "byte" - } - } - }, - "runtimeError": { - "type": "object", - "properties": { - "error": { - "type": "string" - }, - "code": { - "type": "integer", - "format": "int32" - }, - "message": { - "type": "string" - }, - "details": { - "type": "array", - "items": { - "$ref": "#/definitions/protobufAny" - } - } - } - }, - "wtclientrpcAddTowerRequest": { - "type": "object", - "properties": { - "pubkey": { - "type": "string", - "format": "byte", - "description": "The identifying public key of the watchtower to add." - }, - "address": { - "type": "string", - "description": "A network address the watchtower is reachable over." - } - } - }, - "wtclientrpcAddTowerResponse": { - "type": "object" - }, - "wtclientrpcListTowersResponse": { - "type": "object", - "properties": { - "towers": { - "type": "array", - "items": { - "$ref": "#/definitions/wtclientrpcTower" - }, - "description": "The list of watchtowers available for new backups." - } - } - }, - "wtclientrpcPolicyResponse": { - "type": "object", - "properties": { - "max_updates": { - "type": "integer", - "format": "int64", - "description": "The maximum number of updates each session we negotiate with watchtowers\nshould allow." - }, - "sweep_sat_per_byte": { - "type": "integer", - "format": "int64", - "description": "The fee rate, in satoshis per vbyte, that will be used by watchtowers for\njustice transactions in response to channel breaches." 
- } - } - }, - "wtclientrpcRemoveTowerResponse": { - "type": "object" - }, - "wtclientrpcStatsResponse": { - "type": "object", - "properties": { - "num_backups": { - "type": "integer", - "format": "int64", - "description": "The total number of backups made to all active and exhausted watchtower\nsessions." - }, - "num_pending_backups": { - "type": "integer", - "format": "int64", - "description": "The total number of backups that are pending to be acknowledged by all\nactive and exhausted watchtower sessions." - }, - "num_failed_backups": { - "type": "integer", - "format": "int64", - "description": "The total number of backups that all active and exhausted watchtower\nsessions have failed to acknowledge." - }, - "num_sessions_acquired": { - "type": "integer", - "format": "int64", - "description": "The total number of new sessions made to watchtowers." - }, - "num_sessions_exhausted": { - "type": "integer", - "format": "int64", - "description": "The total number of watchtower sessions that have been exhausted." - } - } - }, - "wtclientrpcTower": { - "type": "object", - "properties": { - "pubkey": { - "type": "string", - "format": "byte", - "description": "The identifying public key of the watchtower." - }, - "addresses": { - "type": "array", - "items": { - "type": "string" - }, - "description": "The list of addresses the watchtower is reachable over." - }, - "active_session_candidate": { - "type": "boolean", - "description": "Whether the watchtower is currently a candidate for new sessions." - }, - "num_sessions": { - "type": "integer", - "format": "int64", - "description": "The number of sessions that have been negotiated with the watchtower." - }, - "sessions": { - "type": "array", - "items": { - "$ref": "#/definitions/wtclientrpcTowerSession" - }, - "description": "The list of sessions that have been negotiated with the watchtower." 
- } - } - }, - "wtclientrpcTowerSession": { - "type": "object", - "properties": { - "num_backups": { - "type": "integer", - "format": "int64", - "description": "The total number of successful backups that have been made to the\nwatchtower session." - }, - "num_pending_backups": { - "type": "integer", - "format": "int64", - "description": "The total number of backups in the session that are currently pending to be\nacknowledged by the watchtower." - }, - "max_backups": { - "type": "integer", - "format": "int64", - "description": "The maximum number of backups allowed by the watchtower session." - }, - "sweep_sat_per_byte": { - "type": "integer", - "format": "int64", - "description": "The fee rate, in satoshis per vbyte, that will be used by the watchtower for\nthe justice transaction in the event of a channel breach." - } - } - } - } -} diff --git a/lnd/lntest/bitcoind.go b/lnd/lntest/bitcoind.go deleted file mode 100644 index b85b354e..00000000 --- a/lnd/lntest/bitcoind.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build bitcoind -// +build !notxindex - -package lntest - -import ( - "github.com/pkt-cash/pktd/chaincfg" -) - -// NewBackend starts a bitcoind node with the txindex enabled and returns a -// BitcoindBackendConfig for that node. -func NewBackend(miner string, netParams *chaincfg.Params) ( - *BitcoindBackendConfig, func() error, er.R) { - - extraArgs := []string{ - "-debug", - "-regtest", - "-txindex", - "-disablewallet", - } - - return newBackend(miner, netParams, extraArgs) -} diff --git a/lnd/lntest/bitcoind_common.go b/lnd/lntest/bitcoind_common.go deleted file mode 100644 index 5afe3744..00000000 --- a/lnd/lntest/bitcoind_common.go +++ /dev/null @@ -1,192 +0,0 @@ -// +build bitcoind - -package lntest - -import ( - "errors" - "fmt" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "time" - - "github.com/pkt-cash/pktd/chaincfg" - "github.com/pkt-cash/pktd/rpcclient" -) - -// logDirPattern is the pattern of the name of the temporary log directory. 
-const logDirPattern = "%s/.backendlogs" - -// BitcoindBackendConfig is an implementation of the BackendConfig interface -// backed by a Bitcoind node. -type BitcoindBackendConfig struct { - rpcHost string - rpcUser string - rpcPass string - zmqBlockPath string - zmqTxPath string - p2pPort int - rpcClient *rpcclient.Client - - // minerAddr is the p2p address of the miner to connect to. - minerAddr string -} - -// A compile time assertion to ensure BitcoindBackendConfig meets the -// BackendConfig interface. -var _ BackendConfig = (*BitcoindBackendConfig)(nil) - -// GenArgs returns the arguments needed to be passed to LND at startup for -// using this node as a chain backend. -func (b BitcoindBackendConfig) GenArgs() []string { - var args []string - args = append(args, "--bitcoin.node=bitcoind") - args = append(args, fmt.Sprintf("--bitcoind.rpchost=%v", b.rpcHost)) - args = append(args, fmt.Sprintf("--bitcoind.rpcuser=%v", b.rpcUser)) - args = append(args, fmt.Sprintf("--bitcoind.rpcpass=%v", b.rpcPass)) - args = append(args, fmt.Sprintf("--bitcoind.zmqpubrawblock=%v", - b.zmqBlockPath)) - args = append(args, fmt.Sprintf("--bitcoind.zmqpubrawtx=%v", - b.zmqTxPath)) - - return args -} - -// ConnectMiner is called to establish a connection to the test miner. -func (b BitcoindBackendConfig) ConnectMiner() er.R { - return b.rpcClient.AddNode(b.minerAddr, rpcclient.ANAdd) -} - -// DisconnectMiner is called to disconnect the miner. -func (b BitcoindBackendConfig) DisconnectMiner() er.R { - return b.rpcClient.AddNode(b.minerAddr, rpcclient.ANRemove) -} - -// Name returns the name of the backend type. -func (b BitcoindBackendConfig) Name() string { - return "bitcoind" -} - -// newBackend starts a bitcoind node with the given extra parameters and returns -// a BitcoindBackendConfig for that node. 
-func newBackend(miner string, netParams *chaincfg.Params, extraArgs []string) ( - *BitcoindBackendConfig, func() error, er.R) { - - baseLogDir := fmt.Sprintf(logDirPattern, GetLogDir()) - if netParams != &chaincfg.RegressionNetParams { - return nil, nil, er.Errorf("only regtest supported") - } - - if err := os.MkdirAll(baseLogDir, 0700); err != nil { - return nil, nil, err - } - - logFile, err := filepath.Abs(baseLogDir + "/bitcoind.log") - if err != nil { - return nil, nil, err - } - - tempBitcoindDir, err := ioutil.TempDir("", "bitcoind") - if err != nil { - return nil, nil, - er.Errorf("unable to create temp directory: %v", err) - } - - zmqBlockAddr := fmt.Sprintf("tcp://127.0.0.1:%d", nextAvailablePort()) - zmqTxAddr := fmt.Sprintf("tcp://127.0.0.1:%d", nextAvailablePort()) - rpcPort := nextAvailablePort() - p2pPort := nextAvailablePort() - - cmdArgs := []string{ - "-datadir=" + tempBitcoindDir, - "-whitelist=127.0.0.1", // whitelist localhost to speed up relay - "-rpcauth=weks:469e9bb14ab2360f8e226efed5ca6f" + - "d$507c670e800a95284294edb5773b05544b" + - "220110063096c221be9933c82d38e1", - fmt.Sprintf("-rpcport=%d", rpcPort), - fmt.Sprintf("-port=%d", p2pPort), - "-zmqpubrawblock=" + zmqBlockAddr, - "-zmqpubrawtx=" + zmqTxAddr, - "-debuglogfile=" + logFile, - } - cmdArgs = append(cmdArgs, extraArgs...) - bitcoind := exec.Command("bitcoind", cmdArgs...) - - err = bitcoind.Start() - if err != nil { - if err := os.RemoveAll(tempBitcoindDir); err != nil { - fmt.Printf("unable to remote temp dir %v: %v", - tempBitcoindDir, err) - } - return nil, nil, er.Errorf("couldn't start bitcoind: %v", err) - } - - cleanUp := func() er.R { - _ = bitcoind.Process.Kill() - _ = bitcoind.Wait() - - var errStr string - // After shutting down the chain backend, we'll make a copy of - // the log file before deleting the temporary log dir. 
- logDestination := fmt.Sprintf( - "%s/output_bitcoind_chainbackend.log", GetLogDir(), - ) - err := CopyFile(logDestination, logFile) - if err != nil { - errStr += fmt.Sprintf("unable to copy file: %v\n", err) - } - if err = os.RemoveAll(baseLogDir); err != nil { - errStr += fmt.Sprintf( - "cannot remove dir %s: %v\n", baseLogDir, err, - ) - } - if err := os.RemoveAll(tempBitcoindDir); err != nil { - errStr += fmt.Sprintf( - "cannot remove dir %s: %v\n", - tempBitcoindDir, err, - ) - } - if errStr != "" { - return er.New(errStr) - } - return nil - } - - // Allow process to start. - time.Sleep(1 * time.Second) - - rpcHost := fmt.Sprintf("127.0.0.1:%d", rpcPort) - rpcUser := "weks" - rpcPass := "weks" - - rpcCfg := rpcclient.ConnConfig{ - Host: rpcHost, - User: rpcUser, - Pass: rpcPass, - DisableConnectOnNew: true, - DisableAutoReconnect: false, - DisableTLS: true, - HTTPPostMode: true, - } - - client, err := rpcclient.New(&rpcCfg, nil) - if err != nil { - _ = cleanUp() - return nil, nil, er.Errorf("unable to create rpc client: %v", - err) - } - - bd := BitcoindBackendConfig{ - rpcHost: rpcHost, - rpcUser: rpcUser, - rpcPass: rpcPass, - zmqBlockPath: zmqBlockAddr, - zmqTxPath: zmqTxAddr, - p2pPort: p2pPort, - rpcClient: client, - minerAddr: miner, - } - - return &bd, cleanUp, nil -} diff --git a/lnd/lntest/bitcoind_notxindex.go b/lnd/lntest/bitcoind_notxindex.go deleted file mode 100644 index 7c7ca48e..00000000 --- a/lnd/lntest/bitcoind_notxindex.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build bitcoind -// +build notxindex - -package lntest - -import ( - "github.com/pkt-cash/pktd/chaincfg" -) - -// NewBackend starts a bitcoind node without the txindex enabled and returns a -// BitoindBackendConfig for that node. 
-func NewBackend(miner string, netParams *chaincfg.Params) ( - *BitcoindBackendConfig, func() error, er.R) { - - extraArgs := []string{ - "-debug", - "-regtest", - "-disablewallet", - } - - return newBackend(miner, netParams, extraArgs) -} diff --git a/lnd/lntest/btcd.go b/lnd/lntest/btcd.go deleted file mode 100644 index 43c6e450..00000000 --- a/lnd/lntest/btcd.go +++ /dev/null @@ -1,134 +0,0 @@ -// +build !bitcoind,!neutrino - -package lntest - -import ( - "encoding/hex" - "fmt" - "os" - - "github.com/pkt-cash/pktd/btcjson" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/chaincfg" - "github.com/pkt-cash/pktd/integration/rpctest" - "github.com/pkt-cash/pktd/rpcclient" -) - -// logDirPattern is the pattern of the name of the temporary log directory. -const logDirPattern = "%s/.backendlogs" - -// temp is used to signal we want to establish a temporary connection using the -// btcd Node API. -// -// NOTE: Cannot be const, since the node API expects a reference. -var temp = "temp" - -// BtcdBackendConfig is an implementation of the BackendConfig interface -// backed by a btcd node. -type BtcdBackendConfig struct { - // rpcConfig houses the connection config to the backing btcd instance. - rpcConfig rpcclient.ConnConfig - - // harness is the backing btcd instance. - harness *rpctest.Harness - - // minerAddr is the p2p address of the miner to connect to. - minerAddr string -} - -// A compile time assertion to ensure BtcdBackendConfig meets the BackendConfig -// interface. -var _ BackendConfig = (*BtcdBackendConfig)(nil) - -// GenArgs returns the arguments needed to be passed to LND at startup for -// using this node as a chain backend. 
-func (b BtcdBackendConfig) GenArgs() []string { - var args []string - encodedCert := hex.EncodeToString(b.rpcConfig.Certificates) - args = append(args, "--bitcoin.node=btcd") - args = append(args, fmt.Sprintf("--btcd.rpchost=%v", b.rpcConfig.Host)) - args = append(args, fmt.Sprintf("--btcd.rpcuser=%v", b.rpcConfig.User)) - args = append(args, fmt.Sprintf("--btcd.rpcpass=%v", b.rpcConfig.Pass)) - args = append(args, fmt.Sprintf("--btcd.rawrpccert=%v", encodedCert)) - - return args -} - -// ConnectMiner is called to establish a connection to the test miner. -func (b BtcdBackendConfig) ConnectMiner() er.R { - return b.harness.Node.Node(btcjson.NConnect, b.minerAddr, &temp) -} - -// DisconnectMiner is called to disconnect the miner. -func (b BtcdBackendConfig) DisconnectMiner() er.R { - return b.harness.Node.Node(btcjson.NDisconnect, b.minerAddr, &temp) -} - -// Name returns the name of the backend type. -func (b BtcdBackendConfig) Name() string { - return "btcd" -} - -// NewBackend starts a new rpctest.Harness and returns a BtcdBackendConfig for -// that node. miner should be set to the P2P address of the miner to connect -// to. -func NewBackend(miner string, netParams *chaincfg.Params) ( - *BtcdBackendConfig, func() er.R, er.R) { - - baseLogDir := fmt.Sprintf(logDirPattern, GetLogDir()) - args := []string{ - "--rejectnonstd", - "--txindex", - "--trickleinterval=100ms", - "--debuglevel=debug", - "--logdir=" + baseLogDir, - "--nowinservice", - // The miner will get banned and disconnected from the node if - // its requested data are not found. We add a nobanning flag to - // make sure they stay connected if it happens. 
- "--nobanning", - } - chainBackend, err := rpctest.New(netParams, nil, args) - if err != nil { - return nil, nil, er.Errorf("unable to create btcd node: %v", err) - } - - if err := chainBackend.SetUp(false, 0); err != nil { - return nil, nil, er.Errorf("unable to set up btcd backend: %v", err) - } - - bd := &BtcdBackendConfig{ - rpcConfig: chainBackend.RPCConfig(), - harness: chainBackend, - minerAddr: miner, - } - - cleanUp := func() er.R { - var errStr string - if err := chainBackend.TearDown(); err != nil { - errStr += err.String() + "\n" - } - - // After shutting down the chain backend, we'll make a copy of - // the log file before deleting the temporary log dir. - logFile := baseLogDir + "/" + netParams.Name + "/btcd.log" - logDestination := fmt.Sprintf( - "%s/output_btcd_chainbackend.log", GetLogDir(), - ) - err := CopyFile(logDestination, logFile) - if err != nil { - errStr += fmt.Sprintf("unable to copy file: %v\n", err) - } - if errr := os.RemoveAll(baseLogDir); errr != nil { - errStr += fmt.Sprintf( - "cannot remove dir %s: %v\n", baseLogDir, errr, - ) - } - if errStr != "" { - return er.New(errStr) - } - return nil - } - - return bd, cleanUp, nil -} diff --git a/lnd/lntest/doc.go b/lnd/lntest/doc.go deleted file mode 100644 index 38ef4089..00000000 --- a/lnd/lntest/doc.go +++ /dev/null @@ -1,10 +0,0 @@ -/* -Package lntest provides testing utilities for the lnd repository. - -This package contains infrastructure for integration tests that launch full lnd -nodes in a controlled environment and interact with them via RPC. Using a -NetworkHarness, a test can launch multiple lnd nodes, open channels between -them, create defined network topologies, and anything else that is possible with -RPC commands. 
-*/ -package lntest diff --git a/lnd/lntest/fee_service.go b/lnd/lntest/fee_service.go deleted file mode 100644 index be00abb6..00000000 --- a/lnd/lntest/fee_service.go +++ /dev/null @@ -1,102 +0,0 @@ -package lntest - -import ( - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "sync" - - "github.com/pkt-cash/pktd/lnd/lnwallet/chainfee" -) - -const ( - // feeServiceTarget is the confirmation target for which a fee estimate - // is returned. Requests for higher confirmation targets will fall back - // to this. - feeServiceTarget = 2 -) - -// feeService runs a web service that provides fee estimation information. -type feeService struct { - feeEstimates - - srv *http.Server - wg sync.WaitGroup - - url string - - lock sync.Mutex -} - -// feeEstimates contains the current fee estimates. -type feeEstimates struct { - Fees map[uint32]uint32 `json:"fee_by_block_target"` -} - -// startFeeService spins up a go-routine to serve fee estimates. -func startFeeService() *feeService { - port := nextAvailablePort() - f := feeService{ - url: fmt.Sprintf("http://localhost:%v/fee-estimates.json", port), - } - - // Initialize default fee estimate. - f.Fees = map[uint32]uint32{feeServiceTarget: 50000} - - listenAddr := fmt.Sprintf(":%v", port) - f.srv = &http.Server{ - Addr: listenAddr, - } - - http.HandleFunc("/fee-estimates.json", f.handleRequest) - - f.wg.Add(1) - go func() { - defer f.wg.Done() - - if err := f.srv.ListenAndServe(); err != http.ErrServerClosed { - fmt.Printf("error: cannot start fee api: %v", err) - } - }() - - return &f -} - -// handleRequest handles a client request for fee estimates. 
-func (f *feeService) handleRequest(w http.ResponseWriter, r *http.Request) { - f.lock.Lock() - defer f.lock.Unlock() - - bytes, err := json.Marshal(f.feeEstimates) - if err != nil { - fmt.Printf("error: cannot serialize "+ - "estimates: %v", err) - - return - } - - _, err = io.WriteString(w, string(bytes)) - if err != nil { - fmt.Printf("error: cannot send estimates: %v", - err) - } -} - -// stop stops the web server. -func (f *feeService) stop() { - if err := f.srv.Shutdown(context.Background()); err != nil { - fmt.Printf("error: cannot stop fee api: %v", err) - } - - f.wg.Wait() -} - -// setFee changes the current fee estimate for the fixed confirmation target. -func (f *feeService) setFee(fee chainfee.SatPerKWeight) { - f.lock.Lock() - defer f.lock.Unlock() - - f.Fees[feeServiceTarget] = uint32(fee.FeePerKVByte()) -} diff --git a/lnd/lntest/fee_service_test.go b/lnd/lntest/fee_service_test.go deleted file mode 100644 index eade2e01..00000000 --- a/lnd/lntest/fee_service_test.go +++ /dev/null @@ -1,39 +0,0 @@ -package lntest - -import ( - "io/ioutil" - "net/http" - "testing" - "time" - - "github.com/stretchr/testify/require" -) - -// TestFeeService tests the itest fee estimating web service. -func TestFeeService(t *testing.T) { - service := startFeeService() - defer service.stop() - - service.setFee(5000) - - // Wait for service to start accepting connections. 
- var resp *http.Response - require.Eventually( - t, - func() bool { - var err error - resp, err = http.Get(service.url) // nolint:bodyclose - return err == nil - }, - 10*time.Second, time.Second, - ) - - defer resp.Body.Close() - - body, errr := ioutil.ReadAll(resp.Body) - require.NoError(t, errr) - - require.Equal( - t, "{\"fee_by_block_target\":{\"2\":20000}}", string(body), - ) -} diff --git a/lnd/lntest/harness.go b/lnd/lntest/harness.go deleted file mode 100644 index abddf36a..00000000 --- a/lnd/lntest/harness.go +++ /dev/null @@ -1,1450 +0,0 @@ -package lntest - -import ( - "context" - "encoding/hex" - "fmt" - "io" - "io/ioutil" - "net/http" - "os" - "path/filepath" - "strings" - "sync" - "time" - - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/chaincfg" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/integration/rpctest" - "github.com/pkt-cash/pktd/lnd" - "github.com/pkt-cash/pktd/lnd/lnrpc" - "github.com/pkt-cash/pktd/lnd/lntest/wait" - "github.com/pkt-cash/pktd/lnd/lnwallet/chainfee" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/txscript" - "github.com/pkt-cash/pktd/wire" - "google.golang.org/grpc/grpclog" -) - -// DefaultCSV is the CSV delay (remotedelay) we will start our test nodes with. -const DefaultCSV = 4 - -// NetworkHarness is an integration testing harness for the lightning network. -// The harness by default is created with two active nodes on the network: -// Alice and Bob. -type NetworkHarness struct { - netParams *chaincfg.Params - - // currentTestCase holds the name for the currently run test case. - currentTestCase string - - // lndBinary is the full path to the lnd binary that was specifically - // compiled with all required itest flags. - lndBinary string - - // Miner is a reference to a running full node that can be used to create - // new blocks on the network. 
- Miner *rpctest.Harness - - // BackendCfg houses the information necessary to use a node as LND - // chain backend, such as rpc configuration, P2P information etc. - BackendCfg BackendConfig - - activeNodes map[int]*HarnessNode - - nodesByPub map[string]*HarnessNode - - // Alice and Bob are the initial seeder nodes that are automatically - // created to be the initial participants of the test network. - Alice *HarnessNode - Bob *HarnessNode - - // useEtcd is set to true if new nodes are to be created with an - // embedded etcd backend instead of just bbolt. - useEtcd bool - - // Channel for transmitting stderr output from failed lightning node - // to main process. - lndErrorChan chan er.R - - // feeService is a web service that provides external fee estimates to - // lnd. - feeService *feeService - - quit chan struct{} - - mtx sync.Mutex -} - -// NewNetworkHarness creates a new network test harness. -// TODO(roasbeef): add option to use golang's build library to a binary of the -// current repo. This will save developers from having to manually `go install` -// within the repo each time before changes -func NewNetworkHarness(r *rpctest.Harness, b BackendConfig, lndBinary string, - useEtcd bool) (*NetworkHarness, er.R) { - - feeService := startFeeService() - - n := NetworkHarness{ - activeNodes: make(map[int]*HarnessNode), - nodesByPub: make(map[string]*HarnessNode), - lndErrorChan: make(chan er.R), - netParams: r.ActiveNet, - Miner: r, - BackendCfg: b, - feeService: feeService, - quit: make(chan struct{}), - lndBinary: lndBinary, - useEtcd: useEtcd, - } - return &n, nil -} - -// LookUpNodeByPub queries the set of active nodes to locate a node according -// to its public key. The second value will be true if the node was found, and -// false otherwise. 
-func (n *NetworkHarness) LookUpNodeByPub(pubStr string) (*HarnessNode, er.R) { - n.mtx.Lock() - defer n.mtx.Unlock() - - node, ok := n.nodesByPub[pubStr] - if !ok { - return nil, er.Errorf("unable to find node") - } - - return node, nil -} - -// ProcessErrors returns a channel used for reporting any fatal process errors. -// If any of the active nodes within the harness' test network incur a fatal -// error, that error is sent over this channel. -func (n *NetworkHarness) ProcessErrors() <-chan er.R { - return n.lndErrorChan -} - -// fakeLogger is a fake grpclog.Logger implementation. This is used to stop -// grpc's logger from printing directly to stdout. -type fakeLogger struct{} - -func (f *fakeLogger) Fatal(args ...interface{}) {} -func (f *fakeLogger) Fatalf(format string, args ...interface{}) {} -func (f *fakeLogger) Fatalln(args ...interface{}) {} -func (f *fakeLogger) Print(args ...interface{}) {} -func (f *fakeLogger) Printf(format string, args ...interface{}) {} -func (f *fakeLogger) Println(args ...interface{}) {} - -// SetUp starts the initial seeder nodes within the test harness. The initial -// node's wallets will be funded wallets with ten 1 BTC outputs each. Finally -// rpc clients capable of communicating with the initial seeder nodes are -// created. Nodes are initialized with the given extra command line flags, which -// should be formatted properly - "--arg=value". -func (n *NetworkHarness) SetUp(testCase string, lndArgs []string) er.R { - // Swap out grpc's default logger with out fake logger which drops the - // statements on the floor. - grpclog.SetLogger(&fakeLogger{}) - n.currentTestCase = testCase - - // Start the initial seeder nodes within the test network, then connect - // their respective RPC clients. 
- var wg sync.WaitGroup - errChan := make(chan er.R, 2) - wg.Add(2) - go func() { - defer wg.Done() - node, err := n.NewNode("Alice", lndArgs) - if err != nil { - errChan <- err - return - } - n.Alice = node - }() - go func() { - defer wg.Done() - node, err := n.NewNode("Bob", lndArgs) - if err != nil { - errChan <- err - return - } - n.Bob = node - }() - wg.Wait() - select { - case err := <-errChan: - return err - default: - } - - // Load up the wallets of the seeder nodes with 10 outputs of 1 BTC - // each. - ctxb := context.Background() - addrReq := &lnrpc.NewAddressRequest{ - Type: lnrpc.AddressType_WITNESS_PUBKEY_HASH, - } - clients := []lnrpc.LightningClient{n.Alice, n.Bob} - for _, client := range clients { - for i := 0; i < 10; i++ { - resp, errr := client.NewAddress(ctxb, addrReq) - if errr != nil { - return er.E(errr) - } - addr, err := btcutil.DecodeAddress(resp.Address, n.netParams) - if err != nil { - return err - } - addrScript, err := txscript.PayToAddrScript(addr) - if err != nil { - return err - } - - output := &wire.TxOut{ - PkScript: addrScript, - Value: btcutil.UnitsPerCoinI64(), - } - _, err = n.Miner.SendOutputs([]*wire.TxOut{output}, 7500) - if err != nil { - return err - } - } - } - - // We generate several blocks in order to give the outputs created - // above a good number of confirmations. - if _, err := n.Miner.Node.Generate(10); err != nil { - return err - } - - // Finally, make a connection between both of the nodes. - if err := n.ConnectNodes(ctxb, n.Alice, n.Bob); err != nil { - return err - } - - // Now block until both wallets have fully synced up. 
- expectedBalance := int64(btcutil.UnitsPerCoin() * 10) - balReq := &lnrpc.WalletBalanceRequest{} - balanceTicker := time.NewTicker(time.Millisecond * 50) - defer balanceTicker.Stop() - balanceTimeout := time.After(time.Second * 30) -out: - for { - select { - case <-balanceTicker.C: - aliceResp, errr := n.Alice.WalletBalance(ctxb, balReq) - if errr != nil { - return er.E(errr) - } - bobResp, errr := n.Bob.WalletBalance(ctxb, balReq) - if errr != nil { - return er.E(errr) - } - - if aliceResp.ConfirmedBalance == expectedBalance && - bobResp.ConfirmedBalance == expectedBalance { - break out - } - case <-balanceTimeout: - return er.Errorf("balances not synced after deadline") - } - } - - return nil -} - -// TearDown tears down all active nodes within the test lightning network. -func (n *NetworkHarness) TearDown() er.R { - for _, node := range n.activeNodes { - if err := n.ShutdownNode(node); err != nil { - return err - } - } - - return nil -} - -// Stop stops the test harness. -func (n *NetworkHarness) Stop() { - close(n.lndErrorChan) - close(n.quit) - - n.feeService.stop() -} - -// NewNode fully initializes a returns a new HarnessNode bound to the -// current instance of the network harness. The created node is running, but -// not yet connected to other nodes within the network. -func (n *NetworkHarness) NewNode(name string, extraArgs []string) (*HarnessNode, er.R) { - return n.newNode(name, extraArgs, false, nil) -} - -// NewNodeWithSeed fully initializes a new HarnessNode after creating a fresh -// aezeed. The provided password is used as both the aezeed password and the -// wallet password. The generated mnemonic is returned along with the -// initialized harness node. 
-func (n *NetworkHarness) NewNodeWithSeed(name string, extraArgs []string, - password []byte, statelessInit bool) (*HarnessNode, []string, []byte, - er.R) { - - node, err := n.newNode(name, extraArgs, true, password) - if err != nil { - return nil, nil, nil, err - } - - timeout := time.Duration(time.Second * 15) - ctxb := context.Background() - - // Create a request to generate a new aezeed. The new seed will have the - // same password as the internal wallet. - genSeedReq := &lnrpc.GenSeedRequest{ - AezeedPassphrase: password, - } - - ctxt, _ := context.WithTimeout(ctxb, timeout) - genSeedResp, errr := node.GenSeed(ctxt, genSeedReq) - if errr != nil { - return nil, nil, nil, er.E(errr) - } - - // With the seed created, construct the init request to the node, - // including the newly generated seed. - initReq := &lnrpc.InitWalletRequest{ - WalletPassword: password, - CipherSeedMnemonic: genSeedResp.CipherSeedMnemonic, - AezeedPassphrase: password, - StatelessInit: statelessInit, - } - - // Pass the init request via rpc to finish unlocking the node. This will - // also initialize the macaroon-authenticated LightningClient. - response, err := node.Init(ctxb, initReq) - if err != nil { - return nil, nil, nil, err - } - - // With the node started, we can now record its public key within the - // global mapping. - n.RegisterNode(node) - - // In stateless initialization mode we get a macaroon back that we have - // to return to the test, otherwise gRPC calls won't be possible since - // there are no macaroon files created in that mode. - // In stateful init the admin macaroon will just be nil. - return node, genSeedResp.CipherSeedMnemonic, response.AdminMacaroon, nil -} - -// RestoreNodeWithSeed fully initializes a HarnessNode using a chosen mnemonic, -// password, recovery window, and optionally a set of static channel backups. 
-// After providing the initialization request to unlock the node, this method -// will finish initializing the LightningClient such that the HarnessNode can -// be used for regular rpc operations. -func (n *NetworkHarness) RestoreNodeWithSeed(name string, extraArgs []string, - password []byte, mnemonic []string, recoveryWindow int32, - chanBackups *lnrpc.ChanBackupSnapshot) (*HarnessNode, er.R) { - - node, err := n.newNode(name, extraArgs, true, password) - if err != nil { - return nil, err - } - - initReq := &lnrpc.InitWalletRequest{ - WalletPassword: password, - CipherSeedMnemonic: mnemonic, - AezeedPassphrase: password, - RecoveryWindow: recoveryWindow, - ChannelBackups: chanBackups, - } - - _, err = node.Init(context.Background(), initReq) - if err != nil { - return nil, err - } - - // With the node started, we can now record its public key within the - // global mapping. - n.RegisterNode(node) - - return node, nil -} - -// newNode initializes a new HarnessNode, supporting the ability to initialize a -// wallet with or without a seed. If hasSeed is false, the returned harness node -// can be used immediately. Otherwise, the node will require an additional -// initialization phase where the wallet is either created or restored. -func (n *NetworkHarness) newNode(name string, extraArgs []string, hasSeed bool, - password []byte) (*HarnessNode, er.R) { - - node, err := newNode(NodeConfig{ - Name: name, - LogFilenamePrefix: n.currentTestCase, - HasSeed: hasSeed, - Password: password, - BackendCfg: n.BackendCfg, - NetParams: n.netParams, - ExtraArgs: extraArgs, - FeeURL: n.feeService.url, - Etcd: n.useEtcd, - }) - if err != nil { - return nil, err - } - - // Put node in activeNodes to ensure Shutdown is called even if Start - // returns an error. 
- n.mtx.Lock() - n.activeNodes[node.NodeID] = node - n.mtx.Unlock() - - if err := node.start(n.lndBinary, n.lndErrorChan); err != nil { - return nil, err - } - - // If this node is to have a seed, it will need to be unlocked or - // initialized via rpc. Delay registering it with the network until it - // can be driven via an unlocked rpc connection. - if node.Cfg.HasSeed { - return node, nil - } - - // With the node started, we can now record its public key within the - // global mapping. - n.RegisterNode(node) - - return node, nil -} - -// RegisterNode records a new HarnessNode in the NetworkHarnesses map of known -// nodes. This method should only be called with nodes that have successfully -// retrieved their public keys via FetchNodeInfo. -func (n *NetworkHarness) RegisterNode(node *HarnessNode) { - n.mtx.Lock() - n.nodesByPub[node.PubKeyStr] = node - n.mtx.Unlock() -} - -func (n *NetworkHarness) connect(ctx context.Context, - req *lnrpc.ConnectPeerRequest, a *HarnessNode) er.R { - - syncTimeout := time.After(15 * time.Second) -tryconnect: - if _, err := a.ConnectPeer(ctx, req); err != nil { - // If the chain backend is still syncing, retry. - if strings.Contains(err.Error(), lnd.ErrServerNotActive.Detail) || - strings.Contains(err.Error(), "i/o timeout") { - - select { - case <-time.After(100 * time.Millisecond): - goto tryconnect - case <-syncTimeout: - return er.Errorf("chain backend did not " + - "finish syncing") - } - } - return er.E(err) - } - - return nil -} - -// EnsureConnected will try to connect to two nodes, returning no error if they -// are already connected. If the nodes were not connected previously, this will -// behave the same as ConnectNodes. If a pending connection request has already -// been made, the method will block until the two nodes appear in each other's -// peers list, or until the 15s timeout expires. 
-func (n *NetworkHarness) EnsureConnected(ctx context.Context, a, b *HarnessNode) er.R { - // errConnectionRequested is used to signal that a connection was - // requested successfully, which is distinct from already being - // connected to the peer. - errConnectionRequested := er.New("connection request in progress") - - tryConnect := func(a, b *HarnessNode) er.R { - ctxt, _ := context.WithTimeout(ctx, 15*time.Second) - bInfo, errr := b.GetInfo(ctxt, &lnrpc.GetInfoRequest{}) - if errr != nil { - return er.E(errr) - } - - req := &lnrpc.ConnectPeerRequest{ - Addr: &lnrpc.LightningAddress{ - Pubkey: bInfo.IdentityPubkey, - Host: b.Cfg.P2PAddr(), - }, - } - - var predErr er.R - err := wait.Predicate(func() bool { - ctx, cancel := context.WithTimeout(ctx, 15*time.Second) - defer cancel() - - err := n.connect(ctx, req, a) - switch { - // Request was successful, wait for both to display the - // connection. - case err == nil: - predErr = errConnectionRequested - return true - - // If the two are already connected, we return early - // with no error. - case strings.Contains( - err.String(), "already connected to peer", - ): - predErr = nil - return true - - default: - predErr = err - return false - } - - }, DefaultTimeout) - if err != nil { - return er.Errorf("connection not succeeded within 15 "+ - "seconds: %v", predErr) - } - - return predErr - } - - aErr := tryConnect(a, b) - bErr := tryConnect(b, a) - switch { - // If both reported already being connected to each other, we can exit - // early. - case aErr == nil && bErr == nil: - return nil - - // Return any critical errors returned by either alice. - case aErr != nil && aErr != errConnectionRequested: - return aErr - - // Return any critical errors returned by either bob. - case bErr != nil && bErr != errConnectionRequested: - return bErr - - // Otherwise one or both requested a connection, so we wait for the - // peers lists to reflect the connection. 
- default: - } - - findSelfInPeerList := func(a, b *HarnessNode) bool { - // If node B is seen in the ListPeers response from node A, - // then we can exit early as the connection has been fully - // established. - ctxt, _ := context.WithTimeout(ctx, 15*time.Second) - resp, err := b.ListPeers(ctxt, &lnrpc.ListPeersRequest{}) - if err != nil { - return false - } - - for _, peer := range resp.Peers { - if peer.PubKey == a.PubKeyStr { - return true - } - } - - return false - } - - err := wait.Predicate(func() bool { - return findSelfInPeerList(a, b) && findSelfInPeerList(b, a) - }, time.Second*15) - if err != nil { - return er.Errorf("peers not connected within 15 seconds") - } - - return nil -} - -// ConnectNodes establishes an encrypted+authenticated p2p connection from node -// a towards node b. The function will return a non-nil error if the connection -// was unable to be established. -// -// NOTE: This function may block for up to 15-seconds as it will not return -// until the new connection is detected as being known to both nodes. -func (n *NetworkHarness) ConnectNodes(ctx context.Context, a, b *HarnessNode) er.R { - bobInfo, errr := b.GetInfo(ctx, &lnrpc.GetInfoRequest{}) - if errr != nil { - return er.E(errr) - } - - req := &lnrpc.ConnectPeerRequest{ - Addr: &lnrpc.LightningAddress{ - Pubkey: bobInfo.IdentityPubkey, - Host: b.Cfg.P2PAddr(), - }, - } - - if err := n.connect(ctx, req, a); err != nil { - return err - } - - err := wait.Predicate(func() bool { - // If node B is seen in the ListPeers response from node A, - // then we can exit early as the connection has been fully - // established. 
- resp, err := a.ListPeers(ctx, &lnrpc.ListPeersRequest{}) - if err != nil { - return false - } - - for _, peer := range resp.Peers { - if peer.PubKey == b.PubKeyStr { - return true - } - } - - return false - }, time.Second*15) - if err != nil { - return er.Errorf("peers not connected within 15 seconds") - } - - return nil -} - -// DisconnectNodes disconnects node a from node b by sending RPC message -// from a node to b node -func (n *NetworkHarness) DisconnectNodes(ctx context.Context, a, b *HarnessNode) er.R { - bobInfo, errr := b.GetInfo(ctx, &lnrpc.GetInfoRequest{}) - if errr != nil { - return er.E(errr) - } - - req := &lnrpc.DisconnectPeerRequest{ - PubKey: bobInfo.IdentityPubkey, - } - - if _, errr := a.DisconnectPeer(ctx, req); errr != nil { - return er.E(errr) - } - - return nil -} - -// RestartNode attempts to restart a lightning node by shutting it down -// cleanly, then restarting the process. This function is fully blocking. Upon -// restart, the RPC connection to the node will be re-attempted, continuing iff -// the connection attempt is successful. If the callback parameter is non-nil, -// then the function will be executed after the node shuts down, but *before* -// the process has been started up again. -// -// This method can be useful when testing edge cases such as a node broadcast -// and invalidated prior state, or persistent state recovery, simulating node -// crashes, etc. Additionally, each time the node is restarted, the caller can -// pass a set of SCBs to pass in via the Unlock method allowing them to restore -// channels during restart. -func (n *NetworkHarness) RestartNode(node *HarnessNode, callback func() er.R, - chanBackups ...*lnrpc.ChanBackupSnapshot) er.R { - - err := n.RestartNodeNoUnlock(node, callback) - if err != nil { - return err - } - - // If the node doesn't have a password set, then we can exit here as we - // don't need to unlock it. 
- if len(node.Cfg.Password) == 0 { - return nil - } - - // Otherwise, we'll unlock the wallet, then complete the final steps - // for the node initialization process. - unlockReq := &lnrpc.UnlockWalletRequest{ - WalletPassword: node.Cfg.Password, - } - if len(chanBackups) != 0 { - unlockReq.ChannelBackups = chanBackups[0] - unlockReq.RecoveryWindow = 1000 - } - - return node.Unlock(context.Background(), unlockReq) -} - -// RestartNodeNoUnlock attempts to restart a lightning node by shutting it down -// cleanly, then restarting the process. In case the node was setup with a seed, -// it will be left in the unlocked state. This function is fully blocking. If -// the callback parameter is non-nil, then the function will be executed after -// the node shuts down, but *before* the process has been started up again. -func (n *NetworkHarness) RestartNodeNoUnlock(node *HarnessNode, - callback func() er.R) er.R { - - if err := node.stop(); err != nil { - return err - } - - if callback != nil { - if err := callback(); err != nil { - return err - } - } - - return node.start(n.lndBinary, n.lndErrorChan) -} - -// SuspendNode stops the given node and returns a callback that can be used to -// start it again. -func (n *NetworkHarness) SuspendNode(node *HarnessNode) (func() er.R, er.R) { - if err := node.stop(); err != nil { - return nil, err - } - - restart := func() er.R { - return node.start(n.lndBinary, n.lndErrorChan) - } - - return restart, nil -} - -// ShutdownNode stops an active lnd process and returns when the process has -// exited and any temporary directories have been cleaned up. -func (n *NetworkHarness) ShutdownNode(node *HarnessNode) er.R { - if err := node.shutdown(); err != nil { - return err - } - - delete(n.activeNodes, node.NodeID) - return nil -} - -// StopNode stops the target node, but doesn't yet clean up its directories. -// This can be used to temporarily bring a node down during a test, to be later -// started up again. 
-func (n *NetworkHarness) StopNode(node *HarnessNode) er.R { - return node.stop() -} - -// SaveProfilesPages hits profiles pages of all active nodes and writes it to -// disk using a similar naming scheme as to the regular set of logs. -func (n *NetworkHarness) SaveProfilesPages() { - // Only write gorutine dumps if flag is active. - if !(*goroutineDump) { - return - } - - for _, node := range n.activeNodes { - if err := saveProfilesPage(node); err != nil { - fmt.Printf("Error: %v\n", err) - } - } -} - -// saveProfilesPage saves the profiles page for the given node to file. -func saveProfilesPage(node *HarnessNode) er.R { - resp, err := http.Get( - fmt.Sprintf( - "http://localhost:%d/debug/pprof/goroutine?debug=1", - node.Cfg.ProfilePort, - ), - ) - if err != nil { - return er.Errorf("failed to get profile page "+ - "(node_id=%d, name=%s): %v", - node.NodeID, node.Cfg.Name, err) - } - defer resp.Body.Close() - - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return er.Errorf("failed to read profile page "+ - "(node_id=%d, name=%s): %v", - node.NodeID, node.Cfg.Name, err) - } - - fileName := fmt.Sprintf( - "pprof-%d-%s-%s.log", node.NodeID, node.Cfg.Name, - hex.EncodeToString(node.PubKey[:logPubKeyBytes]), - ) - - logFile, err := os.Create(fileName) - if err != nil { - return er.Errorf("failed to create file for profile page "+ - "(node_id=%d, name=%s): %v", - node.NodeID, node.Cfg.Name, err) - } - defer logFile.Close() - - _, err = logFile.Write(body) - if err != nil { - return er.Errorf("failed to save profile page "+ - "(node_id=%d, name=%s): %v", - node.NodeID, node.Cfg.Name, err) - } - return nil -} - -// WaitForTxInMempool blocks until the target txid is seen in the mempool. If -// the transaction isn't seen within the network before the passed timeout, -// then an error is returned. -func (n *NetworkHarness) WaitForTxInMempool(ctx context.Context, - txid chainhash.Hash) er.R { - - // Return immediately if harness has been torn down. 
- select { - case <-n.quit: - return er.Errorf("NetworkHarness has been torn down") - default: - } - - ticker := time.NewTicker(50 * time.Millisecond) - defer ticker.Stop() - - var mempool []*chainhash.Hash - for { - select { - case <-ctx.Done(): - return er.Errorf("wanted %v, found %v txs "+ - "in mempool: %v", txid, len(mempool), mempool) - - case <-ticker.C: - var err er.R - mempool, err = n.Miner.Node.GetRawMempool() - if err != nil { - return err - } - - for _, mempoolTx := range mempool { - if *mempoolTx == txid { - return nil - } - } - } - } -} - -// OpenChannelParams houses the params to specify when opening a new channel. -type OpenChannelParams struct { - // Amt is the local amount being put into the channel. - Amt btcutil.Amount - - // PushAmt is the amount that should be pushed to the remote when the - // channel is opened. - PushAmt btcutil.Amount - - // Private is a boolan indicating whether the opened channel should be - // private. - Private bool - - // SpendUnconfirmed is a boolean indicating whether we can utilize - // unconfirmed outputs to fund the channel. - SpendUnconfirmed bool - - // MinHtlc is the htlc_minimum_msat value set when opening the channel. - MinHtlc lnwire.MilliSatoshi - - // RemoteMaxHtlcs is the remote_max_htlcs value set when opening the - // channel, restricting the number of concurrent HTLCs the remote party - // can add to a commitment. - RemoteMaxHtlcs uint16 - - // FundingShim is an optional funding shim that the caller can specify - // in order to modify the channel funding workflow. - FundingShim *lnrpc.FundingShim -} - -// OpenChannel attempts to open a channel between srcNode and destNode with the -// passed channel funding parameters. If the passed context has a timeout, then -// if the timeout is reached before the channel pending notification is -// received, an error is returned. The confirmed boolean determines whether we -// should fund the channel with confirmed outputs or not. 
-func (n *NetworkHarness) OpenChannel(ctx context.Context, - srcNode, destNode *HarnessNode, p OpenChannelParams) ( - lnrpc.Lightning_OpenChannelClient, er.R) { - - // Wait until srcNode and destNode have the latest chain synced. - // Otherwise, we may run into a check within the funding manager that - // prevents any funding workflows from being kicked off if the chain - // isn't yet synced. - if err := srcNode.WaitForBlockchainSync(ctx); err != nil { - return nil, er.Errorf("unable to sync srcNode chain: %v", err) - } - if err := destNode.WaitForBlockchainSync(ctx); err != nil { - return nil, er.Errorf("unable to sync destNode chain: %v", err) - } - - minConfs := int32(1) - if p.SpendUnconfirmed { - minConfs = 0 - } - - openReq := &lnrpc.OpenChannelRequest{ - NodePubkey: destNode.PubKey[:], - LocalFundingAmount: int64(p.Amt), - PushSat: int64(p.PushAmt), - Private: p.Private, - MinConfs: minConfs, - SpendUnconfirmed: p.SpendUnconfirmed, - MinHtlcMsat: int64(p.MinHtlc), - RemoteMaxHtlcs: uint32(p.RemoteMaxHtlcs), - FundingShim: p.FundingShim, - } - - respStream, errr := srcNode.OpenChannel(ctx, openReq) - if errr != nil { - return nil, er.Errorf("unable to open channel between "+ - "alice and bob: %v", errr) - } - - chanOpen := make(chan struct{}) - errChan := make(chan er.R) - go func() { - // Consume the "channel pending" update. This waits until the node - // notifies us that the final message in the channel funding workflow - // has been sent to the remote node. 
- resp, errr := respStream.Recv() - if errr != nil { - errChan <- er.E(errr) - return - } - if _, ok := resp.Update.(*lnrpc.OpenStatusUpdate_ChanPending); !ok { - errChan <- er.Errorf("expected channel pending update, "+ - "instead got %v", resp) - return - } - - close(chanOpen) - }() - - select { - case <-ctx.Done(): - return nil, er.Errorf("timeout reached before chan pending "+ - "update sent: %v", errr) - case err := <-errChan: - return nil, err - case <-chanOpen: - return respStream, nil - } -} - -// OpenPendingChannel attempts to open a channel between srcNode and destNode with the -// passed channel funding parameters. If the passed context has a timeout, then -// if the timeout is reached before the channel pending notification is -// received, an error is returned. -func (n *NetworkHarness) OpenPendingChannel(ctx context.Context, - srcNode, destNode *HarnessNode, amt btcutil.Amount, - pushAmt btcutil.Amount) (*lnrpc.PendingUpdate, er.R) { - - // Wait until srcNode and destNode have blockchain synced - if err := srcNode.WaitForBlockchainSync(ctx); err != nil { - return nil, er.Errorf("unable to sync srcNode chain: %v", err) - } - if err := destNode.WaitForBlockchainSync(ctx); err != nil { - return nil, er.Errorf("unable to sync destNode chain: %v", err) - } - - openReq := &lnrpc.OpenChannelRequest{ - NodePubkey: destNode.PubKey[:], - LocalFundingAmount: int64(amt), - PushSat: int64(pushAmt), - Private: false, - } - - respStream, err := srcNode.OpenChannel(ctx, openReq) - if err != nil { - return nil, er.Errorf("unable to open channel between "+ - "alice and bob: %v", err) - } - - chanPending := make(chan *lnrpc.PendingUpdate) - errChan := make(chan er.R) - go func() { - // Consume the "channel pending" update. This waits until the node - // notifies us that the final message in the channel funding workflow - // has been sent to the remote node. 
- resp, errr := respStream.Recv() - if errr != nil { - errChan <- er.E(errr) - return - } - pendingResp, ok := resp.Update.(*lnrpc.OpenStatusUpdate_ChanPending) - if !ok { - errChan <- er.Errorf("expected channel pending update, "+ - "instead got %v", resp) - return - } - - chanPending <- pendingResp.ChanPending - }() - - select { - case <-ctx.Done(): - return nil, er.Errorf("timeout reached before chan pending " + - "update sent") - case err := <-errChan: - return nil, err - case pendingChan := <-chanPending: - return pendingChan, nil - } -} - -// WaitForChannelOpen waits for a notification that a channel is open by -// consuming a message from the past open channel stream. If the passed context -// has a timeout, then if the timeout is reached before the channel has been -// opened, then an error is returned. -func (n *NetworkHarness) WaitForChannelOpen(ctx context.Context, - openChanStream lnrpc.Lightning_OpenChannelClient) (*lnrpc.ChannelPoint, er.R) { - - errChan := make(chan er.R) - respChan := make(chan *lnrpc.ChannelPoint) - go func() { - resp, err := openChanStream.Recv() - if err != nil { - errChan <- er.Errorf("unable to read rpc resp: %v", err) - return - } - fundingResp, ok := resp.Update.(*lnrpc.OpenStatusUpdate_ChanOpen) - if !ok { - errChan <- er.Errorf("expected channel open update, "+ - "instead got %v", resp) - return - } - - respChan <- fundingResp.ChanOpen.ChannelPoint - }() - - select { - case <-ctx.Done(): - return nil, er.Errorf("timeout reached while waiting for " + - "channel open") - case err := <-errChan: - return nil, err - case chanPoint := <-respChan: - return chanPoint, nil - } -} - -// CloseChannel attempts to close the channel indicated by the -// passed channel point, initiated by the passed lnNode. If the passed context -// has a timeout, an error is returned if that timeout is reached before the -// channel close is pending. 
-func (n *NetworkHarness) CloseChannel(ctx context.Context, - lnNode *HarnessNode, cp *lnrpc.ChannelPoint, - force bool) (lnrpc.Lightning_CloseChannelClient, *chainhash.Hash, er.R) { - - // Create a channel outpoint that we can use to compare to channels - // from the ListChannelsResponse. - txidHash, err := getChanPointFundingTxid(cp) - if err != nil { - return nil, nil, err - } - fundingTxID, err := chainhash.NewHash(txidHash) - if err != nil { - return nil, nil, err - } - chanPoint := wire.OutPoint{ - Hash: *fundingTxID, - Index: cp.OutputIndex, - } - - // We'll wait for *both* nodes to read the channel as active if we're - // performing a cooperative channel closure. - if !force { - timeout := time.Second * 15 - listReq := &lnrpc.ListChannelsRequest{} - - // We define two helper functions, one two locate a particular - // channel, and the other to check if a channel is active or - // not. - filterChannel := func(node *HarnessNode, - op wire.OutPoint) (*lnrpc.Channel, er.R) { - listResp, errr := node.ListChannels(ctx, listReq) - if errr != nil { - return nil, er.E(errr) - } - - for _, c := range listResp.Channels { - if c.ChannelPoint == op.String() { - return c, nil - } - } - - return nil, er.Errorf("unable to find channel") - } - activeChanPredicate := func(node *HarnessNode) func() bool { - return func() bool { - channel, err := filterChannel(node, chanPoint) - if err != nil { - return false - } - - return channel.Active - } - } - - // Next, we'll fetch the target channel in order to get the - // harness node that will be receiving the channel close request. - targetChan, err := filterChannel(lnNode, chanPoint) - if err != nil { - return nil, nil, err - } - receivingNode, err := n.LookUpNodeByPub(targetChan.RemotePubkey) - if err != nil { - return nil, nil, err - } - - // Before proceeding, we'll ensure that the channel is active - // for both nodes. 
- err = wait.Predicate(activeChanPredicate(lnNode), timeout) - if err != nil { - return nil, nil, er.Errorf("channel of closing " + - "node not active in time") - } - err = wait.Predicate(activeChanPredicate(receivingNode), timeout) - if err != nil { - return nil, nil, er.Errorf("channel of receiving " + - "node not active in time") - } - } - - closeReq := &lnrpc.CloseChannelRequest{ - ChannelPoint: cp, - Force: force, - } - closeRespStream, errr := lnNode.CloseChannel(ctx, closeReq) - if errr != nil { - return nil, nil, er.Errorf("unable to close channel: %v", errr) - } - - errChan := make(chan er.R) - fin := make(chan *chainhash.Hash) - go func() { - // Consume the "channel close" update in order to wait for the closing - // transaction to be broadcast, then wait for the closing tx to be seen - // within the network. - closeResp, errr := closeRespStream.Recv() - if errr != nil { - errChan <- er.Errorf("unable to recv() from close "+ - "stream: %v", errr) - return - } - pendingClose, ok := closeResp.Update.(*lnrpc.CloseStatusUpdate_ClosePending) - if !ok { - errChan <- er.Errorf("expected channel close update, "+ - "instead got %v", pendingClose) - return - } - - closeTxid, err := chainhash.NewHash(pendingClose.ClosePending.Txid) - if err != nil { - errChan <- er.Errorf("unable to decode closeTxid: "+ - "%v", err) - return - } - if err := n.WaitForTxInMempool(ctx, *closeTxid); err != nil { - errChan <- er.Errorf("error while waiting for "+ - "broadcast tx: %v", err) - return - } - fin <- closeTxid - }() - - // Wait until either the deadline for the context expires, an error - // occurs, or the channel close update is received. - select { - case err := <-errChan: - return nil, nil, err - case closeTxid := <-fin: - return closeRespStream, closeTxid, nil - } -} - -// WaitForChannelClose waits for a notification from the passed channel close -// stream that the node has deemed the channel has been fully closed. 
If the -// passed context has a timeout, then if the timeout is reached before the -// notification is received then an error is returned. -func (n *NetworkHarness) WaitForChannelClose(ctx context.Context, - closeChanStream lnrpc.Lightning_CloseChannelClient) (*chainhash.Hash, er.R) { - - errChan := make(chan er.R) - updateChan := make(chan *lnrpc.CloseStatusUpdate_ChanClose) - go func() { - closeResp, errr := closeChanStream.Recv() - if errr != nil { - errChan <- er.E(errr) - return - } - - closeFin, ok := closeResp.Update.(*lnrpc.CloseStatusUpdate_ChanClose) - if !ok { - errChan <- er.Errorf("expected channel close update, "+ - "instead got %v", closeFin) - return - } - - updateChan <- closeFin - }() - - // Wait until either the deadline for the context expires, an error - // occurs, or the channel close update is received. - select { - case <-ctx.Done(): - return nil, er.Errorf("timeout reached before update sent") - case err := <-errChan: - return nil, err - case update := <-updateChan: - return chainhash.NewHash(update.ChanClose.ClosingTxid) - } -} - -// AssertChannelExists asserts that an active channel identified by the -// specified channel point exists from the point-of-view of the node. It takes -// an optional set of check functions which can be used to make further -// assertions using channel's values. These functions are responsible for -// failing the test themselves if they do not pass. -// nolint: interfacer -func (n *NetworkHarness) AssertChannelExists(ctx context.Context, - node *HarnessNode, chanPoint *wire.OutPoint, - checks ...func(*lnrpc.Channel)) er.R { - - req := &lnrpc.ListChannelsRequest{} - - return wait.NoError(func() er.R { - resp, err := node.ListChannels(ctx, req) - if err != nil { - return er.Errorf("unable fetch node's channels: %v", err) - } - - for _, channel := range resp.Channels { - if channel.ChannelPoint == chanPoint.String() { - // First check whether our channel is active, - // failing early if it is not. 
- if !channel.Active { - return er.Errorf("channel %s inactive", - chanPoint) - } - - // Apply any additional checks that we would - // like to verify. - for _, check := range checks { - check(channel) - } - - return nil - } - } - - return er.Errorf("channel %s not found", chanPoint) - }, 15*time.Second) -} - -// DumpLogs reads the current logs generated by the passed node, and returns -// the logs as a single string. This function is useful for examining the logs -// of a particular node in the case of a test failure. -// Logs from lightning node being generated with delay - you should -// add time.Sleep() in order to get all logs. -func (n *NetworkHarness) DumpLogs(node *HarnessNode) (string, er.R) { - logFile := fmt.Sprintf("%v/simnet/lnd.log", node.Cfg.LogDir) - - buf, errr := ioutil.ReadFile(logFile) - if errr != nil { - return "", er.E(errr) - } - - return string(buf), nil -} - -// SendCoins attempts to send amt satoshis from the internal mining node to the -// targeted lightning node using a P2WKH address. 6 blocks are mined after in -// order to confirm the transaction. -func (n *NetworkHarness) SendCoins(ctx context.Context, amt btcutil.Amount, - target *HarnessNode) er.R { - - return n.sendCoins( - ctx, amt, target, lnrpc.AddressType_WITNESS_PUBKEY_HASH, - true, - ) -} - -// SendCoinsUnconfirmed sends coins from the internal mining node to the target -// lightning node using a P2WPKH address. No blocks are mined after, so the -// transaction remains unconfirmed. -func (n *NetworkHarness) SendCoinsUnconfirmed(ctx context.Context, - amt btcutil.Amount, target *HarnessNode) er.R { - - return n.sendCoins( - ctx, amt, target, lnrpc.AddressType_WITNESS_PUBKEY_HASH, - false, - ) -} - -// SendCoinsNP2WKH attempts to send amt satoshis from the internal mining node -// to the targeted lightning node using a NP2WKH address. 
-func (n *NetworkHarness) SendCoinsNP2WKH(ctx context.Context, - amt btcutil.Amount, target *HarnessNode) er.R { - - return n.sendCoins( - ctx, amt, target, lnrpc.AddressType_NESTED_PUBKEY_HASH, - true, - ) -} - -// sendCoins attempts to send amt satoshis from the internal mining node to the -// targeted lightning node. The confirmed boolean indicates whether the -// transaction that pays to the target should confirm. -func (n *NetworkHarness) sendCoins(ctx context.Context, amt btcutil.Amount, - target *HarnessNode, addrType lnrpc.AddressType, - confirmed bool) er.R { - - balReq := &lnrpc.WalletBalanceRequest{} - initialBalance, errr := target.WalletBalance(ctx, balReq) - if errr != nil { - return er.E(errr) - } - - // First, obtain an address from the target lightning node, preferring - // to receive a p2wkh address s.t the output can immediately be used as - // an input to a funding transaction. - addrReq := &lnrpc.NewAddressRequest{ - Type: addrType, - } - resp, errr := target.NewAddress(ctx, addrReq) - if errr != nil { - return er.E(errr) - } - addr, err := btcutil.DecodeAddress(resp.Address, n.netParams) - if err != nil { - return err - } - addrScript, err := txscript.PayToAddrScript(addr) - if err != nil { - return err - } - - // Generate a transaction which creates an output to the target - // pkScript of the desired amount. - output := &wire.TxOut{ - PkScript: addrScript, - Value: int64(amt), - } - _, err = n.Miner.SendOutputs([]*wire.TxOut{output}, 7500) - if err != nil { - return err - } - - // Encode the pkScript in hex as this the format that it will be - // returned via rpc. - expPkScriptStr := hex.EncodeToString(addrScript) - - // Now, wait for ListUnspent to show the unconfirmed transaction - // containing the correct pkscript. - err = wait.NoError(func() er.R { - // Since neutrino doesn't support unconfirmed outputs, skip - // this check. 
- if target.Cfg.BackendCfg.Name() == "neutrino" { - return nil - } - - req := &lnrpc.ListUnspentRequest{} - resp, errr := target.ListUnspent(ctx, req) - if errr != nil { - return er.E(errr) - } - - // When using this method, there should only ever be on - // unconfirmed transaction. - if len(resp.Utxos) != 1 { - return er.Errorf("number of unconfirmed utxos "+ - "should be 1, found %d", len(resp.Utxos)) - } - - // Assert that the lone unconfirmed utxo contains the same - // pkscript as the output generated above. - pkScriptStr := resp.Utxos[0].PkScript - if strings.Compare(pkScriptStr, expPkScriptStr) != 0 { - return er.Errorf("pkscript mismatch, want: %s, "+ - "found: %s", expPkScriptStr, pkScriptStr) - } - - return nil - }, 15*time.Second) - if err != nil { - return er.Errorf("unconfirmed utxo was not found in "+ - "ListUnspent: %v", err) - } - - // If the transaction should remain unconfirmed, then we'll wait until - // the target node's unconfirmed balance reflects the expected balance - // and exit. - if !confirmed { - expectedBalance := btcutil.Amount(initialBalance.UnconfirmedBalance) + amt - return target.WaitForBalance(expectedBalance, false) - } - - // Otherwise, we'll generate 6 new blocks to ensure the output gains a - // sufficient number of confirmations and wait for the balance to - // reflect what's expected. - if _, err := n.Miner.Node.Generate(6); err != nil { - return err - } - - expectedBalance := btcutil.Amount(initialBalance.ConfirmedBalance) + amt - return target.WaitForBalance(expectedBalance, true) -} - -func (n *NetworkHarness) SetFeeEstimate(fee chainfee.SatPerKWeight) { - n.feeService.setFee(fee) -} - -// CopyFile copies the file src to dest. 
-func CopyFile(dest, src string) er.R { - s, errr := os.Open(src) - if errr != nil { - return er.E(errr) - } - defer s.Close() - - d, errr := os.Create(dest) - if errr != nil { - return er.E(errr) - } - - if _, errr := io.Copy(d, s); errr != nil { - d.Close() - return er.E(errr) - } - - return er.E(d.Close()) -} - -// FileExists returns true if the file at path exists. -func FileExists(path string) bool { - if _, err := os.Stat(path); os.IsNotExist(err) { - return false - } - - return true -} - -// CopyAll copies all files and directories from srcDir to dstDir recursively. -// Note that this function does not support links. -func CopyAll(dstDir, srcDir string) er.R { - entries, errr := ioutil.ReadDir(srcDir) - if errr != nil { - return er.E(errr) - } - - for _, entry := range entries { - srcPath := filepath.Join(srcDir, entry.Name()) - dstPath := filepath.Join(dstDir, entry.Name()) - - info, errr := os.Stat(srcPath) - if errr != nil { - return er.E(errr) - } - - if info.IsDir() { - errr := os.Mkdir(dstPath, info.Mode()) - if errr != nil && !os.IsExist(errr) { - return er.E(errr) - } - - err := CopyAll(dstPath, srcPath) - if err != nil { - return err - } - } else if err := CopyFile(dstPath, srcPath); err != nil { - return err - } - } - - return nil -} diff --git a/lnd/lntest/itest/lnd_channel_backup_test.go b/lnd/lntest/itest/lnd_channel_backup_test.go deleted file mode 100644 index c383fdd8..00000000 --- a/lnd/lntest/itest/lnd_channel_backup_test.go +++ /dev/null @@ -1,1080 +0,0 @@ -package itest - -import ( - "context" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strconv" - "strings" - "sync" - "testing" - "time" - - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/lnd/chanbackup" - "github.com/pkt-cash/pktd/lnd/lnrpc" - "github.com/pkt-cash/pktd/lnd/lntest" - "github.com/pkt-cash/pktd/lnd/lntest/wait" - "github.com/pkt-cash/pktd/wire" - 
"github.com/stretchr/testify/require" -) - -// testChannelBackupRestore tests that we're able to recover from, and initiate -// the DLP protocol via: the RPC restore command, restoring on unlock, and -// restoring from initial wallet creation. We'll also alternate between -// restoring form the on disk file, and restoring from the exported RPC command -// as well. -func testChannelBackupRestore(net *lntest.NetworkHarness, t *harnessTest) { - password := []byte("El Psy Kongroo") - - ctxb := context.Background() - - var testCases = []chanRestoreTestCase{ - // Restore from backups obtained via the RPC interface. Dave - // was the initiator, of the non-advertised channel. - { - name: "restore from RPC backup", - channelsUpdated: false, - initiator: true, - private: false, - restoreMethod: func(oldNode *lntest.HarnessNode, - backupFilePath string, - mnemonic []string) (nodeRestorer, er.R) { - - // For this restoration method, we'll grab the - // current multi-channel backup from the old - // node, and use it to restore a new node - // within the closure. - req := &lnrpc.ChanBackupExportRequest{} - chanBackup, err := oldNode.ExportAllChannelBackups( - ctxb, req, - ) - if err != nil { - return nil, er.Errorf("unable to obtain "+ - "channel backup: %v", err) - } - - multi := chanBackup.MultiChanBackup.MultiChanBackup - - // In our nodeRestorer function, we'll restore - // the node from seed, then manually recover - // the channel backup. - return chanRestoreViaRPC( - net, password, mnemonic, multi, - ) - }, - }, - - // Restore the backup from the on-disk file, using the RPC - // interface. - { - name: "restore from backup file", - initiator: true, - private: false, - restoreMethod: func(oldNode *lntest.HarnessNode, - backupFilePath string, - mnemonic []string) (nodeRestorer, er.R) { - - // Read the entire Multi backup stored within - // this node's channels.backup file. 
- multi, errr := ioutil.ReadFile(backupFilePath) - if errr != nil { - return nil, er.E(errr) - } - - // Now that we have Dave's backup file, we'll - // create a new nodeRestorer that will restore - // using the on-disk channels.backup. - return chanRestoreViaRPC( - net, password, mnemonic, multi, - ) - }, - }, - - // Restore the backup as part of node initialization with the - // prior mnemonic and new backup seed. - { - name: "restore during creation", - initiator: true, - private: false, - restoreMethod: func(oldNode *lntest.HarnessNode, - backupFilePath string, - mnemonic []string) (nodeRestorer, er.R) { - - // First, fetch the current backup state as is, - // to obtain our latest Multi. - chanBackup, err := oldNode.ExportAllChannelBackups( - ctxb, &lnrpc.ChanBackupExportRequest{}, - ) - if err != nil { - return nil, er.Errorf("unable to obtain "+ - "channel backup: %v", err) - } - backupSnapshot := &lnrpc.ChanBackupSnapshot{ - MultiChanBackup: chanBackup.MultiChanBackup, - } - - // Create a new nodeRestorer that will restore - // the node using the Multi backup we just - // obtained above. - return func() (*lntest.HarnessNode, er.R) { - return net.RestoreNodeWithSeed( - "dave", nil, password, - mnemonic, 1000, backupSnapshot, - ) - }, nil - }, - }, - - // Restore the backup once the node has already been - // re-created, using the Unlock call. - { - name: "restore during unlock", - initiator: true, - private: false, - restoreMethod: func(oldNode *lntest.HarnessNode, - backupFilePath string, - mnemonic []string) (nodeRestorer, er.R) { - - // First, fetch the current backup state as is, - // to obtain our latest Multi. 
- chanBackup, err := oldNode.ExportAllChannelBackups( - ctxb, &lnrpc.ChanBackupExportRequest{}, - ) - if err != nil { - return nil, er.Errorf("unable to obtain "+ - "channel backup: %v", err) - } - backupSnapshot := &lnrpc.ChanBackupSnapshot{ - MultiChanBackup: chanBackup.MultiChanBackup, - } - - // Create a new nodeRestorer that will restore - // the node with its seed, but no channel - // backup, shutdown this initialized node, then - // restart it again using Unlock. - return func() (*lntest.HarnessNode, er.R) { - newNode, err := net.RestoreNodeWithSeed( - "dave", nil, password, - mnemonic, 1000, nil, - ) - if err != nil { - return nil, err - } - - err = net.RestartNode( - newNode, nil, backupSnapshot, - ) - if err != nil { - return nil, err - } - - return newNode, nil - }, nil - }, - }, - - // Restore the backup from the on-disk file a second time to - // make sure imports can be canceled and later resumed. - { - name: "restore from backup file twice", - initiator: true, - private: false, - restoreMethod: func(oldNode *lntest.HarnessNode, - backupFilePath string, - mnemonic []string) (nodeRestorer, er.R) { - - // Read the entire Multi backup stored within - // this node's channels.backup file. - multi, errr := ioutil.ReadFile(backupFilePath) - if errr != nil { - return nil, er.E(errr) - } - - // Now that we have Dave's backup file, we'll - // create a new nodeRestorer that will restore - // using the on-disk channels.backup. 
- backup := &lnrpc.RestoreChanBackupRequest_MultiChanBackup{ - MultiChanBackup: multi, - } - - ctxb := context.Background() - - return func() (*lntest.HarnessNode, er.R) { - newNode, err := net.RestoreNodeWithSeed( - "dave", nil, password, mnemonic, - 1000, nil, - ) - if err != nil { - return nil, er.Errorf("unable to "+ - "restore node: %v", err) - } - - _, errr := newNode.RestoreChannelBackups( - ctxb, - &lnrpc.RestoreChanBackupRequest{ - Backup: backup, - }, - ) - if errr != nil { - return nil, er.Errorf("unable "+ - "to restore backups: %v", - errr) - } - - _, errr = newNode.RestoreChannelBackups( - ctxb, - &lnrpc.RestoreChanBackupRequest{ - Backup: backup, - }, - ) - if errr != nil { - return nil, er.Errorf("unable "+ - "to restore backups the"+ - "second time: %v", - errr) - } - - return newNode, nil - }, nil - }, - }, - - // Use the channel backup file that contains an unconfirmed - // channel and make sure recovery works as well. - { - name: "restore unconfirmed channel file", - channelsUpdated: false, - initiator: true, - private: false, - unconfirmed: true, - restoreMethod: func(oldNode *lntest.HarnessNode, - backupFilePath string, - mnemonic []string) (nodeRestorer, er.R) { - - // Read the entire Multi backup stored within - // this node's channels.backup file. - multi, errr := ioutil.ReadFile(backupFilePath) - if errr != nil { - return nil, er.E(errr) - } - - // Let's assume time passes, the channel - // confirms in the meantime but for some reason - // the backup we made while it was still - // unconfirmed is the only backup we have. We - // should still be able to restore it. To - // simulate time passing, we mine some blocks - // to get the channel confirmed _after_ we saved - // the backup. - mineBlocks(t, net, 6, 1) - - // In our nodeRestorer function, we'll restore - // the node from seed, then manually recover - // the channel backup. 
- return chanRestoreViaRPC( - net, password, mnemonic, multi, - ) - }, - }, - - // Create a backup using RPC that contains an unconfirmed - // channel and make sure recovery works as well. - { - name: "restore unconfirmed channel RPC", - channelsUpdated: false, - initiator: true, - private: false, - unconfirmed: true, - restoreMethod: func(oldNode *lntest.HarnessNode, - backupFilePath string, - mnemonic []string) (nodeRestorer, er.R) { - - // For this restoration method, we'll grab the - // current multi-channel backup from the old - // node. The channel should be included, even if - // it is not confirmed yet. - req := &lnrpc.ChanBackupExportRequest{} - chanBackup, err := oldNode.ExportAllChannelBackups( - ctxb, req, - ) - if err != nil { - return nil, er.Errorf("unable to obtain "+ - "channel backup: %v", err) - } - chanPoints := chanBackup.MultiChanBackup.ChanPoints - if len(chanPoints) == 0 { - return nil, er.Errorf("unconfirmed " + - "channel not included in backup") - } - - // Let's assume time passes, the channel - // confirms in the meantime but for some reason - // the backup we made while it was still - // unconfirmed is the only backup we have. We - // should still be able to restore it. To - // simulate time passing, we mine some blocks - // to get the channel confirmed _after_ we saved - // the backup. - mineBlocks(t, net, 6, 1) - - // In our nodeRestorer function, we'll restore - // the node from seed, then manually recover - // the channel backup. - multi := chanBackup.MultiChanBackup.MultiChanBackup - return chanRestoreViaRPC( - net, password, mnemonic, multi, - ) - }, - }, - - // Restore the backup from the on-disk file, using the RPC - // interface, for anchor commitment channels. 
- { - name: "restore from backup file anchors", - initiator: true, - private: false, - anchorCommit: true, - restoreMethod: func(oldNode *lntest.HarnessNode, - backupFilePath string, - mnemonic []string) (nodeRestorer, er.R) { - - // Read the entire Multi backup stored within - // this node's channels.backup file. - multi, errr := ioutil.ReadFile(backupFilePath) - if errr != nil { - return nil, er.E(errr) - } - - // Now that we have Dave's backup file, we'll - // create a new nodeRestorer that will restore - // using the on-disk channels.backup. - return chanRestoreViaRPC( - net, password, mnemonic, multi, - ) - }, - }, - } - - // TODO(roasbeef): online vs offline close? - - // TODO(roasbeef): need to re-trigger the on-disk file once the node - // ann is updated? - - for _, testCase := range testCases { - testCase := testCase - success := t.t.Run(testCase.name, func(t *testing.T) { - h := newHarnessTest(t, net) - - // Start each test with the default static fee estimate. - net.SetFeeEstimate(12500) - - testChanRestoreScenario(h, net, &testCase, password) - }) - if !success { - break - } - } -} - -// testChannelBackupUpdates tests that both the streaming channel update RPC, -// and the on-disk channels.backup are updated each time a channel is -// opened/closed. -func testChannelBackupUpdates(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - // First, we'll make a temp directory that we'll use to store our - // backup file, so we can check in on it during the test easily. - backupDir, err := ioutil.TempDir("", "") - if err != nil { - t.Fatalf("unable to create backup dir: %v", err) - } - defer os.RemoveAll(backupDir) - - // First, we'll create a new node, Carol. We'll also create a temporary - // file that Carol will use to store her channel backups. 
- backupFilePath := filepath.Join( - backupDir, chanbackup.DefaultBackupFileName, - ) - carolArgs := fmt.Sprintf("--backupfilepath=%v", backupFilePath) - carol, errr := net.NewNode("carol", []string{carolArgs}) - if errr != nil { - t.Fatalf("unable to create new node: %v", errr) - } - defer shutdownAndAssert(net, t, carol) - - // Next, we'll register for streaming notifications for changes to the - // backup file. - backupStream, err := carol.SubscribeChannelBackups( - ctxb, &lnrpc.ChannelBackupSubscription{}, - ) - if err != nil { - t.Fatalf("unable to create backup stream: %v", err) - } - - // We'll use this goroutine to proxy any updates to a channel we can - // easily use below. - var wg sync.WaitGroup - backupUpdates := make(chan *lnrpc.ChanBackupSnapshot) - streamErr := make(chan er.R) - streamQuit := make(chan struct{}) - - wg.Add(1) - go func() { - defer wg.Done() - for { - snapshot, err := backupStream.Recv() - if err != nil { - select { - case streamErr <- er.E(err): - case <-streamQuit: - return - } - } - - select { - case backupUpdates <- snapshot: - case <-streamQuit: - return - } - } - }() - defer close(streamQuit) - - // With Carol up, we'll now connect her to Alice, and open a channel - // between them. - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, carol, net.Alice); err != nil { - t.Fatalf("unable to connect carol to alice: %v", err) - } - - // Next, we'll open two channels between Alice and Carol back to back. - var chanPoints []*lnrpc.ChannelPoint - numChans := 2 - chanAmt := btcutil.Amount(1000000) - for i := 0; i < numChans; i++ { - ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout) - chanPoint := openChannelAndAssert( - ctxt, t, net, net.Alice, carol, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - - chanPoints = append(chanPoints, chanPoint) - } - - // Using this helper function, we'll maintain a pointer to the latest - // channel backup so we can compare it to the on disk state. 
- var currentBackup *lnrpc.ChanBackupSnapshot - assertBackupNtfns := func(numNtfns int) { - for i := 0; i < numNtfns; i++ { - select { - case err := <-streamErr: - t.Fatalf("error with backup stream: %v", err) - - case currentBackup = <-backupUpdates: - - case <-time.After(time.Second * 5): - t.Fatalf("didn't receive channel backup "+ - "notification %v", i+1) - } - } - } - - // assertBackupFileState is a helper function that we'll use to compare - // the on disk back up file to our currentBackup pointer above. - assertBackupFileState := func() { - err := wait.NoError(func() er.R { - packedBackup, err := ioutil.ReadFile(backupFilePath) - if err != nil { - return er.Errorf("unable to read backup "+ - "file: %v", err) - } - - // As each back up file will be encrypted with a fresh - // nonce, we can't compare them directly, so instead - // we'll compare the length which is a proxy for the - // number of channels that the multi-backup contains. - rawBackup := currentBackup.MultiChanBackup.MultiChanBackup - if len(rawBackup) != len(packedBackup) { - return er.Errorf("backup files don't match: "+ - "expected %x got %x", rawBackup, packedBackup) - } - - // Additionally, we'll assert that both backups up - // returned are valid. - for i, backup := range [][]byte{rawBackup, packedBackup} { - snapshot := &lnrpc.ChanBackupSnapshot{ - MultiChanBackup: &lnrpc.MultiChanBackup{ - MultiChanBackup: backup, - }, - } - _, err := carol.VerifyChanBackup(ctxb, snapshot) - if err != nil { - return er.Errorf("unable to verify "+ - "backup #%d: %v", i, err) - } - } - - return nil - }, time.Second*15) - if err != nil { - t.Fatalf("backup state invalid: %v", err) - } - } - - // As these two channels were just opened, we should've got two times - // the pending and open notifications for channel backups. - assertBackupNtfns(2 * 2) - - // The on disk file should also exactly match the latest backup that we - // have. - assertBackupFileState() - - // Next, we'll close the channels one by one. 
After each channel - // closure, we should get a notification, and the on-disk state should - // match this state as well. - for i := 0; i < numChans; i++ { - // To ensure force closes also trigger an update, we'll force - // close half of the channels. - forceClose := i%2 == 0 - - chanPoint := chanPoints[i] - - ctxt, _ := context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert( - ctxt, t, net, net.Alice, chanPoint, forceClose, - ) - - // We should get a single notification after closing, and the - // on-disk state should match this latest notifications. - assertBackupNtfns(1) - assertBackupFileState() - - // If we force closed the channel, then we'll mine enough - // blocks to ensure all outputs have been swept. - if forceClose { - cleanupForceClose(t, net, net.Alice, chanPoint) - } - } -} - -// testExportChannelBackup tests that we're able to properly export either a -// targeted channel's backup, or export backups of all the currents open -// channels. -func testExportChannelBackup(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - // First, we'll create our primary test node: Carol. We'll use Carol to - // open channels and also export backups that we'll examine throughout - // the test. - carol, err := net.NewNode("carol", nil) - if err != nil { - t.Fatalf("unable to create new node: %v", err) - } - defer shutdownAndAssert(net, t, carol) - - // With Carol up, we'll now connect her to Alice, and open a channel - // between them. - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, carol, net.Alice); err != nil { - t.Fatalf("unable to connect carol to alice: %v", err) - } - - // Next, we'll open two channels between Alice and Carol back to back. 
- var chanPoints []*lnrpc.ChannelPoint - numChans := 2 - chanAmt := btcutil.Amount(1000000) - for i := 0; i < numChans; i++ { - ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout) - chanPoint := openChannelAndAssert( - ctxt, t, net, net.Alice, carol, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - - chanPoints = append(chanPoints, chanPoint) - } - - // Now that the channels are open, we should be able to fetch the - // backups of each of the channels. - for _, chanPoint := range chanPoints { - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - req := &lnrpc.ExportChannelBackupRequest{ - ChanPoint: chanPoint, - } - chanBackup, err := carol.ExportChannelBackup(ctxt, req) - if err != nil { - t.Fatalf("unable to fetch backup for channel %v: %v", - chanPoint, err) - } - - // The returned backup should be full populated. Since it's - // encrypted, we can't assert any more than that atm. - if len(chanBackup.ChanBackup) == 0 { - t.Fatalf("obtained empty backup for channel: %v", chanPoint) - } - - // The specified chanPoint in the response should match our - // requested chanPoint. - if chanBackup.ChanPoint.String() != chanPoint.String() { - t.Fatalf("chanPoint mismatched: expected %v, got %v", - chanPoint.String(), - chanBackup.ChanPoint.String()) - } - } - - // Before we proceed, we'll make two utility methods we'll use below - // for our primary assertions. 
- assertNumSingleBackups := func(numSingles int) { - err := wait.NoError(func() er.R { - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - req := &lnrpc.ChanBackupExportRequest{} - chanSnapshot, err := carol.ExportAllChannelBackups( - ctxt, req, - ) - if err != nil { - return er.Errorf("unable to export channel "+ - "backup: %v", err) - } - - if chanSnapshot.SingleChanBackups == nil { - return er.Errorf("single chan backups not " + - "populated") - } - - backups := chanSnapshot.SingleChanBackups.ChanBackups - if len(backups) != numSingles { - return er.Errorf("expected %v singles, "+ - "got %v", len(backups), numSingles) - } - - return nil - }, defaultTimeout) - if err != nil { - t.Fatalf(err.String()) - } - } - assertMultiBackupFound := func() func(bool, map[wire.OutPoint]struct{}) { - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - req := &lnrpc.ChanBackupExportRequest{} - chanSnapshot, err := carol.ExportAllChannelBackups(ctxt, req) - if err != nil { - t.Fatalf("unable to export channel backup: %v", err) - } - - return func(found bool, chanPoints map[wire.OutPoint]struct{}) { - switch { - case found && chanSnapshot.MultiChanBackup == nil: - t.Fatalf("multi-backup not present") - - case !found && chanSnapshot.MultiChanBackup != nil && - (len(chanSnapshot.MultiChanBackup.MultiChanBackup) != - chanbackup.NilMultiSizePacked): - - t.Fatalf("found multi-backup when non should " + - "be found") - } - - if !found { - return - } - - backedUpChans := chanSnapshot.MultiChanBackup.ChanPoints - if len(chanPoints) != len(backedUpChans) { - t.Fatalf("expected %v chans got %v", len(chanPoints), - len(backedUpChans)) - } - - for _, chanPoint := range backedUpChans { - wirePoint := rpcPointToWirePoint(t, chanPoint) - if _, ok := chanPoints[wirePoint]; !ok { - t.Fatalf("unexpected backup: %v", wirePoint) - } - } - } - } - - chans := make(map[wire.OutPoint]struct{}) - for _, chanPoint := range chanPoints { - chans[rpcPointToWirePoint(t, chanPoint)] = struct{}{} - } - - 
// We should have exactly two single channel backups contained, and we - // should also have a multi-channel backup. - assertNumSingleBackups(2) - assertMultiBackupFound()(true, chans) - - // We'll now close each channel on by one. After we close a channel, we - // shouldn't be able to find that channel as a backup still. We should - // also have one less single written to disk. - for i, chanPoint := range chanPoints { - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert( - ctxt, t, net, net.Alice, chanPoint, false, - ) - - assertNumSingleBackups(len(chanPoints) - i - 1) - - delete(chans, rpcPointToWirePoint(t, chanPoint)) - assertMultiBackupFound()(true, chans) - } - - // At this point we shouldn't have any single or multi-chan backups at - // all. - assertNumSingleBackups(0) - assertMultiBackupFound()(false, nil) -} - -// nodeRestorer is a function closure that allows each chanRestoreTestCase to -// control exactly *how* the prior node is restored. This might be using an -// backup obtained over RPC, or the file system, etc. -type nodeRestorer func() (*lntest.HarnessNode, er.R) - -// chanRestoreTestCase describes a test case for an end to end SCB restoration -// work flow. One node will start from scratch using an existing SCB. At the -// end of the est, both nodes should be made whole via the DLP protocol. -type chanRestoreTestCase struct { - // name is the name of the target test case. - name string - - // channelsUpdated is false then this means that no updates - // have taken place within the channel before restore. - // Otherwise, HTLCs will be settled between the two parties - // before restoration modifying the balance beyond the initial - // allocation. - channelsUpdated bool - - // initiator signals if Dave should be the one that opens the - // channel to Alice, or if it should be the other way around. - initiator bool - - // private signals if the channel from Dave to Carol should be - // private or not. 
- private bool - - // unconfirmed signals if the channel from Dave to Carol should be - // confirmed or not. - unconfirmed bool - - // anchorCommit is true, then the new anchor commitment type will be - // used for the channels created in the test. - anchorCommit bool - - // restoreMethod takes an old node, then returns a function - // closure that'll return the same node, but with its state - // restored via a custom method. We use this to abstract away - // _how_ a node is restored from our assertions once the node - // has been fully restored itself. - restoreMethod func(oldNode *lntest.HarnessNode, - backupFilePath string, - mnemonic []string) (nodeRestorer, er.R) -} - -// testChanRestoreScenario executes a chanRestoreTestCase from end to end, -// ensuring that after Dave restores his channel state according to the -// testCase, the DLP protocol is executed properly and both nodes are made -// whole. -func testChanRestoreScenario(t *harnessTest, net *lntest.NetworkHarness, - testCase *chanRestoreTestCase, password []byte) { - - const ( - chanAmt = btcutil.Amount(10000000) - pushAmt = btcutil.Amount(5000000) - ) - - ctxb := context.Background() - - var nodeArgs []string - if testCase.anchorCommit { - nodeArgs = commitTypeAnchors.Args() - } - - // First, we'll create a brand new node we'll use within the test. If - // we have a custom backup file specified, then we'll also create that - // for use. - dave, mnemonic, _, err := net.NewNodeWithSeed( - "dave", nodeArgs, password, false, - ) - if err != nil { - t.Fatalf("unable to create new node: %v", err) - } - // Defer to a closure instead of to shutdownAndAssert due to the value - // of 'dave' changing throughout the test. 
- defer func() { - shutdownAndAssert(net, t, dave) - }() - carol, err := net.NewNode("carol", nodeArgs) - if err != nil { - t.Fatalf("unable to make new node: %v", err) - } - defer shutdownAndAssert(net, t, carol) - - // Now that our new nodes are created, we'll give them some coins for - // channel opening and anchor sweeping. - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - err = net.SendCoins(ctxt, btcutil.UnitsPerCoin(), carol) - if err != nil { - t.Fatalf("unable to send coins to dave: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.SendCoins(ctxt, btcutil.UnitsPerCoin(), dave) - if err != nil { - t.Fatalf("unable to send coins to dave: %v", err) - } - - var from, to *lntest.HarnessNode - if testCase.initiator { - from, to = dave, carol - } else { - from, to = carol, dave - } - - // Next, we'll connect Dave to Carol, and open a new channel to her - // with a portion pushed. - if err := net.ConnectNodes(ctxt, dave, carol); err != nil { - t.Fatalf("unable to connect dave to carol: %v", err) - } - - // We will either open a confirmed or unconfirmed channel, depending on - // the requirements of the test case. - switch { - case testCase.unconfirmed: - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - _, err := net.OpenPendingChannel( - ctxt, from, to, chanAmt, pushAmt, - ) - if err != nil { - t.Fatalf("couldn't open pending channel: %v", err) - } - - // Give the pubsub some time to update the channel backup. 
- err = wait.NoError(func() er.R { - fi, errr := os.Stat(dave.ChanBackupPath()) - if errr != nil { - return er.E(errr) - } - if fi.Size() <= chanbackup.NilMultiSizePacked { - return er.Errorf("backup file empty") - } - return nil - }, defaultTimeout) - if err != nil { - t.Fatalf("channel backup not updated in time: %v", err) - } - - default: - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - chanPoint := openChannelAndAssert( - ctxt, t, net, from, to, - lntest.OpenChannelParams{ - Amt: chanAmt, - PushAmt: pushAmt, - Private: testCase.private, - }, - ) - - // Wait for both sides to see the opened channel. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = dave.WaitForNetworkChannelOpen(ctxt, chanPoint) - if err != nil { - t.Fatalf("dave didn't report channel: %v", err) - } - err = carol.WaitForNetworkChannelOpen(ctxt, chanPoint) - if err != nil { - t.Fatalf("carol didn't report channel: %v", err) - } - } - - // If both parties should start with existing channel updates, then - // we'll send+settle an HTLC between 'from' and 'to' now. - if testCase.channelsUpdated { - invoice := &lnrpc.Invoice{ - Memo: "testing", - Value: 10000, - } - invoiceResp, err := to.AddInvoice(ctxt, invoice) - if err != nil { - t.Fatalf("unable to add invoice: %v", err) - } - - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - errr := completePaymentRequests( - ctxt, from, from.RouterClient, - []string{invoiceResp.PaymentRequest}, true, - ) - if errr != nil { - t.Fatalf("unable to complete payments: %v", errr) - } - } - - // Before we start the recovery, we'll record the balances of both - // Carol and Dave to ensure they both sweep their coins at the end. 
- balReq := &lnrpc.WalletBalanceRequest{} - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - carolBalResp, errr := carol.WalletBalance(ctxt, balReq) - if errr != nil { - t.Fatalf("unable to get carol's balance: %v", errr) - } - carolStartingBalance := carolBalResp.ConfirmedBalance - - daveBalance, errr := dave.WalletBalance(ctxt, balReq) - if errr != nil { - t.Fatalf("unable to get carol's balance: %v", errr) - } - daveStartingBalance := daveBalance.ConfirmedBalance - - // At this point, we'll now execute the restore method to give us the - // new node we should attempt our assertions against. - backupFilePath := dave.ChanBackupPath() - restoredNodeFunc, err := testCase.restoreMethod( - dave, backupFilePath, mnemonic, - ) - if err != nil { - t.Fatalf("unable to prep node restoration: %v", err) - } - - // Now that we're able to make our restored now, we'll shutdown the old - // Dave node as we'll be storing it shortly below. - shutdownAndAssert(net, t, dave) - - // To make sure the channel state is advanced correctly if the channel - // peer is not online at first, we also shutdown Carol. - restartCarol, err := net.SuspendNode(carol) - util.RequireNoErr(t.t, err) - - // Next, we'll make a new Dave and start the bulk of our recovery - // workflow. - dave, err = restoredNodeFunc() - if err != nil { - t.Fatalf("unable to restore node: %v", err) - } - - // First ensure that the on-chain balance is restored. - err = wait.NoError(func() er.R { - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - balReq := &lnrpc.WalletBalanceRequest{} - daveBalResp, err := dave.WalletBalance(ctxt, balReq) - if err != nil { - return er.E(err) - } - - daveBal := daveBalResp.ConfirmedBalance - if daveBal <= 0 { - return er.Errorf("expected positive balance, had %v", - daveBal) - } - - return nil - }, defaultTimeout) - if err != nil { - t.Fatalf("On-chain balance not restored: %v", err) - } - - // We now check that the restored channel is in the proper state. 
It - // should not yet be force closing as no connection with the remote - // peer was established yet. We should also not be able to close the - // channel. - assertNumPendingChannels(t, dave, 1, 0) - ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout) - defer cancel() - pendingChanResp, errr := dave.PendingChannels( - ctxt, &lnrpc.PendingChannelsRequest{}, - ) - require.NoError(t.t, errr) - - // We also want to make sure we cannot force close in this state. That - // would get the state machine in a weird state. - chanPointParts := strings.Split( - pendingChanResp.WaitingCloseChannels[0].Channel.ChannelPoint, - ":", - ) - chanPointIndex, _ := strconv.ParseUint(chanPointParts[1], 10, 32) - resp, errr := dave.CloseChannel(ctxt, &lnrpc.CloseChannelRequest{ - ChannelPoint: &lnrpc.ChannelPoint{ - FundingTxid: &lnrpc.ChannelPoint_FundingTxidStr{ - FundingTxidStr: chanPointParts[0], - }, - OutputIndex: uint32(chanPointIndex), - }, - Force: true, - }) - - // We don't get an error directly but only when reading the first - // message of the stream. - require.NoError(t.t, errr) - _, errr = resp.Recv() - require.NoError(t.t, errr) - require.Contains(t.t, errr.Error(), "cannot close channel with state: ") - require.Contains(t.t, errr.Error(), "ChanStatusRestored") - - // Increase the fee estimate so that the following force close tx will - // be cpfp'ed in case of anchor commitments. - net.SetFeeEstimate(30000) - - // Now that we have ensured that the channels restored by the backup are - // in the correct state even without the remote peer telling us so, - // let's start up Carol again. - err = restartCarol() - util.RequireNoErr(t.t, err) - - // Now that we have our new node up, we expect that it'll re-connect to - // Carol automatically based on the restored backup. 
- ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.EnsureConnected(ctxt, dave, carol) - if err != nil { - t.Fatalf("node didn't connect after recovery: %v", err) - } - - // TODO(roasbeef): move dave restarts? - - // Now we'll assert that both sides properly execute the DLP protocol. - // We grab their balances now to ensure that they're made whole at the - // end of the protocol. - assertDLPExecuted( - net, t, carol, carolStartingBalance, dave, daveStartingBalance, - testCase.anchorCommit, - ) -} - -// chanRestoreViaRPC is a helper test method that returns a nodeRestorer -// instance which will restore the target node from a password+seed, then -// trigger a SCB restore using the RPC interface. -func chanRestoreViaRPC(net *lntest.NetworkHarness, - password []byte, mnemonic []string, - multi []byte) (nodeRestorer, er.R) { - - backup := &lnrpc.RestoreChanBackupRequest_MultiChanBackup{ - MultiChanBackup: multi, - } - - ctxb := context.Background() - - return func() (*lntest.HarnessNode, er.R) { - newNode, err := net.RestoreNodeWithSeed( - "dave", nil, password, mnemonic, 1000, nil, - ) - if err != nil { - return nil, er.Errorf("unable to "+ - "restore node: %v", err) - } - - _, errr := newNode.RestoreChannelBackups( - ctxb, &lnrpc.RestoreChanBackupRequest{ - Backup: backup, - }, - ) - if errr != nil { - return nil, er.Errorf("unable "+ - "to restore backups: %v", errr) - } - - return newNode, nil - }, nil -} diff --git a/lnd/lntest/itest/lnd_forward_interceptor_test.go b/lnd/lntest/itest/lnd_forward_interceptor_test.go deleted file mode 100644 index d61ef6b6..00000000 --- a/lnd/lntest/itest/lnd_forward_interceptor_test.go +++ /dev/null @@ -1,420 +0,0 @@ -package itest - -import ( - "context" - "encoding/hex" - "sync" - "time" - - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd" - "github.com/pkt-cash/pktd/lnd/chainreg" - "github.com/pkt-cash/pktd/lnd/lnrpc" - 
"github.com/pkt-cash/pktd/lnd/lnrpc/routerrpc" - "github.com/pkt-cash/pktd/lnd/lntest" - "github.com/pkt-cash/pktd/lnd/routing/route" - "github.com/pkt-cash/pktd/wire" - "github.com/stretchr/testify/require" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -var ( - customTestKey uint64 = 394829 - customTestValue = []byte{1, 3, 5} -) - -type interceptorTestCase struct { - amountMsat int64 - invoice *lnrpc.Invoice - shouldHold bool - interceptorAction routerrpc.ResolveHoldForwardAction -} - -// testForwardInterceptor tests the forward interceptor RPC layer. -// The test creates a cluster of 3 connected nodes: Alice -> Bob -> Carol -// Alice sends 4 different payments to Carol while the interceptor handles -// differently the htlcs. -// The test ensures that: -// 1. Intercepted failed htlcs result in no payment (invoice is not settled). -// 2. Intercepted resumed htlcs result in a payment (invoice is settled). -// 3. Intercepted held htlcs result in no payment (invoice is not settled). -// 4. When Interceptor disconnects it resumes all held htlcs, which result in -// valid payment (invoice is settled). -func testForwardInterceptor(net *lntest.NetworkHarness, t *harnessTest) { - // initialize the test context with 3 connected nodes. - testContext := newInterceptorTestContext(t, net) - defer testContext.shutdownNodes() - - const ( - chanAmt = btcutil.Amount(300000) - ) - - // Open and wait for channels. - testContext.openChannel(testContext.alice, testContext.bob, chanAmt) - testContext.openChannel(testContext.bob, testContext.carol, chanAmt) - defer testContext.closeChannels() - testContext.waitForChannels() - - // Connect the interceptor. - ctx := context.Background() - ctxt, cancelInterceptor := context.WithTimeout(ctx, defaultTimeout) - interceptor, errr := testContext.bob.RouterClient.HtlcInterceptor(ctxt) - if errr != nil { - t.Fatalf("failed to create HtlcInterceptor %v", errr) - } - - // Prepare the test cases. 
- testCases, err := testContext.prepareTestCases() - if err != nil { - t.Fatalf("failed to prepare test cases") - } - - // A channel for the interceptor go routine to send the requested packets. - interceptedChan := make(chan *routerrpc.ForwardHtlcInterceptRequest, - len(testCases)) - - // Run the interceptor loop in its own go routine. - var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() - for { - request, err := interceptor.Recv() - if err != nil { - // If it is just the error result of the context cancellation - // the we exit silently. - status, ok := status.FromError(err) - if ok && status.Code() == codes.Canceled { - return - } - // Otherwise it an unexpected error, we fail the test. - t.t.Errorf("unexpected error in interceptor.Recv() %v", err) - return - } - interceptedChan <- request - } - }() - - // For each test case make sure we initiate a payment from Alice to Carol - // routed through Bob. For each payment we also test its final status - // according to the interceptorAction specified in the test case. - wg.Add(1) - go func() { - defer wg.Done() - for _, tc := range testCases { - attempt, err := testContext.sendAliceToCarolPayment( - context.Background(), tc.invoice.ValueMsat, tc.invoice.RHash) - - if t.t.Failed() { - return - } - if err != nil { - t.t.Errorf("failed to send payment %v", err) - } - - switch tc.interceptorAction { - // For 'fail' interceptor action we make sure the payment failed. - case routerrpc.ResolveHoldForwardAction_FAIL: - if attempt.Status != lnrpc.HTLCAttempt_FAILED { - t.t.Errorf("expected payment to fail, instead got %v", attempt.Status) - } - - // For settle and resume we make sure the payment is successful. 
- case routerrpc.ResolveHoldForwardAction_SETTLE: - fallthrough - - case routerrpc.ResolveHoldForwardAction_RESUME: - if attempt.Status != lnrpc.HTLCAttempt_SUCCEEDED { - t.t.Errorf("expected payment to succeed, instead got %v", attempt.Status) - } - } - } - }() - - // We make sure here the interceptor has processed all packets before we - // check the payment statuses. - for i := 0; i < len(testCases); i++ { - select { - case request := <-interceptedChan: - // Assert sanity of informational packet data. - require.NotZero(t.t, request.OutgoingRequestedChanId) - require.NotZero(t.t, request.IncomingExpiry) - require.NotZero(t.t, request.IncomingAmountMsat) - - require.Less( - t.t, - request.OutgoingExpiry, request.IncomingExpiry, - ) - require.Less( - t.t, - request.OutgoingAmountMsat, - request.IncomingAmountMsat, - ) - - value, ok := request.CustomRecords[customTestKey] - require.True(t.t, ok, "expected custom record") - require.Equal(t.t, customTestValue, value) - - testCase := testCases[i] - - // For held packets we ignore, keeping them in hold status. - if testCase.shouldHold { - continue - } - - // For all other packets we resolve according to the test case. - _ = interceptor.Send(&routerrpc.ForwardHtlcInterceptResponse{ - IncomingCircuitKey: request.IncomingCircuitKey, - Action: testCase.interceptorAction, - Preimage: testCase.invoice.RPreimage, - }) - case <-time.After(defaultTimeout): - t.Fatalf("response from interceptor was not received %v", i) - } - } - - // At this point we are left with the held packets, we want to make sure - // each one of them has a corresponding 'in-flight' payment at - // Alice's node. 
- payments, errr := testContext.alice.ListPayments(context.Background(), - &lnrpc.ListPaymentsRequest{IncludeIncomplete: true}) - if errr != nil { - t.Fatalf("failed to fetch payments") - } - for _, testCase := range testCases { - if testCase.shouldHold { - hashStr := hex.EncodeToString(testCase.invoice.RHash) - var foundPayment *lnrpc.Payment - expectedAmt := testCase.invoice.ValueMsat - for _, p := range payments.Payments { - if p.PaymentHash == hashStr { - foundPayment = p - break - } - } - if foundPayment == nil { - t.Fatalf("expected to find pending payment for held"+ - "htlc %v", hashStr) - } - if foundPayment.ValueMsat != expectedAmt || - foundPayment.Status != lnrpc.Payment_IN_FLIGHT { - - t.Fatalf("expected to find in flight payment for"+ - "amount %v, %v", testCase.invoice.ValueMsat, foundPayment.Status) - } - } - } - - // Disconnect interceptor should cause resume held packets. - // After that we wait for all go routines to finish, including the one - // that tests the payment final status for the held payment. - cancelInterceptor() - wg.Wait() -} - -// interceptorTestContext is a helper struct to hold the test context and -// provide the needed functionality. -type interceptorTestContext struct { - t *harnessTest - net *lntest.NetworkHarness - - // Keep a list of all our active channels. 
- networkChans []*lnrpc.ChannelPoint - closeChannelFuncs []func() - - alice, bob, carol *lntest.HarnessNode - nodes []*lntest.HarnessNode -} - -func newInterceptorTestContext(t *harnessTest, - net *lntest.NetworkHarness) *interceptorTestContext { - - ctxb := context.Background() - - // Create a three-node context consisting of Alice, Bob and Carol - carol, err := net.NewNode("carol", nil) - if err != nil { - t.Fatalf("unable to create carol: %v", err) - } - - // Connect nodes - nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol} - for i := 0; i < len(nodes); i++ { - for j := i + 1; j < len(nodes); j++ { - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - if err := net.EnsureConnected(ctxt, nodes[i], nodes[j]); err != nil { - t.Fatalf("unable to connect nodes: %v", err) - } - } - } - - ctx := interceptorTestContext{ - t: t, - net: net, - alice: net.Alice, - bob: net.Bob, - carol: carol, - nodes: nodes, - } - - return &ctx -} - -// prepareTestCases prepares 4 tests: -// 1. failed htlc. -// 2. resumed htlc. -// 3. settling htlc externally. -// 4. held htlc that is resumed later. 
-func (c *interceptorTestContext) prepareTestCases() ( - []*interceptorTestCase, er.R) { - - cases := []*interceptorTestCase{ - {amountMsat: 1000, shouldHold: false, - interceptorAction: routerrpc.ResolveHoldForwardAction_FAIL}, - {amountMsat: 1000, shouldHold: false, - interceptorAction: routerrpc.ResolveHoldForwardAction_RESUME}, - {amountMsat: 1000, shouldHold: false, - interceptorAction: routerrpc.ResolveHoldForwardAction_SETTLE}, - {amountMsat: 1000, shouldHold: true, - interceptorAction: routerrpc.ResolveHoldForwardAction_RESUME}, - } - - for _, t := range cases { - addResponse, err := c.carol.AddInvoice(context.Background(), &lnrpc.Invoice{ - ValueMsat: t.amountMsat, - }) - if err != nil { - return nil, er.Errorf("unable to add invoice: %v", err) - } - invoice, err := c.carol.LookupInvoice(context.Background(), &lnrpc.PaymentHash{ - RHashStr: hex.EncodeToString(addResponse.RHash), - }) - if err != nil { - return nil, er.Errorf("unable to add invoice: %v", err) - } - t.invoice = invoice - } - return cases, nil -} - -func (c *interceptorTestContext) openChannel(from, to *lntest.HarnessNode, chanSize btcutil.Amount) { - ctxb := context.Background() - - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - err := c.net.SendCoins(ctxt, btcutil.UnitsPerCoin(), from) - if err != nil { - c.t.Fatalf("unable to send coins : %v", err) - } - - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - chanPoint := openChannelAndAssert( - ctxt, c.t, c.net, from, to, - lntest.OpenChannelParams{ - Amt: chanSize, - }, - ) - - c.closeChannelFuncs = append(c.closeChannelFuncs, func() { - ctxt, _ := context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert( - ctxt, c.t, c.net, from, chanPoint, false, - ) - }) - - c.networkChans = append(c.networkChans, chanPoint) -} - -func (c *interceptorTestContext) closeChannels() { - for _, f := range c.closeChannelFuncs { - f() - } -} - -func (c *interceptorTestContext) shutdownNodes() { - shutdownAndAssert(c.net, c.t, 
c.carol) -} - -func (c *interceptorTestContext) waitForChannels() { - ctxb := context.Background() - - // Wait for all nodes to have seen all channels. - for _, chanPoint := range c.networkChans { - for _, node := range c.nodes { - txid, err := lnd.GetChanPointFundingTxid(chanPoint) - if err != nil { - c.t.Fatalf("unable to get txid: %v", err) - } - point := wire.OutPoint{ - Hash: *txid, - Index: chanPoint.OutputIndex, - } - - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - err = node.WaitForNetworkChannelOpen(ctxt, chanPoint) - if err != nil { - c.t.Fatalf("(%d): timeout waiting for "+ - "channel(%s) open: %v", - node.NodeID, point, err) - } - } - } -} - -// sendAliceToCarolPayment sends a payment from alice to carol and make an -// attempt to pay. The lnrpc.HTLCAttempt is returned. -func (c *interceptorTestContext) sendAliceToCarolPayment(ctx context.Context, - amtMsat int64, paymentHash []byte) (*lnrpc.HTLCAttempt, er.R) { - - // Build a route from alice to carol. - route, err := c.buildRoute(ctx, amtMsat, []*lntest.HarnessNode{c.bob, c.carol}) - if err != nil { - return nil, err - } - sendReq := &routerrpc.SendToRouteRequest{ - PaymentHash: paymentHash, - Route: route, - } - - // Send a custom record to the forwarding node. - route.Hops[0].CustomRecords = map[uint64][]byte{ - customTestKey: customTestValue, - } - - // Send the payment. - ret, errr := c.alice.RouterClient.SendToRouteV2(ctx, sendReq) - if errr != nil { - return ret, er.E(errr) - } - return ret, nil -} - -// buildRoute is a helper function to build a route with given hops. 
-func (c *interceptorTestContext) buildRoute(ctx context.Context, amtMsat int64, hops []*lntest.HarnessNode) ( - *lnrpc.Route, er.R) { - - rpcHops := make([][]byte, 0, len(hops)) - for _, hop := range hops { - k := hop.PubKeyStr - pubkey, err := route.NewVertexFromStr(k) - if err != nil { - return nil, er.Errorf("error parsing %v: %v", - k, err) - } - rpcHops = append(rpcHops, pubkey[:]) - } - - req := &routerrpc.BuildRouteRequest{ - AmtMsat: amtMsat, - FinalCltvDelta: chainreg.DefaultBitcoinTimeLockDelta, - HopPubkeys: rpcHops, - } - - routeResp, err := c.alice.RouterClient.BuildRoute(ctx, req) - if err != nil { - return nil, er.E(err) - } - - return routeResp.Route, nil -} diff --git a/lnd/lntest/itest/lnd_macaroons_test.go b/lnd/lntest/itest/lnd_macaroons_test.go deleted file mode 100644 index 6b6b8f23..00000000 --- a/lnd/lntest/itest/lnd_macaroons_test.go +++ /dev/null @@ -1,613 +0,0 @@ -package itest - -import ( - "bytes" - "context" - "os" - "sort" - "strconv" - "strings" - "testing" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/lnd/lnrpc" - "github.com/pkt-cash/pktd/lnd/lntest" - "github.com/pkt-cash/pktd/lnd/macaroons" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "gopkg.in/macaroon.v2" -) - -// testMacaroonAuthentication makes sure that if macaroon authentication is -// enabled on the gRPC interface, no requests with missing or invalid -// macaroons are allowed. Further, the specific access rights (read/write, -// entity based) and first-party caveats are tested as well. 
-func testMacaroonAuthentication(net *lntest.NetworkHarness, ht *harnessTest) { - var ( - infoReq = &lnrpc.GetInfoRequest{} - newAddrReq = &lnrpc.NewAddressRequest{ - Type: AddrTypeWitnessPubkeyHash, - } - testNode = net.Alice - ) - - testCases := []struct { - name string - run func(ctxt context.Context, t *testing.T) - }{{ - // First test: Make sure we get an error if we use no macaroons - // but try to connect to a node that has macaroon authentication - // enabled. - name: "no macaroon", - run: func(ctxt context.Context, t *testing.T) { - conn, err := testNode.ConnectRPC(false) - util.RequireNoErr(t, err) - defer func() { _ = conn.Close() }() - client := lnrpc.NewLightningClient(conn) - _, errr := client.GetInfo(ctxt, infoReq) - require.Error(t, errr) - require.Contains(t, errr.Error(), "expected 1 macaroon") - }, - }, { - // Second test: Ensure that an invalid macaroon also triggers an - // error. - name: "invalid macaroon", - run: func(ctxt context.Context, t *testing.T) { - invalidMac, _ := macaroon.New( - []byte("dummy_root_key"), []byte("0"), "itest", - macaroon.LatestVersion, - ) - cleanup, client := macaroonClient( - t, testNode, invalidMac, - ) - defer cleanup() - _, errr := client.GetInfo(ctxt, infoReq) - require.Error(t, errr) - require.Contains(t, errr.Error(), "cannot get macaroon") - }, - }, { - // Third test: Try to access a write method with read-only - // macaroon. - name: "read only macaroon", - run: func(ctxt context.Context, t *testing.T) { - readonlyMac, err := testNode.ReadMacaroon( - testNode.ReadMacPath(), defaultTimeout, - ) - util.RequireNoErr(t, err) - cleanup, client := macaroonClient( - t, testNode, readonlyMac, - ) - defer cleanup() - _, errr := client.NewAddress(ctxt, newAddrReq) - require.Error(t, errr) - require.Contains(t, errr.Error(), "permission denied") - }, - }, { - // Fourth test: Check first-party caveat with timeout that - // expired 30 seconds ago. 
- name: "expired macaroon", - run: func(ctxt context.Context, t *testing.T) { - readonlyMac, err := testNode.ReadMacaroon( - testNode.ReadMacPath(), defaultTimeout, - ) - util.RequireNoErr(t, err) - timeoutMac, err := macaroons.AddConstraints( - readonlyMac, macaroons.TimeoutConstraint(-30), - ) - util.RequireNoErr(t, err) - cleanup, client := macaroonClient( - t, testNode, timeoutMac, - ) - defer cleanup() - _, errr := client.GetInfo(ctxt, infoReq) - require.Error(t, errr) - require.Contains(t, errr.Error(), "macaroon has expired") - }, - }, { - // Fifth test: Check first-party caveat with invalid IP address. - name: "invalid IP macaroon", - run: func(ctxt context.Context, t *testing.T) { - readonlyMac, err := testNode.ReadMacaroon( - testNode.ReadMacPath(), defaultTimeout, - ) - util.RequireNoErr(t, err) - invalidIPAddrMac, err := macaroons.AddConstraints( - readonlyMac, macaroons.IPLockConstraint( - "1.1.1.1", - ), - ) - util.RequireNoErr(t, err) - cleanup, client := macaroonClient( - t, testNode, invalidIPAddrMac, - ) - defer cleanup() - _, errr := client.GetInfo(ctxt, infoReq) - require.Error(t, errr) - require.Contains(t, errr.Error(), "different IP address") - }, - }, { - // Sixth test: Make sure that if we do everything correct and - // send the admin macaroon with first-party caveats that we can - // satisfy, we get a correct answer. 
- name: "correct macaroon", - run: func(ctxt context.Context, t *testing.T) { - adminMac, err := testNode.ReadMacaroon( - testNode.AdminMacPath(), defaultTimeout, - ) - util.RequireNoErr(t, err) - adminMac, err = macaroons.AddConstraints( - adminMac, macaroons.TimeoutConstraint(30), - macaroons.IPLockConstraint("127.0.0.1"), - ) - util.RequireNoErr(t, err) - cleanup, client := macaroonClient(t, testNode, adminMac) - defer cleanup() - res, errr := client.NewAddress(ctxt, newAddrReq) - require.NoError(t, errr, "get new address") - assert.Contains(t, res.Address, "bcrt1") - }, - }, { - // Seventh test: Bake a macaroon that can only access exactly - // two RPCs and make sure it works as expected. - name: "custom URI permissions", - run: func(ctxt context.Context, t *testing.T) { - entity := macaroons.PermissionEntityCustomURI - req := &lnrpc.BakeMacaroonRequest{ - Permissions: []*lnrpc.MacaroonPermission{{ - Entity: entity, - Action: "/lnrpc.Lightning/GetInfo", - }, { - Entity: entity, - Action: "/lnrpc.Lightning/List" + - "Permissions", - }}, - } - bakeRes, errr := testNode.BakeMacaroon(ctxt, req) - require.NoError(t, errr) - - // Create a connection that uses the custom macaroon. - customMacBytes, err := util.DecodeHex( - bakeRes.Macaroon, - ) - util.RequireNoErr(t, err) - customMac := &macaroon.Macaroon{} - errr = customMac.UnmarshalBinary(customMacBytes) - require.NoError(t, errr) - cleanup, client := macaroonClient( - t, testNode, customMac, - ) - defer cleanup() - - // Call GetInfo which should succeed. - _, errr = client.GetInfo(ctxt, infoReq) - require.NoError(t, errr) - - // Call ListPermissions which should also succeed. - permReq := &lnrpc.ListPermissionsRequest{} - permRes, errr := client.ListPermissions(ctxt, permReq) - require.NoError(t, errr) - require.Greater( - t, len(permRes.MethodPermissions), 10, - "permissions", - ) - - // Try NewAddress which should be denied. 
- _, errr = client.NewAddress(ctxt, newAddrReq) - require.Error(t, errr) - require.Contains(t, errr.Error(), "permission denied") - }, - }} - - for _, tc := range testCases { - tc := tc - ht.t.Run(tc.name, func(tt *testing.T) { - ctxt, cancel := context.WithTimeout( - context.Background(), defaultTimeout, - ) - defer cancel() - - tc.run(ctxt, tt) - }) - } -} - -// testBakeMacaroon checks that when creating macaroons, the permissions param -// in the request must be set correctly, and the baked macaroon has the intended -// permissions. -func testBakeMacaroon(net *lntest.NetworkHarness, t *harnessTest) { - var testNode = net.Alice - - testCases := []struct { - name string - run func(ctxt context.Context, t *testing.T, - adminClient lnrpc.LightningClient) - }{{ - // First test: when the permission list is empty in the request, - // an error should be returned. - name: "no permission list", - run: func(ctxt context.Context, t *testing.T, - adminClient lnrpc.LightningClient) { - - req := &lnrpc.BakeMacaroonRequest{} - _, errr := adminClient.BakeMacaroon(ctxt, req) - require.Error(t, errr) - assert.Contains( - t, errr.Error(), "permission list cannot be "+ - "empty", - ) - }, - }, { - // Second test: when the action in the permission list is not - // valid, an error should be returned. - name: "invalid permission list", - run: func(ctxt context.Context, t *testing.T, - adminClient lnrpc.LightningClient) { - - req := &lnrpc.BakeMacaroonRequest{ - Permissions: []*lnrpc.MacaroonPermission{{ - Entity: "macaroon", - Action: "invalid123", - }}, - } - _, errr := adminClient.BakeMacaroon(ctxt, req) - require.Error(t, errr) - assert.Contains( - t, errr.Error(), "invalid permission action", - ) - }, - }, { - // Third test: when the entity in the permission list is not - // valid, an error should be returned. 
- name: "invalid permission entity", - run: func(ctxt context.Context, t *testing.T, - adminClient lnrpc.LightningClient) { - - req := &lnrpc.BakeMacaroonRequest{ - Permissions: []*lnrpc.MacaroonPermission{{ - Entity: "invalid123", - Action: "read", - }}, - } - _, errr := adminClient.BakeMacaroon(ctxt, req) - require.Error(t, errr) - assert.Contains( - t, errr.Error(), "invalid permission entity", - ) - }, - }, { - // Fourth test: check that when no root key ID is specified, the - // default root keyID is used. - name: "default root key ID", - run: func(ctxt context.Context, t *testing.T, - adminClient lnrpc.LightningClient) { - - req := &lnrpc.BakeMacaroonRequest{ - Permissions: []*lnrpc.MacaroonPermission{{ - Entity: "macaroon", - Action: "read", - }}, - } - _, errr := adminClient.BakeMacaroon(ctxt, req) - require.NoError(t, errr) - - listReq := &lnrpc.ListMacaroonIDsRequest{} - resp, errr := adminClient.ListMacaroonIDs(ctxt, listReq) - require.NoError(t, errr) - require.Equal(t, resp.RootKeyIds[0], uint64(0)) - }, - }, { - // Fifth test: create a macaroon use a non-default root key ID. - name: "custom root key ID", - run: func(ctxt context.Context, t *testing.T, - adminClient lnrpc.LightningClient) { - - rootKeyID := uint64(4200) - req := &lnrpc.BakeMacaroonRequest{ - RootKeyId: rootKeyID, - Permissions: []*lnrpc.MacaroonPermission{{ - Entity: "macaroon", - Action: "read", - }}, - } - _, errr := adminClient.BakeMacaroon(ctxt, req) - require.NoError(t, errr) - - listReq := &lnrpc.ListMacaroonIDsRequest{} - resp, errr := adminClient.ListMacaroonIDs(ctxt, listReq) - require.NoError(t, errr) - - // the ListMacaroonIDs should give a list of two IDs, - // the default ID 0, and the newly created ID. The - // returned response is sorted to guarantee the order so - // that we can compare them one by one. 
- sort.Slice(resp.RootKeyIds, func(i, j int) bool { - return resp.RootKeyIds[i] < resp.RootKeyIds[j] - }) - require.Equal(t, resp.RootKeyIds[0], uint64(0)) - require.Equal(t, resp.RootKeyIds[1], rootKeyID) - }, - }, { - // Sixth test: check the baked macaroon has the intended - // permissions. It should succeed in reading, and fail to write - // a macaroon. - name: "custom macaroon permissions", - run: func(ctxt context.Context, t *testing.T, - adminClient lnrpc.LightningClient) { - - rootKeyID := uint64(4200) - req := &lnrpc.BakeMacaroonRequest{ - RootKeyId: rootKeyID, - Permissions: []*lnrpc.MacaroonPermission{{ - Entity: "macaroon", - Action: "read", - }}, - } - bakeResp, errr := adminClient.BakeMacaroon(ctxt, req) - require.NoError(t, errr) - - newMac, err := readMacaroonFromHex(bakeResp.Macaroon) - util.RequireNoErr(t, err) - cleanup, readOnlyClient := macaroonClient( - t, testNode, newMac, - ) - defer cleanup() - - // BakeMacaroon requires a write permission, so this - // call should return an error. - _, errr = readOnlyClient.BakeMacaroon(ctxt, req) - require.Error(t, errr) - require.Contains(t, errr.Error(), "permission denied") - - // ListMacaroon requires a read permission, so this call - // should succeed. - listReq := &lnrpc.ListMacaroonIDsRequest{} - _, errr = readOnlyClient.ListMacaroonIDs(ctxt, listReq) - require.NoError(t, errr) - - // Current macaroon can only work on entity macaroon, so - // a GetInfo request will fail. 
- infoReq := &lnrpc.GetInfoRequest{} - _, errr = readOnlyClient.GetInfo(ctxt, infoReq) - require.Error(t, errr) - require.Contains(t, errr.Error(), "permission denied") - }, - }} - - for _, tc := range testCases { - tc := tc - t.t.Run(tc.name, func(tt *testing.T) { - ctxt, cancel := context.WithTimeout( - context.Background(), defaultTimeout, - ) - defer cancel() - - adminMac, err := testNode.ReadMacaroon( - testNode.AdminMacPath(), defaultTimeout, - ) - util.RequireNoErr(tt, err) - cleanup, client := macaroonClient(tt, testNode, adminMac) - defer cleanup() - - tc.run(ctxt, tt, client) - }) - } -} - -// testDeleteMacaroonID checks that when deleting a macaroon ID, it removes the -// specified ID and invalidates all macaroons derived from the key with that ID. -// Also, it checks deleting the reserved marcaroon ID, DefaultRootKeyID or is -// forbidden. -func testDeleteMacaroonID(net *lntest.NetworkHarness, t *harnessTest) { - var ( - ctxb = context.Background() - testNode = net.Alice - ) - ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout) - defer cancel() - - // Use admin macaroon to create a connection. - adminMac, err := testNode.ReadMacaroon( - testNode.AdminMacPath(), defaultTimeout, - ) - util.RequireNoErr(t.t, err) - cleanup, client := macaroonClient(t.t, testNode, adminMac) - defer cleanup() - - // Record the number of macaroon IDs before creation. - listReq := &lnrpc.ListMacaroonIDsRequest{} - listResp, errr := client.ListMacaroonIDs(ctxt, listReq) - require.NoError(t.t, errr) - numMacIDs := len(listResp.RootKeyIds) - - // Create macaroons for testing. 
- rootKeyIDs := []uint64{1, 2, 3} - macList := make([]string, 0, len(rootKeyIDs)) - for _, id := range rootKeyIDs { - req := &lnrpc.BakeMacaroonRequest{ - RootKeyId: id, - Permissions: []*lnrpc.MacaroonPermission{{ - Entity: "macaroon", - Action: "read", - }}, - } - resp, errr := client.BakeMacaroon(ctxt, req) - require.NoError(t.t, errr) - macList = append(macList, resp.Macaroon) - } - - // Check that the creation is successful. - listReq = &lnrpc.ListMacaroonIDsRequest{} - listResp, errr = client.ListMacaroonIDs(ctxt, listReq) - require.NoError(t.t, errr) - - // The number of macaroon IDs should be increased by len(rootKeyIDs). - require.Equal(t.t, numMacIDs+len(rootKeyIDs), len(listResp.RootKeyIds)) - - // First test: check deleting the DefaultRootKeyID returns an error. - defaultID, _ := strconv.ParseUint( - string(macaroons.DefaultRootKeyID), 10, 64, - ) - req := &lnrpc.DeleteMacaroonIDRequest{ - RootKeyId: defaultID, - } - _, errr = client.DeleteMacaroonID(ctxt, req) - require.Error(t.t, errr) - require.Contains( - t.t, errr.Error(), macaroons.ErrDeletionForbidden.Detail, - ) - - // Second test: check deleting the customized ID returns success. - req = &lnrpc.DeleteMacaroonIDRequest{ - RootKeyId: rootKeyIDs[0], - } - resp, errr := client.DeleteMacaroonID(ctxt, req) - require.NoError(t.t, errr) - require.True(t.t, resp.Deleted) - - // Check that the deletion is successful. - listReq = &lnrpc.ListMacaroonIDsRequest{} - listResp, errr = client.ListMacaroonIDs(ctxt, listReq) - require.NoError(t.t, errr) - - // The number of macaroon IDs should be decreased by 1. - require.Equal(t.t, numMacIDs+len(rootKeyIDs)-1, len(listResp.RootKeyIds)) - - // Check that the deleted macaroon can no longer access macaroon:read. - deletedMac, err := readMacaroonFromHex(macList[0]) - util.RequireNoErr(t.t, err) - cleanup, client = macaroonClient(t.t, testNode, deletedMac) - defer cleanup() - - // Because the macaroon is deleted, it will be treated as an invalid one. 
- listReq = &lnrpc.ListMacaroonIDsRequest{} - _, errr = client.ListMacaroonIDs(ctxt, listReq) - require.Error(t.t, errr) - require.Contains(t.t, errr.Error(), "cannot get macaroon") -} - -// testStatelessInit checks that the stateless initialization of the daemon -// does not write any macaroon files to the daemon's file system and returns -// the admin macaroon in the response. It then checks that the password -// change of the wallet can also happen stateless. -func testStatelessInit(net *lntest.NetworkHarness, t *harnessTest) { - var ( - initPw = []byte("stateless") - newPw = []byte("stateless-new") - newAddrReq = &lnrpc.NewAddressRequest{ - Type: AddrTypeWitnessPubkeyHash, - } - ) - - // First, create a new node and request it to initialize stateless. - // This should return us the binary serialized admin macaroon that we - // can then use for further calls. - carol, _, macBytes, err := net.NewNodeWithSeed( - "Carol", nil, initPw, true, - ) - util.RequireNoErr(t.t, err) - if len(macBytes) == 0 { - t.Fatalf("invalid macaroon returned in stateless init") - } - - // Now make sure no macaroon files have been created by the node Carol. - _, errr := os.Stat(carol.AdminMacPath()) - require.Error(t.t, errr) - _, errr = os.Stat(carol.ReadMacPath()) - require.Error(t.t, errr) - _, errr = os.Stat(carol.InvoiceMacPath()) - require.Error(t.t, errr) - - // Then check that we can unmarshal the binary serialized macaroon. - adminMac := &macaroon.Macaroon{} - errr = adminMac.UnmarshalBinary(macBytes) - require.NoError(t.t, errr) - - // Find out if we can actually use the macaroon that has been returned - // to us for a RPC call. 
- conn, err := carol.ConnectRPCWithMacaroon(adminMac) - util.RequireNoErr(t.t, err) - defer conn.Close() - adminMacClient := lnrpc.NewLightningClient(conn) - ctxt, _ := context.WithTimeout(context.Background(), defaultTimeout) - res, errr := adminMacClient.NewAddress(ctxt, newAddrReq) - require.NoError(t.t, errr) - if !strings.HasPrefix(res.Address, harnessNetParams.Bech32HRPSegwit) { - t.Fatalf("returned address was not a regtest address") - } - - // As a second part, shut down the node and then try to change the - // password when we start it up again. - if err := net.RestartNodeNoUnlock(carol, nil); err != nil { - t.Fatalf("Node restart failed: %v", err) - } - changePwReq := &lnrpc.ChangePasswordRequest{ - CurrentPassword: initPw, - NewPassword: newPw, - StatelessInit: true, - } - ctxb := context.Background() - response, err := carol.InitChangePassword(ctxb, changePwReq) - util.RequireNoErr(t.t, err) - - // Again, make sure no macaroon files have been created by the node - // Carol. - _, errr = os.Stat(carol.AdminMacPath()) - require.Error(t.t, errr) - _, errr = os.Stat(carol.ReadMacPath()) - require.Error(t.t, errr) - _, errr = os.Stat(carol.InvoiceMacPath()) - require.Error(t.t, errr) - - // Then check that we can unmarshal the new binary serialized macaroon - // and that it really is a new macaroon. - if errr = adminMac.UnmarshalBinary(response.AdminMacaroon); err != nil { - t.Fatalf("unable to unmarshal macaroon: %v", errr) - } - if bytes.Equal(response.AdminMacaroon, macBytes) { - t.Fatalf("expected new macaroon to be different") - } - - // Finally, find out if we can actually use the new macaroon that has - // been returned to us for a RPC call. - conn2, err := carol.ConnectRPCWithMacaroon(adminMac) - util.RequireNoErr(t.t, err) - defer conn2.Close() - adminMacClient = lnrpc.NewLightningClient(conn2) - - // Changing the password takes a while, so we use the default timeout - // of 30 seconds to wait for the connection to be ready. 
- ctxt, _ = context.WithTimeout(context.Background(), defaultTimeout) - res, errr = adminMacClient.NewAddress(ctxt, newAddrReq) - require.NoError(t.t, errr) - if !strings.HasPrefix(res.Address, harnessNetParams.Bech32HRPSegwit) { - t.Fatalf("returned address was not a regtest address") - } -} - -// readMacaroonFromHex loads a macaroon from a hex string. -func readMacaroonFromHex(macHex string) (*macaroon.Macaroon, er.R) { - macBytes, err := util.DecodeHex(macHex) - if err != nil { - return nil, err - } - - mac := &macaroon.Macaroon{} - if errr := mac.UnmarshalBinary(macBytes); errr != nil { - return nil, er.E(errr) - } - return mac, nil -} - -func macaroonClient(t *testing.T, testNode *lntest.HarnessNode, - mac *macaroon.Macaroon) (func(), lnrpc.LightningClient) { - - conn, err := testNode.ConnectRPCWithMacaroon(mac) - util.RequireNoErr(t, err, "connect to alice") - - cleanup := func() { - errr := conn.Close() - require.NoError(t, errr, "close") - } - return cleanup, lnrpc.NewLightningClient(conn) -} diff --git a/lnd/lntest/itest/lnd_max_channel_size_test.go b/lnd/lntest/itest/lnd_max_channel_size_test.go deleted file mode 100644 index be54c67e..00000000 --- a/lnd/lntest/itest/lnd_max_channel_size_test.go +++ /dev/null @@ -1,120 +0,0 @@ -// +build rpctest - -package itest - -import ( - "context" - "fmt" - "strings" - - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/lnd" - "github.com/pkt-cash/pktd/lnd/lntest" -) - -// testMaxChannelSize tests that lnd handles --maxchansize parameter -// correctly. Wumbo nodes should enforce a default soft limit of 10 BTC by -// default. 
This limit can be adjusted with --maxchansize config option -func testMaxChannelSize(net *lntest.NetworkHarness, t *harnessTest) { - // We'll make two new nodes, both wumbo but with the default - // limit on maximum channel size (10 BTC) - wumboNode, err := net.NewNode( - "wumbo", []string{"--protocol.wumbo-channels"}, - ) - if err != nil { - t.Fatalf("unable to create new node: %v", err) - } - defer shutdownAndAssert(net, t, wumboNode) - - wumboNode2, err := net.NewNode( - "wumbo2", []string{"--protocol.wumbo-channels"}, - ) - if err != nil { - t.Fatalf("unable to create new node: %v", err) - } - defer shutdownAndAssert(net, t, wumboNode2) - - // We'll send 11 BTC to the wumbo node so it can test the wumbo soft limit. - ctxb := context.Background() - err = net.SendCoins(ctxb, 11*btcutil.UnitsPerCoin(), wumboNode) - if err != nil { - t.Fatalf("unable to send coins to wumbo node: %v", err) - } - - // Next we'll connect both nodes, then attempt to make a wumbo channel - // funding request, which should fail as it exceeds the default wumbo - // soft limit of 10 BTC. - err = net.EnsureConnected(ctxb, wumboNode, wumboNode2) - if err != nil { - t.Fatalf("unable to connect peers: %v", err) - } - - chanAmt := lnd.MaxBtcFundingAmountWumbo + 1 - _, err = net.OpenChannel( - ctxb, wumboNode, wumboNode2, lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - if err == nil { - t.Fatalf("expected channel funding to fail as it exceeds 10 BTC limit") - } - - // The test should show failure due to the channel exceeding our max size. - if !strings.Contains(err.Error(), "exceeds maximum chan size") { - t.Fatalf("channel should be rejected due to size, instead "+ - "error was: %v", err) - } - - // Next we'll create a non-wumbo node to verify that it enforces the - // BOLT-02 channel size limit and rejects our funding request. 
- miniNode, err := net.NewNode("mini", nil) - if err != nil { - t.Fatalf("unable to create new node: %v", err) - } - defer shutdownAndAssert(net, t, miniNode) - - err = net.EnsureConnected(ctxb, wumboNode, miniNode) - if err != nil { - t.Fatalf("unable to connect peers: %v", err) - } - - _, err = net.OpenChannel( - ctxb, wumboNode, miniNode, lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - if err == nil { - t.Fatalf("expected channel funding to fail as it exceeds 0.16 BTC limit") - } - - // The test should show failure due to the channel exceeding our max size. - if !strings.Contains(err.Error(), "exceeds maximum chan size") { - t.Fatalf("channel should be rejected due to size, instead "+ - "error was: %v", err) - } - - // We'll now make another wumbo node with appropriate maximum channel size - // to accept our wumbo channel funding. - wumboNode3, err := net.NewNode( - "wumbo3", []string{"--protocol.wumbo-channels", - fmt.Sprintf("--maxchansize=%v", int64(lnd.MaxBtcFundingAmountWumbo+1))}, - ) - if err != nil { - t.Fatalf("unable to create new node: %v", err) - } - defer shutdownAndAssert(net, t, wumboNode3) - - // Creating a wumbo channel between these two nodes should succeed. 
- err = net.EnsureConnected(ctxb, wumboNode, wumboNode3) - if err != nil { - t.Fatalf("unable to connect peers: %v", err) - } - chanPoint := openChannelAndAssert( - ctxb, t, net, wumboNode, wumboNode3, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - closeChannelAndAssert(ctxb, t, net, wumboNode, chanPoint, false) - -} diff --git a/lnd/lntest/itest/lnd_mpp_test.go b/lnd/lntest/itest/lnd_mpp_test.go deleted file mode 100644 index 0527c10d..00000000 --- a/lnd/lntest/itest/lnd_mpp_test.go +++ /dev/null @@ -1,397 +0,0 @@ -package itest - -import ( - "bytes" - "context" - "time" - - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd" - "github.com/pkt-cash/pktd/lnd/chainreg" - "github.com/pkt-cash/pktd/lnd/lnrpc" - "github.com/pkt-cash/pktd/lnd/lnrpc/routerrpc" - "github.com/pkt-cash/pktd/lnd/lntest" - "github.com/pkt-cash/pktd/lnd/routing/route" - "github.com/pkt-cash/pktd/wire" -) - -// testSendToRouteMultiPath tests that we are able to successfully route a -// payment using multiple shards across different paths, by using SendToRoute. -func testSendToRouteMultiPath(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - ctx := newMppTestContext(t, net) - defer ctx.shutdownNodes() - - // To ensure the payment goes through separate paths, we'll set a - // channel size that can only carry one shard at a time. We'll divide - // the payment into 3 shards. - const ( - paymentAmt = btcutil.Amount(300000) - shardAmt = paymentAmt / 3 - chanAmt = shardAmt * 3 / 2 - ) - - // Set up a network with three different paths Alice <-> Bob. 
- // _ Eve _ - // / \ - // Alice -- Carol ---- Bob - // \ / - // \__ Dave ____/ - // - ctx.openChannel(ctx.carol, ctx.bob, chanAmt) - ctx.openChannel(ctx.dave, ctx.bob, chanAmt) - ctx.openChannel(ctx.alice, ctx.dave, chanAmt) - ctx.openChannel(ctx.eve, ctx.bob, chanAmt) - ctx.openChannel(ctx.carol, ctx.eve, chanAmt) - - // Since the channel Alice-> Carol will have to carry two - // shards, we make it larger. - ctx.openChannel(ctx.alice, ctx.carol, chanAmt+shardAmt) - - defer ctx.closeChannels() - - ctx.waitForChannels() - - // Make Bob create an invoice for Alice to pay. - payReqs, rHashes, invoices, err := createPayReqs( - net.Bob, paymentAmt, 1, - ) - if err != nil { - t.Fatalf("unable to create pay reqs: %v", err) - } - - rHash := rHashes[0] - payReq := payReqs[0] - - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - decodeResp, errr := net.Bob.DecodePayReq( - ctxt, &lnrpc.PayReqString{PayReq: payReq}, - ) - if errr != nil { - t.Fatalf("decode pay req: %v", errr) - } - - payAddr := decodeResp.PaymentAddr - - // Helper function for Alice to build a route from pubkeys. - buildRoute := func(amt btcutil.Amount, hops []*lntest.HarnessNode) ( - *lnrpc.Route, er.R) { - - rpcHops := make([][]byte, 0, len(hops)) - for _, hop := range hops { - k := hop.PubKeyStr - pubkey, err := route.NewVertexFromStr(k) - if err != nil { - return nil, er.Errorf("error parsing %v: %v", - k, err) - } - rpcHops = append(rpcHops, pubkey[:]) - } - - req := &routerrpc.BuildRouteRequest{ - AmtMsat: int64(amt * 1000), - FinalCltvDelta: chainreg.DefaultBitcoinTimeLockDelta, - HopPubkeys: rpcHops, - } - - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - routeResp, errr := net.Alice.RouterClient.BuildRoute(ctxt, req) - if errr != nil { - return nil, er.E(errr) - } - - return routeResp.Route, nil - } - - // We'll send shards along three routes from Alice. 
- sendRoutes := [][]*lntest.HarnessNode{ - {ctx.carol, ctx.bob}, - {ctx.dave, ctx.bob}, - {ctx.carol, ctx.eve, ctx.bob}, - } - - responses := make(chan *lnrpc.HTLCAttempt, len(sendRoutes)) - for _, hops := range sendRoutes { - // Build a route for the specified hops. - r, err := buildRoute(shardAmt, hops) - if err != nil { - t.Fatalf("unable to build route: %v", err) - } - - // Set the MPP records to indicate this is a payment shard. - hop := r.Hops[len(r.Hops)-1] - hop.TlvPayload = true - hop.MppRecord = &lnrpc.MPPRecord{ - PaymentAddr: payAddr, - TotalAmtMsat: int64(paymentAmt * 1000), - } - - // Send the shard. - sendReq := &routerrpc.SendToRouteRequest{ - PaymentHash: rHash, - Route: r, - } - - // We'll send all shards in their own goroutine, since SendToRoute will - // block as long as the payment is in flight. - go func() { - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - resp, err := net.Alice.RouterClient.SendToRouteV2(ctxt, sendReq) - if err != nil { - t.Fatalf("unable to send payment: %v", err) - } - - responses <- resp - }() - } - - // Wait for all responses to be back, and check that they all - // succeeded. - for range sendRoutes { - var resp *lnrpc.HTLCAttempt - select { - case resp = <-responses: - case <-time.After(defaultTimeout): - t.Fatalf("response not received") - } - - if resp.Failure != nil { - t.Fatalf("received payment failure : %v", resp.Failure) - } - - // All shards should come back with the preimage. - if !bytes.Equal(resp.Preimage, invoices[0].RPreimage) { - t.Fatalf("preimage doesn't match") - } - } - - // assertNumHtlcs is a helper that checks the node's latest payment, - // and asserts it was split into num shards. 
- assertNumHtlcs := func(node *lntest.HarnessNode, num int) { - req := &lnrpc.ListPaymentsRequest{ - IncludeIncomplete: true, - } - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - paymentsResp, err := node.ListPayments(ctxt, req) - if err != nil { - t.Fatalf("error when obtaining payments: %v", - err) - } - - payments := paymentsResp.Payments - if len(payments) == 0 { - t.Fatalf("no payments found") - } - - payment := payments[len(payments)-1] - htlcs := payment.Htlcs - if len(htlcs) == 0 { - t.Fatalf("no htlcs") - } - - succeeded := 0 - for _, htlc := range htlcs { - if htlc.Status == lnrpc.HTLCAttempt_SUCCEEDED { - succeeded++ - } - } - - if succeeded != num { - t.Fatalf("expected %v succussful HTLCs, got %v", num, - succeeded) - } - } - - // assertSettledInvoice checks that the invoice for the given payment - // hash is settled, and has been paid using num HTLCs. - assertSettledInvoice := func(node *lntest.HarnessNode, rhash []byte, - num int) { - - found := false - offset := uint64(0) - for !found { - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - invoicesResp, err := node.ListInvoices( - ctxt, &lnrpc.ListInvoiceRequest{ - IndexOffset: offset, - }, - ) - if err != nil { - t.Fatalf("error when obtaining payments: %v", - err) - } - - if len(invoicesResp.Invoices) == 0 { - break - } - - for _, inv := range invoicesResp.Invoices { - if !bytes.Equal(inv.RHash, rhash) { - continue - } - - // Assert that the amount paid to the invoice is - // correct. 
- if inv.AmtPaidSat != int64(paymentAmt) { - t.Fatalf("incorrect payment amt for "+ - "invoicewant: %d, got %d", - paymentAmt, inv.AmtPaidSat) - } - - if inv.State != lnrpc.Invoice_SETTLED { - t.Fatalf("Invoice not settled: %v", - inv.State) - } - - if len(inv.Htlcs) != num { - t.Fatalf("expected invoice to be "+ - "settled with %v HTLCs, had %v", - num, len(inv.Htlcs)) - } - - found = true - break - } - - offset = invoicesResp.LastIndexOffset - } - - if !found { - t.Fatalf("invoice not found") - } - } - - // Finally check that the payment shows up with three settled HTLCs in - // Alice's list of payments... - assertNumHtlcs(net.Alice, 3) - - // ...and in Bob's list of paid invoices. - assertSettledInvoice(net.Bob, rHash, 3) -} - -type mppTestContext struct { - t *harnessTest - net *lntest.NetworkHarness - - // Keep a list of all our active channels. - networkChans []*lnrpc.ChannelPoint - closeChannelFuncs []func() - - alice, bob, carol, dave, eve *lntest.HarnessNode - nodes []*lntest.HarnessNode -} - -func newMppTestContext(t *harnessTest, - net *lntest.NetworkHarness) *mppTestContext { - - ctxb := context.Background() - - // Create a five-node context consisting of Alice, Bob and three new - // nodes. - carol, err := net.NewNode("carol", nil) - if err != nil { - t.Fatalf("unable to create carol: %v", err) - } - - dave, err := net.NewNode("dave", nil) - if err != nil { - t.Fatalf("unable to create dave: %v", err) - } - - eve, err := net.NewNode("eve", nil) - if err != nil { - t.Fatalf("unable to create eve: %v", err) - } - - // Connect nodes to ensure propagation of channels. 
- nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol, dave, eve} - for i := 0; i < len(nodes); i++ { - for j := i + 1; j < len(nodes); j++ { - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - if err := net.EnsureConnected(ctxt, nodes[i], nodes[j]); err != nil { - t.Fatalf("unable to connect nodes: %v", err) - } - } - } - - ctx := mppTestContext{ - t: t, - net: net, - alice: net.Alice, - bob: net.Bob, - carol: carol, - dave: dave, - eve: eve, - nodes: nodes, - } - - return &ctx -} - -// openChannel is a helper to open a channel from->to. -func (c *mppTestContext) openChannel(from, to *lntest.HarnessNode, chanSize btcutil.Amount) { - ctxb := context.Background() - - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - err := c.net.SendCoins(ctxt, btcutil.UnitsPerCoin(), from) - if err != nil { - c.t.Fatalf("unable to send coins : %v", err) - } - - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - chanPoint := openChannelAndAssert( - ctxt, c.t, c.net, from, to, - lntest.OpenChannelParams{ - Amt: chanSize, - }, - ) - - c.closeChannelFuncs = append(c.closeChannelFuncs, func() { - ctxt, _ := context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert( - ctxt, c.t, c.net, from, chanPoint, false, - ) - }) - - c.networkChans = append(c.networkChans, chanPoint) -} - -func (c *mppTestContext) closeChannels() { - for _, f := range c.closeChannelFuncs { - f() - } -} - -func (c *mppTestContext) shutdownNodes() { - shutdownAndAssert(c.net, c.t, c.carol) - shutdownAndAssert(c.net, c.t, c.dave) - shutdownAndAssert(c.net, c.t, c.eve) -} - -func (c *mppTestContext) waitForChannels() { - ctxb := context.Background() - - // Wait for all nodes to have seen all channels. 
- for _, chanPoint := range c.networkChans { - for _, node := range c.nodes { - txid, err := lnd.GetChanPointFundingTxid(chanPoint) - if err != nil { - c.t.Fatalf("unable to get txid: %v", err) - } - point := wire.OutPoint{ - Hash: *txid, - Index: chanPoint.OutputIndex, - } - - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - err = node.WaitForNetworkChannelOpen(ctxt, chanPoint) - if err != nil { - c.t.Fatalf("(%d): timeout waiting for "+ - "channel(%s) open: %v", - node.NodeID, point, err) - } - } - } -} diff --git a/lnd/lntest/itest/lnd_multi-hop-error-propagation_test.go b/lnd/lntest/itest/lnd_multi-hop-error-propagation_test.go deleted file mode 100644 index 1b465a92..00000000 --- a/lnd/lntest/itest/lnd_multi-hop-error-propagation_test.go +++ /dev/null @@ -1,431 +0,0 @@ -package itest - -import ( - "context" - "math" - "strings" - "time" - - "github.com/pkt-cash/pktd/lnd" - "github.com/pkt-cash/pktd/lnd/lnrpc" - "github.com/pkt-cash/pktd/lnd/lnrpc/routerrpc" - "github.com/pkt-cash/pktd/lnd/lntest" - "github.com/pkt-cash/pktd/lnd/lnwire" -) - -func testHtlcErrorPropagation(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - // In this test we wish to exercise the daemon's correct parsing, - // handling, and propagation of errors that occur while processing a - // multi-hop payment. - const chanAmt = lnd.MaxBtcFundingAmount - - // First establish a channel with a capacity of 0.5 BTC between Alice - // and Bob. 
- ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout) - chanPointAlice := openChannelAndAssert( - ctxt, t, net, net.Alice, net.Bob, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.Alice.WaitForNetworkChannelOpen(ctxt, chanPointAlice); err != nil { - t.Fatalf("channel not seen by alice before timeout: %v", err) - } - - cType, err := channelCommitType(net.Alice, chanPointAlice) - if err != nil { - t.Fatalf("unable to get channel type: %v", err) - } - - commitFee := cType.calcStaticFee(0) - assertBaseBalance := func() { - // Alice has opened a channel with Bob with zero push amount, so - // it's remote balance is zero. - expBalanceAlice := &lnrpc.ChannelBalanceResponse{ - LocalBalance: &lnrpc.Amount{ - Sat: uint64(chanAmt - commitFee), - Msat: uint64(lnwire.NewMSatFromSatoshis( - chanAmt - commitFee, - )), - }, - RemoteBalance: &lnrpc.Amount{}, - UnsettledLocalBalance: &lnrpc.Amount{}, - UnsettledRemoteBalance: &lnrpc.Amount{}, - PendingOpenLocalBalance: &lnrpc.Amount{}, - PendingOpenRemoteBalance: &lnrpc.Amount{}, - // Deprecated fields. - Balance: int64(chanAmt - commitFee), - } - assertChannelBalanceResp(t, net.Alice, expBalanceAlice) - - // Bob has a channel with Alice and another with Carol, so it's - // local and remote balances are both chanAmt - commitFee. - expBalanceBob := &lnrpc.ChannelBalanceResponse{ - LocalBalance: &lnrpc.Amount{ - Sat: uint64(chanAmt - commitFee), - Msat: uint64(lnwire.NewMSatFromSatoshis( - chanAmt - commitFee, - )), - }, - RemoteBalance: &lnrpc.Amount{ - Sat: uint64(chanAmt - commitFee), - Msat: uint64(lnwire.NewMSatFromSatoshis( - chanAmt - commitFee, - )), - }, - UnsettledLocalBalance: &lnrpc.Amount{}, - UnsettledRemoteBalance: &lnrpc.Amount{}, - PendingOpenLocalBalance: &lnrpc.Amount{}, - PendingOpenRemoteBalance: &lnrpc.Amount{}, - // Deprecated fields. 
- Balance: int64(chanAmt - commitFee), - } - assertChannelBalanceResp(t, net.Bob, expBalanceBob) - } - - // Since we'd like to test some multi-hop failure scenarios, we'll - // introduce another node into our test network: Carol. - carol, err := net.NewNode("Carol", nil) - if err != nil { - t.Fatalf("unable to create new nodes: %v", err) - } - - // Next, we'll create a connection from Bob to Carol, and open a - // channel between them so we have the topology: Alice -> Bob -> Carol. - // The channel created will be of lower capacity that the one created - // above. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, net.Bob, carol); err != nil { - t.Fatalf("unable to connect bob to carol: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - const bobChanAmt = lnd.MaxBtcFundingAmount - chanPointBob := openChannelAndAssert( - ctxt, t, net, net.Bob, carol, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - - // Ensure that Alice has Carol in her routing table before proceeding. - nodeInfoReq := &lnrpc.NodeInfoRequest{ - PubKey: carol.PubKeyStr, - } - checkTableTimeout := time.After(time.Second * 10) - checkTableTicker := time.NewTicker(100 * time.Millisecond) - defer checkTableTicker.Stop() - -out: - // TODO(roasbeef): make into async hook for node announcements - for { - select { - case <-checkTableTicker.C: - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - _, err := net.Alice.GetNodeInfo(ctxt, nodeInfoReq) - if err != nil && strings.Contains(err.Error(), - "unable to find") { - - continue - } - - break out - case <-checkTableTimeout: - t.Fatalf("carol's node announcement didn't propagate within " + - "the timeout period") - } - } - - // With the channels, open we can now start to test our multi-hop error - // scenarios. First, we'll generate an invoice from carol that we'll - // use to test some error cases. 
- const payAmt = 10000 - invoiceReq := &lnrpc.Invoice{ - Memo: "kek99", - Value: payAmt, - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - carolInvoice, errr := carol.AddInvoice(ctxt, invoiceReq) - if errr != nil { - t.Fatalf("unable to generate carol invoice: %v", errr) - } - - carolPayReq, errr := carol.DecodePayReq(ctxb, - &lnrpc.PayReqString{ - PayReq: carolInvoice.PaymentRequest, - }) - if errr != nil { - t.Fatalf("unable to decode generated payment request: %v", errr) - } - - // Before we send the payment, ensure that the announcement of the new - // channel has been processed by Alice. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.Alice.WaitForNetworkChannelOpen(ctxt, chanPointBob); err != nil { - t.Fatalf("channel not seen by alice before timeout: %v", err) - } - - // Before we start sending payments, subscribe to htlc events for each - // node. - ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout) - defer cancel() - - aliceEvents, errr := net.Alice.RouterClient.SubscribeHtlcEvents( - ctxt, &routerrpc.SubscribeHtlcEventsRequest{}, - ) - if errr != nil { - t.Fatalf("could not subscribe events: %v", errr) - } - - bobEvents, errr := net.Bob.RouterClient.SubscribeHtlcEvents( - ctxt, &routerrpc.SubscribeHtlcEventsRequest{}, - ) - if errr != nil { - t.Fatalf("could not subscribe events: %v", errr) - } - - carolEvents, errr := carol.RouterClient.SubscribeHtlcEvents( - ctxt, &routerrpc.SubscribeHtlcEventsRequest{}, - ) - if errr != nil { - t.Fatalf("could not subscribe events: %v", errr) - } - - // For the first scenario, we'll test the cancellation of an HTLC with - // an unknown payment hash. - // TODO(roasbeef): return failure response rather than failing entire - // stream on payment error. 
- sendReq := &routerrpc.SendPaymentRequest{ - PaymentHash: makeFakePayHash(t), - Dest: carol.PubKey[:], - Amt: payAmt, - FinalCltvDelta: int32(carolPayReq.CltvExpiry), - TimeoutSeconds: 60, - FeeLimitMsat: noFeeLimitMsat, - } - sendAndAssertFailure( - t, net.Alice, - sendReq, lnrpc.PaymentFailureReason_FAILURE_REASON_INCORRECT_PAYMENT_DETAILS, - ) - assertLastHTLCError( - t, net.Alice, - lnrpc.Failure_INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS, - ) - - // We expect alice and bob to each have one forward and one forward - // fail event at this stage. - assertHtlcEvents(t, 1, 1, 0, routerrpc.HtlcEvent_SEND, aliceEvents) - assertHtlcEvents(t, 1, 1, 0, routerrpc.HtlcEvent_FORWARD, bobEvents) - - // Carol should have a link failure because the htlc failed on her - // incoming link. - assertLinkFailure( - t, routerrpc.HtlcEvent_RECEIVE, - routerrpc.FailureDetail_UNKNOWN_INVOICE, carolEvents, - ) - - // The balances of all parties should be the same as initially since - // the HTLC was canceled. - assertBaseBalance() - - // Next, we'll test the case of a recognized payHash but, an incorrect - // value on the extended HTLC. - htlcAmt := lnwire.NewMSatFromSatoshis(1000) - sendReq = &routerrpc.SendPaymentRequest{ - PaymentHash: carolInvoice.RHash, - Dest: carol.PubKey[:], - Amt: int64(htlcAmt.ToSatoshis()), // 10k satoshis are expected. - FinalCltvDelta: int32(carolPayReq.CltvExpiry), - TimeoutSeconds: 60, - FeeLimitMsat: noFeeLimitMsat, - } - sendAndAssertFailure( - t, net.Alice, - sendReq, lnrpc.PaymentFailureReason_FAILURE_REASON_INCORRECT_PAYMENT_DETAILS, - ) - assertLastHTLCError( - t, net.Alice, - lnrpc.Failure_INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS, - ) - - // We expect alice and bob to each have one forward and one forward - // fail event at this stage. 
- assertHtlcEvents(t, 1, 1, 0, routerrpc.HtlcEvent_SEND, aliceEvents) - assertHtlcEvents(t, 1, 1, 0, routerrpc.HtlcEvent_FORWARD, bobEvents) - - // Carol should have a link failure because the htlc failed on her - // incoming link. - assertLinkFailure( - t, routerrpc.HtlcEvent_RECEIVE, - routerrpc.FailureDetail_INVOICE_UNDERPAID, carolEvents, - ) - - // The balances of all parties should be the same as initially since - // the HTLC was canceled. - assertBaseBalance() - - // Next we'll test an error that occurs mid-route due to an outgoing - // link having insufficient capacity. In order to do so, we'll first - // need to unbalance the link connecting Bob<->Carol. - // - // To do so, we'll push most of the funds in the channel over to - // Alice's side, leaving on 10k satoshis of available balance for bob. - // There's a max payment amount, so we'll have to do this - // incrementally. - chanReserve := int64(chanAmt / 100) - amtToSend := int64(chanAmt) - chanReserve - 20000 - amtSent := int64(0) - for amtSent != amtToSend { - // We'll send in chunks of the max payment amount. If we're - // about to send too much, then we'll only send the amount - // remaining. - toSend := int64(math.MaxUint32) - if toSend+amtSent > amtToSend { - toSend = amtToSend - amtSent - } - - invoiceReq = &lnrpc.Invoice{ - Value: toSend, - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - carolInvoice2, err := carol.AddInvoice(ctxt, invoiceReq) - if err != nil { - t.Fatalf("unable to generate carol invoice: %v", err) - } - sendAndAssertSuccess( - t, net.Bob, - &routerrpc.SendPaymentRequest{ - PaymentRequest: carolInvoice2.PaymentRequest, - TimeoutSeconds: 60, - FeeLimitMsat: noFeeLimitMsat, - }, - ) - - // For each send bob makes, we need to check that bob has a - // forward and settle event for his send, and carol has a - // settle event for her receive. 
- assertHtlcEvents( - t, 1, 0, 1, routerrpc.HtlcEvent_SEND, bobEvents, - ) - assertHtlcEvents( - t, 0, 0, 1, routerrpc.HtlcEvent_RECEIVE, carolEvents, - ) - - amtSent += toSend - } - - // At this point, Alice has 50mil satoshis on her side of the channel, - // but Bob only has 10k available on his side of the channel. So a - // payment from Alice to Carol worth 100k satoshis should fail. - invoiceReq = &lnrpc.Invoice{ - Value: 100000, - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - carolInvoice3, errr := carol.AddInvoice(ctxt, invoiceReq) - if errr != nil { - t.Fatalf("unable to generate carol invoice: %v", errr) - } - - sendReq = &routerrpc.SendPaymentRequest{ - PaymentRequest: carolInvoice3.PaymentRequest, - TimeoutSeconds: 60, - FeeLimitMsat: noFeeLimitMsat, - } - sendAndAssertFailure( - t, net.Alice, - sendReq, lnrpc.PaymentFailureReason_FAILURE_REASON_NO_ROUTE, - ) - assertLastHTLCError( - t, net.Alice, lnrpc.Failure_TEMPORARY_CHANNEL_FAILURE, - ) - - // Alice should have a forwarding event and a forwarding failure. - assertHtlcEvents(t, 1, 1, 0, routerrpc.HtlcEvent_SEND, aliceEvents) - - // Bob should have a link failure because the htlc failed on his - // outgoing link. - assertLinkFailure( - t, routerrpc.HtlcEvent_FORWARD, - routerrpc.FailureDetail_INSUFFICIENT_BALANCE, bobEvents, - ) - - // Generate new invoice to not pay same invoice twice. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - carolInvoice, errr = carol.AddInvoice(ctxt, invoiceReq) - if errr != nil { - t.Fatalf("unable to generate carol invoice: %v", errr) - } - - // For our final test, we'll ensure that if a target link isn't - // available for what ever reason then the payment fails accordingly. - // - // We'll attempt to complete the original invoice we created with Carol - // above, but before we do so, Carol will go offline, resulting in a - // failed payment. - shutdownAndAssert(net, t, carol) - - // Reset mission control to forget the temporary channel failure above. 
- ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - _, errr = net.Alice.RouterClient.ResetMissionControl( - ctxt, &routerrpc.ResetMissionControlRequest{}, - ) - if errr != nil { - t.Fatalf("unable to reset mission control: %v", errr) - } - - sendAndAssertFailure( - t, net.Alice, - &routerrpc.SendPaymentRequest{ - PaymentRequest: carolInvoice.PaymentRequest, - TimeoutSeconds: 60, - FeeLimitMsat: noFeeLimitMsat, - }, - lnrpc.PaymentFailureReason_FAILURE_REASON_NO_ROUTE, - ) - assertLastHTLCError(t, net.Alice, lnrpc.Failure_UNKNOWN_NEXT_PEER) - - // Alice should have a forwarding event and subsequent fail. - assertHtlcEvents(t, 1, 1, 0, routerrpc.HtlcEvent_SEND, aliceEvents) - - // Bob should have a link failure because he could not find the next - // peer. - assertLinkFailure( - t, routerrpc.HtlcEvent_FORWARD, - routerrpc.FailureDetail_NO_DETAIL, bobEvents, - ) - - // Finally, immediately close the channel. This function will also - // block until the channel is closed and will additionally assert the - // relevant channel closing post conditions. - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Alice, chanPointAlice, false) - - // Force close Bob's final channel. - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Bob, chanPointBob, true) - - // Cleanup by mining the force close and sweep transaction. - cleanupForceClose(t, net, net.Bob, chanPointBob) -} - -// assertLinkFailure checks that the stream provided has a single link failure -// the the failure detail provided. 
-func assertLinkFailure(t *harnessTest, - eventType routerrpc.HtlcEvent_EventType, - failureDetail routerrpc.FailureDetail, - client routerrpc.Router_SubscribeHtlcEventsClient) { - - event := assertEventAndType(t, eventType, client) - - linkFail, ok := event.Event.(*routerrpc.HtlcEvent_LinkFailEvent) - if !ok { - t.Fatalf("expected forwarding failure, got: %T", linkFail) - } - - if linkFail.LinkFailEvent.FailureDetail != failureDetail { - t.Fatalf("expected: %v, got: %v", failureDetail, - linkFail.LinkFailEvent.FailureDetail) - } -} diff --git a/lnd/lntest/itest/lnd_multi-hop-payments_test.go b/lnd/lntest/itest/lnd_multi-hop-payments_test.go deleted file mode 100644 index 3ddae470..00000000 --- a/lnd/lntest/itest/lnd_multi-hop-payments_test.go +++ /dev/null @@ -1,414 +0,0 @@ -package itest - -import ( - "context" - "time" - - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/lnd" - "github.com/pkt-cash/pktd/lnd/chainreg" - "github.com/pkt-cash/pktd/lnd/lnrpc" - "github.com/pkt-cash/pktd/lnd/lnrpc/routerrpc" - "github.com/pkt-cash/pktd/lnd/lntest" - "github.com/pkt-cash/pktd/wire" -) - -func testMultiHopPayments(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - const chanAmt = btcutil.Amount(100000) - var networkChans []*lnrpc.ChannelPoint - - // Open a channel with 100k satoshis between Alice and Bob with Alice - // being the sole funder of the channel. 
- ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout) - chanPointAlice := openChannelAndAssert( - ctxt, t, net, net.Alice, net.Bob, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - networkChans = append(networkChans, chanPointAlice) - - aliceChanTXID, err := lnd.GetChanPointFundingTxid(chanPointAlice) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } - aliceFundPoint := wire.OutPoint{ - Hash: *aliceChanTXID, - Index: chanPointAlice.OutputIndex, - } - - // As preliminary setup, we'll create two new nodes: Carol and Dave, - // such that we now have a 4 node, 3 channel topology. Dave will make a - // channel with Alice, and Carol with Dave. After this setup, the - // network topology should now look like: - // Carol -> Dave -> Alice -> Bob - // - // First, we'll create Dave and establish a channel to Alice. Dave will - // be running an older node that requires the legacy onion payload. - daveArgs := []string{"--protocol.legacy.onion"} - dave, err := net.NewNode("Dave", daveArgs) - if err != nil { - t.Fatalf("unable to create new nodes: %v", err) - } - defer shutdownAndAssert(net, t, dave) - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, dave, net.Alice); err != nil { - t.Fatalf("unable to connect dave to alice: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.SendCoins(ctxt, btcutil.UnitsPerCoin(), dave) - if err != nil { - t.Fatalf("unable to send coins to dave: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - chanPointDave := openChannelAndAssert( - ctxt, t, net, dave, net.Alice, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - networkChans = append(networkChans, chanPointDave) - daveChanTXID, err := lnd.GetChanPointFundingTxid(chanPointDave) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } - daveFundPoint := wire.OutPoint{ - Hash: *daveChanTXID, - Index: chanPointDave.OutputIndex, - } - - // Next, we'll create Carol and 
establish a channel to from her to - // Dave. - carol, err := net.NewNode("Carol", nil) - if err != nil { - t.Fatalf("unable to create new nodes: %v", err) - } - defer shutdownAndAssert(net, t, carol) - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, carol, dave); err != nil { - t.Fatalf("unable to connect carol to dave: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.SendCoins(ctxt, btcutil.UnitsPerCoin(), carol) - if err != nil { - t.Fatalf("unable to send coins to carol: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - chanPointCarol := openChannelAndAssert( - ctxt, t, net, carol, dave, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - networkChans = append(networkChans, chanPointCarol) - - carolChanTXID, err := lnd.GetChanPointFundingTxid(chanPointCarol) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } - carolFundPoint := wire.OutPoint{ - Hash: *carolChanTXID, - Index: chanPointCarol.OutputIndex, - } - - // Wait for all nodes to have seen all channels. - nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol, dave} - nodeNames := []string{"Alice", "Bob", "Carol", "Dave"} - for _, chanPoint := range networkChans { - for i, node := range nodes { - txid, err := lnd.GetChanPointFundingTxid(chanPoint) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } - point := wire.OutPoint{ - Hash: *txid, - Index: chanPoint.OutputIndex, - } - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = node.WaitForNetworkChannelOpen(ctxt, chanPoint) - if err != nil { - t.Fatalf("%s(%d): timeout waiting for "+ - "channel(%s) open: %v", nodeNames[i], - node.NodeID, point, err) - } - } - } - - // Create 5 invoices for Bob, which expect a payment from Carol for 1k - // satoshis with a different preimage each time. 
- const numPayments = 5 - const paymentAmt = 1000 - payReqs, _, _, err := createPayReqs( - net.Bob, paymentAmt, numPayments, - ) - if err != nil { - t.Fatalf("unable to create pay reqs: %v", err) - } - - // We'll wait for all parties to recognize the new channels within the - // network. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = dave.WaitForNetworkChannelOpen(ctxt, chanPointDave) - if err != nil { - t.Fatalf("dave didn't advertise his channel: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = carol.WaitForNetworkChannelOpen(ctxt, chanPointCarol) - if err != nil { - t.Fatalf("carol didn't advertise her channel in time: %v", - err) - } - - time.Sleep(time.Millisecond * 50) - - // Set the fee policies of the Alice -> Bob and the Dave -> Alice - // channel edges to relatively large non default values. This makes it - // possible to pick up more subtle fee calculation errors. - maxHtlc := calculateMaxHtlc(chanAmt) - const aliceBaseFeeSat = 1 - const aliceFeeRatePPM = 100000 - updateChannelPolicy( - t, net.Alice, chanPointAlice, aliceBaseFeeSat*1000, - aliceFeeRatePPM, chainreg.DefaultBitcoinTimeLockDelta, maxHtlc, - carol, - ) - - const daveBaseFeeSat = 5 - const daveFeeRatePPM = 150000 - updateChannelPolicy( - t, dave, chanPointDave, daveBaseFeeSat*1000, daveFeeRatePPM, - chainreg.DefaultBitcoinTimeLockDelta, maxHtlc, carol, - ) - - // Before we start sending payments, subscribe to htlc events for each - // node. 
- ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout) - defer cancel() - - aliceEvents, errr := net.Alice.RouterClient.SubscribeHtlcEvents( - ctxt, &routerrpc.SubscribeHtlcEventsRequest{}, - ) - if errr != nil { - t.Fatalf("could not subscribe events: %v", errr) - } - - bobEvents, errr := net.Bob.RouterClient.SubscribeHtlcEvents( - ctxt, &routerrpc.SubscribeHtlcEventsRequest{}, - ) - if errr != nil { - t.Fatalf("could not subscribe events: %v", errr) - } - - carolEvents, errr := carol.RouterClient.SubscribeHtlcEvents( - ctxt, &routerrpc.SubscribeHtlcEventsRequest{}, - ) - if errr != nil { - t.Fatalf("could not subscribe events: %v", errr) - } - - daveEvents, errr := dave.RouterClient.SubscribeHtlcEvents( - ctxt, &routerrpc.SubscribeHtlcEventsRequest{}, - ) - if errr != nil { - t.Fatalf("could not subscribe events: %v", errr) - } - - // Using Carol as the source, pay to the 5 invoices from Bob created - // above. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = completePaymentRequests( - ctxt, carol, carol.RouterClient, payReqs, true, - ) - if err != nil { - t.Fatalf("unable to send payments: %v", err) - } - - // At this point all the channels within our proto network should be - // shifted by 5k satoshis in the direction of Bob, the sink within the - // payment flow generated above. The order of asserts corresponds to - // increasing of time is needed to embed the HTLC in commitment - // transaction, in channel Carol->David->Alice->Bob, order is Bob, - // Alice, David, Carol. - - // The final node bob expects to get paid five times 1000 sat. - expectedAmountPaidAtoB := int64(numPayments * paymentAmt) - - assertAmountPaid(t, "Alice(local) => Bob(remote)", net.Bob, - aliceFundPoint, int64(0), expectedAmountPaidAtoB) - assertAmountPaid(t, "Alice(local) => Bob(remote)", net.Alice, - aliceFundPoint, expectedAmountPaidAtoB, int64(0)) - - // To forward a payment of 1000 sat, Alice is charging a fee of - // 1 sat + 10% = 101 sat. 
- const aliceFeePerPayment = aliceBaseFeeSat + - (paymentAmt * aliceFeeRatePPM / 1_000_000) - const expectedFeeAlice = numPayments * aliceFeePerPayment - - // Dave needs to pay what Alice pays plus Alice's fee. - expectedAmountPaidDtoA := expectedAmountPaidAtoB + expectedFeeAlice - - assertAmountPaid(t, "Dave(local) => Alice(remote)", net.Alice, - daveFundPoint, int64(0), expectedAmountPaidDtoA) - assertAmountPaid(t, "Dave(local) => Alice(remote)", dave, - daveFundPoint, expectedAmountPaidDtoA, int64(0)) - - // To forward a payment of 1101 sat, Dave is charging a fee of - // 5 sat + 15% = 170.15 sat. This is rounded down in rpcserver to 170. - const davePaymentAmt = paymentAmt + aliceFeePerPayment - const daveFeePerPayment = daveBaseFeeSat + - (davePaymentAmt * daveFeeRatePPM / 1_000_000) - const expectedFeeDave = numPayments * daveFeePerPayment - - // Carol needs to pay what Dave pays plus Dave's fee. - expectedAmountPaidCtoD := expectedAmountPaidDtoA + expectedFeeDave - - assertAmountPaid(t, "Carol(local) => Dave(remote)", dave, - carolFundPoint, int64(0), expectedAmountPaidCtoD) - assertAmountPaid(t, "Carol(local) => Dave(remote)", carol, - carolFundPoint, expectedAmountPaidCtoD, int64(0)) - - // Now that we know all the balances have been settled out properly, - // we'll ensure that our internal record keeping for completed circuits - // was properly updated. - - // First, check that the FeeReport response shows the proper fees - // accrued over each time range. Dave should've earned 170 satoshi for - // each of the forwarded payments. 
- ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - feeReport, errr := dave.FeeReport(ctxt, &lnrpc.FeeReportRequest{}) - if errr != nil { - t.Fatalf("unable to query for fee report: %v", errr) - } - - if feeReport.DayFeeSum != uint64(expectedFeeDave) { - t.Fatalf("fee mismatch: expected %v, got %v", expectedFeeDave, - feeReport.DayFeeSum) - } - if feeReport.WeekFeeSum != uint64(expectedFeeDave) { - t.Fatalf("fee mismatch: expected %v, got %v", expectedFeeDave, - feeReport.WeekFeeSum) - } - if feeReport.MonthFeeSum != uint64(expectedFeeDave) { - t.Fatalf("fee mismatch: expected %v, got %v", expectedFeeDave, - feeReport.MonthFeeSum) - } - - // Next, ensure that if we issue the vanilla query for the forwarding - // history, it returns 5 values, and each entry is formatted properly. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - fwdingHistory, errr := dave.ForwardingHistory( - ctxt, &lnrpc.ForwardingHistoryRequest{}, - ) - if errr != nil { - t.Fatalf("unable to query for fee report: %v", errr) - } - if len(fwdingHistory.ForwardingEvents) != numPayments { - t.Fatalf("wrong number of forwarding event: expected %v, "+ - "got %v", numPayments, - len(fwdingHistory.ForwardingEvents)) - } - expectedForwardingFee := uint64(expectedFeeDave / numPayments) - for _, event := range fwdingHistory.ForwardingEvents { - // Each event should show a fee of 170 satoshi. - if event.Fee != expectedForwardingFee { - t.Fatalf("fee mismatch: expected %v, got %v", - expectedForwardingFee, event.Fee) - } - } - - // We expect Carol to have successful forwards and settles for - // her sends. - assertHtlcEvents( - t, numPayments, 0, numPayments, routerrpc.HtlcEvent_SEND, - carolEvents, - ) - - // Dave and Alice should both have forwards and settles for - // their role as forwarding nodes. 
- assertHtlcEvents( - t, numPayments, 0, numPayments, routerrpc.HtlcEvent_FORWARD, - daveEvents, - ) - assertHtlcEvents( - t, numPayments, 0, numPayments, routerrpc.HtlcEvent_FORWARD, - aliceEvents, - ) - - // Bob should only have settle events for his receives. - assertHtlcEvents( - t, 0, 0, numPayments, routerrpc.HtlcEvent_RECEIVE, bobEvents, - ) - - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Alice, chanPointAlice, false) - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, dave, chanPointDave, false) - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, carol, chanPointCarol, false) -} - -// assertHtlcEvents consumes events from a client and ensures that they are of -// the expected type and contain the expected number of forwards, forward -// failures and settles. -func assertHtlcEvents(t *harnessTest, fwdCount, fwdFailCount, settleCount int, - userType routerrpc.HtlcEvent_EventType, - client routerrpc.Router_SubscribeHtlcEventsClient) { - - var forwards, forwardFails, settles int - - numEvents := fwdCount + fwdFailCount + settleCount - for i := 0; i < numEvents; i++ { - event := assertEventAndType(t, userType, client) - - switch event.Event.(type) { - case *routerrpc.HtlcEvent_ForwardEvent: - forwards++ - - case *routerrpc.HtlcEvent_ForwardFailEvent: - forwardFails++ - - case *routerrpc.HtlcEvent_SettleEvent: - settles++ - - default: - t.Fatalf("unexpected event: %T", event.Event) - } - } - - if forwards != fwdCount { - t.Fatalf("expected: %v forwards, got: %v", fwdCount, forwards) - } - - if forwardFails != fwdFailCount { - t.Fatalf("expected: %v forward fails, got: %v", fwdFailCount, - forwardFails) - } - - if settles != settleCount { - t.Fatalf("expected: %v settles, got: %v", settleCount, settles) - } -} - -// assertEventAndType reads an event from the stream provided and ensures that -// it is associated with the 
correct user related type - a user initiated send, -// a receive to our node or a forward through our node. Note that this event -// type is different from the htlc event type (forward, link failure etc). -func assertEventAndType(t *harnessTest, eventType routerrpc.HtlcEvent_EventType, - client routerrpc.Router_SubscribeHtlcEventsClient) *routerrpc.HtlcEvent { - event, err := client.Recv() - if err != nil { - t.Fatalf("could not get event") - } - - if event.EventType != eventType { - t.Fatalf("expected: %v, got: %v", eventType, - event.EventType) - } - - return event -} diff --git a/lnd/lntest/itest/lnd_multi-hop_htlc_local_chain_claim_test.go b/lnd/lntest/itest/lnd_multi-hop_htlc_local_chain_claim_test.go deleted file mode 100644 index 4c55cc18..00000000 --- a/lnd/lntest/itest/lnd_multi-hop_htlc_local_chain_claim_test.go +++ /dev/null @@ -1,295 +0,0 @@ -package itest - -import ( - "context" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/lnd" - "github.com/pkt-cash/pktd/lnd/lncfg" - "github.com/pkt-cash/pktd/lnd/lnrpc" - "github.com/pkt-cash/pktd/lnd/lnrpc/invoicesrpc" - "github.com/pkt-cash/pktd/lnd/lnrpc/routerrpc" - "github.com/pkt-cash/pktd/lnd/lntest" - "github.com/pkt-cash/pktd/lnd/lntest/wait" - "github.com/pkt-cash/pktd/lnd/lntypes" - "github.com/pkt-cash/pktd/wire" - "github.com/stretchr/testify/require" -) - -// testMultiHopHtlcLocalChainClaim tests that in a multi-hop HTLC scenario, if -// we force close a channel with an incoming HTLC, and later find out the -// preimage via the witness beacon, we properly settle the HTLC on-chain using -// the HTLC success transaction in order to ensure we don't lose any funds. 
-func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest, - alice, bob *lntest.HarnessNode, c commitType) { - - ctxb := context.Background() - - // First, we'll create a three hop network: Alice -> Bob -> Carol, with - // Carol refusing to actually settle or directly cancel any HTLC's - // self. - aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork( - t, net, alice, bob, false, c, - ) - - // Clean up carol's node when the test finishes. - defer shutdownAndAssert(net, t, carol) - - // With the network active, we'll now add a new hodl invoice at Carol's - // end. Make sure the cltv expiry delta is large enough, otherwise Bob - // won't send out the outgoing htlc. - - const invoiceAmt = 100000 - preimage := lntypes.Preimage{1, 2, 3} - payHash := preimage.Hash() - invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{ - Value: invoiceAmt, - CltvExpiry: 40, - Hash: payHash[:], - } - ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout) - defer cancel() - carolInvoice, errr := carol.AddHoldInvoice(ctxt, invoiceReq) - require.NoError(t.t, errr) - - // Now that we've created the invoice, we'll send a single payment from - // Alice to Carol. We won't wait for the response however, as Carol - // will not immediately settle the payment. - ctx, cancel := context.WithCancel(ctxb) - defer cancel() - - _, errr = alice.RouterClient.SendPaymentV2( - ctx, &routerrpc.SendPaymentRequest{ - PaymentRequest: carolInvoice.PaymentRequest, - TimeoutSeconds: 60, - FeeLimitMsat: noFeeLimitMsat, - }, - ) - require.NoError(t.t, errr) - - // At this point, all 3 nodes should now have an active channel with - // the created HTLC pending on all of them. - nodes := []*lntest.HarnessNode{alice, bob, carol} - err := wait.NoError(func() er.R { - return assertActiveHtlcs(nodes, payHash[:]) - }, defaultTimeout) - util.RequireNoErr(t.t, err) - - // Wait for carol to mark invoice as accepted. 
There is a small gap to - // bridge between adding the htlc to the channel and executing the exit - // hop logic. - waitForInvoiceAccepted(t, carol, payHash) - - // Increase the fee estimate so that the following force close tx will - // be cpfp'ed. - net.SetFeeEstimate(30000) - - // At this point, Bob decides that he wants to exit the channel - // immediately, so he force closes his commitment transaction. - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - bobForceClose := closeChannelAndAssertType( - ctxt, t, net, bob, aliceChanPoint, c == commitTypeAnchors, true, - ) - - // Alice will sweep her commitment output immediately. If there are - // anchors, Alice will also sweep hers. - expectedTxes := 1 - if c == commitTypeAnchors { - expectedTxes = 2 - } - _, err = waitForNTxsInMempool( - net.Miner.Node, expectedTxes, minerMempoolTimeout, - ) - util.RequireNoErr(t.t, err) - - // Suspend Bob to force Carol to go to chain. - restartBob, err := net.SuspendNode(bob) - util.RequireNoErr(t.t, err) - - // Settle invoice. This will just mark the invoice as settled, as there - // is no link anymore to remove the htlc from the commitment tx. For - // this test, it is important to actually settle and not leave the - // invoice in the accepted state, because without a known preimage, the - // channel arbitrator won't go to chain. - ctx, cancel = context.WithTimeout(ctxb, defaultTimeout) - defer cancel() - _, errr = carol.SettleInvoice(ctx, &invoicesrpc.SettleInvoiceMsg{ - Preimage: preimage[:], - }) - require.NoError(t.t, errr) - - // We'll now mine enough blocks so Carol decides that she needs to go - // on-chain to claim the HTLC as Bob has been inactive. - numBlocks := padCLTV(uint32(invoiceReq.CltvExpiry - - lncfg.DefaultIncomingBroadcastDelta)) - - _, err = net.Miner.Node.Generate(numBlocks) - util.RequireNoErr(t.t, err) - - // Carol's commitment transaction should now be in the mempool. If there - // is an anchor, Carol will sweep that too. 
- _, err = waitForNTxsInMempool( - net.Miner.Node, expectedTxes, minerMempoolTimeout, - ) - util.RequireNoErr(t.t, err) - bobFundingTxid, err := lnd.GetChanPointFundingTxid(bobChanPoint) - util.RequireNoErr(t.t, err) - carolFundingPoint := wire.OutPoint{ - Hash: *bobFundingTxid, - Index: bobChanPoint.OutputIndex, - } - - // Look up the closing transaction. It should be spending from the - // funding transaction, - closingTx := getSpendingTxInMempool( - t, net.Miner.Node, minerMempoolTimeout, carolFundingPoint, - ) - closingTxid := closingTx.TxHash() - - // Mine a block that should confirm the commit tx, the anchor if present - // and the coinbase. - block := mineBlocks(t, net, 1, expectedTxes)[0] - require.Len(t.t, block.Transactions, expectedTxes+1) - assertTxInBlock(t, block, &closingTxid) - - // Restart bob again. - err = restartBob() - util.RequireNoErr(t.t, err) - - // After the force close transacion is mined, Carol should broadcast her - // second level HTLC transacion. Bob will broadcast a sweep tx to sweep - // his output in the channel with Carol. He can do this immediately, as - // the output is not timelocked since Carol was the one force closing. - // If there are anchors on the commitment, Bob will also sweep his - // anchor. - expectedTxes = 2 - if c == commitTypeAnchors { - expectedTxes = 3 - } - txes, err := getNTxsFromMempool( - net.Miner.Node, expectedTxes, minerMempoolTimeout, - ) - util.RequireNoErr(t.t, err) - - // Both Carol's second level transaction and Bob's sweep should be - // spending from the commitment transaction. - assertAllTxesSpendFrom(t, txes, closingTxid) - - // At this point we suspend Alice to make sure she'll handle the - // on-chain settle after a restart. - restartAlice, err := net.SuspendNode(alice) - util.RequireNoErr(t.t, err) - - // Mine a block to confirm the two transactions (+ the coinbase). 
- block = mineBlocks(t, net, 1, expectedTxes)[0] - require.Len(t.t, block.Transactions, expectedTxes+1) - - // Keep track of the second level tx maturity. - carolSecondLevelCSV := uint32(defaultCSV) - - // When Bob notices Carol's second level transaction in the block, he - // will extract the preimage and broadcast a second level tx to claim - // the HTLC in his (already closed) channel with Alice. - bobSecondLvlTx, err := waitForTxInMempool( - net.Miner.Node, minerMempoolTimeout, - ) - util.RequireNoErr(t.t, err) - - // It should spend from the commitment in the channel with Alice. - tx, err := net.Miner.Node.GetRawTransaction(bobSecondLvlTx) - util.RequireNoErr(t.t, err) - - require.Equal( - t.t, *bobForceClose, tx.MsgTx().TxIn[0].PreviousOutPoint.Hash, - ) - - // At this point, Bob should have broadcast his second layer success - // transaction, and should have sent it to the nursery for incubation. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = waitForNumChannelPendingForceClose( - ctxt, bob, 1, func(c *lnrpcForceCloseChannel) er.R { - if c.Channel.LocalBalance != 0 { - return nil - } - - if len(c.PendingHtlcs) != 1 { - return er.Errorf("bob should have pending " + - "htlc but doesn't") - } - - if c.PendingHtlcs[0].Stage != 1 { - return er.Errorf("bob's htlc should have "+ - "advanced to the first stage but was "+ - "stage: %v", c.PendingHtlcs[0].Stage) - } - - return nil - }, - ) - util.RequireNoErr(t.t, err) - - // We'll now mine a block which should confirm Bob's second layer - // transaction. - block = mineBlocks(t, net, 1, 1)[0] - require.Len(t.t, block.Transactions, 2) - assertTxInBlock(t, block, bobSecondLvlTx) - - // Keep track of Bob's second level maturity, and decrement our track - // of Carol's. - bobSecondLevelCSV := uint32(defaultCSV) - carolSecondLevelCSV-- - - // Now that the preimage from Bob has hit the chain, restart Alice to - // ensure she'll pick it up. 
- err = restartAlice() - util.RequireNoErr(t.t, err) - - // If we then mine 3 additional blocks, Carol's second level tx should - // mature, and she can pull the funds from it with a sweep tx. - _, err = net.Miner.Node.Generate(carolSecondLevelCSV) - util.RequireNoErr(t.t, err) - bobSecondLevelCSV -= carolSecondLevelCSV - - carolSweep, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) - util.RequireNoErr(t.t, err) - - // Mining one additional block, Bob's second level tx is mature, and he - // can sweep the output. - block = mineBlocks(t, net, bobSecondLevelCSV, 1)[0] - assertTxInBlock(t, block, carolSweep) - - bobSweep, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) - util.RequireNoErr(t.t, err) - - // Make sure it spends from the second level tx. - tx, err = net.Miner.Node.GetRawTransaction(bobSweep) - util.RequireNoErr(t.t, err) - require.Equal( - t.t, *bobSecondLvlTx, tx.MsgTx().TxIn[0].PreviousOutPoint.Hash, - ) - - // When we mine one additional block, that will confirm Bob's sweep. - // Now Bob should have no pending channels anymore, as this just - // resolved it by the confirmation of the sweep transaction. - block = mineBlocks(t, net, 1, 1)[0] - assertTxInBlock(t, block, bobSweep) - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = waitForNumChannelPendingForceClose(ctxt, bob, 0, nil) - util.RequireNoErr(t.t, err) - assertNodeNumChannels(t, bob, 0) - - // Also Carol should have no channels left (open nor pending). - err = waitForNumChannelPendingForceClose(ctxt, carol, 0, nil) - util.RequireNoErr(t.t, err) - assertNodeNumChannels(t, carol, 0) - - // Finally, check that the Alice's payment is correctly marked - // succeeded. 
- ctxt, _ = context.WithTimeout(ctxt, defaultTimeout) - err = checkPaymentStatus( - ctxt, alice, preimage, lnrpc.Payment_SUCCEEDED, - ) - util.RequireNoErr(t.t, err) -} diff --git a/lnd/lntest/itest/lnd_multi-hop_htlc_local_timeout_test.go b/lnd/lntest/itest/lnd_multi-hop_htlc_local_timeout_test.go deleted file mode 100644 index 6e78aad8..00000000 --- a/lnd/lntest/itest/lnd_multi-hop_htlc_local_timeout_test.go +++ /dev/null @@ -1,238 +0,0 @@ -package itest - -import ( - "context" - "time" - - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/lnd" - "github.com/pkt-cash/pktd/lnd/lncfg" - "github.com/pkt-cash/pktd/lnd/lnrpc" - "github.com/pkt-cash/pktd/lnd/lnrpc/routerrpc" - "github.com/pkt-cash/pktd/lnd/lntest" - "github.com/pkt-cash/pktd/lnd/lntest/wait" - "github.com/pkt-cash/pktd/wire" - "github.com/stretchr/testify/require" -) - -// testMultiHopHtlcLocalTimeout tests that in a multi-hop HTLC scenario, if the -// outgoing HTLC is about to time out, then we'll go to chain in order to claim -// it using the HTLC timeout transaction. Any dust HTLC's should be immediately -// canceled backwards. Once the timeout has been reached, then we should sweep -// it on-chain, and cancel the HTLC backwards. -func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest, - alice, bob *lntest.HarnessNode, c commitType) { - - ctxb := context.Background() - - // First, we'll create a three hop network: Alice -> Bob -> Carol, with - // Carol refusing to actually settle or directly cancel any HTLC's - // self. - aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork( - t, net, alice, bob, true, c, - ) - - // Clean up carol's node when the test finishes. - defer shutdownAndAssert(net, t, carol) - - time.Sleep(time.Second * 1) - - // Now that our channels are set up, we'll send two HTLC's from Alice - // to Carol. 
The first HTLC will be universally considered "dust", - // while the second will be a proper fully valued HTLC. - const ( - dustHtlcAmt = btcutil.Amount(100) - htlcAmt = btcutil.Amount(30000) - finalCltvDelta = 40 - ) - - ctx, cancel := context.WithCancel(ctxb) - defer cancel() - - // We'll create two random payment hashes unknown to carol, then send - // each of them by manually specifying the HTLC details. - carolPubKey := carol.PubKey[:] - dustPayHash := makeFakePayHash(t) - payHash := makeFakePayHash(t) - - _, errr := alice.RouterClient.SendPaymentV2( - ctx, &routerrpc.SendPaymentRequest{ - Dest: carolPubKey, - Amt: int64(dustHtlcAmt), - PaymentHash: dustPayHash, - FinalCltvDelta: finalCltvDelta, - TimeoutSeconds: 60, - FeeLimitMsat: noFeeLimitMsat, - }, - ) - require.NoError(t.t, errr) - - _, errr = alice.RouterClient.SendPaymentV2( - ctx, &routerrpc.SendPaymentRequest{ - Dest: carolPubKey, - Amt: int64(htlcAmt), - PaymentHash: payHash, - FinalCltvDelta: finalCltvDelta, - TimeoutSeconds: 60, - FeeLimitMsat: noFeeLimitMsat, - }, - ) - require.NoError(t.t, errr) - - // Verify that all nodes in the path now have two HTLC's with the - // proper parameters. - nodes := []*lntest.HarnessNode{alice, bob, carol} - err := wait.NoError(func() er.R { - return assertActiveHtlcs(nodes, dustPayHash, payHash) - }, defaultTimeout) - util.RequireNoErr(t.t, err) - - // Increase the fee estimate so that the following force close tx will - // be cpfp'ed. - net.SetFeeEstimate(30000) - - // We'll now mine enough blocks to trigger Bob's broadcast of his - // commitment transaction due to the fact that the HTLC is about to - // timeout. With the default outgoing broadcast delta of zero, this will - // be the same height as the htlc expiry height. - numBlocks := padCLTV( - uint32(finalCltvDelta - lncfg.DefaultOutgoingBroadcastDelta), - ) - _, err = net.Miner.Node.Generate(numBlocks) - util.RequireNoErr(t.t, err) - - // Bob's force close transaction should now be found in the mempool. 
If - // there are anchors, we also expect Bob's anchor sweep. - expectedTxes := 1 - if c == commitTypeAnchors { - expectedTxes = 2 - } - - bobFundingTxid, err := lnd.GetChanPointFundingTxid(bobChanPoint) - util.RequireNoErr(t.t, err) - _, err = waitForNTxsInMempool( - net.Miner.Node, expectedTxes, minerMempoolTimeout, - ) - util.RequireNoErr(t.t, err) - closeTx := getSpendingTxInMempool( - t, net.Miner.Node, minerMempoolTimeout, wire.OutPoint{ - Hash: *bobFundingTxid, - Index: bobChanPoint.OutputIndex, - }, - ) - closeTxid := closeTx.TxHash() - - // Mine a block to confirm the closing transaction. - mineBlocks(t, net, 1, expectedTxes) - - // At this point, Bob should have canceled backwards the dust HTLC - // that we sent earlier. This means Alice should now only have a single - // HTLC on her channel. - nodes = []*lntest.HarnessNode{alice} - err = wait.NoError(func() er.R { - return assertActiveHtlcs(nodes, payHash) - }, defaultTimeout) - util.RequireNoErr(t.t, err) - - // With the closing transaction confirmed, we should expect Bob's HTLC - // timeout transaction to be broadcast due to the expiry being reached. - // If there are anchors, we also expect Carol's anchor sweep now. - txes, err := getNTxsFromMempool( - net.Miner.Node, expectedTxes, minerMempoolTimeout, - ) - util.RequireNoErr(t.t, err) - - // Lookup the timeout transaction that is expected to spend from the - // closing tx. We distinguish it from a possibly anchor sweep by value. - var htlcTimeout *chainhash.Hash - for _, tx := range txes { - prevOp := tx.TxIn[0].PreviousOutPoint - require.Equal(t.t, closeTxid, prevOp.Hash) - - // Assume that the timeout tx doesn't spend an output of exactly - // the size of the anchor. - if closeTx.TxOut[prevOp.Index].Value != anchorSize { - hash := tx.TxHash() - htlcTimeout = &hash - } - } - require.NotNil(t.t, htlcTimeout) - - // We'll mine the remaining blocks in order to generate the sweep - // transaction of Bob's commitment output. 
The commitment was just - // mined at the current tip and the sweep will be broadcast so it can - // be mined at the tip+defaultCSV'th block, so mine one less to be able - // to make mempool assertions. - mineBlocks(t, net, defaultCSV-1, expectedTxes) - - // Check that the sweep spends from the mined commitment. - txes, err = getNTxsFromMempool(net.Miner.Node, 1, minerMempoolTimeout) - util.RequireNoErr(t.t, err) - assertAllTxesSpendFrom(t, txes, closeTxid) - - // Bob's pending channel report should show that he has a commitment - // output awaiting sweeping, and also that there's an outgoing HTLC - // output pending. - pendingChansRequest := &lnrpc.PendingChannelsRequest{} - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, errr := bob.PendingChannels(ctxt, pendingChansRequest) - require.NoError(t.t, errr) - - require.NotZero(t.t, len(pendingChanResp.PendingForceClosingChannels)) - forceCloseChan := pendingChanResp.PendingForceClosingChannels[0] - require.NotZero(t.t, forceCloseChan.LimboBalance) - require.NotZero(t.t, len(forceCloseChan.PendingHtlcs)) - - // Mine a block to confirm Bob's commit sweep tx and assert it was in - // fact mined. - block := mineBlocks(t, net, 1, 1)[0] - commitSweepTxid := txes[0].TxHash() - assertTxInBlock(t, block, &commitSweepTxid) - - // Mine an additional block to prompt Bob to broadcast their second - // layer sweep due to the CSV on the HTLC timeout output. - mineBlocks(t, net, 1, 0) - assertSpendingTxInMempool( - t, net.Miner.Node, minerMempoolTimeout, wire.OutPoint{ - Hash: *htlcTimeout, - Index: 0, - }, - ) - - // The block should have confirmed Bob's HTLC timeout transaction. - // Therefore, at this point, there should be no active HTLC's on the - // commitment transaction from Alice -> Bob. 
- nodes = []*lntest.HarnessNode{alice} - err = wait.NoError(func() er.R { - return assertNumActiveHtlcs(nodes, 0) - }, defaultTimeout) - util.RequireNoErr(t.t, err) - - // At this point, Bob should show that the pending HTLC has advanced to - // the second stage and is to be swept. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, errr = bob.PendingChannels(ctxt, pendingChansRequest) - require.NoError(t.t, errr) - forceCloseChan = pendingChanResp.PendingForceClosingChannels[0] - require.Equal(t.t, uint32(2), forceCloseChan.PendingHtlcs[0].Stage) - - // Next, we'll mine a final block that should confirm the second-layer - // sweeping transaction. - _, err = net.Miner.Node.Generate(1) - util.RequireNoErr(t.t, err) - - // Once this transaction has been confirmed, Bob should detect that he - // no longer has any pending channels. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = waitForNumChannelPendingForceClose(ctxt, bob, 0, nil) - util.RequireNoErr(t.t, err) - - // Coop close channel, expect no anchors. 
- ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssertType( - ctxt, t, net, alice, aliceChanPoint, false, false, - ) -} diff --git a/lnd/lntest/itest/lnd_multi-hop_htlc_receiver_chain_claim_test.go b/lnd/lntest/itest/lnd_multi-hop_htlc_receiver_chain_claim_test.go deleted file mode 100644 index f72a0da0..00000000 --- a/lnd/lntest/itest/lnd_multi-hop_htlc_receiver_chain_claim_test.go +++ /dev/null @@ -1,244 +0,0 @@ -package itest - -import ( - "context" - "time" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/lnd" - "github.com/pkt-cash/pktd/lnd/lncfg" - "github.com/pkt-cash/pktd/lnd/lnrpc" - "github.com/pkt-cash/pktd/lnd/lnrpc/invoicesrpc" - "github.com/pkt-cash/pktd/lnd/lnrpc/routerrpc" - "github.com/pkt-cash/pktd/lnd/lntest" - "github.com/pkt-cash/pktd/lnd/lntest/wait" - "github.com/pkt-cash/pktd/lnd/lntypes" - "github.com/pkt-cash/pktd/wire" - "github.com/stretchr/testify/require" -) - -// testMultiHopReceiverChainClaim tests that in the multi-hop setting, if the -// receiver of an HTLC knows the preimage, but wasn't able to settle the HTLC -// off-chain, then it goes on chain to claim the HTLC uing the HTLC success -// transaction. In this scenario, the node that sent the outgoing HTLC should -// extract the preimage from the sweep transaction, and finish settling the -// HTLC backwards into the route. -func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest, - alice, bob *lntest.HarnessNode, c commitType) { - - ctxb := context.Background() - - // First, we'll create a three hop network: Alice -> Bob -> Carol, with - // Carol refusing to actually settle or directly cancel any HTLC's - // self. - aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork( - t, net, alice, bob, false, c, - ) - - // Clean up carol's node when the test finishes. 
- defer shutdownAndAssert(net, t, carol) - - // With the network active, we'll now add a new hodl invoice at Carol's - // end. Make sure the cltv expiry delta is large enough, otherwise Bob - // won't send out the outgoing htlc. - - const invoiceAmt = 100000 - preimage := lntypes.Preimage{1, 2, 4} - payHash := preimage.Hash() - invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{ - Value: invoiceAmt, - CltvExpiry: 40, - Hash: payHash[:], - } - ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout) - defer cancel() - carolInvoice, errr := carol.AddHoldInvoice(ctxt, invoiceReq) - require.NoError(t.t, errr) - - // Now that we've created the invoice, we'll send a single payment from - // Alice to Carol. We won't wait for the response however, as Carol - // will not immediately settle the payment. - ctx, cancel := context.WithCancel(ctxb) - defer cancel() - - _, errr = alice.RouterClient.SendPaymentV2( - ctx, &routerrpc.SendPaymentRequest{ - PaymentRequest: carolInvoice.PaymentRequest, - TimeoutSeconds: 60, - FeeLimitMsat: noFeeLimitMsat, - }, - ) - require.NoError(t.t, errr) - - // At this point, all 3 nodes should now have an active channel with - // the created HTLC pending on all of them. - nodes := []*lntest.HarnessNode{alice, bob, carol} - err := wait.NoError(func() er.R { - return assertActiveHtlcs(nodes, payHash[:]) - }, defaultTimeout) - util.RequireNoErr(t.t, err) - - // Wait for carol to mark invoice as accepted. There is a small gap to - // bridge between adding the htlc to the channel and executing the exit - // hop logic. - waitForInvoiceAccepted(t, carol, payHash) - - restartBob, err := net.SuspendNode(bob) - util.RequireNoErr(t.t, err) - - // Settle invoice. This will just mark the invoice as settled, as there - // is no link anymore to remove the htlc from the commitment tx. 
For - // this test, it is important to actually settle and not leave the - // invoice in the accepted state, because without a known preimage, the - // channel arbitrator won't go to chain. - ctx, cancel = context.WithTimeout(ctxb, defaultTimeout) - defer cancel() - _, errr = carol.SettleInvoice(ctx, &invoicesrpc.SettleInvoiceMsg{ - Preimage: preimage[:], - }) - require.NoError(t.t, errr) - - // Increase the fee estimate so that the following force close tx will - // be cpfp'ed. - net.SetFeeEstimate(30000) - - // Now we'll mine enough blocks to prompt carol to actually go to the - // chain in order to sweep her HTLC since the value is high enough. - // TODO(roasbeef): modify once go to chain policy changes - numBlocks := padCLTV(uint32( - invoiceReq.CltvExpiry - lncfg.DefaultIncomingBroadcastDelta, - )) - _, err = net.Miner.Node.Generate(numBlocks) - util.RequireNoErr(t.t, err) - - // At this point, Carol should broadcast her active commitment - // transaction in order to go to the chain and sweep her HTLC. If there - // are anchors, Carol also sweeps hers. - expectedTxes := 1 - if c == commitTypeAnchors { - expectedTxes = 2 - } - _, err = getNTxsFromMempool( - net.Miner.Node, expectedTxes, minerMempoolTimeout, - ) - util.RequireNoErr(t.t, err) - - bobFundingTxid, err := lnd.GetChanPointFundingTxid(bobChanPoint) - util.RequireNoErr(t.t, err) - - carolFundingPoint := wire.OutPoint{ - Hash: *bobFundingTxid, - Index: bobChanPoint.OutputIndex, - } - - // The commitment transaction should be spending from the funding - // transaction. - closingTx := getSpendingTxInMempool( - t, net.Miner.Node, minerMempoolTimeout, carolFundingPoint, - ) - closingTxid := closingTx.TxHash() - - // Confirm the commitment. - mineBlocks(t, net, 1, expectedTxes) - - // Restart bob again. - err = restartBob() - util.RequireNoErr(t.t, err) - - // After the force close transaction is mined, Carol should broadcast - // her second level HTLC transaction. 
Bob will broadcast a sweep tx to - // sweep his output in the channel with Carol. When Bob notices Carol's - // second level transaction in the mempool, he will extract the preimage - // and settle the HTLC back off-chain. Bob will also sweep his anchor, - // if present. - expectedTxes = 2 - if c == commitTypeAnchors { - expectedTxes = 3 - } - txes, err := getNTxsFromMempool( - net.Miner.Node, expectedTxes, minerMempoolTimeout, - ) - util.RequireNoErr(t.t, err) - - // All transactions should be spending from the commitment transaction. - assertAllTxesSpendFrom(t, txes, closingTxid) - - // We'll now mine an additional block which should confirm both the - // second layer transactions. - _, err = net.Miner.Node.Generate(1) - util.RequireNoErr(t.t, err) - - time.Sleep(time.Second * 4) - - // TODO(roasbeef): assert bob pending state as well - - // Carol's pending channel report should now show two outputs under - // limbo: her commitment output, as well as the second-layer claim - // output. - pendingChansRequest := &lnrpc.PendingChannelsRequest{} - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, errr := carol.PendingChannels(ctxt, pendingChansRequest) - require.NoError(t.t, errr) - - require.NotZero(t.t, len(pendingChanResp.PendingForceClosingChannels)) - forceCloseChan := pendingChanResp.PendingForceClosingChannels[0] - require.NotZero(t.t, forceCloseChan.LimboBalance) - - // The pending HTLC carol has should also now be in stage 2. - require.Len(t.t, forceCloseChan.PendingHtlcs, 1) - require.Equal(t.t, uint32(2), forceCloseChan.PendingHtlcs[0].Stage) - - // Once the second-level transaction confirmed, Bob should have - // extracted the preimage from the chain, and sent it back to Alice, - // clearing the HTLC off-chain. 
- nodes = []*lntest.HarnessNode{alice} - err = wait.NoError(func() er.R { - return assertNumActiveHtlcs(nodes, 0) - }, defaultTimeout) - util.RequireNoErr(t.t, err) - - // If we mine 4 additional blocks, then both outputs should now be - // mature. - _, err = net.Miner.Node.Generate(defaultCSV) - util.RequireNoErr(t.t, err) - - // We should have a new transaction in the mempool. - _, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) - util.RequireNoErr(t.t, err) - - // Finally, if we mine an additional block to confirm these two sweep - // transactions, Carol should not show a pending channel in her report - // afterwards. - _, err = net.Miner.Node.Generate(1) - util.RequireNoErr(t.t, err) - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = waitForNumChannelPendingForceClose(ctxt, carol, 0, nil) - util.RequireNoErr(t.t, err) - - // The invoice should show as settled for Carol, indicating that it was - // swept on-chain. - invoicesReq := &lnrpc.ListInvoiceRequest{} - invoicesResp, errr := carol.ListInvoices(ctxb, invoicesReq) - require.NoError(t.t, errr) - require.Len(t.t, invoicesResp.Invoices, 1) - invoice := invoicesResp.Invoices[0] - require.Equal(t.t, lnrpc.Invoice_SETTLED, invoice.State) - require.Equal(t.t, int64(invoiceAmt), invoice.AmtPaidSat) - - // Finally, check that the Alice's payment is correctly marked - // succeeded. - ctxt, _ = context.WithTimeout(ctxt, defaultTimeout) - err = checkPaymentStatus( - ctxt, alice, preimage, lnrpc.Payment_SUCCEEDED, - ) - util.RequireNoErr(t.t, err) - - // We'll close out the channel between Alice and Bob, then shutdown - // carol to conclude the test. 
- ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssertType( - ctxt, t, net, alice, aliceChanPoint, - false, false, - ) -} diff --git a/lnd/lntest/itest/lnd_multi-hop_htlc_remote_chain_claim_test.go b/lnd/lntest/itest/lnd_multi-hop_htlc_remote_chain_claim_test.go deleted file mode 100644 index 7c7f1dd0..00000000 --- a/lnd/lntest/itest/lnd_multi-hop_htlc_remote_chain_claim_test.go +++ /dev/null @@ -1,278 +0,0 @@ -package itest - -import ( - "context" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/lnd" - "github.com/pkt-cash/pktd/lnd/lncfg" - "github.com/pkt-cash/pktd/lnd/lnrpc" - "github.com/pkt-cash/pktd/lnd/lnrpc/invoicesrpc" - "github.com/pkt-cash/pktd/lnd/lnrpc/routerrpc" - "github.com/pkt-cash/pktd/lnd/lntest" - "github.com/pkt-cash/pktd/lnd/lntest/wait" - "github.com/pkt-cash/pktd/lnd/lntypes" - "github.com/pkt-cash/pktd/wire" - "github.com/stretchr/testify/require" -) - -// testMultiHopHtlcRemoteChainClaim tests that in the multi-hop HTLC scenario, -// if the remote party goes to chain while we have an incoming HTLC, then when -// we found out the preimage via the witness beacon, we properly settle the -// HTLC directly on-chain using the preimage in order to ensure that we don't -// lose any funds. -func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest, - alice, bob *lntest.HarnessNode, c commitType) { - - ctxb := context.Background() - - // First, we'll create a three hop network: Alice -> Bob -> Carol, with - // Carol refusing to actually settle or directly cancel any HTLC's - // self. - aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork( - t, net, alice, bob, false, c, - ) - - // Clean up carol's node when the test finishes. - defer shutdownAndAssert(net, t, carol) - - // With the network active, we'll now add a new hodl invoice at Carol's - // end. 
Make sure the cltv expiry delta is large enough, otherwise Bob - // won't send out the outgoing htlc. - const invoiceAmt = 100000 - preimage := lntypes.Preimage{1, 2, 5} - payHash := preimage.Hash() - invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{ - Value: invoiceAmt, - CltvExpiry: 40, - Hash: payHash[:], - } - ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout) - defer cancel() - carolInvoice, errr := carol.AddHoldInvoice(ctxt, invoiceReq) - require.NoError(t.t, errr) - - // Now that we've created the invoice, we'll send a single payment from - // Alice to Carol. We won't wait for the response however, as Carol - // will not immediately settle the payment. - ctx, cancel := context.WithCancel(ctxb) - defer cancel() - - _, errr = alice.RouterClient.SendPaymentV2( - ctx, &routerrpc.SendPaymentRequest{ - PaymentRequest: carolInvoice.PaymentRequest, - TimeoutSeconds: 60, - FeeLimitMsat: noFeeLimitMsat, - }, - ) - require.NoError(t.t, errr) - - // At this point, all 3 nodes should now have an active channel with - // the created HTLC pending on all of them. - nodes := []*lntest.HarnessNode{alice, bob, carol} - err := wait.NoError(func() er.R { - return assertActiveHtlcs(nodes, payHash[:]) - }, defaultTimeout) - util.RequireNoErr(t.t, err) - - // Wait for carol to mark invoice as accepted. There is a small gap to - // bridge between adding the htlc to the channel and executing the exit - // hop logic. - waitForInvoiceAccepted(t, carol, payHash) - - // Increase the fee estimate so that the following force close tx will - // be cpfp'ed. - net.SetFeeEstimate(30000) - - // Next, Alice decides that she wants to exit the channel, so she'll - // immediately force close the channel by broadcast her commitment - // transaction. - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - aliceForceClose := closeChannelAndAssertType( - ctxt, t, net, alice, aliceChanPoint, c == commitTypeAnchors, - true, - ) - - // Wait for the channel to be marked pending force close. 
- ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = waitForChannelPendingForceClose(ctxt, alice, aliceChanPoint) - util.RequireNoErr(t.t, err) - - // After closeChannelAndAssertType returns, it has mined a block so now - // bob will attempt to redeem his anchor commitment (if the channel - // type is of that type). - if c == commitTypeAnchors { - _, err = waitForNTxsInMempool( - net.Miner.Node, 1, minerMempoolTimeout, - ) - if err != nil { - t.Fatalf("unable to find bob's anchor commit sweep: %v", - err) - } - } - - // Mine enough blocks for Alice to sweep her funds from the force - // closed channel. closeChannelAndAssertType() already mined a block - // containing the commitment tx and the commit sweep tx will be - // broadcast immediately before it can be included in a block, so mine - // one less than defaultCSV in order to perform mempool assertions. - _, err = net.Miner.Node.Generate(defaultCSV - 1) - util.RequireNoErr(t.t, err) - - // Alice should now sweep her funds. - _, err = waitForNTxsInMempool( - net.Miner.Node, 1, minerMempoolTimeout, - ) - util.RequireNoErr(t.t, err) - - // Suspend bob, so Carol is forced to go on chain. - restartBob, err := net.SuspendNode(bob) - util.RequireNoErr(t.t, err) - - // Settle invoice. This will just mark the invoice as settled, as there - // is no link anymore to remove the htlc from the commitment tx. For - // this test, it is important to actually settle and not leave the - // invoice in the accepted state, because without a known preimage, the - // channel arbitrator won't go to chain. - ctx, cancel = context.WithTimeout(ctxb, defaultTimeout) - defer cancel() - _, errr = carol.SettleInvoice(ctx, &invoicesrpc.SettleInvoiceMsg{ - Preimage: preimage[:], - }) - require.NoError(t.t, errr) - - // We'll now mine enough blocks so Carol decides that she needs to go - // on-chain to claim the HTLC as Bob has been inactive. 
- numBlocks := padCLTV(uint32( - invoiceReq.CltvExpiry-lncfg.DefaultIncomingBroadcastDelta, - ) - defaultCSV) - - _, err = net.Miner.Node.Generate(numBlocks) - util.RequireNoErr(t.t, err) - - expectedTxes := 1 - if c == commitTypeAnchors { - expectedTxes = 2 - } - - // Carol's commitment transaction should now be in the mempool. If - // there are anchors, Carol also sweeps her anchor. - _, err = waitForNTxsInMempool( - net.Miner.Node, expectedTxes, minerMempoolTimeout, - ) - util.RequireNoErr(t.t, err) - bobFundingTxid, err := lnd.GetChanPointFundingTxid(bobChanPoint) - util.RequireNoErr(t.t, err) - carolFundingPoint := wire.OutPoint{ - Hash: *bobFundingTxid, - Index: bobChanPoint.OutputIndex, - } - - // The closing transaction should be spending from the funding - // transaction. - closingTx := getSpendingTxInMempool( - t, net.Miner.Node, minerMempoolTimeout, carolFundingPoint, - ) - closingTxid := closingTx.TxHash() - - // Mine a block, which should contain: the commitment, possibly an - // anchor sweep and the coinbase tx. - block := mineBlocks(t, net, 1, expectedTxes)[0] - require.Len(t.t, block.Transactions, expectedTxes+1) - assertTxInBlock(t, block, &closingTxid) - - // Restart bob again. - err = restartBob() - util.RequireNoErr(t.t, err) - - // After the force close transacion is mined, Carol should broadcast her - // second level HTLC transacion. Bob will broadcast a sweep tx to sweep - // his output in the channel with Carol. He can do this immediately, as - // the output is not timelocked since Carol was the one force closing. - // If there are anchors, Bob should also sweep his. - expectedTxes = 2 - if c == commitTypeAnchors { - expectedTxes = 3 - } - txes, err := getNTxsFromMempool( - net.Miner.Node, expectedTxes, minerMempoolTimeout, - ) - util.RequireNoErr(t.t, err) - - // All transactions should be pending from the commitment transaction. - assertAllTxesSpendFrom(t, txes, closingTxid) - - // Mine a block to confirm the two transactions (+ coinbase). 
- block = mineBlocks(t, net, 1, expectedTxes)[0] - require.Len(t.t, block.Transactions, expectedTxes+1) - - // Keep track of the second level tx maturity. - carolSecondLevelCSV := uint32(defaultCSV) - - // When Bob notices Carol's second level transaction in the block, he - // will extract the preimage and broadcast a sweep tx to directly claim - // the HTLC in his (already closed) channel with Alice. - bobHtlcSweep, err := waitForTxInMempool( - net.Miner.Node, minerMempoolTimeout, - ) - util.RequireNoErr(t.t, err) - - // It should spend from the commitment in the channel with Alice. - tx, err := net.Miner.Node.GetRawTransaction(bobHtlcSweep) - util.RequireNoErr(t.t, err) - require.Equal( - t.t, *aliceForceClose, tx.MsgTx().TxIn[0].PreviousOutPoint.Hash, - ) - - // We'll now mine a block which should confirm Bob's HTLC sweep - // transaction. - block = mineBlocks(t, net, 1, 1)[0] - require.Len(t.t, block.Transactions, 2) - assertTxInBlock(t, block, bobHtlcSweep) - carolSecondLevelCSV-- - - // Now that the sweeping transaction has been confirmed, Bob should now - // recognize that all contracts have been fully resolved, and show no - // pending close channels. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = waitForNumChannelPendingForceClose(ctxt, bob, 0, nil) - util.RequireNoErr(t.t, err) - - // If we then mine 3 additional blocks, Carol's second level tx will - // mature, and she should pull the funds. - _, err = net.Miner.Node.Generate(carolSecondLevelCSV) - util.RequireNoErr(t.t, err) - - carolSweep, err := waitForTxInMempool( - net.Miner.Node, minerMempoolTimeout, - ) - util.RequireNoErr(t.t, err) - - // When Carol's sweep gets confirmed, she should have no more pending - // channels. 
- block = mineBlocks(t, net, 1, 1)[0] - assertTxInBlock(t, block, carolSweep) - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = waitForNumChannelPendingForceClose(ctxt, carol, 0, nil) - util.RequireNoErr(t.t, err) - - // The invoice should show as settled for Carol, indicating that it was - // swept on-chain. - invoicesReq := &lnrpc.ListInvoiceRequest{} - invoicesResp, errr := carol.ListInvoices(ctxb, invoicesReq) - require.NoError(t.t, errr) - require.Len(t.t, invoicesResp.Invoices, 1) - invoice := invoicesResp.Invoices[0] - require.Equal(t.t, lnrpc.Invoice_SETTLED, invoice.State) - require.Equal(t.t, int64(invoiceAmt), invoice.AmtPaidSat) - - // Finally, check that the Alice's payment is correctly marked - // succeeded. - ctxt, _ = context.WithTimeout(ctxt, defaultTimeout) - err = checkPaymentStatus( - ctxt, alice, preimage, lnrpc.Payment_SUCCEEDED, - ) - util.RequireNoErr(t.t, err) -} diff --git a/lnd/lntest/itest/lnd_multi-hop_local_force_close_on_chain_htlc_timeout_test.go b/lnd/lntest/itest/lnd_multi-hop_local_force_close_on_chain_htlc_timeout_test.go deleted file mode 100644 index 5d916cc0..00000000 --- a/lnd/lntest/itest/lnd_multi-hop_local_force_close_on_chain_htlc_timeout_test.go +++ /dev/null @@ -1,203 +0,0 @@ -package itest - -import ( - "context" - - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/lnd/lnrpc/routerrpc" - "github.com/pkt-cash/pktd/lnd/lntest" - "github.com/pkt-cash/pktd/lnd/lntest/wait" - "github.com/stretchr/testify/require" -) - -// testMultiHopLocalForceCloseOnChainHtlcTimeout tests that in a multi-hop HTLC -// scenario, if the node that extended the HTLC to the final node closes their -// commitment on-chain early, then it eventually recognizes this HTLC as one -// that's timed out. At this point, the node should timeout the HTLC using the -// HTLC timeout transaction, then cancel it backwards as normal. 
-func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness, - t *harnessTest, alice, bob *lntest.HarnessNode, c commitType) { - - ctxb := context.Background() - - // First, we'll create a three hop network: Alice -> Bob -> Carol, with - // Carol refusing to actually settle or directly cancel any HTLC's - // self. - aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork( - t, net, alice, bob, true, c, - ) - - // Clean up carol's node when the test finishes. - defer shutdownAndAssert(net, t, carol) - - // With our channels set up, we'll then send a single HTLC from Alice - // to Carol. As Carol is in hodl mode, she won't settle this HTLC which - // opens up the base for out tests. - const ( - finalCltvDelta = 40 - htlcAmt = btcutil.Amount(30000) - ) - ctx, cancel := context.WithCancel(ctxb) - defer cancel() - - // We'll now send a single HTLC across our multi-hop network. - carolPubKey := carol.PubKey[:] - payHash := makeFakePayHash(t) - _, errr := alice.RouterClient.SendPaymentV2( - ctx, &routerrpc.SendPaymentRequest{ - Dest: carolPubKey, - Amt: int64(htlcAmt), - PaymentHash: payHash, - FinalCltvDelta: finalCltvDelta, - TimeoutSeconds: 60, - FeeLimitMsat: noFeeLimitMsat, - }, - ) - require.NoError(t.t, errr) - - // Once the HTLC has cleared, all channels in our mini network should - // have the it locked in. - nodes := []*lntest.HarnessNode{alice, bob, carol} - err := wait.NoError(func() er.R { - return assertActiveHtlcs(nodes, payHash) - }, defaultTimeout) - util.RequireNoErr(t.t, err) - - // Increase the fee estimate so that the following force close tx will - // be cpfp'ed. - net.SetFeeEstimate(30000) - - // Now that all parties have the HTLC locked in, we'll immediately - // force close the Bob -> Carol channel. This should trigger contract - // resolution mode for both of them. 
- ctxt, _ := context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssertType( - ctxt, t, net, bob, bobChanPoint, c == commitTypeAnchors, true, - ) - - // At this point, Bob should have a pending force close channel as he - // just went to chain. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = waitForNumChannelPendingForceClose( - ctxt, bob, 1, func(c *lnrpcForceCloseChannel) er.R { - if c.LimboBalance == 0 { - return er.Errorf("bob should have nonzero "+ - "limbo balance instead has: %v", - c.LimboBalance) - } - - return nil - }, - ) - util.RequireNoErr(t.t, err) - - // We'll mine defaultCSV blocks in order to generate the sweep - // transaction of Bob's funding output. If there are anchors, mine - // Carol's anchor sweep too. - if c == commitTypeAnchors { - _, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) - util.RequireNoErr(t.t, err) - } - - // The sweep is broadcast on the block immediately before the CSV - // expires and the commitment was already mined inside - // closeChannelAndAssertType(), so mine one block less than defaultCSV - // in order to perform mempool assertions. - _, err = net.Miner.Node.Generate(defaultCSV - 1) - util.RequireNoErr(t.t, err) - - _, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) - util.RequireNoErr(t.t, err) - - // We'll now mine enough blocks for the HTLC to expire. After this, Bob - // should hand off the now expired HTLC output to the utxo nursery. - numBlocks := padCLTV(uint32(finalCltvDelta - defaultCSV)) - _, err = net.Miner.Node.Generate(numBlocks) - util.RequireNoErr(t.t, err) - - // Bob's pending channel report should show that he has a single HTLC - // that's now in stage one. 
- ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = waitForNumChannelPendingForceClose( - ctxt, bob, 1, func(c *lnrpcForceCloseChannel) er.R { - if len(c.PendingHtlcs) != 1 { - return er.Errorf("bob should have pending " + - "htlc but doesn't") - } - - if c.PendingHtlcs[0].Stage != 1 { - return er.Errorf("bob's htlc should have "+ - "advanced to the first stage: %v", err) - } - - return nil - }, - ) - util.RequireNoErr(t.t, err) - - // We should also now find a transaction in the mempool, as Bob should - // have broadcast his second layer timeout transaction. - timeoutTx, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) - util.RequireNoErr(t.t, err) - - // Next, we'll mine an additional block. This should serve to confirm - // the second layer timeout transaction. - block := mineBlocks(t, net, 1, 1)[0] - assertTxInBlock(t, block, timeoutTx) - - // With the second layer timeout transaction confirmed, Bob should have - // canceled backwards the HTLC that carol sent. - nodes = []*lntest.HarnessNode{alice} - err = wait.NoError(func() er.R { - return assertNumActiveHtlcs(nodes, 0) - }, defaultTimeout) - util.RequireNoErr(t.t, err) - - // Additionally, Bob should now show that HTLC as being advanced to the - // second stage. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = waitForNumChannelPendingForceClose( - ctxt, bob, 1, func(c *lnrpcForceCloseChannel) er.R { - if len(c.PendingHtlcs) != 1 { - return er.Errorf("bob should have pending " + - "htlc but doesn't") - } - - if c.PendingHtlcs[0].Stage != 2 { - return er.Errorf("bob's htlc should have "+ - "advanced to the second stage: %v", err) - } - - return nil - }, - ) - util.RequireNoErr(t.t, err) - - // We'll now mine 4 additional blocks. This should be enough for Bob's - // CSV timelock to expire and the sweeping transaction of the HTLC to be - // broadcast. 
- _, err = net.Miner.Node.Generate(defaultCSV) - util.RequireNoErr(t.t, err) - - sweepTx, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) - util.RequireNoErr(t.t, err) - - // We'll then mine a final block which should confirm this second layer - // sweep transaction. - block = mineBlocks(t, net, 1, 1)[0] - assertTxInBlock(t, block, sweepTx) - - // At this point, Bob should no longer show any channels as pending - // close. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = waitForNumChannelPendingForceClose(ctxt, bob, 0, nil) - util.RequireNoErr(t.t, err) - - // Coop close, no anchors. - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssertType( - ctxt, t, net, alice, aliceChanPoint, false, false, - ) -} diff --git a/lnd/lntest/itest/lnd_multi-hop_remote_force_close_on_chain_htlc_timeout_test.go b/lnd/lntest/itest/lnd_multi-hop_remote_force_close_on_chain_htlc_timeout_test.go deleted file mode 100644 index b4313f76..00000000 --- a/lnd/lntest/itest/lnd_multi-hop_remote_force_close_on_chain_htlc_timeout_test.go +++ /dev/null @@ -1,179 +0,0 @@ -package itest - -import ( - "context" - - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/lnd/lnrpc/routerrpc" - "github.com/pkt-cash/pktd/lnd/lntest" - "github.com/pkt-cash/pktd/lnd/lntest/wait" - "github.com/stretchr/testify/require" -) - -// testMultiHopRemoteForceCloseOnChainHtlcTimeout tests that if we extend a -// multi-hop HTLC, and the final destination of the HTLC force closes the -// channel, then we properly timeout the HTLC directly on *their* commitment -// transaction once the timeout has expired. Once we sweep the transaction, we -// should also cancel back the initial HTLC. 
-func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness, - t *harnessTest, alice, bob *lntest.HarnessNode, c commitType) { - - ctxb := context.Background() - - // First, we'll create a three hop network: Alice -> Bob -> Carol, with - // Carol refusing to actually settle or directly cancel any HTLC's - // self. - aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork( - t, net, alice, bob, true, c, - ) - - // Clean up carol's node when the test finishes. - defer shutdownAndAssert(net, t, carol) - - // With our channels set up, we'll then send a single HTLC from Alice - // to Carol. As Carol is in hodl mode, she won't settle this HTLC which - // opens up the base for out tests. - const ( - finalCltvDelta = 40 - htlcAmt = btcutil.Amount(30000) - ) - - ctx, cancel := context.WithCancel(ctxb) - defer cancel() - - // We'll now send a single HTLC across our multi-hop network. - carolPubKey := carol.PubKey[:] - payHash := makeFakePayHash(t) - _, errr := alice.RouterClient.SendPaymentV2( - ctx, &routerrpc.SendPaymentRequest{ - Dest: carolPubKey, - Amt: int64(htlcAmt), - PaymentHash: payHash, - FinalCltvDelta: finalCltvDelta, - TimeoutSeconds: 60, - FeeLimitMsat: noFeeLimitMsat, - }, - ) - require.NoError(t.t, errr) - - // Once the HTLC has cleared, all the nodes in our mini network should - // show that the HTLC has been locked in. - nodes := []*lntest.HarnessNode{alice, bob, carol} - err := wait.NoError(func() er.R { - return assertActiveHtlcs(nodes, payHash) - }, defaultTimeout) - util.RequireNoErr(t.t, err) - - // Increase the fee estimate so that the following force close tx will - // be cpfp'ed. - net.SetFeeEstimate(30000) - - // At this point, we'll now instruct Carol to force close the - // transaction. This will let us exercise that Bob is able to sweep the - // expired HTLC on Carol's version of the commitment transaction. If - // Carol has an anchor, it will be swept too. 
- ctxt, _ := context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssertType( - ctxt, t, net, carol, bobChanPoint, c == commitTypeAnchors, - true, - ) - - // At this point, Bob should have a pending force close channel as - // Carol has gone directly to chain. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = waitForNumChannelPendingForceClose(ctxt, bob, 1, nil) - util.RequireNoErr(t.t, err) - - // Bob can sweep his output immediately. If there is an anchor, Bob will - // sweep that as well. - expectedTxes := 1 - if c == commitTypeAnchors { - expectedTxes = 2 - } - - _, err = waitForNTxsInMempool( - net.Miner.Node, expectedTxes, minerMempoolTimeout, - ) - util.RequireNoErr(t.t, err) - - // Next, we'll mine enough blocks for the HTLC to expire. At this - // point, Bob should hand off the output to his internal utxo nursery, - // which will broadcast a sweep transaction. - numBlocks := padCLTV(finalCltvDelta - 1) - _, err = net.Miner.Node.Generate(numBlocks) - util.RequireNoErr(t.t, err) - - // If we check Bob's pending channel report, it should show that he has - // a single HTLC that's now in the second stage, as skip the initial - // first stage since this is a direct HTLC. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = waitForNumChannelPendingForceClose( - ctxt, bob, 1, func(c *lnrpcForceCloseChannel) er.R { - if len(c.PendingHtlcs) != 1 { - return er.Errorf("bob should have pending " + - "htlc but doesn't") - } - - if c.PendingHtlcs[0].Stage != 2 { - return er.Errorf("bob's htlc should have "+ - "advanced to the second stage: %v", err) - } - - return nil - }, - ) - util.RequireNoErr(t.t, err) - - // We need to generate an additional block to trigger the sweep. - _, err = net.Miner.Node.Generate(1) - util.RequireNoErr(t.t, err) - - // Bob's sweeping transaction should now be found in the mempool at - // this point. 
- sweepTx, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) - if err != nil { - // If Bob's transaction isn't yet in the mempool, then due to - // internal message passing and the low period between blocks - // being mined, it may have been detected as a late - // registration. As a result, we'll mine another block and - // repeat the check. If it doesn't go through this time, then - // we'll fail. - // TODO(halseth): can we use waitForChannelPendingForceClose to - // avoid this hack? - _, err = net.Miner.Node.Generate(1) - util.RequireNoErr(t.t, err) - sweepTx, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) - util.RequireNoErr(t.t, err) - } - - // If we mine an additional block, then this should confirm Bob's - // transaction which sweeps the direct HTLC output. - block := mineBlocks(t, net, 1, 1)[0] - assertTxInBlock(t, block, sweepTx) - - // Now that the sweeping transaction has been confirmed, Bob should - // cancel back that HTLC. As a result, Alice should not know of any - // active HTLC's. - nodes = []*lntest.HarnessNode{alice} - err = wait.NoError(func() er.R { - return assertNumActiveHtlcs(nodes, 0) - }, defaultTimeout) - util.RequireNoErr(t.t, err) - - // Now we'll check Bob's pending channel report. Since this was Carol's - // commitment, he doesn't have to wait for any CSV delays. As a result, - // he should show no additional pending transactions. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = waitForNumChannelPendingForceClose(ctxt, bob, 0, nil) - util.RequireNoErr(t.t, err) - - // We'll close out the test by closing the channel from Alice to Bob, - // and then shutting down the new node we created as its no longer - // needed. Coop close, no anchors. 
- ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssertType( - ctxt, t, net, alice, aliceChanPoint, false, false, - ) -} diff --git a/lnd/lntest/itest/lnd_multi-hop_test.go b/lnd/lntest/itest/lnd_multi-hop_test.go deleted file mode 100644 index 9f745cdd..00000000 --- a/lnd/lntest/itest/lnd_multi-hop_test.go +++ /dev/null @@ -1,319 +0,0 @@ -package itest - -import ( - "context" - "fmt" - "testing" - - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/lnd/lnrpc" - "github.com/pkt-cash/pktd/lnd/lnrpc/invoicesrpc" - "github.com/pkt-cash/pktd/lnd/lntest" - "github.com/pkt-cash/pktd/lnd/lntypes" - "github.com/pkt-cash/pktd/wire" -) - -func testMultiHopHtlcClaims(net *lntest.NetworkHarness, t *harnessTest) { - - type testCase struct { - name string - test func(net *lntest.NetworkHarness, t *harnessTest, alice, - bob *lntest.HarnessNode, c commitType) - } - - subTests := []testCase{ - { - // bob: outgoing our commit timeout - // carol: incoming their commit watch and see timeout - name: "local force close immediate expiry", - test: testMultiHopHtlcLocalTimeout, - }, - { - // bob: outgoing watch and see, they sweep on chain - // carol: incoming our commit, know preimage - name: "receiver chain claim", - test: testMultiHopReceiverChainClaim, - }, - { - // bob: outgoing our commit watch and see timeout - // carol: incoming their commit watch and see timeout - name: "local force close on-chain htlc timeout", - test: testMultiHopLocalForceCloseOnChainHtlcTimeout, - }, - { - // bob: outgoing their commit watch and see timeout - // carol: incoming our commit watch and see timeout - name: "remote force close on-chain htlc timeout", - test: testMultiHopRemoteForceCloseOnChainHtlcTimeout, - }, - { - // bob: outgoing our commit watch and see, they sweep on chain - // bob: incoming our commit watch and learn preimage - // carol: incoming their commit know 
preimage - name: "local chain claim", - test: testMultiHopHtlcLocalChainClaim, - }, - { - // bob: outgoing their commit watch and see, they sweep on chain - // bob: incoming their commit watch and learn preimage - // carol: incoming our commit know preimage - name: "remote chain claim", - test: testMultiHopHtlcRemoteChainClaim, - }, - } - - commitTypes := []commitType{ - commitTypeLegacy, - commitTypeAnchors, - } - - for _, commitType := range commitTypes { - testName := fmt.Sprintf("committype=%v", commitType.String()) - - commitType := commitType - success := t.t.Run(testName, func(t *testing.T) { - ht := newHarnessTest(t, net) - - args := commitType.Args() - alice, err := net.NewNode("Alice", args) - if err != nil { - t.Fatalf("unable to create new node: %v", err) - } - defer shutdownAndAssert(net, ht, alice) - - bob, err := net.NewNode("Bob", args) - if err != nil { - t.Fatalf("unable to create new node: %v", err) - } - defer shutdownAndAssert(net, ht, bob) - - ctxb := context.Background() - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, alice, bob); err != nil { - t.Fatalf("unable to connect alice to bob: %v", err) - } - - for _, subTest := range subTests { - subTest := subTest - - success := ht.t.Run(subTest.name, func(t *testing.T) { - ht := newHarnessTest(t, net) - - // Start each test with the default - // static fee estimate. - net.SetFeeEstimate(12500) - - subTest.test(net, ht, alice, bob, commitType) - }) - if !success { - return - } - } - }) - if !success { - return - } - } -} - -// waitForInvoiceAccepted waits until the specified invoice moved to the -// accepted state by the node. 
-func waitForInvoiceAccepted(t *harnessTest, node *lntest.HarnessNode, - payHash lntypes.Hash) { - - ctx, cancel := context.WithTimeout(context.Background(), defaultTimeout) - defer cancel() - invoiceUpdates, err := node.SubscribeSingleInvoice(ctx, - &invoicesrpc.SubscribeSingleInvoiceRequest{ - RHash: payHash[:], - }, - ) - if err != nil { - t.Fatalf("subscribe single invoice: %v", err) - } - - for { - update, err := invoiceUpdates.Recv() - if err != nil { - t.Fatalf("invoice update err: %v", err) - } - if update.State == lnrpc.Invoice_ACCEPTED { - break - } - } -} - -// checkPaymentStatus asserts that the given node list a payment with the given -// preimage has the expected status. -func checkPaymentStatus(ctxt context.Context, node *lntest.HarnessNode, - preimage lntypes.Preimage, status lnrpc.Payment_PaymentStatus) er.R { - - req := &lnrpc.ListPaymentsRequest{ - IncludeIncomplete: true, - } - paymentsResp, err := node.ListPayments(ctxt, req) - if err != nil { - return er.Errorf("error when obtaining Alice payments: %v", - err) - } - - payHash := preimage.Hash() - var found bool - for _, p := range paymentsResp.Payments { - if p.PaymentHash != payHash.String() { - continue - } - - found = true - if p.Status != status { - return er.Errorf("expected payment status "+ - "%v, got %v", status, p.Status) - } - - switch status { - - // If this expected status is SUCCEEDED, we expect the final preimage. - case lnrpc.Payment_SUCCEEDED: - if p.PaymentPreimage != preimage.String() { - return er.Errorf("preimage doesn't match: %v vs %v", - p.PaymentPreimage, preimage.String()) - } - - // Otherwise we expect an all-zero preimage. 
- default: - if p.PaymentPreimage != (lntypes.Preimage{}).String() { - return er.Errorf("expected zero preimage, got %v", - p.PaymentPreimage) - } - } - - } - - if !found { - return er.Errorf("payment with payment hash %v not found "+ - "in response", payHash) - } - - return nil -} - -func createThreeHopNetwork(t *harnessTest, net *lntest.NetworkHarness, - alice, bob *lntest.HarnessNode, carolHodl bool, c commitType) ( - *lnrpc.ChannelPoint, *lnrpc.ChannelPoint, *lntest.HarnessNode) { - - ctxb := context.Background() - - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - err := net.EnsureConnected(ctxt, alice, bob) - if err != nil { - t.Fatalf("unable to connect peers: %v", err) - } - - // Make sure there are enough utxos for anchoring. - for i := 0; i < 2; i++ { - ctxt, _ = context.WithTimeout(context.Background(), defaultTimeout) - err = net.SendCoins(ctxt, btcutil.UnitsPerCoin(), alice) - if err != nil { - t.Fatalf("unable to send coins to Alice: %v", err) - } - - ctxt, _ = context.WithTimeout(context.Background(), defaultTimeout) - err = net.SendCoins(ctxt, btcutil.UnitsPerCoin(), bob) - if err != nil { - t.Fatalf("unable to send coins to Bob: %v", err) - } - } - - // We'll start the test by creating a channel between Alice and Bob, - // which will act as the first leg for out multi-hop HTLC. - const chanAmt = 1000000 - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - aliceChanPoint := openChannelAndAssert( - ctxt, t, net, alice, bob, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = alice.WaitForNetworkChannelOpen(ctxt, aliceChanPoint) - if err != nil { - t.Fatalf("alice didn't report channel: %v", err) - } - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = bob.WaitForNetworkChannelOpen(ctxt, aliceChanPoint) - if err != nil { - t.Fatalf("bob didn't report channel: %v", err) - } - - // Next, we'll create a new node "carol" and have Bob connect to her. 
If - // the carolHodl flag is set, we'll make carol always hold onto the - // HTLC, this way it'll force Bob to go to chain to resolve the HTLC. - carolFlags := c.Args() - if carolHodl { - carolFlags = append(carolFlags, "--hodl.exit-settle") - } - carol, err := net.NewNode("Carol", carolFlags) - if err != nil { - t.Fatalf("unable to create new node: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, bob, carol); err != nil { - t.Fatalf("unable to connect bob to carol: %v", err) - } - - // Make sure Carol has enough utxos for anchoring. Because the anchor by - // itself often doesn't meet the dust limit, a utxo from the wallet - // needs to be attached as an additional input. This can still lead to a - // positively-yielding transaction. - for i := 0; i < 2; i++ { - ctxt, _ = context.WithTimeout(context.Background(), defaultTimeout) - err = net.SendCoins(ctxt, btcutil.UnitsPerCoin(), carol) - if err != nil { - t.Fatalf("unable to send coins to Alice: %v", err) - } - } - - // We'll then create a channel from Bob to Carol. After this channel is - // open, our topology looks like: A -> B -> C. 
- ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - bobChanPoint := openChannelAndAssert( - ctxt, t, net, bob, carol, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = bob.WaitForNetworkChannelOpen(ctxt, bobChanPoint) - if err != nil { - t.Fatalf("alice didn't report channel: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = carol.WaitForNetworkChannelOpen(ctxt, bobChanPoint) - if err != nil { - t.Fatalf("bob didn't report channel: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = alice.WaitForNetworkChannelOpen(ctxt, bobChanPoint) - if err != nil { - t.Fatalf("bob didn't report channel: %v", err) - } - - return aliceChanPoint, bobChanPoint, carol -} - -// assertAllTxesSpendFrom asserts that all txes in the list spend from the given -// tx. -func assertAllTxesSpendFrom(t *harnessTest, txes []*wire.MsgTx, - prevTxid chainhash.Hash) { - - for _, tx := range txes { - if tx.TxIn[0].PreviousOutPoint.Hash != prevTxid { - t.Fatalf("tx %v did not spend from %v", - tx.TxHash(), prevTxid) - } - } -} diff --git a/lnd/lntest/itest/lnd_network_test.go b/lnd/lntest/itest/lnd_network_test.go deleted file mode 100644 index 6dababfa..00000000 --- a/lnd/lntest/itest/lnd_network_test.go +++ /dev/null @@ -1,123 +0,0 @@ -package itest - -import ( - "context" - "strings" - "time" - - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd" - "github.com/pkt-cash/pktd/lnd/lnrpc" - "github.com/pkt-cash/pktd/lnd/lntest" - "github.com/stretchr/testify/require" -) - -// testNetworkConnectionTimeout checks that the connectiontimeout is taking -// effect. It creates a node with a small connection timeout value, and connects -// it to a non-routable IP address. 
-func testNetworkConnectionTimeout(net *lntest.NetworkHarness, t *harnessTest) { - var ( - ctxt, _ = context.WithTimeout( - context.Background(), defaultTimeout, - ) - // testPub is a random public key for testing only. - testPub = "0332bda7da70fefe4b6ab92f53b3c4f4ee7999" + - "f312284a8e89c8670bb3f67dbee2" - // testHost is a non-routable IP address. It's used to cause a - // connection timeout. - testHost = "10.255.255.255" - ) - - // First, test the global timeout settings. - // Create Carol with a connection timeout of 1 millisecond. - carol, err := net.NewNode("Carol", []string{"--connectiontimeout=1ms"}) - if err != nil { - t.Fatalf("unable to create new node carol: %v", err) - } - defer shutdownAndAssert(net, t, carol) - - // Try to connect Carol to a non-routable IP address, which should give - // us a timeout error. - req := &lnrpc.ConnectPeerRequest{ - Addr: &lnrpc.LightningAddress{ - Pubkey: testPub, - Host: testHost, - }, - } - assertTimeoutError(ctxt, t, carol, req) - - // Second, test timeout on the connect peer request. - // Create Dave with the default timeout setting. - dave, err := net.NewNode("Dave", nil) - if err != nil { - t.Fatalf("unable to create new node dave: %v", err) - } - defer shutdownAndAssert(net, t, dave) - - // Try to connect Dave to a non-routable IP address, using a timeout - // value of 1ms, which should give us a timeout error immediately. - req = &lnrpc.ConnectPeerRequest{ - Addr: &lnrpc.LightningAddress{ - Pubkey: testPub, - Host: testHost, - }, - Timeout: 1, - } - assertTimeoutError(ctxt, t, dave, req) -} - -// assertTimeoutError asserts that a connection timeout error is raised. A -// context with a default timeout is used to make the request. If our customized -// connection timeout is less than the default, we won't see the request context -// times out, instead a network connection timeout will be returned. 
-func assertTimeoutError(ctxt context.Context, t *harnessTest, - node *lntest.HarnessNode, req *lnrpc.ConnectPeerRequest) { - - t.t.Helper() - - // Create a context with a timeout value. - ctxt, cancel := context.WithTimeout(ctxt, defaultTimeout) - defer cancel() - - err := connect(ctxt, node, req) - - // a DeadlineExceeded error will appear in the context if the above - // ctxtTimeout value is reached. - require.NoError(t.t, ctxt.Err(), "context time out") - - // Check that the network returns a timeout error. - require.Containsf( - t.t, err.String(), "i/o timeout", - "expected to get a timeout error, instead got: %v", err, - ) -} - -func connect(ctxt context.Context, node *lntest.HarnessNode, - req *lnrpc.ConnectPeerRequest) er.R { - - syncTimeout := time.After(15 * time.Second) - ticker := time.NewTicker(time.Millisecond * 100) - defer ticker.Stop() - - for { - select { - case <-ticker.C: - _, err := node.ConnectPeer(ctxt, req) - // If there's no error, return nil - if err == nil { - return er.E(err) - } - // If the error is no ErrServerNotActive, return it. - // Otherwise, we will retry until timeout. 
- if !strings.Contains(err.Error(), - lnd.ErrServerNotActive.Detail) { - - return er.E(err) - } - case <-syncTimeout: - return er.Errorf("chain backend did not " + - "finish syncing") - } - } - return nil -} diff --git a/lnd/lntest/itest/lnd_onchain_test.go b/lnd/lntest/itest/lnd_onchain_test.go deleted file mode 100644 index 7a377304..00000000 --- a/lnd/lntest/itest/lnd_onchain_test.go +++ /dev/null @@ -1,162 +0,0 @@ -package itest - -import ( - "bytes" - "context" - - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/lnd/lnrpc" - "github.com/pkt-cash/pktd/lnd/lnrpc/walletrpc" - "github.com/pkt-cash/pktd/lnd/lntest" - "github.com/pkt-cash/pktd/lnd/lntest/wait" - "github.com/pkt-cash/pktd/lnd/sweep" - "github.com/pkt-cash/pktd/txscript" -) - -// testCPFP ensures that the daemon can bump an unconfirmed transaction's fee -// rate by broadcasting a Child-Pays-For-Parent (CPFP) transaction. -// -// TODO(wilmer): Add RBF case once btcd supports it. -func testCPFP(net *lntest.NetworkHarness, t *harnessTest) { - // Skip this test for neutrino, as it's not aware of mempool - // transactions. - if net.BackendCfg.Name() == "neutrino" { - t.Skipf("skipping reorg test for neutrino backend") - } - - // We'll start the test by sending Alice some coins, which she'll use to - // send to Bob. - ctxb := context.Background() - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - err := net.SendCoins(ctxt, btcutil.UnitsPerCoin(), net.Alice) - if err != nil { - t.Fatalf("unable to send coins to alice: %v", err) - } - - // Create an address for Bob to send the coins to. - addrReq := &lnrpc.NewAddressRequest{ - Type: lnrpc.AddressType_WITNESS_PUBKEY_HASH, - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - resp, errr := net.Bob.NewAddress(ctxt, addrReq) - if errr != nil { - t.Fatalf("unable to get new address for bob: %v", errr) - } - - // Send the coins from Alice to Bob. 
We should expect a transaction to - // be broadcast and seen in the mempool. - sendReq := &lnrpc.SendCoinsRequest{ - Addr: resp.Address, - Amount: btcutil.UnitsPerCoinI64(), - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if _, errr = net.Alice.SendCoins(ctxt, sendReq); errr != nil { - t.Fatalf("unable to send coins to bob: %v", errr) - } - - txid, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) - if err != nil { - t.Fatalf("expected one mempool transaction: %v", err) - } - - // We'll then extract the raw transaction from the mempool in order to - // determine the index of Bob's output. - tx, err := net.Miner.Node.GetRawTransaction(txid) - if err != nil { - t.Fatalf("unable to extract raw transaction from mempool: %v", - err) - } - bobOutputIdx := -1 - for i, txOut := range tx.MsgTx().TxOut { - _, addrs, _, err := txscript.ExtractPkScriptAddrs( - txOut.PkScript, net.Miner.ActiveNet, - ) - if err != nil { - t.Fatalf("unable to extract address from pkScript=%x: "+ - "%v", txOut.PkScript, err) - } - if addrs[0].String() == resp.Address { - bobOutputIdx = i - } - } - if bobOutputIdx == -1 { - t.Fatalf("bob's output was not found within the transaction") - } - - // Wait until bob has seen the tx and considers it as owned. - op := &lnrpc.OutPoint{ - TxidBytes: txid[:], - OutputIndex: uint32(bobOutputIdx), - } - assertWalletUnspent(t, net.Bob, op) - - // We'll attempt to bump the fee of this transaction by performing a - // CPFP from Alice's point of view. - bumpFeeReq := &walletrpc.BumpFeeRequest{ - Outpoint: op, - SatPerByte: uint32(sweep.DefaultMaxFeeRate.FeePerKVByte() / 2000), - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - _, errr = net.Bob.WalletKitClient.BumpFee(ctxt, bumpFeeReq) - if errr != nil { - t.Fatalf("unable to bump fee: %v", errr) - } - - // We should now expect to see two transactions within the mempool, a - // parent and its child. 
- _, err = waitForNTxsInMempool(net.Miner.Node, 2, minerMempoolTimeout) - if err != nil { - t.Fatalf("expected two mempool transactions: %v", err) - } - - // We should also expect to see the output being swept by the - // UtxoSweeper. We'll ensure it's using the fee rate specified. - pendingSweepsReq := &walletrpc.PendingSweepsRequest{} - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingSweepsResp, errr := net.Bob.WalletKitClient.PendingSweeps( - ctxt, pendingSweepsReq, - ) - if errr != nil { - t.Fatalf("unable to retrieve pending sweeps: %v", errr) - } - if len(pendingSweepsResp.PendingSweeps) != 1 { - t.Fatalf("expected to find %v pending sweep(s), found %v", 1, - len(pendingSweepsResp.PendingSweeps)) - } - pendingSweep := pendingSweepsResp.PendingSweeps[0] - if !bytes.Equal(pendingSweep.Outpoint.TxidBytes, op.TxidBytes) { - t.Fatalf("expected output txid %x, got %x", op.TxidBytes, - pendingSweep.Outpoint.TxidBytes) - } - if pendingSweep.Outpoint.OutputIndex != op.OutputIndex { - t.Fatalf("expected output index %v, got %v", op.OutputIndex, - pendingSweep.Outpoint.OutputIndex) - } - if pendingSweep.SatPerByte != bumpFeeReq.SatPerByte { - t.Fatalf("expected sweep sat per byte %v, got %v", - bumpFeeReq.SatPerByte, pendingSweep.SatPerByte) - } - - // Mine a block to clean up the unconfirmed transactions. - mineBlocks(t, net, 1, 2) - - // The input used to CPFP should no longer be pending. 
- err = wait.NoError(func() er.R { - req := &walletrpc.PendingSweepsRequest{} - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - resp, err := net.Bob.WalletKitClient.PendingSweeps(ctxt, req) - if err != nil { - return er.Errorf("unable to retrieve bob's pending "+ - "sweeps: %v", err) - } - if len(resp.PendingSweeps) != 0 { - return er.Errorf("expected 0 pending sweeps, found %d", - len(resp.PendingSweeps)) - } - return nil - }, defaultTimeout) - if err != nil { - t.Fatalf(err.String()) - } -} diff --git a/lnd/lntest/itest/lnd_psbt_test.go b/lnd/lntest/itest/lnd_psbt_test.go deleted file mode 100644 index 79e63881..00000000 --- a/lnd/lntest/itest/lnd_psbt_test.go +++ /dev/null @@ -1,346 +0,0 @@ -package itest - -import ( - "bytes" - "context" - "crypto/rand" - - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/lnd" - "github.com/pkt-cash/pktd/lnd/lnrpc" - "github.com/pkt-cash/pktd/lnd/lnrpc/walletrpc" - "github.com/pkt-cash/pktd/lnd/lntest" - "github.com/pkt-cash/pktd/wire" - "github.com/stretchr/testify/require" -) - -// testPsbtChanFunding makes sure a channel can be opened between carol and dave -// by using a Partially Signed Bitcoin Transaction that funds the channel -// multisig funding output. -func testPsbtChanFunding(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - const chanSize = lnd.MaxBtcFundingAmount - - // First, we'll create two new nodes that we'll use to open channels - // between for this test. Dave gets some coins that will be used to - // fund the PSBT, just to make sure that Carol has an empty wallet. 
- carol, err := net.NewNode("carol", nil) - util.RequireNoErr(t.t, err) - defer shutdownAndAssert(net, t, carol) - - dave, err := net.NewNode("dave", nil) - util.RequireNoErr(t.t, err) - defer shutdownAndAssert(net, t, dave) - err = net.SendCoins(ctxb, btcutil.UnitsPerCoin(), dave) - if err != nil { - t.Fatalf("unable to send coins to dave: %v", err) - } - - // Before we start the test, we'll ensure both sides are connected so - // the funding flow can be properly executed. - ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout) - defer cancel() - err = net.EnsureConnected(ctxt, carol, dave) - util.RequireNoErr(t.t, err) - err = net.EnsureConnected(ctxt, carol, net.Alice) - util.RequireNoErr(t.t, err) - - // At this point, we can begin our PSBT channel funding workflow. We'll - // start by generating a pending channel ID externally that will be used - // to track this new funding type. - var pendingChanID [32]byte - _, errr := rand.Read(pendingChanID[:]) - require.NoError(t.t, errr) - - // We'll also test batch funding of two channels so we need another ID. - var pendingChanID2 [32]byte - _, errr = rand.Read(pendingChanID2[:]) - require.NoError(t.t, errr) - - // Now that we have the pending channel ID, Carol will open the channel - // by specifying a PSBT shim. We use the NoPublish flag here to avoid - // publishing the whole batch TX too early. - ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout) - defer cancel() - chanUpdates, tempPsbt, err := openChannelPsbt( - ctxt, carol, dave, lntest.OpenChannelParams{ - Amt: chanSize, - FundingShim: &lnrpc.FundingShim{ - Shim: &lnrpc.FundingShim_PsbtShim{ - PsbtShim: &lnrpc.PsbtShim{ - PendingChanId: pendingChanID[:], - NoPublish: true, - }, - }, - }, - }, - ) - util.RequireNoErr(t.t, err) - - // Let's add a second channel to the batch. This time between Carol and - // Alice. We will publish the batch TX once this channel funding is - // complete. 
- ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout) - defer cancel() - chanUpdates2, psbtBytes2, err := openChannelPsbt( - ctxt, carol, net.Alice, lntest.OpenChannelParams{ - Amt: chanSize, - FundingShim: &lnrpc.FundingShim{ - Shim: &lnrpc.FundingShim_PsbtShim{ - PsbtShim: &lnrpc.PsbtShim{ - PendingChanId: pendingChanID2[:], - NoPublish: false, - BasePsbt: tempPsbt, - }, - }, - }, - }, - ) - util.RequireNoErr(t.t, err) - - // We'll now ask Dave's wallet to fund the PSBT for us. This will return - // a packet with inputs and outputs set but without any witness data. - // This is exactly what we need for the next step. - ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout) - defer cancel() - fundReq := &walletrpc.FundPsbtRequest{ - Template: &walletrpc.FundPsbtRequest_Psbt{ - Psbt: psbtBytes2, - }, - Fees: &walletrpc.FundPsbtRequest_SatPerVbyte{ - SatPerVbyte: 2, - }, - } - fundResp, errr := dave.WalletKitClient.FundPsbt(ctxt, fundReq) - require.NoError(t.t, errr) - - // We have a PSBT that has no witness data yet, which is exactly what we - // need for the next step: Verify the PSBT with the funding intents. - _, errr = carol.FundingStateStep(ctxb, &lnrpc.FundingTransitionMsg{ - Trigger: &lnrpc.FundingTransitionMsg_PsbtVerify{ - PsbtVerify: &lnrpc.FundingPsbtVerify{ - PendingChanId: pendingChanID[:], - FundedPsbt: fundResp.FundedPsbt, - }, - }, - }) - require.NoError(t.t, errr) - _, errr = carol.FundingStateStep(ctxb, &lnrpc.FundingTransitionMsg{ - Trigger: &lnrpc.FundingTransitionMsg_PsbtVerify{ - PsbtVerify: &lnrpc.FundingPsbtVerify{ - PendingChanId: pendingChanID2[:], - FundedPsbt: fundResp.FundedPsbt, - }, - }, - }) - require.NoError(t.t, errr) - - // Now we'll ask Dave's wallet to sign the PSBT so we can finish the - // funding flow. 
- ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout) - defer cancel() - finalizeReq := &walletrpc.FinalizePsbtRequest{ - FundedPsbt: fundResp.FundedPsbt, - } - finalizeRes, errr := dave.WalletKitClient.FinalizePsbt(ctxt, finalizeReq) - require.NoError(t.t, errr) - - // We've signed our PSBT now, let's pass it to the intent again. - _, errr = carol.FundingStateStep(ctxb, &lnrpc.FundingTransitionMsg{ - Trigger: &lnrpc.FundingTransitionMsg_PsbtFinalize{ - PsbtFinalize: &lnrpc.FundingPsbtFinalize{ - PendingChanId: pendingChanID[:], - SignedPsbt: finalizeRes.SignedPsbt, - }, - }, - }) - require.NoError(t.t, errr) - - // Consume the "channel pending" update. This waits until the funding - // transaction was fully compiled. - ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout) - defer cancel() - updateResp, err := receiveChanUpdate(ctxt, chanUpdates) - util.RequireNoErr(t.t, err) - upd, ok := updateResp.Update.(*lnrpc.OpenStatusUpdate_ChanPending) - require.True(t.t, ok) - chanPoint := &lnrpc.ChannelPoint{ - FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{ - FundingTxidBytes: upd.ChanPending.Txid, - }, - OutputIndex: upd.ChanPending.OutputIndex, - } - - // No transaction should have been published yet. - mempool, err := net.Miner.Node.GetRawMempool() - util.RequireNoErr(t.t, err) - require.Equal(t.t, 0, len(mempool)) - - // Let's progress the second channel now. This time we'll use the raw - // wire format transaction directly. - util.RequireNoErr(t.t, err) - _, errr = carol.FundingStateStep(ctxb, &lnrpc.FundingTransitionMsg{ - Trigger: &lnrpc.FundingTransitionMsg_PsbtFinalize{ - PsbtFinalize: &lnrpc.FundingPsbtFinalize{ - PendingChanId: pendingChanID2[:], - FinalRawTx: finalizeRes.RawFinalTx, - }, - }, - }) - require.NoError(t.t, errr) - - // Consume the "channel pending" update for the second channel. This - // waits until the funding transaction was fully compiled and in this - // case published. 
- ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout) - defer cancel() - updateResp2, err := receiveChanUpdate(ctxt, chanUpdates2) - util.RequireNoErr(t.t, err) - upd2, ok := updateResp2.Update.(*lnrpc.OpenStatusUpdate_ChanPending) - require.True(t.t, ok) - chanPoint2 := &lnrpc.ChannelPoint{ - FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{ - FundingTxidBytes: upd2.ChanPending.Txid, - }, - OutputIndex: upd2.ChanPending.OutputIndex, - } - - // Great, now we can mine a block to get the transaction confirmed, then - // wait for the new channel to be propagated through the network. - var finalTx wire.MsgTx - err = finalTx.Deserialize(bytes.NewReader(finalizeRes.RawFinalTx)) - util.RequireNoErr(t.t, err) - - txHash := finalTx.TxHash() - block := mineBlocks(t, net, 6, 1)[0] - assertTxInBlock(t, block, &txHash) - ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout) - defer cancel() - err = carol.WaitForNetworkChannelOpen(ctxt, chanPoint) - util.RequireNoErr(t.t, err) - err = carol.WaitForNetworkChannelOpen(ctxt, chanPoint2) - util.RequireNoErr(t.t, err) - - // With the channel open, ensure that it is counted towards Carol's - // total channel balance. - balReq := &lnrpc.ChannelBalanceRequest{} - ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout) - defer cancel() - balRes, errr := carol.ChannelBalance(ctxt, balReq) - require.NoError(t.t, errr) - require.NotEqual(t.t, int64(0), balRes.LocalBalance.Sat) - - // Next, to make sure the channel functions as normal, we'll make some - // payments within the channel. 
- payAmt := btcutil.Amount(100000) - invoice := &lnrpc.Invoice{ - Memo: "new chans", - Value: int64(payAmt), - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - resp, errr := dave.AddInvoice(ctxt, invoice) - require.NoError(t.t, errr) - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = completePaymentRequests( - ctxt, carol, carol.RouterClient, []string{resp.PaymentRequest}, - true, - ) - util.RequireNoErr(t.t, err) - - // To conclude, we'll close the newly created channel between Carol and - // Dave. This function will also block until the channel is closed and - // will additionally assert the relevant channel closing post - // conditions. - ctxt, cancel = context.WithTimeout(ctxb, channelCloseTimeout) - defer cancel() - closeChannelAndAssert(ctxt, t, net, carol, chanPoint, false) -} - -// openChannelPsbt attempts to open a channel between srcNode and destNode with -// the passed channel funding parameters. If the passed context has a timeout, -// then if the timeout is reached before the channel pending notification is -// received, an error is returned. An error is returned if the expected step -// of funding the PSBT is not received from the source node. -func openChannelPsbt(ctx context.Context, srcNode, destNode *lntest.HarnessNode, - p lntest.OpenChannelParams) (lnrpc.Lightning_OpenChannelClient, []byte, - er.R) { - - // Wait until srcNode and destNode have the latest chain synced. - // Otherwise, we may run into a check within the funding manager that - // prevents any funding workflows from being kicked off if the chain - // isn't yet synced. - if err := srcNode.WaitForBlockchainSync(ctx); err != nil { - return nil, nil, er.Errorf("unable to sync srcNode chain: %v", - err) - } - if err := destNode.WaitForBlockchainSync(ctx); err != nil { - return nil, nil, er.Errorf("unable to sync destNode chain: %v", - err) - } - - // Send the request to open a channel to the source node now. 
This will - // open a long-lived stream where we'll receive status updates about the - // progress of the channel. - respStream, errr := srcNode.OpenChannel(ctx, &lnrpc.OpenChannelRequest{ - NodePubkey: destNode.PubKey[:], - LocalFundingAmount: int64(p.Amt), - PushSat: int64(p.PushAmt), - Private: p.Private, - SpendUnconfirmed: p.SpendUnconfirmed, - MinHtlcMsat: int64(p.MinHtlc), - FundingShim: p.FundingShim, - }) - if errr != nil { - return nil, nil, er.Errorf("unable to open channel between "+ - "source and dest: %v", errr) - } - - // Consume the "PSBT funding ready" update. This waits until the node - // notifies us that the PSBT can now be funded. - resp, err := receiveChanUpdate(ctx, respStream) - if err != nil { - return nil, nil, er.Errorf("unable to consume channel update "+ - "message: %v", err) - } - upd, ok := resp.Update.(*lnrpc.OpenStatusUpdate_PsbtFund) - if !ok { - return nil, nil, er.Errorf("expected PSBT funding update, "+ - "instead got %v", resp) - } - return respStream, upd.PsbtFund.Psbt, nil -} - -// receiveChanUpdate waits until a message is received on the stream or the -// context is canceled. The context must have a timeout or must be canceled -// in case no message is received, otherwise this function will block forever. -func receiveChanUpdate(ctx context.Context, - stream lnrpc.Lightning_OpenChannelClient) (*lnrpc.OpenStatusUpdate, - er.R) { - - chanMsg := make(chan *lnrpc.OpenStatusUpdate) - errChan := make(chan er.R) - go func() { - // Consume one message. This will block until the message is - // received. 
- resp, err := stream.Recv() - if err != nil { - errChan <- er.E(err) - return - } - chanMsg <- resp - }() - - select { - case <-ctx.Done(): - return nil, er.Errorf("timeout reached before chan pending " + - "update sent") - - case err := <-errChan: - return nil, err - - case updateMsg := <-chanMsg: - return updateMsg, nil - } -} diff --git a/lnd/lntest/itest/lnd_rest_api_test.go b/lnd/lntest/itest/lnd_rest_api_test.go deleted file mode 100644 index 61383212..00000000 --- a/lnd/lntest/itest/lnd_rest_api_test.go +++ /dev/null @@ -1,526 +0,0 @@ -package itest - -import ( - "bytes" - "context" - "crypto/tls" - "encoding/base64" - "encoding/hex" - "fmt" - "io" - "io/ioutil" - "net/http" - "regexp" - "strings" - "testing" - "time" - - "github.com/golang/protobuf/jsonpb" - "github.com/golang/protobuf/proto" - "github.com/gorilla/websocket" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/lnd/lnrpc" - "github.com/pkt-cash/pktd/lnd/lnrpc/autopilotrpc" - "github.com/pkt-cash/pktd/lnd/lnrpc/chainrpc" - "github.com/pkt-cash/pktd/lnd/lnrpc/routerrpc" - "github.com/pkt-cash/pktd/lnd/lnrpc/verrpc" - "github.com/pkt-cash/pktd/lnd/lnrpc/walletrpc" - "github.com/pkt-cash/pktd/lnd/lntest" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -var ( - insecureTransport = &http.Transport{ - TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, - } - restClient = &http.Client{ - Transport: insecureTransport, - } - jsonMarshaler = &jsonpb.Marshaler{ - EmitDefaults: true, - OrigName: true, - Indent: " ", - } - urlEnc = base64.URLEncoding - webSocketDialer = &websocket.Dialer{ - HandshakeTimeout: 45 * time.Second, - TLSClientConfig: insecureTransport.TLSClientConfig, - } - resultPattern = regexp.MustCompile("{\"result\":(.*)}") -) - -// testRestAPI tests that the most important features of the REST API work -// correctly. 
-func testRestAPI(net *lntest.NetworkHarness, ht *harnessTest) { - testCases := []struct { - name string - run func(*testing.T, *lntest.HarnessNode, *lntest.HarnessNode) - }{{ - name: "simple GET", - run: func(t *testing.T, a, b *lntest.HarnessNode) { - // Check that the parsing into the response proto - // message works. - resp := &lnrpc.GetInfoResponse{} - err := invokeGET(a, "/v1/getinfo", resp) - require.Nil(t, err, "getinfo") - assert.Equal(t, "#3399ff", resp.Color, "node color") - - // Make sure we get the correct field names (snake - // case). - _, resp2, err := makeRequest( - a, "/v1/getinfo", "GET", nil, nil, - ) - require.Nil(t, err, "getinfo") - assert.Contains( - t, string(resp2), "best_header_timestamp", - "getinfo", - ) - }, - }, { - name: "simple POST and GET with query param", - run: func(t *testing.T, a, b *lntest.HarnessNode) { - // Add an invoice, testing POST in the process. - req := &lnrpc.Invoice{Value: 1234} - resp := &lnrpc.AddInvoiceResponse{} - err := invokePOST(a, "/v1/invoices", req, resp) - require.Nil(t, err, "add invoice") - assert.Equal(t, 32, len(resp.RHash), "invoice rhash") - - // Make sure we can call a GET endpoint with a hex - // encoded URL part. 
- url := fmt.Sprintf("/v1/invoice/%x", resp.RHash) - resp2 := &lnrpc.Invoice{} - err = invokeGET(a, url, resp2) - require.Nil(t, err, "query invoice") - assert.Equal(t, int64(1234), resp2.Value, "invoice amt") - }, - }, { - name: "GET with base64 encoded byte slice in path", - run: func(t *testing.T, a, b *lntest.HarnessNode) { - url := "/v2/router/mc/probability/%s/%s/%d" - url = fmt.Sprintf( - url, urlEnc.EncodeToString(a.PubKey[:]), - urlEnc.EncodeToString(b.PubKey[:]), 1234, - ) - resp := &routerrpc.QueryProbabilityResponse{} - err := invokeGET(a, url, resp) - require.Nil(t, err, "query probability") - assert.Greater(t, resp.Probability, 0.5, "probability") - }, - }, { - name: "GET with map type query param", - run: func(t *testing.T, a, b *lntest.HarnessNode) { - // Get a new wallet address from Alice. - ctxb := context.Background() - newAddrReq := &lnrpc.NewAddressRequest{ - Type: lnrpc.AddressType_WITNESS_PUBKEY_HASH, - } - addrRes, errr := a.NewAddress(ctxb, newAddrReq) - require.Nil(t, errr, "get address") - - // Create the full URL with the map query param. - url := "/v1/transactions/fee?target_conf=%d&" + - "AddrToAmount[%s]=%d" - url = fmt.Sprintf(url, 2, addrRes.Address, 50000) - resp := &lnrpc.EstimateFeeResponse{} - err := invokeGET(a, url, resp) - require.Nil(t, err, "estimate fee") - assert.Greater(t, resp.FeeSat, int64(253), "fee") - }, - }, { - name: "sub RPC servers REST support", - run: func(t *testing.T, a, b *lntest.HarnessNode) { - // Query autopilot status. - res1 := &autopilotrpc.StatusResponse{} - err := invokeGET(a, "/v2/autopilot/status", res1) - require.Nil(t, err, "autopilot status") - assert.Equal(t, false, res1.Active, "autopilot status") - - // Query the version RPC. - res2 := &verrpc.Version{} - err = invokeGET(a, "/v2/versioner/version", res2) - require.Nil(t, err, "version") - assert.Greater( - t, res2.AppMinor, uint32(0), "lnd minor version", - ) - - // Request a new external address from the wallet kit. 
- req1 := &walletrpc.AddrRequest{} - res3 := &walletrpc.AddrResponse{} - err = invokePOST( - a, "/v2/wallet/address/next", req1, res3, - ) - require.Nil(t, err, "address") - assert.NotEmpty(t, res3.Addr, "address") - }, - }, { - name: "CORS headers", - run: func(t *testing.T, a, b *lntest.HarnessNode) { - // Alice allows all origins. Make sure we get the same - // value back in the CORS header that we send in the - // Origin header. - reqHeaders := make(http.Header) - reqHeaders.Add("Origin", "https://foo.bar:9999") - resHeaders, body, err := makeRequest( - a, "/v1/getinfo", "OPTIONS", nil, reqHeaders, - ) - require.Nil(t, err, "getinfo") - assert.Equal( - t, "https://foo.bar:9999", - resHeaders.Get("Access-Control-Allow-Origin"), - "CORS header", - ) - assert.Equal(t, 0, len(body)) - - // Make sure that we don't get a value set for Bob which - // doesn't allow any CORS origin. - resHeaders, body, err = makeRequest( - b, "/v1/getinfo", "OPTIONS", nil, reqHeaders, - ) - require.Nil(t, err, "getinfo") - assert.Equal( - t, "", - resHeaders.Get("Access-Control-Allow-Origin"), - "CORS header", - ) - assert.Equal(t, 0, len(body)) - }, - }, { - name: "websocket subscription", - run: func(t *testing.T, a, b *lntest.HarnessNode) { - // Find out the current best block so we can subscribe - // to the next one. - hash, height, err := net.Miner.Node.GetBestBlock() - require.Nil(t, err, "get best block") - - // Create a new subscription to get block epoch events. 
- req := &chainrpc.BlockEpoch{ - Hash: hash.CloneBytes(), - Height: uint32(height), - } - url := "/v2/chainnotifier/register/blocks" - c, errr := openWebSocket(a, url, "POST", req, nil) - require.Nil(t, errr, "websocket") - defer func() { - _ = c.WriteMessage( - websocket.CloseMessage, - websocket.FormatCloseMessage( - websocket.CloseNormalClosure, - "done", - ), - ) - _ = c.Close() - }() - - msgChan := make(chan *chainrpc.BlockEpoch) - errChan := make(chan er.R) - timeout := time.After(defaultTimeout) - - // We want to read exactly one message. - go func() { - defer close(msgChan) - - _, msg, err := c.ReadMessage() - if err != nil { - errChan <- er.E(err) - return - } - - // The chunked/streamed responses come wrapped - // in either a {"result":{}} or {"error":{}} - // wrapper which we'll get rid of here. - msgStr := string(msg) - if !strings.Contains(msgStr, "\"result\":") { - errChan <- er.Errorf("invalid msg: %s", - msgStr) - return - } - msgStr = resultPattern.ReplaceAllString( - msgStr, "${1}", - ) - - // Make sure we can parse the unwrapped message - // into the expected proto message. - protoMsg := &chainrpc.BlockEpoch{} - err = jsonpb.UnmarshalString( - msgStr, protoMsg, - ) - if err != nil { - errChan <- er.E(err) - return - } - - select { - case msgChan <- protoMsg: - case <-timeout: - } - }() - - // Mine a block and make sure we get a message for it. - blockHashes, err := net.Miner.Node.Generate(1) - require.Nil(t, err, "generate blocks") - assert.Equal(t, 1, len(blockHashes), "num blocks") - select { - case msg := <-msgChan: - assert.Equal( - t, blockHashes[0].CloneBytes(), - msg.Hash, "block hash", - ) - - case err := <-errChan: - t.Fatalf("Received error from WS: %v", err) - - case <-timeout: - t.Fatalf("Timeout before message was received") - } - }, - }, { - name: "websocket subscription with macaroon in protocol", - run: func(t *testing.T, a, b *lntest.HarnessNode) { - // Find out the current best block so we can subscribe - // to the next one. 
- hash, height, err := net.Miner.Node.GetBestBlock() - require.Nil(t, err, "get best block") - - // Create a new subscription to get block epoch events. - req := &chainrpc.BlockEpoch{ - Hash: hash.CloneBytes(), - Height: uint32(height), - } - url := "/v2/chainnotifier/register/blocks" - - // This time we send the macaroon in the special header - // Sec-Websocket-Protocol which is the only header field - // available to browsers when opening a WebSocket. - mac, err := a.ReadMacaroon( - a.AdminMacPath(), defaultTimeout, - ) - util.RequireNoErr(t, err, "read admin mac") - macBytes, errr := mac.MarshalBinary() - require.NoError(t, errr, "marshal admin mac") - - customHeader := make(http.Header) - customHeader.Set( - lnrpc.HeaderWebSocketProtocol, fmt.Sprintf( - "Grpc-Metadata-Macaroon+%s", - hex.EncodeToString(macBytes), - ), - ) - c, err := openWebSocket( - a, url, "POST", req, customHeader, - ) - require.Nil(t, err, "websocket") - defer func() { - _ = c.WriteMessage( - websocket.CloseMessage, - websocket.FormatCloseMessage( - websocket.CloseNormalClosure, - "done", - ), - ) - _ = c.Close() - }() - - msgChan := make(chan *chainrpc.BlockEpoch) - errChan := make(chan er.R) - timeout := time.After(defaultTimeout) - - // We want to read exactly one message. - go func() { - defer close(msgChan) - - _, msg, err := c.ReadMessage() - if err != nil { - errChan <- er.E(err) - return - } - - // The chunked/streamed responses come wrapped - // in either a {"result":{}} or {"error":{}} - // wrapper which we'll get rid of here. - msgStr := string(msg) - if !strings.Contains(msgStr, "\"result\":") { - errChan <- er.Errorf("invalid msg: %s", - msgStr) - return - } - msgStr = resultPattern.ReplaceAllString( - msgStr, "${1}", - ) - - // Make sure we can parse the unwrapped message - // into the expected proto message. 
- protoMsg := &chainrpc.BlockEpoch{} - err = jsonpb.UnmarshalString( - msgStr, protoMsg, - ) - if err != nil { - errChan <- er.E(err) - return - } - - select { - case msgChan <- protoMsg: - case <-timeout: - } - }() - - // Mine a block and make sure we get a message for it. - blockHashes, err := net.Miner.Node.Generate(1) - require.Nil(t, err, "generate blocks") - assert.Equal(t, 1, len(blockHashes), "num blocks") - select { - case msg := <-msgChan: - assert.Equal( - t, blockHashes[0].CloneBytes(), - msg.Hash, "block hash", - ) - - case err := <-errChan: - t.Fatalf("Received error from WS: %v", err) - - case <-timeout: - t.Fatalf("Timeout before message was received") - } - }, - }} - - // Make sure Alice allows all CORS origins. Bob will keep the default. - net.Alice.Cfg.ExtraArgs = append( - net.Alice.Cfg.ExtraArgs, "--restcors=\"*\"", - ) - err := net.RestartNode(net.Alice, nil) - if err != nil { - ht.t.Fatalf("Could not restart Alice to set CORS config: %v", - err) - } - - for _, tc := range testCases { - tc := tc - ht.t.Run(tc.name, func(t *testing.T) { - tc.run(t, net.Alice, net.Bob) - }) - } -} - -// invokeGET calls the given URL with the GET method and appropriate macaroon -// header fields then tries to unmarshal the response into the given response -// proto message. -func invokeGET(node *lntest.HarnessNode, url string, resp proto.Message) er.R { - _, rawResp, err := makeRequest(node, url, "GET", nil, nil) - if err != nil { - return err - } - - return er.E(jsonpb.Unmarshal(bytes.NewReader(rawResp), resp)) -} - -// invokePOST calls the given URL with the POST method, request body and -// appropriate macaroon header fields then tries to unmarshal the response into -// the given response proto message. -func invokePOST(node *lntest.HarnessNode, url string, req, - resp proto.Message) er.R { - - // Marshal the request to JSON using the jsonpb marshaler to get correct - // field names. 
- var buf bytes.Buffer - if errr := jsonMarshaler.Marshal(&buf, req); errr != nil { - return er.E(errr) - } - - _, rawResp, err := makeRequest(node, url, "POST", &buf, nil) - if err != nil { - return err - } - - return er.E(jsonpb.Unmarshal(bytes.NewReader(rawResp), resp)) -} - -// makeRequest calls the given URL with the given method, request body and -// appropriate macaroon header fields and returns the raw response body. -func makeRequest(node *lntest.HarnessNode, url, method string, - request io.Reader, additionalHeaders http.Header) (http.Header, []byte, - er.R) { - - // Assemble the full URL from the node's listening address then create - // the request so we can set the macaroon on it. - fullURL := fmt.Sprintf("https://%s%s", node.Cfg.RESTAddr(), url) - req, errr := http.NewRequest(method, fullURL, request) - if errr != nil { - return nil, nil, er.E(errr) - } - if err := addAdminMacaroon(node, req.Header); err != nil { - return nil, nil, err - } - for key, values := range additionalHeaders { - for _, value := range values { - req.Header.Add(key, value) - } - } - - // Do the actual call with the completed request object now. - resp, errr := restClient.Do(req) - if errr != nil { - return nil, nil, er.E(errr) - } - defer func() { _ = resp.Body.Close() }() - - data, errr := ioutil.ReadAll(resp.Body) - return resp.Header, data, er.E(errr) -} - -// openWebSocket opens a new WebSocket connection to the given URL with the -// appropriate macaroon headers and sends the request message over the socket. -func openWebSocket(node *lntest.HarnessNode, url, method string, - req proto.Message, customHeader http.Header) (*websocket.Conn, er.R) { - - // Prepare our macaroon headers and assemble the full URL from the - // node's listening address. WebSockets always work over GET so we need - // to append the target request method as a query parameter. 
- header := customHeader - if header == nil { - header = make(http.Header) - if err := addAdminMacaroon(node, header); err != nil { - return nil, err - } - } - fullURL := fmt.Sprintf( - "wss://%s%s?method=%s", node.Cfg.RESTAddr(), url, method, - ) - conn, resp, errr := webSocketDialer.Dial(fullURL, header) - if errr != nil { - return nil, er.E(errr) - } - defer func() { _ = resp.Body.Close() }() - - // Send the given request message as the first message on the socket. - reqMsg, errr := jsonMarshaler.MarshalToString(req) - if errr != nil { - return nil, er.E(errr) - } - errr = conn.WriteMessage(websocket.TextMessage, []byte(reqMsg)) - if errr != nil { - return nil, er.E(errr) - } - - return conn, nil -} - -// addAdminMacaroon reads the admin macaroon from the node and appends it to -// the HTTP header fields. -func addAdminMacaroon(node *lntest.HarnessNode, header http.Header) er.R { - mac, err := node.ReadMacaroon(node.AdminMacPath(), defaultTimeout) - if err != nil { - return err - } - macBytes, errr := mac.MarshalBinary() - if errr != nil { - return er.E(errr) - } - - header.Set("Grpc-Metadata-Macaroon", hex.EncodeToString(macBytes)) - - return nil -} diff --git a/lnd/lntest/itest/lnd_send_multi_path_payment_test.go b/lnd/lntest/itest/lnd_send_multi_path_payment_test.go deleted file mode 100644 index 5239600a..00000000 --- a/lnd/lntest/itest/lnd_send_multi_path_payment_test.go +++ /dev/null @@ -1,139 +0,0 @@ -package itest - -import ( - "context" - "encoding/hex" - - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/lnd/lnrpc" - "github.com/pkt-cash/pktd/lnd/lnrpc/routerrpc" - "github.com/pkt-cash/pktd/lnd/lntest" -) - -// testSendMultiPathPayment tests that we are able to successfully route a -// payment using multiple shards across different paths. 
-func testSendMultiPathPayment(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - ctx := newMppTestContext(t, net) - defer ctx.shutdownNodes() - - const paymentAmt = btcutil.Amount(300000) - - // Set up a network with three different paths Alice <-> Bob. Channel - // capacities are set such that the payment can only succeed if (at - // least) three paths are used. - // - // _ Eve _ - // / \ - // Alice -- Carol ---- Bob - // \ / - // \__ Dave ____/ - // - ctx.openChannel(ctx.carol, ctx.bob, 135000) - ctx.openChannel(ctx.alice, ctx.carol, 235000) - ctx.openChannel(ctx.dave, ctx.bob, 135000) - ctx.openChannel(ctx.alice, ctx.dave, 135000) - ctx.openChannel(ctx.eve, ctx.bob, 135000) - ctx.openChannel(ctx.carol, ctx.eve, 135000) - - defer ctx.closeChannels() - - ctx.waitForChannels() - - // Increase Dave's fee to make the test deterministic. Otherwise it - // would be unpredictable whether pathfinding would go through Charlie - // or Dave for the first shard. - _, errr := ctx.dave.UpdateChannelPolicy( - context.Background(), - &lnrpc.PolicyUpdateRequest{ - Scope: &lnrpc.PolicyUpdateRequest_Global{Global: true}, - BaseFeeMsat: 500000, - FeeRate: 0.001, - TimeLockDelta: 40, - }, - ) - if errr != nil { - t.Fatalf("dave policy update: %v", errr) - } - // Our first test will be Alice paying Bob using a SendPayment call. - // Let Bob create an invoice for Alice to pay. - payReqs, rHashes, invoices, err := createPayReqs( - net.Bob, paymentAmt, 1, - ) - if err != nil { - t.Fatalf("unable to create pay reqs: %v", err) - } - - rHash := rHashes[0] - payReq := payReqs[0] - - payment := sendAndAssertSuccess( - t, net.Alice, - &routerrpc.SendPaymentRequest{ - PaymentRequest: payReq, - MaxParts: 10, - TimeoutSeconds: 60, - FeeLimitMsat: noFeeLimitMsat, - }, - ) - - // Make sure we got the preimage. 
- if payment.PaymentPreimage != hex.EncodeToString(invoices[0].RPreimage) { - t.Fatalf("preimage doesn't match") - } - - // Check that Alice split the payment in at least three shards. Because - // the hand-off of the htlc to the link is asynchronous (via a mailbox), - // there is some non-determinism in the process. Depending on whether - // the new pathfinding round is started before or after the htlc is - // locked into the channel, different sharding may occur. Therefore we - // can only check if the number of shards isn't below the theoretical - // minimum. - succeeded := 0 - for _, htlc := range payment.Htlcs { - if htlc.Status == lnrpc.HTLCAttempt_SUCCEEDED { - succeeded++ - } - } - - const minExpectedShards = 3 - if succeeded < minExpectedShards { - t.Fatalf("expected at least %v shards, but got %v", - minExpectedShards, succeeded) - } - - // Make sure Bob show the invoice as settled for the full - // amount. - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - inv, errr := ctx.bob.LookupInvoice( - ctxt, &lnrpc.PaymentHash{ - RHash: rHash, - }, - ) - if errr != nil { - t.Fatalf("error when obtaining invoice: %v", errr) - } - - if inv.AmtPaidSat != int64(paymentAmt) { - t.Fatalf("incorrect payment amt for invoice"+ - "want: %d, got %d", - paymentAmt, inv.AmtPaidSat) - } - - if inv.State != lnrpc.Invoice_SETTLED { - t.Fatalf("Invoice not settled: %v", inv.State) - } - - settled := 0 - for _, htlc := range inv.Htlcs { - if htlc.State == lnrpc.InvoiceHTLCState_SETTLED { - settled++ - } - - } - if settled != succeeded { - t.Fatalf("expected invoice to be settled "+ - "with %v HTLCs, had %v", succeeded, settled) - } -} diff --git a/lnd/lntest/itest/lnd_signer_test.go b/lnd/lntest/itest/lnd_signer_test.go deleted file mode 100644 index 2f278169..00000000 --- a/lnd/lntest/itest/lnd_signer_test.go +++ /dev/null @@ -1,205 +0,0 @@ -package itest - -import ( - "context" - - "github.com/pkt-cash/pktd/btcec" - "github.com/pkt-cash/pktd/btcutil/er" - 
"github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/lnd/keychain" - "github.com/pkt-cash/pktd/lnd/lnrpc/signrpc" - "github.com/pkt-cash/pktd/lnd/lntest" - "github.com/stretchr/testify/require" -) - -// testDeriveSharedKey checks the ECDH performed by the endpoint -// DeriveSharedKey. It creates an ephemeral private key, performing an ECDH with -// the node's pubkey and a customized public key to check the validity of the -// result. -func testDeriveSharedKey(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - // Create an ephemeral key, extracts its public key, and make a - // PrivKeyECDH using the ephemeral key. - ephemeralPriv, err := btcec.NewPrivateKey(btcec.S256()) - util.RequireNoErr(t.t, err, "failed to create ephemeral key") - - ephemeralPubBytes := ephemeralPriv.PubKey().SerializeCompressed() - privKeyECDH := &keychain.PrivKeyECDH{PrivKey: ephemeralPriv} - - // assertECDHMatch checks the correctness of the ECDH between the - // ephemeral key and the given public key. - assertECDHMatch := func(pub *btcec.PublicKey, - req *signrpc.SharedKeyRequest) { - - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - resp, errr := net.Alice.SignerClient.DeriveSharedKey(ctxt, req) - require.NoError(t.t, errr, "calling DeriveSharedKey failed") - - sharedKey, _ := privKeyECDH.ECDH(pub) - require.Equal( - t.t, sharedKey[:], resp.SharedKey, - "failed to derive the expected key", - ) - } - - nodePub, err := btcec.ParsePubKey(net.Alice.PubKey[:], btcec.S256()) - util.RequireNoErr(t.t, err, "failed to parse node pubkey") - - customizedKeyFamily := int32(keychain.KeyFamilyMultiSig) - customizedIndex := int32(1) - customizedPub, errr := deriveCustomizedKey( - ctxb, net.Alice, customizedKeyFamily, customizedIndex, - ) - util.RequireNoErr(t.t, errr, "failed to create customized pubkey") - - // Test DeriveSharedKey with no optional arguments. It will result in - // performing an ECDH between the ephemeral key and the node's pubkey. 
- req := &signrpc.SharedKeyRequest{EphemeralPubkey: ephemeralPubBytes} - assertECDHMatch(nodePub, req) - - // Test DeriveSharedKey with a KeyLoc which points to the node's pubkey. - req = &signrpc.SharedKeyRequest{ - EphemeralPubkey: ephemeralPubBytes, - KeyLoc: &signrpc.KeyLocator{ - KeyFamily: int32(keychain.KeyFamilyNodeKey), - KeyIndex: 0, - }, - } - assertECDHMatch(nodePub, req) - - // Test DeriveSharedKey with a KeyLoc being set in KeyDesc. The KeyLoc - // points to the node's pubkey. - req = &signrpc.SharedKeyRequest{ - EphemeralPubkey: ephemeralPubBytes, - KeyDesc: &signrpc.KeyDescriptor{ - KeyLoc: &signrpc.KeyLocator{ - KeyFamily: int32(keychain.KeyFamilyNodeKey), - KeyIndex: 0, - }, - }, - } - assertECDHMatch(nodePub, req) - - // Test DeriveSharedKey with RawKeyBytes set in KeyDesc. The RawKeyBytes - // is the node's pubkey bytes, and the KeyFamily is KeyFamilyNodeKey. - req = &signrpc.SharedKeyRequest{ - EphemeralPubkey: ephemeralPubBytes, - KeyDesc: &signrpc.KeyDescriptor{ - RawKeyBytes: net.Alice.PubKey[:], - KeyLoc: &signrpc.KeyLocator{ - KeyFamily: int32(keychain.KeyFamilyNodeKey), - }, - }, - } - assertECDHMatch(nodePub, req) - - // Test DeriveSharedKey with a KeyLoc which points to the customized - // public key. - req = &signrpc.SharedKeyRequest{ - EphemeralPubkey: ephemeralPubBytes, - KeyLoc: &signrpc.KeyLocator{ - KeyFamily: customizedKeyFamily, - KeyIndex: customizedIndex, - }, - } - assertECDHMatch(customizedPub, req) - - // Test DeriveSharedKey with a KeyLoc being set in KeyDesc. The KeyLoc - // points to the customized public key. - req = &signrpc.SharedKeyRequest{ - EphemeralPubkey: ephemeralPubBytes, - KeyDesc: &signrpc.KeyDescriptor{ - KeyLoc: &signrpc.KeyLocator{ - KeyFamily: customizedKeyFamily, - KeyIndex: customizedIndex, - }, - }, - } - assertECDHMatch(customizedPub, req) - - // Test DeriveSharedKey with RawKeyBytes set in KeyDesc. The RawKeyBytes - // is the customized public key. 
The KeyLoc is also set with the family - // being the customizedKeyFamily. - req = &signrpc.SharedKeyRequest{ - EphemeralPubkey: ephemeralPubBytes, - KeyDesc: &signrpc.KeyDescriptor{ - RawKeyBytes: customizedPub.SerializeCompressed(), - KeyLoc: &signrpc.KeyLocator{ - KeyFamily: customizedKeyFamily, - }, - }, - } - assertECDHMatch(customizedPub, req) - - // assertErrorMatch checks when calling DeriveSharedKey with invalid - // params, the expected error is returned. - assertErrorMatch := func(match string, req *signrpc.SharedKeyRequest) { - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - _, errr := net.Alice.SignerClient.DeriveSharedKey(ctxt, req) - require.NoError(t.t, errr, "expected to have an error") - require.Contains( - t.t, errr.Error(), match, "error failed to match", - ) - } - - // Test that EphemeralPubkey must be supplied. - req = &signrpc.SharedKeyRequest{} - assertErrorMatch("must provide ephemeral pubkey", req) - - // Test that cannot use both KeyDesc and KeyLoc. - req = &signrpc.SharedKeyRequest{ - EphemeralPubkey: ephemeralPubBytes, - KeyDesc: &signrpc.KeyDescriptor{ - RawKeyBytes: customizedPub.SerializeCompressed(), - }, - KeyLoc: &signrpc.KeyLocator{ - KeyFamily: customizedKeyFamily, - KeyIndex: 0, - }, - } - assertErrorMatch("use either key_desc or key_loc", req) - - // Test when KeyDesc is used, KeyLoc must be set. - req = &signrpc.SharedKeyRequest{ - EphemeralPubkey: ephemeralPubBytes, - KeyDesc: &signrpc.KeyDescriptor{ - RawKeyBytes: net.Alice.PubKey[:], - }, - } - assertErrorMatch("key_desc.key_loc must also be set", req) - - // Test that cannot use both RawKeyBytes and KeyIndex. 
- req = &signrpc.SharedKeyRequest{ - EphemeralPubkey: ephemeralPubBytes, - KeyDesc: &signrpc.KeyDescriptor{ - RawKeyBytes: customizedPub.SerializeCompressed(), - KeyLoc: &signrpc.KeyLocator{ - KeyFamily: customizedKeyFamily, - KeyIndex: 1, - }, - }, - } - assertErrorMatch("use either raw_key_bytes or key_index", req) -} - -// deriveCustomizedKey uses the family and index to derive a public key from -// the node's walletkit client. -func deriveCustomizedKey(ctx context.Context, node *lntest.HarnessNode, - family, index int32) (*btcec.PublicKey, er.R) { - - ctxt, _ := context.WithTimeout(ctx, defaultTimeout) - req := &signrpc.KeyLocator{ - KeyFamily: family, - KeyIndex: index, - } - resp, errr := node.WalletKitClient.DeriveKey(ctxt, req) - if errr != nil { - return nil, er.Errorf("failed to derive key: %v", errr) - } - pub, err := btcec.ParsePubKey(resp.RawKeyBytes, btcec.S256()) - if err != nil { - return nil, er.Errorf("failed to parse node pubkey: %v", err) - } - return pub, nil -} diff --git a/lnd/lntest/itest/lnd_single_hop_invoice_test.go b/lnd/lntest/itest/lnd_single_hop_invoice_test.go deleted file mode 100644 index 60fb47d0..00000000 --- a/lnd/lntest/itest/lnd_single_hop_invoice_test.go +++ /dev/null @@ -1,229 +0,0 @@ -package itest - -import ( - "bytes" - "context" - "encoding/hex" - "time" - - "github.com/davecgh/go-spew/spew" - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/lnd/lnrpc" - "github.com/pkt-cash/pktd/lnd/lnrpc/routerrpc" - "github.com/pkt-cash/pktd/lnd/lntest" - "github.com/pkt-cash/pktd/lnd/lntest/wait" - "github.com/pkt-cash/pktd/lnd/lntypes" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/record" -) - -func testSingleHopInvoice(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - // Open a channel with 100k satoshis between Alice and Bob with Alice being - // the sole funder of the channel. 
- ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout) - chanAmt := btcutil.Amount(100000) - chanPoint := openChannelAndAssert( - ctxt, t, net, net.Alice, net.Bob, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - - // Now that the channel is open, create an invoice for Bob which - // expects a payment of 1000 satoshis from Alice paid via a particular - // preimage. - const paymentAmt = 1000 - preimage := bytes.Repeat([]byte("A"), 32) - invoice := &lnrpc.Invoice{ - Memo: "testing", - RPreimage: preimage, - Value: paymentAmt, - } - invoiceResp, errr := net.Bob.AddInvoice(ctxb, invoice) - if errr != nil { - t.Fatalf("unable to add invoice: %v", errr) - } - - // Wait for Alice to recognize and advertise the new channel generated - // above. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err := net.Alice.WaitForNetworkChannelOpen(ctxt, chanPoint) - if err != nil { - t.Fatalf("alice didn't advertise channel before "+ - "timeout: %v", err) - } - err = net.Bob.WaitForNetworkChannelOpen(ctxt, chanPoint) - if err != nil { - t.Fatalf("bob didn't advertise channel before "+ - "timeout: %v", err) - } - - // With the invoice for Bob added, send a payment towards Alice paying - // to the above generated invoice. - resp := sendAndAssertSuccess( - t, net.Alice, - &routerrpc.SendPaymentRequest{ - PaymentRequest: invoiceResp.PaymentRequest, - TimeoutSeconds: 60, - FeeLimitMsat: noFeeLimitMsat, - }, - ) - if hex.EncodeToString(preimage) != resp.PaymentPreimage { - t.Fatalf("preimage mismatch: expected %v, got %v", preimage, - resp.PaymentPreimage) - } - - // Bob's invoice should now be found and marked as settled. 
- payHash := &lnrpc.PaymentHash{ - RHash: invoiceResp.RHash, - } - ctxt, _ = context.WithTimeout(ctxt, defaultTimeout) - dbInvoice, errr := net.Bob.LookupInvoice(ctxt, payHash) - if errr != nil { - t.Fatalf("unable to lookup invoice: %v", errr) - } - if !dbInvoice.Settled { - t.Fatalf("bob's invoice should be marked as settled: %v", - spew.Sdump(dbInvoice)) - } - - // With the payment completed all balance related stats should be - // properly updated. - err = wait.NoError( - assertAmountSent(paymentAmt, net.Alice, net.Bob), - 3*time.Second, - ) - if err != nil { - t.Fatalf(err.String()) - } - - // Create another invoice for Bob, this time leaving off the preimage - // to one will be randomly generated. We'll test the proper - // encoding/decoding of the zpay32 payment requests. - invoice = &lnrpc.Invoice{ - Memo: "test3", - Value: paymentAmt, - } - ctxt, _ = context.WithTimeout(ctxt, defaultTimeout) - invoiceResp, errr = net.Bob.AddInvoice(ctxt, invoice) - if errr != nil { - t.Fatalf("unable to add invoice: %v", errr) - } - - // Next send another payment, but this time using a zpay32 encoded - // invoice rather than manually specifying the payment details. - sendAndAssertSuccess( - t, net.Alice, - &routerrpc.SendPaymentRequest{ - PaymentRequest: invoiceResp.PaymentRequest, - TimeoutSeconds: 60, - FeeLimitMsat: noFeeLimitMsat, - }, - ) - - // The second payment should also have succeeded, with the balances - // being update accordingly. - err = wait.NoError( - assertAmountSent(2*paymentAmt, net.Alice, net.Bob), - 3*time.Second, - ) - if err != nil { - t.Fatalf(err.String()) - } - - // Next send a keysend payment. 
- keySendPreimage := lntypes.Preimage{3, 4, 5, 11} - keySendHash := keySendPreimage.Hash() - - sendAndAssertSuccess( - t, net.Alice, - &routerrpc.SendPaymentRequest{ - Dest: net.Bob.PubKey[:], - Amt: paymentAmt, - FinalCltvDelta: 40, - PaymentHash: keySendHash[:], - DestCustomRecords: map[uint64][]byte{ - record.KeySendType: keySendPreimage[:], - }, - TimeoutSeconds: 60, - FeeLimitMsat: noFeeLimitMsat, - }, - ) - - // The keysend payment should also have succeeded, with the balances - // being update accordingly. - err = wait.NoError( - assertAmountSent(3*paymentAmt, net.Alice, net.Bob), - 3*time.Second, - ) - if err != nil { - t.Fatalf(err.String()) - } - - // Now create an invoice and specify routing hints. - // We will test that the routing hints are encoded properly. - hintChannel := lnwire.ShortChannelID{BlockHeight: 10} - bobPubKey := hex.EncodeToString(net.Bob.PubKey[:]) - hints := []*lnrpc.RouteHint{ - { - HopHints: []*lnrpc.HopHint{ - { - NodeId: bobPubKey, - ChanId: hintChannel.ToUint64(), - FeeBaseMsat: 1, - FeeProportionalMillionths: 1000000, - CltvExpiryDelta: 20, - }, - }, - }, - } - - invoice = &lnrpc.Invoice{ - Memo: "hints", - Value: paymentAmt, - RouteHints: hints, - } - - ctxt, _ = context.WithTimeout(ctxt, defaultTimeout) - invoiceResp, errr = net.Bob.AddInvoice(ctxt, invoice) - if errr != nil { - t.Fatalf("unable to add invoice: %v", errr) - } - payreq, errr := net.Bob.DecodePayReq(ctxt, &lnrpc.PayReqString{PayReq: invoiceResp.PaymentRequest}) - if errr != nil { - t.Fatalf("failed to decode payment request %v", errr) - } - if len(payreq.RouteHints) != 1 { - t.Fatalf("expected one routing hint") - } - routingHint := payreq.RouteHints[0] - if len(routingHint.HopHints) != 1 { - t.Fatalf("expected one hop hint") - } - hopHint := routingHint.HopHints[0] - if hopHint.FeeProportionalMillionths != 1000000 { - t.Fatalf("wrong FeeProportionalMillionths %v", - hopHint.FeeProportionalMillionths) - } - if hopHint.NodeId != bobPubKey { - t.Fatalf("wrong 
NodeId %v", - hopHint.NodeId) - } - if hopHint.ChanId != hintChannel.ToUint64() { - t.Fatalf("wrong ChanId %v", - hopHint.ChanId) - } - if hopHint.FeeBaseMsat != 1 { - t.Fatalf("wrong FeeBaseMsat %v", - hopHint.FeeBaseMsat) - } - if hopHint.CltvExpiryDelta != 20 { - t.Fatalf("wrong CltvExpiryDelta %v", - hopHint.CltvExpiryDelta) - } - - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Alice, chanPoint, false) -} diff --git a/lnd/lntest/itest/lnd_test.go b/lnd/lntest/itest/lnd_test.go deleted file mode 100644 index 24dee00e..00000000 --- a/lnd/lntest/itest/lnd_test.go +++ /dev/null @@ -1,14347 +0,0 @@ -package itest - -import ( - "bytes" - "context" - "crypto/rand" - "crypto/sha256" - "encoding/hex" - "flag" - "fmt" - "io" - "io/ioutil" - "math" - "os" - "reflect" - "strings" - "sync" - "sync/atomic" - "testing" - "time" - - "github.com/davecgh/go-spew/spew" - "github.com/pkt-cash/pktd/blockchain" - "github.com/pkt-cash/pktd/btcjson" - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/btcutil/er" - "github.com/pkt-cash/pktd/btcutil/util" - "github.com/pkt-cash/pktd/chaincfg/chainhash" - "github.com/pkt-cash/pktd/integration/rpctest" - "github.com/pkt-cash/pktd/lnd" - "github.com/pkt-cash/pktd/lnd/chainreg" - "github.com/pkt-cash/pktd/lnd/channeldb" - "github.com/pkt-cash/pktd/lnd/input" - "github.com/pkt-cash/pktd/lnd/labels" - "github.com/pkt-cash/pktd/lnd/lncfg" - "github.com/pkt-cash/pktd/lnd/lnrpc" - "github.com/pkt-cash/pktd/lnd/lnrpc/invoicesrpc" - "github.com/pkt-cash/pktd/lnd/lnrpc/routerrpc" - "github.com/pkt-cash/pktd/lnd/lnrpc/signrpc" - "github.com/pkt-cash/pktd/lnd/lnrpc/walletrpc" - "github.com/pkt-cash/pktd/lnd/lnrpc/watchtowerrpc" - "github.com/pkt-cash/pktd/lnd/lnrpc/wtclientrpc" - "github.com/pkt-cash/pktd/lnd/lntest" - "github.com/pkt-cash/pktd/lnd/lntest/wait" - "github.com/pkt-cash/pktd/lnd/lntypes" - "github.com/pkt-cash/pktd/lnd/lnwallet" - 
"github.com/pkt-cash/pktd/lnd/lnwallet/chainfee" - "github.com/pkt-cash/pktd/lnd/lnwire" - "github.com/pkt-cash/pktd/lnd/routing" - "github.com/pkt-cash/pktd/rpcclient" - "github.com/pkt-cash/pktd/wire" - "github.com/stretchr/testify/require" -) - -const ( - // defaultSplitTranches is the default number of tranches we split the - // test cases into. - defaultSplitTranches uint = 1 - - // defaultRunTranche is the default index of the test cases tranche that - // we run. - defaultRunTranche uint = 0 -) - -var ( - // testCasesSplitParts is the number of tranches the test cases should - // be split into. By default this is set to 1, so no splitting happens. - // If this value is increased, then the -runtranche flag must be - // specified as well to indicate which part should be run in the current - // invocation. - testCasesSplitTranches = flag.Uint( - "splittranches", defaultSplitTranches, "split the test cases "+ - "in this many tranches and run the tranche at "+ - "0-based index specified by the -runtranche flag", - ) - - // testCasesRunTranche is the 0-based index of the split test cases - // tranche to run in the current invocation. - testCasesRunTranche = flag.Uint( - "runtranche", defaultRunTranche, "run the tranche of the "+ - "split test cases with the given (0-based) index", - ) - - // useEtcd test LND nodes use (embedded) etcd as remote db. - useEtcd = flag.Bool("etcd", false, "Use etcd backend for lnd.") -) - -// getTestCaseSplitTranche returns the sub slice of the test cases that should -// be run as the current split tranche as well as the index and slice offset of -// the tranche. 
-func getTestCaseSplitTranche() ([]*testCase, uint, uint) { - numTranches := defaultSplitTranches - if testCasesSplitTranches != nil { - numTranches = *testCasesSplitTranches - } - runTranche := defaultRunTranche - if testCasesRunTranche != nil { - runTranche = *testCasesRunTranche - } - - // There's a special flake-hunt mode where we run the same test multiple - // times in parallel. In that case the tranche index is equal to the - // thread ID, but we need to actually run all tests for the regex - // selection to work. - threadID := runTranche - if numTranches == 1 { - runTranche = 0 - } - - numCases := uint(len(allTestCases)) - testsPerTranche := numCases / numTranches - trancheOffset := runTranche * testsPerTranche - trancheEnd := trancheOffset + testsPerTranche - if trancheEnd > numCases || runTranche == numTranches-1 { - trancheEnd = numCases - } - - return allTestCases[trancheOffset:trancheEnd], threadID, trancheOffset -} - -func rpcPointToWirePoint(t *harnessTest, chanPoint *lnrpc.ChannelPoint) wire.OutPoint { - txid, err := lnd.GetChanPointFundingTxid(chanPoint) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } - - return wire.OutPoint{ - Hash: *txid, - Index: chanPoint.OutputIndex, - } -} - -// openChannelStream blocks until an OpenChannel request for a channel funding -// by alice succeeds. If it does, a stream client is returned to receive events -// about the opening channel. -func openChannelStream(ctx context.Context, t *harnessTest, - net *lntest.NetworkHarness, alice, bob *lntest.HarnessNode, - p lntest.OpenChannelParams) lnrpc.Lightning_OpenChannelClient { - - t.t.Helper() - - // Wait until we are able to fund a channel successfully. This wait - // prevents us from erroring out when trying to create a channel while - // the node is starting up. 
- var chanOpenUpdate lnrpc.Lightning_OpenChannelClient - err := wait.NoError(func() er.R { - var err er.R - chanOpenUpdate, err = net.OpenChannel(ctx, alice, bob, p) - return err - }, defaultTimeout) - if err != nil { - t.Fatalf("unable to open channel: %v", err) - } - - return chanOpenUpdate -} - -// openChannelAndAssert attempts to open a channel with the specified -// parameters extended from Alice to Bob. Additionally, two items are asserted -// after the channel is considered open: the funding transaction should be -// found within a block, and that Alice can report the status of the new -// channel. -func openChannelAndAssert(ctx context.Context, t *harnessTest, - net *lntest.NetworkHarness, alice, bob *lntest.HarnessNode, - p lntest.OpenChannelParams) *lnrpc.ChannelPoint { - - t.t.Helper() - - chanOpenUpdate := openChannelStream(ctx, t, net, alice, bob, p) - - // Mine 6 blocks, then wait for Alice's node to notify us that the - // channel has been opened. The funding transaction should be found - // within the first newly mined block. We mine 6 blocks so that in the - // case that the channel is public, it is announced to the network. - block := mineBlocks(t, net, 6, 1)[0] - - fundingChanPoint, err := net.WaitForChannelOpen(ctx, chanOpenUpdate) - if err != nil { - t.Fatalf("error while waiting for channel open: %v", err) - } - fundingTxID, err := lnd.GetChanPointFundingTxid(fundingChanPoint) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } - assertTxInBlock(t, block, fundingTxID) - - // The channel should be listed in the peer information returned by - // both peers. 
- chanPoint := wire.OutPoint{ - Hash: *fundingTxID, - Index: fundingChanPoint.OutputIndex, - } - if err := net.AssertChannelExists(ctx, alice, &chanPoint); err != nil { - t.Fatalf("unable to assert channel existence: %v", err) - } - if err := net.AssertChannelExists(ctx, bob, &chanPoint); err != nil { - t.Fatalf("unable to assert channel existence: %v", err) - } - - return fundingChanPoint -} - -// closeChannelAndAssert attempts to close a channel identified by the passed -// channel point owned by the passed Lightning node. A fully blocking channel -// closure is attempted, therefore the passed context should be a child derived -// via timeout from a base parent. Additionally, once the channel has been -// detected as closed, an assertion checks that the transaction is found within -// a block. Finally, this assertion verifies that the node always sends out a -// disable update when closing the channel if the channel was previously enabled. -// -// NOTE: This method assumes that the provided funding point is confirmed -// on-chain AND that the edge exists in the node's channel graph. If the funding -// transactions was reorged out at some point, use closeReorgedChannelAndAssert. -func closeChannelAndAssert(ctx context.Context, t *harnessTest, - net *lntest.NetworkHarness, node *lntest.HarnessNode, - fundingChanPoint *lnrpc.ChannelPoint, force bool) *chainhash.Hash { - - return closeChannelAndAssertType(ctx, t, net, node, fundingChanPoint, false, force) -} - -func closeChannelAndAssertType(ctx context.Context, t *harnessTest, - net *lntest.NetworkHarness, node *lntest.HarnessNode, - fundingChanPoint *lnrpc.ChannelPoint, anchors, force bool) *chainhash.Hash { - - // Fetch the current channel policy. If the channel is currently - // enabled, we will register for graph notifications before closing to - // assert that the node sends out a disabling update as a result of the - // channel being closed. 
- curPolicy := getChannelPolicies(t, node, node.PubKeyStr, fundingChanPoint)[0] - expectDisable := !curPolicy.Disabled - - // If the current channel policy is enabled, begin subscribing the graph - // updates before initiating the channel closure. - var graphSub *graphSubscription - if expectDisable { - sub := subscribeGraphNotifications(t, ctx, node) - graphSub = &sub - defer close(graphSub.quit) - } - - closeUpdates, _, err := net.CloseChannel(ctx, node, fundingChanPoint, force) - if err != nil { - t.Fatalf("unable to close channel: %v", err) - } - - // If the channel policy was enabled prior to the closure, wait until we - // received the disabled update. - if expectDisable { - curPolicy.Disabled = true - waitForChannelUpdate( - t, *graphSub, - []expectedChanUpdate{ - {node.PubKeyStr, curPolicy, fundingChanPoint}, - }, - ) - } - - return assertChannelClosed( - ctx, t, net, node, fundingChanPoint, anchors, closeUpdates, - ) -} - -// closeReorgedChannelAndAssert attempts to close a channel identified by the -// passed channel point owned by the passed Lightning node. A fully blocking -// channel closure is attempted, therefore the passed context should be a child -// derived via timeout from a base parent. Additionally, once the channel has -// been detected as closed, an assertion checks that the transaction is found -// within a block. -// -// NOTE: This method does not verify that the node sends a disable update for -// the closed channel. 
-func closeReorgedChannelAndAssert(ctx context.Context, t *harnessTest, - net *lntest.NetworkHarness, node *lntest.HarnessNode, - fundingChanPoint *lnrpc.ChannelPoint, force bool) *chainhash.Hash { - - closeUpdates, _, err := net.CloseChannel(ctx, node, fundingChanPoint, force) - if err != nil { - t.Fatalf("unable to close channel: %v", err) - } - - return assertChannelClosed( - ctx, t, net, node, fundingChanPoint, false, closeUpdates, - ) -} - -// assertChannelClosed asserts that the channel is properly cleaned up after -// initiating a cooperative or local close. -func assertChannelClosed(ctx context.Context, t *harnessTest, - net *lntest.NetworkHarness, node *lntest.HarnessNode, - fundingChanPoint *lnrpc.ChannelPoint, anchors bool, - closeUpdates lnrpc.Lightning_CloseChannelClient) *chainhash.Hash { - - txid, err := lnd.GetChanPointFundingTxid(fundingChanPoint) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } - chanPointStr := fmt.Sprintf("%v:%v", txid, fundingChanPoint.OutputIndex) - - // If the channel appears in list channels, ensure that its state - // contains ChanStatusCoopBroadcasted. - ctxt, _ := context.WithTimeout(ctx, defaultTimeout) - listChansRequest := &lnrpc.ListChannelsRequest{} - listChansResp, errr := node.ListChannels(ctxt, listChansRequest) - if errr != nil { - t.Fatalf("unable to query for list channels: %v", errr) - } - for _, channel := range listChansResp.Channels { - // Skip other channels. - if channel.ChannelPoint != chanPointStr { - continue - } - - // Assert that the channel is in coop broadcasted. - if !strings.Contains(channel.ChanStatusFlags, - channeldb.ChanStatusCoopBroadcasted.String()) { - t.Fatalf("channel not coop broadcasted, "+ - "got: %v", channel.ChanStatusFlags) - } - } - - // At this point, the channel should now be marked as being in the - // state of "waiting close". 
- ctxt, _ = context.WithTimeout(ctx, defaultTimeout) - pendingChansRequest := &lnrpc.PendingChannelsRequest{} - pendingChanResp, errr := node.PendingChannels(ctxt, pendingChansRequest) - if errr != nil { - t.Fatalf("unable to query for pending channels: %v", errr) - } - var found bool - for _, pendingClose := range pendingChanResp.WaitingCloseChannels { - if pendingClose.Channel.ChannelPoint == chanPointStr { - found = true - break - } - } - if !found { - t.Fatalf("channel not marked as waiting close") - } - - // We'll now, generate a single block, wait for the final close status - // update, then ensure that the closing transaction was included in the - // block. If there are anchors, we also expect an anchor sweep. - expectedTxes := 1 - if anchors { - expectedTxes = 2 - } - - block := mineBlocks(t, net, 1, expectedTxes)[0] - - closingTxid, err := net.WaitForChannelClose(ctx, closeUpdates) - if err != nil { - t.Fatalf("error while waiting for channel close: %v", err) - } - - assertTxInBlock(t, block, closingTxid) - - // Finally, the transaction should no longer be in the waiting close - // state as we've just mined a block that should include the closing - // transaction. - err = wait.Predicate(func() bool { - pendingChansRequest := &lnrpc.PendingChannelsRequest{} - pendingChanResp, err := node.PendingChannels( - ctx, pendingChansRequest, - ) - if err != nil { - return false - } - - for _, pendingClose := range pendingChanResp.WaitingCloseChannels { - if pendingClose.Channel.ChannelPoint == chanPointStr { - return false - } - } - - return true - }, time.Second*15) - if err != nil { - t.Fatalf("closing transaction not marked as fully closed") - } - - return closingTxid -} - -// waitForChannelPendingForceClose waits for the node to report that the -// channel is pending force close, and that the UTXO nursery is aware of it. 
-func waitForChannelPendingForceClose(ctx context.Context, - node *lntest.HarnessNode, fundingChanPoint *lnrpc.ChannelPoint) er.R { - - txid, err := lnd.GetChanPointFundingTxid(fundingChanPoint) - if err != nil { - return err - } - - op := wire.OutPoint{ - Hash: *txid, - Index: fundingChanPoint.OutputIndex, - } - - return wait.NoError(func() er.R { - pendingChansRequest := &lnrpc.PendingChannelsRequest{} - pendingChanResp, err := node.PendingChannels( - ctx, pendingChansRequest, - ) - if err != nil { - return er.Errorf("unable to get pending channels: %v", - err) - } - - forceClose, errr := findForceClosedChannel(pendingChanResp, &op) - if errr != nil { - return errr - } - - // We must wait until the UTXO nursery has received the channel - // and is aware of its maturity height. - if forceClose.MaturityHeight == 0 { - return er.Errorf("channel had maturity height of 0") - } - - return nil - }, defaultTimeout) -} - -// lnrpcForceCloseChannel is a short type alias for a ridiculously long type -// name in the lnrpc package. -type lnrpcForceCloseChannel = lnrpc.PendingChannelsResponse_ForceClosedChannel - -// waitForNumChannelPendingForceClose waits for the node to report a certain -// number of channels in state pending force close. 
-func waitForNumChannelPendingForceClose(ctx context.Context, - node *lntest.HarnessNode, expectedNum int, - perChanCheck func(channel *lnrpcForceCloseChannel) er.R) er.R { - - return wait.NoError(func() er.R { - resp, err := node.PendingChannels( - ctx, &lnrpc.PendingChannelsRequest{}, - ) - if err != nil { - return er.Errorf("unable to get pending channels: %v", - err) - } - - forceCloseChans := resp.PendingForceClosingChannels - if len(forceCloseChans) != expectedNum { - return er.Errorf("bob should have %d pending "+ - "force close channels but has %d", expectedNum, - len(forceCloseChans)) - } - - if perChanCheck != nil { - for _, forceCloseChan := range forceCloseChans { - err := perChanCheck(forceCloseChan) - if err != nil { - return err - } - } - } - - return nil - }, defaultTimeout) -} - -// cleanupForceClose mines a force close commitment found in the mempool and -// the following sweep transaction from the force closing node. -func cleanupForceClose(t *harnessTest, net *lntest.NetworkHarness, - node *lntest.HarnessNode, chanPoint *lnrpc.ChannelPoint) { - ctxb := context.Background() - - // Wait for the channel to be marked pending force close. - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - err := waitForChannelPendingForceClose(ctxt, node, chanPoint) - if err != nil { - t.Fatalf("channel not pending force close: %v", err) - } - - // Mine enough blocks for the node to sweep its funds from the force - // closed channel. - // - // The commit sweep resolver is able to broadcast the sweep tx up to - // one block before the CSV elapses, so wait until defaulCSV-1. - _, err = net.Miner.Node.Generate(defaultCSV - 1) - if err != nil { - t.Fatalf("unable to generate blocks: %v", err) - } - - // The node should now sweep the funds, clean up by mining the sweeping - // tx. 
- mineBlocks(t, net, 1, 1) -} - -// numOpenChannelsPending sends an RPC request to a node to get a count of the -// node's channels that are currently in a pending state (with a broadcast, but -// not confirmed funding transaction). -func numOpenChannelsPending(ctxt context.Context, node *lntest.HarnessNode) (int, er.R) { - pendingChansRequest := &lnrpc.PendingChannelsRequest{} - resp, err := node.PendingChannels(ctxt, pendingChansRequest) - if err != nil { - return 0, er.E(err) - } - return len(resp.PendingOpenChannels), nil -} - -// assertNumOpenChannelsPending asserts that a pair of nodes have the expected -// number of pending channels between them. -func assertNumOpenChannelsPending(ctxt context.Context, t *harnessTest, - alice, bob *lntest.HarnessNode, expected int) { - - err := wait.NoError(func() er.R { - aliceNumChans, err := numOpenChannelsPending(ctxt, alice) - if err != nil { - return er.Errorf("error fetching alice's node (%v) "+ - "pending channels %v", alice.NodeID, err) - } - bobNumChans, err := numOpenChannelsPending(ctxt, bob) - if err != nil { - return er.Errorf("error fetching bob's node (%v) "+ - "pending channels %v", bob.NodeID, err) - } - - aliceStateCorrect := aliceNumChans == expected - if !aliceStateCorrect { - return er.Errorf("number of pending channels for "+ - "alice incorrect. expected %v, got %v", - expected, aliceNumChans) - } - - bobStateCorrect := bobNumChans == expected - if !bobStateCorrect { - return er.Errorf("number of pending channels for bob "+ - "incorrect. expected %v, got %v", expected, - bobNumChans) - } - - return nil - }, 15*time.Second) - if err != nil { - t.Fatalf(err.String()) - } -} - -// assertNumConnections asserts number current connections between two peers. 
-func assertNumConnections(t *harnessTest, alice, bob *lntest.HarnessNode, - expected int) { - ctxb := context.Background() - - const nPolls = 10 - - tick := time.NewTicker(300 * time.Millisecond) - defer tick.Stop() - - for i := nPolls - 1; i >= 0; i-- { - select { - case <-tick.C: - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - aNumPeers, err := alice.ListPeers(ctxt, &lnrpc.ListPeersRequest{}) - if err != nil { - t.Fatalf("unable to fetch alice's node (%v) list peers %v", - alice.NodeID, err) - } - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - bNumPeers, err := bob.ListPeers(ctxt, &lnrpc.ListPeersRequest{}) - if err != nil { - t.Fatalf("unable to fetch bob's node (%v) list peers %v", - bob.NodeID, err) - } - if len(aNumPeers.Peers) != expected { - // Continue polling if this is not the final - // loop. - if i > 0 { - continue - } - t.Fatalf("number of peers connected to alice is incorrect: "+ - "expected %v, got %v", expected, len(aNumPeers.Peers)) - } - if len(bNumPeers.Peers) != expected { - // Continue polling if this is not the final - // loop. - if i > 0 { - continue - } - t.Fatalf("number of peers connected to bob is incorrect: "+ - "expected %v, got %v", expected, len(bNumPeers.Peers)) - } - - // Alice and Bob both have the required number of - // peers, stop polling and return to caller. - return - } - } -} - -// shutdownAndAssert shuts down the given node and asserts that no errors -// occur. -func shutdownAndAssert(net *lntest.NetworkHarness, t *harnessTest, - node *lntest.HarnessNode) { - if err := net.ShutdownNode(node); err != nil { - t.Fatalf("unable to shutdown %v: %v", node.Name(), err) - } -} - -// completePaymentRequests sends payments from a lightning node to complete all -// payment requests. If the awaitResponse parameter is true, this function -// does not return until all payments successfully complete without errors. 
-func completePaymentRequests(ctx context.Context, client lnrpc.LightningClient, - routerClient routerrpc.RouterClient, paymentRequests []string, - awaitResponse bool) er.R { - - // We start by getting the current state of the client's channels. This - // is needed to ensure the payments actually have been committed before - // we return. - ctxt, _ := context.WithTimeout(ctx, defaultTimeout) - req := &lnrpc.ListChannelsRequest{} - listResp, errr := client.ListChannels(ctxt, req) - if errr != nil { - return er.E(errr) - } - - // send sends a payment and returns an error if it doesn't succeeded. - send := func(payReq string) er.R { - ctxc, cancel := context.WithCancel(ctx) - defer cancel() - - payStream, errr := routerClient.SendPaymentV2( - ctxc, - &routerrpc.SendPaymentRequest{ - PaymentRequest: payReq, - TimeoutSeconds: 60, - FeeLimitMsat: noFeeLimitMsat, - }, - ) - if errr != nil { - return er.E(errr) - } - - resp, err := getPaymentResult(payStream) - if err != nil { - return err - } - if resp.Status != lnrpc.Payment_SUCCEEDED { - return er.Errorf("%v", resp.FailureReason) - } - - return nil - } - - // Launch all payments simultaneously. - results := make(chan er.R) - for _, payReq := range paymentRequests { - payReqCopy := payReq - go func() { - err := send(payReqCopy) - if awaitResponse { - results <- err - } - }() - } - - // If awaiting a response, verify that all payments succeeded. - if awaitResponse { - for range paymentRequests { - err := <-results - if err != nil { - return err - } - } - return nil - } - - // We are not waiting for feedback in the form of a response, but we - // should still wait long enough for the server to receive and handle - // the send before cancelling the request. We wait for the number of - // updates to one of our channels has increased before we return. 
- err := wait.Predicate(func() bool { - ctxt, _ = context.WithTimeout(ctx, defaultTimeout) - newListResp, errr := client.ListChannels(ctxt, req) - if errr != nil { - return false - } - - // If the number of open channels is now lower than before - // attempting the payments, it means one of the payments - // triggered a force closure (for example, due to an incorrect - // preimage). Return early since it's clear the payment was - // attempted. - if len(newListResp.Channels) < len(listResp.Channels) { - return true - } - - for _, c1 := range listResp.Channels { - for _, c2 := range newListResp.Channels { - if c1.ChannelPoint != c2.ChannelPoint { - continue - } - - // If this channel has an increased numbr of - // updates, we assume the payments are - // committed, and we can return. - if c2.NumUpdates > c1.NumUpdates { - return true - } - } - } - - return false - }, time.Second*15) - if err != nil { - return err - } - - return nil -} - -// makeFakePayHash creates random pre image hash -func makeFakePayHash(t *harnessTest) []byte { - randBuf := make([]byte, 32) - - if _, err := rand.Read(randBuf); err != nil { - t.Fatalf("internal error, cannot generate random string: %v", err) - } - - return randBuf -} - -// createPayReqs is a helper method that will create a slice of payment -// requests for the given node. 
-func createPayReqs(node *lntest.HarnessNode, paymentAmt btcutil.Amount, - numInvoices int) ([]string, [][]byte, []*lnrpc.Invoice, er.R) { - - payReqs := make([]string, numInvoices) - rHashes := make([][]byte, numInvoices) - invoices := make([]*lnrpc.Invoice, numInvoices) - for i := 0; i < numInvoices; i++ { - preimage := make([]byte, 32) - _, err := rand.Read(preimage) - if err != nil { - return nil, nil, nil, er.Errorf("unable to generate "+ - "preimage: %v", err) - } - invoice := &lnrpc.Invoice{ - Memo: "testing", - RPreimage: preimage, - Value: int64(paymentAmt), - } - ctxt, _ := context.WithTimeout( - context.Background(), defaultTimeout, - ) - resp, err := node.AddInvoice(ctxt, invoice) - if err != nil { - return nil, nil, nil, er.Errorf("unable to add "+ - "invoice: %v", err) - } - - payReqs[i] = resp.PaymentRequest - rHashes[i] = resp.RHash - invoices[i] = invoice - } - return payReqs, rHashes, invoices, nil -} - -// getChanInfo is a helper method for getting channel info for a node's sole -// channel. -func getChanInfo(ctx context.Context, node *lntest.HarnessNode) ( - *lnrpc.Channel, er.R) { - - req := &lnrpc.ListChannelsRequest{} - channelInfo, errr := node.ListChannels(ctx, req) - if errr != nil { - return nil, er.E(errr) - } - if len(channelInfo.Channels) != 1 { - return nil, er.Errorf("node should only have a single "+ - "channel, instead it has %v", len(channelInfo.Channels)) - } - - return channelInfo.Channels[0], nil -} - -// testGetRecoveryInfo checks whether lnd gives the right information about -// the wallet recovery process. -func testGetRecoveryInfo(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - // First, create a new node with strong passphrase and grab the mnemonic - // used for key derivation. This will bring up Carol with an empty - // wallet, and such that she is synced up. 
- password := []byte("The Magic Words are Squeamish Ossifrage") - carol, mnemonic, _, err := net.NewNodeWithSeed( - "Carol", nil, password, false, - ) - if err != nil { - t.Fatalf("unable to create node with seed; %v", err) - } - - shutdownAndAssert(net, t, carol) - - checkInfo := func(expectedRecoveryMode, expectedRecoveryFinished bool, - expectedProgress float64, recoveryWindow int32) { - - // Restore Carol, passing in the password, mnemonic, and - // desired recovery window. - node, err := net.RestoreNodeWithSeed( - "Carol", nil, password, mnemonic, recoveryWindow, nil, - ) - if err != nil { - t.Fatalf("unable to restore node: %v", err) - } - - // Wait for Carol to sync to the chain. - _, minerHeight, err := net.Miner.Node.GetBestBlock() - if err != nil { - t.Fatalf("unable to get current blockheight %v", err) - } - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - err = waitForNodeBlockHeight(ctxt, node, minerHeight) - if err != nil { - t.Fatalf("unable to sync to chain: %v", err) - } - - // Query carol for her current wallet recovery progress. - var ( - recoveryMode bool - recoveryFinished bool - progress float64 - ) - - err = wait.Predicate(func() bool { - // Verify that recovery info gives the right response. 
- req := &lnrpc.GetRecoveryInfoRequest{} - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - resp, err := node.GetRecoveryInfo(ctxt, req) - if err != nil { - t.Fatalf("unable to query recovery info: %v", err) - } - - recoveryMode = resp.RecoveryMode - recoveryFinished = resp.RecoveryFinished - progress = resp.Progress - - if recoveryMode != expectedRecoveryMode || - recoveryFinished != expectedRecoveryFinished || - progress != expectedProgress { - return false - } - - return true - }, 15*time.Second) - if err != nil { - t.Fatalf("expected recovery mode to be %v, got %v, "+ - "expected recovery finished to be %v, got %v, "+ - "expected progress %v, got %v", - expectedRecoveryMode, recoveryMode, - expectedRecoveryFinished, recoveryFinished, - expectedProgress, progress, - ) - } - - // Lastly, shutdown this Carol so we can move on to the next - // restoration. - shutdownAndAssert(net, t, node) - } - - // Restore Carol with a recovery window of 0. Since it's not in recovery - // mode, the recovery info will give a response with recoveryMode=false, - // recoveryFinished=false, and progress=0 - checkInfo(false, false, 0, 0) - - // Change the recovery windown to be 1 to turn on recovery mode. Since the - // current chain height is the same as the birthday height, it should - // indicate the recovery process is finished. - checkInfo(true, true, 1, 1) - - // We now go ahead 5 blocks. Because the wallet's syncing process is - // controlled by a goroutine in the background, it will catch up quickly. - // This makes the recovery progress back to 1. - mineBlocks(t, net, 5, 0) - checkInfo(true, true, 1, 1) -} - -// testOnchainFundRecovery checks lnd's ability to rescan for onchain outputs -// when providing a valid aezeed that owns outputs on the chain. This test -// performs multiple restorations using the same seed and various recovery -// windows to ensure we detect funds properly. 
-func testOnchainFundRecovery(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - // First, create a new node with strong passphrase and grab the mnemonic - // used for key derivation. This will bring up Carol with an empty - // wallet, and such that she is synced up. - password := []byte("The Magic Words are Squeamish Ossifrage") - carol, mnemonic, _, err := net.NewNodeWithSeed( - "Carol", nil, password, false, - ) - if err != nil { - t.Fatalf("unable to create node with seed; %v", err) - } - shutdownAndAssert(net, t, carol) - - // Create a closure for testing the recovery of Carol's wallet. This - // method takes the expected value of Carol's balance when using the - // given recovery window. Additionally, the caller can specify an action - // to perform on the restored node before the node is shutdown. - restoreCheckBalance := func(expAmount int64, expectedNumUTXOs uint32, - recoveryWindow int32, fn func(*lntest.HarnessNode)) { - - // Restore Carol, passing in the password, mnemonic, and - // desired recovery window. - node, err := net.RestoreNodeWithSeed( - "Carol", nil, password, mnemonic, recoveryWindow, nil, - ) - if err != nil { - t.Fatalf("unable to restore node: %v", err) - } - - // Query carol for her current wallet balance, and also that we - // gain the expected number of UTXOs. 
- var ( - currBalance int64 - currNumUTXOs uint32 - ) - err = wait.Predicate(func() bool { - req := &lnrpc.WalletBalanceRequest{} - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - resp, err := node.WalletBalance(ctxt, req) - if err != nil { - t.Fatalf("unable to query wallet balance: %v", - err) - } - currBalance = resp.ConfirmedBalance - - utxoReq := &lnrpc.ListUnspentRequest{ - MaxConfs: math.MaxInt32, - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - utxoResp, err := node.ListUnspent(ctxt, utxoReq) - if err != nil { - t.Fatalf("unable to query utxos: %v", err) - } - currNumUTXOs = uint32(len(utxoResp.Utxos)) - - // Verify that Carol's balance and number of UTXOs - // matches what's expected. - if expAmount != currBalance { - return false - } - if currNumUTXOs != expectedNumUTXOs { - return false - } - - return true - }, 15*time.Second) - if err != nil { - t.Fatalf("expected restored node to have %d satoshis, "+ - "instead has %d satoshis, expected %d utxos "+ - "instead has %d", expAmount, currBalance, - expectedNumUTXOs, currNumUTXOs) - } - - // If the user provided a callback, execute the commands against - // the restored Carol. - if fn != nil { - fn(node) - } - - // Lastly, shutdown this Carol so we can move on to the next - // restoration. - shutdownAndAssert(net, t, node) - } - - // Create a closure-factory for building closures that can generate and - // skip a configurable number of addresses, before finally sending coins - // to a next generated address. The returned closure will apply the same - // behavior to both default P2WKH and NP2WKH scopes. - skipAndSend := func(nskip int) func(*lntest.HarnessNode) { - return func(node *lntest.HarnessNode) { - newP2WKHAddrReq := &lnrpc.NewAddressRequest{ - Type: AddrTypeWitnessPubkeyHash, - } - - newNP2WKHAddrReq := &lnrpc.NewAddressRequest{ - Type: AddrTypeNestedPubkeyHash, - } - - // Generate and skip the number of addresses requested. 
- for i := 0; i < nskip; i++ { - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - _, errr := node.NewAddress(ctxt, newP2WKHAddrReq) - if errr != nil { - t.Fatalf("unable to generate new "+ - "p2wkh address: %v", errr) - } - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - _, errr = node.NewAddress(ctxt, newNP2WKHAddrReq) - if errr != nil { - t.Fatalf("unable to generate new "+ - "np2wkh address: %v", errr) - } - } - - // Send one BTC to the next P2WKH address. - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - err = net.SendCoins( - ctxt, btcutil.UnitsPerCoin(), node, - ) - if err != nil { - t.Fatalf("unable to send coins to node: %v", - err) - } - - // And another to the next NP2WKH address. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.SendCoinsNP2WKH( - ctxt, btcutil.UnitsPerCoin(), node, - ) - if err != nil { - t.Fatalf("unable to send coins to node: %v", - err) - } - } - } - - // Restore Carol with a recovery window of 0. Since no coins have been - // sent, her balance should be zero. - // - // After, one BTC is sent to both her first external P2WKH and NP2WKH - // addresses. - restoreCheckBalance(0, 0, 0, skipAndSend(0)) - - // Check that restoring without a look-ahead results in having no funds - // in the wallet, even though they exist on-chain. - restoreCheckBalance(0, 0, 0, nil) - - // Now, check that using a look-ahead of 1 recovers the balance from - // the two transactions above. We should also now have 2 UTXOs in the - // wallet at the end of the recovery attempt. - // - // After, we will generate and skip 9 P2WKH and NP2WKH addresses, and - // send another BTC to the subsequent 10th address in each derivation - // path. - restoreCheckBalance(2*btcutil.UnitsPerCoinI64(), 2, 1, skipAndSend(9)) - - // Check that using a recovery window of 9 does not find the two most - // recent txns. 
- restoreCheckBalance(2*btcutil.UnitsPerCoinI64(), 2, 9, nil) - - // Extending our recovery window to 10 should find the most recent - // transactions, leaving the wallet with 4 BTC total. We should also - // learn of the two additional UTXOs created above. - // - // After, we will skip 19 more addrs, sending to the 20th address past - // our last found address, and repeat the same checks. - restoreCheckBalance(4*btcutil.UnitsPerCoinI64(), 4, 10, skipAndSend(19)) - - // Check that recovering with a recovery window of 19 fails to find the - // most recent transactions. - restoreCheckBalance(4*btcutil.UnitsPerCoinI64(), 4, 19, nil) - - // Ensure that using a recovery window of 20 succeeds with all UTXOs - // found and the final balance reflected. - - // After these checks are done, we'll want to make sure we can also - // recover change address outputs. This is mainly motivated by a now - // fixed bug in the wallet in which change addresses could at times be - // created outside of the default key scopes. Recovery only used to be - // performed on the default key scopes, so ideally this test case - // would've caught the bug earlier. Carol has received 6 BTC so far from - // the miner, we'll send 5 back to ensure all of her UTXOs get spent to - // avoid fee discrepancies and a change output is formed. 
- minerAmt := 5 * btcutil.UnitsPerCoinI64() - finalBalance := 6 * btcutil.UnitsPerCoinI64() - promptChangeAddr := func(node *lntest.HarnessNode) { - minerAddr, err := net.Miner.NewAddress() - if err != nil { - t.Fatalf("unable to create new miner address: %v", err) - } - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - resp, errr := node.SendCoins(ctxt, &lnrpc.SendCoinsRequest{ - Addr: minerAddr.String(), - Amount: minerAmt, - }) - if errr != nil { - t.Fatalf("unable to send coins to miner: %v", errr) - } - txid, err := waitForTxInMempool( - net.Miner.Node, minerMempoolTimeout, - ) - if err != nil { - t.Fatalf("transaction not found in mempool: %v", err) - } - if resp.Txid != txid.String() { - t.Fatalf("txid mismatch: %v vs %v", resp.Txid, - txid.String()) - } - block := mineBlocks(t, net, 1, 1)[0] - assertTxInBlock(t, block, txid) - } - restoreCheckBalance(finalBalance, 6, 20, promptChangeAddr) - - // We should expect a static fee of 27750 satoshis for spending 6 inputs - // (3 P2WPKH, 3 NP2WPKH) to two P2WPKH outputs. Carol should therefore - // only have one UTXO present (the change output) of 6 - 5 - fee BTC. - const fee = 27750 - restoreCheckBalance(finalBalance-minerAmt-fee, 1, 21, nil) -} - -// commitType is a simple enum used to run though the basic funding flow with -// different commitment formats. -type commitType byte - -const ( - // commitTypeLegacy is the old school commitment type. - commitTypeLegacy commitType = iota - - // commiTypeTweakless is the commitment type where the remote key is - // static (non-tweaked). - commitTypeTweakless - - // commitTypeAnchors is the kind of commitment that has extra outputs - // used for anchoring down to commitment using CPFP. - commitTypeAnchors -) - -// String returns that name of the commitment type. 
-func (c commitType) String() string { - switch c { - case commitTypeLegacy: - return "legacy" - case commitTypeTweakless: - return "tweakless" - case commitTypeAnchors: - return "anchors" - default: - return "invalid" - } -} - -// Args returns the command line flag to supply to enable this commitment type. -func (c commitType) Args() []string { - switch c { - case commitTypeLegacy: - return []string{"--protocol.legacy.committweak"} - case commitTypeTweakless: - return []string{} - case commitTypeAnchors: - return []string{"--protocol.anchors"} - } - - return nil -} - -// calcStaticFee calculates appropriate fees for commitment transactions. This -// function provides a simple way to allow test balance assertions to take fee -// calculations into account. -func (c commitType) calcStaticFee(numHTLCs int) btcutil.Amount { - const htlcWeight = input.HTLCWeight - var ( - feePerKw = chainfee.SatPerKVByte(50000).FeePerKWeight() - commitWeight = input.CommitWeight - anchors = btcutil.Amount(0) - ) - - // The anchor commitment type is slightly heavier, and we must also add - // the value of the two anchors to the resulting fee the initiator - // pays. - if c == commitTypeAnchors { - commitWeight = input.AnchorCommitWeight - anchors = 2 * anchorSize - } - - return feePerKw.FeeForWeight(int64(commitWeight+htlcWeight*numHTLCs)) + - anchors -} - -// channelCommitType retrieves the active channel commitment type for the given -// chan point. 
-func channelCommitType(node *lntest.HarnessNode, - chanPoint *lnrpc.ChannelPoint) (commitType, er.R) { - - ctxb := context.Background() - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - - req := &lnrpc.ListChannelsRequest{} - channels, err := node.ListChannels(ctxt, req) - if err != nil { - return 0, er.Errorf("listchannels failed: %v", err) - } - - for _, c := range channels.Channels { - if c.ChannelPoint == txStr(chanPoint) { - switch c.CommitmentType { - - // If the anchor output size is non-zero, we are - // dealing with the anchor type. - case lnrpc.CommitmentType_ANCHORS: - return commitTypeAnchors, nil - - // StaticRemoteKey means it is tweakless, - case lnrpc.CommitmentType_STATIC_REMOTE_KEY: - return commitTypeTweakless, nil - - // Otherwise legacy. - default: - return commitTypeLegacy, nil - } - } - } - - return 0, er.Errorf("channel point %v not found", chanPoint) -} - -// assertChannelBalanceResp makes a ChannelBalance request and checks the -// returned response matches the expected. -func assertChannelBalanceResp(t *harnessTest, - node *lntest.HarnessNode, expected *lnrpc.ChannelBalanceResponse) { - - resp := getChannelBalance(t, node) - require.Equal( - t.t, expected, resp, "balance is incorrect", - ) -} - -// getChannelBalance gets the channel balance. -func getChannelBalance(t *harnessTest, - node *lntest.HarnessNode) *lnrpc.ChannelBalanceResponse { - - t.t.Helper() - - ctxt, _ := context.WithTimeout(context.Background(), defaultTimeout) - req := &lnrpc.ChannelBalanceRequest{} - resp, err := node.ChannelBalance(ctxt, req) - - require.NoError(t.t, err, "unable to get node's balance") - return resp -} - -// basicChannelFundingTest is a sub-test of the main testBasicChannelFunding -// test. Given two nodes: Alice and Bob, it'll assert proper channel creation, -// then return a function closure that should be called to assert proper -// channel closure. 
-func basicChannelFundingTest(t *harnessTest, net *lntest.NetworkHarness, - alice *lntest.HarnessNode, bob *lntest.HarnessNode, - fundingShim *lnrpc.FundingShim) (*lnrpc.Channel, *lnrpc.Channel, func(), er.R) { - - chanAmt := lnd.MaxBtcFundingAmount - pushAmt := btcutil.Amount(100000) - - // Record nodes' channel balance before testing. - aliceChannelBalance := getChannelBalance(t, alice) - bobChannelBalance := getChannelBalance(t, bob) - - // Creates a helper closure to be used below which asserts the proper - // response to a channel balance RPC. - checkChannelBalance := func(node *lntest.HarnessNode, - oldChannelBalance *lnrpc.ChannelBalanceResponse, - local, remote btcutil.Amount) { - - newResp := oldChannelBalance - - newResp.LocalBalance.Sat += uint64(local) - newResp.LocalBalance.Msat += uint64( - lnwire.NewMSatFromSatoshis(local), - ) - newResp.RemoteBalance.Sat += uint64(remote) - newResp.RemoteBalance.Msat += uint64( - lnwire.NewMSatFromSatoshis(remote), - ) - // Deprecated fields. - newResp.Balance += int64(local) - assertChannelBalanceResp(t, node, newResp) - } - - // First establish a channel with a capacity of 0.5 BTC between Alice - // and Bob with Alice pushing 100k satoshis to Bob's side during - // funding. This function will block until the channel itself is fully - // open or an error occurs in the funding process. A series of - // assertions will be executed to ensure the funding process completed - // successfully. 
- ctxb := context.Background() - ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout) - chanPoint := openChannelAndAssert( - ctxt, t, net, alice, bob, - lntest.OpenChannelParams{ - Amt: chanAmt, - PushAmt: pushAmt, - FundingShim: fundingShim, - }, - ) - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err := alice.WaitForNetworkChannelOpen(ctxt, chanPoint) - if err != nil { - return nil, nil, nil, er.Errorf("alice didn't report "+ - "channel: %v", err) - } - err = bob.WaitForNetworkChannelOpen(ctxt, chanPoint) - if err != nil { - return nil, nil, nil, er.Errorf("bob didn't report "+ - "channel: %v", err) - } - - cType, err := channelCommitType(alice, chanPoint) - if err != nil { - return nil, nil, nil, er.Errorf("unable to get channel "+ - "type: %v", err) - } - - // With the channel open, ensure that the amount specified above has - // properly been pushed to Bob. - aliceLocalBalance := chanAmt - pushAmt - cType.calcStaticFee(0) - checkChannelBalance( - alice, aliceChannelBalance, aliceLocalBalance, pushAmt, - ) - checkChannelBalance( - bob, bobChannelBalance, pushAmt, aliceLocalBalance, - ) - - req := &lnrpc.ListChannelsRequest{} - aliceChannel, errr := alice.ListChannels(context.Background(), req) - if errr != nil { - return nil, nil, nil, er.Errorf("unable to obtain chan: %v", errr) - } - - bobChannel, errr := bob.ListChannels(context.Background(), req) - if errr != nil { - return nil, nil, nil, er.Errorf("unable to obtain chan: %v", errr) - } - - closeChan := func() { - // Finally, immediately close the channel. This function will - // also block until the channel is closed and will additionally - // assert the relevant channel closing post conditions. 
- ctxt, _ := context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, alice, chanPoint, false) - } - - return aliceChannel.Channels[0], bobChannel.Channels[0], closeChan, nil -} - -// testBasicChannelFunding performs a test exercising expected behavior from a -// basic funding workflow. The test creates a new channel between Alice and -// Bob, then immediately closes the channel after asserting some expected post -// conditions. Finally, the chain itself is checked to ensure the closing -// transaction was mined. -func testBasicChannelFunding(net *lntest.NetworkHarness, t *harnessTest) { - - ctxb := context.Background() - - // Run through the test with combinations of all the different - // commitment types. - allTypes := []commitType{ - commitTypeLegacy, - commitTypeTweakless, - commitTypeAnchors, - } - -test: - // We'll test all possible combinations of the feature bit presence - // that both nodes can signal for this new channel type. We'll make a - // new Carol+Dave for each test instance as well. - for _, carolCommitType := range allTypes { - for _, daveCommitType := range allTypes { - // Based on the current tweak variable for Carol, we'll - // preferentially signal the legacy commitment format. - // We do the same for Dave shortly below. - carolArgs := carolCommitType.Args() - carol, err := net.NewNode("Carol", carolArgs) - if err != nil { - t.Fatalf("unable to create new node: %v", err) - } - - // Each time, we'll send Carol a new set of coins in - // order to fund the channel. 
- ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - err = net.SendCoins(ctxt, btcutil.UnitsPerCoin(), carol) - if err != nil { - t.Fatalf("unable to send coins to carol: %v", err) - } - - daveArgs := daveCommitType.Args() - dave, err := net.NewNode("Dave", daveArgs) - if err != nil { - t.Fatalf("unable to create new node: %v", err) - } - - // Before we start the test, we'll ensure both sides - // are connected to the funding flow can properly be - // executed. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.EnsureConnected(ctxt, carol, dave) - if err != nil { - t.Fatalf("unable to connect peers: %v", err) - } - - testName := fmt.Sprintf("carol_commit=%v,dave_commit=%v", - carolCommitType, daveCommitType) - - ht := t - carolCommitType := carolCommitType - daveCommitType := daveCommitType - success := t.t.Run(testName, func(t *testing.T) { - carolChannel, daveChannel, closeChan, err := basicChannelFundingTest( - ht, net, carol, dave, nil, - ) - if err != nil { - t.Fatalf("failed funding flow: %v", err) - } - - // Both nodes should report the same commitment - // type. - chansCommitType := carolChannel.CommitmentType - if daveChannel.CommitmentType != chansCommitType { - t.Fatalf("commit types don't match, "+ - "carol got %v, dave got %v", - carolChannel.CommitmentType, - daveChannel.CommitmentType, - ) - } - - // Now check that the commitment type reported - // by both nodes is what we expect. It will be - // the minimum of the two nodes' preference, in - // the order Legacy, Tweakless, Anchors. - expType := carolCommitType - - switch daveCommitType { - - // Dave supports anchors, type will be what - // Carol supports. - case commitTypeAnchors: - - // Dave only supports tweakless, channel will - // be downgraded to this type if Carol supports - // anchors. - case commitTypeTweakless: - if expType == commitTypeAnchors { - expType = commitTypeTweakless - } - - // Dave only supoprts legacy type, channel will - // be downgraded to this type. 
- case commitTypeLegacy: - expType = commitTypeLegacy - - default: - t.Fatalf("invalid commit type %v", - daveCommitType) - } - - // Check that the signalled type matches what we - // expect. - switch { - case expType == commitTypeAnchors && - chansCommitType == lnrpc.CommitmentType_ANCHORS: - - case expType == commitTypeTweakless && - chansCommitType == lnrpc.CommitmentType_STATIC_REMOTE_KEY: - - case expType == commitTypeLegacy && - chansCommitType == lnrpc.CommitmentType_LEGACY: - - default: - t.Fatalf("expected nodes to signal "+ - "commit type %v, instead got "+ - "%v", expType, chansCommitType) - } - - // As we've concluded this sub-test case we'll - // now close out the channel for both sides. - closeChan() - }) - if !success { - break test - } - - shutdownAndAssert(net, t, carol) - shutdownAndAssert(net, t, dave) - } - } -} - -// testUnconfirmedChannelFunding tests that our unconfirmed change outputs can -// be used to fund channels. -func testUnconfirmedChannelFunding(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - const ( - chanAmt = lnd.MaxBtcFundingAmount - pushAmt = btcutil.Amount(100000) - ) - - // We'll start off by creating a node for Carol. - carol, err := net.NewNode("Carol", nil) - if err != nil { - t.Fatalf("unable to create carol's node: %v", err) - } - defer shutdownAndAssert(net, t, carol) - - // We'll send her some confirmed funds. - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - err = net.SendCoins(ctxt, 2*chanAmt, carol) - if err != nil { - t.Fatalf("unable to send coins to carol: %v", err) - } - - // Now let Carol send some funds to herself, making a unconfirmed - // change output. 
- addrReq := &lnrpc.NewAddressRequest{ - Type: lnrpc.AddressType_WITNESS_PUBKEY_HASH, - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - resp, errr := carol.NewAddress(ctxt, addrReq) - if errr != nil { - t.Fatalf("unable to get new address: %v", errr) - } - - sendReq := &lnrpc.SendCoinsRequest{ - Addr: resp.Address, - Amount: int64(chanAmt) / 5, - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - _, errr = carol.SendCoins(ctxt, sendReq) - if errr != nil { - t.Fatalf("unable to send coins: %v", errr) - } - - // Make sure the unconfirmed tx is seen in the mempool. - _, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) - if err != nil { - t.Fatalf("failed to find tx in miner mempool: %v", err) - } - - // Now, we'll connect her to Alice so that they can open a channel - // together. The funding flow should select Carol's unconfirmed output - // as she doesn't have any other funds since it's a new node. - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, carol, net.Alice); err != nil { - t.Fatalf("unable to connect dave to alice: %v", err) - } - - chanOpenUpdate := openChannelStream( - ctxt, t, net, carol, net.Alice, - lntest.OpenChannelParams{ - Amt: chanAmt, - PushAmt: pushAmt, - SpendUnconfirmed: true, - }, - ) - - // Creates a helper closure to be used below which asserts the proper - // response to a channel balance RPC. 
- checkChannelBalance := func(node *lntest.HarnessNode, - local, remote, pendingLocal, pendingRemote btcutil.Amount) { - expectedResponse := &lnrpc.ChannelBalanceResponse{ - LocalBalance: &lnrpc.Amount{ - Sat: uint64(local), - Msat: uint64(lnwire.NewMSatFromSatoshis( - local, - )), - }, - RemoteBalance: &lnrpc.Amount{ - Sat: uint64(remote), - Msat: uint64(lnwire.NewMSatFromSatoshis( - remote, - )), - }, - PendingOpenLocalBalance: &lnrpc.Amount{ - Sat: uint64(pendingLocal), - Msat: uint64(lnwire.NewMSatFromSatoshis( - pendingLocal, - )), - }, - PendingOpenRemoteBalance: &lnrpc.Amount{ - Sat: uint64(pendingRemote), - Msat: uint64(lnwire.NewMSatFromSatoshis( - pendingRemote, - )), - }, - UnsettledLocalBalance: &lnrpc.Amount{}, - UnsettledRemoteBalance: &lnrpc.Amount{}, - // Deprecated fields. - Balance: int64(local), - PendingOpenBalance: int64(pendingLocal), - } - assertChannelBalanceResp(t, node, expectedResponse) - } - - // As the channel is pending open, it's expected Carol has both zero - // local and remote balances, and pending local/remote should not be - // zero. - // - // Note that atm we haven't obtained the chanPoint yet, so we use the - // type directly. - cType := commitTypeTweakless - carolLocalBalance := chanAmt - pushAmt - cType.calcStaticFee(0) - checkChannelBalance(carol, 0, 0, carolLocalBalance, pushAmt) - - // For Alice, her local/remote balances should be zero, and the - // local/remote balances are the mirror of Carol's. - checkChannelBalance(net.Alice, 0, 0, pushAmt, carolLocalBalance) - - // Confirm the channel and wait for it to be recognized by both - // parties. Two transactions should be mined, the unconfirmed spend and - // the funding tx. 
- mineBlocks(t, net, 6, 2) - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - chanPoint, err := net.WaitForChannelOpen(ctxt, chanOpenUpdate) - if err != nil { - t.Fatalf("error while waiting for channel open: %v", err) - } - - // With the channel open, we'll check the balances on each side of the - // channel as a sanity check to ensure things worked out as intended. - checkChannelBalance(carol, carolLocalBalance, pushAmt, 0, 0) - checkChannelBalance(net.Alice, pushAmt, carolLocalBalance, 0, 0) - - // Now that we're done with the test, the channel can be closed. - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, carol, chanPoint, false) -} - -// testPaymentFollowingChannelOpen tests that the channel transition from -// 'pending' to 'open' state does not cause any inconsistencies within other -// subsystems trying to update the channel state in the db. We follow this -// transition with a payment that updates the commitment state and verify that -// the pending state is up to date. -func testPaymentFollowingChannelOpen(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - const paymentAmt = btcutil.Amount(100) - channelCapacity := paymentAmt * 1000 - - // We first establish a channel between Alice and Bob. - ctxt, cancel := context.WithTimeout(ctxb, channelOpenTimeout) - defer cancel() - pendingUpdate, err := net.OpenPendingChannel( - ctxt, net.Alice, net.Bob, channelCapacity, 0, - ) - if err != nil { - t.Fatalf("unable to open channel: %v", err) - } - - // At this point, the channel's funding transaction will have been - // broadcast, but not confirmed. Alice and Bob's nodes - // should reflect this when queried via RPC. - ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout) - defer cancel() - assertNumOpenChannelsPending(ctxt, t, net.Alice, net.Bob, 1) - - // We are restarting Bob's node to let the link be created for the - // pending channel. 
- if err := net.RestartNode(net.Bob, nil); err != nil { - t.Fatalf("Bob restart failed: %v", err) - } - - // We ensure that Bob reconnects to Alice. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.EnsureConnected(ctxt, net.Bob, net.Alice); err != nil { - t.Fatalf("peers unable to reconnect after restart: %v", err) - } - - // We mine one block for the channel to be confirmed. - _ = mineBlocks(t, net, 6, 1)[0] - - // We verify that the channel is open from both nodes point of view. - ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout) - defer cancel() - assertNumOpenChannelsPending(ctxt, t, net.Alice, net.Bob, 0) - - // With the channel open, we'll create invoices for Bob that Alice will - // pay to in order to advance the state of the channel. - bobPayReqs, _, _, err := createPayReqs( - net.Bob, paymentAmt, 1, - ) - if err != nil { - t.Fatalf("unable to create pay reqs: %v", err) - } - - // Send payment to Bob so that a channel update to disk will be - // executed. - sendAndAssertSuccess(t, net.Alice, &routerrpc.SendPaymentRequest{ - PaymentRequest: bobPayReqs[0], - TimeoutSeconds: 60, - FeeLimitSat: 1000000, - }) - - // At this point we want to make sure the channel is opened and not - // pending. - ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout) - defer cancel() - res, errr := net.Bob.ListChannels(ctxt, &lnrpc.ListChannelsRequest{}) - if errr != nil { - t.Fatalf("unable to list bob channels: %v", errr) - } - if len(res.Channels) == 0 { - t.Fatalf("bob list of channels is empty") - } - - // Finally, immediately close the channel. This function will also - // block until the channel is closed and will additionally assert the - // relevant channel closing post conditions. 
- chanPoint := &lnrpc.ChannelPoint{ - FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{ - FundingTxidBytes: pendingUpdate.Txid, - }, - OutputIndex: pendingUpdate.OutputIndex, - } - ctxt, cancel = context.WithTimeout(ctxb, channelCloseTimeout) - defer cancel() - closeChannelAndAssert(ctxt, t, net, net.Alice, chanPoint, false) -} - -// txStr returns the string representation of the channel's funding transaction. -func txStr(chanPoint *lnrpc.ChannelPoint) string { - fundingTxID, err := lnd.GetChanPointFundingTxid(chanPoint) - if err != nil { - return "" - } - cp := wire.OutPoint{ - Hash: *fundingTxID, - Index: chanPoint.OutputIndex, - } - return cp.String() -} - -// expectedChanUpdate houses params we expect a ChannelUpdate to advertise. -type expectedChanUpdate struct { - advertisingNode string - expectedPolicy *lnrpc.RoutingPolicy - chanPoint *lnrpc.ChannelPoint -} - -// calculateMaxHtlc re-implements the RequiredRemoteChannelReserve of the -// funding manager's config, which corresponds to the maximum MaxHTLC value we -// allow users to set when updating a channel policy. -func calculateMaxHtlc(chanCap btcutil.Amount) uint64 { - reserve := lnwire.NewMSatFromSatoshis(chanCap / 100) - max := lnwire.NewMSatFromSatoshis(chanCap) - reserve - return uint64(max) -} - -// waitForChannelUpdate waits for a node to receive the expected channel -// updates. -func waitForChannelUpdate(t *harnessTest, subscription graphSubscription, - expUpdates []expectedChanUpdate) { - - // Create an array indicating which expected channel updates we have - // received. - found := make([]bool, len(expUpdates)) -out: - for { - select { - case graphUpdate := <-subscription.updateChan: - for _, update := range graphUpdate.ChannelUpdates { - // For each expected update, check if it matches - // the update we just received. 
- for i, exp := range expUpdates { - fundingTxStr := txStr(update.ChanPoint) - if fundingTxStr != txStr(exp.chanPoint) { - continue - } - - if update.AdvertisingNode != - exp.advertisingNode { - continue - } - - err := checkChannelPolicy( - update.RoutingPolicy, - exp.expectedPolicy, - ) - if err != nil { - continue - } - - // We got a policy update that matched - // the values and channel point of what - // we expected, mark it as found. - found[i] = true - - // If we have no more channel updates - // we are waiting for, break out of the - // loop. - rem := 0 - for _, f := range found { - if !f { - rem++ - } - } - - if rem == 0 { - break out - } - - // Since we found a match among the - // expected updates, break out of the - // inner loop. - break - } - } - case err := <-subscription.errChan: - t.Fatalf("unable to recv graph update: %v", err) - case <-time.After(20 * time.Second): - t.Fatalf("did not receive channel update") - } - } -} - -// assertNoChannelUpdates ensures that no ChannelUpdates are sent via the -// graphSubscription. This method will block for the provided duration before -// returning to the caller if successful. -func assertNoChannelUpdates(t *harnessTest, subscription graphSubscription, - duration time.Duration) { - - timeout := time.After(duration) - for { - select { - case graphUpdate := <-subscription.updateChan: - if len(graphUpdate.ChannelUpdates) > 0 { - t.Fatalf("received %d channel updates when "+ - "none were expected", - len(graphUpdate.ChannelUpdates)) - } - - case err := <-subscription.errChan: - t.Fatalf("graph subscription failure: %v", err) - - case <-timeout: - // No updates received, success. - return - } - } -} - -// getChannelPolicies queries the channel graph and retrieves the current edge -// policies for the provided channel points. 
-func getChannelPolicies(t *harnessTest, node *lntest.HarnessNode, - advertisingNode string, - chanPoints ...*lnrpc.ChannelPoint) []*lnrpc.RoutingPolicy { - - ctxb := context.Background() - - descReq := &lnrpc.ChannelGraphRequest{ - IncludeUnannounced: true, - } - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - chanGraph, err := node.DescribeGraph(ctxt, descReq) - if err != nil { - t.Fatalf("unable to query for alice's graph: %v", err) - } - - var policies []*lnrpc.RoutingPolicy -out: - for _, chanPoint := range chanPoints { - for _, e := range chanGraph.Edges { - if e.ChanPoint != txStr(chanPoint) { - continue - } - - if e.Node1Pub == advertisingNode { - policies = append(policies, e.Node1Policy) - } else { - policies = append(policies, e.Node2Policy) - } - - continue out - } - - // If we've iterated over all the known edges and we weren't - // able to find this specific one, then we'll fail. - t.Fatalf("did not find edge %v", txStr(chanPoint)) - } - - return policies -} - -// assertChannelPolicy asserts that the passed node's known channel policy for -// the passed chanPoint is consistent with the expected policy values. -func assertChannelPolicy(t *harnessTest, node *lntest.HarnessNode, - advertisingNode string, expectedPolicy *lnrpc.RoutingPolicy, - chanPoints ...*lnrpc.ChannelPoint) { - - policies := getChannelPolicies(t, node, advertisingNode, chanPoints...) - for _, policy := range policies { - err := checkChannelPolicy(policy, expectedPolicy) - if err != nil { - t.Fatalf(err.String()) - } - } -} - -// checkChannelPolicy checks that the policy matches the expected one. 
-func checkChannelPolicy(policy, expectedPolicy *lnrpc.RoutingPolicy) er.R { - if policy.FeeBaseMsat != expectedPolicy.FeeBaseMsat { - return er.Errorf("expected base fee %v, got %v", - expectedPolicy.FeeBaseMsat, policy.FeeBaseMsat) - } - if policy.FeeRateMilliMsat != expectedPolicy.FeeRateMilliMsat { - return er.Errorf("expected fee rate %v, got %v", - expectedPolicy.FeeRateMilliMsat, - policy.FeeRateMilliMsat) - } - if policy.TimeLockDelta != expectedPolicy.TimeLockDelta { - return er.Errorf("expected time lock delta %v, got %v", - expectedPolicy.TimeLockDelta, - policy.TimeLockDelta) - } - if policy.MinHtlc != expectedPolicy.MinHtlc { - return er.Errorf("expected min htlc %v, got %v", - expectedPolicy.MinHtlc, policy.MinHtlc) - } - if policy.MaxHtlcMsat != expectedPolicy.MaxHtlcMsat { - return er.Errorf("expected max htlc %v, got %v", - expectedPolicy.MaxHtlcMsat, policy.MaxHtlcMsat) - } - if policy.Disabled != expectedPolicy.Disabled { - return er.New("edge should be disabled but isn't") - } - - return nil -} - -// testUpdateChannelPolicy tests that policy updates made to a channel -// gets propagated to other nodes in the network. -func testUpdateChannelPolicy(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - const ( - defaultFeeBase = 1000 - defaultFeeRate = 1 - defaultTimeLockDelta = chainreg.DefaultBitcoinTimeLockDelta - defaultMinHtlc = 1000 - ) - defaultMaxHtlc := calculateMaxHtlc(lnd.MaxBtcFundingAmount) - - // Launch notification clients for all nodes, such that we can - // get notified when they discover new channels and updates in the - // graph. - aliceSub := subscribeGraphNotifications(t, ctxb, net.Alice) - defer close(aliceSub.quit) - bobSub := subscribeGraphNotifications(t, ctxb, net.Bob) - defer close(bobSub.quit) - - chanAmt := lnd.MaxBtcFundingAmount - pushAmt := chanAmt / 2 - - // Create a channel Alice->Bob. 
- ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout) - chanPoint := openChannelAndAssert( - ctxt, t, net, net.Alice, net.Bob, - lntest.OpenChannelParams{ - Amt: chanAmt, - PushAmt: pushAmt, - }, - ) - - // We add all the nodes' update channels to a slice, such that we can - // make sure they all receive the expected updates. - graphSubs := []graphSubscription{aliceSub, bobSub} - nodes := []*lntest.HarnessNode{net.Alice, net.Bob} - - // Alice and Bob should see each other's ChannelUpdates, advertising the - // default routing policies. - expectedPolicy := &lnrpc.RoutingPolicy{ - FeeBaseMsat: defaultFeeBase, - FeeRateMilliMsat: defaultFeeRate, - TimeLockDelta: defaultTimeLockDelta, - MinHtlc: defaultMinHtlc, - MaxHtlcMsat: defaultMaxHtlc, - } - - for _, graphSub := range graphSubs { - waitForChannelUpdate( - t, graphSub, - []expectedChanUpdate{ - {net.Alice.PubKeyStr, expectedPolicy, chanPoint}, - {net.Bob.PubKeyStr, expectedPolicy, chanPoint}, - }, - ) - } - - // They should now know about the default policies. - for _, node := range nodes { - assertChannelPolicy( - t, node, net.Alice.PubKeyStr, expectedPolicy, chanPoint, - ) - assertChannelPolicy( - t, node, net.Bob.PubKeyStr, expectedPolicy, chanPoint, - ) - } - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err := net.Alice.WaitForNetworkChannelOpen(ctxt, chanPoint) - if err != nil { - t.Fatalf("alice didn't report channel: %v", err) - } - err = net.Bob.WaitForNetworkChannelOpen(ctxt, chanPoint) - if err != nil { - t.Fatalf("bob didn't report channel: %v", err) - } - - // Create Carol and a new channel Bob->Carol. - carol, err := net.NewNode("Carol", nil) - if err != nil { - t.Fatalf("unable to create new nodes: %v", err) - } - - // Clean up carol's node when the test finishes. 
- defer shutdownAndAssert(net, t, carol) - - carolSub := subscribeGraphNotifications(t, ctxb, carol) - defer close(carolSub.quit) - - graphSubs = append(graphSubs, carolSub) - nodes = append(nodes, carol) - - // Send some coins to Carol that can be used for channel funding. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.SendCoins(ctxt, btcutil.UnitsPerCoin(), carol) - if err != nil { - t.Fatalf("unable to send coins to carol: %v", err) - } - - if err := net.ConnectNodes(ctxb, carol, net.Bob); err != nil { - t.Fatalf("unable to connect dave to alice: %v", err) - } - - // Open the channel Carol->Bob with a custom min_htlc value set. Since - // Carol is opening the channel, she will require Bob to not forward - // HTLCs smaller than this value, and hence he should advertise it as - // part of his ChannelUpdate. - const customMinHtlc = 5000 - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - chanPoint2 := openChannelAndAssert( - ctxt, t, net, carol, net.Bob, - lntest.OpenChannelParams{ - Amt: chanAmt, - PushAmt: pushAmt, - MinHtlc: customMinHtlc, - }, - ) - - expectedPolicyBob := &lnrpc.RoutingPolicy{ - FeeBaseMsat: defaultFeeBase, - FeeRateMilliMsat: defaultFeeRate, - TimeLockDelta: defaultTimeLockDelta, - MinHtlc: customMinHtlc, - MaxHtlcMsat: defaultMaxHtlc, - } - - expectedPolicyCarol := &lnrpc.RoutingPolicy{ - FeeBaseMsat: defaultFeeBase, - FeeRateMilliMsat: defaultFeeRate, - TimeLockDelta: defaultTimeLockDelta, - MinHtlc: defaultMinHtlc, - MaxHtlcMsat: defaultMaxHtlc, - } - - for _, graphSub := range graphSubs { - waitForChannelUpdate( - t, graphSub, - []expectedChanUpdate{ - {net.Bob.PubKeyStr, expectedPolicyBob, chanPoint2}, - {carol.PubKeyStr, expectedPolicyCarol, chanPoint2}, - }, - ) - } - - // Check that all nodes now know about the updated policies. 
- for _, node := range nodes { - assertChannelPolicy( - t, node, net.Bob.PubKeyStr, expectedPolicyBob, - chanPoint2, - ) - assertChannelPolicy( - t, node, carol.PubKeyStr, expectedPolicyCarol, - chanPoint2, - ) - } - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.Alice.WaitForNetworkChannelOpen(ctxt, chanPoint2) - if err != nil { - t.Fatalf("alice didn't report channel: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.Bob.WaitForNetworkChannelOpen(ctxt, chanPoint2) - if err != nil { - t.Fatalf("bob didn't report channel: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = carol.WaitForNetworkChannelOpen(ctxt, chanPoint2) - if err != nil { - t.Fatalf("carol didn't report channel: %v", err) - } - - // First we'll try to send a payment from Alice to Carol with an amount - // less than the min_htlc value required by Carol. This payment should - // fail, as the channel Bob->Carol cannot carry HTLCs this small. - payAmt := btcutil.Amount(4) - invoice := &lnrpc.Invoice{ - Memo: "testing", - Value: int64(payAmt), - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - resp, errr := carol.AddInvoice(ctxt, invoice) - if errr != nil { - t.Fatalf("unable to add invoice: %v", errr) - } - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = completePaymentRequests( - ctxt, net.Alice, net.Alice.RouterClient, - []string{resp.PaymentRequest}, true, - ) - - // Alice knows about the channel policy of Carol and should therefore - // not be able to find a path during routing. - expErr := lnrpc.PaymentFailureReason_FAILURE_REASON_NO_ROUTE - if strings.Contains(err.String(), expErr.String()) { - t.Fatalf("expected %v, instead got %v", expErr, err) - } - - // Now we try to send a payment over the channel with a value too low - // to be accepted. First we query for a route to route a payment of - // 5000 mSAT, as this is accepted. 
- payAmt = btcutil.Amount(5) - routesReq := &lnrpc.QueryRoutesRequest{ - PubKey: carol.PubKeyStr, - Amt: int64(payAmt), - FinalCltvDelta: defaultTimeLockDelta, - } - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - routes, errr := net.Alice.QueryRoutes(ctxt, routesReq) - if errr != nil { - t.Fatalf("unable to get route: %v", errr) - } - - if len(routes.Routes) != 1 { - t.Fatalf("expected to find 1 route, got %v", len(routes.Routes)) - } - - // We change the route to carry a payment of 4000 mSAT instead of 5000 - // mSAT. - payAmt = btcutil.Amount(4) - amtSat := int64(payAmt) - amtMSat := int64(lnwire.NewMSatFromSatoshis(payAmt)) - routes.Routes[0].Hops[0].AmtToForward = amtSat - routes.Routes[0].Hops[0].AmtToForwardMsat = amtMSat - routes.Routes[0].Hops[1].AmtToForward = amtSat - routes.Routes[0].Hops[1].AmtToForwardMsat = amtMSat - - // Send the payment with the modified value. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - alicePayStream, errr := net.Alice.SendToRoute(ctxt) - if errr != nil { - t.Fatalf("unable to create payment stream for alice: %v", errr) - } - sendReq := &lnrpc.SendToRouteRequest{ - PaymentHash: resp.RHash, - Route: routes.Routes[0], - } - - errr = alicePayStream.Send(sendReq) - if errr != nil { - t.Fatalf("unable to send payment: %v", errr) - } - - // We expect this payment to fail, and that the min_htlc value is - // communicated back to us, since the attempted HTLC value was too low. - sendResp, errr := alicePayStream.Recv() - if errr != nil { - t.Fatalf("unable to send payment: %v", errr) - } - - // Expected as part of the error message. - substrs := []string{ - "AmountBelowMinimum", - "HtlcMinimumMsat: (lnwire.MilliSatoshi) 5000 mSAT", - } - for _, s := range substrs { - if !strings.Contains(sendResp.PaymentError, s) { - t.Fatalf("expected error to contain \"%v\", instead "+ - "got %v", s, sendResp.PaymentError) - } - } - - // Make sure sending using the original value succeeds. 
- payAmt = btcutil.Amount(5) - amtSat = int64(payAmt) - amtMSat = int64(lnwire.NewMSatFromSatoshis(payAmt)) - routes.Routes[0].Hops[0].AmtToForward = amtSat - routes.Routes[0].Hops[0].AmtToForwardMsat = amtMSat - routes.Routes[0].Hops[1].AmtToForward = amtSat - routes.Routes[0].Hops[1].AmtToForwardMsat = amtMSat - - sendReq = &lnrpc.SendToRouteRequest{ - PaymentHash: resp.RHash, - Route: routes.Routes[0], - } - - errr = alicePayStream.Send(sendReq) - if errr != nil { - t.Fatalf("unable to send payment: %v", errr) - } - - sendResp, errr = alicePayStream.Recv() - if errr != nil { - t.Fatalf("unable to send payment: %v", errr) - } - - if sendResp.PaymentError != "" { - t.Fatalf("expected payment to succeed, instead got %v", - sendResp.PaymentError) - } - - // With our little cluster set up, we'll update the fees and the max htlc - // size for the Bob side of the Alice->Bob channel, and make sure - // all nodes learn about it. - baseFee := int64(1500) - feeRate := int64(12) - timeLockDelta := uint32(66) - maxHtlc := uint64(500000) - - expectedPolicy = &lnrpc.RoutingPolicy{ - FeeBaseMsat: baseFee, - FeeRateMilliMsat: testFeeBase * feeRate, - TimeLockDelta: timeLockDelta, - MinHtlc: defaultMinHtlc, - MaxHtlcMsat: maxHtlc, - } - - req := &lnrpc.PolicyUpdateRequest{ - BaseFeeMsat: baseFee, - FeeRate: float64(feeRate), - TimeLockDelta: timeLockDelta, - MaxHtlcMsat: maxHtlc, - Scope: &lnrpc.PolicyUpdateRequest_ChanPoint{ - ChanPoint: chanPoint, - }, - } - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if _, err := net.Bob.UpdateChannelPolicy(ctxt, req); err != nil { - t.Fatalf("unable to get alice's balance: %v", err) - } - - // Wait for all nodes to have seen the policy update done by Bob. - for _, graphSub := range graphSubs { - waitForChannelUpdate( - t, graphSub, - []expectedChanUpdate{ - {net.Bob.PubKeyStr, expectedPolicy, chanPoint}, - }, - ) - } - - // Check that all nodes now know about Bob's updated policy. 
- for _, node := range nodes { - assertChannelPolicy( - t, node, net.Bob.PubKeyStr, expectedPolicy, chanPoint, - ) - } - - // Now that all nodes have received the new channel update, we'll try - // to send a payment from Alice to Carol to ensure that Alice has - // internalized this fee update. This shouldn't affect the route that - // Alice takes though: we updated the Alice -> Bob channel and she - // doesn't pay for transit over that channel as it's direct. - // Note that the payment amount is >= the min_htlc value for the - // channel Bob->Carol, so it should successfully be forwarded. - payAmt = btcutil.Amount(5) - invoice = &lnrpc.Invoice{ - Memo: "testing", - Value: int64(payAmt), - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - resp, errr = carol.AddInvoice(ctxt, invoice) - if err != nil { - t.Fatalf("unable to add invoice: %v", err) - } - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = completePaymentRequests( - ctxt, net.Alice, net.Alice.RouterClient, - []string{resp.PaymentRequest}, true, - ) - if err != nil { - t.Fatalf("unable to send payment: %v", err) - } - - // We'll now open a channel from Alice directly to Carol. - if err := net.ConnectNodes(ctxb, net.Alice, carol); err != nil { - t.Fatalf("unable to connect dave to alice: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - chanPoint3 := openChannelAndAssert( - ctxt, t, net, net.Alice, carol, - lntest.OpenChannelParams{ - Amt: chanAmt, - PushAmt: pushAmt, - }, - ) - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.Alice.WaitForNetworkChannelOpen(ctxt, chanPoint3) - if err != nil { - t.Fatalf("alice didn't report channel: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = carol.WaitForNetworkChannelOpen(ctxt, chanPoint3) - if err != nil { - t.Fatalf("bob didn't report channel: %v", err) - } - - // Make a global update, and check that both channels' new policies get - // propagated. 
- baseFee = int64(800) - feeRate = int64(123) - timeLockDelta = uint32(22) - maxHtlc *= 2 - - expectedPolicy.FeeBaseMsat = baseFee - expectedPolicy.FeeRateMilliMsat = testFeeBase * feeRate - expectedPolicy.TimeLockDelta = timeLockDelta - expectedPolicy.MaxHtlcMsat = maxHtlc - - req = &lnrpc.PolicyUpdateRequest{ - BaseFeeMsat: baseFee, - FeeRate: float64(feeRate), - TimeLockDelta: timeLockDelta, - MaxHtlcMsat: maxHtlc, - } - req.Scope = &lnrpc.PolicyUpdateRequest_Global{} - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - _, errr = net.Alice.UpdateChannelPolicy(ctxt, req) - if errr != nil { - t.Fatalf("unable to update alice's channel policy: %v", errr) - } - - // Wait for all nodes to have seen the policy updates for both of - // Alice's channels. - for _, graphSub := range graphSubs { - waitForChannelUpdate( - t, graphSub, - []expectedChanUpdate{ - {net.Alice.PubKeyStr, expectedPolicy, chanPoint}, - {net.Alice.PubKeyStr, expectedPolicy, chanPoint3}, - }, - ) - } - - // And finally check that all nodes remembers the policy update they - // received. - for _, node := range nodes { - assertChannelPolicy( - t, node, net.Alice.PubKeyStr, expectedPolicy, - chanPoint, chanPoint3, - ) - } - - // Close the channels. - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Alice, chanPoint, false) - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Bob, chanPoint2, false) - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Alice, chanPoint3, false) -} - -// waitForNodeBlockHeight queries the node for its current block height until -// it reaches the passed height. 
-func waitForNodeBlockHeight(ctx context.Context, node *lntest.HarnessNode, - height int32) er.R { - var predErr er.R - err := wait.Predicate(func() bool { - ctxt, _ := context.WithTimeout(ctx, 10*time.Second) - info, err := node.GetInfo(ctxt, &lnrpc.GetInfoRequest{}) - if err != nil { - predErr = er.E(err) - return false - } - - if int32(info.BlockHeight) != height { - predErr = er.Errorf("expected block height to "+ - "be %v, was %v", height, info.BlockHeight) - return false - } - return true - }, 15*time.Second) - if err != nil { - return predErr - } - return nil -} - -// assertMinerBlockHeightDelta ensures that tempMiner is 'delta' blocks ahead -// of miner. -func assertMinerBlockHeightDelta(t *harnessTest, - miner, tempMiner *rpctest.Harness, delta int32) { - - // Ensure the chain lengths are what we expect. - var predErr er.R - err := wait.Predicate(func() bool { - _, tempMinerHeight, err := tempMiner.Node.GetBestBlock() - if err != nil { - predErr = er.Errorf("unable to get current "+ - "blockheight %v", err) - return false - } - - _, minerHeight, err := miner.Node.GetBestBlock() - if err != nil { - predErr = er.Errorf("unable to get current "+ - "blockheight %v", err) - return false - } - - if tempMinerHeight != minerHeight+delta { - predErr = er.Errorf("expected new miner(%d) to be %d "+ - "blocks ahead of original miner(%d)", - tempMinerHeight, delta, minerHeight) - return false - } - return true - }, time.Second*15) - if err != nil { - t.Fatalf(predErr.String()) - } -} - -// testOpenChannelAfterReorg tests that in the case where we have an open -// channel where the funding tx gets reorged out, the channel will no -// longer be present in the node's routing table. -func testOpenChannelAfterReorg(net *lntest.NetworkHarness, t *harnessTest) { - // Skip test for neutrino, as we cannot disconnect the miner at will. - // TODO(halseth): remove when either can disconnect at will, or restart - // node with connection to new miner. 
- if net.BackendCfg.Name() == "neutrino" { - t.Skipf("skipping reorg test for neutrino backend") - } - - var ( - ctxb = context.Background() - temp = "temp" - ) - - // Set up a new miner that we can use to cause a reorg. - tempLogDir := fmt.Sprintf("%s/.tempminerlogs", lntest.GetLogDir()) - logFilename := "output-open_channel_reorg-temp_miner.log" - tempMiner, tempMinerCleanUp, err := lntest.NewMiner( - tempLogDir, logFilename, - harnessNetParams, &rpcclient.NotificationHandlers{}, - ) - util.RequireNoErr(t.t, err, "failed to create temp miner") - defer func() { - util.RequireNoErr( - t.t, tempMinerCleanUp(), - "failed to clean up temp miner", - ) - }() - - // Setup the temp miner - util.RequireNoErr( - t.t, tempMiner.SetUp(false, 0), "unable to set up mining node", - ) - - // We start by connecting the new miner to our original miner, - // such that it will sync to our original chain. - err = net.Miner.Node.Node( - btcjson.NConnect, tempMiner.P2PAddress(), &temp, - ) - if err != nil { - t.Fatalf("unable to remove node: %v", err) - } - nodeSlice := []*rpctest.Harness{net.Miner, tempMiner} - if err := rpctest.JoinNodes(nodeSlice, rpctest.Blocks); err != nil { - t.Fatalf("unable to join node on blocks: %v", err) - } - - // The two miners should be on the same blockheight. - assertMinerBlockHeightDelta(t, net.Miner, tempMiner, 0) - - // We disconnect the two miners, such that we can mine two different - // chains and can cause a reorg later. 
- err = net.Miner.Node.Node( - btcjson.NDisconnect, tempMiner.P2PAddress(), &temp, - ) - if err != nil { - t.Fatalf("unable to remove node: %v", err) - } - - // Create a new channel that requires 1 confs before it's considered - // open, then broadcast the funding transaction - chanAmt := lnd.MaxBtcFundingAmount - pushAmt := btcutil.Amount(0) - ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout) - pendingUpdate, err := net.OpenPendingChannel(ctxt, net.Alice, net.Bob, - chanAmt, pushAmt) - if err != nil { - t.Fatalf("unable to open channel: %v", err) - } - - // Wait for miner to have seen the funding tx. The temporary miner is - // disconnected, and won't see the transaction. - _, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) - if err != nil { - t.Fatalf("failed to find funding tx in mempool: %v", err) - } - - // At this point, the channel's funding transaction will have been - // broadcast, but not confirmed, and the channel should be pending. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - assertNumOpenChannelsPending(ctxt, t, net.Alice, net.Bob, 1) - - fundingTxID, err := chainhash.NewHash(pendingUpdate.Txid) - if err != nil { - t.Fatalf("unable to convert funding txid into chainhash.Hash:"+ - " %v", err) - } - - // We now cause a fork, by letting our original miner mine 10 blocks, - // and our new miner mine 15. This will also confirm our pending - // channel on the original miner's chain, which should be considered - // open. - block := mineBlocks(t, net, 10, 1)[0] - assertTxInBlock(t, block, fundingTxID) - if _, err := tempMiner.Node.Generate(15); err != nil { - t.Fatalf("unable to generate blocks: %v", err) - } - - // Ensure the chain lengths are what we expect, with the temp miner - // being 5 blocks ahead. - assertMinerBlockHeightDelta(t, net.Miner, tempMiner, 5) - - // Wait for Alice to sync to the original miner's chain. 
- _, minerHeight, err := net.Miner.Node.GetBestBlock() - if err != nil { - t.Fatalf("unable to get current blockheight %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = waitForNodeBlockHeight(ctxt, net.Alice, minerHeight) - if err != nil { - t.Fatalf("unable to sync to chain: %v", err) - } - - chanPoint := &lnrpc.ChannelPoint{ - FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{ - FundingTxidBytes: pendingUpdate.Txid, - }, - OutputIndex: pendingUpdate.OutputIndex, - } - - // Ensure channel is no longer pending. - assertNumOpenChannelsPending(ctxt, t, net.Alice, net.Bob, 0) - - // Wait for Alice and Bob to recognize and advertise the new channel - // generated above. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.Alice.WaitForNetworkChannelOpen(ctxt, chanPoint) - if err != nil { - t.Fatalf("alice didn't advertise channel before "+ - "timeout: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.Bob.WaitForNetworkChannelOpen(ctxt, chanPoint) - if err != nil { - t.Fatalf("bob didn't advertise channel before "+ - "timeout: %v", err) - } - - // Alice should now have 1 edge in her graph. - req := &lnrpc.ChannelGraphRequest{ - IncludeUnannounced: true, - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - chanGraph, errr := net.Alice.DescribeGraph(ctxt, req) - if errr != nil { - t.Fatalf("unable to query for alice's routing table: %v", errr) - } - - numEdges := len(chanGraph.Edges) - if numEdges != 1 { - t.Fatalf("expected to find one edge in the graph, found %d", - numEdges) - } - - // Now we disconnect Alice's chain backend from the original miner, and - // connect the two miners together. Since the temporary miner knows - // about a longer chain, both miners should sync to that chain. - err = net.BackendCfg.DisconnectMiner() - if err != nil { - t.Fatalf("unable to remove node: %v", err) - } - - // Connecting to the temporary miner should now cause our original - // chain to be re-orged out. 
- err = net.Miner.Node.Node( - btcjson.NConnect, tempMiner.P2PAddress(), &temp, - ) - if err != nil { - t.Fatalf("unable to remove node: %v", err) - } - - nodes := []*rpctest.Harness{tempMiner, net.Miner} - if err := rpctest.JoinNodes(nodes, rpctest.Blocks); err != nil { - t.Fatalf("unable to join node on blocks: %v", err) - } - - // Once again they should be on the same chain. - assertMinerBlockHeightDelta(t, net.Miner, tempMiner, 0) - - // Now we disconnect the two miners, and connect our original miner to - // our chain backend once again. - err = net.Miner.Node.Node( - btcjson.NDisconnect, tempMiner.P2PAddress(), &temp, - ) - if err != nil { - t.Fatalf("unable to remove node: %v", err) - } - - err = net.BackendCfg.ConnectMiner() - if err != nil { - t.Fatalf("unable to remove node: %v", err) - } - - // This should have caused a reorg, and Alice should sync to the longer - // chain, where the funding transaction is not confirmed. - _, tempMinerHeight, err := tempMiner.Node.GetBestBlock() - if err != nil { - t.Fatalf("unable to get current blockheight %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = waitForNodeBlockHeight(ctxt, net.Alice, tempMinerHeight) - if err != nil { - t.Fatalf("unable to sync to chain: %v", err) - } - - // Since the fundingtx was reorged out, Alice should now have no edges - // in her graph. 
- req = &lnrpc.ChannelGraphRequest{ - IncludeUnannounced: true, - } - - var predErr er.R - err = wait.Predicate(func() bool { - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - chanGraph, errr = net.Alice.DescribeGraph(ctxt, req) - if errr != nil { - predErr = er.Errorf("unable to query for alice's routing table: %v", errr) - return false - } - - numEdges = len(chanGraph.Edges) - if numEdges != 0 { - predErr = er.Errorf("expected to find no edge in the graph, found %d", - numEdges) - return false - } - return true - }, time.Second*15) - if err != nil { - t.Fatalf(predErr.String()) - } - - // Cleanup by mining the funding tx again, then closing the channel. - block = mineBlocks(t, net, 1, 1)[0] - assertTxInBlock(t, block, fundingTxID) - - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeReorgedChannelAndAssert(ctxt, t, net, net.Alice, chanPoint, false) -} - -// testDisconnectingTargetPeer performs a test which disconnects Alice-peer from -// Bob-peer and then re-connects them again. We expect Alice to be able to -// disconnect at any point. -func testDisconnectingTargetPeer(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - // We'll start both nodes with a high backoff so that they don't - // reconnect automatically during our test. - args := []string{ - "--minbackoff=1m", - "--maxbackoff=1m", - } - - alice, err := net.NewNode("Alice", args) - if err != nil { - t.Fatalf("unable to create new node: %v", err) - } - defer shutdownAndAssert(net, t, alice) - - bob, err := net.NewNode("Bob", args) - if err != nil { - t.Fatalf("unable to create new node: %v", err) - } - defer shutdownAndAssert(net, t, bob) - - // Start by connecting Alice and Bob with no channels. - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, alice, bob); err != nil { - t.Fatalf("unable to connect Alice's peer to Bob's: err %v", err) - } - - // Check existing connection. 
- assertNumConnections(t, alice, bob, 1) - - // Give Alice some coins so she can fund a channel. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.SendCoins(ctxt, btcutil.UnitsPerCoin(), alice) - if err != nil { - t.Fatalf("unable to send coins to alice: %v", err) - } - - chanAmt := lnd.MaxBtcFundingAmount - pushAmt := btcutil.Amount(0) - - // Create a new channel that requires 1 confs before it's considered - // open, then broadcast the funding transaction - const numConfs = 1 - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - pendingUpdate, err := net.OpenPendingChannel( - ctxt, alice, bob, chanAmt, pushAmt, - ) - if err != nil { - t.Fatalf("unable to open channel: %v", err) - } - - // At this point, the channel's funding transaction will have been - // broadcast, but not confirmed. Alice and Bob's nodes should reflect - // this when queried via RPC. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - assertNumOpenChannelsPending(ctxt, t, alice, bob, 1) - - // Disconnect Alice-peer from Bob-peer and get error causes by one - // pending channel with detach node is existing. - if err := net.DisconnectNodes(ctxt, alice, bob); err != nil { - t.Fatalf("Bob's peer was disconnected from Alice's"+ - " while one pending channel is existing: err %v", err) - } - - time.Sleep(time.Millisecond * 300) - - // Assert that the connection was torn down. - assertNumConnections(t, alice, bob, 0) - - fundingTxID, err := chainhash.NewHash(pendingUpdate.Txid) - if err != nil { - t.Fatalf("unable to convert funding txid into chainhash.Hash:"+ - " %v", err) - } - - // Mine a block, then wait for Alice's node to notify us that the - // channel has been opened. The funding transaction should be found - // within the newly mined block. - block := mineBlocks(t, net, numConfs, 1)[0] - assertTxInBlock(t, block, fundingTxID) - - // At this point, the channel should be fully opened and there should be - // no pending channels remaining for either node. 
- time.Sleep(time.Millisecond * 300) - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - - assertNumOpenChannelsPending(ctxt, t, alice, bob, 0) - - // Reconnect the nodes so that the channel can become active. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, alice, bob); err != nil { - t.Fatalf("unable to connect Alice's peer to Bob's: err %v", err) - } - - // The channel should be listed in the peer information returned by both - // peers. - outPoint := wire.OutPoint{ - Hash: *fundingTxID, - Index: pendingUpdate.OutputIndex, - } - - // Check both nodes to ensure that the channel is ready for operation. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.AssertChannelExists(ctxt, alice, &outPoint); err != nil { - t.Fatalf("unable to assert channel existence: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.AssertChannelExists(ctxt, bob, &outPoint); err != nil { - t.Fatalf("unable to assert channel existence: %v", err) - } - - // Disconnect Alice-peer from Bob-peer and get error causes by one - // active channel with detach node is existing. - if err := net.DisconnectNodes(ctxt, alice, bob); err != nil { - t.Fatalf("Bob's peer was disconnected from Alice's"+ - " while one active channel is existing: err %v", err) - } - - // Check existing connection. - assertNumConnections(t, alice, bob, 0) - - // Reconnect both nodes before force closing the channel. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, alice, bob); err != nil { - t.Fatalf("unable to connect Alice's peer to Bob's: err %v", err) - } - - // Finally, immediately close the channel. This function will also block - // until the channel is closed and will additionally assert the relevant - // channel closing post conditions. 
- chanPoint := &lnrpc.ChannelPoint{ - FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{ - FundingTxidBytes: pendingUpdate.Txid, - }, - OutputIndex: pendingUpdate.OutputIndex, - } - - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, alice, chanPoint, true) - - // Disconnect Alice-peer from Bob-peer without getting error about - // existing channels. - if err := net.DisconnectNodes(ctxt, alice, bob); err != nil { - t.Fatalf("unable to disconnect Bob's peer from Alice's: err %v", - err) - } - - // Check zero peer connections. - assertNumConnections(t, alice, bob, 0) - - // Finally, re-connect both nodes. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, alice, bob); err != nil { - t.Fatalf("unable to connect Alice's peer to Bob's: err %v", err) - } - - // Check existing connection. - assertNumConnections(t, alice, net.Bob, 1) - - // Cleanup by mining the force close and sweep transaction. - cleanupForceClose(t, net, alice, chanPoint) -} - -// testFundingPersistence is intended to ensure that the Funding Manager -// persists the state of new channels prior to broadcasting the channel's -// funding transaction. This ensures that the daemon maintains an up-to-date -// representation of channels if the system is restarted or disconnected. -// testFundingPersistence mirrors testBasicChannelFunding, but adds restarts -// and checks for the state of channels with unconfirmed funding transactions. -func testChannelFundingPersistence(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - chanAmt := lnd.MaxBtcFundingAmount - pushAmt := btcutil.Amount(0) - - // As we need to create a channel that requires more than 1 - // confirmation before it's open, with the current set of defaults, - // we'll need to create a new node instance. 
- const numConfs = 5 - carolArgs := []string{fmt.Sprintf("--bitcoin.defaultchanconfs=%v", numConfs)} - carol, err := net.NewNode("Carol", carolArgs) - if err != nil { - t.Fatalf("unable to create new node: %v", err) - } - - // Clean up carol's node when the test finishes. - defer shutdownAndAssert(net, t, carol) - - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, net.Alice, carol); err != nil { - t.Fatalf("unable to connect alice to carol: %v", err) - } - - // Create a new channel that requires 5 confs before it's considered - // open, then broadcast the funding transaction - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - pendingUpdate, err := net.OpenPendingChannel(ctxt, net.Alice, carol, - chanAmt, pushAmt) - if err != nil { - t.Fatalf("unable to open channel: %v", err) - } - - // At this point, the channel's funding transaction will have been - // broadcast, but not confirmed. Alice and Bob's nodes should reflect - // this when queried via RPC. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - assertNumOpenChannelsPending(ctxt, t, net.Alice, carol, 1) - - // Restart both nodes to test that the appropriate state has been - // persisted and that both nodes recover gracefully. - if err := net.RestartNode(net.Alice, nil); err != nil { - t.Fatalf("Node restart failed: %v", err) - } - if err := net.RestartNode(carol, nil); err != nil { - t.Fatalf("Node restart failed: %v", err) - } - - fundingTxID, err := chainhash.NewHash(pendingUpdate.Txid) - if err != nil { - t.Fatalf("unable to convert funding txid into chainhash.Hash:"+ - " %v", err) - } - fundingTxStr := fundingTxID.String() - - // Mine a block, then wait for Alice's node to notify us that the - // channel has been opened. The funding transaction should be found - // within the newly mined block. - block := mineBlocks(t, net, 1, 1)[0] - assertTxInBlock(t, block, fundingTxID) - - // Get the height that our transaction confirmed at. 
- _, height, err := net.Miner.Node.GetBestBlock() - util.RequireNoErr(t.t, err, "could not get best block") - - // Restart both nodes to test that the appropriate state has been - // persisted and that both nodes recover gracefully. - if err := net.RestartNode(net.Alice, nil); err != nil { - t.Fatalf("Node restart failed: %v", err) - } - if err := net.RestartNode(carol, nil); err != nil { - t.Fatalf("Node restart failed: %v", err) - } - - // The following block ensures that after both nodes have restarted, - // they have reconnected before the execution of the next test. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.EnsureConnected(ctxt, net.Alice, carol); err != nil { - t.Fatalf("peers unable to reconnect after restart: %v", err) - } - - // Next, mine enough blocks s.t the channel will open with a single - // additional block mined. - if _, err := net.Miner.Node.Generate(3); err != nil { - t.Fatalf("unable to mine blocks: %v", err) - } - - // Assert that our wallet has our opening transaction with a label - // that does not have a channel ID set yet, because we have not - // reached our required confirmations. - tx := findTxAtHeight(ctxt, t, height, fundingTxStr, net.Alice) - - // At this stage, we expect the transaction to be labelled, but not with - // our channel ID because our transaction has not yet confirmed. - label := labels.MakeLabel(labels.LabelTypeChannelOpen, nil) - require.Equal(t.t, label, tx.Label, "open channel label wrong") - - // Both nodes should still show a single channel as pending. - time.Sleep(time.Second * 1) - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - assertNumOpenChannelsPending(ctxt, t, net.Alice, carol, 1) - - // Finally, mine the last block which should mark the channel as open. 
- if _, err := net.Miner.Node.Generate(1); err != nil { - t.Fatalf("unable to mine blocks: %v", err) - } - - // At this point, the channel should be fully opened and there should - // be no pending channels remaining for either node. - time.Sleep(time.Second * 1) - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - assertNumOpenChannelsPending(ctxt, t, net.Alice, carol, 0) - - // The channel should be listed in the peer information returned by - // both peers. - outPoint := wire.OutPoint{ - Hash: *fundingTxID, - Index: pendingUpdate.OutputIndex, - } - - // Re-lookup our transaction in the block that it confirmed in. - tx = findTxAtHeight(ctxt, t, height, fundingTxStr, net.Alice) - - // Create an additional check for our channel assertion that will - // check that our label is as expected. - check := func(channel *lnrpc.Channel) { - shortChanID := lnwire.NewShortChanIDFromInt( - channel.ChanId, - ) - - label := labels.MakeLabel( - labels.LabelTypeChannelOpen, &shortChanID, - ) - require.Equal(t.t, label, tx.Label, - "open channel label not updated") - } - - // Check both nodes to ensure that the channel is ready for operation. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.AssertChannelExists(ctxt, net.Alice, &outPoint, check) - if err != nil { - t.Fatalf("unable to assert channel existence: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.AssertChannelExists(ctxt, carol, &outPoint); err != nil { - t.Fatalf("unable to assert channel existence: %v", err) - } - - // Finally, immediately close the channel. This function will also - // block until the channel is closed and will additionally assert the - // relevant channel closing post conditions. 
- chanPoint := &lnrpc.ChannelPoint{ - FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{ - FundingTxidBytes: pendingUpdate.Txid, - }, - OutputIndex: pendingUpdate.OutputIndex, - } - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Alice, chanPoint, false) -} - -// findTxAtHeight gets all of the transactions that a node's wallet has a record -// of at the target height, and finds and returns the tx with the target txid, -// failing if it is not found. -func findTxAtHeight(ctx context.Context, t *harnessTest, height int32, - target string, node *lntest.HarnessNode) *lnrpc.Transaction { - - txns, err := node.LightningClient.GetTransactions( - ctx, &lnrpc.GetTransactionsRequest{ - StartHeight: height, - EndHeight: height, - }, - ) - require.NoError(t.t, err, "could not get transactions") - - for _, tx := range txns.Transactions { - if tx.TxHash == target { - return tx - } - } - - return nil -} - -// testChannelBalance creates a new channel between Alice and Bob, then checks -// channel balance to be equal amount specified while creation of channel. -func testChannelBalance(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - // Open a channel with 0.16 BTC between Alice and Bob, ensuring the - // channel has been opened properly. - amount := lnd.MaxBtcFundingAmount - - // Creates a helper closure to be used below which asserts the proper - // response to a channel balance RPC. 
- checkChannelBalance := func(node *lntest.HarnessNode, - local, remote btcutil.Amount) { - - expectedResponse := &lnrpc.ChannelBalanceResponse{ - LocalBalance: &lnrpc.Amount{ - Sat: uint64(local), - Msat: uint64(lnwire.NewMSatFromSatoshis(local)), - }, - RemoteBalance: &lnrpc.Amount{ - Sat: uint64(remote), - Msat: uint64(lnwire.NewMSatFromSatoshis( - remote, - )), - }, - UnsettledLocalBalance: &lnrpc.Amount{}, - UnsettledRemoteBalance: &lnrpc.Amount{}, - PendingOpenLocalBalance: &lnrpc.Amount{}, - PendingOpenRemoteBalance: &lnrpc.Amount{}, - // Deprecated fields. - Balance: int64(local), - } - assertChannelBalanceResp(t, node, expectedResponse) - } - - // Before beginning, make sure alice and bob are connected. - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - if err := net.EnsureConnected(ctxt, net.Alice, net.Bob); err != nil { - t.Fatalf("unable to connect alice and bob: %v", err) - } - - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - chanPoint := openChannelAndAssert( - ctxt, t, net, net.Alice, net.Bob, - lntest.OpenChannelParams{ - Amt: amount, - }, - ) - - // Wait for both Alice and Bob to recognize this new channel. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err := net.Alice.WaitForNetworkChannelOpen(ctxt, chanPoint) - if err != nil { - t.Fatalf("alice didn't advertise channel before "+ - "timeout: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.Bob.WaitForNetworkChannelOpen(ctxt, chanPoint) - if err != nil { - t.Fatalf("bob didn't advertise channel before "+ - "timeout: %v", err) - } - - cType, err := channelCommitType(net.Alice, chanPoint) - if err != nil { - t.Fatalf("unable to get channel type: %v", err) - } - - // As this is a single funder channel, Alice's balance should be - // exactly 0.5 BTC since now state transitions have taken place yet. - checkChannelBalance(net.Alice, amount-cType.calcStaticFee(0), 0) - - // Ensure Bob currently has no available balance within the channel. 
- checkChannelBalance(net.Bob, 0, amount-cType.calcStaticFee(0)) - - // Finally close the channel between Alice and Bob, asserting that the - // channel has been properly closed on-chain. - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Alice, chanPoint, false) -} - -// testChannelUnsettledBalance will test that the UnsettledBalance field -// is updated according to the number of Pending Htlcs. -// Alice will send Htlcs to Carol while she is in hodl mode. This will result -// in a build of pending Htlcs. We expect the channels unsettled balance to -// equal the sum of all the Pending Htlcs. -func testChannelUnsettledBalance(net *lntest.NetworkHarness, t *harnessTest) { - const chanAmt = btcutil.Amount(1000000) - ctxb := context.Background() - - // Creates a helper closure to be used below which asserts the proper - // response to a channel balance RPC. - checkChannelBalance := func(node *lntest.HarnessNode, - local, remote, unsettledLocal, unsettledRemote btcutil.Amount) { - - expectedResponse := &lnrpc.ChannelBalanceResponse{ - LocalBalance: &lnrpc.Amount{ - Sat: uint64(local), - Msat: uint64(lnwire.NewMSatFromSatoshis( - local, - )), - }, - RemoteBalance: &lnrpc.Amount{ - Sat: uint64(remote), - Msat: uint64(lnwire.NewMSatFromSatoshis( - remote, - )), - }, - UnsettledLocalBalance: &lnrpc.Amount{ - Sat: uint64(unsettledLocal), - Msat: uint64(lnwire.NewMSatFromSatoshis( - unsettledLocal, - )), - }, - UnsettledRemoteBalance: &lnrpc.Amount{ - Sat: uint64(unsettledRemote), - Msat: uint64(lnwire.NewMSatFromSatoshis( - unsettledRemote, - )), - }, - PendingOpenLocalBalance: &lnrpc.Amount{}, - PendingOpenRemoteBalance: &lnrpc.Amount{}, - // Deprecated fields. - Balance: int64(local), - } - assertChannelBalanceResp(t, node, expectedResponse) - } - - // Create carol in hodl mode. 
- carol, err := net.NewNode("Carol", []string{"--hodl.exit-settle"}) - if err != nil { - t.Fatalf("unable to create new nodes: %v", err) - } - defer shutdownAndAssert(net, t, carol) - - // Connect Alice to Carol. - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxb, net.Alice, carol); err != nil { - t.Fatalf("unable to connect alice to carol: %v", err) - } - - // Open a channel between Alice and Carol. - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - chanPointAlice := openChannelAndAssert( - ctxt, t, net, net.Alice, carol, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - - // Wait for Alice and Carol to receive the channel edge from the - // funding manager. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.Alice.WaitForNetworkChannelOpen(ctxt, chanPointAlice) - if err != nil { - t.Fatalf("alice didn't see the alice->carol channel before "+ - "timeout: %v", err) - } - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = carol.WaitForNetworkChannelOpen(ctxt, chanPointAlice) - if err != nil { - t.Fatalf("alice didn't see the alice->carol channel before "+ - "timeout: %v", err) - } - - cType, err := channelCommitType(net.Alice, chanPointAlice) - util.RequireNoErr(t.t, err, "unable to get channel type") - - // Check alice's channel balance, which should have zero remote and zero - // pending balance. - checkChannelBalance(net.Alice, chanAmt-cType.calcStaticFee(0), 0, 0, 0) - - // Check carol's channel balance, which should have zero local and zero - // pending balance. - checkChannelBalance(carol, 0, chanAmt-cType.calcStaticFee(0), 0, 0) - - // Channel should be ready for payments. - const ( - payAmt = 100 - numInvoices = 6 - ) - - // Simulateneously send numInvoices payments from Alice to Carol. 
- carolPubKey := carol.PubKey[:] - errChan := make(chan er.R) - for i := 0; i < numInvoices; i++ { - go func() { - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - _, errr := net.Alice.RouterClient.SendPaymentV2(ctxt, - &routerrpc.SendPaymentRequest{ - Dest: carolPubKey, - Amt: int64(payAmt), - PaymentHash: makeFakePayHash(t), - FinalCltvDelta: chainreg.DefaultBitcoinTimeLockDelta, - TimeoutSeconds: 60, - FeeLimitMsat: noFeeLimitMsat, - }) - - if err != nil { - errChan <- er.E(errr) - } - }() - } - - // Test that the UnsettledBalance for both Alice and Carol - // is equal to the amount of invoices * payAmt. - var unsettledErr er.R - nodes := []*lntest.HarnessNode{net.Alice, carol} - err = wait.Predicate(func() bool { - // There should be a number of PendingHtlcs equal - // to the amount of Invoices sent. - unsettledErr = assertNumActiveHtlcs(nodes, numInvoices) - if unsettledErr != nil { - return false - } - - // Set the amount expected for the Unsettled Balance for - // this channel. - expectedBalance := numInvoices * payAmt - - // Check each nodes UnsettledBalance field. - for _, node := range nodes { - // Get channel info for the node. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - chanInfo, err := getChanInfo(ctxt, node) - if err != nil { - unsettledErr = err - return false - } - - // Check that UnsettledBalance is what we expect. - if int(chanInfo.UnsettledBalance) != expectedBalance { - unsettledErr = er.Errorf("unsettled balance failed "+ - "expected: %v, received: %v", expectedBalance, - chanInfo.UnsettledBalance) - return false - } - } - - return true - }, defaultTimeout) - if err != nil { - t.Fatalf("unsettled balace error: %v", unsettledErr) - } - - // Check for payment errors. - select { - case err := <-errChan: - t.Fatalf("payment error: %v", err) - default: - } - - // Check alice's channel balance, which should have a remote unsettled - // balance that equals to the amount of invoices * payAmt. The remote - // balance remains zero. 
- aliceLocal := chanAmt - cType.calcStaticFee(0) - numInvoices*payAmt - checkChannelBalance(net.Alice, aliceLocal, 0, 0, numInvoices*payAmt) - - // Check carol's channel balance, which should have a local unsettled - // balance that equals to the amount of invoices * payAmt. The local - // balance remains zero. - checkChannelBalance(carol, 0, aliceLocal, numInvoices*payAmt, 0) - - // Force and assert the channel closure. - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Alice, chanPointAlice, true) - - // Cleanup by mining the force close and sweep transaction. - cleanupForceClose(t, net, net.Alice, chanPointAlice) -} - -// findForceClosedChannel searches a pending channel response for a particular -// channel, returning the force closed channel upon success. -func findForceClosedChannel(pendingChanResp *lnrpc.PendingChannelsResponse, - op *wire.OutPoint) (*lnrpc.PendingChannelsResponse_ForceClosedChannel, er.R) { - - for _, forceClose := range pendingChanResp.PendingForceClosingChannels { - if forceClose.Channel.ChannelPoint == op.String() { - return forceClose, nil - } - } - - return nil, er.New("channel not marked as force closed") -} - -// findWaitingCloseChannel searches a pending channel response for a particular -// channel, returning the waiting close channel upon success. 
-func findWaitingCloseChannel(pendingChanResp *lnrpc.PendingChannelsResponse, - op *wire.OutPoint) (*lnrpc.PendingChannelsResponse_WaitingCloseChannel, er.R) { - - for _, waitingClose := range pendingChanResp.WaitingCloseChannels { - if waitingClose.Channel.ChannelPoint == op.String() { - return waitingClose, nil - } - } - - return nil, er.New("channel not marked as waiting close") -} - -func checkCommitmentMaturity( - forceClose *lnrpc.PendingChannelsResponse_ForceClosedChannel, - maturityHeight uint32, blocksTilMaturity int32) er.R { - - if forceClose.MaturityHeight != maturityHeight { - return er.Errorf("expected commitment maturity height to be "+ - "%d, found %d instead", maturityHeight, - forceClose.MaturityHeight) - } - if forceClose.BlocksTilMaturity != blocksTilMaturity { - return er.Errorf("expected commitment blocks til maturity to "+ - "be %d, found %d instead", blocksTilMaturity, - forceClose.BlocksTilMaturity) - } - - return nil -} - -// checkForceClosedChannelNumHtlcs verifies that a force closed channel has the -// proper number of htlcs. -func checkPendingChannelNumHtlcs( - forceClose *lnrpc.PendingChannelsResponse_ForceClosedChannel, - expectedNumHtlcs int) er.R { - - if len(forceClose.PendingHtlcs) != expectedNumHtlcs { - return er.Errorf("expected force closed channel to have %d "+ - "pending htlcs, found %d instead", expectedNumHtlcs, - len(forceClose.PendingHtlcs)) - } - - return nil -} - -// checkNumForceClosedChannels checks that a pending channel response has the -// expected number of force closed channels. 
-func checkNumForceClosedChannels(pendingChanResp *lnrpc.PendingChannelsResponse, - expectedNumChans int) er.R { - - if len(pendingChanResp.PendingForceClosingChannels) != expectedNumChans { - return er.Errorf("expected to find %d force closed channels, "+ - "got %d", expectedNumChans, - len(pendingChanResp.PendingForceClosingChannels)) - } - - return nil -} - -// checkNumWaitingCloseChannels checks that a pending channel response has the -// expected number of channels waiting for closing tx to confirm. -func checkNumWaitingCloseChannels(pendingChanResp *lnrpc.PendingChannelsResponse, - expectedNumChans int) er.R { - - if len(pendingChanResp.WaitingCloseChannels) != expectedNumChans { - return er.Errorf("expected to find %d channels waiting "+ - "closure, got %d", expectedNumChans, - len(pendingChanResp.WaitingCloseChannels)) - } - - return nil -} - -// checkPendingHtlcStageAndMaturity uniformly tests all pending htlc's belonging -// to a force closed channel, testing for the expected stage number, blocks till -// maturity, and the maturity height. -func checkPendingHtlcStageAndMaturity( - forceClose *lnrpc.PendingChannelsResponse_ForceClosedChannel, - stage, maturityHeight uint32, blocksTillMaturity int32) er.R { - - for _, pendingHtlc := range forceClose.PendingHtlcs { - if pendingHtlc.Stage != stage { - return er.Errorf("expected pending htlc to be stage "+ - "%d, found %d", stage, pendingHtlc.Stage) - } - if pendingHtlc.MaturityHeight != maturityHeight { - return er.Errorf("expected pending htlc maturity "+ - "height to be %d, instead has %d", - maturityHeight, pendingHtlc.MaturityHeight) - } - if pendingHtlc.BlocksTilMaturity != blocksTillMaturity { - return er.Errorf("expected pending htlc blocks til "+ - "maturity to be %d, instead has %d", - blocksTillMaturity, - pendingHtlc.BlocksTilMaturity) - } - } - - return nil -} - -// padCLTV is a small helper function that pads a cltv value with a block -// padding. 
-func padCLTV(cltv uint32) uint32 { - return cltv + uint32(routing.BlockPadding) -} - -// testChannelForceClosure performs a test to exercise the behavior of "force" -// closing a channel or unilaterally broadcasting the latest local commitment -// state on-chain. The test creates a new channel between Alice and Carol, then -// force closes the channel after some cursory assertions. Within the test, a -// total of 3 + n transactions will be broadcast, representing the commitment -// transaction, a transaction sweeping the local CSV delayed output, a -// transaction sweeping the CSV delayed 2nd-layer htlcs outputs, and n -// htlc timeout transactions, where n is the number of payments Alice attempted -// to send to Carol. This test includes several restarts to ensure that the -// transaction output states are persisted throughout the forced closure -// process. -// -// TODO(roasbeef): also add an unsettled HTLC before force closing. -func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { - // We'll test the scenario for some of the commitment types, to ensure - // outputs can be swept. - commitTypes := []commitType{ - commitTypeLegacy, - commitTypeAnchors, - } - - for _, channelType := range commitTypes { - testName := fmt.Sprintf("committype=%v", channelType) - - channelType := channelType - success := t.t.Run(testName, func(t *testing.T) { - ht := newHarnessTest(t, net) - - args := channelType.Args() - alice, err := net.NewNode("Alice", args) - if err != nil { - t.Fatalf("unable to create new node: %v", err) - } - defer shutdownAndAssert(net, ht, alice) - - // Since we'd like to test failure scenarios with - // outstanding htlcs, we'll introduce another node into - // our test network: Carol. - carolArgs := []string{"--hodl.exit-settle"} - carolArgs = append(carolArgs, args...) 
- carol, err := net.NewNode("Carol", carolArgs) - if err != nil { - t.Fatalf("unable to create new nodes: %v", err) - } - defer shutdownAndAssert(net, ht, carol) - - // Each time, we'll send Alice new set of coins in - // order to fund the channel. - ctxt, _ := context.WithTimeout( - context.Background(), defaultTimeout, - ) - err = net.SendCoins( - ctxt, btcutil.UnitsPerCoin(), alice, - ) - if err != nil { - t.Fatalf("unable to send coins to Alice: %v", - err) - } - - // Also give Carol some coins to allow her to sweep her - // anchor. - err = net.SendCoins( - ctxt, btcutil.UnitsPerCoin(), carol, - ) - if err != nil { - t.Fatalf("unable to send coins to Alice: %v", - err) - } - - channelForceClosureTest( - net, ht, alice, carol, channelType, - ) - }) - if !success { - return - } - } -} - -func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest, - alice, carol *lntest.HarnessNode, channelType commitType) { - - ctxb := context.Background() - - const ( - chanAmt = btcutil.Amount(10e6) - pushAmt = btcutil.Amount(5e6) - paymentAmt = 100000 - numInvoices = 6 - ) - - const commitFeeRate = 20000 - net.SetFeeEstimate(commitFeeRate) - - // TODO(roasbeef): should check default value in config here - // instead, or make delay a param - defaultCLTV := uint32(chainreg.DefaultBitcoinTimeLockDelta) - - // We must let Alice have an open channel before she can send a node - // announcement, so we open a channel with Carol, - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, alice, carol); err != nil { - t.Fatalf("unable to connect alice to carol: %v", err) - } - - // Before we start, obtain Carol's current wallet balance, we'll check - // to ensure that at the end of the force closure by Alice, Carol - // recognizes his new on-chain output. 
- carolBalReq := &lnrpc.WalletBalanceRequest{} - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - carolBalResp, errr := carol.WalletBalance(ctxt, carolBalReq) - if errr != nil { - t.Fatalf("unable to get carol's balance: %v", errr) - } - - carolStartingBalance := carolBalResp.ConfirmedBalance - - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - chanPoint := openChannelAndAssert( - ctxt, t, net, alice, carol, - lntest.OpenChannelParams{ - Amt: chanAmt, - PushAmt: pushAmt, - }, - ) - - // Wait for Alice and Carol to receive the channel edge from the - // funding manager. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err := alice.WaitForNetworkChannelOpen(ctxt, chanPoint) - if errr != nil { - t.Fatalf("alice didn't see the alice->carol channel before "+ - "timeout: %v", errr) - } - err = carol.WaitForNetworkChannelOpen(ctxt, chanPoint) - if errr != nil { - t.Fatalf("alice didn't see the alice->carol channel before "+ - "timeout: %v", errr) - } - - // Send payments from Alice to Carol, since Carol is htlchodl mode, the - // htlc outputs should be left unsettled, and should be swept by the - // utxo nursery. - carolPubKey := carol.PubKey[:] - for i := 0; i < numInvoices; i++ { - ctx, cancel := context.WithCancel(ctxb) - defer cancel() - - _, err := alice.RouterClient.SendPaymentV2( - ctx, - &routerrpc.SendPaymentRequest{ - Dest: carolPubKey, - Amt: int64(paymentAmt), - PaymentHash: makeFakePayHash(t), - FinalCltvDelta: chainreg.DefaultBitcoinTimeLockDelta, - TimeoutSeconds: 60, - FeeLimitMsat: noFeeLimitMsat, - }, - ) - if err != nil { - t.Fatalf("unable to send alice htlc: %v", err) - } - } - - // Once the HTLC has cleared, all the nodes n our mini network should - // show that the HTLC has been locked in. 
- nodes := []*lntest.HarnessNode{alice, carol} - var predErr er.R - err = wait.Predicate(func() bool { - predErr = assertNumActiveHtlcs(nodes, numInvoices) - if predErr != nil { - return false - } - return true - }, time.Second*15) - if err != nil { - t.Fatalf("htlc mismatch: %v", predErr) - } - - // Fetch starting height of this test so we can compute the block - // heights we expect certain events to take place. - _, curHeight, err := net.Miner.Node.GetBestBlock() - if err != nil { - t.Fatalf("unable to get best block height") - } - - // Using the current height of the chain, derive the relevant heights - // for incubating two-stage htlcs. - var ( - startHeight = uint32(curHeight) - commCsvMaturityHeight = startHeight + 1 + defaultCSV - htlcExpiryHeight = padCLTV(startHeight + defaultCLTV) - htlcCsvMaturityHeight = padCLTV(startHeight + defaultCLTV + 1 + defaultCSV) - ) - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - aliceChan, err := getChanInfo(ctxt, alice) - if err != nil { - t.Fatalf("unable to get alice's channel info: %v", err) - } - if aliceChan.NumUpdates == 0 { - t.Fatalf("alice should see at least one update to her channel") - } - - // Now that the channel is open and we have unsettled htlcs, immediately - // execute a force closure of the channel. This will also assert that - // the commitment transaction was immediately broadcast in order to - // fulfill the force closure request. - const actualFeeRate = 30000 - net.SetFeeEstimate(actualFeeRate) - - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - _, closingTxID, err := net.CloseChannel(ctxt, alice, chanPoint, true) - if err != nil { - t.Fatalf("unable to execute force channel closure: %v", err) - } - - // Now that the channel has been force closed, it should show up in the - // PendingChannels RPC under the waiting close section. 
- pendingChansRequest := &lnrpc.PendingChannelsRequest{} - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, errr := alice.PendingChannels(ctxt, pendingChansRequest) - if err != nil { - t.Fatalf("unable to query for pending channels: %v", err) - } - err = checkNumWaitingCloseChannels(pendingChanResp, 1) - if err != nil { - t.Fatalf(err.String()) - } - - // Compute the outpoint of the channel, which we will use repeatedly to - // locate the pending channel information in the rpc responses. - txid, err := lnd.GetChanPointFundingTxid(chanPoint) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } - op := wire.OutPoint{ - Hash: *txid, - Index: chanPoint.OutputIndex, - } - - waitingClose, err := findWaitingCloseChannel(pendingChanResp, &op) - if err != nil { - t.Fatalf(err.String()) - } - - // Immediately after force closing, all of the funds should be in limbo. - if waitingClose.LimboBalance == 0 { - t.Fatalf("all funds should still be in limbo") - } - - // Create a map of outpoints to expected resolutions for alice and carol - // which we will add reports to as we sweep outputs. - var ( - aliceReports = make(map[string]*lnrpc.Resolution) - carolReports = make(map[string]*lnrpc.Resolution) - ) - - // The several restarts in this test are intended to ensure that when a - // channel is force-closed, the UTXO nursery has persisted the state of - // the channel in the closure process and will recover the correct state - // when the system comes back on line. This restart tests state - // persistence at the beginning of the process, when the commitment - // transaction has been broadcast but not yet confirmed in a block. - if err := net.RestartNode(alice, nil); err != nil { - t.Fatalf("Node restart failed: %v", err) - } - - // Mine a block which should confirm the commitment transaction - // broadcast as a result of the force closure. If there are anchors, we - // also expect the anchor sweep tx to be in the mempool. 
- expectedTxes := 1 - expectedFeeRate := commitFeeRate - if channelType == commitTypeAnchors { - expectedTxes = 2 - expectedFeeRate = actualFeeRate - } - - sweepTxns, err := getNTxsFromMempool( - net.Miner.Node, expectedTxes, minerMempoolTimeout, - ) - if err != nil { - t.Fatalf("failed to find commitment in miner mempool: %v", err) - } - - // Verify fee rate of the commitment tx plus anchor if present. - var totalWeight, totalFee int64 - for _, tx := range sweepTxns { - utx := btcutil.NewTx(tx) - totalWeight += blockchain.GetTransactionWeight(utx) - - fee, err := getTxFee(net.Miner.Node, tx) - util.RequireNoErr(t.t, err) - totalFee += int64(fee) - } - feeRate := totalFee * 1000 / totalWeight - - // Allow some deviation because weight estimates during tx generation - // are estimates. - require.InEpsilon(t.t, expectedFeeRate, feeRate, 0.005) - - // Find alice's commit sweep and anchor sweep (if present) in the - // mempool. - aliceCloseTx := waitingClose.Commitments.LocalTxid - _, aliceAnchor := findCommitAndAnchor( - t, net, sweepTxns, aliceCloseTx, - ) - - // If we expect anchors, add alice's anchor to our expected set of - // reports. - if channelType == commitTypeAnchors { - aliceReports[aliceAnchor.OutPoint.String()] = &lnrpc.Resolution{ - ResolutionType: lnrpc.ResolutionType_ANCHOR, - Outcome: lnrpc.ResolutionOutcome_CLAIMED, - SweepTxid: aliceAnchor.SweepTx, - Outpoint: &lnrpc.OutPoint{ - TxidBytes: aliceAnchor.OutPoint.Hash[:], - TxidStr: aliceAnchor.OutPoint.Hash.String(), - OutputIndex: aliceAnchor.OutPoint.Index, - }, - AmountSat: uint64(anchorSize), - } - } - - if _, err := net.Miner.Node.Generate(1); err != nil { - t.Fatalf("unable to generate block: %v", err) - } - - // Now that the commitment has been confirmed, the channel should be - // marked as force closed. 
- err = wait.NoError(func() er.R { - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, errr := alice.PendingChannels( - ctxt, pendingChansRequest, - ) - if errr != nil { - return er.Errorf("unable to query for pending "+ - "channels: %v", errr) - } - - err = checkNumForceClosedChannels(pendingChanResp, 1) - if err != nil { - return err - } - - forceClose, err := findForceClosedChannel(pendingChanResp, &op) - if err != nil { - return err - } - - // Now that the channel has been force closed, it should now - // have the height and number of blocks to confirm populated. - err = checkCommitmentMaturity( - forceClose, commCsvMaturityHeight, int32(defaultCSV), - ) - if err != nil { - return err - } - - // None of our outputs have been swept, so they should all be in - // limbo. For anchors, we expect the anchor amount to be - // recovered. - if forceClose.LimboBalance == 0 { - return er.New("all funds should still be in limbo") - } - expectedRecoveredBalance := int64(0) - if channelType == commitTypeAnchors { - expectedRecoveredBalance = anchorSize - } - if forceClose.RecoveredBalance != expectedRecoveredBalance { - return er.New("no funds should yet be shown as recovered") - } - - return nil - }, 15*time.Second) - if err != nil { - t.Fatalf(predErr.String()) - } - - // The following restart is intended to ensure that outputs from the - // force close commitment transaction have been persisted once the - // transaction has been confirmed, but before the outputs are spendable - // (the "kindergarten" bucket.) - if err := net.RestartNode(alice, nil); err != nil { - t.Fatalf("Node restart failed: %v", err) - } - - // Carol's sweep tx should be in the mempool already, as her output is - // not timelocked. If there are anchors, we also expect Carol's anchor - // sweep now. 
- sweepTxns, err = getNTxsFromMempool( - net.Miner.Node, expectedTxes, minerMempoolTimeout, - ) - if err != nil { - t.Fatalf("failed to find Carol's sweep in miner mempool: %v", - err) - } - - // We look up the sweep txns we have found in mempool and create - // expected resolutions for carol. - carolCommit, carolAnchor := findCommitAndAnchor( - t, net, sweepTxns, aliceCloseTx, - ) - - // If we have anchors, add an anchor resolution for carol. - if channelType == commitTypeAnchors { - carolReports[carolAnchor.OutPoint.String()] = &lnrpc.Resolution{ - ResolutionType: lnrpc.ResolutionType_ANCHOR, - Outcome: lnrpc.ResolutionOutcome_CLAIMED, - SweepTxid: carolAnchor.SweepTx, - AmountSat: anchorSize, - Outpoint: &lnrpc.OutPoint{ - TxidBytes: carolAnchor.OutPoint.Hash[:], - TxidStr: carolAnchor.OutPoint.Hash.String(), - OutputIndex: carolAnchor.OutPoint.Index, - }, - } - } - - // Currently within the codebase, the default CSV is 4 relative blocks. - // For the persistence test, we generate two blocks, then trigger - // a restart and then generate the final block that should trigger - // the creation of the sweep transaction. - if _, err := net.Miner.Node.Generate(defaultCSV - 2); err != nil { - t.Fatalf("unable to mine blocks: %v", err) - } - - // The following restart checks to ensure that outputs in the - // kindergarten bucket are persisted while waiting for the required - // number of confirmations to be reported. - if err := net.RestartNode(alice, nil); err != nil { - t.Fatalf("Node restart failed: %v", err) - } - - // Alice should see the channel in her set of pending force closed - // channels with her funds still in limbo. 
- var aliceBalance int64 - err = wait.NoError(func() er.R { - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, errr := alice.PendingChannels( - ctxt, pendingChansRequest, - ) - if errr != nil { - return er.Errorf("unable to query for pending "+ - "channels: %v", errr) - } - - err = checkNumForceClosedChannels(pendingChanResp, 1) - if err != nil { - return err - } - - forceClose, err := findForceClosedChannel( - pendingChanResp, &op, - ) - if err != nil { - return err - } - - // Make a record of the balances we expect for alice and carol. - aliceBalance = forceClose.Channel.LocalBalance - - // At this point, the nursery should show that the commitment - // output has 2 block left before its CSV delay expires. In - // total, we have mined exactly defaultCSV blocks, so the htlc - // outputs should also reflect that this many blocks have - // passed. - err = checkCommitmentMaturity( - forceClose, commCsvMaturityHeight, 2, - ) - if err != nil { - return err - } - - // All funds should still be shown in limbo. - if forceClose.LimboBalance == 0 { - return er.New("all funds should still be in " + - "limbo") - } - expectedRecoveredBalance := int64(0) - if channelType == commitTypeAnchors { - expectedRecoveredBalance = anchorSize - } - if forceClose.RecoveredBalance != expectedRecoveredBalance { - return er.New("no funds should yet be shown " + - "as recovered") - } - - return nil - }, 15*time.Second) - if err != nil { - t.Fatalf(err.String()) - } - - // Generate an additional block, which should cause the CSV delayed - // output from the commitment txn to expire. - if _, err := net.Miner.Node.Generate(1); err != nil { - t.Fatalf("unable to mine blocks: %v", err) - } - - // At this point, the CSV will expire in the next block, meaning that - // the sweeping transaction should now be broadcast. So we fetch the - // node's mempool to ensure it has been properly broadcast. 
- sweepingTXID, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) - if err != nil { - t.Fatalf("failed to get sweep tx from mempool: %v", err) - } - - // Fetch the sweep transaction, all input it's spending should be from - // the commitment transaction which was broadcast on-chain. - sweepTx, err := net.Miner.Node.GetRawTransaction(sweepingTXID) - if err != nil { - t.Fatalf("unable to fetch sweep tx: %v", err) - } - for _, txIn := range sweepTx.MsgTx().TxIn { - if !closingTxID.IsEqual(&txIn.PreviousOutPoint.Hash) { - t.Fatalf("sweep transaction not spending from commit "+ - "tx %v, instead spending %v", - closingTxID, txIn.PreviousOutPoint) - } - } - - // We expect a resolution which spends our commit output. - output := sweepTx.MsgTx().TxIn[0].PreviousOutPoint - aliceReports[output.String()] = &lnrpc.Resolution{ - ResolutionType: lnrpc.ResolutionType_COMMIT, - Outcome: lnrpc.ResolutionOutcome_CLAIMED, - SweepTxid: sweepingTXID.String(), - Outpoint: &lnrpc.OutPoint{ - TxidBytes: output.Hash[:], - TxidStr: output.Hash.String(), - OutputIndex: output.Index, - }, - AmountSat: uint64(aliceBalance), - } - - carolReports[carolCommit.OutPoint.String()] = &lnrpc.Resolution{ - ResolutionType: lnrpc.ResolutionType_COMMIT, - Outcome: lnrpc.ResolutionOutcome_CLAIMED, - Outpoint: &lnrpc.OutPoint{ - TxidBytes: carolCommit.OutPoint.Hash[:], - TxidStr: carolCommit.OutPoint.Hash.String(), - OutputIndex: carolCommit.OutPoint.Index, - }, - AmountSat: uint64(pushAmt), - SweepTxid: carolCommit.SweepTx, - } - - // Check that we can find the commitment sweep in our set of known - // sweeps, using the simple transaction id ListSweeps output. - assertSweepFound(ctxb, t.t, alice, sweepingTXID.String(), false) - - // Restart Alice to ensure that she resumes watching the finalized - // commitment sweep txid. 
- if err := net.RestartNode(alice, nil); err != nil { - t.Fatalf("Node restart failed: %v", err) - } - - // Next, we mine an additional block which should include the sweep - // transaction as the input scripts and the sequence locks on the - // inputs should be properly met. - blockHash, err := net.Miner.Node.Generate(1) - if err != nil { - t.Fatalf("unable to generate block: %v", err) - } - block, err := net.Miner.Node.GetBlock(blockHash[0]) - if err != nil { - t.Fatalf("unable to get block: %v", err) - } - - assertTxInBlock(t, block, sweepTx.Hash()) - - // Update current height - _, curHeight, err = net.Miner.Node.GetBestBlock() - if err != nil { - t.Fatalf("unable to get best block height") - } - - err = wait.Predicate(func() bool { - // Now that the commit output has been fully swept, check to see - // that the channel remains open for the pending htlc outputs. - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, errr := alice.PendingChannels( - ctxt, pendingChansRequest, - ) - if errr != nil { - predErr = er.Errorf("unable to query for pending "+ - "channels: %v", errr) - return false - } - - err = checkNumForceClosedChannels(pendingChanResp, 1) - if err != nil { - predErr = err - return false - } - - // The commitment funds will have been recovered after the - // commit txn was included in the last block. The htlc funds - // will be shown in limbo. 
- forceClose, err := findForceClosedChannel(pendingChanResp, &op) - if err != nil { - predErr = err - return false - } - predErr = checkPendingChannelNumHtlcs(forceClose, numInvoices) - if predErr != nil { - return false - } - predErr = checkPendingHtlcStageAndMaturity( - forceClose, 1, htlcExpiryHeight, - int32(htlcExpiryHeight)-curHeight, - ) - if predErr != nil { - return false - } - if forceClose.LimboBalance == 0 { - predErr = er.Errorf("expected funds in limbo, found 0") - return false - } - - return true - }, 15*time.Second) - if err != nil { - t.Fatalf(predErr.String()) - } - - // Compute the height preceding that which will cause the htlc CLTV - // timeouts will expire. The outputs entered at the same height as the - // output spending from the commitment txn, so we must deduct the number - // of blocks we have generated since adding it to the nursery, and take - // an additional block off so that we end up one block shy of the expiry - // height, and add the block padding. - cltvHeightDelta := padCLTV(defaultCLTV - defaultCSV - 1 - 1) - - // Advance the blockchain until just before the CLTV expires, nothing - // exciting should have happened during this time. - blockHash, err = net.Miner.Node.Generate(cltvHeightDelta) - if err != nil { - t.Fatalf("unable to generate block: %v", err) - } - - // We now restart Alice, to ensure that she will broadcast the presigned - // htlc timeout txns after the delay expires after experiencing a while - // waiting for the htlc outputs to incubate. - if err := net.RestartNode(alice, nil); err != nil { - t.Fatalf("Node restart failed: %v", err) - } - - // Alice should now see the channel in her set of pending force closed - // channels with one pending HTLC. 
- err = wait.NoError(func() er.R { - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, errr := alice.PendingChannels( - ctxt, pendingChansRequest, - ) - if errr != nil { - return er.Errorf("unable to query for pending "+ - "channels: %v", errr) - } - - err = checkNumForceClosedChannels(pendingChanResp, 1) - if err != nil { - return err - } - - forceClose, err := findForceClosedChannel( - pendingChanResp, &op, - ) - if err != nil { - return err - } - - // We should now be at the block just before the utxo nursery - // will attempt to broadcast the htlc timeout transactions. - err = checkPendingChannelNumHtlcs(forceClose, numInvoices) - if err != nil { - return err - } - err = checkPendingHtlcStageAndMaturity( - forceClose, 1, htlcExpiryHeight, 1, - ) - if err != nil { - return err - } - - // Now that our commitment confirmation depth has been - // surpassed, we should now see a non-zero recovered balance. - // All htlc outputs are still left in limbo, so it should be - // non-zero as well. - if forceClose.LimboBalance == 0 { - return er.New("htlc funds should still be in " + - "limbo") - } - - return nil - }, 15*time.Second) - if err != nil { - t.Fatalf(err.String()) - } - - // Now, generate the block which will cause Alice to broadcast the - // presigned htlc timeout txns. - blockHash, err = net.Miner.Node.Generate(1) - if err != nil { - t.Fatalf("unable to generate block: %v", err) - } - - // Since Alice had numInvoices (6) htlcs extended to Carol before force - // closing, we expect Alice to broadcast an htlc timeout txn for each - // one. Wait for them all to show up in the mempool. - htlcTxIDs, err := waitForNTxsInMempool(net.Miner.Node, numInvoices, - minerMempoolTimeout) - if err != nil { - t.Fatalf("unable to find htlc timeout txns in mempool: %v", err) - } - - // Retrieve each htlc timeout txn from the mempool, and ensure it is - // well-formed. 
This entails verifying that each only spends from - // output, and that that output is from the commitment txn. We do not - // the sweeper check for these timeout transactions because they are - // not swept by the sweeper; the nursery broadcasts the pre-signed - // transaction. - var htlcLessFees uint64 - for _, htlcTxID := range htlcTxIDs { - // Fetch the sweep transaction, all input it's spending should - // be from the commitment transaction which was broadcast - // on-chain. - htlcTx, err := net.Miner.Node.GetRawTransaction(htlcTxID) - if err != nil { - t.Fatalf("unable to fetch sweep tx: %v", err) - } - // Ensure the htlc transaction only has one input. - inputs := htlcTx.MsgTx().TxIn - if len(inputs) != 1 { - t.Fatalf("htlc transaction should only have one txin, "+ - "has %d", len(htlcTx.MsgTx().TxIn)) - } - // Ensure the htlc transaction is spending from the commitment - // transaction. - txIn := inputs[0] - if !closingTxID.IsEqual(&txIn.PreviousOutPoint.Hash) { - t.Fatalf("htlc transaction not spending from commit "+ - "tx %v, instead spending %v", - closingTxID, txIn.PreviousOutPoint) - } - - outputs := htlcTx.MsgTx().TxOut - if len(outputs) != 1 { - t.Fatalf("htlc transaction should only have one "+ - "txout, has: %v", len(outputs)) - } - - // For each htlc timeout transaction, we expect a resolver - // report recording this on chain resolution for both alice and - // carol. - outpoint := txIn.PreviousOutPoint - resolutionOutpoint := &lnrpc.OutPoint{ - TxidBytes: outpoint.Hash[:], - TxidStr: outpoint.Hash.String(), - OutputIndex: outpoint.Index, - } - - // We expect alice to have a timeout tx resolution with an - // amount equal to the payment amount. 
- aliceReports[outpoint.String()] = &lnrpc.Resolution{ - ResolutionType: lnrpc.ResolutionType_OUTGOING_HTLC, - Outcome: lnrpc.ResolutionOutcome_FIRST_STAGE, - SweepTxid: htlcTx.Hash().String(), - Outpoint: resolutionOutpoint, - AmountSat: uint64(paymentAmt), - } - - // We expect carol to have a resolution with an incoming htlc - // timeout which reflects the full amount of the htlc. It has - // no spend tx, because carol stops monitoring the htlc once - // it has timed out. - carolReports[outpoint.String()] = &lnrpc.Resolution{ - ResolutionType: lnrpc.ResolutionType_INCOMING_HTLC, - Outcome: lnrpc.ResolutionOutcome_TIMEOUT, - SweepTxid: "", - Outpoint: resolutionOutpoint, - AmountSat: uint64(paymentAmt), - } - - // We record the htlc amount less fees here, so that we know - // what value to expect for the second stage of our htlc - // htlc resolution. - htlcLessFees = uint64(outputs[0].Value) - } - - // With the htlc timeout txns still in the mempool, we restart Alice to - // verify that she can resume watching the htlc txns she broadcasted - // before crashing. - if err := net.RestartNode(alice, nil); err != nil { - t.Fatalf("Node restart failed: %v", err) - } - - // Generate a block that mines the htlc timeout txns. Doing so now - // activates the 2nd-stage CSV delayed outputs. - blockHash, err = net.Miner.Node.Generate(1) - if err != nil { - t.Fatalf("unable to generate block: %v", err) - } - - // Alice is restarted here to ensure that she promptly moved the crib - // outputs to the kindergarten bucket after the htlc timeout txns were - // confirmed. - if err := net.RestartNode(alice, nil); err != nil { - t.Fatalf("Node restart failed: %v", err) - } - - // Advance the chain until just before the 2nd-layer CSV delays expire. 
- blockHash, err = net.Miner.Node.Generate(defaultCSV - 1) - if err != nil { - t.Fatalf("unable to generate block: %v", err) - } - - // Restart Alice to ensure that she can recover from a failure before - // having graduated the htlc outputs in the kindergarten bucket. - if err := net.RestartNode(alice, nil); err != nil { - t.Fatalf("Node restart failed: %v", err) - } - - // Now that the channel has been fully swept, it should no longer show - // incubated, check to see that Alice's node still reports the channel - // as pending force closed. - err = wait.Predicate(func() bool { - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, errr = alice.PendingChannels( - ctxt, pendingChansRequest, - ) - if errr != nil { - predErr = er.Errorf("unable to query for pending "+ - "channels: %v", errr) - return false - } - err = checkNumForceClosedChannels(pendingChanResp, 1) - if err != nil { - predErr = err - return false - } - - forceClose, err := findForceClosedChannel(pendingChanResp, &op) - if err != nil { - predErr = err - return false - } - - if forceClose.LimboBalance == 0 { - predErr = er.Errorf("htlc funds should still be in limbo") - return false - } - - predErr = checkPendingChannelNumHtlcs(forceClose, numInvoices) - if predErr != nil { - return false - } - - return true - }, 15*time.Second) - if err != nil { - t.Fatalf(predErr.String()) - } - - // Generate a block that causes Alice to sweep the htlc outputs in the - // kindergarten bucket. - blockHash, err = net.Miner.Node.Generate(1) - if err != nil { - t.Fatalf("unable to generate block: %v", err) - } - - // Wait for the single sweep txn to appear in the mempool. - htlcSweepTxID, err := waitForTxInMempool( - net.Miner.Node, minerMempoolTimeout, - ) - if err != nil { - t.Fatalf("failed to get sweep tx from mempool: %v", err) - } - - // Construct a map of the already confirmed htlc timeout txids, that - // will count the number of times each is spent by the sweep txn. 
We - // prepopulate it in this way so that we can later detect if we are - // spending from an output that was not a confirmed htlc timeout txn. - var htlcTxIDSet = make(map[chainhash.Hash]int) - for _, htlcTxID := range htlcTxIDs { - htlcTxIDSet[*htlcTxID] = 0 - } - - // Fetch the htlc sweep transaction from the mempool. - htlcSweepTx, err := net.Miner.Node.GetRawTransaction(htlcSweepTxID) - if err != nil { - t.Fatalf("unable to fetch sweep tx: %v", err) - } - // Ensure the htlc sweep transaction only has one input for each htlc - // Alice extended before force closing. - if len(htlcSweepTx.MsgTx().TxIn) != numInvoices { - t.Fatalf("htlc transaction should have %d txin, "+ - "has %d", numInvoices, len(htlcSweepTx.MsgTx().TxIn)) - } - outputCount := len(htlcSweepTx.MsgTx().TxOut) - if outputCount != 1 { - t.Fatalf("htlc sweep transaction should have one output, has: "+ - "%v", outputCount) - } - - // Ensure that each output spends from exactly one htlc timeout txn. - for _, txIn := range htlcSweepTx.MsgTx().TxIn { - outpoint := txIn.PreviousOutPoint.Hash - // Check that the input is a confirmed htlc timeout txn. - if _, ok := htlcTxIDSet[outpoint]; !ok { - t.Fatalf("htlc sweep output not spending from htlc "+ - "tx, instead spending output %v", outpoint) - } - // Increment our count for how many times this output was spent. - htlcTxIDSet[outpoint]++ - - // Check that each is only spent once. - if htlcTxIDSet[outpoint] > 1 { - t.Fatalf("htlc sweep tx has multiple spends from "+ - "outpoint %v", outpoint) - } - - // Since we have now swept our htlc timeout tx, we expect to - // have timeout resolutions for each of our htlcs. 
- output := txIn.PreviousOutPoint - aliceReports[output.String()] = &lnrpc.Resolution{ - ResolutionType: lnrpc.ResolutionType_OUTGOING_HTLC, - Outcome: lnrpc.ResolutionOutcome_TIMEOUT, - SweepTxid: htlcSweepTx.Hash().String(), - Outpoint: &lnrpc.OutPoint{ - TxidBytes: output.Hash[:], - TxidStr: output.Hash.String(), - OutputIndex: output.Index, - }, - AmountSat: htlcLessFees, - } - } - - // Check that we can find the htlc sweep in our set of sweeps using - // the verbose output of the listsweeps output. - assertSweepFound(ctxb, t.t, alice, htlcSweepTx.Hash().String(), true) - - // The following restart checks to ensure that the nursery store is - // storing the txid of the previously broadcast htlc sweep txn, and that - // it begins watching that txid after restarting. - if err := net.RestartNode(alice, nil); err != nil { - t.Fatalf("Node restart failed: %v", err) - } - - // Now that the channel has been fully swept, it should no longer show - // incubated, check to see that Alice's node still reports the channel - // as pending force closed. - err = wait.Predicate(func() bool { - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, errr := alice.PendingChannels( - ctxt, pendingChansRequest, - ) - if errr != nil { - predErr = er.Errorf("unable to query for pending "+ - "channels: %v", errr) - return false - } - err = checkNumForceClosedChannels(pendingChanResp, 1) - if err != nil { - predErr = err - return false - } - - // All htlcs should show zero blocks until maturity, as - // evidenced by having checked the sweep transaction in the - // mempool. 
- forceClose, err := findForceClosedChannel(pendingChanResp, &op) - if err != nil { - predErr = err - return false - } - predErr = checkPendingChannelNumHtlcs(forceClose, numInvoices) - if predErr != nil { - return false - } - err = checkPendingHtlcStageAndMaturity( - forceClose, 2, htlcCsvMaturityHeight, 0, - ) - if err != nil { - predErr = err - return false - } - - return true - }, 15*time.Second) - if err != nil { - t.Fatalf(predErr.String()) - } - - // Generate the final block that sweeps all htlc funds into the user's - // wallet, and make sure the sweep is in this block. - block = mineBlocks(t, net, 1, 1)[0] - assertTxInBlock(t, block, htlcSweepTxID) - - // Now that the channel has been fully swept, it should no longer show - // up within the pending channels RPC. - err = wait.Predicate(func() bool { - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := alice.PendingChannels( - ctxt, pendingChansRequest, - ) - if err != nil { - predErr = er.Errorf("unable to query for pending "+ - "channels: %v", err) - return false - } - - predErr = checkNumForceClosedChannels(pendingChanResp, 0) - if predErr != nil { - return false - } - - // In addition to there being no pending channels, we verify - // that pending channels does not report any money still in - // limbo. - if pendingChanResp.TotalLimboBalance != 0 { - predErr = er.New("no user funds should be left " + - "in limbo after incubation") - return false - } - - return true - }, 15*time.Second) - if err != nil { - t.Fatalf(predErr.String()) - } - - // At this point, Bob should now be aware of his new immediately - // spendable on-chain balance, as it was Alice who broadcast the - // commitment transaction. 
- ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - carolBalResp, errr = net.Bob.WalletBalance(ctxt, carolBalReq) - if errr != nil { - t.Fatalf("unable to get carol's balance: %v", errr) - } - carolExpectedBalance := btcutil.Amount(carolStartingBalance) + pushAmt - if btcutil.Amount(carolBalResp.ConfirmedBalance) < carolExpectedBalance { - t.Fatalf("carol's balance is incorrect: expected %v got %v", - carolExpectedBalance, - carolBalResp.ConfirmedBalance) - } - - // Finally, we check that alice and carol have the set of resolutions - // we expect. - assertReports(ctxb, t, alice, op, aliceReports) - assertReports(ctxb, t, carol, op, carolReports) -} - -type sweptOutput struct { - OutPoint wire.OutPoint - SweepTx string -} - -// findCommitAndAnchor looks for a commitment sweep and anchor sweep in the -// mempool. Our anchor output is identified by having multiple inputs, because -// we have to bring another input to add fees to the anchor. Note that the -// anchor swept output may be nil if the channel did not have anchors. -func findCommitAndAnchor(t *harnessTest, net *lntest.NetworkHarness, - sweepTxns []*wire.MsgTx, closeTx string) (*sweptOutput, *sweptOutput) { - - var commitSweep, anchorSweep *sweptOutput - - for _, tx := range sweepTxns { - txHash := tx.TxHash() - sweepTx, err := net.Miner.Node.GetRawTransaction(&txHash) - util.RequireNoErr(t.t, err) - - // We expect our commitment sweep to have a single input, and, - // our anchor sweep to have more inputs (because the wallet - // needs to add balance to the anchor amount). We find their - // sweep txids here to setup appropriate resolutions. We also - // need to find the outpoint for our resolution, which we do by - // matching the inputs to the sweep to the close transaction. 
- inputs := sweepTx.MsgTx().TxIn - if len(inputs) == 1 { - commitSweep = &sweptOutput{ - OutPoint: inputs[0].PreviousOutPoint, - SweepTx: txHash.String(), - } - } else { - // Since we have more than one input, we run through - // them to find the outpoint that spends from the close - // tx. This will be our anchor output. - for _, txin := range inputs { - outpointStr := txin.PreviousOutPoint.Hash.String() - if outpointStr == closeTx { - anchorSweep = &sweptOutput{ - OutPoint: txin.PreviousOutPoint, - SweepTx: txHash.String(), - } - } - } - } - } - - return commitSweep, anchorSweep -} - -// assertReports checks that the count of resolutions we have present per -// type matches a set of expected resolutions. -func assertReports(ctxb context.Context, t *harnessTest, - node *lntest.HarnessNode, channelPoint wire.OutPoint, - expected map[string]*lnrpc.Resolution) { - - // Get our node's closed channels. - ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout) - defer cancel() - - closed, err := node.ClosedChannels( - ctxt, &lnrpc.ClosedChannelsRequest{}, - ) - require.NoError(t.t, err) - - var resolutions []*lnrpc.Resolution - for _, close := range closed.Channels { - if close.ChannelPoint == channelPoint.String() { - resolutions = close.Resolutions - break - } - } - - require.NotNil(t.t, resolutions) - require.Equal(t.t, len(expected), len(resolutions)) - - for _, res := range resolutions { - outPointStr := fmt.Sprintf("%v:%v", res.Outpoint.TxidStr, - res.Outpoint.OutputIndex) - - expected, ok := expected[outPointStr] - require.True(t.t, ok) - require.Equal(t.t, expected, res) - } -} - -// assertSweepFound looks up a sweep in a nodes list of broadcast sweeps. -func assertSweepFound(ctx context.Context, t *testing.T, node *lntest.HarnessNode, - sweep string, verbose bool) { - - // List all sweeps that alice's node had broadcast. 
- ctx, _ = context.WithTimeout(ctx, defaultTimeout) - sweepResp, err := node.WalletKitClient.ListSweeps( - ctx, &walletrpc.ListSweepsRequest{ - Verbose: verbose, - }, - ) - require.NoError(t, err) - - var found bool - if verbose { - found = findSweepInDetails(t, sweep, sweepResp) - } else { - found = findSweepInTxids(t, sweep, sweepResp) - } - - require.True(t, found, "sweep: %v not found", sweep) -} - -func findSweepInTxids(t *testing.T, sweepTxid string, - sweepResp *walletrpc.ListSweepsResponse) bool { - - sweepTxIDs := sweepResp.GetTransactionIds() - require.NotNil(t, sweepTxIDs, "expected transaction ids") - require.Nil(t, sweepResp.GetTransactionDetails()) - - // Check that the sweep tx we have just produced is present. - for _, tx := range sweepTxIDs.TransactionIds { - if tx == sweepTxid { - return true - } - } - - return false -} - -func findSweepInDetails(t *testing.T, sweepTxid string, - sweepResp *walletrpc.ListSweepsResponse) bool { - - sweepDetails := sweepResp.GetTransactionDetails() - require.NotNil(t, sweepDetails, "expected transaction details") - require.Nil(t, sweepResp.GetTransactionIds()) - - for _, tx := range sweepDetails.Transactions { - if tx.TxHash == sweepTxid { - return true - } - } - - return false -} - -// assertAmountSent generates a closure which queries listchannels for sndr and -// rcvr, and asserts that sndr sent amt satoshis, and that rcvr received amt -// satoshis. -// -// NOTE: This method assumes that each node only has one channel, and it is the -// channel used to send the payment. -func assertAmountSent(amt btcutil.Amount, sndr, rcvr *lntest.HarnessNode) func() er.R { - return func() er.R { - // Both channels should also have properly accounted from the - // amount that has been sent/received over the channel. 
- listReq := &lnrpc.ListChannelsRequest{} - ctxb := context.Background() - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - sndrListChannels, err := sndr.ListChannels(ctxt, listReq) - if err != nil { - return er.Errorf("unable to query for %s's channel "+ - "list: %v", sndr.Name(), err) - } - sndrSatoshisSent := sndrListChannels.Channels[0].TotalSatoshisSent - if sndrSatoshisSent != int64(amt) { - return er.Errorf("%s's satoshis sent is incorrect "+ - "got %v, expected %v", sndr.Name(), - sndrSatoshisSent, amt) - } - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - rcvrListChannels, err := rcvr.ListChannels(ctxt, listReq) - if err != nil { - return er.Errorf("unable to query for %s's channel "+ - "list: %v", rcvr.Name(), err) - } - rcvrSatoshisReceived := rcvrListChannels.Channels[0].TotalSatoshisReceived - if rcvrSatoshisReceived != int64(amt) { - return er.Errorf("%s's satoshis received is "+ - "incorrect got %v, expected %v", rcvr.Name(), - rcvrSatoshisReceived, amt) - } - - return nil - } -} - -// assertLastHTLCError checks that the last sent HTLC of the last payment sent -// by the given node failed with the expected failure code. 
-func assertLastHTLCError(t *harnessTest, node *lntest.HarnessNode, - code lnrpc.Failure_FailureCode) { - - req := &lnrpc.ListPaymentsRequest{ - IncludeIncomplete: true, - } - ctxt, _ := context.WithTimeout(context.Background(), defaultTimeout) - paymentsResp, err := node.ListPayments(ctxt, req) - if err != nil { - t.Fatalf("error when obtaining payments: %v", err) - } - - payments := paymentsResp.Payments - if len(payments) == 0 { - t.Fatalf("no payments found") - } - - payment := payments[len(payments)-1] - htlcs := payment.Htlcs - if len(htlcs) == 0 { - t.Fatalf("no htlcs") - } - - htlc := htlcs[len(htlcs)-1] - if htlc.Failure == nil { - t.Fatalf("expected failure") - } - - if htlc.Failure.Code != code { - t.Fatalf("expected failure %v, got %v", code, htlc.Failure.Code) - } -} - -// testSphinxReplayPersistence verifies that replayed onion packets are rejected -// by a remote peer after a restart. We use a combination of unsafe -// configuration arguments to force Carol to replay the same sphinx packet after -// reconnecting to Dave, and compare the returned failure message with what we -// expect for replayed onion packets. -func testSphinxReplayPersistence(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - // Open a channel with 100k satoshis between Carol and Dave with Carol being - // the sole funder of the channel. - chanAmt := btcutil.Amount(100000) - - // First, we'll create Dave, the receiver, and start him in hodl mode. - dave, err := net.NewNode("Dave", []string{"--hodl.exit-settle"}) - if err != nil { - t.Fatalf("unable to create new nodes: %v", err) - } - - // We must remember to shutdown the nodes we created for the duration - // of the tests, only leaving the two seed nodes (Alice and Bob) within - // our test network. - defer shutdownAndAssert(net, t, dave) - - // Next, we'll create Carol and establish a channel to from her to - // Dave. 
Carol is started in both unsafe-replay which will cause her to - // replay any pending Adds held in memory upon reconnection. - carol, err := net.NewNode("Carol", []string{"--unsafe-replay"}) - if err != nil { - t.Fatalf("unable to create new nodes: %v", err) - } - defer shutdownAndAssert(net, t, carol) - - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, carol, dave); err != nil { - t.Fatalf("unable to connect carol to dave: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.SendCoins(ctxt, btcutil.UnitsPerCoin(), carol) - if err != nil { - t.Fatalf("unable to send coins to carol: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - chanPoint := openChannelAndAssert( - ctxt, t, net, carol, dave, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - - // Next, we'll create Fred who is going to initiate the payment and - // establish a channel to from him to Carol. We can't perform this test - // by paying from Carol directly to Dave, because the '--unsafe-replay' - // setup doesn't apply to locally added htlcs. In that case, the - // mailbox, that is responsible for generating the replay, is bypassed. 
- fred, err := net.NewNode("Fred", nil) - if err != nil { - t.Fatalf("unable to create new nodes: %v", err) - } - defer shutdownAndAssert(net, t, fred) - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, fred, carol); err != nil { - t.Fatalf("unable to connect fred to carol: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.SendCoins(ctxt, btcutil.UnitsPerCoin(), fred) - if err != nil { - t.Fatalf("unable to send coins to fred: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - chanPointFC := openChannelAndAssert( - ctxt, t, net, fred, carol, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - - // Now that the channel is open, create an invoice for Dave which - // expects a payment of 1000 satoshis from Carol paid via a particular - // preimage. - const paymentAmt = 1000 - preimage := bytes.Repeat([]byte("A"), 32) - invoice := &lnrpc.Invoice{ - Memo: "testing", - RPreimage: preimage, - Value: paymentAmt, - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - invoiceResp, errr := dave.AddInvoice(ctxt, invoice) - if errr != nil { - t.Fatalf("unable to add invoice: %v", errr) - } - - // Wait for all channels to be recognized and advertized. 
- ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = carol.WaitForNetworkChannelOpen(ctxt, chanPoint) - if err != nil { - t.Fatalf("alice didn't advertise channel before "+ - "timeout: %v", err) - } - err = dave.WaitForNetworkChannelOpen(ctxt, chanPoint) - if err != nil { - t.Fatalf("bob didn't advertise channel before "+ - "timeout: %v", err) - } - err = carol.WaitForNetworkChannelOpen(ctxt, chanPointFC) - if err != nil { - t.Fatalf("alice didn't advertise channel before "+ - "timeout: %v", err) - } - err = fred.WaitForNetworkChannelOpen(ctxt, chanPointFC) - if err != nil { - t.Fatalf("bob didn't advertise channel before "+ - "timeout: %v", err) - } - - // With the invoice for Dave added, send a payment from Fred paying - // to the above generated invoice. - ctx, cancel := context.WithCancel(ctxb) - defer cancel() - - payStream, errr := fred.RouterClient.SendPaymentV2( - ctx, - &routerrpc.SendPaymentRequest{ - PaymentRequest: invoiceResp.PaymentRequest, - TimeoutSeconds: 60, - FeeLimitMsat: noFeeLimitMsat, - }, - ) - if errr != nil { - t.Fatalf("unable to open payment stream: %v", errr) - } - - time.Sleep(200 * time.Millisecond) - - // Dave's invoice should not be marked as settled. - payHash := &lnrpc.PaymentHash{ - RHash: invoiceResp.RHash, - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - dbInvoice, errr := dave.LookupInvoice(ctxt, payHash) - if errr != nil { - t.Fatalf("unable to lookup invoice: %v", errr) - } - if dbInvoice.Settled { - t.Fatalf("dave's invoice should not be marked as settled: %v", - spew.Sdump(dbInvoice)) - } - - // With the payment sent but hedl, all balance related stats should not - // have changed. - err = wait.InvariantNoError( - assertAmountSent(0, carol, dave), 3*time.Second, - ) - if err != nil { - t.Fatalf(err.String()) - } - - // With the first payment sent, restart dave to make sure he is - // persisting the information required to detect replayed sphinx - // packets. 
- if err := net.RestartNode(dave, nil); err != nil { - t.Fatalf("unable to restart dave: %v", err) - } - - // Carol should retransmit the Add hedl in her mailbox on startup. Dave - // should not accept the replayed Add, and actually fail back the - // pending payment. Even though he still holds the original settle, if - // he does fail, it is almost certainly caused by the sphinx replay - // protection, as it is the only validation we do in hodl mode. - result, err := getPaymentResult(payStream) - if err != nil { - t.Fatalf("unable to receive payment response: %v", err) - } - - // Assert that Fred receives the expected failure after Carol sent a - // duplicate packet that fails due to sphinx replay detection. - if result.Status == lnrpc.Payment_SUCCEEDED { - t.Fatalf("expected payment error") - } - assertLastHTLCError(t, fred, lnrpc.Failure_INVALID_ONION_KEY) - - // Since the payment failed, the balance should still be left - // unaltered. - err = wait.InvariantNoError( - assertAmountSent(0, carol, dave), 3*time.Second, - ) - if err != nil { - t.Fatalf(err.String()) - } - - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, carol, chanPoint, true) - - // Cleanup by mining the force close and sweep transaction. 
- cleanupForceClose(t, net, carol, chanPoint) -} - -func assertChannelConstraintsEqual( - t *harnessTest, want, got *lnrpc.ChannelConstraints) { - - t.t.Helper() - - if want.CsvDelay != got.CsvDelay { - t.Fatalf("CsvDelay mismatched, want: %v, got: %v", - want.CsvDelay, got.CsvDelay, - ) - } - - if want.ChanReserveSat != got.ChanReserveSat { - t.Fatalf("ChanReserveSat mismatched, want: %v, got: %v", - want.ChanReserveSat, got.ChanReserveSat, - ) - } - - if want.DustLimitSat != got.DustLimitSat { - t.Fatalf("DustLimitSat mismatched, want: %v, got: %v", - want.DustLimitSat, got.DustLimitSat, - ) - } - - if want.MaxPendingAmtMsat != got.MaxPendingAmtMsat { - t.Fatalf("MaxPendingAmtMsat mismatched, want: %v, got: %v", - want.MaxPendingAmtMsat, got.MaxPendingAmtMsat, - ) - } - - if want.MinHtlcMsat != got.MinHtlcMsat { - t.Fatalf("MinHtlcMsat mismatched, want: %v, got: %v", - want.MinHtlcMsat, got.MinHtlcMsat, - ) - } - - if want.MaxAcceptedHtlcs != got.MaxAcceptedHtlcs { - t.Fatalf("MaxAcceptedHtlcs mismatched, want: %v, got: %v", - want.MaxAcceptedHtlcs, got.MaxAcceptedHtlcs, - ) - } -} - -// testListChannels checks that the response from ListChannels is correct. It -// tests the values in all ChannelConstraints are returned as expected. Once -// ListChannels becomes mature, a test against all fields in ListChannels should -// be performed. -func testListChannels(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - const aliceRemoteMaxHtlcs = 50 - const bobRemoteMaxHtlcs = 100 - - // Create two fresh nodes and open a channel between them. - alice, err := net.NewNode("Alice", nil) - if err != nil { - t.Fatalf("unable to create new node: %v", err) - } - defer shutdownAndAssert(net, t, alice) - - bob, err := net.NewNode("Bob", []string{ - fmt.Sprintf("--default-remote-max-htlcs=%v", bobRemoteMaxHtlcs), - }) - if err != nil { - t.Fatalf("unable to create new node: %v", err) - } - defer shutdownAndAssert(net, t, bob) - - // Connect Alice to Bob. 
- if err := net.ConnectNodes(ctxb, alice, bob); err != nil { - t.Fatalf("unable to connect alice to bob: %v", err) - } - - // Give Alice some coins so she can fund a channel. - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - err = net.SendCoins(ctxt, btcutil.UnitsPerCoin(), alice) - if err != nil { - t.Fatalf("unable to send coins to alice: %v", err) - } - - // Open a channel with 100k satoshis between Alice and Bob with Alice - // being the sole funder of the channel. The minial HTLC amount is set to - // 4200 msats. - const customizedMinHtlc = 4200 - - chanAmt := btcutil.Amount(100000) - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - chanPoint := openChannelAndAssert( - ctxt, t, net, alice, bob, - lntest.OpenChannelParams{ - Amt: chanAmt, - MinHtlc: customizedMinHtlc, - RemoteMaxHtlcs: aliceRemoteMaxHtlcs, - }, - ) - - // Wait for Alice and Bob to receive the channel edge from the - // funding manager. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = alice.WaitForNetworkChannelOpen(ctxt, chanPoint) - if err != nil { - t.Fatalf("alice didn't see the alice->bob channel before "+ - "timeout: %v", err) - } - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = bob.WaitForNetworkChannelOpen(ctxt, chanPoint) - if err != nil { - t.Fatalf("bob didn't see the bob->alice channel before "+ - "timeout: %v", err) - } - - // Alice should have one channel opened with Bob. - assertNodeNumChannels(t, alice, 1) - // Bob should have one channel opened with Alice. - assertNodeNumChannels(t, bob, 1) - - // Get the ListChannel response from Alice. - listReq := &lnrpc.ListChannelsRequest{} - ctxb = context.Background() - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - resp, errr := alice.ListChannels(ctxt, listReq) - if errr != nil { - t.Fatalf("unable to query for %s's channel list: %v", - alice.Name(), errr) - } - - // Check the returned response is correct. 
- aliceChannel := resp.Channels[0] - - // defaultConstraints is a ChannelConstraints with default values. It is - // used to test against Alice's local channel constraints. - defaultConstraints := &lnrpc.ChannelConstraints{ - CsvDelay: 4, - ChanReserveSat: 1000, - DustLimitSat: uint64(lnwallet.DefaultDustLimit()), - MaxPendingAmtMsat: 99000000, - MinHtlcMsat: 1, - MaxAcceptedHtlcs: bobRemoteMaxHtlcs, - } - assertChannelConstraintsEqual( - t, defaultConstraints, aliceChannel.LocalConstraints, - ) - - // customizedConstraints is a ChannelConstraints with customized values. - // Ideally, all these values can be passed in when creating the channel. - // Currently, only the MinHtlcMsat is customized. It is used to check - // against Alice's remote channel constratins. - customizedConstraints := &lnrpc.ChannelConstraints{ - CsvDelay: 4, - ChanReserveSat: 1000, - DustLimitSat: uint64(lnwallet.DefaultDustLimit()), - MaxPendingAmtMsat: 99000000, - MinHtlcMsat: customizedMinHtlc, - MaxAcceptedHtlcs: aliceRemoteMaxHtlcs, - } - assertChannelConstraintsEqual( - t, customizedConstraints, aliceChannel.RemoteConstraints, - ) - - // Get the ListChannel response for Bob. - listReq = &lnrpc.ListChannelsRequest{} - ctxb = context.Background() - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - resp, errr = bob.ListChannels(ctxt, listReq) - if errr != nil { - t.Fatalf("unable to query for %s's channel "+ - "list: %v", bob.Name(), errr) - } - - bobChannel := resp.Channels[0] - if bobChannel.ChannelPoint != aliceChannel.ChannelPoint { - t.Fatalf("Bob's channel point mismatched, want: %s, got: %s", - chanPoint.String(), bobChannel.ChannelPoint, - ) - } - - // Check channel constraints match. Alice's local channel constraint should - // be equal to Bob's remote channel constraint, and her remote one should - // be equal to Bob's local one. 
- assertChannelConstraintsEqual( - t, aliceChannel.LocalConstraints, bobChannel.RemoteConstraints, - ) - assertChannelConstraintsEqual( - t, aliceChannel.RemoteConstraints, bobChannel.LocalConstraints, - ) - -} - -func testListPayments(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - // First start by deleting all payments that Alice knows of. This will - // allow us to execute the test with a clean state for Alice. - delPaymentsReq := &lnrpc.DeleteAllPaymentsRequest{} - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - if _, err := net.Alice.DeleteAllPayments(ctxt, delPaymentsReq); err != nil { - t.Fatalf("unable to delete payments: %v", err) - } - - // Check that there are no payments before test. - reqInit := &lnrpc.ListPaymentsRequest{} - ctxt, _ = context.WithTimeout(ctxt, defaultTimeout) - paymentsRespInit, err := net.Alice.ListPayments(ctxt, reqInit) - if err != nil { - t.Fatalf("error when obtaining Alice payments: %v", err) - } - if len(paymentsRespInit.Payments) != 0 { - t.Fatalf("incorrect number of payments, got %v, want %v", - len(paymentsRespInit.Payments), 0) - } - - // Open a channel with 100k satoshis between Alice and Bob with Alice - // being the sole funder of the channel. - chanAmt := btcutil.Amount(100000) - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - chanPoint := openChannelAndAssert( - ctxt, t, net, net.Alice, net.Bob, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - - // Now that the channel is open, create an invoice for Bob which - // expects a payment of 1000 satoshis from Alice paid via a particular - // preimage. 
- const paymentAmt = 1000 - preimage := bytes.Repeat([]byte("B"), 32) - invoice := &lnrpc.Invoice{ - Memo: "testing", - RPreimage: preimage, - Value: paymentAmt, - } - addInvoiceCtxt, _ := context.WithTimeout(ctxb, defaultTimeout) - invoiceResp, err := net.Bob.AddInvoice(addInvoiceCtxt, invoice) - if err != nil { - t.Fatalf("unable to add invoice: %v", err) - } - - // Wait for Alice to recognize and advertise the new channel generated - // above. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if errr := net.Alice.WaitForNetworkChannelOpen(ctxt, chanPoint); errr != nil { - t.Fatalf("alice didn't advertise channel before "+ - "timeout: %v", errr) - } - if errr := net.Bob.WaitForNetworkChannelOpen(ctxt, chanPoint); errr != nil { - t.Fatalf("bob didn't advertise channel before "+ - "timeout: %v", errr) - } - - // With the invoice for Bob added, send a payment towards Alice paying - // to the above generated invoice. - sendAndAssertSuccess( - t, net.Alice, - &routerrpc.SendPaymentRequest{ - PaymentRequest: invoiceResp.PaymentRequest, - TimeoutSeconds: 60, - FeeLimitSat: 1000000, - }, - ) - - // Grab Alice's list of payments, she should show the existence of - // exactly one payment. - req := &lnrpc.ListPaymentsRequest{} - ctxt, _ = context.WithTimeout(ctxt, defaultTimeout) - paymentsResp, err := net.Alice.ListPayments(ctxt, req) - if err != nil { - t.Fatalf("error when obtaining Alice payments: %v", err) - } - if len(paymentsResp.Payments) != 1 { - t.Fatalf("incorrect number of payments, got %v, want %v", - len(paymentsResp.Payments), 1) - } - p := paymentsResp.Payments[0] - path := p.Htlcs[len(p.Htlcs)-1].Route.Hops - - // Ensure that the stored path shows a direct payment to Bob with no - // other nodes in-between. - if len(path) != 1 || path[0].PubKey != net.Bob.PubKeyStr { - t.Fatalf("incorrect path") - } - - // The payment amount should also match our previous payment directly. 
- if p.Value != paymentAmt { - t.Fatalf("incorrect amount, got %v, want %v", - p.Value, paymentAmt) - } - - // The payment hash (or r-hash) should have been stored correctly. - correctRHash := hex.EncodeToString(invoiceResp.RHash) - if !reflect.DeepEqual(p.PaymentHash, correctRHash) { - t.Fatalf("incorrect RHash, got %v, want %v", - p.PaymentHash, correctRHash) - } - - // As we made a single-hop direct payment, there should have been no fee - // applied. - if p.Fee != 0 { - t.Fatalf("incorrect Fee, got %v, want %v", p.Fee, 0) - } - - // Finally, verify that the payment request returned by the rpc matches - // the invoice that we paid. - if p.PaymentRequest != invoiceResp.PaymentRequest { - t.Fatalf("incorrect payreq, got: %v, want: %v", - p.PaymentRequest, invoiceResp.PaymentRequest) - } - - // Delete all payments from Alice. DB should have no payments. - delReq := &lnrpc.DeleteAllPaymentsRequest{} - ctxt, _ = context.WithTimeout(ctxt, defaultTimeout) - _, err = net.Alice.DeleteAllPayments(ctxt, delReq) - if err != nil { - t.Fatalf("Can't delete payments at the end: %v", err) - } - - // Check that there are no payments after test. - listReq := &lnrpc.ListPaymentsRequest{} - ctxt, _ = context.WithTimeout(ctxt, defaultTimeout) - paymentsResp, err = net.Alice.ListPayments(ctxt, listReq) - if err != nil { - t.Fatalf("error when obtaining Alice payments: %v", err) - } - if len(paymentsResp.Payments) != 0 { - t.Fatalf("incorrect number of payments, got %v, want %v", - len(paymentsRespInit.Payments), 0) - } - - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Alice, chanPoint, false) -} - -// assertAmountPaid checks that the ListChannels command of the provided -// node list the total amount sent and received as expected for the -// provided channel. 
-func assertAmountPaid(t *harnessTest, channelName string, - node *lntest.HarnessNode, chanPoint wire.OutPoint, amountSent, - amountReceived int64) { - ctxb := context.Background() - - checkAmountPaid := func() er.R { - listReq := &lnrpc.ListChannelsRequest{} - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - resp, err := node.ListChannels(ctxt, listReq) - if err != nil { - return er.Errorf("unable to for node's "+ - "channels: %v", err) - } - for _, channel := range resp.Channels { - if channel.ChannelPoint != chanPoint.String() { - continue - } - - if channel.TotalSatoshisSent != amountSent { - return er.Errorf("%v: incorrect amount"+ - " sent: %v != %v", channelName, - channel.TotalSatoshisSent, - amountSent) - } - if channel.TotalSatoshisReceived != - amountReceived { - return er.Errorf("%v: incorrect amount"+ - " received: %v != %v", - channelName, - channel.TotalSatoshisReceived, - amountReceived) - } - - return nil - } - return er.Errorf("channel not found") - } - - // As far as HTLC inclusion in commitment transaction might be - // postponed we will try to check the balance couple of times, - // and then if after some period of time we receive wrong - // balance return the error. - // TODO(roasbeef): remove sleep after invoice notification hooks - // are in place - var timeover uint32 - go func() { - <-time.After(time.Second * 20) - atomic.StoreUint32(&timeover, 1) - }() - - for { - isTimeover := atomic.LoadUint32(&timeover) == 1 - if err := checkAmountPaid(); err != nil { - if isTimeover { - t.Fatalf("Check amount Paid failed: %v", err) - } - } else { - break - } - } -} - -// updateChannelPolicy updates the channel policy of node to the -// given fees and timelock delta. This function blocks until -// listenerNode has received the policy update. 
-func updateChannelPolicy(t *harnessTest, node *lntest.HarnessNode, - chanPoint *lnrpc.ChannelPoint, baseFee int64, feeRate int64, - timeLockDelta uint32, maxHtlc uint64, listenerNode *lntest.HarnessNode) { - - ctxb := context.Background() - - expectedPolicy := &lnrpc.RoutingPolicy{ - FeeBaseMsat: baseFee, - FeeRateMilliMsat: feeRate, - TimeLockDelta: timeLockDelta, - MinHtlc: 1000, // default value - MaxHtlcMsat: maxHtlc, - } - - updateFeeReq := &lnrpc.PolicyUpdateRequest{ - BaseFeeMsat: baseFee, - FeeRate: float64(feeRate) / testFeeBase, - TimeLockDelta: timeLockDelta, - Scope: &lnrpc.PolicyUpdateRequest_ChanPoint{ - ChanPoint: chanPoint, - }, - MaxHtlcMsat: maxHtlc, - } - - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - if _, err := node.UpdateChannelPolicy(ctxt, updateFeeReq); err != nil { - t.Fatalf("unable to update chan policy: %v", err) - } - - // Wait for listener node to receive the channel update from node. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - graphSub := subscribeGraphNotifications(t, ctxt, listenerNode) - defer close(graphSub.quit) - - waitForChannelUpdate( - t, graphSub, - []expectedChanUpdate{ - {node.PubKeyStr, expectedPolicy, chanPoint}, - }, - ) -} - -type singleHopSendToRouteCase struct { - name string - - // streaming tests streaming SendToRoute if true, otherwise tests - // synchronous SenToRoute. - streaming bool - - // routerrpc submits the request to the routerrpc subserver if true, - // otherwise submits to the main rpc server. - routerrpc bool - - // mpp sets the MPP fields on the request if true, otherwise submits a - // regular payment. 
- mpp bool -} - -var singleHopSendToRouteCases = []singleHopSendToRouteCase{ - { - name: "regular main sync", - }, - { - name: "regular main stream", - streaming: true, - }, - { - name: "regular routerrpc sync", - routerrpc: true, - }, - { - name: "mpp main sync", - mpp: true, - }, - { - name: "mpp main stream", - streaming: true, - mpp: true, - }, - { - name: "mpp routerrpc sync", - routerrpc: true, - mpp: true, - }, -} - -// testSingleHopSendToRoute tests that payments are properly processed through a -// provided route with a single hop. We'll create the following network -// topology: -// Carol --100k--> Dave -// We'll query the daemon for routes from Carol to Dave and then send payments -// by feeding the route back into the various SendToRoute RPC methods. Here we -// test all three SendToRoute endpoints, forcing each to perform both a regular -// payment and an MPP payment. -func testSingleHopSendToRoute(net *lntest.NetworkHarness, t *harnessTest) { - for _, test := range singleHopSendToRouteCases { - test := test - - t.t.Run(test.name, func(t1 *testing.T) { - ht := newHarnessTest(t1, t.lndHarness) - ht.RunTestCase(&testCase{ - name: test.name, - test: func(_ *lntest.NetworkHarness, tt *harnessTest) { - testSingleHopSendToRouteCase(net, tt, test) - }, - }) - }) - } -} - -func testSingleHopSendToRouteCase(net *lntest.NetworkHarness, t *harnessTest, - test singleHopSendToRouteCase) { - - const chanAmt = btcutil.Amount(100000) - const paymentAmtSat = 1000 - const numPayments = 5 - const amountPaid = int64(numPayments * paymentAmtSat) - - ctxb := context.Background() - var networkChans []*lnrpc.ChannelPoint - - // Create Carol and Dave, then establish a channel between them. Carol - // is the sole funder of the channel with 100k satoshis. 
The network - // topology should look like: - // Carol -> 100k -> Dave - carol, err := net.NewNode("Carol", nil) - if err != nil { - t.Fatalf("unable to create new nodes: %v", err) - } - defer shutdownAndAssert(net, t, carol) - - dave, err := net.NewNode("Dave", nil) - if err != nil { - t.Fatalf("unable to create new nodes: %v", err) - } - defer shutdownAndAssert(net, t, dave) - - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, carol, dave); err != nil { - t.Fatalf("unable to connect carol to dave: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.SendCoins(ctxt, btcutil.UnitsPerCoin(), carol) - if err != nil { - t.Fatalf("unable to send coins to carol: %v", err) - } - - // Open a channel with 100k satoshis between Carol and Dave with Carol - // being the sole funder of the channel. - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - chanPointCarol := openChannelAndAssert( - ctxt, t, net, carol, dave, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - networkChans = append(networkChans, chanPointCarol) - - carolChanTXID, err := lnd.GetChanPointFundingTxid(chanPointCarol) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } - carolFundPoint := wire.OutPoint{ - Hash: *carolChanTXID, - Index: chanPointCarol.OutputIndex, - } - - // Wait for all nodes to have seen all channels. 
- nodes := []*lntest.HarnessNode{carol, dave} - for _, chanPoint := range networkChans { - for _, node := range nodes { - txid, err := lnd.GetChanPointFundingTxid(chanPoint) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } - point := wire.OutPoint{ - Hash: *txid, - Index: chanPoint.OutputIndex, - } - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = node.WaitForNetworkChannelOpen(ctxt, chanPoint) - if err != nil { - t.Fatalf("%s(%d): timeout waiting for "+ - "channel(%s) open: %v", node.Name(), - node.NodeID, point, err) - } - } - } - - // Create invoices for Dave, which expect a payment from Carol. - payReqs, rHashes, _, err := createPayReqs( - dave, paymentAmtSat, numPayments, - ) - if err != nil { - t.Fatalf("unable to create pay reqs: %v", err) - } - - // Reconstruct payment addresses. - var payAddrs [][]byte - for _, payReq := range payReqs { - ctx, _ := context.WithTimeout( - context.Background(), defaultTimeout, - ) - resp, err := dave.DecodePayReq( - ctx, - &lnrpc.PayReqString{PayReq: payReq}, - ) - if err != nil { - t.Fatalf("decode pay req: %v", err) - } - payAddrs = append(payAddrs, resp.PaymentAddr) - } - - // Assert Carol and Dave are synced to the chain before proceeding, to - // ensure the queried route will have a valid final CLTV once the HTLC - // reaches Dave. - _, minerHeight, err := net.Miner.Node.GetBestBlock() - if err != nil { - t.Fatalf("unable to get best height: %v", err) - } - ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout) - defer cancel() - util.RequireNoErr(t.t, waitForNodeBlockHeight(ctxt, carol, minerHeight)) - util.RequireNoErr(t.t, waitForNodeBlockHeight(ctxt, dave, minerHeight)) - - // Query for routes to pay from Carol to Dave using the default CLTV - // config. 
- routesReq := &lnrpc.QueryRoutesRequest{ - PubKey: dave.PubKeyStr, - Amt: paymentAmtSat, - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - routes, errr := carol.QueryRoutes(ctxt, routesReq) - if errr != nil { - t.Fatalf("unable to get route from %s: %v", - carol.Name(), errr) - } - - // There should only be one route to try, so take the first item. - r := routes.Routes[0] - - // Construct a closure that will set MPP fields on the route, which - // allows us to test MPP payments. - setMPPFields := func(i int) { - hop := r.Hops[len(r.Hops)-1] - hop.TlvPayload = true - hop.MppRecord = &lnrpc.MPPRecord{ - PaymentAddr: payAddrs[i], - TotalAmtMsat: paymentAmtSat * 1000, - } - } - - // Construct closures for each of the payment types covered: - // - main rpc server sync - // - main rpc server streaming - // - routerrpc server sync - sendToRouteSync := func() { - for i, rHash := range rHashes { - // Populate the MPP fields for the final hop if we are - // testing MPP payments. - if test.mpp { - setMPPFields(i) - } - - sendReq := &lnrpc.SendToRouteRequest{ - PaymentHash: rHash, - Route: r, - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - resp, err := carol.SendToRouteSync( - ctxt, sendReq, - ) - if err != nil { - t.Fatalf("unable to send to route for "+ - "%s: %v", carol.Name(), err) - } - if resp.PaymentError != "" { - t.Fatalf("received payment error from %s: %v", - carol.Name(), resp.PaymentError) - } - } - } - sendToRouteStream := func() { - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - alicePayStream, err := carol.SendToRoute(ctxt) - if err != nil { - t.Fatalf("unable to create payment stream for "+ - "carol: %v", err) - } - - for i, rHash := range rHashes { - // Populate the MPP fields for the final hop if we are - // testing MPP payments. 
- if test.mpp { - setMPPFields(i) - } - - sendReq := &lnrpc.SendToRouteRequest{ - PaymentHash: rHash, - Route: routes.Routes[0], - } - err := alicePayStream.Send(sendReq) - - if err != nil { - t.Fatalf("unable to send payment: %v", err) - } - - resp, err := alicePayStream.Recv() - if err != nil { - t.Fatalf("unable to send payment: %v", err) - } - if resp.PaymentError != "" { - t.Fatalf("received payment error: %v", - resp.PaymentError) - } - } - } - sendToRouteRouterRPC := func() { - for i, rHash := range rHashes { - // Populate the MPP fields for the final hop if we are - // testing MPP payments. - if test.mpp { - setMPPFields(i) - } - - sendReq := &routerrpc.SendToRouteRequest{ - PaymentHash: rHash, - Route: r, - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - resp, err := carol.RouterClient.SendToRouteV2( - ctxt, sendReq, - ) - if err != nil { - t.Fatalf("unable to send to route for "+ - "%s: %v", carol.Name(), err) - } - if resp.Failure != nil { - t.Fatalf("received payment error from %s: %v", - carol.Name(), resp.Failure) - } - } - } - - // Using Carol as the node as the source, send the payments - // synchronously via the the routerrpc's SendToRoute, or via the main RPC - // server's SendToRoute streaming or sync calls. - switch { - case !test.routerrpc && test.streaming: - sendToRouteStream() - case !test.routerrpc && !test.streaming: - sendToRouteSync() - case test.routerrpc && !test.streaming: - sendToRouteRouterRPC() - default: - t.Fatalf("routerrpc does not support streaming send_to_route") - } - - // Verify that the payment's from Carol's PoV have the correct payment - // hash and amount. 
- ctxt, _ = context.WithTimeout(ctxt, defaultTimeout) - paymentsResp, errr := carol.ListPayments( - ctxt, &lnrpc.ListPaymentsRequest{}, - ) - if errr != nil { - t.Fatalf("error when obtaining %s payments: %v", - carol.Name(), errr) - } - if len(paymentsResp.Payments) != numPayments { - t.Fatalf("incorrect number of payments, got %v, want %v", - len(paymentsResp.Payments), numPayments) - } - - for i, p := range paymentsResp.Payments { - // Assert that the payment hashes for each payment match up. - rHashHex := hex.EncodeToString(rHashes[i]) - if p.PaymentHash != rHashHex { - t.Fatalf("incorrect payment hash for payment %d, "+ - "want: %s got: %s", - i, rHashHex, p.PaymentHash) - } - - // Assert that each payment has no invoice since the payment was - // completed using SendToRoute. - if p.PaymentRequest != "" { - t.Fatalf("incorrect payment request for payment: %d, "+ - "want: \"\", got: %s", - i, p.PaymentRequest) - } - - // Assert the payment amount is correct. - if p.ValueSat != paymentAmtSat { - t.Fatalf("incorrect payment amt for payment %d, "+ - "want: %d, got: %d", - i, paymentAmtSat, p.ValueSat) - } - - // Assert exactly one htlc was made. - if len(p.Htlcs) != 1 { - t.Fatalf("expected 1 htlc for payment %d, got: %d", - i, len(p.Htlcs)) - } - - // Assert the htlc's route is populated. - htlc := p.Htlcs[0] - if htlc.Route == nil { - t.Fatalf("expected route for payment %d", i) - } - - // Assert the hop has exactly one hop. - if len(htlc.Route.Hops) != 1 { - t.Fatalf("expected 1 hop for payment %d, got: %d", - i, len(htlc.Route.Hops)) - } - - // If this is an MPP test, assert the MPP record's fields are - // properly populated. Otherwise the hop should not have an MPP - // record. 
- hop := htlc.Route.Hops[0] - if test.mpp { - if hop.MppRecord == nil { - t.Fatalf("expected mpp record for mpp payment") - } - - if hop.MppRecord.TotalAmtMsat != paymentAmtSat*1000 { - t.Fatalf("incorrect mpp total msat for payment %d "+ - "want: %d, got: %d", - i, paymentAmtSat*1000, - hop.MppRecord.TotalAmtMsat) - } - - expAddr := payAddrs[i] - if !bytes.Equal(hop.MppRecord.PaymentAddr, expAddr) { - t.Fatalf("incorrect mpp payment addr for payment %d "+ - "want: %x, got: %x", - i, expAddr, hop.MppRecord.PaymentAddr) - } - } else if hop.MppRecord != nil { - t.Fatalf("unexpected mpp record for non-mpp payment") - } - } - - // Verify that the invoices's from Dave's PoV have the correct payment - // hash and amount. - ctxt, _ = context.WithTimeout(ctxt, defaultTimeout) - invoicesResp, errr := dave.ListInvoices( - ctxt, &lnrpc.ListInvoiceRequest{}, - ) - if errr != nil { - t.Fatalf("error when obtaining %s payments: %v", - dave.Name(), errr) - } - if len(invoicesResp.Invoices) != numPayments { - t.Fatalf("incorrect number of invoices, got %v, want %v", - len(invoicesResp.Invoices), numPayments) - } - - for i, inv := range invoicesResp.Invoices { - // Assert that the payment hashes match up. - if !bytes.Equal(inv.RHash, rHashes[i]) { - t.Fatalf("incorrect payment hash for invoice %d, "+ - "want: %x got: %x", - i, rHashes[i], inv.RHash) - } - - // Assert that the amount paid to the invoice is correct. - if inv.AmtPaidSat != paymentAmtSat { - t.Fatalf("incorrect payment amt for invoice %d, "+ - "want: %d, got %d", - i, paymentAmtSat, inv.AmtPaidSat) - } - } - - // At this point all the channels within our proto network should be - // shifted by 5k satoshis in the direction of Dave, the sink within the - // payment flow generated above. The order of asserts corresponds to - // increasing of time is needed to embed the HTLC in commitment - // transaction, in channel Carol->Dave, order is Dave and then Carol. 
- assertAmountPaid(t, "Carol(local) => Dave(remote)", dave, - carolFundPoint, int64(0), amountPaid) - assertAmountPaid(t, "Carol(local) => Dave(remote)", carol, - carolFundPoint, amountPaid, int64(0)) - - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, carol, chanPointCarol, false) -} - -// testMultiHopSendToRoute tests that payments are properly processed -// through a provided route. We'll create the following network topology: -// Alice --100k--> Bob --100k--> Carol -// We'll query the daemon for routes from Alice to Carol and then -// send payments through the routes. -func testMultiHopSendToRoute(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - const chanAmt = btcutil.Amount(100000) - var networkChans []*lnrpc.ChannelPoint - - // Open a channel with 100k satoshis between Alice and Bob with Alice - // being the sole funder of the channel. - ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout) - chanPointAlice := openChannelAndAssert( - ctxt, t, net, net.Alice, net.Bob, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - networkChans = append(networkChans, chanPointAlice) - - aliceChanTXID, err := lnd.GetChanPointFundingTxid(chanPointAlice) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } - aliceFundPoint := wire.OutPoint{ - Hash: *aliceChanTXID, - Index: chanPointAlice.OutputIndex, - } - - // Create Carol and establish a channel from Bob. Bob is the sole funder - // of the channel with 100k satoshis. 
The network topology should look like: - // Alice -> Bob -> Carol - carol, err := net.NewNode("Carol", nil) - if err != nil { - t.Fatalf("unable to create new nodes: %v", err) - } - defer shutdownAndAssert(net, t, carol) - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, carol, net.Bob); err != nil { - t.Fatalf("unable to connect carol to alice: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.SendCoins(ctxt, btcutil.UnitsPerCoin(), net.Bob) - if err != nil { - t.Fatalf("unable to send coins to bob: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - chanPointBob := openChannelAndAssert( - ctxt, t, net, net.Bob, carol, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - networkChans = append(networkChans, chanPointBob) - bobChanTXID, err := lnd.GetChanPointFundingTxid(chanPointBob) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } - bobFundPoint := wire.OutPoint{ - Hash: *bobChanTXID, - Index: chanPointBob.OutputIndex, - } - - // Wait for all nodes to have seen all channels. - nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol} - nodeNames := []string{"Alice", "Bob", "Carol"} - for _, chanPoint := range networkChans { - for i, node := range nodes { - txid, err := lnd.GetChanPointFundingTxid(chanPoint) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } - point := wire.OutPoint{ - Hash: *txid, - Index: chanPoint.OutputIndex, - } - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = node.WaitForNetworkChannelOpen(ctxt, chanPoint) - if err != nil { - t.Fatalf("%s(%d): timeout waiting for "+ - "channel(%s) open: %v", nodeNames[i], - node.NodeID, point, err) - } - } - } - - // Query for routes to pay from Alice to Carol. - // We set FinalCltvDelta to 40 since by default QueryRoutes returns - // the last hop with a final cltv delta of 9 where as the default in - // htlcswitch is 40. 
- const paymentAmt = 1000 - routesReq := &lnrpc.QueryRoutesRequest{ - PubKey: carol.PubKeyStr, - Amt: paymentAmt, - FinalCltvDelta: chainreg.DefaultBitcoinTimeLockDelta, - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - routes, errr := net.Alice.QueryRoutes(ctxt, routesReq) - if errr != nil { - t.Fatalf("unable to get route: %v", errr) - } - - // Create 5 invoices for Carol, which expect a payment from Alice for 1k - // satoshis with a different preimage each time. - const numPayments = 5 - _, rHashes, _, err := createPayReqs( - carol, paymentAmt, numPayments, - ) - if err != nil { - t.Fatalf("unable to create pay reqs: %v", err) - } - - // We'll wait for all parties to recognize the new channels within the - // network. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = carol.WaitForNetworkChannelOpen(ctxt, chanPointBob) - if err != nil { - t.Fatalf("bob didn't advertise his channel in time: %v", err) - } - - time.Sleep(time.Millisecond * 50) - - // Using Alice as the source, pay to the 5 invoices from Carol created - // above. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - alicePayStream, errr := net.Alice.SendToRoute(ctxt) - if errr != nil { - t.Fatalf("unable to create payment stream for alice: %v", errr) - } - - for _, rHash := range rHashes { - sendReq := &lnrpc.SendToRouteRequest{ - PaymentHash: rHash, - Route: routes.Routes[0], - } - err := alicePayStream.Send(sendReq) - - if err != nil { - t.Fatalf("unable to send payment: %v", err) - } - } - - for range rHashes { - resp, err := alicePayStream.Recv() - if err != nil { - t.Fatalf("unable to send payment: %v", err) - } - if resp.PaymentError != "" { - t.Fatalf("received payment error: %v", resp.PaymentError) - } - } - - // When asserting the amount of satoshis moved, we'll factor in the - // default base fee, as we didn't modify the fee structure when - // creating the seed nodes in the network. 
- const baseFee = 1 - - // At this point all the channels within our proto network should be - // shifted by 5k satoshis in the direction of Carol, the sink within the - // payment flow generated above. The order of asserts corresponds to - // increasing of time is needed to embed the HTLC in commitment - // transaction, in channel Alice->Bob->Carol, order is Carol, Bob, - // Alice. - const amountPaid = int64(5000) - assertAmountPaid(t, "Bob(local) => Carol(remote)", carol, - bobFundPoint, int64(0), amountPaid) - assertAmountPaid(t, "Bob(local) => Carol(remote)", net.Bob, - bobFundPoint, amountPaid, int64(0)) - assertAmountPaid(t, "Alice(local) => Bob(remote)", net.Bob, - aliceFundPoint, int64(0), amountPaid+(baseFee*numPayments)) - assertAmountPaid(t, "Alice(local) => Bob(remote)", net.Alice, - aliceFundPoint, amountPaid+(baseFee*numPayments), int64(0)) - - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Alice, chanPointAlice, false) - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, carol, chanPointBob, false) -} - -// testSendToRouteErrorPropagation tests propagation of errors that occur -// while processing a multi-hop payment through an unknown route. -func testSendToRouteErrorPropagation(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - const chanAmt = btcutil.Amount(100000) - - // Open a channel with 100k satoshis between Alice and Bob with Alice - // being the sole funder of the channel. 
- ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout) - chanPointAlice := openChannelAndAssert( - ctxt, t, net, net.Alice, net.Bob, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err := net.Alice.WaitForNetworkChannelOpen(ctxt, chanPointAlice) - if err != nil { - t.Fatalf("alice didn't advertise her channel: %v", err) - } - - // Create a new nodes (Carol and Charlie), load her with some funds, - // then establish a connection between Carol and Charlie with a channel - // that has identical capacity to the one created above.Then we will - // get route via queryroutes call which will be fake route for Alice -> - // Bob graph. - // - // The network topology should now look like: Alice -> Bob; Carol -> Charlie. - carol, err := net.NewNode("Carol", nil) - if err != nil { - t.Fatalf("unable to create new nodes: %v", err) - } - defer shutdownAndAssert(net, t, carol) - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.SendCoins(ctxt, btcutil.UnitsPerCoin(), carol) - if err != nil { - t.Fatalf("unable to send coins to carol: %v", err) - } - - charlie, err := net.NewNode("Charlie", nil) - if err != nil { - t.Fatalf("unable to create new nodes: %v", err) - } - defer shutdownAndAssert(net, t, charlie) - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.SendCoins(ctxt, btcutil.UnitsPerCoin(), charlie) - if err != nil { - t.Fatalf("unable to send coins to charlie: %v", err) - } - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, carol, charlie); err != nil { - t.Fatalf("unable to connect carol to alice: %v", err) - } - - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - chanPointCarol := openChannelAndAssert( - ctxt, t, net, carol, charlie, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = carol.WaitForNetworkChannelOpen(ctxt, chanPointCarol) - if err != nil { - 
t.Fatalf("carol didn't advertise her channel: %v", err) - } - - // Query routes from Carol to Charlie which will be an invalid route - // for Alice -> Bob. - fakeReq := &lnrpc.QueryRoutesRequest{ - PubKey: charlie.PubKeyStr, - Amt: int64(1), - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - fakeRoute, errr := carol.QueryRoutes(ctxt, fakeReq) - if errr != nil { - t.Fatalf("unable get fake route: %v", errr) - } - - // Create 1 invoices for Bob, which expect a payment from Alice for 1k - // satoshis - const paymentAmt = 1000 - - invoice := &lnrpc.Invoice{ - Memo: "testing", - Value: paymentAmt, - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - resp, errr := net.Bob.AddInvoice(ctxt, invoice) - if errr != nil { - t.Fatalf("unable to add invoice: %v", errr) - } - - rHash := resp.RHash - - // Using Alice as the source, pay to the 5 invoices from Bob created above. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - alicePayStream, errr := net.Alice.SendToRoute(ctxt) - if errr != nil { - t.Fatalf("unable to create payment stream for alice: %v", errr) - } - - sendReq := &lnrpc.SendToRouteRequest{ - PaymentHash: rHash, - Route: fakeRoute.Routes[0], - } - - if err := alicePayStream.Send(sendReq); err != nil { - t.Fatalf("unable to send payment: %v", err) - } - - // At this place we should get an rpc error with notification - // that edge is not found on hop(0) - if _, err := alicePayStream.Recv(); err != nil && strings.Contains(err.Error(), - "edge not found") { - - } else if err != nil { - t.Fatalf("payment stream has been closed but fake route has consumed: %v", err) - } - - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Alice, chanPointAlice, false) - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, carol, chanPointCarol, false) -} - -// testUnannouncedChannels checks unannounced channels are not returned by -// describeGraph RPC request unless 
explicitly asked for. -func testUnannouncedChannels(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - amount := lnd.MaxBtcFundingAmount - - // Open a channel between Alice and Bob, ensuring the - // channel has been opened properly. - ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout) - chanOpenUpdate := openChannelStream( - ctxt, t, net, net.Alice, net.Bob, - lntest.OpenChannelParams{ - Amt: amount, - }, - ) - - // Mine 2 blocks, and check that the channel is opened but not yet - // announced to the network. - mineBlocks(t, net, 2, 1) - - // One block is enough to make the channel ready for use, since the - // nodes have defaultNumConfs=1 set. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - fundingChanPoint, err := net.WaitForChannelOpen(ctxt, chanOpenUpdate) - if err != nil { - t.Fatalf("error while waiting for channel open: %v", err) - } - - // Alice should have 1 edge in her graph. - req := &lnrpc.ChannelGraphRequest{ - IncludeUnannounced: true, - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - chanGraph, errr := net.Alice.DescribeGraph(ctxt, req) - if errr != nil { - t.Fatalf("unable to query alice's graph: %v", errr) - } - - numEdges := len(chanGraph.Edges) - if numEdges != 1 { - t.Fatalf("expected to find 1 edge in the graph, found %d", numEdges) - } - - // Channels should not be announced yet, hence Alice should have no - // announced edges in her graph. - req.IncludeUnannounced = false - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - chanGraph, errr = net.Alice.DescribeGraph(ctxt, req) - if errr != nil { - t.Fatalf("unable to query alice's graph: %v", errr) - } - - numEdges = len(chanGraph.Edges) - if numEdges != 0 { - t.Fatalf("expected to find 0 announced edges in the graph, found %d", - numEdges) - } - - // Mine 4 more blocks, and check that the channel is now announced. - mineBlocks(t, net, 4, 0) - - // Give the network a chance to learn that auth proof is confirmed. 
- var predErr er.R - err = wait.Predicate(func() bool { - // The channel should now be announced. Check that Alice has 1 - // announced edge. - req.IncludeUnannounced = false - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - chanGraph, errr = net.Alice.DescribeGraph(ctxt, req) - if errr != nil { - predErr = er.Errorf("unable to query alice's graph: %v", errr) - return false - } - - numEdges = len(chanGraph.Edges) - if numEdges != 1 { - predErr = er.Errorf("expected to find 1 announced edge in "+ - "the graph, found %d", numEdges) - return false - } - return true - }, time.Second*15) - if err != nil { - t.Fatalf("%v", predErr) - } - - // The channel should now be announced. Check that Alice has 1 announced - // edge. - req.IncludeUnannounced = false - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - chanGraph, errr = net.Alice.DescribeGraph(ctxt, req) - if errr != nil { - t.Fatalf("unable to query alice's graph: %v", errr) - } - - numEdges = len(chanGraph.Edges) - if numEdges != 1 { - t.Fatalf("expected to find 1 announced edge in the graph, found %d", - numEdges) - } - - // Close the channel used during the test. - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Alice, fundingChanPoint, false) -} - -// testPrivateChannels tests that a private channel can be used for -// routing by the two endpoints of the channel, but is not known by -// the rest of the nodes in the graph. -func testPrivateChannels(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - const chanAmt = btcutil.Amount(100000) - var networkChans []*lnrpc.ChannelPoint - - // We create the following topology: - // - // Dave --100k--> Alice --200k--> Bob - // ^ ^ - // | | - // 100k 100k - // | | - // +---- Carol ----+ - // - // where the 100k channel between Carol and Alice is private. - - // Open a channel with 200k satoshis between Alice and Bob. 
- ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout) - chanPointAlice := openChannelAndAssert( - ctxt, t, net, net.Alice, net.Bob, - lntest.OpenChannelParams{ - Amt: chanAmt * 2, - }, - ) - networkChans = append(networkChans, chanPointAlice) - - aliceChanTXID, err := lnd.GetChanPointFundingTxid(chanPointAlice) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } - aliceFundPoint := wire.OutPoint{ - Hash: *aliceChanTXID, - Index: chanPointAlice.OutputIndex, - } - - // Create Dave, and a channel to Alice of 100k. - dave, err := net.NewNode("Dave", nil) - if err != nil { - t.Fatalf("unable to create new nodes: %v", err) - } - defer shutdownAndAssert(net, t, dave) - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, dave, net.Alice); err != nil { - t.Fatalf("unable to connect dave to alice: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.SendCoins(ctxt, btcutil.UnitsPerCoin(), dave) - if err != nil { - t.Fatalf("unable to send coins to dave: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - chanPointDave := openChannelAndAssert( - ctxt, t, net, dave, net.Alice, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - networkChans = append(networkChans, chanPointDave) - daveChanTXID, err := lnd.GetChanPointFundingTxid(chanPointDave) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } - daveFundPoint := wire.OutPoint{ - Hash: *daveChanTXID, - Index: chanPointDave.OutputIndex, - } - - // Next, we'll create Carol and establish a channel from her to - // Dave of 100k. 
- carol, err := net.NewNode("Carol", nil) - if err != nil { - t.Fatalf("unable to create new nodes: %v", err) - } - defer shutdownAndAssert(net, t, carol) - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, carol, dave); err != nil { - t.Fatalf("unable to connect carol to dave: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.SendCoins(ctxt, btcutil.UnitsPerCoin(), carol) - if err != nil { - t.Fatalf("unable to send coins to carol: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - chanPointCarol := openChannelAndAssert( - ctxt, t, net, carol, dave, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - networkChans = append(networkChans, chanPointCarol) - - carolChanTXID, err := lnd.GetChanPointFundingTxid(chanPointCarol) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } - carolFundPoint := wire.OutPoint{ - Hash: *carolChanTXID, - Index: chanPointCarol.OutputIndex, - } - - // Wait for all nodes to have seen all these channels, as they - // are all public. - nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol, dave} - nodeNames := []string{"Alice", "Bob", "Carol", "Dave"} - for _, chanPoint := range networkChans { - for i, node := range nodes { - txid, err := lnd.GetChanPointFundingTxid(chanPoint) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } - point := wire.OutPoint{ - Hash: *txid, - Index: chanPoint.OutputIndex, - } - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = node.WaitForNetworkChannelOpen(ctxt, chanPoint) - if err != nil { - t.Fatalf("%s(%d): timeout waiting for "+ - "channel(%s) open: %v", nodeNames[i], - node.NodeID, point, err) - } - } - } - // Now create a _private_ channel directly between Carol and - // Alice of 100k. 
- ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, carol, net.Alice); err != nil { - t.Fatalf("unable to connect dave to alice: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - chanOpenUpdate := openChannelStream( - ctxt, t, net, carol, net.Alice, - lntest.OpenChannelParams{ - Amt: chanAmt, - Private: true, - }, - ) - if err != nil { - t.Fatalf("unable to open channel: %v", err) - } - - // One block is enough to make the channel ready for use, since the - // nodes have defaultNumConfs=1 set. - block := mineBlocks(t, net, 1, 1)[0] - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - chanPointPrivate, err := net.WaitForChannelOpen(ctxt, chanOpenUpdate) - if err != nil { - t.Fatalf("error while waiting for channel open: %v", err) - } - fundingTxID, err := lnd.GetChanPointFundingTxid(chanPointPrivate) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } - assertTxInBlock(t, block, fundingTxID) - - // The channel should be listed in the peer information returned by - // both peers. - privateFundPoint := wire.OutPoint{ - Hash: *fundingTxID, - Index: chanPointPrivate.OutputIndex, - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.AssertChannelExists(ctxt, carol, &privateFundPoint) - if err != nil { - t.Fatalf("unable to assert channel existence: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.AssertChannelExists(ctxt, net.Alice, &privateFundPoint) - if err != nil { - t.Fatalf("unable to assert channel existence: %v", err) - } - - // The channel should be available for payments between Carol and Alice. - // We check this by sending payments from Carol to Bob, that - // collectively would deplete at least one of Carol's channels. - - // Create 2 invoices for Bob, each of 70k satoshis. Since each of - // Carol's channels is of size 100k, these payments cannot succeed - // by only using one of the channels. 
- const numPayments = 2 - const paymentAmt = 70000 - payReqs, _, _, err := createPayReqs( - net.Bob, paymentAmt, numPayments, - ) - if err != nil { - t.Fatalf("unable to create pay reqs: %v", err) - } - - time.Sleep(time.Millisecond * 50) - - // Let Carol pay the invoices. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = completePaymentRequests( - ctxt, carol, carol.RouterClient, payReqs, true, - ) - if err != nil { - t.Fatalf("unable to send payments: %v", err) - } - - // When asserting the amount of satoshis moved, we'll factor in the - // default base fee, as we didn't modify the fee structure when - // creating the seed nodes in the network. - const baseFee = 1 - - // Bob should have received 140k satoshis from Alice. - assertAmountPaid(t, "Alice(local) => Bob(remote)", net.Bob, - aliceFundPoint, int64(0), 2*paymentAmt) - - // Alice sent 140k to Bob. - assertAmountPaid(t, "Alice(local) => Bob(remote)", net.Alice, - aliceFundPoint, 2*paymentAmt, int64(0)) - - // Alice received 70k + fee from Dave. - assertAmountPaid(t, "Dave(local) => Alice(remote)", net.Alice, - daveFundPoint, int64(0), paymentAmt+baseFee) - - // Dave sent 70k+fee to Alice. - assertAmountPaid(t, "Dave(local) => Alice(remote)", dave, - daveFundPoint, paymentAmt+baseFee, int64(0)) - - // Dave received 70k+fee of two hops from Carol. - assertAmountPaid(t, "Carol(local) => Dave(remote)", dave, - carolFundPoint, int64(0), paymentAmt+baseFee*2) - - // Carol sent 70k+fee of two hops to Dave. - assertAmountPaid(t, "Carol(local) => Dave(remote)", carol, - carolFundPoint, paymentAmt+baseFee*2, int64(0)) - - // Alice received 70k+fee from Carol. - assertAmountPaid(t, "Carol(local) [private=>] Alice(remote)", - net.Alice, privateFundPoint, int64(0), paymentAmt+baseFee) - - // Carol sent 70k+fee to Alice. 
- assertAmountPaid(t, "Carol(local) [private=>] Alice(remote)", - carol, privateFundPoint, paymentAmt+baseFee, int64(0)) - - // Alice should also be able to route payments using this channel, - // so send two payments of 60k back to Carol. - const paymentAmt60k = 60000 - payReqs, _, _, err = createPayReqs( - carol, paymentAmt60k, numPayments, - ) - if err != nil { - t.Fatalf("unable to create pay reqs: %v", err) - } - - time.Sleep(time.Millisecond * 50) - - // Let Bob pay the invoices. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = completePaymentRequests( - ctxt, net.Alice, net.Alice.RouterClient, payReqs, true, - ) - if err != nil { - t.Fatalf("unable to send payments: %v", err) - } - - // Finally, we make sure Dave and Bob does not know about the - // private channel between Carol and Alice. We first mine - // plenty of blocks, such that the channel would have been - // announced in case it was public. - mineBlocks(t, net, 10, 0) - - // We create a helper method to check how many edges each of the - // nodes know about. Carol and Alice should know about 4, while - // Bob and Dave should only know about 3, since one channel is - // private. 
- numChannels := func(node *lntest.HarnessNode, includeUnannounced bool) int { - req := &lnrpc.ChannelGraphRequest{ - IncludeUnannounced: includeUnannounced, - } - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - chanGraph, err := node.DescribeGraph(ctxt, req) - if err != nil { - t.Fatalf("unable go describegraph: %v", err) - } - return len(chanGraph.Edges) - } - - var predErr er.R - err = wait.Predicate(func() bool { - aliceChans := numChannels(net.Alice, true) - if aliceChans != 4 { - predErr = er.Errorf("expected Alice to know 4 edges, "+ - "had %v", aliceChans) - return false - } - alicePubChans := numChannels(net.Alice, false) - if alicePubChans != 3 { - predErr = er.Errorf("expected Alice to know 3 public edges, "+ - "had %v", alicePubChans) - return false - } - bobChans := numChannels(net.Bob, true) - if bobChans != 3 { - predErr = er.Errorf("expected Bob to know 3 edges, "+ - "had %v", bobChans) - return false - } - carolChans := numChannels(carol, true) - if carolChans != 4 { - predErr = er.Errorf("expected Carol to know 4 edges, "+ - "had %v", carolChans) - return false - } - carolPubChans := numChannels(carol, false) - if carolPubChans != 3 { - predErr = er.Errorf("expected Carol to know 3 public edges, "+ - "had %v", carolPubChans) - return false - } - daveChans := numChannels(dave, true) - if daveChans != 3 { - predErr = er.Errorf("expected Dave to know 3 edges, "+ - "had %v", daveChans) - return false - } - return true - }, time.Second*15) - if err != nil { - t.Fatalf("%v", predErr) - } - - // Close all channels. 
- ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Alice, chanPointAlice, false) - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, dave, chanPointDave, false) - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, carol, chanPointCarol, false) - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, carol, chanPointPrivate, false) -} - -// testInvoiceRoutingHints tests that the routing hints for an invoice are -// created properly. -func testInvoiceRoutingHints(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - const chanAmt = btcutil.Amount(100000) - - // Throughout this test, we'll be opening a channel between Alice and - // several other parties. - // - // First, we'll create a private channel between Alice and Bob. This - // will be the only channel that will be considered as a routing hint - // throughout this test. We'll include a push amount since we currently - // require channels to have enough remote balance to cover the invoice's - // payment. - ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout) - chanPointBob := openChannelAndAssert( - ctxt, t, net, net.Alice, net.Bob, - lntest.OpenChannelParams{ - Amt: chanAmt, - PushAmt: chanAmt / 2, - Private: true, - }, - ) - - // Then, we'll create Carol's node and open a public channel between her - // and Alice. This channel will not be considered as a routing hint due - // to it being public. 
- carol, err := net.NewNode("Carol", nil) - if err != nil { - t.Fatalf("unable to create carol's node: %v", err) - } - defer shutdownAndAssert(net, t, carol) - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, net.Alice, carol); err != nil { - t.Fatalf("unable to connect alice to carol: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - chanPointCarol := openChannelAndAssert( - ctxt, t, net, net.Alice, carol, - lntest.OpenChannelParams{ - Amt: chanAmt, - PushAmt: chanAmt / 2, - }, - ) - - // We'll also create a public channel between Bob and Carol to ensure - // that Bob gets selected as the only routing hint. We do this as - // we should only include routing hints for nodes that are publicly - // advertised, otherwise we'd end up leaking information about nodes - // that wish to stay unadvertised. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, net.Bob, carol); err != nil { - t.Fatalf("unable to connect alice to carol: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - chanPointBobCarol := openChannelAndAssert( - ctxt, t, net, net.Bob, carol, - lntest.OpenChannelParams{ - Amt: chanAmt, - PushAmt: chanAmt / 2, - }, - ) - - // Then, we'll create Dave's node and open a private channel between him - // and Alice. We will not include a push amount in order to not consider - // this channel as a routing hint as it will not have enough remote - // balance for the invoice's amount. 
- dave, err := net.NewNode("Dave", nil) - if err != nil { - t.Fatalf("unable to create dave's node: %v", err) - } - defer shutdownAndAssert(net, t, dave) - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, net.Alice, dave); err != nil { - t.Fatalf("unable to connect alice to dave: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - chanPointDave := openChannelAndAssert( - ctxt, t, net, net.Alice, dave, - lntest.OpenChannelParams{ - Amt: chanAmt, - Private: true, - }, - ) - - // Finally, we'll create Eve's node and open a private channel between - // her and Alice. This time though, we'll take Eve's node down after the - // channel has been created to avoid populating routing hints for - // inactive channels. - eve, err := net.NewNode("Eve", nil) - if err != nil { - t.Fatalf("unable to create eve's node: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, net.Alice, eve); err != nil { - t.Fatalf("unable to connect alice to eve: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - chanPointEve := openChannelAndAssert( - ctxt, t, net, net.Alice, eve, - lntest.OpenChannelParams{ - Amt: chanAmt, - PushAmt: chanAmt / 2, - Private: true, - }, - ) - - // Make sure all the channels have been opened. - nodeNames := []string{"bob", "carol", "dave", "eve"} - aliceChans := []*lnrpc.ChannelPoint{ - chanPointBob, chanPointCarol, chanPointBobCarol, chanPointDave, - chanPointEve, - } - for i, chanPoint := range aliceChans { - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - err = net.Alice.WaitForNetworkChannelOpen(ctxt, chanPoint) - if err != nil { - t.Fatalf("timed out waiting for channel open between "+ - "alice and %s: %v", nodeNames[i], err) - } - } - - // Now that the channels are open, we'll take down Eve's node. - shutdownAndAssert(net, t, eve) - - // Create an invoice for Alice that will populate the routing hints. 
- invoice := &lnrpc.Invoice{ - Memo: "routing hints", - Value: int64(chanAmt / 4), - Private: true, - } - - // Due to the way the channels were set up above, the channel between - // Alice and Bob should be the only channel used as a routing hint. - var predErr er.R - var decoded *lnrpc.PayReq - err = wait.Predicate(func() bool { - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - resp, err := net.Alice.AddInvoice(ctxt, invoice) - if err != nil { - predErr = er.Errorf("unable to add invoice: %v", err) - return false - } - - // We'll decode the invoice's payment request to determine which - // channels were used as routing hints. - payReq := &lnrpc.PayReqString{ - PayReq: resp.PaymentRequest, - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - decoded, err = net.Alice.DecodePayReq(ctxt, payReq) - if err != nil { - predErr = er.Errorf("unable to decode payment "+ - "request: %v", err) - return false - } - - if len(decoded.RouteHints) != 1 { - predErr = er.Errorf("expected one route hint, got %d", - len(decoded.RouteHints)) - return false - } - return true - }, time.Second*15) - if err != nil { - t.Fatalf(predErr.String()) - } - - hops := decoded.RouteHints[0].HopHints - if len(hops) != 1 { - t.Fatalf("expected one hop in route hint, got %d", len(hops)) - } - chanID := hops[0].ChanId - - // We'll need the short channel ID of the channel between Alice and Bob - // to make sure the routing hint is for this channel. 
- listReq := &lnrpc.ListChannelsRequest{} - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - listResp, errr := net.Alice.ListChannels(ctxt, listReq) - if errr != nil { - t.Fatalf("unable to retrieve alice's channels: %v", errr) - } - - var aliceBobChanID uint64 - for _, channel := range listResp.Channels { - if channel.RemotePubkey == net.Bob.PubKeyStr { - aliceBobChanID = channel.ChanId - } - } - - if aliceBobChanID == 0 { - t.Fatalf("channel between alice and bob not found") - } - - if chanID != aliceBobChanID { - t.Fatalf("expected channel ID %d, got %d", aliceBobChanID, - chanID) - } - - // Now that we've confirmed the routing hints were added correctly, we - // can close all the channels and shut down all the nodes created. - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Alice, chanPointBob, false) - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Alice, chanPointCarol, false) - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Bob, chanPointBobCarol, false) - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Alice, chanPointDave, false) - - // The channel between Alice and Eve should be force closed since Eve - // is offline. - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Alice, chanPointEve, true) - - // Cleanup by mining the force close and sweep transaction. - cleanupForceClose(t, net, net.Alice, chanPointEve) -} - -// testMultiHopOverPrivateChannels tests that private channels can be used as -// intermediate hops in a route for payments. -func testMultiHopOverPrivateChannels(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - // We'll test that multi-hop payments over private channels work as - // intended. 
To do so, we'll create the following topology: - // private public private - // Alice <--100k--> Bob <--100k--> Carol <--100k--> Dave - const chanAmt = btcutil.Amount(100000) - - // First, we'll open a private channel between Alice and Bob with Alice - // being the funder. - ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout) - chanPointAlice := openChannelAndAssert( - ctxt, t, net, net.Alice, net.Bob, - lntest.OpenChannelParams{ - Amt: chanAmt, - Private: true, - }, - ) - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err := net.Alice.WaitForNetworkChannelOpen(ctxt, chanPointAlice) - if err != nil { - t.Fatalf("alice didn't see the channel alice <-> bob before "+ - "timeout: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.Bob.WaitForNetworkChannelOpen(ctxt, chanPointAlice) - if err != nil { - t.Fatalf("bob didn't see the channel alice <-> bob before "+ - "timeout: %v", err) - } - - // Retrieve Alice's funding outpoint. - aliceChanTXID, err := lnd.GetChanPointFundingTxid(chanPointAlice) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } - aliceFundPoint := wire.OutPoint{ - Hash: *aliceChanTXID, - Index: chanPointAlice.OutputIndex, - } - - // Next, we'll create Carol's node and open a public channel between - // her and Bob with Bob being the funder. 
- carol, err := net.NewNode("Carol", nil) - if err != nil { - t.Fatalf("unable to create carol's node: %v", err) - } - defer shutdownAndAssert(net, t, carol) - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, net.Bob, carol); err != nil { - t.Fatalf("unable to connect bob to carol: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - chanPointBob := openChannelAndAssert( - ctxt, t, net, net.Bob, carol, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.Bob.WaitForNetworkChannelOpen(ctxt, chanPointBob) - if err != nil { - t.Fatalf("bob didn't see the channel bob <-> carol before "+ - "timeout: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = carol.WaitForNetworkChannelOpen(ctxt, chanPointBob) - if err != nil { - t.Fatalf("carol didn't see the channel bob <-> carol before "+ - "timeout: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.Alice.WaitForNetworkChannelOpen(ctxt, chanPointBob) - if err != nil { - t.Fatalf("alice didn't see the channel bob <-> carol before "+ - "timeout: %v", err) - } - - // Retrieve Bob's funding outpoint. - bobChanTXID, err := lnd.GetChanPointFundingTxid(chanPointBob) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } - bobFundPoint := wire.OutPoint{ - Hash: *bobChanTXID, - Index: chanPointBob.OutputIndex, - } - - // Next, we'll create Dave's node and open a private channel between him - // and Carol with Carol being the funder. 
- dave, err := net.NewNode("Dave", nil) - if err != nil { - t.Fatalf("unable to create dave's node: %v", err) - } - defer shutdownAndAssert(net, t, dave) - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, carol, dave); err != nil { - t.Fatalf("unable to connect carol to dave: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.SendCoins(ctxt, btcutil.UnitsPerCoin(), carol) - if err != nil { - t.Fatalf("unable to send coins to carol: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - chanPointCarol := openChannelAndAssert( - ctxt, t, net, carol, dave, - lntest.OpenChannelParams{ - Amt: chanAmt, - Private: true, - }, - ) - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = carol.WaitForNetworkChannelOpen(ctxt, chanPointCarol) - if err != nil { - t.Fatalf("carol didn't see the channel carol <-> dave before "+ - "timeout: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = dave.WaitForNetworkChannelOpen(ctxt, chanPointCarol) - if err != nil { - t.Fatalf("dave didn't see the channel carol <-> dave before "+ - "timeout: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = dave.WaitForNetworkChannelOpen(ctxt, chanPointBob) - if err != nil { - t.Fatalf("dave didn't see the channel bob <-> carol before "+ - "timeout: %v", err) - } - - // Retrieve Carol's funding point. - carolChanTXID, err := lnd.GetChanPointFundingTxid(chanPointCarol) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } - carolFundPoint := wire.OutPoint{ - Hash: *carolChanTXID, - Index: chanPointCarol.OutputIndex, - } - - // Now that all the channels are set up according to the topology from - // above, we can proceed to test payments. We'll create an invoice for - // Dave of 20k satoshis and pay it with Alice. 
Since there is no public - // route from Alice to Dave, we'll need to use the private channel - // between Carol and Dave as a routing hint encoded in the invoice. - const paymentAmt = 20000 - - // Create the invoice for Dave. - invoice := &lnrpc.Invoice{ - Memo: "two hopz!", - Value: paymentAmt, - Private: true, - } - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - resp, errr := dave.AddInvoice(ctxt, invoice) - if errr != nil { - t.Fatalf("unable to add invoice for dave: %v", errr) - } - - // Let Alice pay the invoice. - payReqs := []string{resp.PaymentRequest} - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = completePaymentRequests( - ctxt, net.Alice, net.Alice.RouterClient, payReqs, true, - ) - if err != nil { - t.Fatalf("unable to send payments from alice to dave: %v", err) - } - - // When asserting the amount of satoshis moved, we'll factor in the - // default base fee, as we didn't modify the fee structure when opening - // the channels. - const baseFee = 1 - - // Dave should have received 20k satoshis from Carol. - assertAmountPaid(t, "Carol(local) [private=>] Dave(remote)", - dave, carolFundPoint, 0, paymentAmt) - - // Carol should have sent 20k satoshis to Dave. - assertAmountPaid(t, "Carol(local) [private=>] Dave(remote)", - carol, carolFundPoint, paymentAmt, 0) - - // Carol should have received 20k satoshis + fee for one hop from Bob. - assertAmountPaid(t, "Bob(local) => Carol(remote)", - carol, bobFundPoint, 0, paymentAmt+baseFee) - - // Bob should have sent 20k satoshis + fee for one hop to Carol. - assertAmountPaid(t, "Bob(local) => Carol(remote)", - net.Bob, bobFundPoint, paymentAmt+baseFee, 0) - - // Bob should have received 20k satoshis + fee for two hops from Alice. - assertAmountPaid(t, "Alice(local) [private=>] Bob(remote)", net.Bob, - aliceFundPoint, 0, paymentAmt+baseFee*2) - - // Alice should have sent 20k satoshis + fee for two hops to Bob. 
- assertAmountPaid(t, "Alice(local) [private=>] Bob(remote)", net.Alice, - aliceFundPoint, paymentAmt+baseFee*2, 0) - - // At this point, the payment was successful. We can now close all the - // channels and shutdown the nodes created throughout this test. - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Alice, chanPointAlice, false) - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Bob, chanPointBob, false) - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, carol, chanPointCarol, false) -} - -func testInvoiceSubscriptions(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - const chanAmt = btcutil.Amount(500000) - - // Open a channel with 500k satoshis between Alice and Bob with Alice - // being the sole funder of the channel. - ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout) - chanPoint := openChannelAndAssert( - ctxt, t, net, net.Alice, net.Bob, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - - // Next create a new invoice for Bob requesting 1k satoshis. - // TODO(roasbeef): make global list of invoices for each node to re-use - // and avoid collisions - const paymentAmt = 1000 - invoice := &lnrpc.Invoice{ - Memo: "testing", - RPreimage: makeFakePayHash(t), - Value: paymentAmt, - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - invoiceResp, errr := net.Bob.AddInvoice(ctxt, invoice) - if errr != nil { - t.Fatalf("unable to add invoice: %v", errr) - } - lastAddIndex := invoiceResp.AddIndex - - // Create a new invoice subscription client for Bob, the notification - // should be dispatched shortly below. 
- req := &lnrpc.InvoiceSubscription{} - ctx, cancelInvoiceSubscription := context.WithCancel(ctxb) - bobInvoiceSubscription, errr := net.Bob.SubscribeInvoices(ctx, req) - if errr != nil { - t.Fatalf("unable to subscribe to bob's invoice updates: %v", errr) - } - - var settleIndex uint64 - quit := make(chan struct{}) - updateSent := make(chan struct{}) - go func() { - invoiceUpdate, err := bobInvoiceSubscription.Recv() - select { - case <-quit: - // Received cancellation - return - default: - } - - if err != nil { - t.Fatalf("unable to recv invoice update: %v", err) - } - - // The invoice update should exactly match the invoice created - // above, but should now be settled and have SettleDate - if !invoiceUpdate.Settled { - t.Fatalf("invoice not settled but should be") - } - if invoiceUpdate.SettleDate == 0 { - t.Fatalf("invoice should have non zero settle date, but doesn't") - } - - if !bytes.Equal(invoiceUpdate.RPreimage, invoice.RPreimage) { - t.Fatalf("payment preimages don't match: expected %v, got %v", - invoice.RPreimage, invoiceUpdate.RPreimage) - } - - if invoiceUpdate.SettleIndex == 0 { - t.Fatalf("invoice should have settle index") - } - - settleIndex = invoiceUpdate.SettleIndex - - close(updateSent) - }() - - // Wait for the channel to be recognized by both Alice and Bob before - // continuing the rest of the test. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err := net.Alice.WaitForNetworkChannelOpen(ctxt, chanPoint) - if err != nil { - // TODO(roasbeef): will need to make num blocks to advertise a - // node param - close(quit) - t.Fatalf("channel not seen by alice before timeout: %v", err) - } - - // With the assertion above set up, send a payment from Alice to Bob - // which should finalize and settle the invoice. 
- sendReq := &routerrpc.SendPaymentRequest{ - PaymentRequest: invoiceResp.PaymentRequest, - TimeoutSeconds: 60, - FeeLimitMsat: noFeeLimitMsat, - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - stream, errr := net.Alice.RouterClient.SendPaymentV2(ctxt, sendReq) - if errr != nil { - close(quit) - t.Fatalf("unable to send payment: %v", errr) - } - result, err := getPaymentResult(stream) - if err != nil { - close(quit) - t.Fatalf("cannot get payment result: %v", err) - } - if result.Status != lnrpc.Payment_SUCCEEDED { - close(quit) - t.Fatalf("error when attempting recv: %v", result.Status) - } - - select { - case <-time.After(time.Second * 10): - close(quit) - t.Fatalf("update not sent after 10 seconds") - case <-updateSent: // Fall through on success - } - - // With the base case working, we'll now cancel Bob's current - // subscription in order to exercise the backlog fill behavior. - cancelInvoiceSubscription() - - // We'll now add 3 more invoices to Bob's invoice registry. - const numInvoices = 3 - payReqs, _, newInvoices, err := createPayReqs( - net.Bob, paymentAmt, numInvoices, - ) - if err != nil { - t.Fatalf("unable to create pay reqs: %v", err) - } - - // Now that the set of invoices has been added, we'll re-register for - // streaming invoice notifications for Bob, this time specifying the - // add invoice of the last prior invoice. - req = &lnrpc.InvoiceSubscription{ - AddIndex: lastAddIndex, - } - ctx, cancelInvoiceSubscription = context.WithCancel(ctxb) - bobInvoiceSubscription, errr = net.Bob.SubscribeInvoices(ctx, req) - if errr != nil { - t.Fatalf("unable to subscribe to bob's invoice updates: %v", errr) - } - - // Since we specified a value of the prior add index above, we should - // now immediately get the invoices we just added as we should get the - // backlog of notifications. 
- for i := 0; i < numInvoices; i++ { - invoiceUpdate, err := bobInvoiceSubscription.Recv() - if err != nil { - t.Fatalf("unable to receive subscription") - } - - // We should now get the ith invoice we added, as they should - // be returned in order. - if invoiceUpdate.Settled { - t.Fatalf("should have only received add events") - } - originalInvoice := newInvoices[i] - rHash := sha256.Sum256(originalInvoice.RPreimage[:]) - if !bytes.Equal(invoiceUpdate.RHash, rHash[:]) { - t.Fatalf("invoices have mismatched payment hashes: "+ - "expected %x, got %x", rHash[:], - invoiceUpdate.RHash) - } - } - - cancelInvoiceSubscription() - - // We'll now have Bob settle out the remainder of these invoices so we - // can test that all settled invoices are properly notified. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = completePaymentRequests( - ctxt, net.Alice, net.Alice.RouterClient, payReqs, true, - ) - if err != nil { - t.Fatalf("unable to send payment: %v", err) - } - - // With the set of invoices paid, we'll now cancel the old - // subscription, and create a new one for Bob, this time using the - // settle index to obtain the backlog of settled invoices. - req = &lnrpc.InvoiceSubscription{ - SettleIndex: settleIndex, - } - ctx, cancelInvoiceSubscription = context.WithCancel(ctxb) - bobInvoiceSubscription, errr = net.Bob.SubscribeInvoices(ctx, req) - if errr != nil { - t.Fatalf("unable to subscribe to bob's invoice updates: %v", errr) - } - - defer cancelInvoiceSubscription() - - // As we specified the index of the past settle index, we should now - // receive notifications for the three HTLCs that we just settled. As - // the order that the HTLCs will be settled in is partially randomized, - // we'll use a map to assert that the proper set has been settled. 
- settledInvoices := make(map[[32]byte]struct{}) - for _, invoice := range newInvoices { - rHash := sha256.Sum256(invoice.RPreimage[:]) - settledInvoices[rHash] = struct{}{} - } - for i := 0; i < numInvoices; i++ { - invoiceUpdate, err := bobInvoiceSubscription.Recv() - if err != nil { - t.Fatalf("unable to receive subscription") - } - - // We should now get the ith invoice we added, as they should - // be returned in order. - if !invoiceUpdate.Settled { - t.Fatalf("should have only received settle events") - } - - var rHash [32]byte - copy(rHash[:], invoiceUpdate.RHash) - if _, ok := settledInvoices[rHash]; !ok { - t.Fatalf("unknown invoice settled: %x", rHash) - } - - delete(settledInvoices, rHash) - } - - // At this point, all the invoices should be fully settled. - if len(settledInvoices) != 0 { - t.Fatalf("not all invoices settled") - } - - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Alice, chanPoint, false) -} - -// channelSubscription houses the proxied update and error chans for a node's -// channel subscriptions. -type channelSubscription struct { - updateChan chan *lnrpc.ChannelEventUpdate - errChan chan er.R - quit chan struct{} -} - -// subscribeChannelNotifications subscribes to channel updates and launches a -// goroutine that forwards these to the returned channel. -func subscribeChannelNotifications(ctxb context.Context, t *harnessTest, - node *lntest.HarnessNode) channelSubscription { - - // We'll first start by establishing a notification client which will - // send us notifications upon channels becoming active, inactive or - // closed. 
- req := &lnrpc.ChannelEventSubscription{} - ctx, cancelFunc := context.WithCancel(ctxb) - - chanUpdateClient, err := node.SubscribeChannelEvents(ctx, req) - if err != nil { - t.Fatalf("unable to create channel update client: %v", err) - } - - // We'll launch a goroutine that will be responsible for proxying all - // notifications recv'd from the client into the channel below. - errChan := make(chan er.R, 1) - quit := make(chan struct{}) - chanUpdates := make(chan *lnrpc.ChannelEventUpdate, 20) - go func() { - defer cancelFunc() - for { - select { - case <-quit: - return - default: - chanUpdate, err := chanUpdateClient.Recv() - select { - case <-quit: - return - default: - } - - if err == io.EOF { - return - } else if err != nil { - select { - case errChan <- er.E(err): - case <-quit: - } - return - } - - select { - case chanUpdates <- chanUpdate: - case <-quit: - return - } - } - } - }() - - return channelSubscription{ - updateChan: chanUpdates, - errChan: errChan, - quit: quit, - } -} - -// verifyCloseUpdate is used to verify that a closed channel update is of the -// expected type. -func verifyCloseUpdate(chanUpdate *lnrpc.ChannelEventUpdate, - closeType lnrpc.ChannelCloseSummary_ClosureType, - closeInitiator lnrpc.Initiator) er.R { - - // We should receive one inactive and one closed notification - // for each channel. 
- switch update := chanUpdate.Channel.(type) { - case *lnrpc.ChannelEventUpdate_InactiveChannel: - if chanUpdate.Type != lnrpc.ChannelEventUpdate_INACTIVE_CHANNEL { - return er.Errorf("update type mismatch: expected %v, got %v", - lnrpc.ChannelEventUpdate_INACTIVE_CHANNEL, - chanUpdate.Type) - } - case *lnrpc.ChannelEventUpdate_ClosedChannel: - if chanUpdate.Type != - lnrpc.ChannelEventUpdate_CLOSED_CHANNEL { - return er.Errorf("update type mismatch: expected %v, got %v", - lnrpc.ChannelEventUpdate_CLOSED_CHANNEL, - chanUpdate.Type) - } - - if update.ClosedChannel.CloseType != closeType { - return er.Errorf("channel closure type "+ - "mismatch: expected %v, got %v", - closeType, - update.ClosedChannel.CloseType) - } - - if update.ClosedChannel.CloseInitiator != closeInitiator { - return er.Errorf("expected close intiator: %v, got: %v", - closeInitiator, - update.ClosedChannel.CloseInitiator) - } - - default: - return er.Errorf("channel update channel of wrong type, "+ - "expected closed channel, got %T", - update) - } - - return nil -} - -// testBasicChannelCreationAndUpdates tests multiple channel opening and closing, -// and ensures that if a node is subscribed to channel updates they will be -// received correctly for both cooperative and force closed channels. -func testBasicChannelCreationAndUpdates(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - const ( - numChannels = 2 - amount = lnd.MaxBtcFundingAmount - ) - - // Subscribe Bob and Alice to channel event notifications. - bobChanSub := subscribeChannelNotifications(ctxb, t, net.Bob) - defer close(bobChanSub.quit) - - aliceChanSub := subscribeChannelNotifications(ctxb, t, net.Alice) - defer close(aliceChanSub.quit) - - // Open the channel between Alice and Bob, asserting that the - // channel has been properly open on-chain. 
- chanPoints := make([]*lnrpc.ChannelPoint, numChannels) - for i := 0; i < numChannels; i++ { - ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout) - chanPoints[i] = openChannelAndAssert( - ctxt, t, net, net.Alice, net.Bob, - lntest.OpenChannelParams{ - Amt: amount, - }, - ) - } - - // Since each of the channels just became open, Bob and Alice should - // each receive an open and an active notification for each channel. - var numChannelUpds int - const totalNtfns = 3 * numChannels - verifyOpenUpdatesReceived := func(sub channelSubscription) er.R { - numChannelUpds = 0 - for numChannelUpds < totalNtfns { - select { - case update := <-sub.updateChan: - switch update.Type { - case lnrpc.ChannelEventUpdate_PENDING_OPEN_CHANNEL: - if numChannelUpds%3 != 0 { - return er.Errorf("expected " + - "open or active" + - "channel ntfn, got pending open " + - "channel ntfn instead") - } - case lnrpc.ChannelEventUpdate_OPEN_CHANNEL: - if numChannelUpds%3 != 1 { - return er.Errorf("expected " + - "pending open or active" + - "channel ntfn, got open" + - "channel ntfn instead") - } - case lnrpc.ChannelEventUpdate_ACTIVE_CHANNEL: - if numChannelUpds%3 != 2 { - return er.Errorf("expected " + - "pending open or open" + - "channel ntfn, got active " + - "channel ntfn instead") - } - default: - return er.Errorf("update type mismatch: "+ - "expected open or active channel "+ - "notification, got: %v", - update.Type) - } - numChannelUpds++ - case <-time.After(time.Second * 10): - return er.Errorf("timeout waiting for channel "+ - "notifications, only received %d/%d "+ - "chanupds", numChannelUpds, - totalNtfns) - } - } - - return nil - } - - if err := verifyOpenUpdatesReceived(bobChanSub); err != nil { - t.Fatalf("error verifying open updates: %v", err) - } - if err := verifyOpenUpdatesReceived(aliceChanSub); err != nil { - t.Fatalf("error verifying open updates: %v", err) - } - - // Close the channel between Alice and Bob, asserting that the channel - // has been properly closed 
on-chain. - for i, chanPoint := range chanPoints { - ctx, _ := context.WithTimeout(context.Background(), defaultTimeout) - - // Force close half of the channels. - force := i%2 == 0 - closeChannelAndAssert(ctx, t, net, net.Alice, chanPoint, force) - if force { - cleanupForceClose(t, net, net.Alice, chanPoint) - } - } - - // verifyCloseUpdatesReceived is used to verify that Alice and Bob - // receive the correct channel updates in order. - verifyCloseUpdatesReceived := func(sub channelSubscription, - forceType lnrpc.ChannelCloseSummary_ClosureType, - closeInitiator lnrpc.Initiator) er.R { - - // Ensure one inactive and one closed notification is received for each - // closed channel. - numChannelUpds := 0 - for numChannelUpds < 2*numChannels { - expectedCloseType := lnrpc.ChannelCloseSummary_COOPERATIVE_CLOSE - - // Every other channel should be force closed. If this - // channel was force closed, set the expected close type - // the the type passed in. - force := (numChannelUpds/2)%2 == 0 - if force { - expectedCloseType = forceType - } - - select { - case chanUpdate := <-sub.updateChan: - err := verifyCloseUpdate( - chanUpdate, expectedCloseType, - closeInitiator, - ) - if err != nil { - return err - } - - numChannelUpds++ - case err := <-sub.errChan: - return err - case <-time.After(time.Second * 10): - return er.Errorf("timeout waiting "+ - "for channel notifications, only "+ - "received %d/%d chanupds", - numChannelUpds, 2*numChannels) - } - } - - return nil - } - - // Verify Bob receives all closed channel notifications. He should - // receive a remote force close notification for force closed channels. - // All channels (cooperatively and force closed) should have a remote - // close initiator because Alice closed the channels. 
- if err := verifyCloseUpdatesReceived(bobChanSub, - lnrpc.ChannelCloseSummary_REMOTE_FORCE_CLOSE, - lnrpc.Initiator_INITIATOR_REMOTE); err != nil { - t.Fatalf("errored verifying close updates: %v", err) - } - - // Verify Alice receives all closed channel notifications. She should - // receive a remote force close notification for force closed channels. - // All channels (cooperatively and force closed) should have a local - // close initiator because Alice closed the channels. - if err := verifyCloseUpdatesReceived(aliceChanSub, - lnrpc.ChannelCloseSummary_LOCAL_FORCE_CLOSE, - lnrpc.Initiator_INITIATOR_LOCAL); err != nil { - t.Fatalf("errored verifying close updates: %v", err) - } -} - -// testMaxPendingChannels checks that error is returned from remote peer if -// max pending channel number was exceeded and that '--maxpendingchannels' flag -// exists and works properly. -func testMaxPendingChannels(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - maxPendingChannels := lncfg.DefaultMaxPendingChannels + 1 - amount := lnd.MaxBtcFundingAmount - - // Create a new node (Carol) with greater number of max pending - // channels. - args := []string{ - fmt.Sprintf("--maxpendingchannels=%v", maxPendingChannels), - } - carol, err := net.NewNode("Carol", args) - if err != nil { - t.Fatalf("unable to create new nodes: %v", err) - } - defer shutdownAndAssert(net, t, carol) - - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, net.Alice, carol); err != nil { - t.Fatalf("unable to connect carol to alice: %v", err) - } - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - carolBalance := btcutil.Amount(maxPendingChannels) * amount - if err := net.SendCoins(ctxt, carolBalance, carol); err != nil { - t.Fatalf("unable to send coins to carol: %v", err) - } - - // Send open channel requests without generating new blocks thereby - // increasing pool of pending channels. 
Then check that we can't open - // the channel if the number of pending channels exceed max value. - openStreams := make([]lnrpc.Lightning_OpenChannelClient, maxPendingChannels) - for i := 0; i < maxPendingChannels; i++ { - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - stream := openChannelStream( - ctxt, t, net, net.Alice, carol, - lntest.OpenChannelParams{ - Amt: amount, - }, - ) - openStreams[i] = stream - } - - // Carol exhausted available amount of pending channels, next open - // channel request should cause ErrorGeneric to be sent back to Alice. - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - _, err = net.OpenChannel( - ctxt, net.Alice, carol, - lntest.OpenChannelParams{ - Amt: amount, - }, - ) - - if err == nil { - t.Fatalf("error wasn't received") - } else if !strings.Contains( - err.String(), lnwire.ErrMaxPendingChannels.Detail, - ) { - t.Fatalf("not expected error was received: %v", err) - } - - // For now our channels are in pending state, in order to not interfere - // with other tests we should clean up - complete opening of the - // channel and then close it. - - // Mine 6 blocks, then wait for node's to notify us that the channel has - // been opened. The funding transactions should be found within the - // first newly mined block. 6 blocks make sure the funding transaction - // has enough confirmations to be announced publicly. - block := mineBlocks(t, net, 6, maxPendingChannels)[0] - - chanPoints := make([]*lnrpc.ChannelPoint, maxPendingChannels) - for i, stream := range openStreams { - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - fundingChanPoint, err := net.WaitForChannelOpen(ctxt, stream) - if err != nil { - t.Fatalf("error while waiting for channel open: %v", err) - } - - fundingTxID, err := lnd.GetChanPointFundingTxid(fundingChanPoint) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } - - // Ensure that the funding transaction enters a block, and is - // properly advertised by Alice. 
- assertTxInBlock(t, block, fundingTxID) - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.Alice.WaitForNetworkChannelOpen(ctxt, fundingChanPoint) - if err != nil { - t.Fatalf("channel not seen on network before "+ - "timeout: %v", err) - } - - // The channel should be listed in the peer information - // returned by both peers. - chanPoint := wire.OutPoint{ - Hash: *fundingTxID, - Index: fundingChanPoint.OutputIndex, - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.AssertChannelExists(ctxt, net.Alice, &chanPoint); err != nil { - t.Fatalf("unable to assert channel existence: %v", err) - } - - chanPoints[i] = fundingChanPoint - } - - // Next, close the channel between Alice and Carol, asserting that the - // channel has been properly closed on-chain. - for _, chanPoint := range chanPoints { - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Alice, chanPoint, false) - } -} - -// getNTxsFromMempool polls until finding the desired number of transactions in -// the provided miner's mempool and returns the full transactions to the caller. -func getNTxsFromMempool(miner *rpcclient.Client, n int, - timeout time.Duration) ([]*wire.MsgTx, er.R) { - - txids, err := waitForNTxsInMempool(miner, n, timeout) - if err != nil { - return nil, err - } - - var txes []*wire.MsgTx - for _, txid := range txids { - tx, err := miner.GetRawTransaction(txid) - if err != nil { - return nil, err - } - txes = append(txes, tx.MsgTx()) - } - return txes, nil -} - -// getTxFee retrieves parent transactions and reconstructs the fee paid. 
-func getTxFee(miner *rpcclient.Client, tx *wire.MsgTx) (btcutil.Amount, er.R) { - var balance btcutil.Amount - for _, in := range tx.TxIn { - parentHash := in.PreviousOutPoint.Hash - rawTx, err := miner.GetRawTransaction(&parentHash) - if err != nil { - return 0, err - } - parent := rawTx.MsgTx() - balance += btcutil.Amount( - parent.TxOut[in.PreviousOutPoint.Index].Value, - ) - } - - for _, out := range tx.TxOut { - balance -= btcutil.Amount(out.Value) - } - - return balance, nil -} - -// testFailingChannel tests that we will fail the channel by force closing ii -// in the case where a counterparty tries to settle an HTLC with the wrong -// preimage. -func testFailingChannel(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - const ( - paymentAmt = 10000 - ) - - chanAmt := lnd.MaxFundingAmount - - // We'll introduce Carol, which will settle any incoming invoice with a - // totally unrelated preimage. - carol, err := net.NewNode("Carol", []string{"--hodl.bogus-settle"}) - if err != nil { - t.Fatalf("unable to create new nodes: %v", err) - } - defer shutdownAndAssert(net, t, carol) - - // Let Alice connect and open a channel to Carol, - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, net.Alice, carol); err != nil { - t.Fatalf("unable to connect alice to carol: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - chanPoint := openChannelAndAssert( - ctxt, t, net, net.Alice, carol, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - - // With the channel open, we'll create a invoice for Carol that Alice - // will attempt to pay. 
- preimage := bytes.Repeat([]byte{byte(192)}, 32) - invoice := &lnrpc.Invoice{ - Memo: "testing", - RPreimage: preimage, - Value: paymentAmt, - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - resp, errr := carol.AddInvoice(ctxt, invoice) - if errr != nil { - t.Fatalf("unable to add invoice: %v", errr) - } - carolPayReqs := []string{resp.PaymentRequest} - - // Wait for Alice to receive the channel edge from the funding manager. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.Alice.WaitForNetworkChannelOpen(ctxt, chanPoint) - if err != nil { - t.Fatalf("alice didn't see the alice->carol channel before "+ - "timeout: %v", err) - } - - // Send the payment from Alice to Carol. We expect Carol to attempt to - // settle this payment with the wrong preimage. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = completePaymentRequests( - ctxt, net.Alice, net.Alice.RouterClient, carolPayReqs, false, - ) - if err != nil { - t.Fatalf("unable to send payments: %v", err) - } - - // Since Alice detects that Carol is trying to trick her by providing a - // fake preimage, she should fail and force close the channel. - var predErr er.R - err = wait.Predicate(func() bool { - pendingChansRequest := &lnrpc.PendingChannelsRequest{} - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, errr := net.Alice.PendingChannels(ctxt, - pendingChansRequest) - if errr != nil { - predErr = er.Errorf("unable to query for pending "+ - "channels: %v", errr) - return false - } - n := len(pendingChanResp.WaitingCloseChannels) - if n != 1 { - predErr = er.Errorf("Expected to find %d channels "+ - "waiting close, found %d", 1, n) - return false - } - return true - }, time.Second*15) - if err != nil { - t.Fatalf("%v", predErr) - } - - // Mine a block to confirm the broadcasted commitment. 
- block := mineBlocks(t, net, 1, 1)[0] - if len(block.Transactions) != 2 { - t.Fatalf("transaction wasn't mined") - } - - // The channel should now show up as force closed both for Alice and - // Carol. - err = wait.Predicate(func() bool { - pendingChansRequest := &lnrpc.PendingChannelsRequest{} - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := net.Alice.PendingChannels(ctxt, - pendingChansRequest) - if err != nil { - predErr = er.Errorf("unable to query for pending "+ - "channels: %v", err) - return false - } - n := len(pendingChanResp.WaitingCloseChannels) - if n != 0 { - predErr = er.Errorf("Expected to find %d channels "+ - "waiting close, found %d", 0, n) - return false - } - n = len(pendingChanResp.PendingForceClosingChannels) - if n != 1 { - predErr = er.Errorf("expected to find %d channel "+ - "pending force close, found %d", 1, n) - return false - } - return true - }, time.Second*15) - if err != nil { - t.Fatalf("%v", predErr) - } - - err = wait.Predicate(func() bool { - pendingChansRequest := &lnrpc.PendingChannelsRequest{} - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := carol.PendingChannels(ctxt, - pendingChansRequest) - if err != nil { - predErr = er.Errorf("unable to query for pending "+ - "channels: %v", err) - return false - } - n := len(pendingChanResp.PendingForceClosingChannels) - if n != 1 { - predErr = er.Errorf("expected to find %d channel "+ - "pending force close, found %d", 1, n) - return false - } - return true - }, time.Second*15) - if err != nil { - t.Fatalf("%v", predErr) - } - - // Carol will use the correct preimage to resolve the HTLC on-chain. - _, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) - if err != nil { - t.Fatalf("unable to find Carol's resolve tx in mempool: %v", err) - } - - // Mine enough blocks for Alice to sweep her funds from the force - // closed channel. 
- _, err = net.Miner.Node.Generate(defaultCSV - 1) - if err != nil { - t.Fatalf("unable to generate blocks: %v", err) - } - - // Wait for the sweeping tx to be broadcast. - _, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) - if err != nil { - t.Fatalf("unable to find Alice's sweep tx in mempool: %v", err) - } - - // Mine the sweep. - _, err = net.Miner.Node.Generate(1) - if err != nil { - t.Fatalf("unable to generate blocks: %v", err) - } - - // No pending channels should be left. - err = wait.Predicate(func() bool { - pendingChansRequest := &lnrpc.PendingChannelsRequest{} - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := net.Alice.PendingChannels(ctxt, - pendingChansRequest) - if err != nil { - predErr = er.Errorf("unable to query for pending "+ - "channels: %v", err) - return false - } - n := len(pendingChanResp.PendingForceClosingChannels) - if n != 0 { - predErr = er.Errorf("expected to find %d channel "+ - "pending force close, found %d", 0, n) - return false - } - return true - }, time.Second*15) - if err != nil { - t.Fatalf("%v", predErr) - } -} - -// testGarbageCollectLinkNodes tests that we properly garbase collect link nodes -// from the database and the set of persistent connections within the server. -func testGarbageCollectLinkNodes(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - const ( - chanAmt = 1000000 - ) - - // Open a channel between Alice and Bob which will later be - // cooperatively closed. - ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout) - coopChanPoint := openChannelAndAssert( - ctxt, t, net, net.Alice, net.Bob, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - - // Create Carol's node and connect Alice to her. 
- carol, err := net.NewNode("Carol", nil) - if err != nil { - t.Fatalf("unable to create carol's node: %v", err) - } - defer shutdownAndAssert(net, t, carol) - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, net.Alice, carol); err != nil { - t.Fatalf("unable to connect alice and carol: %v", err) - } - - // Open a channel between Alice and Carol which will later be force - // closed. - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - forceCloseChanPoint := openChannelAndAssert( - ctxt, t, net, net.Alice, carol, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - - // Now, create Dave's a node and also open a channel between Alice and - // him. This link will serve as the only persistent link throughout - // restarts in this test. - dave, err := net.NewNode("Dave", nil) - if err != nil { - t.Fatalf("unable to create dave's node: %v", err) - } - defer shutdownAndAssert(net, t, dave) - if err := net.ConnectNodes(ctxt, net.Alice, dave); err != nil { - t.Fatalf("unable to connect alice to dave: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - persistentChanPoint := openChannelAndAssert( - ctxt, t, net, net.Alice, dave, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - - // isConnected is a helper closure that checks if a peer is connected to - // Alice. - isConnected := func(pubKey string) bool { - req := &lnrpc.ListPeersRequest{} - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - resp, err := net.Alice.ListPeers(ctxt, req) - if err != nil { - t.Fatalf("unable to retrieve alice's peers: %v", err) - } - - for _, peer := range resp.Peers { - if peer.PubKey == pubKey { - return true - } - } - - return false - } - - // Restart both Bob and Carol to ensure Alice is able to reconnect to - // them. 
- if err := net.RestartNode(net.Bob, nil); err != nil { - t.Fatalf("unable to restart bob's node: %v", err) - } - if err := net.RestartNode(carol, nil); err != nil { - t.Fatalf("unable to restart carol's node: %v", err) - } - - err = wait.Predicate(func() bool { - return isConnected(net.Bob.PubKeyStr) - }, 15*time.Second) - if err != nil { - t.Fatalf("alice did not reconnect to bob") - } - err = wait.Predicate(func() bool { - return isConnected(carol.PubKeyStr) - }, 15*time.Second) - if err != nil { - t.Fatalf("alice did not reconnect to carol") - } - - // We'll also restart Alice to ensure she can reconnect to her peers - // with open channels. - if err := net.RestartNode(net.Alice, nil); err != nil { - t.Fatalf("unable to restart alice's node: %v", err) - } - - err = wait.Predicate(func() bool { - return isConnected(net.Bob.PubKeyStr) - }, 15*time.Second) - if err != nil { - t.Fatalf("alice did not reconnect to bob") - } - err = wait.Predicate(func() bool { - return isConnected(carol.PubKeyStr) - }, 15*time.Second) - if err != nil { - t.Fatalf("alice did not reconnect to carol") - } - err = wait.Predicate(func() bool { - return isConnected(dave.PubKeyStr) - }, 15*time.Second) - if err != nil { - t.Fatalf("alice did not reconnect to dave") - } - - // testReconnection is a helper closure that restarts the nodes at both - // ends of a channel to ensure they do not reconnect after restarting. - // When restarting Alice, we'll first need to ensure she has - // reestablished her connection with Dave, as they still have an open - // channel together. - testReconnection := func(node *lntest.HarnessNode) { - // Restart both nodes, to trigger the pruning logic. - if err := net.RestartNode(node, nil); err != nil { - t.Fatalf("unable to restart %v's node: %v", - node.Name(), err) - } - - if err := net.RestartNode(net.Alice, nil); err != nil { - t.Fatalf("unable to restart alice's node: %v", err) - } - - // Now restart both nodes and make sure they don't reconnect. 
- if err := net.RestartNode(node, nil); err != nil { - t.Fatalf("unable to restart %v's node: %v", node.Name(), - err) - } - err = wait.Invariant(func() bool { - return !isConnected(node.PubKeyStr) - }, 5*time.Second) - if err != nil { - t.Fatalf("alice reconnected to %v", node.Name()) - } - - if err := net.RestartNode(net.Alice, nil); err != nil { - t.Fatalf("unable to restart alice's node: %v", err) - } - err = wait.Predicate(func() bool { - return isConnected(dave.PubKeyStr) - }, 20*time.Second) - if err != nil { - t.Fatalf("alice didn't reconnect to Dave") - } - - err = wait.Invariant(func() bool { - return !isConnected(node.PubKeyStr) - }, 5*time.Second) - if err != nil { - t.Fatalf("alice reconnected to %v", node.Name()) - } - } - - // Now, we'll close the channel between Alice and Bob and ensure there - // is no reconnection logic between the both once the channel is fully - // closed. - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Alice, coopChanPoint, false) - - testReconnection(net.Bob) - - // We'll do the same with Alice and Carol, but this time we'll force - // close the channel instead. - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Alice, forceCloseChanPoint, true) - - // Cleanup by mining the force close and sweep transaction. - cleanupForceClose(t, net, net.Alice, forceCloseChanPoint) - - // We'll need to mine some blocks in order to mark the channel fully - // closed. - _, err = net.Miner.Node.Generate(chainreg.DefaultBitcoinTimeLockDelta - defaultCSV) - if err != nil { - t.Fatalf("unable to generate blocks: %v", err) - } - - // Before we test reconnection, we'll ensure that the channel has been - // fully cleaned up for both Carol and Alice. 
- var predErr er.R - pendingChansRequest := &lnrpc.PendingChannelsRequest{} - err = wait.Predicate(func() bool { - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := net.Alice.PendingChannels( - ctxt, pendingChansRequest, - ) - if err != nil { - predErr = er.Errorf("unable to query for pending "+ - "channels: %v", err) - return false - } - - predErr = checkNumForceClosedChannels(pendingChanResp, 0) - if predErr != nil { - return false - } - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err = carol.PendingChannels( - ctxt, pendingChansRequest, - ) - if err != nil { - predErr = er.Errorf("unable to query for pending "+ - "channels: %v", err) - return false - } - - predErr = checkNumForceClosedChannels(pendingChanResp, 0) - if predErr != nil { - return false - } - - return true - }, time.Second*15) - if err != nil { - t.Fatalf("channels not marked as fully resolved: %v", predErr) - } - - testReconnection(carol) - - // Finally, we'll ensure that Bob and Carol no longer show in Alice's - // channel graph. - describeGraphReq := &lnrpc.ChannelGraphRequest{ - IncludeUnannounced: true, - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - channelGraph, errr := net.Alice.DescribeGraph(ctxt, describeGraphReq) - if errr != nil { - t.Fatalf("unable to query for alice's channel graph: %v", errr) - } - for _, node := range channelGraph.Nodes { - if node.PubKey == net.Bob.PubKeyStr { - t.Fatalf("did not expect to find bob in the channel " + - "graph, but did") - } - if node.PubKey == carol.PubKeyStr { - t.Fatalf("did not expect to find carol in the channel " + - "graph, but did") - } - } - - // Now that the test is done, we can also close the persistent link. 
- ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Alice, persistentChanPoint, false) -} - -// testRevokedCloseRetribution tests that Carol is able carry out -// retribution in the event that she fails immediately after detecting Bob's -// breach txn in the mempool. -func testRevokedCloseRetribution(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - const ( - chanAmt = lnd.MaxBtcFundingAmount - paymentAmt = 10000 - numInvoices = 6 - ) - - // Carol will be the breached party. We set --nolisten to ensure Bob - // won't be able to connect to her and trigger the channel data - // protection logic automatically. We also can't have Carol - // automatically re-connect too early, otherwise DLP would be initiated - // instead of the breach we want to provoke. - carol, err := net.NewNode( - "Carol", - []string{"--hodl.exit-settle", "--nolisten", "--minbackoff=1h"}, - ) - if err != nil { - t.Fatalf("unable to create new carol node: %v", err) - } - defer shutdownAndAssert(net, t, carol) - - // We must let Bob communicate with Carol before they are able to open - // channel, so we connect Bob and Carol, - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, carol, net.Bob); err != nil { - t.Fatalf("unable to connect dave to carol: %v", err) - } - - // Before we make a channel, we'll load up Carol with some coins sent - // directly from the miner. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.SendCoins(ctxt, btcutil.UnitsPerCoin(), carol) - if err != nil { - t.Fatalf("unable to send coins to carol: %v", err) - } - - // In order to test Carol's response to an uncooperative channel - // closure by Bob, we'll first open up a channel between them with a - // 0.5 BTC value. 
- ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - chanPoint := openChannelAndAssert( - ctxt, t, net, carol, net.Bob, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - - // With the channel open, we'll create a few invoices for Bob that - // Carol will pay to in order to advance the state of the channel. - bobPayReqs, _, _, err := createPayReqs( - net.Bob, paymentAmt, numInvoices, - ) - if err != nil { - t.Fatalf("unable to create pay reqs: %v", err) - } - - // Wait for Carol to receive the channel edge from the funding manager. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = carol.WaitForNetworkChannelOpen(ctxt, chanPoint) - if err != nil { - t.Fatalf("carol didn't see the carol->bob channel before "+ - "timeout: %v", err) - } - - // Send payments from Carol to Bob using 3 of Bob's payment hashes - // generated above. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = completePaymentRequests( - ctxt, carol, carol.RouterClient, bobPayReqs[:numInvoices/2], - true, - ) - if err != nil { - t.Fatalf("unable to send payments: %v", err) - } - - // Next query for Bob's channel state, as we sent 3 payments of 10k - // satoshis each, Bob should now see his balance as being 30k satoshis. - var bobChan *lnrpc.Channel - var predErr er.R - err = wait.Predicate(func() bool { - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - bChan, err := getChanInfo(ctxt, net.Bob) - if err != nil { - t.Fatalf("unable to get bob's channel info: %v", err) - } - if bChan.LocalBalance != 30000 { - predErr = er.Errorf("bob's balance is incorrect, "+ - "got %v, expected %v", bChan.LocalBalance, - 30000) - return false - } - - bobChan = bChan - return true - }, time.Second*15) - if err != nil { - t.Fatalf("%v", predErr) - } - - // Grab Bob's current commitment height (update number), we'll later - // revert him to this state after additional updates to force him to - // broadcast this soon to be revoked state. 
- bobStateNumPreCopy := bobChan.NumUpdates - - // Create a temporary file to house Bob's database state at this - // particular point in history. - bobTempDbPath, errr := ioutil.TempDir("", "bob-past-state") - if errr != nil { - t.Fatalf("unable to create temp db folder: %v", errr) - } - defer os.Remove(bobTempDbPath) - - // With the temporary file created, copy Bob's current state into the - // temporary file we created above. Later after more updates, we'll - // restore this state. - if err := lntest.CopyAll(bobTempDbPath, net.Bob.DBDir()); err != nil { - t.Fatalf("unable to copy database files: %v", err) - } - - // Finally, send payments from Carol to Bob, consuming Bob's remaining - // payment hashes. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = completePaymentRequests( - ctxt, carol, carol.RouterClient, bobPayReqs[numInvoices/2:], - true, - ) - if err != nil { - t.Fatalf("unable to send payments: %v", err) - } - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - bobChan, err = getChanInfo(ctxt, net.Bob) - if err != nil { - t.Fatalf("unable to get bob chan info: %v", err) - } - - // Now we shutdown Bob, copying over the his temporary database state - // which has the *prior* channel state over his current most up to date - // state. With this, we essentially force Bob to travel back in time - // within the channel's history. - if err = net.RestartNode(net.Bob, func() er.R { - return lntest.CopyAll(net.Bob.DBDir(), bobTempDbPath) - }); err != nil { - t.Fatalf("unable to restart node: %v", err) - } - - // Now query for Bob's channel state, it should show that he's at a - // state number in the past, not the *latest* state. 
- ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - bobChan, err = getChanInfo(ctxt, net.Bob) - if err != nil { - t.Fatalf("unable to get bob chan info: %v", err) - } - if bobChan.NumUpdates != bobStateNumPreCopy { - t.Fatalf("db copy failed: %v", bobChan.NumUpdates) - } - - // Now force Bob to execute a *force* channel closure by unilaterally - // broadcasting his current channel state. This is actually the - // commitment transaction of a prior *revoked* state, so he'll soon - // feel the wrath of Carol's retribution. - var closeUpdates lnrpc.Lightning_CloseChannelClient - force := true - err = wait.Predicate(func() bool { - ctxt, _ := context.WithTimeout(ctxb, channelCloseTimeout) - closeUpdates, _, err = net.CloseChannel(ctxt, net.Bob, chanPoint, force) - if err != nil { - predErr = err - return false - } - - return true - }, time.Second*10) - if err != nil { - t.Fatalf("unable to close channel: %v", predErr) - } - - // Wait for Bob's breach transaction to show up in the mempool to ensure - // that Carol's node has started waiting for confirmations. - _, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) - if err != nil { - t.Fatalf("unable to find Bob's breach tx in mempool: %v", err) - } - - // Here, Carol sees Bob's breach transaction in the mempool, but is waiting - // for it to confirm before continuing her retribution. We restart Carol to - // ensure that she is persisting her retribution state and continues - // watching for the breach transaction to confirm even after her node - // restarts. - if err := net.RestartNode(carol, nil); err != nil { - t.Fatalf("unable to restart Carol's node: %v", err) - } - - // Finally, generate a single block, wait for the final close status - // update, then ensure that the closing transaction was included in the - // block. 
- block := mineBlocks(t, net, 1, 1)[0] - - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - breachTXID, err := net.WaitForChannelClose(ctxt, closeUpdates) - if err != nil { - t.Fatalf("error while waiting for channel close: %v", err) - } - assertTxInBlock(t, block, breachTXID) - - // Query the mempool for Carol's justice transaction, this should be - // broadcast as Bob's contract breaching transaction gets confirmed - // above. - justiceTXID, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) - if err != nil { - t.Fatalf("unable to find Carol's justice tx in mempool: %v", err) - } - time.Sleep(100 * time.Millisecond) - - // Query for the mempool transaction found above. Then assert that all - // the inputs of this transaction are spending outputs generated by - // Bob's breach transaction above. - justiceTx, err := net.Miner.Node.GetRawTransaction(justiceTXID) - if err != nil { - t.Fatalf("unable to query for justice tx: %v", err) - } - for _, txIn := range justiceTx.MsgTx().TxIn { - if !bytes.Equal(txIn.PreviousOutPoint.Hash[:], breachTXID[:]) { - t.Fatalf("justice tx not spending commitment utxo "+ - "instead is: %v", txIn.PreviousOutPoint) - } - } - - // We restart Carol here to ensure that she persists her retribution state - // and successfully continues exacting retribution after restarting. At - // this point, Carol has broadcast the justice transaction, but it hasn't - // been confirmed yet; when Carol restarts, she should start waiting for - // the justice transaction to confirm again. - if err := net.RestartNode(carol, nil); err != nil { - t.Fatalf("unable to restart Carol's node: %v", err) - } - - // Now mine a block, this transaction should include Carol's justice - // transaction which was just accepted into the mempool. - block = mineBlocks(t, net, 1, 1)[0] - - // The block should have exactly *two* transactions, one of which is - // the justice transaction. 
- if len(block.Transactions) != 2 { - t.Fatalf("transaction wasn't mined") - } - justiceSha := block.Transactions[1].TxHash() - if !bytes.Equal(justiceTx.Hash()[:], justiceSha[:]) { - t.Fatalf("justice tx wasn't mined") - } - - assertNodeNumChannels(t, carol, 0) - - // Mine enough blocks for Bob's channel arbitrator to wrap up the - // references to the breached channel. The chanarb waits for commitment - // tx's confHeight+CSV-1 blocks and since we've already mined one that - // included the justice tx we only need to mine extra DefaultCSV-2 - // blocks to unlock it. - mineBlocks(t, net, lntest.DefaultCSV-2, 0) - - assertNumPendingChannels(t, net.Bob, 0, 0) -} - -// testRevokedCloseRetributionZeroValueRemoteOutput tests that Dave is able -// carry out retribution in the event that she fails in state where the remote -// commitment output has zero-value. -func testRevokedCloseRetributionZeroValueRemoteOutput(net *lntest.NetworkHarness, - t *harnessTest) { - ctxb := context.Background() - - const ( - chanAmt = lnd.MaxBtcFundingAmount - paymentAmt = 10000 - numInvoices = 6 - ) - - // Since we'd like to test some multi-hop failure scenarios, we'll - // introduce another node into our test network: Carol. - carol, err := net.NewNode("Carol", []string{"--hodl.exit-settle"}) - if err != nil { - t.Fatalf("unable to create new nodes: %v", err) - } - defer shutdownAndAssert(net, t, carol) - - // Dave will be the breached party. We set --nolisten to ensure Carol - // won't be able to connect to him and trigger the channel data - // protection logic automatically. We also can't have Dave automatically - // re-connect too early, otherwise DLP would be initiated instead of the - // breach we want to provoke. 
- dave, err := net.NewNode( - "Dave", - []string{"--hodl.exit-settle", "--nolisten", "--minbackoff=1h"}, - ) - if err != nil { - t.Fatalf("unable to create new node: %v", err) - } - defer shutdownAndAssert(net, t, dave) - - // We must let Dave have an open channel before she can send a node - // announcement, so we open a channel with Carol, - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, dave, carol); err != nil { - t.Fatalf("unable to connect dave to carol: %v", err) - } - - // Before we make a channel, we'll load up Dave with some coins sent - // directly from the miner. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.SendCoins(ctxt, btcutil.UnitsPerCoin(), dave) - if err != nil { - t.Fatalf("unable to send coins to dave: %v", err) - } - - // In order to test Dave's response to an uncooperative channel - // closure by Carol, we'll first open up a channel between them with a - // 0.5 BTC value. - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - chanPoint := openChannelAndAssert( - ctxt, t, net, dave, carol, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - - // With the channel open, we'll create a few invoices for Carol that - // Dave will pay to in order to advance the state of the channel. - carolPayReqs, _, _, err := createPayReqs( - carol, paymentAmt, numInvoices, - ) - if err != nil { - t.Fatalf("unable to create pay reqs: %v", err) - } - - // Wait for Dave to receive the channel edge from the funding manager. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = dave.WaitForNetworkChannelOpen(ctxt, chanPoint) - if err != nil { - t.Fatalf("dave didn't see the dave->carol channel before "+ - "timeout: %v", err) - } - - // Next query for Carol's channel state, as we sent 0 payments, Carol - // should now see her balance as being 0 satoshis. 
- ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - carolChan, err := getChanInfo(ctxt, carol) - if err != nil { - t.Fatalf("unable to get carol's channel info: %v", err) - } - if carolChan.LocalBalance != 0 { - t.Fatalf("carol's balance is incorrect, got %v, expected %v", - carolChan.LocalBalance, 0) - } - - // Grab Carol's current commitment height (update number), we'll later - // revert her to this state after additional updates to force him to - // broadcast this soon to be revoked state. - carolStateNumPreCopy := carolChan.NumUpdates - - // Create a temporary file to house Carol's database state at this - // particular point in history. - carolTempDbPath, errr := ioutil.TempDir("", "carol-past-state") - if errr != nil { - t.Fatalf("unable to create temp db folder: %v", errr) - } - defer os.Remove(carolTempDbPath) - - // With the temporary file created, copy Carol's current state into the - // temporary file we created above. Later after more updates, we'll - // restore this state. - if err := lntest.CopyAll(carolTempDbPath, carol.DBDir()); err != nil { - t.Fatalf("unable to copy database files: %v", err) - } - - // Finally, send payments from Dave to Carol, consuming Carol's remaining - // payment hashes. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = completePaymentRequests( - ctxt, dave, dave.RouterClient, carolPayReqs, false, - ) - if err != nil { - t.Fatalf("unable to send payments: %v", err) - } - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - carolChan, err = getChanInfo(ctxt, carol) - if err != nil { - t.Fatalf("unable to get carol chan info: %v", err) - } - - // Now we shutdown Carol, copying over the his temporary database state - // which has the *prior* channel state over his current most up to date - // state. With this, we essentially force Carol to travel back in time - // within the channel's history. 
- if err = net.RestartNode(carol, func() er.R { - return lntest.CopyAll(carol.DBDir(), carolTempDbPath) - }); err != nil { - t.Fatalf("unable to restart node: %v", err) - } - - // Now query for Carol's channel state, it should show that he's at a - // state number in the past, not the *latest* state. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - carolChan, err = getChanInfo(ctxt, carol) - if err != nil { - t.Fatalf("unable to get carol chan info: %v", err) - } - if carolChan.NumUpdates != carolStateNumPreCopy { - t.Fatalf("db copy failed: %v", carolChan.NumUpdates) - } - - // Now force Carol to execute a *force* channel closure by unilaterally - // broadcasting his current channel state. This is actually the - // commitment transaction of a prior *revoked* state, so he'll soon - // feel the wrath of Dave's retribution. - var ( - closeUpdates lnrpc.Lightning_CloseChannelClient - closeTxId *chainhash.Hash - closeErr er.R - force bool = true - ) - err = wait.Predicate(func() bool { - ctxt, _ := context.WithTimeout(ctxb, channelCloseTimeout) - closeUpdates, closeTxId, closeErr = net.CloseChannel( - ctxt, carol, chanPoint, force, - ) - return closeErr == nil - }, time.Second*15) - if err != nil { - t.Fatalf("unable to close channel: %v", closeErr) - } - - // Query the mempool for the breaching closing transaction, this should - // be broadcast by Carol when she force closes the channel above. - txid, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) - if err != nil { - t.Fatalf("unable to find Carol's force close tx in mempool: %v", - err) - } - if *txid != *closeTxId { - t.Fatalf("expected closeTx(%v) in mempool, instead found %v", - closeTxId, txid) - } - - // Finally, generate a single block, wait for the final close status - // update, then ensure that the closing transaction was included in the - // block. - block := mineBlocks(t, net, 1, 1)[0] - - // Here, Dave receives a confirmation of Carol's breach transaction. 
- // We restart Dave to ensure that she is persisting her retribution - // state and continues exacting justice after her node restarts. - if err := net.RestartNode(dave, nil); err != nil { - t.Fatalf("unable to stop Dave's node: %v", err) - } - - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - breachTXID, err := net.WaitForChannelClose(ctxt, closeUpdates) - if err != nil { - t.Fatalf("error while waiting for channel close: %v", err) - } - assertTxInBlock(t, block, breachTXID) - - // Query the mempool for Dave's justice transaction, this should be - // broadcast as Carol's contract breaching transaction gets confirmed - // above. - justiceTXID, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) - if err != nil { - t.Fatalf("unable to find Dave's justice tx in mempool: %v", - err) - } - time.Sleep(100 * time.Millisecond) - - // Query for the mempool transaction found above. Then assert that all - // the inputs of this transaction are spending outputs generated by - // Carol's breach transaction above. - justiceTx, err := net.Miner.Node.GetRawTransaction(justiceTXID) - if err != nil { - t.Fatalf("unable to query for justice tx: %v", err) - } - for _, txIn := range justiceTx.MsgTx().TxIn { - if !bytes.Equal(txIn.PreviousOutPoint.Hash[:], breachTXID[:]) { - t.Fatalf("justice tx not spending commitment utxo "+ - "instead is: %v", txIn.PreviousOutPoint) - } - } - - // We restart Dave here to ensure that he persists her retribution state - // and successfully continues exacting retribution after restarting. At - // this point, Dave has broadcast the justice transaction, but it hasn't - // been confirmed yet; when Dave restarts, she should start waiting for - // the justice transaction to confirm again. - if err := net.RestartNode(dave, nil); err != nil { - t.Fatalf("unable to restart Dave's node: %v", err) - } - - // Now mine a block, this transaction should include Dave's justice - // transaction which was just accepted into the mempool. 
- block = mineBlocks(t, net, 1, 1)[0] - - // The block should have exactly *two* transactions, one of which is - // the justice transaction. - if len(block.Transactions) != 2 { - t.Fatalf("transaction wasn't mined") - } - justiceSha := block.Transactions[1].TxHash() - if !bytes.Equal(justiceTx.Hash()[:], justiceSha[:]) { - t.Fatalf("justice tx wasn't mined") - } - - assertNodeNumChannels(t, dave, 0) -} - -// testRevokedCloseRetributionRemoteHodl tests that Dave properly responds to a -// channel breach made by the remote party, specifically in the case that the -// remote party breaches before settling extended HTLCs. -func testRevokedCloseRetributionRemoteHodl(net *lntest.NetworkHarness, - t *harnessTest) { - ctxb := context.Background() - - const ( - chanAmt = lnd.MaxBtcFundingAmount - pushAmt = 200000 - paymentAmt = 10000 - numInvoices = 6 - ) - - // Since this test will result in the counterparty being left in a - // weird state, we will introduce another node into our test network: - // Carol. - carol, err := net.NewNode("Carol", []string{"--hodl.exit-settle"}) - if err != nil { - t.Fatalf("unable to create new nodes: %v", err) - } - defer shutdownAndAssert(net, t, carol) - - // We'll also create a new node Dave, who will have a channel with - // Carol, and also use similar settings so we can broadcast a commit - // with active HTLCs. Dave will be the breached party. We set - // --nolisten to ensure Carol won't be able to connect to him and - // trigger the channel data protection logic automatically. 
- dave, err := net.NewNode( - "Dave", - []string{"--hodl.exit-settle", "--nolisten"}, - ) - if err != nil { - t.Fatalf("unable to create new dave node: %v", err) - } - defer shutdownAndAssert(net, t, dave) - - // We must let Dave communicate with Carol before they are able to open - // channel, so we connect Dave and Carol, - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, dave, carol); err != nil { - t.Fatalf("unable to connect dave to carol: %v", err) - } - - // Before we make a channel, we'll load up Dave with some coins sent - // directly from the miner. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.SendCoins(ctxt, btcutil.UnitsPerCoin(), dave) - if err != nil { - t.Fatalf("unable to send coins to dave: %v", err) - } - - // In order to test Dave's response to an uncooperative channel closure - // by Carol, we'll first open up a channel between them with a - // lnd.MaxBtcFundingAmount (2^24) satoshis value. - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - chanPoint := openChannelAndAssert( - ctxt, t, net, dave, carol, - lntest.OpenChannelParams{ - Amt: chanAmt, - PushAmt: pushAmt, - }, - ) - - // With the channel open, we'll create a few invoices for Carol that - // Dave will pay to in order to advance the state of the channel. - carolPayReqs, _, _, err := createPayReqs( - carol, paymentAmt, numInvoices, - ) - if err != nil { - t.Fatalf("unable to create pay reqs: %v", err) - } - - // We'll introduce a closure to validate that Carol's current balance - // matches the given expected amount. 
- checkCarolBalance := func(expectedAmt int64) { - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - carolChan, err := getChanInfo(ctxt, carol) - if err != nil { - t.Fatalf("unable to get carol's channel info: %v", err) - } - if carolChan.LocalBalance != expectedAmt { - t.Fatalf("carol's balance is incorrect, "+ - "got %v, expected %v", carolChan.LocalBalance, - expectedAmt) - } - } - - // We'll introduce another closure to validate that Carol's current - // number of updates is at least as large as the provided minimum - // number. - checkCarolNumUpdatesAtLeast := func(minimum uint64) { - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - carolChan, err := getChanInfo(ctxt, carol) - if err != nil { - t.Fatalf("unable to get carol's channel info: %v", err) - } - if carolChan.NumUpdates < minimum { - t.Fatalf("carol's numupdates is incorrect, want %v "+ - "to be at least %v", carolChan.NumUpdates, - minimum) - } - } - - // Wait for Dave to receive the channel edge from the funding manager. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = dave.WaitForNetworkChannelOpen(ctxt, chanPoint) - if err != nil { - t.Fatalf("dave didn't see the dave->carol channel before "+ - "timeout: %v", err) - } - - // Ensure that carol's balance starts with the amount we pushed to her. - checkCarolBalance(pushAmt) - - // Send payments from Dave to Carol using 3 of Carol's payment hashes - // generated above. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = completePaymentRequests( - ctxt, dave, dave.RouterClient, carolPayReqs[:numInvoices/2], - false, - ) - if err != nil { - t.Fatalf("unable to send payments: %v", err) - } - - // At this point, we'll also send over a set of HTLC's from Carol to - // Dave. This ensures that the final revoked transaction has HTLC's in - // both directions. 
- davePayReqs, _, _, err := createPayReqs( - dave, paymentAmt, numInvoices, - ) - if err != nil { - t.Fatalf("unable to create pay reqs: %v", err) - } - - // Send payments from Carol to Dave using 3 of Dave's payment hashes - // generated above. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = completePaymentRequests( - ctxt, carol, carol.RouterClient, davePayReqs[:numInvoices/2], - false, - ) - if err != nil { - t.Fatalf("unable to send payments: %v", err) - } - - // Next query for Carol's channel state, as we sent 3 payments of 10k - // satoshis each, however Carol should now see her balance as being - // equal to the push amount in satoshis since she has not settled. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - carolChan, err := getChanInfo(ctxt, carol) - if err != nil { - t.Fatalf("unable to get carol's channel info: %v", err) - } - - // Grab Carol's current commitment height (update number), we'll later - // revert her to this state after additional updates to force her to - // broadcast this soon to be revoked state. - carolStateNumPreCopy := carolChan.NumUpdates - - // Ensure that carol's balance still reflects the original amount we - // pushed to her, minus the HTLCs she just sent to Dave. - checkCarolBalance(pushAmt - 3*paymentAmt) - - // Since Carol has not settled, she should only see at least one update - // to her channel. - checkCarolNumUpdatesAtLeast(1) - - // Create a temporary file to house Carol's database state at this - // particular point in history. - carolTempDbPath, errr := ioutil.TempDir("", "carol-past-state") - if errr != nil { - t.Fatalf("unable to create temp db folder: %v", errr) - } - defer os.Remove(carolTempDbPath) - - // With the temporary file created, copy Carol's current state into the - // temporary file we created above. Later after more updates, we'll - // restore this state. 
- if err := lntest.CopyAll(carolTempDbPath, carol.DBDir()); err != nil { - t.Fatalf("unable to copy database files: %v", err) - } - - // Finally, send payments from Dave to Carol, consuming Carol's - // remaining payment hashes. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = completePaymentRequests( - ctxt, dave, dave.RouterClient, carolPayReqs[numInvoices/2:], - false, - ) - if err != nil { - t.Fatalf("unable to send payments: %v", err) - } - - // Ensure that carol's balance still shows the amount we originally - // pushed to her (minus the HTLCs she sent to Bob), and that at least - // one more update has occurred. - time.Sleep(500 * time.Millisecond) - checkCarolBalance(pushAmt - 3*paymentAmt) - checkCarolNumUpdatesAtLeast(carolStateNumPreCopy + 1) - - // Suspend Dave, such that Carol won't reconnect at startup, triggering - // the data loss protection. - restartDave, err := net.SuspendNode(dave) - if err != nil { - t.Fatalf("unable to suspend Dave: %v", err) - } - - // Now we shutdown Carol, copying over the her temporary database state - // which has the *prior* channel state over her current most up to date - // state. With this, we essentially force Carol to travel back in time - // within the channel's history. - if err = net.RestartNode(carol, func() er.R { - return lntest.CopyAll(carol.DBDir(), carolTempDbPath) - }); err != nil { - t.Fatalf("unable to restart node: %v", err) - } - - time.Sleep(200 * time.Millisecond) - - // Ensure that Carol's view of the channel is consistent with the state - // of the channel just before it was snapshotted. - checkCarolBalance(pushAmt - 3*paymentAmt) - checkCarolNumUpdatesAtLeast(1) - - // Now query for Carol's channel state, it should show that she's at a - // state number in the past, *not* the latest state. 
- ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - carolChan, err = getChanInfo(ctxt, carol) - if err != nil { - t.Fatalf("unable to get carol chan info: %v", err) - } - if carolChan.NumUpdates != carolStateNumPreCopy { - t.Fatalf("db copy failed: %v", carolChan.NumUpdates) - } - - // Now force Carol to execute a *force* channel closure by unilaterally - // broadcasting her current channel state. This is actually the - // commitment transaction of a prior *revoked* state, so she'll soon - // feel the wrath of Dave's retribution. - force := true - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeUpdates, closeTxId, err := net.CloseChannel(ctxt, carol, - chanPoint, force) - if err != nil { - t.Fatalf("unable to close channel: %v", err) - } - - // Query the mempool for the breaching closing transaction, this should - // be broadcast by Carol when she force closes the channel above. - txid, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) - if err != nil { - t.Fatalf("unable to find Carol's force close tx in mempool: %v", - err) - } - if *txid != *closeTxId { - t.Fatalf("expected closeTx(%v) in mempool, instead found %v", - closeTxId, txid) - } - - // Generate a single block to mine the breach transaction. - block := mineBlocks(t, net, 1, 1)[0] - - // We resurrect Dave to ensure he will be exacting justice after his - // node restarts. - if err := restartDave(); err != nil { - t.Fatalf("unable to stop Dave's node: %v", err) - } - - // Finally, wait for the final close status update, then ensure that - // the closing transaction was included in the block. 
- ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - breachTXID, err := net.WaitForChannelClose(ctxt, closeUpdates) - if err != nil { - t.Fatalf("error while waiting for channel close: %v", err) - } - if *breachTXID != *closeTxId { - t.Fatalf("expected breach ID(%v) to be equal to close ID (%v)", - breachTXID, closeTxId) - } - assertTxInBlock(t, block, breachTXID) - - // Query the mempool for Dave's justice transaction, this should be - // broadcast as Carol's contract breaching transaction gets confirmed - // above. Since Carol might have had the time to take some of the HTLC - // outputs to the second level before Dave broadcasts his justice tx, - // we'll search through the mempool for a tx that matches the number of - // expected inputs in the justice tx. - var predErr er.R - var justiceTxid *chainhash.Hash - errNotFound := er.GenericErrorType.Code("justice tx not found") - findJusticeTx := func() (*chainhash.Hash, er.R) { - mempool, err := net.Miner.Node.GetRawMempool() - if err != nil { - return nil, er.Errorf("unable to get mempool from "+ - "miner: %v", err) - } - - for _, txid := range mempool { - // Check that the justice tx has the appropriate number - // of inputs. - tx, err := net.Miner.Node.GetRawTransaction(txid) - if err != nil { - return nil, er.Errorf("unable to query for "+ - "txs: %v", err) - } - - exNumInputs := 2 + numInvoices - if len(tx.MsgTx().TxIn) == exNumInputs { - return txid, nil - } - } - return nil, errNotFound.Default() - } - - err = wait.Predicate(func() bool { - txid, err := findJusticeTx() - if err != nil { - predErr = err - return false - } - - justiceTxid = txid - return true - }, time.Second*10) - if err != nil && errNotFound.Is(predErr) { - // If Dave is unable to broadcast his justice tx on first - // attempt because of the second layer transactions, he will - // wait until the next block epoch before trying again. Because - // of this, we'll mine a block if we cannot find the justice tx - // immediately. 
Since we cannot tell for sure how many - // transactions will be in the mempool at this point, we pass 0 - // as the last argument, indicating we don't care what's in the - // mempool. - mineBlocks(t, net, 1, 0) - err = wait.Predicate(func() bool { - txid, err := findJusticeTx() - if err != nil { - predErr = err - return false - } - - justiceTxid = txid - return true - }, time.Second*10) - } - if err != nil { - t.Fatalf(predErr.String()) - } - - justiceTx, err := net.Miner.Node.GetRawTransaction(justiceTxid) - if err != nil { - t.Fatalf("unable to query for justice tx: %v", err) - } - - // isSecondLevelSpend checks that the passed secondLevelTxid is a - // potentitial second level spend spending from the commit tx. - isSecondLevelSpend := func(commitTxid, secondLevelTxid *chainhash.Hash) bool { - secondLevel, err := net.Miner.Node.GetRawTransaction( - secondLevelTxid) - if err != nil { - t.Fatalf("unable to query for tx: %v", err) - } - - // A second level spend should have only one input, and one - // output. - if len(secondLevel.MsgTx().TxIn) != 1 { - return false - } - if len(secondLevel.MsgTx().TxOut) != 1 { - return false - } - - // The sole input should be spending from the commit tx. - txIn := secondLevel.MsgTx().TxIn[0] - if !bytes.Equal(txIn.PreviousOutPoint.Hash[:], commitTxid[:]) { - return false - } - - return true - } - - // Check that all the inputs of this transaction are spending outputs - // generated by Carol's breach transaction above. - for _, txIn := range justiceTx.MsgTx().TxIn { - if bytes.Equal(txIn.PreviousOutPoint.Hash[:], breachTXID[:]) { - continue - } - - // If the justice tx is spending from an output that was not on - // the breach tx, Carol might have had the time to take an - // output to the second level. In that case, check that the - // justice tx is spending this second level output. 
- if isSecondLevelSpend(breachTXID, &txIn.PreviousOutPoint.Hash) { - continue - } - t.Fatalf("justice tx not spending commitment utxo "+ - "instead is: %v", txIn.PreviousOutPoint) - } - time.Sleep(100 * time.Millisecond) - - // We restart Dave here to ensure that he persists he retribution state - // and successfully continues exacting retribution after restarting. At - // this point, Dave has broadcast the justice transaction, but it - // hasn't been confirmed yet; when Dave restarts, he should start - // waiting for the justice transaction to confirm again. - if err := net.RestartNode(dave, nil); err != nil { - t.Fatalf("unable to restart Dave's node: %v", err) - } - - // Now mine a block, this transaction should include Dave's justice - // transaction which was just accepted into the mempool. - block = mineBlocks(t, net, 1, 1)[0] - assertTxInBlock(t, block, justiceTxid) - - // Dave should have no open channels. - assertNodeNumChannels(t, dave, 0) -} - -// testRevokedCloseRetributionAltruistWatchtower establishes a channel between -// Carol and Dave, where Carol is using a third node Willy as her watchtower. -// After sending some payments, Dave reverts his state and force closes to -// trigger a breach. Carol is kept offline throughout the process and the test -// asserts that Willy responds by broadcasting the justice transaction on -// Carol's behalf sweeping her funds without a reward. -func testRevokedCloseRetributionAltruistWatchtower(net *lntest.NetworkHarness, - t *harnessTest) { - - ctxb := context.Background() - const ( - chanAmt = lnd.MaxBtcFundingAmount - paymentAmt = 10000 - numInvoices = 6 - externalIP = "1.2.3.4" - ) - - // Since we'd like to test some multi-hop failure scenarios, we'll - // introduce another node into our test network: Carol. 
- carol, err := net.NewNode("Carol", []string{"--hodl.exit-settle"}) - if err != nil { - t.Fatalf("unable to create new nodes: %v", err) - } - defer shutdownAndAssert(net, t, carol) - - // Willy the watchtower will protect Dave from Carol's breach. He will - // remain online in order to punish Carol on Dave's behalf, since the - // breach will happen while Dave is offline. - willy, err := net.NewNode("Willy", []string{ - "--watchtower.active", - "--watchtower.externalip=" + externalIP, - }) - if err != nil { - t.Fatalf("unable to create new nodes: %v", err) - } - defer shutdownAndAssert(net, t, willy) - - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - willyInfo, errr := willy.Watchtower.GetInfo( - ctxt, &watchtowerrpc.GetInfoRequest{}, - ) - if errr != nil { - t.Fatalf("unable to getinfo from willy: %v", errr) - } - - // Assert that Willy has one listener and it is 0.0.0.0:9911 or - // [::]:9911. Since no listener is explicitly specified, one of these - // should be the default depending on whether the host supports IPv6 or - // not. - if len(willyInfo.Listeners) != 1 { - t.Fatalf("Willy should have 1 listener, has %d", - len(willyInfo.Listeners)) - } - listener := willyInfo.Listeners[0] - if listener != "0.0.0.0:9911" && listener != "[::]:9911" { - t.Fatalf("expected listener on 0.0.0.0:9911 or [::]:9911, "+ - "got %v", listener) - } - - // Assert the Willy's URIs properly display the chosen external IP. - if len(willyInfo.Uris) != 1 { - t.Fatalf("Willy should have 1 uri, has %d", - len(willyInfo.Uris)) - } - if !strings.Contains(willyInfo.Uris[0], externalIP) { - t.Fatalf("expected uri with %v, got %v", - externalIP, willyInfo.Uris[0]) - } - - // Dave will be the breached party. We set --nolisten to ensure Carol - // won't be able to connect to him and trigger the channel data - // protection logic automatically. 
- dave, err := net.NewNode("Dave", []string{ - "--nolisten", - "--wtclient.active", - }) - if err != nil { - t.Fatalf("unable to create new node: %v", err) - } - defer shutdownAndAssert(net, t, dave) - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - addTowerReq := &wtclientrpc.AddTowerRequest{ - Pubkey: willyInfo.Pubkey, - Address: listener, - } - if _, err := dave.WatchtowerClient.AddTower(ctxt, addTowerReq); err != nil { - t.Fatalf("unable to add willy's watchtower: %v", err) - } - - // We must let Dave have an open channel before she can send a node - // announcement, so we open a channel with Carol, - if err := net.ConnectNodes(ctxb, dave, carol); err != nil { - t.Fatalf("unable to connect dave to carol: %v", err) - } - - // Before we make a channel, we'll load up Dave with some coins sent - // directly from the miner. - err = net.SendCoins(ctxb, btcutil.UnitsPerCoin(), dave) - if err != nil { - t.Fatalf("unable to send coins to dave: %v", err) - } - - // In order to test Dave's response to an uncooperative channel - // closure by Carol, we'll first open up a channel between them with a - // 0.5 BTC value. - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - chanPoint := openChannelAndAssert( - ctxt, t, net, dave, carol, - lntest.OpenChannelParams{ - Amt: 3 * (chanAmt / 4), - PushAmt: chanAmt / 4, - }, - ) - - // With the channel open, we'll create a few invoices for Carol that - // Dave will pay to in order to advance the state of the channel. - carolPayReqs, _, _, err := createPayReqs( - carol, paymentAmt, numInvoices, - ) - if err != nil { - t.Fatalf("unable to create pay reqs: %v", err) - } - - // Wait for Dave to receive the channel edge from the funding manager. 
- ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = dave.WaitForNetworkChannelOpen(ctxt, chanPoint) - if err != nil { - t.Fatalf("dave didn't see the dave->carol channel before "+ - "timeout: %v", err) - } - - // Next query for Carol's channel state, as we sent 0 payments, Carol - // should still see her balance as the push amount, which is 1/4 of the - // capacity. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - carolChan, err := getChanInfo(ctxt, carol) - if err != nil { - t.Fatalf("unable to get carol's channel info: %v", err) - } - if carolChan.LocalBalance != int64(chanAmt/4) { - t.Fatalf("carol's balance is incorrect, got %v, expected %v", - carolChan.LocalBalance, chanAmt/4) - } - - // Grab Carol's current commitment height (update number), we'll later - // revert her to this state after additional updates to force him to - // broadcast this soon to be revoked state. - carolStateNumPreCopy := carolChan.NumUpdates - - // Create a temporary file to house Carol's database state at this - // particular point in history. - carolTempDbPath, errr := ioutil.TempDir("", "carol-past-state") - if errr != nil { - t.Fatalf("unable to create temp db folder: %v", errr) - } - defer os.Remove(carolTempDbPath) - - // With the temporary file created, copy Carol's current state into the - // temporary file we created above. Later after more updates, we'll - // restore this state. - if err := lntest.CopyAll(carolTempDbPath, carol.DBDir()); err != nil { - t.Fatalf("unable to copy database files: %v", err) - } - - // Finally, send payments from Dave to Carol, consuming Carol's remaining - // payment hashes. 
- err = completePaymentRequests( - ctxb, dave, dave.RouterClient, carolPayReqs, false, - ) - if err != nil { - t.Fatalf("unable to send payments: %v", err) - } - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - daveBalReq := &lnrpc.WalletBalanceRequest{} - daveBalResp, errr := dave.WalletBalance(ctxt, daveBalReq) - if errr != nil { - t.Fatalf("unable to get dave's balance: %v", errr) - } - - davePreSweepBalance := daveBalResp.ConfirmedBalance - - // Wait until the backup has been accepted by the watchtower before - // shutting down Dave. - err = wait.NoError(func() er.R { - ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout) - defer cancel() - bkpStats, errr := dave.WatchtowerClient.Stats(ctxt, &wtclientrpc.StatsRequest{}) - if errr != nil { - return er.E(errr) - - } - if bkpStats == nil { - return er.New("no active backup sessions") - } - if bkpStats.NumBackups == 0 { - return er.New("no backups accepted") - } - - return nil - }, defaultTimeout) - if err != nil { - t.Fatalf("unable to verify backup task completed: %v", err) - } - - // Shutdown Dave to simulate going offline for an extended period of - // time. Once he's not watching, Carol will try to breach the channel. - restart, err := net.SuspendNode(dave) - if err != nil { - t.Fatalf("unable to suspend Dave: %v", err) - } - - // Now we shutdown Carol, copying over the his temporary database state - // which has the *prior* channel state over his current most up to date - // state. With this, we essentially force Carol to travel back in time - // within the channel's history. - if err = net.RestartNode(carol, func() er.R { - return lntest.CopyAll(carol.DBDir(), carolTempDbPath) - }); err != nil { - t.Fatalf("unable to restart node: %v", err) - } - - // Now query for Carol's channel state, it should show that he's at a - // state number in the past, not the *latest* state. 
- ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - carolChan, err = getChanInfo(ctxt, carol) - if err != nil { - t.Fatalf("unable to get carol chan info: %v", err) - } - if carolChan.NumUpdates != carolStateNumPreCopy { - t.Fatalf("db copy failed: %v", carolChan.NumUpdates) - } - - // Now force Carol to execute a *force* channel closure by unilaterally - // broadcasting his current channel state. This is actually the - // commitment transaction of a prior *revoked* state, so he'll soon - // feel the wrath of Dave's retribution. - closeUpdates, closeTxId, err := net.CloseChannel( - ctxb, carol, chanPoint, true, - ) - if err != nil { - t.Fatalf("unable to close channel: %v", err) - } - - // Query the mempool for the breaching closing transaction, this should - // be broadcast by Carol when she force closes the channel above. - txid, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) - if err != nil { - t.Fatalf("unable to find Carol's force close tx in mempool: %v", - err) - } - if *txid != *closeTxId { - t.Fatalf("expected closeTx(%v) in mempool, instead found %v", - closeTxId, txid) - } - - // Finally, generate a single block, wait for the final close status - // update, then ensure that the closing transaction was included in the - // block. - block := mineBlocks(t, net, 1, 1)[0] - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - breachTXID, err := net.WaitForChannelClose(ctxt, closeUpdates) - if err != nil { - t.Fatalf("error while waiting for channel close: %v", err) - } - assertTxInBlock(t, block, breachTXID) - - // Query the mempool for Dave's justice transaction, this should be - // broadcast as Carol's contract breaching transaction gets confirmed - // above. - justiceTXID, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) - if err != nil { - t.Fatalf("unable to find Dave's justice tx in mempool: %v", - err) - } - time.Sleep(100 * time.Millisecond) - - // Query for the mempool transaction found above. 
Then assert that all - // the inputs of this transaction are spending outputs generated by - // Carol's breach transaction above. - justiceTx, err := net.Miner.Node.GetRawTransaction(justiceTXID) - if err != nil { - t.Fatalf("unable to query for justice tx: %v", err) - } - for _, txIn := range justiceTx.MsgTx().TxIn { - if !bytes.Equal(txIn.PreviousOutPoint.Hash[:], breachTXID[:]) { - t.Fatalf("justice tx not spending commitment utxo "+ - "instead is: %v", txIn.PreviousOutPoint) - } - } - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - willyBalReq := &lnrpc.WalletBalanceRequest{} - willyBalResp, errr := willy.WalletBalance(ctxt, willyBalReq) - if errr != nil { - t.Fatalf("unable to get willy's balance: %v", errr) - } - - if willyBalResp.ConfirmedBalance != 0 { - t.Fatalf("willy should have 0 balance before mining "+ - "justice transaction, instead has %d", - willyBalResp.ConfirmedBalance) - } - - // Now mine a block, this transaction should include Dave's justice - // transaction which was just accepted into the mempool. - block = mineBlocks(t, net, 1, 1)[0] - - // The block should have exactly *two* transactions, one of which is - // the justice transaction. - if len(block.Transactions) != 2 { - t.Fatalf("transaction wasn't mined") - } - justiceSha := block.Transactions[1].TxHash() - if !bytes.Equal(justiceTx.Hash()[:], justiceSha[:]) { - t.Fatalf("justice tx wasn't mined") - } - - // Ensure that Willy doesn't get any funds, as he is acting as an - // altruist watchtower. 
- var predErr er.R - err = wait.Invariant(func() bool { - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - willyBalReq := &lnrpc.WalletBalanceRequest{} - willyBalResp, err := willy.WalletBalance(ctxt, willyBalReq) - if err != nil { - t.Fatalf("unable to get willy's balance: %v", err) - } - - if willyBalResp.ConfirmedBalance != 0 { - predErr = er.Errorf("Expected Willy to have no funds "+ - "after justice transaction was mined, found %v", - willyBalResp) - return false - } - - return true - }, time.Second*5) - if err != nil { - t.Fatalf("%v", predErr) - } - - // Restart Dave, who will still think his channel with Carol is open. - // We should him to detect the breach, but realize that the funds have - // then been swept to his wallet by Willy. - err = restart() - if err != nil { - t.Fatalf("unable to restart dave: %v", err) - } - - err = wait.Predicate(func() bool { - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - daveBalReq := &lnrpc.ChannelBalanceRequest{} - daveBalResp, err := dave.ChannelBalance(ctxt, daveBalReq) - if err != nil { - t.Fatalf("unable to get dave's balance: %v", err) - } - - if daveBalResp.LocalBalance.Sat != 0 { - predErr = er.Errorf("Dave should end up with zero "+ - "channel balance, instead has %d", - daveBalResp.LocalBalance.Sat) - return false - } - - return true - }, time.Second*15) - if err != nil { - t.Fatalf("%v", predErr) - } - - assertNumPendingChannels(t, dave, 0, 0) - - err = wait.Predicate(func() bool { - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - daveBalReq := &lnrpc.WalletBalanceRequest{} - daveBalResp, err := dave.WalletBalance(ctxt, daveBalReq) - if err != nil { - t.Fatalf("unable to get dave's balance: %v", err) - } - - if daveBalResp.ConfirmedBalance <= davePreSweepBalance { - predErr = er.Errorf("Dave should have more than %d "+ - "after sweep, instead has %d", - davePreSweepBalance, - daveBalResp.ConfirmedBalance) - return false - } - - return true - }, time.Second*15) - if err != nil { - 
t.Fatalf("%v", predErr) - } - - // Dave should have no open channels. - assertNodeNumChannels(t, dave, 0) -} - -// assertNumPendingChannels checks that a PendingChannels response from the -// node reports the expected number of pending channels. -func assertNumPendingChannels(t *harnessTest, node *lntest.HarnessNode, - expWaitingClose, expPendingForceClose int) { - ctxb := context.Background() - - var predErr er.R - err := wait.Predicate(func() bool { - pendingChansRequest := &lnrpc.PendingChannelsRequest{} - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := node.PendingChannels(ctxt, - pendingChansRequest) - if err != nil { - predErr = er.Errorf("unable to query for pending "+ - "channels: %v", err) - return false - } - n := len(pendingChanResp.WaitingCloseChannels) - if n != expWaitingClose { - predErr = er.Errorf("Expected to find %d channels "+ - "waiting close, found %d", expWaitingClose, n) - return false - } - n = len(pendingChanResp.PendingForceClosingChannels) - if n != expPendingForceClose { - predErr = er.Errorf("expected to find %d channel "+ - "pending force close, found %d", expPendingForceClose, n) - return false - } - return true - }, time.Second*15) - if err != nil { - t.Fatalf("%v", predErr) - } -} - -// assertDLPExecuted asserts that Dave is a node that has recovered their state -// form scratch. Carol should then force close on chain, with Dave sweeping his -// funds immediately, and Carol sweeping her fund after her CSV delay is up. If -// the blankSlate value is true, then this means that Dave won't need to sweep -// on chain as he has no funds in the channel. -func assertDLPExecuted(net *lntest.NetworkHarness, t *harnessTest, - carol *lntest.HarnessNode, carolStartingBalance int64, - dave *lntest.HarnessNode, daveStartingBalance int64, - anchors bool) { - - // Increase the fee estimate so that the following force close tx will - // be cpfp'ed. 
- net.SetFeeEstimate(30000) - - // We disabled auto-reconnect for some tests to avoid timing issues. - // To make sure the nodes are initiating DLP now, we have to manually - // re-connect them. - ctxb := context.Background() - err := net.ConnectNodes(ctxb, carol, dave) - if err != nil && !strings.Contains(err.String(), "already connected") { - t.Fatalf("unable to connect Carol to Dave to initiate DLP: %v", - err) - } - - // Upon reconnection, the nodes should detect that Dave is out of sync. - // Carol should force close the channel using her latest commitment. - expectedTxes := 1 - if anchors { - expectedTxes = 2 - } - _, err = waitForNTxsInMempool( - net.Miner.Node, expectedTxes, minerMempoolTimeout, - ) - if err != nil { - t.Fatalf("unable to find Carol's force close tx in mempool: %v", - err) - } - - // Channel should be in the state "waiting close" for Carol since she - // broadcasted the force close tx. - assertNumPendingChannels(t, carol, 1, 0) - - // Dave should also consider the channel "waiting close", as he noticed - // the channel was out of sync, and is now waiting for a force close to - // hit the chain. - assertNumPendingChannels(t, dave, 1, 0) - - // Restart Dave to make sure he is able to sweep the funds after - // shutdown. - if err := net.RestartNode(dave, nil); err != nil { - t.Fatalf("Node restart failed: %v", err) - } - - // Generate a single block, which should confirm the closing tx. - _ = mineBlocks(t, net, 1, expectedTxes)[0] - - // Dave should sweep his funds immediately, as they are not timelocked. - // We also expect Dave to sweep his anchor, if present. - - _, err = waitForNTxsInMempool( - net.Miner.Node, expectedTxes, minerMempoolTimeout, - ) - if err != nil { - t.Fatalf("unable to find Dave's sweep tx in mempool: %v", err) - } - - // Dave should consider the channel pending force close (since he is - // waiting for his sweep to confirm). 
- assertNumPendingChannels(t, dave, 0, 1) - - // Carol is considering it "pending force close", as we must wait - // before she can sweep her outputs. - assertNumPendingChannels(t, carol, 0, 1) - - // Mine the sweep tx. - _ = mineBlocks(t, net, 1, expectedTxes)[0] - - // Now Dave should consider the channel fully closed. - assertNumPendingChannels(t, dave, 0, 0) - - // We query Dave's balance to make sure it increased after the channel - // closed. This checks that he was able to sweep the funds he had in - // the channel. - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - balReq := &lnrpc.WalletBalanceRequest{} - daveBalResp, errr := dave.WalletBalance(ctxt, balReq) - if errr != nil { - t.Fatalf("unable to get dave's balance: %v", errr) - } - - daveBalance := daveBalResp.ConfirmedBalance - if daveBalance <= daveStartingBalance { - t.Fatalf("expected dave to have balance above %d, "+ - "instead had %v", daveStartingBalance, daveBalance) - } - - // After the Carol's output matures, she should also reclaim her funds. - // - // The commit sweep resolver publishes the sweep tx at defaultCSV-1 and - // we already mined one block after the commitmment was published, so - // take that into account. - mineBlocks(t, net, defaultCSV-1-1, 0) - carolSweep, err := waitForTxInMempool( - net.Miner.Node, minerMempoolTimeout, - ) - if err != nil { - t.Fatalf("unable to find Carol's sweep tx in mempool: %v", err) - } - block := mineBlocks(t, net, 1, 1)[0] - assertTxInBlock(t, block, carolSweep) - - // Now the channel should be fully closed also from Carol's POV. - assertNumPendingChannels(t, carol, 0, 0) - - // Make sure Carol got her balance back. 
- ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - carolBalResp, errr := carol.WalletBalance(ctxt, balReq) - if errr != nil { - t.Fatalf("unable to get carol's balance: %v", errr) - } - carolBalance := carolBalResp.ConfirmedBalance - if carolBalance <= carolStartingBalance { - t.Fatalf("expected carol to have balance above %d, "+ - "instead had %v", carolStartingBalance, - carolBalance) - } - - assertNodeNumChannels(t, dave, 0) - assertNodeNumChannels(t, carol, 0) -} - -// testDataLossProtection tests that if one of the nodes in a channel -// relationship lost state, they will detect this during channel sync, and the -// up-to-date party will force close the channel, giving the outdated party the -// opportunity to sweep its output. -func testDataLossProtection(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - const ( - chanAmt = lnd.MaxBtcFundingAmount - paymentAmt = 10000 - numInvoices = 6 - ) - - // Carol will be the up-to-date party. We set --nolisten to ensure Dave - // won't be able to connect to her and trigger the channel data - // protection logic automatically. We also can't have Carol - // automatically re-connect too early, otherwise DLP would be initiated - // at the wrong moment. - carol, err := net.NewNode( - "Carol", []string{"--nolisten", "--minbackoff=1h"}, - ) - if err != nil { - t.Fatalf("unable to create new carol node: %v", err) - } - defer shutdownAndAssert(net, t, carol) - - // Dave will be the party losing his state. - dave, err := net.NewNode("Dave", nil) - if err != nil { - t.Fatalf("unable to create new node: %v", err) - } - defer shutdownAndAssert(net, t, dave) - - // Before we make a channel, we'll load up Carol with some coins sent - // directly from the miner. 
- ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - err = net.SendCoins(ctxt, btcutil.UnitsPerCoin(), carol) - if err != nil { - t.Fatalf("unable to send coins to carol: %v", err) - } - - // timeTravel is a method that will make Carol open a channel to the - // passed node, settle a series of payments, then reset the node back - // to the state before the payments happened. When this method returns - // the node will be unaware of the new state updates. The returned - // function can be used to restart the node in this state. - timeTravel := func(node *lntest.HarnessNode) (func() er.R, - *lnrpc.ChannelPoint, int64, er.R) { - - // We must let the node communicate with Carol before they are - // able to open channel, so we connect them. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.EnsureConnected(ctxt, carol, node); err != nil { - t.Fatalf("unable to connect %v to carol: %v", - node.Name(), err) - } - - // We'll first open up a channel between them with a 0.5 BTC - // value. - ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout) - chanPoint := openChannelAndAssert( - ctxt, t, net, carol, node, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - - // With the channel open, we'll create a few invoices for the - // node that Carol will pay to in order to advance the state of - // the channel. - // TODO(halseth): have dangling HTLCs on the commitment, able to - // retrive funds? - payReqs, _, _, err := createPayReqs( - node, paymentAmt, numInvoices, - ) - if err != nil { - t.Fatalf("unable to create pay reqs: %v", err) - } - - // Wait for Carol to receive the channel edge from the funding - // manager. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = carol.WaitForNetworkChannelOpen(ctxt, chanPoint) - if err != nil { - t.Fatalf("carol didn't see the carol->%s channel "+ - "before timeout: %v", node.Name(), err) - } - - // Send payments from Carol using 3 of the payment hashes - // generated above. 
- ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = completePaymentRequests( - ctxt, carol, carol.RouterClient, - payReqs[:numInvoices/2], true, - ) - if err != nil { - t.Fatalf("unable to send payments: %v", err) - } - - // Next query for the node's channel state, as we sent 3 - // payments of 10k satoshis each, it should now see his balance - // as being 30k satoshis. - var nodeChan *lnrpc.Channel - var predErr er.R - err = wait.Predicate(func() bool { - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - bChan, err := getChanInfo(ctxt, node) - if err != nil { - t.Fatalf("unable to get channel info: %v", err) - } - if bChan.LocalBalance != 30000 { - predErr = er.Errorf("balance is incorrect, "+ - "got %v, expected %v", - bChan.LocalBalance, 30000) - return false - } - - nodeChan = bChan - return true - }, time.Second*15) - if err != nil { - t.Fatalf("%v", predErr) - } - - // Grab the current commitment height (update number), we'll - // later revert him to this state after additional updates to - // revoke this state. - stateNumPreCopy := nodeChan.NumUpdates - - // Create a temporary file to house the database state at this - // particular point in history. - tempDbPath, errr := ioutil.TempDir("", node.Name()+"-past-state") - if errr != nil { - t.Fatalf("unable to create temp db folder: %v", errr) - } - defer os.Remove(tempDbPath) - - // With the temporary file created, copy the current state into - // the temporary file we created above. Later after more - // updates, we'll restore this state. - if err := lntest.CopyAll(tempDbPath, node.DBDir()); err != nil { - t.Fatalf("unable to copy database files: %v", err) - } - - // Finally, send more payments from , using the remaining - // payment hashes. 
- ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = completePaymentRequests( - ctxt, carol, carol.RouterClient, - payReqs[numInvoices/2:], true, - ) - if err != nil { - t.Fatalf("unable to send payments: %v", err) - } - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - nodeChan, err = getChanInfo(ctxt, node) - if err != nil { - t.Fatalf("unable to get dave chan info: %v", err) - } - - // Now we shutdown the node, copying over the its temporary - // database state which has the *prior* channel state over his - // current most up to date state. With this, we essentially - // force the node to travel back in time within the channel's - // history. - if err = net.RestartNode(node, func() er.R { - return lntest.CopyAll(node.DBDir(), tempDbPath) - }); err != nil { - t.Fatalf("unable to restart node: %v", err) - } - - // Make sure the channel is still there from the PoV of the - // node. - assertNodeNumChannels(t, node, 1) - - // Now query for the channel state, it should show that it's at - // a state number in the past, not the *latest* state. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - nodeChan, err = getChanInfo(ctxt, node) - if err != nil { - t.Fatalf("unable to get dave chan info: %v", err) - } - if nodeChan.NumUpdates != stateNumPreCopy { - t.Fatalf("db copy failed: %v", nodeChan.NumUpdates) - } - - balReq := &lnrpc.WalletBalanceRequest{} - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - balResp, errr := node.WalletBalance(ctxt, balReq) - if errr != nil { - t.Fatalf("unable to get dave's balance: %v", errr) - } - - restart, err := net.SuspendNode(node) - if err != nil { - t.Fatalf("unable to suspend node: %v", err) - } - - return restart, chanPoint, balResp.ConfirmedBalance, nil - } - - // Reset Dave to a state where he has an outdated channel state. 
- restartDave, _, daveStartingBalance, err := timeTravel(dave) - if err != nil { - t.Fatalf("unable to time travel dave: %v", err) - } - - // We make a note of the nodes' current on-chain balances, to make sure - // they are able to retrieve the channel funds eventually, - balReq := &lnrpc.WalletBalanceRequest{} - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - carolBalResp, errr := carol.WalletBalance(ctxt, balReq) - if errr != nil { - t.Fatalf("unable to get carol's balance: %v", errr) - } - carolStartingBalance := carolBalResp.ConfirmedBalance - - // Restart Dave to trigger a channel resync. - if err := restartDave(); err != nil { - t.Fatalf("unable to restart dave: %v", err) - } - - // Assert that once Dave comes up, they reconnect, Carol force closes - // on chain, and both of them properly carry out the DLP protocol. - assertDLPExecuted( - net, t, carol, carolStartingBalance, dave, daveStartingBalance, - false, - ) - - // As a second part of this test, we will test the scenario where a - // channel is closed while Dave is offline, loses his state and comes - // back online. In this case the node should attempt to resync the - // channel, and the peer should resend a channel sync message for the - // closed channel, such that Dave can retrieve his funds. - // - // We start by letting Dave time travel back to an outdated state. - restartDave, chanPoint2, daveStartingBalance, err := timeTravel(dave) - if err != nil { - t.Fatalf("unable to time travel eve: %v", err) - } - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - carolBalResp, errr = carol.WalletBalance(ctxt, balReq) - if errr != nil { - t.Fatalf("unable to get carol's balance: %v", errr) - } - carolStartingBalance = carolBalResp.ConfirmedBalance - - // Now let Carol force close the channel while Dave is offline. - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, carol, chanPoint2, true) - - // Wait for the channel to be marked pending force close. 
- ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = waitForChannelPendingForceClose(ctxt, carol, chanPoint2) - if err != nil { - t.Fatalf("channel not pending force close: %v", err) - } - - // Mine enough blocks for Carol to sweep her funds. - mineBlocks(t, net, defaultCSV-1, 0) - - carolSweep, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) - if err != nil { - t.Fatalf("unable to find Carol's sweep tx in mempool: %v", err) - } - block := mineBlocks(t, net, 1, 1)[0] - assertTxInBlock(t, block, carolSweep) - - // Now the channel should be fully closed also from Carol's POV. - assertNumPendingChannels(t, carol, 0, 0) - - // Make sure Carol got her balance back. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - carolBalResp, errr = carol.WalletBalance(ctxt, balReq) - if errr != nil { - t.Fatalf("unable to get carol's balance: %v", errr) - } - carolBalance := carolBalResp.ConfirmedBalance - if carolBalance <= carolStartingBalance { - t.Fatalf("expected carol to have balance above %d, "+ - "instead had %v", carolStartingBalance, - carolBalance) - } - - assertNodeNumChannels(t, carol, 0) - - // When Dave comes online, he will reconnect to Carol, try to resync - // the channel, but it will already be closed. Carol should resend the - // information Dave needs to sweep his funds. - if err := restartDave(); err != nil { - t.Fatalf("unable to restart Eve: %v", err) - } - - // Dave should sweep his funds. - _, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) - if err != nil { - t.Fatalf("unable to find Dave's sweep tx in mempool: %v", err) - } - - // Mine a block to confirm the sweep, and make sure Dave got his - // balance back. 
- mineBlocks(t, net, 1, 1) - assertNodeNumChannels(t, dave, 0) - - err = wait.NoError(func() er.R { - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - daveBalResp, err := dave.WalletBalance(ctxt, balReq) - if err != nil { - return er.Errorf("unable to get dave's balance: %v", - err) - } - - daveBalance := daveBalResp.ConfirmedBalance - if daveBalance <= daveStartingBalance { - return er.Errorf("expected dave to have balance "+ - "above %d, intead had %v", daveStartingBalance, - daveBalance) - } - - return nil - }, time.Second*15) - if err != nil { - t.Fatalf("%v", err) - } -} - -// assertNodeNumChannels polls the provided node's list channels rpc until it -// reaches the desired number of total channels. -func assertNodeNumChannels(t *harnessTest, node *lntest.HarnessNode, - numChannels int) { - ctxb := context.Background() - - // Poll node for its list of channels. - req := &lnrpc.ListChannelsRequest{} - - var predErr er.R - pred := func() bool { - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - chanInfo, err := node.ListChannels(ctxt, req) - if err != nil { - predErr = er.Errorf("unable to query for node's "+ - "channels: %v", err) - return false - } - - // Return true if the query returned the expected number of - // channels. - num := len(chanInfo.Channels) - if num != numChannels { - predErr = er.Errorf("expected %v channels, got %v", - numChannels, num) - return false - } - return true - } - - if err := wait.Predicate(pred, time.Second*15); err != nil { - t.Fatalf("node has incorrect number of channels: %v", predErr) - } -} - -// testRejectHTLC tests that a node can be created with the flag --rejecthtlc. -// This means that the node will reject all forwarded HTLCs but can still -// accept direct HTLCs as well as send HTLCs. 
-func testRejectHTLC(net *lntest.NetworkHarness, t *harnessTest) { - // RejectHTLC - // Alice ------> Carol ------> Bob - // - const chanAmt = btcutil.Amount(1000000) - ctxb := context.Background() - - // Create Carol with reject htlc flag. - carol, err := net.NewNode("Carol", []string{"--rejecthtlc"}) - if err != nil { - t.Fatalf("unable to create new node: %v", err) - } - defer shutdownAndAssert(net, t, carol) - - // Connect Alice to Carol. - if err := net.ConnectNodes(ctxb, net.Alice, carol); err != nil { - t.Fatalf("unable to connect alice to carol: %v", err) - } - - // Connect Carol to Bob. - if err := net.ConnectNodes(ctxb, carol, net.Bob); err != nil { - t.Fatalf("unable to conenct carol to net.Bob: %v", err) - } - - // Send coins to Carol. - err = net.SendCoins(ctxb, btcutil.UnitsPerCoin(), carol) - if err != nil { - t.Fatalf("unable to send coins to carol: %v", err) - } - - // Send coins to Alice. - err = net.SendCoins(ctxb, btcutil.UnitsPerCoin()/100, net.Alice) - if err != nil { - t.Fatalf("unable to send coins to alice: %v", err) - } - - // Open a channel between Alice and Carol. - ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout) - chanPointAlice := openChannelAndAssert( - ctxt, t, net, net.Alice, carol, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - - // Open a channel between Carol and Bob. - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - chanPointCarol := openChannelAndAssert( - ctxt, t, net, carol, net.Bob, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - - // Channel should be ready for payments. - const payAmt = 100 - - // Helper closure to generate a random pre image. - genPreImage := func() []byte { - preimage := make([]byte, 32) - - _, errr := rand.Read(preimage) - if errr != nil { - t.Fatalf("unable to generate preimage: %v", errr) - } - - return preimage - } - - // Create an invoice from Carol of 100 satoshis. - // We expect Alice to be able to pay this invoice. 
- preimage := genPreImage() - - carolInvoice := &lnrpc.Invoice{ - Memo: "testing - alice should pay carol", - RPreimage: preimage, - Value: payAmt, - } - - // Carol adds the invoice to her database. - resp, errr := carol.AddInvoice(ctxb, carolInvoice) - if errr != nil { - t.Fatalf("unable to add invoice: %v", errr) - } - - // Alice pays Carols invoice. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = completePaymentRequests( - ctxt, net.Alice, net.Alice.RouterClient, - []string{resp.PaymentRequest}, true, - ) - if err != nil { - t.Fatalf("unable to send payments from alice to carol: %v", err) - } - - // Create an invoice from Bob of 100 satoshis. - // We expect Carol to be able to pay this invoice. - preimage = genPreImage() - - bobInvoice := &lnrpc.Invoice{ - Memo: "testing - carol should pay bob", - RPreimage: preimage, - Value: payAmt, - } - - // Bob adds the invoice to his database. - resp, errr = net.Bob.AddInvoice(ctxb, bobInvoice) - if errr != nil { - t.Fatalf("unable to add invoice: %v", errr) - } - - // Carol pays Bobs invoice. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = completePaymentRequests( - ctxt, carol, carol.RouterClient, - []string{resp.PaymentRequest}, true, - ) - if err != nil { - t.Fatalf("unable to send payments from carol to bob: %v", err) - } - - // Create an invoice from Bob of 100 satoshis. - // Alice attempts to pay Bob but this should fail, since we are - // using Carol as a hop and her node will reject onward HTLCs. - preimage = genPreImage() - - bobInvoice = &lnrpc.Invoice{ - Memo: "testing - alice tries to pay bob", - RPreimage: preimage, - Value: payAmt, - } - - // Bob adds the invoice to his database. - resp, errr = net.Bob.AddInvoice(ctxb, bobInvoice) - if errr != nil { - t.Fatalf("unable to add invoice: %v", errr) - } - - // Alice attempts to pay Bobs invoice. This payment should be rejected since - // we are using Carol as an intermediary hop, Carol is running lnd with - // --rejecthtlc. 
- ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = completePaymentRequests( - ctxt, net.Alice, net.Alice.RouterClient, - []string{resp.PaymentRequest}, true, - ) - if err == nil { - t.Fatalf( - "should have been rejected, carol will not accept forwarded htlcs", - ) - } - - assertLastHTLCError(t, net.Alice, lnrpc.Failure_CHANNEL_DISABLED) - - // Close all channels. - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Alice, chanPointAlice, false) - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, carol, chanPointCarol, false) -} - -// graphSubscription houses the proxied update and error chans for a node's -// graph subscriptions. -type graphSubscription struct { - updateChan chan *lnrpc.GraphTopologyUpdate - errChan chan error - quit chan struct{} -} - -// subscribeGraphNotifications subscribes to channel graph updates and launches -// a goroutine that forwards these to the returned channel. -func subscribeGraphNotifications(t *harnessTest, ctxb context.Context, - node *lntest.HarnessNode) graphSubscription { - - // We'll first start by establishing a notification client which will - // send us notifications upon detected changes in the channel graph. - req := &lnrpc.GraphTopologySubscription{} - ctx, cancelFunc := context.WithCancel(ctxb) - topologyClient, err := node.SubscribeChannelGraph(ctx, req) - if err != nil { - t.Fatalf("unable to create topology client: %v", err) - } - - // We'll launch a goroutine that will be responsible for proxying all - // notifications recv'd from the client into the channel below. 
- errChan := make(chan error, 1) - quit := make(chan struct{}) - graphUpdates := make(chan *lnrpc.GraphTopologyUpdate, 20) - go func() { - for { - defer cancelFunc() - - select { - case <-quit: - return - default: - graphUpdate, err := topologyClient.Recv() - select { - case <-quit: - return - default: - } - - if err == io.EOF { - return - } else if err != nil { - select { - case errChan <- err: - case <-quit: - } - return - } - - select { - case graphUpdates <- graphUpdate: - case <-quit: - return - } - } - } - }() - - return graphSubscription{ - updateChan: graphUpdates, - errChan: errChan, - quit: quit, - } -} - -func testGraphTopologyNotifications(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - const chanAmt = lnd.MaxBtcFundingAmount - - // Let Alice subscribe to graph notifications. - graphSub := subscribeGraphNotifications( - t, ctxb, net.Alice, - ) - defer close(graphSub.quit) - - // Open a new channel between Alice and Bob. - ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout) - chanPoint := openChannelAndAssert( - ctxt, t, net, net.Alice, net.Bob, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - - // The channel opening above should have triggered a few notifications - // sent to the notification client. We'll expect two channel updates, - // and two node announcements. - var numChannelUpds int - var numNodeAnns int - for numChannelUpds < 2 && numNodeAnns < 2 { - select { - // Ensure that a new update for both created edges is properly - // dispatched to our registered client. - case graphUpdate := <-graphSub.updateChan: - // Process all channel updates prsented in this update - // message. 
- for _, chanUpdate := range graphUpdate.ChannelUpdates { - switch chanUpdate.AdvertisingNode { - case net.Alice.PubKeyStr: - case net.Bob.PubKeyStr: - default: - t.Fatalf("unknown advertising node: %v", - chanUpdate.AdvertisingNode) - } - switch chanUpdate.ConnectingNode { - case net.Alice.PubKeyStr: - case net.Bob.PubKeyStr: - default: - t.Fatalf("unknown connecting node: %v", - chanUpdate.ConnectingNode) - } - - if chanUpdate.Capacity != int64(chanAmt) { - t.Fatalf("channel capacities mismatch:"+ - " expected %v, got %v", chanAmt, - btcutil.Amount(chanUpdate.Capacity)) - } - numChannelUpds++ - } - - for _, nodeUpdate := range graphUpdate.NodeUpdates { - switch nodeUpdate.IdentityKey { - case net.Alice.PubKeyStr: - case net.Bob.PubKeyStr: - default: - t.Fatalf("unknown node: %v", - nodeUpdate.IdentityKey) - } - numNodeAnns++ - } - case err := <-graphSub.errChan: - t.Fatalf("unable to recv graph update: %v", err) - case <-time.After(time.Second * 10): - t.Fatalf("timeout waiting for graph notifications, "+ - "only received %d/2 chanupds and %d/2 nodeanns", - numChannelUpds, numNodeAnns) - } - } - - _, blockHeight, err := net.Miner.Node.GetBestBlock() - if err != nil { - t.Fatalf("unable to get current blockheight %v", err) - } - - // Now we'll test that updates are properly sent after channels are closed - // within the network. - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Alice, chanPoint, false) - - // Now that the channel has been closed, we should receive a - // notification indicating so. 
-out: - for { - select { - case graphUpdate := <-graphSub.updateChan: - if len(graphUpdate.ClosedChans) != 1 { - continue - } - - closedChan := graphUpdate.ClosedChans[0] - if closedChan.ClosedHeight != uint32(blockHeight+1) { - t.Fatalf("close heights of channel mismatch: "+ - "expected %v, got %v", blockHeight+1, - closedChan.ClosedHeight) - } - chanPointTxid, err := lnd.GetChanPointFundingTxid(chanPoint) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } - closedChanTxid, err := lnd.GetChanPointFundingTxid( - closedChan.ChanPoint, - ) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } - if !bytes.Equal(closedChanTxid[:], chanPointTxid[:]) { - t.Fatalf("channel point hash mismatch: "+ - "expected %v, got %v", chanPointTxid, - closedChanTxid) - } - if closedChan.ChanPoint.OutputIndex != chanPoint.OutputIndex { - t.Fatalf("output index mismatch: expected %v, "+ - "got %v", chanPoint.OutputIndex, - closedChan.ChanPoint) - } - - break out - - case err := <-graphSub.errChan: - t.Fatalf("unable to recv graph update: %v", err) - case <-time.After(time.Second * 10): - t.Fatalf("notification for channel closure not " + - "sent") - } - } - - // For the final portion of the test, we'll ensure that once a new node - // appears in the network, the proper notification is dispatched. Note - // that a node that does not have any channels open is ignored, so first - // we disconnect Alice and Bob, open a channel between Bob and Carol, - // and finally connect Alice to Bob again. 
- ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.DisconnectNodes(ctxt, net.Alice, net.Bob); err != nil { - t.Fatalf("unable to disconnect alice and bob: %v", err) - } - carol, errr := net.NewNode("Carol", nil) - if errr != nil { - t.Fatalf("unable to create new nodes: %v", errr) - } - defer shutdownAndAssert(net, t, carol) - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, net.Bob, carol); err != nil { - t.Fatalf("unable to connect bob to carol: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - chanPoint = openChannelAndAssert( - ctxt, t, net, net.Bob, carol, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - - // Reconnect Alice and Bob. This should result in the nodes syncing up - // their respective graph state, with the new addition being the - // existence of Carol in the graph, and also the channel between Bob - // and Carol. Note that we will also receive a node announcement from - // Bob, since a node will update its node announcement after a new - // channel is opened. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.EnsureConnected(ctxt, net.Alice, net.Bob); err != nil { - t.Fatalf("unable to connect alice to bob: %v", err) - } - - // We should receive an update advertising the newly connected node, - // Bob's new node announcement, and the channel between Bob and Carol. 
- numNodeAnns = 0 - numChannelUpds = 0 - for numChannelUpds < 2 && numNodeAnns < 1 { - select { - case graphUpdate := <-graphSub.updateChan: - for _, nodeUpdate := range graphUpdate.NodeUpdates { - switch nodeUpdate.IdentityKey { - case carol.PubKeyStr: - case net.Bob.PubKeyStr: - default: - t.Fatalf("unknown node update pubey: %v", - nodeUpdate.IdentityKey) - } - numNodeAnns++ - } - - for _, chanUpdate := range graphUpdate.ChannelUpdates { - switch chanUpdate.AdvertisingNode { - case carol.PubKeyStr: - case net.Bob.PubKeyStr: - default: - t.Fatalf("unknown advertising node: %v", - chanUpdate.AdvertisingNode) - } - switch chanUpdate.ConnectingNode { - case carol.PubKeyStr: - case net.Bob.PubKeyStr: - default: - t.Fatalf("unknown connecting node: %v", - chanUpdate.ConnectingNode) - } - - if chanUpdate.Capacity != int64(chanAmt) { - t.Fatalf("channel capacities mismatch:"+ - " expected %v, got %v", chanAmt, - btcutil.Amount(chanUpdate.Capacity)) - } - numChannelUpds++ - } - case err := <-graphSub.errChan: - t.Fatalf("unable to recv graph update: %v", err) - case <-time.After(time.Second * 10): - t.Fatalf("timeout waiting for graph notifications, "+ - "only received %d/2 chanupds and %d/2 nodeanns", - numChannelUpds, numNodeAnns) - } - } - - // Close the channel between Bob and Carol. - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Bob, chanPoint, false) -} - -// testNodeAnnouncement ensures that when a node is started with one or more -// external IP addresses specified on the command line, that those addresses -// announced to the network and reported in the network graph. 
-func testNodeAnnouncement(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - aliceSub := subscribeGraphNotifications(t, ctxb, net.Alice) - defer close(aliceSub.quit) - - advertisedAddrs := []string{ - "192.168.1.1:8333", - "[2001:db8:85a3:8d3:1319:8a2e:370:7348]:8337", - "bkb6azqggsaiskzi.onion:9735", - "fomvuglh6h6vcag73xo5t5gv56ombih3zr2xvplkpbfd7wrog4swjwid.onion:1234", - } - - var lndArgs []string - for _, addr := range advertisedAddrs { - lndArgs = append(lndArgs, "--externalip="+addr) - } - - dave, err := net.NewNode("Dave", lndArgs) - if err != nil { - t.Fatalf("unable to create new nodes: %v", err) - } - defer shutdownAndAssert(net, t, dave) - - // We must let Dave have an open channel before he can send a node - // announcement, so we open a channel with Bob, - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, net.Bob, dave); err != nil { - t.Fatalf("unable to connect bob to carol: %v", err) - } - - // Alice shouldn't receive any new updates yet since the channel has yet - // to be opened. - select { - case <-aliceSub.updateChan: - t.Fatalf("received unexpected update from dave") - case <-time.After(time.Second): - } - - // We'll then go ahead and open a channel between Bob and Dave. This - // ensures that Alice receives the node announcement from Bob as part of - // the announcement broadcast. 
- ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - chanPoint := openChannelAndAssert( - ctxt, t, net, net.Bob, dave, - lntest.OpenChannelParams{ - Amt: 1000000, - }, - ) - - assertAddrs := func(addrsFound []string, targetAddrs ...string) { - addrs := make(map[string]struct{}, len(addrsFound)) - for _, addr := range addrsFound { - addrs[addr] = struct{}{} - } - - for _, addr := range targetAddrs { - if _, ok := addrs[addr]; !ok { - t.Fatalf("address %v not found in node "+ - "announcement", addr) - } - } - } - - waitForAddrsInUpdate := func(graphSub graphSubscription, - nodePubKey string, targetAddrs ...string) { - - for { - select { - case graphUpdate := <-graphSub.updateChan: - for _, update := range graphUpdate.NodeUpdates { - if update.IdentityKey == nodePubKey { - assertAddrs( - update.Addresses, - targetAddrs..., - ) - return - } - } - case err := <-graphSub.errChan: - t.Fatalf("unable to recv graph update: %v", err) - case <-time.After(20 * time.Second): - t.Fatalf("did not receive node ann update") - } - } - } - - // We'll then wait for Alice to receive Dave's node announcement - // including the expected advertised addresses from Bob since they - // should already be connected. - waitForAddrsInUpdate( - aliceSub, dave.PubKeyStr, advertisedAddrs..., - ) - - // Close the channel between Bob and Dave. - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Bob, chanPoint, false) -} - -func testNodeSignVerify(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - chanAmt := lnd.MaxBtcFundingAmount - pushAmt := btcutil.Amount(100000) - - // Create a channel between alice and bob. - ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout) - aliceBobCh := openChannelAndAssert( - ctxt, t, net, net.Alice, net.Bob, - lntest.OpenChannelParams{ - Amt: chanAmt, - PushAmt: pushAmt, - }, - ) - - aliceMsg := []byte("alice msg") - - // alice signs "alice msg" and sends her signature to bob. 
- sigReq := &lnrpc.SignMessageRequest{Msg: aliceMsg} - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - sigResp, err := net.Alice.SignMessage(ctxt, sigReq) - if err != nil { - t.Fatalf("SignMessage rpc call failed: %v", err) - } - aliceSig := sigResp.Signature - - // bob verifying alice's signature should succeed since alice and bob are - // connected. - verifyReq := &lnrpc.VerifyMessageRequest{Msg: aliceMsg, Signature: aliceSig} - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - verifyResp, err := net.Bob.VerifyMessage(ctxt, verifyReq) - if err != nil { - t.Fatalf("VerifyMessage failed: %v", err) - } - if !verifyResp.Valid { - t.Fatalf("alice's signature didn't validate") - } - if verifyResp.Pubkey != net.Alice.PubKeyStr { - t.Fatalf("alice's signature doesn't contain alice's pubkey.") - } - - // carol is a new node that is unconnected to alice or bob. - carol, errr := net.NewNode("Carol", nil) - if errr != nil { - t.Fatalf("unable to create new node: %v", errr) - } - defer shutdownAndAssert(net, t, carol) - - carolMsg := []byte("carol msg") - - // carol signs "carol msg" and sends her signature to bob. - sigReq = &lnrpc.SignMessageRequest{Msg: carolMsg} - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - sigResp, err = carol.SignMessage(ctxt, sigReq) - if err != nil { - t.Fatalf("SignMessage rpc call failed: %v", err) - } - carolSig := sigResp.Signature - - // bob verifying carol's signature should fail since they are not connected. - verifyReq = &lnrpc.VerifyMessageRequest{Msg: carolMsg, Signature: carolSig} - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - verifyResp, err = net.Bob.VerifyMessage(ctxt, verifyReq) - if err != nil { - t.Fatalf("VerifyMessage failed: %v", err) - } - if verifyResp.Valid { - t.Fatalf("carol's signature should not be valid") - } - if verifyResp.Pubkey != carol.PubKeyStr { - t.Fatalf("carol's signature doesn't contain her pubkey") - } - - // Close the channel between alice and bob. 
- ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Alice, aliceBobCh, false) -} - -// testAsyncPayments tests the performance of the async payments. -func testAsyncPayments(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - const ( - paymentAmt = 100 - ) - - // First establish a channel with a capacity equals to the overall - // amount of payments, between Alice and Bob, at the end of the test - // Alice should send all money from her side to Bob. - ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout) - channelCapacity := btcutil.Amount(paymentAmt * 2000) - chanPoint := openChannelAndAssert( - ctxt, t, net, net.Alice, net.Bob, - lntest.OpenChannelParams{ - Amt: channelCapacity, - }, - ) - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - info, err := getChanInfo(ctxt, net.Alice) - if err != nil { - t.Fatalf("unable to get alice channel info: %v", err) - } - - // We'll create a number of invoices equal the max number of HTLCs that - // can be carried in one direction. The number on the commitment will - // likely be lower, but we can't guarantee that any more HTLCs will - // succeed due to the limited path diversity and inability of the router - // to retry via another path. - numInvoices := int(input.MaxHTLCNumber / 2) - - bobAmt := int64(numInvoices * paymentAmt) - aliceAmt := info.LocalBalance - bobAmt - - // With the channel open, we'll create invoices for Bob that Alice - // will pay to in order to advance the state of the channel. - bobPayReqs, _, _, err := createPayReqs( - net.Bob, paymentAmt, numInvoices, - ) - if err != nil { - t.Fatalf("unable to create pay reqs: %v", err) - } - - // Wait for Alice to receive the channel edge from the funding manager. 
- ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.Alice.WaitForNetworkChannelOpen(ctxt, chanPoint) - if err != nil { - t.Fatalf("alice didn't see the alice->bob channel before "+ - "timeout: %v", err) - } - - // Simultaneously send payments from Alice to Bob using of Bob's payment - // hashes generated above. - now := time.Now() - errChan := make(chan er.R) - statusChan := make(chan *lnrpc.Payment) - for i := 0; i < numInvoices; i++ { - payReq := bobPayReqs[i] - go func() { - ctxt, _ = context.WithTimeout(ctxb, lntest.AsyncBenchmarkTimeout) - stream, errr := net.Alice.RouterClient.SendPaymentV2( - ctxt, - &routerrpc.SendPaymentRequest{ - PaymentRequest: payReq, - TimeoutSeconds: 60, - FeeLimitMsat: noFeeLimitMsat, - }, - ) - if err != nil { - errChan <- er.E(errr) - } - result, err := getPaymentResult(stream) - if err != nil { - errChan <- err - } - - statusChan <- result - }() - } - - // Wait until all the payments have settled. - for i := 0; i < numInvoices; i++ { - select { - case result := <-statusChan: - if result.Status == lnrpc.Payment_SUCCEEDED { - continue - } - - case err := <-errChan: - t.Fatalf("payment error: %v", err) - } - } - - // All payments have been sent, mark the finish time. - timeTaken := time.Since(now) - - // Next query for Bob's and Alice's channel states, in order to confirm - // that all payment have been successful transmitted. - - // Wait for the revocation to be received so alice no longer has pending - // htlcs listed and has correct balances. This is needed due to the fact - // that we now pipeline the settles. 
- err = wait.Predicate(func() bool { - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - aliceChan, err := getChanInfo(ctxt, net.Alice) - if err != nil { - return false - } - if len(aliceChan.PendingHtlcs) != 0 { - return false - } - if aliceChan.RemoteBalance != bobAmt { - return false - } - if aliceChan.LocalBalance != aliceAmt { - return false - } - - return true - }, time.Second*5) - if err != nil { - t.Fatalf("failed to assert alice's pending htlcs and/or remote/local balance") - } - - // Wait for Bob to receive revocation from Alice. - time.Sleep(2 * time.Second) - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - bobChan, err := getChanInfo(ctxt, net.Bob) - if err != nil { - t.Fatalf("unable to get bob's channel info: %v", err) - } - if len(bobChan.PendingHtlcs) != 0 { - t.Fatalf("bob's pending htlcs is incorrect, got %v, "+ - "expected %v", len(bobChan.PendingHtlcs), 0) - } - if bobChan.LocalBalance != bobAmt { - t.Fatalf("bob's local balance is incorrect, got %v, expected"+ - " %v", bobChan.LocalBalance, bobAmt) - } - if bobChan.RemoteBalance != aliceAmt { - t.Fatalf("bob's remote balance is incorrect, got %v, "+ - "expected %v", bobChan.RemoteBalance, aliceAmt) - } - - t.Log("\tBenchmark info: Elapsed time: ", timeTaken) - t.Log("\tBenchmark info: TPS: ", float64(numInvoices)/float64(timeTaken.Seconds())) - - // Finally, immediately close the channel. This function will also - // block until the channel is closed and will additionally assert the - // relevant channel closing post conditions. - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Alice, chanPoint, false) -} - -// testBidirectionalAsyncPayments tests that nodes are able to send the -// payments to each other in async manner without blocking. 
-func testBidirectionalAsyncPayments(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - const ( - paymentAmt = 1000 - ) - - // First establish a channel with a capacity equals to the overall - // amount of payments, between Alice and Bob, at the end of the test - // Alice should send all money from her side to Bob. - ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout) - chanPoint := openChannelAndAssert( - ctxt, t, net, net.Alice, net.Bob, - lntest.OpenChannelParams{ - Amt: paymentAmt * 2000, - PushAmt: paymentAmt * 1000, - }, - ) - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - info, err := getChanInfo(ctxt, net.Alice) - if err != nil { - t.Fatalf("unable to get alice channel info: %v", err) - } - - // We'll create a number of invoices equal the max number of HTLCs that - // can be carried in one direction. The number on the commitment will - // likely be lower, but we can't guarantee that any more HTLCs will - // succeed due to the limited path diversity and inability of the router - // to retry via another path. - numInvoices := int(input.MaxHTLCNumber / 2) - - // Nodes should exchange the same amount of money and because of this - // at the end balances should remain the same. - aliceAmt := info.LocalBalance - bobAmt := info.RemoteBalance - - // With the channel open, we'll create invoices for Bob that Alice - // will pay to in order to advance the state of the channel. - bobPayReqs, _, _, err := createPayReqs( - net.Bob, paymentAmt, numInvoices, - ) - if err != nil { - t.Fatalf("unable to create pay reqs: %v", err) - } - - // With the channel open, we'll create invoices for Alice that Bob - // will pay to in order to advance the state of the channel. - alicePayReqs, _, _, err := createPayReqs( - net.Alice, paymentAmt, numInvoices, - ) - if err != nil { - t.Fatalf("unable to create pay reqs: %v", err) - } - - // Wait for Alice to receive the channel edge from the funding manager. 
- ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err = net.Alice.WaitForNetworkChannelOpen(ctxt, chanPoint); err != nil { - t.Fatalf("alice didn't see the alice->bob channel before "+ - "timeout: %v", err) - } - if err = net.Bob.WaitForNetworkChannelOpen(ctxt, chanPoint); err != nil { - t.Fatalf("bob didn't see the bob->alice channel before "+ - "timeout: %v", err) - } - - // Reset mission control to prevent previous payment results from - // interfering with this test. A new channel has been opened, but - // mission control operates on node pairs. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - _, errr := net.Alice.RouterClient.ResetMissionControl( - ctxt, &routerrpc.ResetMissionControlRequest{}, - ) - if errr != nil { - t.Fatalf("unable to reset mc for alice: %v", errr) - } - - // Send payments from Alice to Bob and from Bob to Alice in async - // manner. - errChan := make(chan er.R) - statusChan := make(chan *lnrpc.Payment) - - send := func(node *lntest.HarnessNode, payReq string) { - go func() { - ctxt, _ = context.WithTimeout( - ctxb, lntest.AsyncBenchmarkTimeout, - ) - stream, errr := node.RouterClient.SendPaymentV2( - ctxt, - &routerrpc.SendPaymentRequest{ - PaymentRequest: payReq, - TimeoutSeconds: 60, - FeeLimitMsat: noFeeLimitMsat, - }, - ) - if err != nil { - errChan <- er.E(errr) - } - result, err := getPaymentResult(stream) - if err != nil { - errChan <- err - } - - statusChan <- result - }() - } - - for i := 0; i < numInvoices; i++ { - send(net.Bob, alicePayReqs[i]) - send(net.Alice, bobPayReqs[i]) - } - - // Expect all payments to succeed. - for i := 0; i < 2*numInvoices; i++ { - select { - case result := <-statusChan: - if result.Status != lnrpc.Payment_SUCCEEDED { - t.Fatalf("payment error: %v", result.Status) - } - - case err := <-errChan: - t.Fatalf("payment error: %v", err) - } - } - - // Wait for Alice and Bob to receive revocations messages, and update - // states, i.e. balance info. 
- time.Sleep(1 * time.Second) - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - aliceInfo, err := getChanInfo(ctxt, net.Alice) - if err != nil { - t.Fatalf("unable to get bob's channel info: %v", err) - } - if aliceInfo.RemoteBalance != bobAmt { - t.Fatalf("alice's remote balance is incorrect, got %v, "+ - "expected %v", aliceInfo.RemoteBalance, bobAmt) - } - if aliceInfo.LocalBalance != aliceAmt { - t.Fatalf("alice's local balance is incorrect, got %v, "+ - "expected %v", aliceInfo.LocalBalance, aliceAmt) - } - if len(aliceInfo.PendingHtlcs) != 0 { - t.Fatalf("alice's pending htlcs is incorrect, got %v, "+ - "expected %v", len(aliceInfo.PendingHtlcs), 0) - } - - // Next query for Bob's and Alice's channel states, in order to confirm - // that all payment have been successful transmitted. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - bobInfo, err := getChanInfo(ctxt, net.Bob) - if err != nil { - t.Fatalf("unable to get bob's channel info: %v", err) - } - - if bobInfo.LocalBalance != bobAmt { - t.Fatalf("bob's local balance is incorrect, got %v, expected"+ - " %v", bobInfo.LocalBalance, bobAmt) - } - if bobInfo.RemoteBalance != aliceAmt { - t.Fatalf("bob's remote balance is incorrect, got %v, "+ - "expected %v", bobInfo.RemoteBalance, aliceAmt) - } - if len(bobInfo.PendingHtlcs) != 0 { - t.Fatalf("bob's pending htlcs is incorrect, got %v, "+ - "expected %v", len(bobInfo.PendingHtlcs), 0) - } - - // Finally, immediately close the channel. This function will also - // block until the channel is closed and will additionally assert the - // relevant channel closing post conditions. - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Alice, chanPoint, false) -} - -// assertActiveHtlcs makes sure all the passed nodes have the _exact_ HTLCs -// matching payHashes on _all_ their channels. 
-func assertActiveHtlcs(nodes []*lntest.HarnessNode, payHashes ...[]byte) er.R { - ctxb := context.Background() - - req := &lnrpc.ListChannelsRequest{} - for _, node := range nodes { - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - nodeChans, err := node.ListChannels(ctxt, req) - if err != nil { - return er.Errorf("unable to get node chans: %v", err) - } - - for _, channel := range nodeChans.Channels { - // Record all payment hashes active for this channel. - htlcHashes := make(map[string]struct{}) - for _, htlc := range channel.PendingHtlcs { - _, ok := htlcHashes[string(htlc.HashLock)] - if ok { - return er.Errorf("duplicate HashLock") - } - htlcHashes[string(htlc.HashLock)] = struct{}{} - } - - // Channel should have exactly the payHashes active. - if len(payHashes) != len(htlcHashes) { - return er.Errorf("node %x had %v htlcs active, "+ - "expected %v", node.PubKey[:], - len(htlcHashes), len(payHashes)) - } - - // Make sure all the payHashes are active. - for _, payHash := range payHashes { - if _, ok := htlcHashes[string(payHash)]; ok { - continue - } - return er.Errorf("node %x didn't have the "+ - "payHash %v active", node.PubKey[:], - payHash) - } - } - } - - return nil -} - -func assertNumActiveHtlcsChanPoint(node *lntest.HarnessNode, - chanPoint wire.OutPoint, numHtlcs int) er.R { - ctxb := context.Background() - - req := &lnrpc.ListChannelsRequest{} - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - nodeChans, errr := node.ListChannels(ctxt, req) - if errr != nil { - return er.E(errr) - } - - for _, channel := range nodeChans.Channels { - if channel.ChannelPoint != chanPoint.String() { - continue - } - - if len(channel.PendingHtlcs) != numHtlcs { - return er.Errorf("expected %v active HTLCs, got %v", - numHtlcs, len(channel.PendingHtlcs)) - } - return nil - } - - return er.Errorf("channel point %v not found", chanPoint) -} - -func assertNumActiveHtlcs(nodes []*lntest.HarnessNode, numHtlcs int) er.R { - ctxb := context.Background() - - req 
:= &lnrpc.ListChannelsRequest{} - for _, node := range nodes { - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - nodeChans, errr := node.ListChannels(ctxt, req) - if errr != nil { - return er.E(errr) - } - - for _, channel := range nodeChans.Channels { - if len(channel.PendingHtlcs) != numHtlcs { - return er.Errorf("expected %v HTLCs, got %v", - numHtlcs, len(channel.PendingHtlcs)) - } - } - } - - return nil -} - -func assertSpendingTxInMempool(t *harnessTest, miner *rpcclient.Client, - timeout time.Duration, chanPoint wire.OutPoint) chainhash.Hash { - - tx := getSpendingTxInMempool(t, miner, timeout, chanPoint) - return tx.TxHash() -} - -// getSpendingTxInMempool waits for a transaction spending the given outpoint to -// appear in the mempool and returns that tx in full. -func getSpendingTxInMempool(t *harnessTest, miner *rpcclient.Client, - timeout time.Duration, chanPoint wire.OutPoint) *wire.MsgTx { - - breakTimeout := time.After(timeout) - ticker := time.NewTicker(50 * time.Millisecond) - defer ticker.Stop() - - for { - select { - case <-breakTimeout: - t.Fatalf("didn't find tx in mempool") - case <-ticker.C: - mempool, err := miner.GetRawMempool() - if err != nil { - t.Fatalf("unable to get mempool: %v", err) - } - - if len(mempool) == 0 { - continue - } - - for _, txid := range mempool { - tx, err := miner.GetRawTransaction(txid) - if err != nil { - t.Fatalf("unable to fetch tx: %v", err) - } - - msgTx := tx.MsgTx() - for _, txIn := range msgTx.TxIn { - if txIn.PreviousOutPoint == chanPoint { - return msgTx - } - } - } - } - } -} - -// testSwitchCircuitPersistence creates a multihop network to ensure the sender -// and intermediaries are persisting their open payment circuits. After -// forwarding a packet via an outgoing link, all are restarted, and expected to -// forward a response back from the receiver once back online. -// -// The general flow of this test: -// 1. Carol --> Dave --> Alice --> Bob forward payment -// 2. 
X X X Bob restart sender and intermediaries -// 3. Carol <-- Dave <-- Alice <-- Bob expect settle to propagate -func testSwitchCircuitPersistence(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - const chanAmt = btcutil.Amount(1000000) - const pushAmt = btcutil.Amount(900000) - var networkChans []*lnrpc.ChannelPoint - - // Open a channel with 100k satoshis between Alice and Bob with Alice - // being the sole funder of the channel. - ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout) - chanPointAlice := openChannelAndAssert( - ctxt, t, net, net.Alice, net.Bob, - lntest.OpenChannelParams{ - Amt: chanAmt, - PushAmt: pushAmt, - }, - ) - networkChans = append(networkChans, chanPointAlice) - - aliceChanTXID, err := lnd.GetChanPointFundingTxid(chanPointAlice) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } - aliceFundPoint := wire.OutPoint{ - Hash: *aliceChanTXID, - Index: chanPointAlice.OutputIndex, - } - - // As preliminary setup, we'll create two new nodes: Carol and Dave, - // such that we now have a 4 ndoe, 3 channel topology. Dave will make - // a channel with Alice, and Carol with Dave. After this setup, the - // network topology should now look like: - // Carol -> Dave -> Alice -> Bob - // - // First, we'll create Dave and establish a channel to Alice. 
- dave, err := net.NewNode("Dave", nil) - if err != nil { - t.Fatalf("unable to create new nodes: %v", err) - } - defer shutdownAndAssert(net, t, dave) - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, dave, net.Alice); err != nil { - t.Fatalf("unable to connect dave to alice: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.SendCoins(ctxt, btcutil.UnitsPerCoin(), dave) - if err != nil { - t.Fatalf("unable to send coins to dave: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - chanPointDave := openChannelAndAssert( - ctxt, t, net, dave, net.Alice, - lntest.OpenChannelParams{ - Amt: chanAmt, - PushAmt: pushAmt, - }, - ) - networkChans = append(networkChans, chanPointDave) - daveChanTXID, err := lnd.GetChanPointFundingTxid(chanPointDave) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } - daveFundPoint := wire.OutPoint{ - Hash: *daveChanTXID, - Index: chanPointDave.OutputIndex, - } - - // Next, we'll create Carol and establish a channel to from her to - // Dave. Carol is started in htlchodl mode so that we can disconnect the - // intermediary hops before starting the settle. 
- carol, err := net.NewNode("Carol", []string{"--hodl.exit-settle"}) - if err != nil { - t.Fatalf("unable to create new nodes: %v", err) - } - defer shutdownAndAssert(net, t, carol) - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, carol, dave); err != nil { - t.Fatalf("unable to connect carol to dave: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.SendCoins(ctxt, btcutil.UnitsPerCoin(), carol) - if err != nil { - t.Fatalf("unable to send coins to carol: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - chanPointCarol := openChannelAndAssert( - ctxt, t, net, carol, dave, - lntest.OpenChannelParams{ - Amt: chanAmt, - PushAmt: pushAmt, - }, - ) - networkChans = append(networkChans, chanPointCarol) - - carolChanTXID, err := lnd.GetChanPointFundingTxid(chanPointCarol) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } - carolFundPoint := wire.OutPoint{ - Hash: *carolChanTXID, - Index: chanPointCarol.OutputIndex, - } - - // Wait for all nodes to have seen all channels. - nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol, dave} - nodeNames := []string{"Alice", "Bob", "Carol", "Dave"} - for _, chanPoint := range networkChans { - for i, node := range nodes { - txid, err := lnd.GetChanPointFundingTxid(chanPoint) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } - point := wire.OutPoint{ - Hash: *txid, - Index: chanPoint.OutputIndex, - } - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = node.WaitForNetworkChannelOpen(ctxt, chanPoint) - if err != nil { - t.Fatalf("%s(%d): timeout waiting for "+ - "channel(%s) open: %v", nodeNames[i], - node.NodeID, point, err) - } - } - } - - // Create 5 invoices for Carol, which expect a payment from Bob for 1k - // satoshis with a different preimage each time. 
- const numPayments = 5 - const paymentAmt = 1000 - payReqs, _, _, err := createPayReqs( - carol, paymentAmt, numPayments, - ) - if err != nil { - t.Fatalf("unable to create pay reqs: %v", err) - } - - // We'll wait for all parties to recognize the new channels within the - // network. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = dave.WaitForNetworkChannelOpen(ctxt, chanPointDave) - if err != nil { - t.Fatalf("dave didn't advertise his channel: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = carol.WaitForNetworkChannelOpen(ctxt, chanPointCarol) - if err != nil { - t.Fatalf("carol didn't advertise her channel in time: %v", - err) - } - - time.Sleep(time.Millisecond * 50) - - // Using Carol as the source, pay to the 5 invoices from Bob created - // above. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = completePaymentRequests( - ctxt, net.Bob, net.Bob.RouterClient, payReqs, false, - ) - if err != nil { - t.Fatalf("unable to send payments: %v", err) - } - - // Wait until all nodes in the network have 5 outstanding htlcs. - var predErr er.R - err = wait.Predicate(func() bool { - predErr = assertNumActiveHtlcs(nodes, numPayments) - if predErr != nil { - return false - } - return true - }, time.Second*15) - if err != nil { - t.Fatalf("htlc mismatch: %v", predErr) - } - - // Restart the intermediaries and the sender. - if err := net.RestartNode(dave, nil); err != nil { - t.Fatalf("Node restart failed: %v", err) - } - - if err := net.RestartNode(net.Alice, nil); err != nil { - t.Fatalf("Node restart failed: %v", err) - } - - if err := net.RestartNode(net.Bob, nil); err != nil { - t.Fatalf("Node restart failed: %v", err) - } - - // Ensure all of the intermediate links are reconnected. 
- ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.EnsureConnected(ctxt, net.Alice, dave) - if err != nil { - t.Fatalf("unable to reconnect alice and dave: %v", err) - } - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.EnsureConnected(ctxt, net.Bob, net.Alice) - if err != nil { - t.Fatalf("unable to reconnect bob and alice: %v", err) - } - - // Ensure all nodes in the network still have 5 outstanding htlcs. - err = wait.Predicate(func() bool { - predErr = assertNumActiveHtlcs(nodes, numPayments) - return predErr == nil - }, time.Second*15) - if err != nil { - t.Fatalf("htlc mismatch: %v", predErr) - } - - // Now restart carol without hodl mode, to settle back the outstanding - // payments. - carol.SetExtraArgs(nil) - if err := net.RestartNode(carol, nil); err != nil { - t.Fatalf("Node restart failed: %v", err) - } - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.EnsureConnected(ctxt, dave, carol) - if err != nil { - t.Fatalf("unable to reconnect dave and carol: %v", err) - } - - // After the payments settle, there should be no active htlcs on any of - // the nodes in the network. - err = wait.Predicate(func() bool { - predErr = assertNumActiveHtlcs(nodes, 0) - return predErr == nil - - }, time.Second*15) - if err != nil { - t.Fatalf("htlc mismatch: %v", predErr) - } - - // When asserting the amount of satoshis moved, we'll factor in the - // default base fee, as we didn't modify the fee structure when - // creating the seed nodes in the network. - const baseFee = 1 - - // At this point all the channels within our proto network should be - // shifted by 5k satoshis in the direction of Carol, the sink within the - // payment flow generated above. The order of asserts corresponds to - // increasing of time is needed to embed the HTLC in commitment - // transaction, in channel Bob->Alice->David->Carol, order is Carol, - // David, Alice, Bob. 
- var amountPaid = int64(5000) - assertAmountPaid(t, "Dave(local) => Carol(remote)", carol, - carolFundPoint, int64(0), amountPaid) - assertAmountPaid(t, "Dave(local) => Carol(remote)", dave, - carolFundPoint, amountPaid, int64(0)) - assertAmountPaid(t, "Alice(local) => Dave(remote)", dave, - daveFundPoint, int64(0), amountPaid+(baseFee*numPayments)) - assertAmountPaid(t, "Alice(local) => Dave(remote)", net.Alice, - daveFundPoint, amountPaid+(baseFee*numPayments), int64(0)) - assertAmountPaid(t, "Bob(local) => Alice(remote)", net.Alice, - aliceFundPoint, int64(0), amountPaid+((baseFee*numPayments)*2)) - assertAmountPaid(t, "Bob(local) => Alice(remote)", net.Bob, - aliceFundPoint, amountPaid+(baseFee*numPayments)*2, int64(0)) - - // Lastly, we will send one more payment to ensure all channels are - // still functioning properly. - finalInvoice := &lnrpc.Invoice{ - Memo: "testing", - Value: paymentAmt, - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - resp, errr := carol.AddInvoice(ctxt, finalInvoice) - if errr != nil { - t.Fatalf("unable to add invoice: %v", errr) - } - - payReqs = []string{resp.PaymentRequest} - - // Using Carol as the source, pay to the 5 invoices from Bob created - // above. 
- ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = completePaymentRequests( - ctxt, net.Bob, net.Bob.RouterClient, payReqs, true, - ) - if err != nil { - t.Fatalf("unable to send payments: %v", err) - } - - amountPaid = int64(6000) - assertAmountPaid(t, "Dave(local) => Carol(remote)", carol, - carolFundPoint, int64(0), amountPaid) - assertAmountPaid(t, "Dave(local) => Carol(remote)", dave, - carolFundPoint, amountPaid, int64(0)) - assertAmountPaid(t, "Alice(local) => Dave(remote)", dave, - daveFundPoint, int64(0), amountPaid+(baseFee*(numPayments+1))) - assertAmountPaid(t, "Alice(local) => Dave(remote)", net.Alice, - daveFundPoint, amountPaid+(baseFee*(numPayments+1)), int64(0)) - assertAmountPaid(t, "Bob(local) => Alice(remote)", net.Alice, - aliceFundPoint, int64(0), amountPaid+((baseFee*(numPayments+1))*2)) - assertAmountPaid(t, "Bob(local) => Alice(remote)", net.Bob, - aliceFundPoint, amountPaid+(baseFee*(numPayments+1))*2, int64(0)) - - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Alice, chanPointAlice, false) - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, dave, chanPointDave, false) - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, carol, chanPointCarol, false) -} - -// testSwitchOfflineDelivery constructs a set of multihop payments, and tests -// that the returning payments are not lost if a peer on the backwards path is -// offline when the settle/fails are received. We expect the payments to be -// buffered in memory, and transmitted as soon as the disconnect link comes back -// online. -// -// The general flow of this test: -// 1. Carol --> Dave --> Alice --> Bob forward payment -// 2. Carol --- Dave X Alice --- Bob disconnect intermediaries -// 3. Carol --- Dave X Alice <-- Bob settle last hop -// 4. 
Carol <-- Dave <-- Alice --- Bob reconnect, expect settle to propagate -func testSwitchOfflineDelivery(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - const chanAmt = btcutil.Amount(1000000) - const pushAmt = btcutil.Amount(900000) - var networkChans []*lnrpc.ChannelPoint - - // Open a channel with 100k satoshis between Alice and Bob with Alice - // being the sole funder of the channel. - ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout) - chanPointAlice := openChannelAndAssert( - ctxt, t, net, net.Alice, net.Bob, - lntest.OpenChannelParams{ - Amt: chanAmt, - PushAmt: pushAmt, - }, - ) - networkChans = append(networkChans, chanPointAlice) - - aliceChanTXID, err := lnd.GetChanPointFundingTxid(chanPointAlice) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } - aliceFundPoint := wire.OutPoint{ - Hash: *aliceChanTXID, - Index: chanPointAlice.OutputIndex, - } - - // As preliminary setup, we'll create two new nodes: Carol and Dave, - // such that we now have a 4 ndoe, 3 channel topology. Dave will make - // a channel with Alice, and Carol with Dave. After this setup, the - // network topology should now look like: - // Carol -> Dave -> Alice -> Bob - // - // First, we'll create Dave and establish a channel to Alice. 
- dave, err := net.NewNode("Dave", nil) - if err != nil { - t.Fatalf("unable to create new nodes: %v", err) - } - defer shutdownAndAssert(net, t, dave) - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, dave, net.Alice); err != nil { - t.Fatalf("unable to connect dave to alice: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.SendCoins(ctxt, btcutil.UnitsPerCoin(), dave) - if err != nil { - t.Fatalf("unable to send coins to dave: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - chanPointDave := openChannelAndAssert( - ctxt, t, net, dave, net.Alice, - lntest.OpenChannelParams{ - Amt: chanAmt, - PushAmt: pushAmt, - }, - ) - networkChans = append(networkChans, chanPointDave) - daveChanTXID, err := lnd.GetChanPointFundingTxid(chanPointDave) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } - daveFundPoint := wire.OutPoint{ - Hash: *daveChanTXID, - Index: chanPointDave.OutputIndex, - } - - // Next, we'll create Carol and establish a channel to from her to - // Dave. Carol is started in htlchodl mode so that we can disconnect the - // intermediary hops before starting the settle. 
- carol, err := net.NewNode("Carol", []string{"--hodl.exit-settle"}) - if err != nil { - t.Fatalf("unable to create new nodes: %v", err) - } - defer shutdownAndAssert(net, t, carol) - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, carol, dave); err != nil { - t.Fatalf("unable to connect carol to dave: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.SendCoins(ctxt, btcutil.UnitsPerCoin(), carol) - if err != nil { - t.Fatalf("unable to send coins to carol: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - chanPointCarol := openChannelAndAssert( - ctxt, t, net, carol, dave, - lntest.OpenChannelParams{ - Amt: chanAmt, - PushAmt: pushAmt, - }, - ) - networkChans = append(networkChans, chanPointCarol) - - carolChanTXID, err := lnd.GetChanPointFundingTxid(chanPointCarol) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } - carolFundPoint := wire.OutPoint{ - Hash: *carolChanTXID, - Index: chanPointCarol.OutputIndex, - } - - // Wait for all nodes to have seen all channels. - nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol, dave} - nodeNames := []string{"Alice", "Bob", "Carol", "Dave"} - for _, chanPoint := range networkChans { - for i, node := range nodes { - txid, err := lnd.GetChanPointFundingTxid(chanPoint) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } - point := wire.OutPoint{ - Hash: *txid, - Index: chanPoint.OutputIndex, - } - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = node.WaitForNetworkChannelOpen(ctxt, chanPoint) - if err != nil { - t.Fatalf("%s(%d): timeout waiting for "+ - "channel(%s) open: %v", nodeNames[i], - node.NodeID, point, err) - } - } - } - - // Create 5 invoices for Carol, which expect a payment from Bob for 1k - // satoshis with a different preimage each time. 
- const numPayments = 5 - const paymentAmt = 1000 - payReqs, _, _, err := createPayReqs( - carol, paymentAmt, numPayments, - ) - if err != nil { - t.Fatalf("unable to create pay reqs: %v", err) - } - - // We'll wait for all parties to recognize the new channels within the - // network. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = dave.WaitForNetworkChannelOpen(ctxt, chanPointDave) - if err != nil { - t.Fatalf("dave didn't advertise his channel: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = carol.WaitForNetworkChannelOpen(ctxt, chanPointCarol) - if err != nil { - t.Fatalf("carol didn't advertise her channel in time: %v", - err) - } - - time.Sleep(time.Millisecond * 50) - - // Using Carol as the source, pay to the 5 invoices from Bob created - // above. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = completePaymentRequests( - ctxt, net.Bob, net.Bob.RouterClient, payReqs, false, - ) - if err != nil { - t.Fatalf("unable to send payments: %v", err) - } - - // Wait for all of the payments to reach Carol. - var predErr er.R - err = wait.Predicate(func() bool { - predErr = assertNumActiveHtlcs(nodes, numPayments) - return predErr == nil - }, time.Second*15) - if err != nil { - t.Fatalf("htlc mismatch: %v", predErr) - } - - // First, disconnect Dave and Alice so that their link is broken. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.DisconnectNodes(ctxt, dave, net.Alice); err != nil { - t.Fatalf("unable to disconnect alice from dave: %v", err) - } - - // Then, reconnect them to ensure Dave doesn't just fail back the htlc. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, dave, net.Alice); err != nil { - t.Fatalf("unable to reconnect alice to dave: %v", err) - } - - // Wait to ensure that the payment remain are not failed back after - // reconnecting. All node should report the number payments initiated - // for the duration of the interval. 
- err = wait.Invariant(func() bool { - predErr = assertNumActiveHtlcs(nodes, numPayments) - return predErr == nil - }, time.Second*2) - if err != nil { - t.Fatalf("htlc change: %v", predErr) - } - - // Now, disconnect Dave from Alice again before settling back the - // payment. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.DisconnectNodes(ctxt, dave, net.Alice); err != nil { - t.Fatalf("unable to disconnect alice from dave: %v", err) - } - - // Now restart carol without hodl mode, to settle back the outstanding - // payments. - carol.SetExtraArgs(nil) - if err := net.RestartNode(carol, nil); err != nil { - t.Fatalf("Node restart failed: %v", err) - } - - // Wait for Carol to report no outstanding htlcs. - carolNode := []*lntest.HarnessNode{carol} - err = wait.Predicate(func() bool { - predErr = assertNumActiveHtlcs(carolNode, 0) - return predErr == nil - }, time.Second*15) - if err != nil { - t.Fatalf("htlc mismatch: %v", predErr) - } - - // Now that the settles have reached Dave, reconnect him with Alice, - // allowing the settles to return to the sender. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.EnsureConnected(ctxt, dave, net.Alice); err != nil { - t.Fatalf("unable to reconnect alice to dave: %v", err) - } - - // Wait until all outstanding htlcs in the network have been settled. - err = wait.Predicate(func() bool { - predErr = assertNumActiveHtlcs(nodes, 0) - if predErr != nil { - return false - } - return true - }, time.Second*15) - if err != nil { - t.Fatalf("htlc mismatch: %v", predErr) - } - - // When asserting the amount of satoshis moved, we'll factor in the - // default base fee, as we didn't modify the fee structure when - // creating the seed nodes in the network. - const baseFee = 1 - - // At this point all the channels within our proto network should be - // shifted by 5k satoshis in the direction of Carol, the sink within the - // payment flow generated above. 
The order of asserts corresponds to - // increasing of time is needed to embed the HTLC in commitment - // transaction, in channel Bob->Alice->David->Carol, order is Carol, - // David, Alice, Bob. - var amountPaid = int64(5000) - assertAmountPaid(t, "Dave(local) => Carol(remote)", carol, - carolFundPoint, int64(0), amountPaid) - assertAmountPaid(t, "Dave(local) => Carol(remote)", dave, - carolFundPoint, amountPaid, int64(0)) - assertAmountPaid(t, "Alice(local) => Dave(remote)", dave, - daveFundPoint, int64(0), amountPaid+(baseFee*numPayments)) - assertAmountPaid(t, "Alice(local) => Dave(remote)", net.Alice, - daveFundPoint, amountPaid+(baseFee*numPayments), int64(0)) - assertAmountPaid(t, "Bob(local) => Alice(remote)", net.Alice, - aliceFundPoint, int64(0), amountPaid+((baseFee*numPayments)*2)) - assertAmountPaid(t, "Bob(local) => Alice(remote)", net.Bob, - aliceFundPoint, amountPaid+(baseFee*numPayments)*2, int64(0)) - - // Lastly, we will send one more payment to ensure all channels are - // still functioning properly. - finalInvoice := &lnrpc.Invoice{ - Memo: "testing", - Value: paymentAmt, - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - resp, errr := carol.AddInvoice(ctxt, finalInvoice) - if errr != nil { - t.Fatalf("unable to add invoice: %v", errr) - } - - payReqs = []string{resp.PaymentRequest} - - // Using Carol as the source, pay to the 5 invoices from Bob created - // above. 
- ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = completePaymentRequests( - ctxt, net.Bob, net.Bob.RouterClient, payReqs, true, - ) - if err != nil { - t.Fatalf("unable to send payments: %v", err) - } - - amountPaid = int64(6000) - assertAmountPaid(t, "Dave(local) => Carol(remote)", carol, - carolFundPoint, int64(0), amountPaid) - assertAmountPaid(t, "Dave(local) => Carol(remote)", dave, - carolFundPoint, amountPaid, int64(0)) - assertAmountPaid(t, "Alice(local) => Dave(remote)", dave, - daveFundPoint, int64(0), amountPaid+(baseFee*(numPayments+1))) - assertAmountPaid(t, "Alice(local) => Dave(remote)", net.Alice, - daveFundPoint, amountPaid+(baseFee*(numPayments+1)), int64(0)) - assertAmountPaid(t, "Bob(local) => Alice(remote)", net.Alice, - aliceFundPoint, int64(0), amountPaid+((baseFee*(numPayments+1))*2)) - assertAmountPaid(t, "Bob(local) => Alice(remote)", net.Bob, - aliceFundPoint, amountPaid+(baseFee*(numPayments+1))*2, int64(0)) - - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Alice, chanPointAlice, false) - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, dave, chanPointDave, false) - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, carol, chanPointCarol, false) -} - -// testSwitchOfflineDeliveryPersistence constructs a set of multihop payments, -// and tests that the returning payments are not lost if a peer on the backwards -// path is offline when the settle/fails are received AND the peer buffering the -// responses is completely restarts. We expect the payments to be reloaded from -// disk, and transmitted as soon as the intermediaries are reconnected. -// -// The general flow of this test: -// 1. Carol --> Dave --> Alice --> Bob forward payment -// 2. Carol --- Dave X Alice --- Bob disconnect intermediaries -// 3. Carol --- Dave X Alice <-- Bob settle last hop -// 4. 
Carol --- Dave X X Bob restart Alice -// 5. Carol <-- Dave <-- Alice --- Bob expect settle to propagate -func testSwitchOfflineDeliveryPersistence(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - const chanAmt = btcutil.Amount(1000000) - const pushAmt = btcutil.Amount(900000) - var networkChans []*lnrpc.ChannelPoint - - // Open a channel with 100k satoshis between Alice and Bob with Alice - // being the sole funder of the channel. - ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout) - chanPointAlice := openChannelAndAssert( - ctxt, t, net, net.Alice, net.Bob, - lntest.OpenChannelParams{ - Amt: chanAmt, - PushAmt: pushAmt, - }, - ) - networkChans = append(networkChans, chanPointAlice) - - aliceChanTXID, err := lnd.GetChanPointFundingTxid(chanPointAlice) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } - aliceFundPoint := wire.OutPoint{ - Hash: *aliceChanTXID, - Index: chanPointAlice.OutputIndex, - } - - // As preliminary setup, we'll create two new nodes: Carol and Dave, - // such that we now have a 4 ndoe, 3 channel topology. Dave will make - // a channel with Alice, and Carol with Dave. After this setup, the - // network topology should now look like: - // Carol -> Dave -> Alice -> Bob - // - // First, we'll create Dave and establish a channel to Alice. 
- dave, err := net.NewNode("Dave", nil) - if err != nil { - t.Fatalf("unable to create new nodes: %v", err) - } - defer shutdownAndAssert(net, t, dave) - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, dave, net.Alice); err != nil { - t.Fatalf("unable to connect dave to alice: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.SendCoins(ctxt, btcutil.UnitsPerCoin(), dave) - if err != nil { - t.Fatalf("unable to send coins to dave: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - chanPointDave := openChannelAndAssert( - ctxt, t, net, dave, net.Alice, - lntest.OpenChannelParams{ - Amt: chanAmt, - PushAmt: pushAmt, - }, - ) - - networkChans = append(networkChans, chanPointDave) - daveChanTXID, err := lnd.GetChanPointFundingTxid(chanPointDave) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } - daveFundPoint := wire.OutPoint{ - Hash: *daveChanTXID, - Index: chanPointDave.OutputIndex, - } - - // Next, we'll create Carol and establish a channel to from her to - // Dave. Carol is started in htlchodl mode so that we can disconnect the - // intermediary hops before starting the settle. 
- carol, err := net.NewNode("Carol", []string{"--hodl.exit-settle"}) - if err != nil { - t.Fatalf("unable to create new nodes: %v", err) - } - defer shutdownAndAssert(net, t, carol) - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, carol, dave); err != nil { - t.Fatalf("unable to connect carol to dave: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.SendCoins(ctxt, btcutil.UnitsPerCoin(), carol) - if err != nil { - t.Fatalf("unable to send coins to carol: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - chanPointCarol := openChannelAndAssert( - ctxt, t, net, carol, dave, - lntest.OpenChannelParams{ - Amt: chanAmt, - PushAmt: pushAmt, - }, - ) - networkChans = append(networkChans, chanPointCarol) - - carolChanTXID, err := lnd.GetChanPointFundingTxid(chanPointCarol) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } - carolFundPoint := wire.OutPoint{ - Hash: *carolChanTXID, - Index: chanPointCarol.OutputIndex, - } - - // Wait for all nodes to have seen all channels. - nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol, dave} - nodeNames := []string{"Alice", "Bob", "Carol", "Dave"} - for _, chanPoint := range networkChans { - for i, node := range nodes { - txid, err := lnd.GetChanPointFundingTxid(chanPoint) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } - point := wire.OutPoint{ - Hash: *txid, - Index: chanPoint.OutputIndex, - } - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = node.WaitForNetworkChannelOpen(ctxt, chanPoint) - if err != nil { - t.Fatalf("%s(%d): timeout waiting for "+ - "channel(%s) open: %v", nodeNames[i], - node.NodeID, point, err) - } - } - } - - // Create 5 invoices for Carol, which expect a payment from Bob for 1k - // satoshis with a different preimage each time. 
- const numPayments = 5 - const paymentAmt = 1000 - payReqs, _, _, err := createPayReqs( - carol, paymentAmt, numPayments, - ) - if err != nil { - t.Fatalf("unable to create pay reqs: %v", err) - } - - // We'll wait for all parties to recognize the new channels within the - // network. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = dave.WaitForNetworkChannelOpen(ctxt, chanPointDave) - if err != nil { - t.Fatalf("dave didn't advertise his channel: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = carol.WaitForNetworkChannelOpen(ctxt, chanPointCarol) - if err != nil { - t.Fatalf("carol didn't advertise her channel in time: %v", - err) - } - - // Using Carol as the source, pay to the 5 invoices from Bob created - // above. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = completePaymentRequests( - ctxt, net.Bob, net.Bob.RouterClient, payReqs, false, - ) - if err != nil { - t.Fatalf("unable to send payments: %v", err) - } - - var predErr er.R - err = wait.Predicate(func() bool { - predErr = assertNumActiveHtlcs(nodes, numPayments) - if predErr != nil { - return false - } - return true - - }, time.Second*15) - if err != nil { - t.Fatalf("htlc mismatch: %v", predErr) - } - - // Disconnect the two intermediaries, Alice and Dave, by shutting down - // Alice. - if err := net.StopNode(net.Alice); err != nil { - t.Fatalf("unable to shutdown alice: %v", err) - } - - // Now restart carol without hodl mode, to settle back the outstanding - // payments. - carol.SetExtraArgs(nil) - if err := net.RestartNode(carol, nil); err != nil { - t.Fatalf("Node restart failed: %v", err) - } - - // Make Carol and Dave are reconnected before waiting for the htlcs to - // clear. 
- ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.EnsureConnected(ctxt, dave, carol) - if err != nil { - t.Fatalf("unable to reconnect dave and carol: %v", err) - } - - // Wait for Carol to report no outstanding htlcs, and also for Dav to - // receive all the settles from Carol. - carolNode := []*lntest.HarnessNode{carol} - err = wait.Predicate(func() bool { - predErr = assertNumActiveHtlcs(carolNode, 0) - if predErr != nil { - return false - } - - predErr = assertNumActiveHtlcsChanPoint(dave, carolFundPoint, 0) - return predErr == nil - }, time.Second*15) - if err != nil { - t.Fatalf("htlc mismatch: %v", predErr) - } - - // Finally, restart dave who received the settles, but was unable to - // deliver them to Alice since they were disconnected. - if err := net.RestartNode(dave, nil); err != nil { - t.Fatalf("unable to restart dave: %v", err) - } - if err = net.RestartNode(net.Alice, nil); err != nil { - t.Fatalf("unable to restart alice: %v", err) - } - - // Force Dave and Alice to reconnect before waiting for the htlcs to - // clear. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.EnsureConnected(ctxt, dave, net.Alice) - if err != nil { - t.Fatalf("unable to reconnect dave and carol: %v", err) - } - - // After reconnection succeeds, the settles should be propagated all - // the way back to the sender. All nodes should report no active htlcs. - err = wait.Predicate(func() bool { - predErr = assertNumActiveHtlcs(nodes, 0) - if predErr != nil { - return false - } - return true - }, time.Second*15) - if err != nil { - t.Fatalf("htlc mismatch: %v", predErr) - } - - // When asserting the amount of satoshis moved, we'll factor in the - // default base fee, as we didn't modify the fee structure when - // creating the seed nodes in the network. 
- const baseFee = 1 - - // At this point all the channels within our proto network should be - // shifted by 5k satoshis in the direction of Carol, the sink within the - // payment flow generated above. The order of asserts corresponds to - // increasing of time is needed to embed the HTLC in commitment - // transaction, in channel Bob->Alice->David->Carol, order is Carol, - // David, Alice, Bob. - var amountPaid = int64(5000) - assertAmountPaid(t, "Dave(local) => Carol(remote)", carol, - carolFundPoint, int64(0), amountPaid) - assertAmountPaid(t, "Dave(local) => Carol(remote)", dave, - carolFundPoint, amountPaid, int64(0)) - assertAmountPaid(t, "Alice(local) => Dave(remote)", dave, - daveFundPoint, int64(0), amountPaid+(baseFee*numPayments)) - assertAmountPaid(t, "Alice(local) => Dave(remote)", net.Alice, - daveFundPoint, amountPaid+(baseFee*numPayments), int64(0)) - assertAmountPaid(t, "Bob(local) => Alice(remote)", net.Alice, - aliceFundPoint, int64(0), amountPaid+((baseFee*numPayments)*2)) - assertAmountPaid(t, "Bob(local) => Alice(remote)", net.Bob, - aliceFundPoint, amountPaid+(baseFee*numPayments)*2, int64(0)) - - // Lastly, we will send one more payment to ensure all channels are - // still functioning properly. - finalInvoice := &lnrpc.Invoice{ - Memo: "testing", - Value: paymentAmt, - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - resp, errr := carol.AddInvoice(ctxt, finalInvoice) - if errr != nil { - t.Fatalf("unable to add invoice: %v", errr) - } - - payReqs = []string{resp.PaymentRequest} - - // Before completing the final payment request, ensure that the - // connection between Dave and Carol has been healed. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.EnsureConnected(ctxt, dave, carol) - if err != nil { - t.Fatalf("unable to reconnect dave and carol: %v", err) - } - - // Using Carol as the source, pay to the 5 invoices from Bob created - // above. 
- ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = completePaymentRequests( - ctxt, net.Bob, net.Bob.RouterClient, payReqs, true, - ) - if err != nil { - t.Fatalf("unable to send payments: %v", err) - } - - amountPaid = int64(6000) - assertAmountPaid(t, "Dave(local) => Carol(remote)", carol, - carolFundPoint, int64(0), amountPaid) - assertAmountPaid(t, "Dave(local) => Carol(remote)", dave, - carolFundPoint, amountPaid, int64(0)) - assertAmountPaid(t, "Alice(local) => Dave(remote)", dave, - daveFundPoint, int64(0), amountPaid+(baseFee*(numPayments+1))) - assertAmountPaid(t, "Alice(local) => Dave(remote)", net.Alice, - daveFundPoint, amountPaid+(baseFee*(numPayments+1)), int64(0)) - assertAmountPaid(t, "Bob(local) => Alice(remote)", net.Alice, - aliceFundPoint, int64(0), amountPaid+((baseFee*(numPayments+1))*2)) - assertAmountPaid(t, "Bob(local) => Alice(remote)", net.Bob, - aliceFundPoint, amountPaid+(baseFee*(numPayments+1))*2, int64(0)) - - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Alice, chanPointAlice, false) - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, dave, chanPointDave, false) - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, carol, chanPointCarol, false) -} - -// testSwitchOfflineDeliveryOutgoingOffline constructs a set of multihop payments, -// and tests that the returning payments are not lost if a peer on the backwards -// path is offline when the settle/fails are received AND the peer buffering the -// responses is completely restarts. We expect the payments to be reloaded from -// disk, and transmitted as soon as the intermediaries are reconnected. -// -// The general flow of this test: -// 1. Carol --> Dave --> Alice --> Bob forward payment -// 2. Carol --- Dave X Alice --- Bob disconnect intermediaries -// 3. Carol --- Dave X Alice <-- Bob settle last hop -// 4. 
Carol --- Dave X X shutdown Bob, restart Alice -// 5. Carol <-- Dave <-- Alice X expect settle to propagate -func testSwitchOfflineDeliveryOutgoingOffline( - net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - const chanAmt = btcutil.Amount(1000000) - const pushAmt = btcutil.Amount(900000) - var networkChans []*lnrpc.ChannelPoint - - // Open a channel with 100k satoshis between Alice and Bob with Alice - // being the sole funder of the channel. - ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout) - chanPointAlice := openChannelAndAssert( - ctxt, t, net, net.Alice, net.Bob, - lntest.OpenChannelParams{ - Amt: chanAmt, - PushAmt: pushAmt, - }, - ) - networkChans = append(networkChans, chanPointAlice) - - aliceChanTXID, err := lnd.GetChanPointFundingTxid(chanPointAlice) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } - aliceFundPoint := wire.OutPoint{ - Hash: *aliceChanTXID, - Index: chanPointAlice.OutputIndex, - } - - // As preliminary setup, we'll create two new nodes: Carol and Dave, - // such that we now have a 4 ndoe, 3 channel topology. Dave will make - // a channel with Alice, and Carol with Dave. After this setup, the - // network topology should now look like: - // Carol -> Dave -> Alice -> Bob - // - // First, we'll create Dave and establish a channel to Alice. 
- dave, err := net.NewNode("Dave", nil) - if err != nil { - t.Fatalf("unable to create new nodes: %v", err) - } - defer shutdownAndAssert(net, t, dave) - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, dave, net.Alice); err != nil { - t.Fatalf("unable to connect dave to alice: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.SendCoins(ctxt, btcutil.UnitsPerCoin(), dave) - if err != nil { - t.Fatalf("unable to send coins to dave: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - chanPointDave := openChannelAndAssert( - ctxt, t, net, dave, net.Alice, - lntest.OpenChannelParams{ - Amt: chanAmt, - PushAmt: pushAmt, - }, - ) - networkChans = append(networkChans, chanPointDave) - daveChanTXID, err := lnd.GetChanPointFundingTxid(chanPointDave) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } - daveFundPoint := wire.OutPoint{ - Hash: *daveChanTXID, - Index: chanPointDave.OutputIndex, - } - - // Next, we'll create Carol and establish a channel to from her to - // Dave. Carol is started in htlchodl mode so that we can disconnect the - // intermediary hops before starting the settle. 
- carol, err := net.NewNode("Carol", []string{"--hodl.exit-settle"}) - if err != nil { - t.Fatalf("unable to create new nodes: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, carol, dave); err != nil { - t.Fatalf("unable to connect carol to dave: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.SendCoins(ctxt, btcutil.UnitsPerCoin(), carol) - if err != nil { - t.Fatalf("unable to send coins to carol: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - chanPointCarol := openChannelAndAssert( - ctxt, t, net, carol, dave, - lntest.OpenChannelParams{ - Amt: chanAmt, - PushAmt: pushAmt, - }, - ) - networkChans = append(networkChans, chanPointCarol) - - carolChanTXID, err := lnd.GetChanPointFundingTxid(chanPointCarol) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } - carolFundPoint := wire.OutPoint{ - Hash: *carolChanTXID, - Index: chanPointCarol.OutputIndex, - } - - // Wait for all nodes to have seen all channels. - nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol, dave} - nodeNames := []string{"Alice", "Bob", "Carol", "Dave"} - for _, chanPoint := range networkChans { - for i, node := range nodes { - txid, err := lnd.GetChanPointFundingTxid(chanPoint) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } - point := wire.OutPoint{ - Hash: *txid, - Index: chanPoint.OutputIndex, - } - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = node.WaitForNetworkChannelOpen(ctxt, chanPoint) - if err != nil { - t.Fatalf("%s(%d): timeout waiting for "+ - "channel(%s) open: %v", nodeNames[i], - node.NodeID, point, err) - } - } - } - - // Create 5 invoices for Carol, which expect a payment from Bob for 1k - // satoshis with a different preimage each time. 
- const numPayments = 5 - const paymentAmt = 1000 - payReqs, _, _, err := createPayReqs( - carol, paymentAmt, numPayments, - ) - if err != nil { - t.Fatalf("unable to create pay reqs: %v", err) - } - - // We'll wait for all parties to recognize the new channels within the - // network. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = dave.WaitForNetworkChannelOpen(ctxt, chanPointDave) - if err != nil { - t.Fatalf("dave didn't advertise his channel: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = carol.WaitForNetworkChannelOpen(ctxt, chanPointCarol) - if err != nil { - t.Fatalf("carol didn't advertise her channel in time: %v", - err) - } - - // Using Carol as the source, pay to the 5 invoices from Bob created - // above. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = completePaymentRequests( - ctxt, net.Bob, net.Bob.RouterClient, payReqs, false, - ) - if err != nil { - t.Fatalf("unable to send payments: %v", err) - } - - // Wait for all payments to reach Carol. - var predErr er.R - err = wait.Predicate(func() bool { - predErr = assertNumActiveHtlcs(nodes, numPayments) - if predErr != nil { - return false - } - return true - }, time.Second*15) - if err != nil { - t.Fatalf("htlc mismatch: %v", predErr) - } - - // Disconnect the two intermediaries, Alice and Dave, so that when carol - // restarts, the response will be held by Dave. - if err := net.StopNode(net.Alice); err != nil { - t.Fatalf("unable to shutdown alice: %v", err) - } - - // Now restart carol without hodl mode, to settle back the outstanding - // payments. - carol.SetExtraArgs(nil) - if err := net.RestartNode(carol, nil); err != nil { - t.Fatalf("Node restart failed: %v", err) - } - - // Wait for Carol to report no outstanding htlcs. 
- carolNode := []*lntest.HarnessNode{carol} - err = wait.Predicate(func() bool { - predErr = assertNumActiveHtlcs(carolNode, 0) - if predErr != nil { - return false - } - - predErr = assertNumActiveHtlcsChanPoint(dave, carolFundPoint, 0) - return predErr == nil - }, time.Second*15) - if err != nil { - t.Fatalf("htlc mismatch: %v", predErr) - } - - // Now check that the total amount was transferred from Dave to Carol. - // The amount transferred should be exactly equal to the invoice total - // payment amount, 5k satsohis. - const amountPaid = int64(5000) - assertAmountPaid(t, "Dave(local) => Carol(remote)", carol, - carolFundPoint, int64(0), amountPaid) - assertAmountPaid(t, "Dave(local) => Carol(remote)", dave, - carolFundPoint, amountPaid, int64(0)) - - // Shutdown carol and leave her offline for the rest of the test. This - // is critical, as we wish to see if Dave can propragate settles even if - // the outgoing link is never revived. - shutdownAndAssert(net, t, carol) - - // Now restart Dave, ensuring he is both persisting the settles, and is - // able to reforward them to Alice after recovering from a restart. - if err := net.RestartNode(dave, nil); err != nil { - t.Fatalf("unable to restart dave: %v", err) - } - if err = net.RestartNode(net.Alice, nil); err != nil { - t.Fatalf("unable to restart alice: %v", err) - } - - // Ensure that Dave is reconnected to Alice before waiting for the - // htlcs to clear. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.EnsureConnected(ctxt, dave, net.Alice) - if err != nil { - t.Fatalf("unable to reconnect alice and dave: %v", err) - } - - // Since Carol has been shutdown permanently, we will wait until all - // other nodes in the network report no active htlcs. 
- nodesMinusCarol := []*lntest.HarnessNode{net.Bob, net.Alice, dave} - err = wait.Predicate(func() bool { - predErr = assertNumActiveHtlcs(nodesMinusCarol, 0) - if predErr != nil { - return false - } - return true - }, time.Second*15) - if err != nil { - t.Fatalf("htlc mismatch: %v", predErr) - } - - // When asserting the amount of satoshis moved, we'll factor in the - // default base fee, as we didn't modify the fee structure when - // creating the seed nodes in the network. - const baseFee = 1 - - // At this point, all channels (minus Carol, who is shutdown) should - // show a shift of 5k satoshis towards Carol. The order of asserts - // corresponds to increasing of time is needed to embed the HTLC in - // commitment transaction, in channel Bob->Alice->David, order is - // David, Alice, Bob. - assertAmountPaid(t, "Alice(local) => Dave(remote)", dave, - daveFundPoint, int64(0), amountPaid+(baseFee*numPayments)) - assertAmountPaid(t, "Alice(local) => Dave(remote)", net.Alice, - daveFundPoint, amountPaid+(baseFee*numPayments), int64(0)) - assertAmountPaid(t, "Bob(local) => Alice(remote)", net.Alice, - aliceFundPoint, int64(0), amountPaid+((baseFee*numPayments)*2)) - assertAmountPaid(t, "Bob(local) => Alice(remote)", net.Bob, - aliceFundPoint, amountPaid+(baseFee*numPayments)*2, int64(0)) - - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Alice, chanPointAlice, false) - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, dave, chanPointDave, false) -} - -// computeFee calculates the payment fee as specified in BOLT07 -func computeFee(baseFee, feeRate, amt lnwire.MilliSatoshi) lnwire.MilliSatoshi { - return baseFee + amt*feeRate/1000000 -} - -// testQueryRoutes checks the response of queryroutes. -// We'll create the following network topology: -// Alice --> Bob --> Carol --> Dave -// and query the daemon for routes from Alice to Dave. 
-func testQueryRoutes(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - const chanAmt = btcutil.Amount(100000) - var networkChans []*lnrpc.ChannelPoint - - // Open a channel between Alice and Bob. - ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout) - chanPointAlice := openChannelAndAssert( - ctxt, t, net, net.Alice, net.Bob, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - networkChans = append(networkChans, chanPointAlice) - - // Create Carol and establish a channel from Bob. - carol, err := net.NewNode("Carol", nil) - if err != nil { - t.Fatalf("unable to create new nodes: %v", err) - } - defer shutdownAndAssert(net, t, carol) - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, carol, net.Bob); err != nil { - t.Fatalf("unable to connect carol to bob: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.SendCoins(ctxt, btcutil.UnitsPerCoin(), net.Bob) - if err != nil { - t.Fatalf("unable to send coins to bob: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - chanPointBob := openChannelAndAssert( - ctxt, t, net, net.Bob, carol, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - networkChans = append(networkChans, chanPointBob) - - // Create Dave and establish a channel from Carol. 
- dave, err := net.NewNode("Dave", nil) - if err != nil { - t.Fatalf("unable to create new nodes: %v", err) - } - defer shutdownAndAssert(net, t, dave) - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, dave, carol); err != nil { - t.Fatalf("unable to connect dave to carol: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.SendCoins(ctxt, btcutil.UnitsPerCoin(), carol) - if err != nil { - t.Fatalf("unable to send coins to carol: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - chanPointCarol := openChannelAndAssert( - ctxt, t, net, carol, dave, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - networkChans = append(networkChans, chanPointCarol) - - // Wait for all nodes to have seen all channels. - nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol, dave} - nodeNames := []string{"Alice", "Bob", "Carol", "Dave"} - for _, chanPoint := range networkChans { - for i, node := range nodes { - txid, err := lnd.GetChanPointFundingTxid(chanPoint) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } - point := wire.OutPoint{ - Hash: *txid, - Index: chanPoint.OutputIndex, - } - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = node.WaitForNetworkChannelOpen(ctxt, chanPoint) - if err != nil { - t.Fatalf("%s(%d): timeout waiting for "+ - "channel(%s) open: %v", nodeNames[i], - node.NodeID, point, err) - } - } - } - - // Query for routes to pay from Alice to Dave. 
- const paymentAmt = 1000 - routesReq := &lnrpc.QueryRoutesRequest{ - PubKey: dave.PubKeyStr, - Amt: paymentAmt, - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - routesRes, errr := net.Alice.QueryRoutes(ctxt, routesReq) - if errr != nil { - t.Fatalf("unable to get route: %v", errr) - } - - const mSat = 1000 - feePerHopMSat := computeFee(1000, 1, paymentAmt*mSat) - - for i, route := range routesRes.Routes { - expectedTotalFeesMSat := - lnwire.MilliSatoshi(len(route.Hops)-1) * feePerHopMSat - expectedTotalAmtMSat := (paymentAmt * mSat) + expectedTotalFeesMSat - - if route.TotalFees != route.TotalFeesMsat/mSat { - t.Fatalf("route %v: total fees %v (msat) does not "+ - "round down to %v (sat)", - i, route.TotalFeesMsat, route.TotalFees) - } - if route.TotalFeesMsat != int64(expectedTotalFeesMSat) { - t.Fatalf("route %v: total fees in msat expected %v got %v", - i, expectedTotalFeesMSat, route.TotalFeesMsat) - } - - if route.TotalAmt != route.TotalAmtMsat/mSat { - t.Fatalf("route %v: total amt %v (msat) does not "+ - "round down to %v (sat)", - i, route.TotalAmtMsat, route.TotalAmt) - } - if route.TotalAmtMsat != int64(expectedTotalAmtMSat) { - t.Fatalf("route %v: total amt in msat expected %v got %v", - i, expectedTotalAmtMSat, route.TotalAmtMsat) - } - - // For all hops except the last, we check that fee equals feePerHop - // and amount to forward deducts feePerHop on each hop. 
- expectedAmtToForwardMSat := expectedTotalAmtMSat - for j, hop := range route.Hops[:len(route.Hops)-1] { - expectedAmtToForwardMSat -= feePerHopMSat - - if hop.Fee != hop.FeeMsat/mSat { - t.Fatalf("route %v hop %v: fee %v (msat) does not "+ - "round down to %v (sat)", - i, j, hop.FeeMsat, hop.Fee) - } - if hop.FeeMsat != int64(feePerHopMSat) { - t.Fatalf("route %v hop %v: fee in msat expected %v got %v", - i, j, feePerHopMSat, hop.FeeMsat) - } - - if hop.AmtToForward != hop.AmtToForwardMsat/mSat { - t.Fatalf("route %v hop %v: amt to forward %v (msat) does not "+ - "round down to %v (sat)", - i, j, hop.AmtToForwardMsat, hop.AmtToForward) - } - if hop.AmtToForwardMsat != int64(expectedAmtToForwardMSat) { - t.Fatalf("route %v hop %v: amt to forward in msat "+ - "expected %v got %v", - i, j, expectedAmtToForwardMSat, hop.AmtToForwardMsat) - } - } - // Last hop should have zero fee and amount to forward should equal - // payment amount. - hop := route.Hops[len(route.Hops)-1] - - if hop.Fee != 0 || hop.FeeMsat != 0 { - t.Fatalf("route %v hop %v: fee expected 0 got %v (sat) %v (msat)", - i, len(route.Hops)-1, hop.Fee, hop.FeeMsat) - } - - if hop.AmtToForward != hop.AmtToForwardMsat/mSat { - t.Fatalf("route %v hop %v: amt to forward %v (msat) does not "+ - "round down to %v (sat)", - i, len(route.Hops)-1, hop.AmtToForwardMsat, hop.AmtToForward) - } - if hop.AmtToForwardMsat != paymentAmt*mSat { - t.Fatalf("route %v hop %v: amt to forward in msat "+ - "expected %v got %v", - i, len(route.Hops)-1, paymentAmt*mSat, hop.AmtToForwardMsat) - } - } - - // We clean up the test case by closing channels that were created for - // the duration of the tests. 
- ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Alice, chanPointAlice, false) - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Bob, chanPointBob, false) - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, carol, chanPointCarol, false) -} - -// testRouteFeeCutoff tests that we are able to prevent querying routes and -// sending payments that incur a fee higher than the fee limit. -func testRouteFeeCutoff(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - // For this test, we'll create the following topology: - // - // --- Bob --- - // / \ - // Alice ---- ---- Dave - // \ / - // -- Carol -- - // - // Alice will attempt to send payments to Dave that should not incur a - // fee greater than the fee limit expressed as a percentage of the - // amount and as a fixed amount of satoshis. - const chanAmt = btcutil.Amount(100000) - - // Open a channel between Alice and Bob. - ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout) - chanPointAliceBob := openChannelAndAssert( - ctxt, t, net, net.Alice, net.Bob, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - - // Create Carol's node and open a channel between her and Alice with - // Alice being the funder. 
- carol, err := net.NewNode("Carol", nil) - if err != nil { - t.Fatalf("unable to create carol's node: %v", err) - } - defer shutdownAndAssert(net, t, carol) - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, carol, net.Alice); err != nil { - t.Fatalf("unable to connect carol to alice: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.SendCoins(ctxt, btcutil.UnitsPerCoin(), carol) - if err != nil { - t.Fatalf("unable to send coins to carol: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - chanPointAliceCarol := openChannelAndAssert( - ctxt, t, net, net.Alice, carol, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - - // Create Dave's node and open a channel between him and Bob with Bob - // being the funder. - dave, err := net.NewNode("Dave", nil) - if err != nil { - t.Fatalf("unable to create dave's node: %v", err) - } - defer shutdownAndAssert(net, t, dave) - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, dave, net.Bob); err != nil { - t.Fatalf("unable to connect dave to bob: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - chanPointBobDave := openChannelAndAssert( - ctxt, t, net, net.Bob, dave, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - - // Open a channel between Carol and Dave. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, carol, dave); err != nil { - t.Fatalf("unable to connect carol to dave: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - chanPointCarolDave := openChannelAndAssert( - ctxt, t, net, carol, dave, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - - // Now that all the channels were set up, we'll wait for all the nodes - // to have seen all the channels. 
- nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol, dave} - nodeNames := []string{"alice", "bob", "carol", "dave"} - networkChans := []*lnrpc.ChannelPoint{ - chanPointAliceBob, chanPointAliceCarol, chanPointBobDave, - chanPointCarolDave, - } - for _, chanPoint := range networkChans { - for i, node := range nodes { - txid, err := lnd.GetChanPointFundingTxid(chanPoint) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } - outpoint := wire.OutPoint{ - Hash: *txid, - Index: chanPoint.OutputIndex, - } - - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - err = node.WaitForNetworkChannelOpen(ctxt, chanPoint) - if err != nil { - t.Fatalf("%s(%d) timed out waiting for "+ - "channel(%s) open: %v", nodeNames[i], - node.NodeID, outpoint, err) - } - } - } - - // The payments should only be successful across the route: - // Alice -> Bob -> Dave - // Therefore, we'll update the fee policy on Carol's side for the - // channel between her and Dave to invalidate the route: - // Alice -> Carol -> Dave - baseFee := int64(10000) - feeRate := int64(5) - timeLockDelta := uint32(chainreg.DefaultBitcoinTimeLockDelta) - maxHtlc := calculateMaxHtlc(chanAmt) - - expectedPolicy := &lnrpc.RoutingPolicy{ - FeeBaseMsat: baseFee, - FeeRateMilliMsat: testFeeBase * feeRate, - TimeLockDelta: timeLockDelta, - MinHtlc: 1000, // default value - MaxHtlcMsat: maxHtlc, - } - - updateFeeReq := &lnrpc.PolicyUpdateRequest{ - BaseFeeMsat: baseFee, - FeeRate: float64(feeRate), - TimeLockDelta: timeLockDelta, - MaxHtlcMsat: maxHtlc, - Scope: &lnrpc.PolicyUpdateRequest_ChanPoint{ - ChanPoint: chanPointCarolDave, - }, - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if _, err := carol.UpdateChannelPolicy(ctxt, updateFeeReq); err != nil { - t.Fatalf("unable to update chan policy: %v", err) - } - - // Wait for Alice to receive the channel update from Carol. 
- ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - aliceSub := subscribeGraphNotifications(t, ctxt, net.Alice) - defer close(aliceSub.quit) - - waitForChannelUpdate( - t, aliceSub, - []expectedChanUpdate{ - {carol.PubKeyStr, expectedPolicy, chanPointCarolDave}, - }, - ) - - // We'll also need the channel IDs for Bob's channels in order to - // confirm the route of the payments. - listReq := &lnrpc.ListChannelsRequest{} - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - listResp, errr := net.Bob.ListChannels(ctxt, listReq) - if errr != nil { - t.Fatalf("unable to retrieve bob's channels: %v", errr) - } - - var aliceBobChanID, bobDaveChanID uint64 - for _, channel := range listResp.Channels { - switch channel.RemotePubkey { - case net.Alice.PubKeyStr: - aliceBobChanID = channel.ChanId - case dave.PubKeyStr: - bobDaveChanID = channel.ChanId - } - } - - if aliceBobChanID == 0 { - t.Fatalf("channel between alice and bob not found") - } - if bobDaveChanID == 0 { - t.Fatalf("channel between bob and dave not found") - } - hopChanIDs := []uint64{aliceBobChanID, bobDaveChanID} - - // checkRoute is a helper closure to ensure the route contains the - // correct intermediate hops. - checkRoute := func(route *lnrpc.Route) { - if len(route.Hops) != 2 { - t.Fatalf("expected two hops, got %d", len(route.Hops)) - } - - for i, hop := range route.Hops { - if hop.ChanId != hopChanIDs[i] { - t.Fatalf("expected chan id %d, got %d", - hopChanIDs[i], hop.ChanId) - } - } - } - - // We'll be attempting to send two payments from Alice to Dave. One will - // have a fee cutoff expressed as a percentage of the amount and the - // other will have it expressed as a fixed amount of satoshis. - const paymentAmt = 100 - carolFee := computeFee(lnwire.MilliSatoshi(baseFee), 1, paymentAmt) - - // testFeeCutoff is a helper closure that will ensure the different - // types of fee limits work as intended when querying routes and sending - // payments. 
- testFeeCutoff := func(feeLimit *lnrpc.FeeLimit) { - queryRoutesReq := &lnrpc.QueryRoutesRequest{ - PubKey: dave.PubKeyStr, - Amt: paymentAmt, - FeeLimit: feeLimit, - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - routesResp, err := net.Alice.QueryRoutes(ctxt, queryRoutesReq) - if err != nil { - t.Fatalf("unable to get routes: %v", err) - } - - checkRoute(routesResp.Routes[0]) - - invoice := &lnrpc.Invoice{Value: paymentAmt} - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - invoiceResp, err := dave.AddInvoice(ctxt, invoice) - if err != nil { - t.Fatalf("unable to create invoice: %v", err) - } - - sendReq := &routerrpc.SendPaymentRequest{ - PaymentRequest: invoiceResp.PaymentRequest, - TimeoutSeconds: 60, - FeeLimitMsat: noFeeLimitMsat, - } - switch limit := feeLimit.Limit.(type) { - case *lnrpc.FeeLimit_Fixed: - sendReq.FeeLimitMsat = 1000 * limit.Fixed - case *lnrpc.FeeLimit_Percent: - sendReq.FeeLimitMsat = 1000 * paymentAmt * limit.Percent / 100 - } - - result := sendAndAssertSuccess(t, net.Alice, sendReq) - - checkRoute(result.Htlcs[0].Route) - } - - // We'll start off using percentages first. Since the fee along the - // route using Carol as an intermediate hop is 10% of the payment's - // amount, we'll use a lower percentage in order to invalid that route. - feeLimitPercent := &lnrpc.FeeLimit{ - Limit: &lnrpc.FeeLimit_Percent{ - Percent: baseFee/1000 - 1, - }, - } - testFeeCutoff(feeLimitPercent) - - // Now we'll test using fixed fee limit amounts. Since we computed the - // fee for the route using Carol as an intermediate hop earlier, we can - // use a smaller value in order to invalidate that route. - feeLimitFixed := &lnrpc.FeeLimit{ - Limit: &lnrpc.FeeLimit_Fixed{ - Fixed: int64(carolFee.ToSatoshis()) - 1, - }, - } - testFeeCutoff(feeLimitFixed) - - // Once we're done, close the channels and shut down the nodes created - // throughout this test. 
- ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Alice, chanPointAliceBob, false) - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Alice, chanPointAliceCarol, false) - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Bob, chanPointBobDave, false) - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, carol, chanPointCarolDave, false) -} - -// testSendUpdateDisableChannel ensures that a channel update with the disable -// flag set is sent once a channel has been either unilaterally or cooperatively -// closed. -func testSendUpdateDisableChannel(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - const ( - chanAmt = 100000 - ) - - // Open a channel between Alice and Bob and Alice and Carol. These will - // be closed later on in order to trigger channel update messages - // marking the channels as disabled. 
- ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout) - chanPointAliceBob := openChannelAndAssert( - ctxt, t, net, net.Alice, net.Bob, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - - carol, err := net.NewNode("Carol", []string{ - "--minbackoff=10s", - "--chan-enable-timeout=1.5s", - "--chan-disable-timeout=3s", - "--chan-status-sample-interval=.5s", - }) - if err != nil { - t.Fatalf("unable to create carol's node: %v", err) - } - defer shutdownAndAssert(net, t, carol) - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, net.Alice, carol); err != nil { - t.Fatalf("unable to connect alice to carol: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - chanPointAliceCarol := openChannelAndAssert( - ctxt, t, net, net.Alice, carol, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - - // We create a new node Eve that has an inactive channel timeout of - // just 2 seconds (down from the default 20m). It will be used to test - // channel updates for channels going inactive. - eve, err := net.NewNode("Eve", []string{ - "--minbackoff=10s", - "--chan-enable-timeout=1.5s", - "--chan-disable-timeout=3s", - "--chan-status-sample-interval=.5s", - }) - if err != nil { - t.Fatalf("unable to create eve's node: %v", err) - } - defer shutdownAndAssert(net, t, eve) - - // Give Eve some coins. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.SendCoins(ctxt, btcutil.UnitsPerCoin(), eve) - if err != nil { - t.Fatalf("unable to send coins to eve: %v", err) - } - - // Connect Eve to Carol and Bob, and open a channel to carol. 
- ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, eve, carol); err != nil { - t.Fatalf("unable to connect alice to carol: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, eve, net.Bob); err != nil { - t.Fatalf("unable to connect eve to bob: %v", err) - } - - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - chanPointEveCarol := openChannelAndAssert( - ctxt, t, net, eve, carol, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - - // Launch a node for Dave which will connect to Bob in order to receive - // graph updates from. This will ensure that the channel updates are - // propagated throughout the network. - dave, err := net.NewNode("Dave", nil) - if err != nil { - t.Fatalf("unable to create dave's node: %v", err) - } - defer shutdownAndAssert(net, t, dave) - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, net.Bob, dave); err != nil { - t.Fatalf("unable to connect bob to dave: %v", err) - } - - daveSub := subscribeGraphNotifications(t, ctxb, dave) - defer close(daveSub.quit) - - // We should expect to see a channel update with the default routing - // policy, except that it should indicate the channel is disabled. - expectedPolicy := &lnrpc.RoutingPolicy{ - FeeBaseMsat: int64(chainreg.DefaultBitcoinBaseFeeMSat), - FeeRateMilliMsat: int64(chainreg.DefaultBitcoinFeeRate), - TimeLockDelta: chainreg.DefaultBitcoinTimeLockDelta, - MinHtlc: 1000, // default value - MaxHtlcMsat: calculateMaxHtlc(chanAmt), - Disabled: true, - } - - // Let Carol go offline. Since Eve has an inactive timeout of 2s, we - // expect her to send an update disabling the channel. - restartCarol, err := net.SuspendNode(carol) - if err != nil { - t.Fatalf("unable to suspend carol: %v", err) - } - waitForChannelUpdate( - t, daveSub, - []expectedChanUpdate{ - {eve.PubKeyStr, expectedPolicy, chanPointEveCarol}, - }, - ) - - // We restart Carol. 
Since the channel now becomes active again, Eve - // should send a ChannelUpdate setting the channel no longer disabled. - if err := restartCarol(); err != nil { - t.Fatalf("unable to restart carol: %v", err) - } - - expectedPolicy.Disabled = false - waitForChannelUpdate( - t, daveSub, - []expectedChanUpdate{ - {eve.PubKeyStr, expectedPolicy, chanPointEveCarol}, - }, - ) - - // Now we'll test a long disconnection. Disconnect Carol and Eve and - // ensure they both detect each other as disabled. Their min backoffs - // are high enough to not interfere with disabling logic. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.DisconnectNodes(ctxt, carol, eve); err != nil { - t.Fatalf("unable to disconnect Carol from Eve: %v", err) - } - - // Wait for a disable from both Carol and Eve to come through. - expectedPolicy.Disabled = true - waitForChannelUpdate( - t, daveSub, - []expectedChanUpdate{ - {eve.PubKeyStr, expectedPolicy, chanPointEveCarol}, - {carol.PubKeyStr, expectedPolicy, chanPointEveCarol}, - }, - ) - - // Reconnect Carol and Eve, this should cause them to reenable the - // channel from both ends after a short delay. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.EnsureConnected(ctxt, carol, eve); err != nil { - t.Fatalf("unable to reconnect Carol to Eve: %v", err) - } - - expectedPolicy.Disabled = false - waitForChannelUpdate( - t, daveSub, - []expectedChanUpdate{ - {eve.PubKeyStr, expectedPolicy, chanPointEveCarol}, - {carol.PubKeyStr, expectedPolicy, chanPointEveCarol}, - }, - ) - - // Now we'll test a short disconnection. Disconnect Carol and Eve, then - // reconnect them after one second so that their scheduled disables are - // aborted. One second is twice the status sample interval, so this - // should allow for the disconnect to be detected, but still leave time - // to cancel the announcement before the 3 second inactive timeout is - // hit. 
- ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.DisconnectNodes(ctxt, carol, eve); err != nil { - t.Fatalf("unable to disconnect Carol from Eve: %v", err) - } - time.Sleep(time.Second) - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.EnsureConnected(ctxt, eve, carol); err != nil { - t.Fatalf("unable to reconnect Carol to Eve: %v", err) - } - - // Since the disable should have been canceled by both Carol and Eve, we - // expect no channel updates to appear on the network. - assertNoChannelUpdates(t, daveSub, 4*time.Second) - - // Close Alice's channels with Bob and Carol cooperatively and - // unilaterally respectively. - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - _, _, err = net.CloseChannel(ctxt, net.Alice, chanPointAliceBob, false) - if err != nil { - t.Fatalf("unable to close channel: %v", err) - } - - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - _, _, err = net.CloseChannel(ctxt, net.Alice, chanPointAliceCarol, true) - if err != nil { - t.Fatalf("unable to close channel: %v", err) - } - - // Now that the channel close processes have been started, we should - // receive an update marking each as disabled. - expectedPolicy.Disabled = true - waitForChannelUpdate( - t, daveSub, - []expectedChanUpdate{ - {net.Alice.PubKeyStr, expectedPolicy, chanPointAliceBob}, - {net.Alice.PubKeyStr, expectedPolicy, chanPointAliceCarol}, - }, - ) - - // Finally, close the channels by mining the closing transactions. - mineBlocks(t, net, 1, 2) - - // Also do this check for Eve's channel with Carol. 
- ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - _, _, err = net.CloseChannel(ctxt, eve, chanPointEveCarol, false) - if err != nil { - t.Fatalf("unable to close channel: %v", err) - } - - waitForChannelUpdate( - t, daveSub, - []expectedChanUpdate{ - {eve.PubKeyStr, expectedPolicy, chanPointEveCarol}, - }, - ) - mineBlocks(t, net, 1, 1) - - // And finally, clean up the force closed channel by mining the - // sweeping transaction. - cleanupForceClose(t, net, net.Alice, chanPointAliceCarol) -} - -// testAbandonChannel abandones a channel and asserts that it is no -// longer open and not in one of the pending closure states. It also -// verifies that the abandoned channel is reported as closed with close -// type 'abandoned'. -func testAbandonChannel(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - // First establish a channel between Alice and Bob. - channelParam := lntest.OpenChannelParams{ - Amt: lnd.MaxBtcFundingAmount, - PushAmt: btcutil.Amount(100000), - } - - ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout) - chanPoint := openChannelAndAssert( - ctxt, t, net, net.Alice, net.Bob, channelParam, - ) - txid, err := lnd.GetChanPointFundingTxid(chanPoint) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } - chanPointStr := fmt.Sprintf("%v:%v", txid, chanPoint.OutputIndex) - - // Wait for channel to be confirmed open. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.Alice.WaitForNetworkChannelOpen(ctxt, chanPoint) - if err != nil { - t.Fatalf("alice didn't report channel: %v", err) - } - err = net.Bob.WaitForNetworkChannelOpen(ctxt, chanPoint) - if err != nil { - t.Fatalf("bob didn't report channel: %v", err) - } - - // Now that the channel is open, we'll obtain its channel ID real quick - // so we can use it to query the graph below. 
- listReq := &lnrpc.ListChannelsRequest{} - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - aliceChannelList, errr := net.Alice.ListChannels(ctxt, listReq) - if errr != nil { - t.Fatalf("unable to fetch alice's channels: %v", errr) - } - var chanID uint64 - for _, channel := range aliceChannelList.Channels { - if channel.ChannelPoint == chanPointStr { - chanID = channel.ChanId - } - } - - if chanID == 0 { - t.Fatalf("unable to find channel") - } - - // To make sure the channel is removed from the backup file as well when - // being abandoned, grab a backup snapshot so we can compare it with the - // later state. - bkupBefore, errr := ioutil.ReadFile(net.Alice.ChanBackupPath()) - if errr != nil { - t.Fatalf("could not get channel backup before abandoning "+ - "channel: %v", errr) - } - - // Send request to abandon channel. - abandonChannelRequest := &lnrpc.AbandonChannelRequest{ - ChannelPoint: chanPoint, - } - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - _, errr = net.Alice.AbandonChannel(ctxt, abandonChannelRequest) - if errr != nil { - t.Fatalf("unable to abandon channel: %v", errr) - } - - // Assert that channel in no longer open. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - aliceChannelList, errr = net.Alice.ListChannels(ctxt, listReq) - if errr != nil { - t.Fatalf("unable to list channels: %v", errr) - } - if len(aliceChannelList.Channels) != 0 { - t.Fatalf("alice should only have no channels open, "+ - "instead she has %v", - len(aliceChannelList.Channels)) - } - - // Assert that channel is not pending closure. 
- pendingReq := &lnrpc.PendingChannelsRequest{} - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - alicePendingList, errr := net.Alice.PendingChannels(ctxt, pendingReq) - if errr != nil { - t.Fatalf("unable to list pending channels: %v", errr) - } - if len(alicePendingList.PendingClosingChannels) != 0 { //nolint:staticcheck - t.Fatalf("alice should only have no pending closing channels, "+ - "instead she has %v", - len(alicePendingList.PendingClosingChannels)) //nolint:staticcheck - } - if len(alicePendingList.PendingForceClosingChannels) != 0 { - t.Fatalf("alice should only have no pending force closing "+ - "channels instead she has %v", - len(alicePendingList.PendingForceClosingChannels)) - } - if len(alicePendingList.WaitingCloseChannels) != 0 { - t.Fatalf("alice should only have no waiting close "+ - "channels instead she has %v", - len(alicePendingList.WaitingCloseChannels)) - } - - // Assert that channel is listed as abandoned. - closedReq := &lnrpc.ClosedChannelsRequest{ - Abandoned: true, - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - aliceClosedList, errr := net.Alice.ClosedChannels(ctxt, closedReq) - if errr != nil { - t.Fatalf("unable to list closed channels: %v", errr) - } - if len(aliceClosedList.Channels) != 1 { - t.Fatalf("alice should only have a single abandoned channel, "+ - "instead she has %v", - len(aliceClosedList.Channels)) - } - - // Ensure that the channel can no longer be found in the channel graph. - _, errr = net.Alice.GetChanInfo(ctxb, &lnrpc.ChanInfoRequest{ - ChanId: chanID, - }) - if !strings.Contains(errr.Error(), "marked as zombie") { - t.Fatalf("channel shouldn't be found in the channel " + - "graph!") - } - - // Make sure the channel is no longer in the channel backup list. 
- err = wait.Predicate(func() bool { - bkupAfter, err := ioutil.ReadFile(net.Alice.ChanBackupPath()) - if err != nil { - t.Fatalf("could not get channel backup before "+ - "abandoning channel: %v", err) - } - - return len(bkupAfter) < len(bkupBefore) - }, defaultTimeout) - if err != nil { - t.Fatalf("channel wasn't removed from channel backup file") - } - - // Calling AbandonChannel again, should result in no new errors, as the - // channel has already been removed. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - _, errr = net.Alice.AbandonChannel(ctxt, abandonChannelRequest) - if errr != nil { - t.Fatalf("unable to abandon channel a second time: %v", errr) - } - - // Now that we're done with the test, the channel can be closed. This - // is necessary to avoid unexpected outcomes of other tests that use - // Bob's lnd instance. - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Bob, chanPoint, true) - - // Cleanup by mining the force close and sweep transaction. - cleanupForceClose(t, net, net.Bob, chanPoint) -} - -// testSweepAllCoins tests that we're able to properly sweep all coins from the -// wallet into a single target address at the specified fee rate. -func testSweepAllCoins(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - // First, we'll make a new node, ainz who'll we'll use to test wallet - // sweeping. - ainz, err := net.NewNode("Ainz", nil) - if err != nil { - t.Fatalf("unable to create new node: %v", err) - } - defer shutdownAndAssert(net, t, ainz) - - // Next, we'll give Ainz exactly 2 utxos of 1 BTC each, with one of - // them being p2wkh and the other being a n2wpkh address. 
- ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - err = net.SendCoins(ctxt, btcutil.UnitsPerCoin(), ainz) - if err != nil { - t.Fatalf("unable to send coins to eve: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.SendCoinsNP2WKH(ctxt, btcutil.UnitsPerCoin(), ainz) - if err != nil { - t.Fatalf("unable to send coins to eve: %v", err) - } - - // Ensure that we can't send coins to our own Pubkey. - info, errr := ainz.GetInfo(ctxt, &lnrpc.GetInfoRequest{}) - if errr != nil { - t.Fatalf("unable to get node info: %v", errr) - } - - // Create a label that we will used to label the transaction with. - sendCoinsLabel := "send all coins" - - sweepReq := &lnrpc.SendCoinsRequest{ - Addr: info.IdentityPubkey, - SendAll: true, - Label: sendCoinsLabel, - } - _, errr = ainz.SendCoins(ctxt, sweepReq) - if errr == nil { - t.Fatalf("expected SendCoins to users own pubkey to fail") - } - - // Ensure that we can't send coins to another users Pubkey. - info, errr = net.Alice.GetInfo(ctxt, &lnrpc.GetInfoRequest{}) - if errr != nil { - t.Fatalf("unable to get node info: %v", errr) - } - - sweepReq = &lnrpc.SendCoinsRequest{ - Addr: info.IdentityPubkey, - SendAll: true, - Label: sendCoinsLabel, - } - _, errr = ainz.SendCoins(ctxt, sweepReq) - if errr == nil { - t.Fatalf("expected SendCoins to Alices pubkey to fail") - } - - // With the two coins above mined, we'll now instruct ainz to sweep all - // the coins to an external address not under its control. - // We will first attempt to send the coins to addresses that are not - // compatible with the current network. This is to test that the wallet - // will prevent any onchain transactions to addresses that are not on the - // same network as the user. - - // Send coins to a testnet3 address. 
- ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - sweepReq = &lnrpc.SendCoinsRequest{ - Addr: "tb1qfc8fusa98jx8uvnhzavxccqlzvg749tvjw82tg", - SendAll: true, - Label: sendCoinsLabel, - } - _, errr = ainz.SendCoins(ctxt, sweepReq) - if errr == nil { - t.Fatalf("expected SendCoins to different network to fail") - } - - // Send coins to a mainnet address. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - sweepReq = &lnrpc.SendCoinsRequest{ - Addr: "1MPaXKp5HhsLNjVSqaL7fChE3TVyrTMRT3", - SendAll: true, - Label: sendCoinsLabel, - } - _, errr = ainz.SendCoins(ctxt, sweepReq) - if errr == nil { - t.Fatalf("expected SendCoins to different network to fail") - } - - // Send coins to a compatible address. - minerAddr, err := net.Miner.NewAddress() - if err != nil { - t.Fatalf("unable to create new miner addr: %v", err) - } - - sweepReq = &lnrpc.SendCoinsRequest{ - Addr: minerAddr.String(), - SendAll: true, - Label: sendCoinsLabel, - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - _, errr = ainz.SendCoins(ctxt, sweepReq) - if errr != nil { - t.Fatalf("unable to sweep coins: %v", errr) - } - - // We'll mine a block which should include the sweep transaction we - // generated above. - block := mineBlocks(t, net, 1, 1)[0] - - // The sweep transaction should have exactly two inputs as we only had - // two UTXOs in the wallet. - sweepTx := block.Transactions[1] - if len(sweepTx.TxIn) != 2 { - t.Fatalf("expected 2 inputs instead have %v", len(sweepTx.TxIn)) - } - - sweepTxStr := sweepTx.TxHash().String() - assertTxLabel(ctxb, t, ainz, sweepTxStr, sendCoinsLabel) - - // While we are looking at labels, we test our label transaction command - // to make sure it is behaving as expected. First, we try to label our - // transaction with an empty label, and check that we fail as expected. 
- sweepHash := sweepTx.TxHash() - _, errr = ainz.WalletKitClient.LabelTransaction( - ctxt, &walletrpc.LabelTransactionRequest{ - Txid: sweepHash[:], - Label: "", - Overwrite: false, - }, - ) - if errr == nil { - t.Fatalf("expected error for zero transaction label") - } - - // Our error will be wrapped in a rpc error, so we check that it - // contains the error we expect. - errZeroLabel := "cannot label transaction with empty label" - if !strings.Contains(errr.Error(), errZeroLabel) { - t.Fatalf("expected: zero label error, got: %v", err) - } - - // Next, we try to relabel our transaction without setting the overwrite - // boolean. We expect this to fail, because the wallet requires setting - // of this param to prevent accidental overwrite of labels. - _, errr = ainz.WalletKitClient.LabelTransaction( - ctxt, &walletrpc.LabelTransactionRequest{ - Txid: sweepHash[:], - Label: "label that will not work", - Overwrite: false, - }, - ) - if errr == nil { - t.Fatalf("expected error for tx already labelled") - } - - // Our error will be wrapped in a rpc error, so we check that it - // contains the error we expect. - if !strings.Contains(errr.Error(), "ErrTxLabelExists") { - t.Fatalf("expected: label exists, got: %v", err) - } - - // Finally, we overwrite our label with a new label, which should not - // fail. - newLabel := "new sweep tx label" - _, errr = ainz.WalletKitClient.LabelTransaction( - ctxt, &walletrpc.LabelTransactionRequest{ - Txid: sweepHash[:], - Label: newLabel, - Overwrite: true, - }, - ) - if errr != nil { - t.Fatalf("could not label tx: %v", errr) - } - - assertTxLabel(ctxb, t, ainz, sweepTxStr, newLabel) - - // Finally, Ainz should now have no coins at all within his wallet. 
- balReq := &lnrpc.WalletBalanceRequest{} - resp, errr := ainz.WalletBalance(ctxt, balReq) - if errr != nil { - t.Fatalf("unable to get ainz's balance: %v", errr) - } - switch { - case resp.ConfirmedBalance != 0: - t.Fatalf("expected no confirmed balance, instead have %v", - resp.ConfirmedBalance) - - case resp.UnconfirmedBalance != 0: - t.Fatalf("expected no unconfirmed balance, instead have %v", - resp.UnconfirmedBalance) - } - - // If we try again, but this time specifying an amount, then the call - // should fail. - sweepReq.Amount = 10000 - _, errr = ainz.SendCoins(ctxt, sweepReq) - if errr == nil { - t.Fatalf("sweep attempt should fail") - } -} - -// assertTxLabel is a helper function which finds a target tx in our set -// of transactions and checks that it has the desired label. -func assertTxLabel(ctx context.Context, t *harnessTest, - node *lntest.HarnessNode, targetTx, label string) { - - // List all transactions relevant to our wallet, and find the tx so that - // we can check the correct label has been set. - ctxt, cancel := context.WithTimeout(ctx, defaultTimeout) - defer cancel() - - txResp, err := node.GetTransactions( - ctxt, &lnrpc.GetTransactionsRequest{}, - ) - if err != nil { - t.Fatalf("could not get transactions: %v", err) - } - - // Find our transaction in the set of transactions returned and check - // its label. - for _, txn := range txResp.Transactions { - if txn.TxHash == targetTx { - if txn.Label != label { - t.Fatalf("expected label: %v, got: %v", - label, txn.Label) - } - } - } -} - -// testHoldInvoicePersistence tests that a sender to a hold-invoice, can be -// restarted before the payment gets settled, and still be able to receive the -// preimage. -func testHoldInvoicePersistence(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - const ( - chanAmt = btcutil.Amount(1000000) - numPayments = 10 - ) - - // Create carol, and clean up when the test finishes. 
- carol, err := net.NewNode("Carol", nil) - if err != nil { - t.Fatalf("unable to create new nodes: %v", err) - } - defer shutdownAndAssert(net, t, carol) - - // Connect Alice to Carol. - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxb, net.Alice, carol); err != nil { - t.Fatalf("unable to connect alice to carol: %v", err) - } - - // Open a channel between Alice and Carol. - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - chanPointAlice := openChannelAndAssert( - ctxt, t, net, net.Alice, carol, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - - // Wait for Alice and Carol to receive the channel edge from the - // funding manager. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.Alice.WaitForNetworkChannelOpen(ctxt, chanPointAlice) - if err != nil { - t.Fatalf("alice didn't see the alice->carol channel before "+ - "timeout: %v", err) - } - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = carol.WaitForNetworkChannelOpen(ctxt, chanPointAlice) - if err != nil { - t.Fatalf("carol didn't see the carol->alice channel before "+ - "timeout: %v", err) - } - - // Create preimages for all payments we are going to initiate. - var preimages []lntypes.Preimage - for i := 0; i < numPayments; i++ { - var preimage lntypes.Preimage - _, errr := rand.Read(preimage[:]) - if errr != nil { - t.Fatalf("unable to generate preimage: %v", errr) - } - - preimages = append(preimages, preimage) - } - - // Let Carol create hold-invoices for all the payments. 
- var ( - payAmt = btcutil.Amount(4) - payReqs []string - invoiceStreams []invoicesrpc.Invoices_SubscribeSingleInvoiceClient - ) - - for _, preimage := range preimages { - payHash := preimage.Hash() - invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{ - Memo: "testing", - Value: int64(payAmt), - Hash: payHash[:], - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - resp, err := carol.AddHoldInvoice(ctxt, invoiceReq) - if err != nil { - t.Fatalf("unable to add invoice: %v", err) - } - - ctx, cancel := context.WithCancel(ctxb) - defer cancel() - - stream, err := carol.SubscribeSingleInvoice( - ctx, - &invoicesrpc.SubscribeSingleInvoiceRequest{ - RHash: payHash[:], - }, - ) - if err != nil { - t.Fatalf("unable to subscribe to invoice: %v", err) - } - - invoiceStreams = append(invoiceStreams, stream) - payReqs = append(payReqs, resp.PaymentRequest) - } - - // Wait for all the invoices to reach the OPEN state. - for _, stream := range invoiceStreams { - invoice, err := stream.Recv() - if err != nil { - t.Fatalf("err: %v", err) - } - - if invoice.State != lnrpc.Invoice_OPEN { - t.Fatalf("expected OPEN, got state: %v", invoice.State) - } - } - - // Let Alice initiate payments for all the created invoices. - var paymentStreams []routerrpc.Router_SendPaymentV2Client - for _, payReq := range payReqs { - ctx, cancel := context.WithCancel(ctxb) - defer cancel() - - payStream, err := net.Alice.RouterClient.SendPaymentV2( - ctx, &routerrpc.SendPaymentRequest{ - PaymentRequest: payReq, - TimeoutSeconds: 60, - FeeLimitSat: 1000000, - }, - ) - if err != nil { - t.Fatalf("unable to send alice htlc: %v", err) - } - - paymentStreams = append(paymentStreams, payStream) - } - - // Wait for inlight status update. 
- for _, payStream := range paymentStreams { - payment, err := payStream.Recv() - if err != nil { - t.Fatalf("Failed receiving status update: %v", err) - } - - if payment.Status != lnrpc.Payment_IN_FLIGHT { - t.Fatalf("state not in flight: %v", payment.Status) - } - } - - // The payments should now show up in Alice's ListInvoices, with a zero - // preimage, indicating they are not yet settled. - err = wait.NoError(func() er.R { - req := &lnrpc.ListPaymentsRequest{ - IncludeIncomplete: true, - } - ctxt, _ = context.WithTimeout(ctxt, defaultTimeout) - paymentsResp, err := net.Alice.ListPayments(ctxt, req) - if err != nil { - return er.Errorf("error when obtaining payments: %v", - err) - } - - // Gather the payment hashes we are looking for in the - // response. - payHashes := make(map[string]struct{}) - for _, preimg := range preimages { - payHashes[preimg.Hash().String()] = struct{}{} - } - - var zeroPreimg lntypes.Preimage - for _, payment := range paymentsResp.Payments { - _, ok := payHashes[payment.PaymentHash] - if !ok { - continue - } - - // The preimage should NEVER be non-zero at this point. - if payment.PaymentPreimage != zeroPreimg.String() { - t.Fatalf("expected zero preimage, got %v", - payment.PaymentPreimage) - } - - // We wait for the payment attempt to have been - // properly recorded in the DB. - if len(payment.Htlcs) == 0 { - return er.Errorf("no attempt recorded") - } - - delete(payHashes, payment.PaymentHash) - } - - if len(payHashes) != 0 { - return er.Errorf("payhash not found in response") - } - - return nil - }, time.Second*15) - if err != nil { - t.Fatalf("predicate not satisfied: %v", err) - } - - // Wait for all invoices to be accepted. - for _, stream := range invoiceStreams { - invoice, err := stream.Recv() - if err != nil { - t.Fatalf("err: %v", err) - } - - if invoice.State != lnrpc.Invoice_ACCEPTED { - t.Fatalf("expected ACCEPTED, got state: %v", - invoice.State) - } - } - - // Restart alice. 
This to ensure she will still be able to handle - // settling the invoices after a restart. - if err := net.RestartNode(net.Alice, nil); err != nil { - t.Fatalf("Node restart failed: %v", err) - } - - // Now after a restart, we must re-track the payments. We set up a - // goroutine for each to track thir status updates. - var ( - statusUpdates []chan *lnrpc.Payment - wg sync.WaitGroup - quit = make(chan struct{}) - ) - - defer close(quit) - for _, preimg := range preimages { - hash := preimg.Hash() - - ctx, cancel := context.WithCancel(ctxb) - defer cancel() - - payStream, err := net.Alice.RouterClient.TrackPaymentV2( - ctx, &routerrpc.TrackPaymentRequest{ - PaymentHash: hash[:], - }, - ) - if err != nil { - t.Fatalf("unable to send track payment: %v", err) - } - - // We set up a channel where we'll forward any status update. - upd := make(chan *lnrpc.Payment) - wg.Add(1) - go func() { - defer wg.Done() - - for { - payment, err := payStream.Recv() - if err != nil { - close(upd) - return - } - - select { - case upd <- payment: - case <-quit: - return - } - } - }() - - statusUpdates = append(statusUpdates, upd) - } - - // Wait for the in-flight status update. - for _, upd := range statusUpdates { - select { - case payment, ok := <-upd: - if !ok { - t.Fatalf("failed getting payment update") - } - - if payment.Status != lnrpc.Payment_IN_FLIGHT { - t.Fatalf("state not in in flight: %v", - payment.Status) - } - case <-time.After(5 * time.Second): - t.Fatalf("in flight status not recevied") - } - } - - // Settle invoices half the invoices, cancel the rest. 
- for i, preimage := range preimages { - var errr error - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if i%2 == 0 { - settle := &invoicesrpc.SettleInvoiceMsg{ - Preimage: preimage[:], - } - _, errr = carol.SettleInvoice(ctxt, settle) - } else { - hash := preimage.Hash() - settle := &invoicesrpc.CancelInvoiceMsg{ - PaymentHash: hash[:], - } - _, errr = carol.CancelInvoice(ctxt, settle) - } - if errr != nil { - t.Fatalf("unable to cancel/settle invoice: %v", errr) - } - } - - // Make sure we get the expected status update. - for i, upd := range statusUpdates { - // Read until the payment is in a terminal state. - var payment *lnrpc.Payment - for payment == nil { - select { - case p, ok := <-upd: - if !ok { - t.Fatalf("failed getting payment update") - } - - if p.Status == lnrpc.Payment_IN_FLIGHT { - continue - } - - payment = p - case <-time.After(5 * time.Second): - t.Fatalf("in flight status not recevied") - } - } - - // Assert terminal payment state. - if i%2 == 0 { - if payment.Status != lnrpc.Payment_SUCCEEDED { - t.Fatalf("state not succeeded : %v", - payment.Status) - } - } else { - if payment.FailureReason != - lnrpc.PaymentFailureReason_FAILURE_REASON_INCORRECT_PAYMENT_DETAILS { - - t.Fatalf("state not failed: %v", - payment.FailureReason) - } - } - } - - // Check that Alice's invoices to be shown as settled and failed - // accordingly, and preimages matching up. 
- req := &lnrpc.ListPaymentsRequest{ - IncludeIncomplete: true, - } - ctxt, _ = context.WithTimeout(ctxt, defaultTimeout) - paymentsResp, errr := net.Alice.ListPayments(ctxt, req) - if errr != nil { - t.Fatalf("error when obtaining Alice payments: %v", errr) - } - for i, preimage := range preimages { - paymentHash := preimage.Hash() - var p string - for _, resp := range paymentsResp.Payments { - if resp.PaymentHash == paymentHash.String() { - p = resp.PaymentPreimage - break - } - } - if p == "" { - t.Fatalf("payment not found") - } - - if i%2 == 0 { - if p != preimage.String() { - t.Fatalf("preimage doesn't match: %v vs %v", - p, preimage.String()) - } - } else { - if p != lntypes.ZeroHash.String() { - t.Fatalf("preimage not zero: %v", p) - } - } - } -} - -// testExternalFundingChanPoint tests that we're able to carry out a normal -// channel funding workflow given a channel point that was constructed outside -// the main daemon. -func testExternalFundingChanPoint(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - // First, we'll create two new nodes that we'll use to open channel - // between for this test. - carol, err := net.NewNode("carol", nil) - util.RequireNoErr(t.t, err) - defer shutdownAndAssert(net, t, carol) - - dave, err := net.NewNode("dave", nil) - util.RequireNoErr(t.t, err) - defer shutdownAndAssert(net, t, dave) - - // Carol will be funding the channel, so we'll send some coins over to - // her and ensure they have enough confirmations before we proceed. - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - err = net.SendCoins(ctxt, btcutil.UnitsPerCoin(), carol) - util.RequireNoErr(t.t, err) - - // Before we start the test, we'll ensure both sides are connected to - // the funding flow can properly be executed. 
- ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.EnsureConnected(ctxt, carol, dave) - util.RequireNoErr(t.t, err) - - // At this point, we're ready to simulate our external channel funding - // flow. To start with, we'll create a pending channel with a shim for - // a transaction that will never be published. - const thawHeight uint32 = 10 - const chanSize = lnd.MaxBtcFundingAmount - fundingShim1, chanPoint1, _ := deriveFundingShim( - net, t, carol, dave, chanSize, thawHeight, 1, false, - ) - _ = openChannelStream( - ctxb, t, net, carol, dave, lntest.OpenChannelParams{ - Amt: chanSize, - FundingShim: fundingShim1, - }, - ) - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - assertNumOpenChannelsPending(ctxt, t, carol, dave, 1) - - // That channel is now pending forever and normally would saturate the - // max pending channel limit for both nodes. But because the channel is - // externally funded, we should still be able to open another one. Let's - // do exactly that now. For this one we publish the transaction so we - // can mine it later. - fundingShim2, chanPoint2, _ := deriveFundingShim( - net, t, carol, dave, chanSize, thawHeight, 2, true, - ) - - // At this point, we'll now carry out the normal basic channel funding - // test as everything should now proceed as normal (a regular channel - // funding flow). - carolChan, daveChan, _, err := basicChannelFundingTest( - t, net, carol, dave, fundingShim2, - ) - util.RequireNoErr(t.t, err) - - // Both channels should be marked as frozen with the proper thaw - // height. - if carolChan.ThawHeight != thawHeight { - t.Fatalf("expected thaw height of %v, got %v", - carolChan.ThawHeight, thawHeight) - } - if daveChan.ThawHeight != thawHeight { - t.Fatalf("expected thaw height of %v, got %v", - daveChan.ThawHeight, thawHeight) - } - - // Next, to make sure the channel functions as normal, we'll make some - // payments within the channel. 
- payAmt := btcutil.Amount(100000) - invoice := &lnrpc.Invoice{ - Memo: "new chans", - Value: int64(payAmt), - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - resp, errr := dave.AddInvoice(ctxt, invoice) - require.NoError(t.t, errr) - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = completePaymentRequests( - ctxt, carol, carol.RouterClient, []string{resp.PaymentRequest}, - true, - ) - util.RequireNoErr(t.t, err) - - // Now that the channels are open, and we've confirmed that they're - // operational, we'll now ensure that the channels are frozen as - // intended (if requested). - // - // First, we'll try to close the channel as Carol, the initiator. This - // should fail as a frozen channel only allows the responder to - // initiate a channel close. - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - _, _, err = net.CloseChannel(ctxt, carol, chanPoint2, false) - if err == nil { - t.Fatalf("carol wasn't denied a co-op close attempt for a " + - "frozen channel") - } - - // Next we'll try but this time with Dave (the responder) as the - // initiator. This time the channel should be closed as normal. - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, dave, chanPoint2, false) - - // As a last step, we check if we still have the pending channel hanging - // around because we never published the funding TX. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - assertNumOpenChannelsPending(ctxt, t, carol, dave, 1) - - // Let's make sure we can abandon it. 
- ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - _, errr = carol.AbandonChannel(ctxt, &lnrpc.AbandonChannelRequest{ - ChannelPoint: chanPoint1, - PendingFundingShimOnly: true, - }) - require.NoError(t.t, errr) - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - _, errr = dave.AbandonChannel(ctxt, &lnrpc.AbandonChannelRequest{ - ChannelPoint: chanPoint1, - PendingFundingShimOnly: true, - }) - require.NoError(t.t, errr) - - // It should now not appear in the pending channels anymore. - assertNumOpenChannelsPending(ctxt, t, carol, dave, 0) -} - -// deriveFundingShim creates a channel funding shim by deriving the necessary -// keys on both sides. -func deriveFundingShim(net *lntest.NetworkHarness, t *harnessTest, - carol, dave *lntest.HarnessNode, chanSize btcutil.Amount, - thawHeight uint32, keyIndex int32, publish bool) (*lnrpc.FundingShim, - *lnrpc.ChannelPoint, *chainhash.Hash) { - - ctxb := context.Background() - keyLoc := &signrpc.KeyLocator{ - KeyFamily: 9999, - KeyIndex: keyIndex, - } - carolFundingKey, errr := carol.WalletKitClient.DeriveKey(ctxb, keyLoc) - require.NoError(t.t, errr) - daveFundingKey, errr := dave.WalletKitClient.DeriveKey(ctxb, keyLoc) - require.NoError(t.t, errr) - - // Now that we have the multi-sig keys for each party, we can manually - // construct the funding transaction. We'll instruct the backend to - // immediately create and broadcast a transaction paying out an exact - // amount. Normally this would reside in the mempool, but we just - // confirm it now for simplicity. 
- _, fundingOutput, err := input.GenFundingPkScript( - carolFundingKey.RawKeyBytes, daveFundingKey.RawKeyBytes, - int64(chanSize), - ) - util.RequireNoErr(t.t, err) - - var txid *chainhash.Hash - targetOutputs := []*wire.TxOut{fundingOutput} - if publish { - txid, err = net.Miner.SendOutputsWithoutChange( - targetOutputs, 5, - ) - util.RequireNoErr(t.t, err) - } else { - tx, err := net.Miner.CreateTransaction(targetOutputs, 5, false) - util.RequireNoErr(t.t, err) - - txHash := tx.TxHash() - txid = &txHash - } - - // At this point, we can being our external channel funding workflow. - // We'll start by generating a pending channel ID externally that will - // be used to track this new funding type. - var pendingChanID [32]byte - _, errr = rand.Read(pendingChanID[:]) - require.NoError(t.t, errr) - - // Now that we have the pending channel ID, Dave (our responder) will - // register the intent to receive a new channel funding workflow using - // the pending channel ID. - chanPoint := &lnrpc.ChannelPoint{ - FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{ - FundingTxidBytes: txid[:], - }, - } - chanPointShim := &lnrpc.ChanPointShim{ - Amt: int64(chanSize), - ChanPoint: chanPoint, - LocalKey: &lnrpc.KeyDescriptor{ - RawKeyBytes: daveFundingKey.RawKeyBytes, - KeyLoc: &lnrpc.KeyLocator{ - KeyFamily: daveFundingKey.KeyLoc.KeyFamily, - KeyIndex: daveFundingKey.KeyLoc.KeyIndex, - }, - }, - RemoteKey: carolFundingKey.RawKeyBytes, - PendingChanId: pendingChanID[:], - ThawHeight: thawHeight, - } - fundingShim := &lnrpc.FundingShim{ - Shim: &lnrpc.FundingShim_ChanPointShim{ - ChanPointShim: chanPointShim, - }, - } - _, errr = dave.FundingStateStep(ctxb, &lnrpc.FundingTransitionMsg{ - Trigger: &lnrpc.FundingTransitionMsg_ShimRegister{ - ShimRegister: fundingShim, - }, - }) - require.NoError(t.t, errr) - - // If we attempt to register the same shim (has the same pending chan - // ID), then we should get an error. 
- _, errr = dave.FundingStateStep(ctxb, &lnrpc.FundingTransitionMsg{ - Trigger: &lnrpc.FundingTransitionMsg_ShimRegister{ - ShimRegister: fundingShim, - }, - }) - if errr == nil { - t.Fatalf("duplicate pending channel ID funding shim " + - "registration should trigger an error") - } - - // We'll take the chan point shim we just registered for Dave (the - // responder), and swap the local/remote keys before we feed it in as - // Carol's funding shim as the initiator. - fundingShim.GetChanPointShim().LocalKey = &lnrpc.KeyDescriptor{ - RawKeyBytes: carolFundingKey.RawKeyBytes, - KeyLoc: &lnrpc.KeyLocator{ - KeyFamily: carolFundingKey.KeyLoc.KeyFamily, - KeyIndex: carolFundingKey.KeyLoc.KeyIndex, - }, - } - fundingShim.GetChanPointShim().RemoteKey = daveFundingKey.RawKeyBytes - - return fundingShim, chanPoint, txid -} - -// sendAndAssertSuccess sends the given payment requests and asserts that the -// payment completes successfully. -func sendAndAssertSuccess(t *harnessTest, node *lntest.HarnessNode, - req *routerrpc.SendPaymentRequest) *lnrpc.Payment { - - ctx, cancel := context.WithTimeout(context.Background(), defaultTimeout) - defer cancel() - - var result *lnrpc.Payment - err := wait.NoError(func() er.R { - stream, errr := node.RouterClient.SendPaymentV2(ctx, req) - if errr != nil { - return er.Errorf("unable to send payment: %v", errr) - } - - var err er.R - result, err = getPaymentResult(stream) - if err != nil { - return er.Errorf("unable to get payment result: %v", - err) - } - - if result.Status != lnrpc.Payment_SUCCEEDED { - return er.Errorf("payment failed: %v", result.Status) - } - - return nil - }, defaultTimeout) - util.RequireNoErr(t.t, err) - - return result -} - -// sendAndAssertFailure sends the given payment requests and asserts that the -// payment fails with the expected reason. 
-func sendAndAssertFailure(t *harnessTest, node *lntest.HarnessNode, - req *routerrpc.SendPaymentRequest, - failureReason lnrpc.PaymentFailureReason) *lnrpc.Payment { - - ctx, cancel := context.WithTimeout(context.Background(), defaultTimeout) - defer cancel() - - stream, errr := node.RouterClient.SendPaymentV2(ctx, req) - if errr != nil { - t.Fatalf("unable to send payment: %v", errr) - } - - result, err := getPaymentResult(stream) - if err != nil { - t.Fatalf("unable to get payment result: %v", err) - } - - if result.Status != lnrpc.Payment_FAILED { - t.Fatalf("payment was expected to fail, but succeeded") - } - - if result.FailureReason != failureReason { - t.Fatalf("payment should have been rejected due to "+ - "%v, but got %v", failureReason, result.Status) - } - - return result -} - -// getPaymentResult reads a final result from the stream and returns it. -func getPaymentResult(stream routerrpc.Router_SendPaymentV2Client) ( - *lnrpc.Payment, er.R) { - - for { - payment, errr := stream.Recv() - if errr != nil { - return nil, er.E(errr) - } - - if payment.Status != lnrpc.Payment_IN_FLIGHT { - return payment, nil - } - } -} - -// TestLightningNetworkDaemon performs a series of integration tests amongst a -// programmatically driven network of lnd nodes. -func TestLightningNetworkDaemon(t *testing.T) { - // If no tests are registered, then we can exit early. - if len(allTestCases) == 0 { - t.Skip("integration tests not selected with flag 'rpctest'") - } - - // Parse testing flags that influence our test execution. - logDir := lntest.GetLogDir() - require.NoError(t, os.MkdirAll(logDir, 0700)) - testCases, trancheIndex, trancheOffset := getTestCaseSplitTranche() - lntest.ApplyPortOffset(uint32(trancheIndex) * 1000) - - ht := newHarnessTest(t, nil) - - // Declare the network harness here to gain access to its - // 'OnTxAccepted' call back. 
- var lndHarness *lntest.NetworkHarness - - // Create an instance of the btcd's rpctest.Harness that will act as - // the miner for all tests. This will be used to fund the wallets of - // the nodes within the test network and to drive blockchain related - // events within the network. Revert the default setting of accepting - // non-standard transactions on simnet to reject them. Transactions on - // the lightning network should always be standard to get better - // guarantees of getting included in to blocks. - // - // We will also connect it to our chain backend. - minerLogDir := fmt.Sprintf("%s/.minerlogs", logDir) - miner, minerCleanUp, err := lntest.NewMiner( - minerLogDir, "output_btcd_miner.log", - harnessNetParams, &rpcclient.NotificationHandlers{}, - ) - util.RequireNoErr(t, err, "failed to create new miner") - defer func() { - util.RequireNoErr(t, minerCleanUp(), "failed to clean up miner") - }() - - // Start a chain backend. - chainBackend, cleanUp, err := lntest.NewBackend( - miner.P2PAddress(), harnessNetParams, - ) - if err != nil { - ht.Fatalf("unable to start backend: %v", err) - } - defer func() { - util.RequireNoErr( - t, cleanUp(), "failed to clean up chain backend", - ) - }() - - if err := miner.SetUp(true, 50); err != nil { - ht.Fatalf("unable to set up mining node: %v", err) - } - if err := miner.Node.NotifyNewTransactions(false); err != nil { - ht.Fatalf("unable to request transaction notifications: %v", err) - } - - // Connect chainbackend to miner. - util.RequireNoErr( - t, chainBackend.ConnectMiner(), "failed to connect to miner", - ) - - // Now we can set up our test harness (LND instance), with the chain - // backend we just created. 
- binary := ht.getLndBinary() - lndHarness, err = lntest.NewNetworkHarness( - miner, chainBackend, binary, *useEtcd, - ) - if err != nil { - ht.Fatalf("unable to create lightning network harness: %v", err) - } - defer lndHarness.Stop() - - // Spawn a new goroutine to watch for any fatal errors that any of the - // running lnd processes encounter. If an error occurs, then the test - // case should naturally as a result and we log the server error here to - // help debug. - go func() { - for { - select { - case err, more := <-lndHarness.ProcessErrors(): - if !more { - return - } - ht.Logf("lnd finished with error (stderr):\n%v", - err) - } - } - }() - - // Next mine enough blocks in order for segwit and the CSV package - // soft-fork to activate on SimNet. - numBlocks := harnessNetParams.MinerConfirmationWindow * 2 - if _, err := miner.Node.Generate(numBlocks); err != nil { - ht.Fatalf("unable to generate blocks: %v", err) - } - - // With the btcd harness created, we can now complete the - // initialization of the network. args - list of lnd arguments, - // example: "--debuglevel=debug" - // TODO(roasbeef): create master balanced channel with all the monies? - aliceBobArgs := []string{ - "--default-remote-max-htlcs=483", - } - - // Run the subset of the test cases selected in this tranche. 
- for idx, testCase := range testCases { - testCase := testCase - name := fmt.Sprintf("%02d-of-%d/%s/%s", - trancheOffset+uint(idx)+1, len(allTestCases), - chainBackend.Name(), testCase.name) - - success := t.Run(name, func(t1 *testing.T) { - cleanTestCaseName := strings.ReplaceAll( - testCase.name, " ", "_", - ) - - err = lndHarness.SetUp(cleanTestCaseName, aliceBobArgs) - util.RequireNoErr(t1, - err, "unable to set up test lightning network", - ) - defer func() { - util.RequireNoErr(t1, lndHarness.TearDown()) - }() - - err = lndHarness.EnsureConnected( - context.Background(), lndHarness.Alice, - lndHarness.Bob, - ) - util.RequireNoErr(t1, - err, "unable to connect alice to bob", - ) - - logLine := fmt.Sprintf( - "STARTING ============ %v ============\n", - testCase.name, - ) - - err = lndHarness.Alice.AddToLog(logLine) - util.RequireNoErr(t1, err, "unable to add to log") - - err = lndHarness.Bob.AddToLog(logLine) - util.RequireNoErr(t1, err, "unable to add to log") - - // Start every test with the default static fee estimate. - lndHarness.SetFeeEstimate(12500) - - // Create a separate harness test for the testcase to - // avoid overwriting the external harness test that is - // tied to the parent test. - ht := newHarnessTest(t1, lndHarness) - ht.RunTestCase(testCase) - }) - - // Stop at the first failure. Mimic behavior of original test - // framework. - if !success { - // Log failure time to help relate the lnd logs to the - // failure. 
- t.Logf("Failure time: %v", time.Now().Format( - "2006-01-02 15:04:05.000", - )) - break - } - } -} diff --git a/lnd/lntest/itest/lnd_test_list_off_test.go b/lnd/lntest/itest/lnd_test_list_off_test.go deleted file mode 100644 index 59795f1d..00000000 --- a/lnd/lntest/itest/lnd_test_list_off_test.go +++ /dev/null @@ -1,5 +0,0 @@ -// +build !rpctest - -package itest - -var allTestCases = []*testCase{} diff --git a/lnd/lntest/itest/lnd_test_list_on_test.go b/lnd/lntest/itest/lnd_test_list_on_test.go deleted file mode 100644 index cbbc8b37..00000000 --- a/lnd/lntest/itest/lnd_test_list_on_test.go +++ /dev/null @@ -1,289 +0,0 @@ -// +build rpctest - -package itest - -var allTestCases = []*testCase{ - { - name: "test multi-hop htlc", - test: testMultiHopHtlcClaims, - }, - { - name: "sweep coins", - test: testSweepAllCoins, - }, - { - name: "recovery info", - test: testGetRecoveryInfo, - }, - { - name: "onchain fund recovery", - test: testOnchainFundRecovery, - }, - { - name: "basic funding flow", - test: testBasicChannelFunding, - }, - { - name: "unconfirmed channel funding", - test: testUnconfirmedChannelFunding, - }, - { - name: "update channel policy", - test: testUpdateChannelPolicy, - }, - { - name: "open channel reorg test", - test: testOpenChannelAfterReorg, - }, - { - name: "disconnecting target peer", - test: testDisconnectingTargetPeer, - }, - { - name: "graph topology notifications", - test: testGraphTopologyNotifications, - }, - { - name: "funding flow persistence", - test: testChannelFundingPersistence, - }, - { - name: "channel force closure", - test: testChannelForceClosure, - }, - { - name: "channel balance", - test: testChannelBalance, - }, - { - name: "channel unsettled balance", - test: testChannelUnsettledBalance, - }, - { - name: "single hop invoice", - test: testSingleHopInvoice, - }, - { - name: "sphinx replay persistence", - test: testSphinxReplayPersistence, - }, - { - name: "list channels", - test: testListChannels, - }, - { - name: "list 
outgoing payments", - test: testListPayments, - }, - { - name: "max pending channel", - test: testMaxPendingChannels, - }, - { - name: "multi-hop payments", - test: testMultiHopPayments, - }, - { - name: "single-hop send to route", - test: testSingleHopSendToRoute, - }, - { - name: "multi-hop send to route", - test: testMultiHopSendToRoute, - }, - { - name: "send to route error propagation", - test: testSendToRouteErrorPropagation, - }, - { - name: "unannounced channels", - test: testUnannouncedChannels, - }, - { - name: "private channels", - test: testPrivateChannels, - }, - { - name: "invoice routing hints", - test: testInvoiceRoutingHints, - }, - { - name: "multi-hop payments over private channels", - test: testMultiHopOverPrivateChannels, - }, - { - name: "multiple channel creation and update subscription", - test: testBasicChannelCreationAndUpdates, - }, - { - name: "invoice update subscription", - test: testInvoiceSubscriptions, - }, - { - name: "multi-hop htlc error propagation", - test: testHtlcErrorPropagation, - }, - { - name: "reject onward htlc", - test: testRejectHTLC, - }, - // TODO(roasbeef): multi-path integration test - { - name: "node announcement", - test: testNodeAnnouncement, - }, - { - name: "node sign verify", - test: testNodeSignVerify, - }, - { - name: "derive shared key", - test: testDeriveSharedKey, - }, - { - name: "async payments benchmark", - test: testAsyncPayments, - }, - { - name: "async bidirectional payments", - test: testBidirectionalAsyncPayments, - }, - { - name: "switch circuit persistence", - test: testSwitchCircuitPersistence, - }, - { - name: "switch offline delivery", - test: testSwitchOfflineDelivery, - }, - { - name: "switch offline delivery persistence", - test: testSwitchOfflineDeliveryPersistence, - }, - { - name: "switch offline delivery outgoing offline", - test: testSwitchOfflineDeliveryOutgoingOffline, - }, - { - // TODO(roasbeef): test always needs to be last as Bob's state - // is borked since we trick him into 
attempting to cheat Alice? - name: "revoked uncooperative close retribution", - test: testRevokedCloseRetribution, - }, - { - name: "failing link", - test: testFailingChannel, - }, - { - name: "garbage collect link nodes", - test: testGarbageCollectLinkNodes, - }, - { - name: "abandonchannel", - test: testAbandonChannel, - }, - { - name: "revoked uncooperative close retribution zero value remote output", - test: testRevokedCloseRetributionZeroValueRemoteOutput, - }, - { - name: "revoked uncooperative close retribution remote hodl", - test: testRevokedCloseRetributionRemoteHodl, - }, - { - name: "revoked uncooperative close retribution altruist watchtower", - test: testRevokedCloseRetributionAltruistWatchtower, - }, - { - name: "data loss protection", - test: testDataLossProtection, - }, - { - name: "query routes", - test: testQueryRoutes, - }, - { - name: "route fee cutoff", - test: testRouteFeeCutoff, - }, - { - name: "send update disable channel", - test: testSendUpdateDisableChannel, - }, - { - name: "streaming channel backup update", - test: testChannelBackupUpdates, - }, - { - name: "export channel backup", - test: testExportChannelBackup, - }, - { - name: "channel backup restore", - test: testChannelBackupRestore, - }, - { - name: "hold invoice sender persistence", - test: testHoldInvoicePersistence, - }, - { - name: "cpfp", - test: testCPFP, - }, - { - name: "macaroon authentication", - test: testMacaroonAuthentication, - }, - { - name: "bake macaroon", - test: testBakeMacaroon, - }, - { - name: "delete macaroon id", - test: testDeleteMacaroonID, - }, - { - name: "immediate payment after channel opened", - test: testPaymentFollowingChannelOpen, - }, - { - name: "external channel funding", - test: testExternalFundingChanPoint, - }, - { - name: "psbt channel funding", - test: testPsbtChanFunding, - }, - { - name: "sendtoroute multi path payment", - test: testSendToRouteMultiPath, - }, - { - name: "send multi path payment", - test: testSendMultiPathPayment, - 
}, - { - name: "REST API", - test: testRestAPI, - }, - { - name: "intercept forwarded htlc packets", - test: testForwardInterceptor, - }, - { - name: "wumbo channels", - test: testWumboChannels, - }, - { - name: "maximum channel size", - test: testMaxChannelSize, - }, - { - name: "connection timeout", - test: testNetworkConnectionTimeout, - }, - { - name: "stateless init", - test: testStatelessInit, - }, -} diff --git a/lnd/lntest/itest/lnd_wumbo_channels_test.go b/lnd/lntest/itest/lnd_wumbo_channels_test.go deleted file mode 100644 index 08dd8d12..00000000 --- a/lnd/lntest/itest/lnd_wumbo_channels_test.go +++ /dev/null @@ -1,90 +0,0 @@ -package itest - -import ( - "context" - "strings" - - "github.com/pkt-cash/pktd/btcutil" - "github.com/pkt-cash/pktd/lnd" - "github.com/pkt-cash/pktd/lnd/lntest" -) - -// testWumboChannels tests that only a node that signals wumbo channel -// acceptances will allow a wumbo channel to be created. Additionally, if a -// node is running with mini channels only enabled, then they should reject any -// inbound wumbo channel requests. -func testWumboChannels(net *lntest.NetworkHarness, t *harnessTest) { - // With all the channel types exercised, we'll now make sure the wumbo - // signalling support works properly. - // - // We'll make two new nodes, with one of them signalling support for - // wumbo channels while the other doesn't. - wumboNode, err := net.NewNode( - "wumbo", []string{"--protocol.wumbo-channels"}, - ) - if err != nil { - t.Fatalf("unable to create new node: %v", err) - } - defer shutdownAndAssert(net, t, wumboNode) - miniNode, err := net.NewNode("mini", nil) - if err != nil { - t.Fatalf("unable to create new node: %v", err) - } - defer shutdownAndAssert(net, t, miniNode) - - // We'll send coins to the wumbo node, as it'll be the one imitating - // the channel funding. 
- ctxb := context.Background() - err = net.SendCoins(ctxb, btcutil.UnitsPerCoin(), wumboNode) - if err != nil { - t.Fatalf("unable to send coins to carol: %v", err) - } - - // Next we'll connect both nodes, then attempt to make a wumbo channel - // funding request to the mini node we created above. The wumbo request - // should fail as the node isn't advertising wumbo channels. - err = net.EnsureConnected(ctxb, wumboNode, miniNode) - if err != nil { - t.Fatalf("unable to connect peers: %v", err) - } - - chanAmt := lnd.MaxBtcFundingAmount + 1 - _, err = net.OpenChannel( - ctxb, wumboNode, miniNode, lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - if err == nil { - t.Fatalf("expected wumbo channel funding to fail") - } - - // The test should indicate a failure due to the channel being too - // large. - if !strings.Contains(err.String(), "exceeds maximum chan size") { - t.Fatalf("channel should be rejected due to size, instead "+ - "error was: %v", err) - } - - // We'll now make another wumbo node to accept our wumbo channel - // funding. - wumboNode2, err := net.NewNode( - "wumbo2", []string{"--protocol.wumbo-channels"}, - ) - if err != nil { - t.Fatalf("unable to create new node: %v", err) - } - defer shutdownAndAssert(net, t, wumboNode2) - - // Creating a wumbo channel between these two nodes should succeed. 
- err = net.EnsureConnected(ctxb, wumboNode, wumboNode2) - if err != nil { - t.Fatalf("unable to connect peers: %v", err) - } - chanPoint := openChannelAndAssert( - ctxb, t, net, wumboNode, wumboNode2, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - closeChannelAndAssert(ctxb, t, net, wumboNode, chanPoint, false) -} diff --git a/lnd/lntest/itest/log_check_errors.sh b/lnd/lntest/itest/log_check_errors.sh deleted file mode 100755 index 15296845..00000000 --- a/lnd/lntest/itest/log_check_errors.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash - -BASEDIR=$(dirname "$0") - -echo "" - -# Filter all log files for errors, substitute variable data and match against whitelist. -cat $BASEDIR/*.log | grep "\[ERR\]" | \ -sed -r -f $BASEDIR/log_substitutions.txt | \ -sort | uniq | \ -grep -Fvi -f $BASEDIR/log_error_whitelist.txt - -# If something shows up (not on whitelist) exit with error code 1. -if [[ $? -eq 0 ]]; then - echo "" - echo "In the itest logs, the log line (patterns) above were detected." - echo "[ERR] lines are generally reserved for internal errors." - echo "Resolve the issue by either changing the log level or adding an " - echo "exception to log_error_whitelist.txt" - echo "" - - exit 1 -fi - -echo "No itest errors detected." -echo "" diff --git a/lnd/lntest/itest/log_error_whitelist.txt b/lnd/lntest/itest/log_error_whitelist.txt deleted file mode 100644 index 79a5cb4a..00000000 --- a/lnd/lntest/itest/log_error_whitelist.txt +++ /dev/null @@ -1,239 +0,0 @@ -